
Merge branch 'best_practice_policies' into 337_policy_description

# Conflicts:
#	samples/README.md
This commit is contained in:
Shuting Zhao 2019-10-14 13:21:10 -07:00
commit 75806146c6
149 changed files with 413 additions and 1024 deletions

View file

@ -1,27 +0,0 @@
# Best Practice Policies
| Best practice | Policy | scenario|
|------------------------------------------------|------------------------------------|---------------------|
| Run as non-root user | [policy_validate_deny_runasrootuser.yaml](policy_validate_deny_runasrootuser.yaml) | best_practices |
| Disallow automount api credentials | []() | best_practices |
| Disallow privileged and privilege escalation | [policy_validate_container_disallow_priviledgedprivelegesecalation.yaml](policy_validate_container_disallow_priviledgedprivelegesecalation.yaml) | best_practices |
| Disallow use of host networking and ports | [policy_validate_host_network_port.yaml](policy_validate_host_network_port.yaml) | best_practices |
| Disallow use of host filesystem | [policy_validate_host_path.yaml](policy_validate_host_path.yaml) |
| Disallow hostPID and hostIPC | [policy_validate_hostpid_hosipc.yaml](policy_validate_hostpid_hosipc.yaml) | best_practices |
| Require read only root filesystem | [policy_validate_not_readonly_rootfilesystem.yaml](policy_validate_not_readonly_rootfilesystem.yaml) | best_practices |
| Disallow node ports | [policy_validate_disallow_node_port.yaml](policy_validate_disallow_node_port.yaml) | best_practices |
| Allow trusted registries | [policy_validate_whitelist_image_registries.yaml](policy_validate_whitelist_image_registries.yaml) | best_practices |
| Require resource requests and limits | [policy_validate_pod_resources.yaml](policy_validate_pod_resources.yaml) | best_practices |
| Require pod liveness and readiness probes | [policy_validate_pod_probes.yaml](policy_validate_pod_probes.yaml) | best_practices |
| Require an image tag | [policy_validate_image_tag_notspecified_deny.yaml](policy_validate_image_tag_notspecified_deny.yaml) | best_practices |
| Disallow latest tag and pull IfNotPresent | [policy_validate_image_latest_ifnotpresent_deny.yaml](policy_validate_image_latest_ifnotpresent_deny.yaml) |
| Require a namespace (disallow default) | [policy_validate_default_namespace.yaml](policy_validate_default_namespace.yaml) | best_practices |
| Prevent mounting of default service account | [policy_validate_disallow_default_serviceaccount.yaml](policy_validate_disallow_default_serviceaccount.yaml) |
| Require a default network policy | [policy_validate_default_network_policy.yaml](policy_validate_default_network_policy.yaml) | best_practices |
| Require namespace quotas and limit ranges | [policy_validate_namespace_quota.yaml](policy_validate_namespace_quota.yaml) | best_practices |
| Allow an FSGroup that owns the pod's volumes | [policy_validate_fsgroup.yaml](policy_validate_fsgroup.yaml) |
| Require SELinux level of the container | [policy_validate_selinux_context.yaml](policy_validate_selinux_context.yaml) |
| Allow default Proc Mount type | [policy_validate_default_proc_mount.yaml](policy_validate_default_proc_mount.yaml) |
| Allow certain capability to be added | [policy_validate_container_capabilities.yaml](policy_validate_container_capabilities.yaml) |
| Allow local tcp/udp port range | [policy_validate_sysctl_configs.yaml](policy_validate_sysctl_configs.yaml) |
| Allowed volume plugins | [policy_validate_volume_whitelist.yaml](policy_validate_volume_whitelist.yaml) |

View file

@ -1,29 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-deny-privileged-disallowpriviligedescalation
spec:
validationFailureAction: "audit"
rules:
- name: deny-privileged-disallowpriviligedescalation
exclude:
resources:
namespaces:
- kube-system
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false"
anyPattern:
- spec:
securityContext:
allowPrivilegeEscalation: false
privileged: false
- spec:
containers:
- name: "*"
securityContext:
allowPrivilegeEscalation: false
privileged: false

View file

@ -1,26 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-namespace
spec:
rules:
- name: check-default-namespace
match:
resources:
kinds:
- Pod
validate:
message: "A none 'default' namespace is required"
pattern:
metadata:
namespace: "!default"
- name: check-namespace-exist
match:
resources:
kinds:
- Pod
validate:
message: "A namespace is required"
pattern:
metadata:
namespace: "?*"

View file

@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: defaultgeneratenetworkpolicy
spec:
rules:
- name: "default-networkpolicy"
match:
resources:
kinds:
- Namespace
name: "devtest"
generate:
kind: NetworkPolicy
name: defaultnetworkpolicy
data:
spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress
- Egress
# allow all ingress traffic from pods within this namespace
ingress:
- {}
# allow all egress traffic
egress:
- {}

View file

@ -1,21 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: disallow-node-port
spec:
rules:
- name: disallow-node-port
exclude:
resources:
namespaces:
- kube-system
match:
resources:
kinds:
- Service
validate:
message: "Disallow service of type NodePort"
pattern:
spec:
type: "!NodePort"

View file

@ -1,20 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-host-network-hostport
spec:
rules:
- name: validate-host-network-hostport
match:
resources:
kinds:
- Pod
validate:
message: "hostNetwork and hostPort are not allowed"
pattern:
spec:
hostNetwork: false
containers:
- name: "*"
ports:
- hostPort: null

View file

@ -1,18 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-image
spec:
rules:
- name: validate-tag
match:
resources:
kinds:
- Pod
validate:
message: "imagePullPolicy 'IfNotPresent' forbidden with image tag 'latest'"
pattern:
spec:
containers:
- (image): "*:latest"
imagePullPolicy: "!IfNotPresent"

View file

@ -1,29 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-image
spec:
rules:
- name: validate-tag
match:
resources:
kinds:
- Pod
validate:
message: "An image tag is required"
pattern:
spec:
containers:
- image: "*:*"
- name: validate-latest
match:
resources:
kinds:
- Pod
validate:
message: "imagePullPolicy 'Always' required with tag 'latest'"
pattern:
spec:
containers:
- (image): "*latest"
imagePullPolicy: Always

View file

@ -1,17 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-image-tag-notlatest
spec:
rules:
- name: image-tag-notlatest
match:
resources:
kinds:
- Pod
validate:
message: "image tag 'latest' forbidden"
pattern:
spec:
containers:
- image: "!*:latest"

View file

@ -1,17 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-image-tag-notspecified
spec:
rules:
- name: image-tag-notspecified
match:
resources:
kinds:
- Pod
validate:
message: "image tag not specified"
pattern:
spec:
containers:
- image: "*:*"

View file

@ -1,20 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-namespace-quota
spec:
rules:
- name: validate-namespace-quota
match:
resources:
kinds:
- Namespace
generate:
kind: ResourceQuota
name: "defaultresourcequota"
spec:
hard:
requests.cpu: "*"
requests.memory: "*"
limits.cpu: "*"
limits.memory: "*"

View file

@ -1,22 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-not-readonly-rootfilesystem
spec:
rules:
- name: validate-not-readonly-rootfilesystem
exclude:
resources:
namespaces:
- kube-system
match:
resources:
kinds:
- Pod
validate:
message: "Container should not have read-only rootfilesystem"
pattern:
spec:
containers:
- securityContext:
readOnlyRootFilesystem: false

View file

@ -1,25 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-probes
spec:
validationFailureAction: "audit"
rules:
- name: check-probes
match:
resources:
kinds:
- Pod
# exclude:
# namespaces:
# - kube-system
validate:
message: "Liveness and readiness probes are required"
pattern:
spec:
containers:
livenessProbe:
periodSeconds: ">0"
readinessProbe:
periodSeconds: ">0"

View file

@ -1,20 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-image-registry
spec:
rules:
- name: validate-image-registry
match:
resources:
kinds:
- Pod
validate:
message: "White list of image registries"
anyPattern:
- spec:
containers:
- image: "https://private.registry.io/*"
- spec:
containers:
- image: "*nirmata*"

View file

@ -1,19 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx
labels:
app: "nirmata-nginx"
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest

View file

@ -1,26 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: check-resources
spec:
# validationFailureAction: "audit"
rules:
- name: check-pod-resources
message: "CPU and memory resource requests and limits are required"
match:
resources:
kinds:
- Pod
name: myapp-pod
validate:
pattern:
spec:
containers:
- name: "*"
resources:
requests:
memory: "?*"
cpu: "?*"
limits:
memory: "?*"
cpu: "?*"

View file

@ -1,18 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: image-with-hostpath
labels:
app.type: prod
namespace: "my-namespace"
spec:
containers:
- name: image-with-hostpath
image: docker.io/nautiker/curl
volumeMounts:
- name: var-lib-etcd
mountPath: /var/lib
volumes:
- name: var-lib-etcd
hostPath:
path: /var/lib

View file

@ -1,11 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
containers:
- name: nginx
image: nginx:latest
imagePullPolicy: IfNotPresent

View file

@ -1,11 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
containers:
- name: nginx
image: nginx:1.12
imagePullPolicy: IfNotPresent

View file

@ -1,10 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
containers:
- name: nginx
image: nginx:latest

View file

@ -1,10 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
containers:
- name: nginx
image: nginx

View file

@ -1,10 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
containers:
- name: nginx
image: nginx:latest

View file

@ -1,4 +0,0 @@
kind: Namespace
apiVersion: v1
metadata:
name: "test-namespace-quota"

View file

@ -1,10 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: sec-ctx-unprivileged
spec:
# securityContext:
# runAsNonRoot: true
containers:
- name: imagen-with-hostpath
image: nginxinc/nginx-unprivileged

View file

@ -1,10 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: myapp-pod
labels:
app: myapp
spec:
containers:
- name: nginx
image: nginx

View file

@ -1,8 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
name: nirmata-nginx
spec:
containers:
- name: nirmata-nginx
image: nirmata/nginx

View file

@ -30,10 +30,6 @@ func Test_validate_healthChecks(t *testing.T) {
testScenario(t, "/test/scenarios/test/scenario_validate_healthChecks.yaml")
}
func Test_validate_imageRegistries(t *testing.T) {
testScenario(t, "/test/scenarios/test/scenario_validate_imageRegistries.yaml")
}
func Test_validate_nonRootUsers(t *testing.T) {
testScenario(t, "/test/scenarios/test/scenario_validate_nonRootUser.yaml")
}
@ -43,14 +39,6 @@ func Test_generate_networkPolicy(t *testing.T) {
}
// namespace is blank, not "default", as the testrunner evaluates the policy engine directly; the "default" is added by the kube-apiserver
func Test_validate_image_latest_ifnotpresent_deny(t *testing.T) {
testScenario(t, "/test/scenarios/test/scenario_validate_image_latest_ifnotpresent_deny.yaml")
}
func Test_validate_image_latest_ifnotpresent_pass(t *testing.T) {
testScenario(t, "test/scenarios/test/scenario_validate_image_latest_ifnotpresent_pass.yaml")
}
func Test_validate_image_pullpolicy_notalways_deny(t *testing.T) {
testScenario(t, "test/scenarios/test/scenario_validate_image_pullpolicy_notalways_deny.yaml")
@ -80,10 +68,6 @@ func Test_validate_disallow_default_namespace(t *testing.T) {
testScenario(t, "test/scenarios/test/scenario_validate_disallow_default_namespace.yaml")
}
func Test_validate_host_path(t *testing.T) {
testScenario(t, "test/scenarios/test/scenario_validate_host_path.yaml")
}
func Test_validate_host_network_port(t *testing.T) {
testScenario(t, "test/scenarios/test/scenario_validate_disallow_host_network_hostport.yaml")
}

View file

@ -1,6 +1,6 @@
# Best Practice Policies
Best practice policies are recommended policies that can be applied to yoru Kubernetes clusters with minimal changes. To import these policies [install Kyverno](../documentation/installation.md) and import the resources as follows:
Best practice policies are designed to be applied to your Kubernetes clusters with minimal changes. To import these policies [install Kyverno](../documentation/installation.md) and import the resources as follows:
````bash
kubectl create -f https://github.com/nirmata/kyverno/raw/master/samples/best_practices/
@ -8,10 +8,9 @@ kubectl create -f https://github.com/nirmata/kyverno/raw/master/samples/best_pra
More information on each best-practice policy is provided below:
## Run as non-root user
By default, processes in a container run as a root user (uid 0). To prevent compromising the host, a best practice is to specify a least privileged user ID when building the container image, and require that application containers run as non root users.
By default, processes in a container run as a root user (uid 0). To prevent potential compromise of container hosts, specify a least-privileged user ID when building the container image and require that application containers run as non-root users, i.e. set `runAsNonRoot` to `true`.
***Policy YAML***: [deny_runasrootuser.yaml](best_practices/deny_runasrootuser.yaml)
@ -19,129 +18,132 @@ By default, processes in a container run as a root user (uid 0). To prevent comp
* [Pod Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
## Disallow automounte API credentials
## Disallow automount of Service Account credentials
One can access the API from inside a pod using automatically mounted service account credentials by default. To restrict access, opt out of automounting API credentials for any pod by setting `automountServiceAccountToken` to `false`.
Kubernetes automounts default service account credentials in each pod. To restrict access, opt out of automounting credentials by setting `automountServiceAccountToken` to `false`.
***Policy YAML***: [disallow_automountingapicred.yaml](best_practices/disallow_automountingapicred.yaml)
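The linked policy file is not shown in this diff. A minimal sketch of such a rule, assuming the pattern style used by the other policies in this changeset (the policy and rule names are illustrative), could look like:

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  # illustrative name; see the linked file for the actual policy
  name: disallow-automount-sa-token
spec:
  validationFailureAction: "audit"
  rules:
  - name: disallow-automount-sa-token
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Automounting of service account credentials is not allowed"
      pattern:
        spec:
          # the pod must explicitly opt out of token automounting
          automountServiceAccountToken: false
````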
## Disallow use of default namespace
Namespaces are a way to divide cluster resources between multiple users. When multiple users or teams are sharing a single cluster, it is recommended to isolate different workloads and aviod using default namespace.
Namespaces are a way to segment and isolate cluster resources across multiple users. When multiple users or teams are sharing a single cluster, it is recommended to isolate different workloads and restrict use of the default namespace.
***Policy YAML***: [disallow_default_namespace.yaml](best_practices/disallow_default_namespace.yaml)
## Disallow use of host filesystem
Using the volume of type hostpath can easily lose data when a node crashes. Disable use of hostpath prevent data loss.
The volume of type `hostPath` binds pods to a specific host, and data persisted in the volume is dependent on the life of the node. In a shared cluster, it is recommended that applications are independent of hosts.
***Policy YAML***: [disallow_host_filesystem.yaml](best_practices/disallow_host_filesystem.yaml)
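The linked policy is not included in this diff. A minimal sketch, assuming that (as with the `hostPort: null` check used elsewhere in this changeset) a `null` value in a pattern asserts the field must be absent; names are illustrative:

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: disallow-host-filesystem   # illustrative name
spec:
  validationFailureAction: "audit"
  rules:
  - name: disallow-host-path
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "hostPath volumes are not allowed"
      pattern:
        spec:
          volumes:
          # every volume must have no hostPath field
          - name: "*"
            hostPath: null
````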
## Disallow `hostNetwork` and `hostPort`
Using `hostPort` and `hostNetwork` limits the number of nodes the pod can be scheduled on, as the pod is bound to the host thats its mapped to.
To avoid this limitation, use a validate rule to make sure these attributes are set to null and false.
Using `hostPort` and `hostNetwork` allows pods to share the host network stack, allowing potential snooping of network traffic from an application pod.
***Policy YAML***: [disallow_host_network_hostport.yaml](best_practices/disallow_host_network_hostport.yaml)
## Disallow `hostPID` and `hostIPC`
Sharing the host's PID namespace allows visibility of process on the host, potentially exposing porcess information.
Sharing the host's IPC namespace allows container process to communicate with processes on the host.
To avoid pod container from having visilbility to host process space, we can check `hostPID` and `hostIPC` are set as `false`.
Sharing the host's PID namespace allows visibility of processes on the host, potentially exposing process information.
Sharing the host's IPC namespace allows the container process to communicate with processes on the host. To prevent pod containers from having visibility into the host process space, validate that `hostPID` and `hostIPC` are set to `false`.
***Policy YAML***: [disallow_hostpid_hostipc.yaml](best_practices/disallow_hostpid_hostipc.yaml)
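A minimal sketch based on the `(hostPID): "!true"` and `hostIPC: false` pattern that appears elsewhere in this changeset; the policy name is illustrative:

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: disallow-hostpid-hostipc   # illustrative name
spec:
  validationFailureAction: "audit"
  rules:
  - name: validate-hostpid-hostipc
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Use of the host PID and IPC namespaces is not allowed"
      pattern:
        spec:
          # hostPID, when present, must not be true; hostIPC must be false
          (hostPID): "!true"
          hostIPC: false
````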
## Disallow node port
Node port ranged service is advertised to the public and can be scanned and probed from others exposing all nodes.
NetworkPolicy resources can currently only control NodePorts by allowing or disallowing all traffic on them. Unless required it is recommend to disable use to service type `NodePort`.
## Restrict service type `NodePort`
A Kubernetes service of type `NodePort` uses a host port to receive traffic from any source. A `NetworkPolicy` resource cannot be used to control traffic to host ports. Although `NodePort` services can be useful, their use must be limited to services with additional upstream security checks.
***Policy YAML***: [disallow_node_port.yaml](best_practices/disallow_node_port.yaml)
## Disable privileged containers
A process within priveleged containers get almost the same priveleges that are available to processes outside a container providing almost unrestricited host access. With `securityContext.allowPrivilegeEscalation` enabled the process can gain ore priveleges that its parent.
To restrcit the priveleges it is recommend to run pod containers with `securityContext.priveleged` as `false` and
`allowPrivilegeEscalation` as `false`
Privileged containers are defined as any container where the container's uid 0 is mapped to the host's uid 0. A process within a privileged container can get unrestricted host access. With `securityContext.allowPrivilegeEscalation` enabled, a process can gain more privileges than its parent.
To disallow privileged containers and the escalation of privileges, it is recommended to run pod containers with `securityContext.privileged` set to `false` and `allowPrivilegeEscalation` set to `false`.
***Policy YAML***: [disallow_priviledged_priviligedescalation.yaml](best_practices/disallow_priviledged_priviligedescalation.yaml)
## Default deny all ingress traffic
When no policies exist in a namespace, Kubernetes allows all ingress and egress traffic to and from pods in that namespace. A "default" isolation policy for a namespace denys any ingress traffic to the pods in that namespace, this ensures that even pods that arent selected by any other NetworkPolicy will still be isolated.
By default, Kubernetes allows all ingress and egress traffic to and from pods within a cluster. A "default" `NetworkPolicy` resource for a namespace should be used to deny all ingress traffic to the pods in that namespace. Additional `NetworkPolicy` resources can then be configured to allow desired traffic to application pods.
***Policy YAML***: [require_default_network_policy.yaml](best_practices/require_default_network_policy.yaml)
## Disallow latest image tag
Using the `:latest` tag when deploying containers in production makes it harder to track which version of the image is running and more difficult to roll back properly. Specifying a none latest image tag prevents a lot of errors from occurring when versions are mismatched.
The `:latest` tag is mutable and can lead to unexpected errors if the image changes. A best practice is to use an immutable tag that maps to a specific version of an application pod.
***Policy YAML***: [require_image_tag_not_latest.yaml](best_practices/require_image_tag_not_latest.yaml)
## Configure namespace limits and quotas
## Default namespace quotas
To limit the number of objects, as well as the total amount of compute that may be consumed by an application, it is important to create resource limits and quotas for each namespace.
In order to limit the quantity of objects, as well as the total amount of compute resources that may be consumed by an application, it is essential for the cluster administrator to create a resource quota for each namespace.
***Policy YAML***: [require_namespace_quota.yaml](best_practices/require_namespace_quota.yaml)
**Additional Information**
* [Resource Quota](https://kubernetes.io/docs/concepts/policy/resource-quotas/)
***Policy YAML***: [require_namespace_quota.yaml](best_practices/require_namespace_quota.yaml)
## Require pod resource requests and limits
## Require resource quota
When several users or teams share a cluster with a fixed number of nodes, there is a concern that one team could use more than its fair share of resources. To prevent a team taking up more than their fair share of the cluster, it is usually a best practice to configure resource quota for the application.
As application workloads share cluster resources, it is important to limit resources requested and consumed by each pod. It is recommended to require `resources.requests` and `resources.limits` per pod. If a namespace level request or limit is specified, defaults will automatically be applied to each pod based on the `LimitRange` configuration.
***Policy YAML***: [require_pod_requests_limits.yaml](best_practices/require_pod_requests_limits.yaml)
## Default health probe
## Require `livenessProbe` and `readinessProbe`
Setting the health probe ensures an application is highly-avaiable and resilient. Health checks are a simple way to let the system know if an application is broken, and it helps the application quickly recover from failure.
For each pod, a `livenessProbe` is carried out by the kubelet to determine when to restart a container. A `readinessProbe` is used by services and deployments to determine if the pod is ready to receive network traffic.
Both liveness and readiness probes need to be configured to manage the pod lifecycle during restarts and upgrades.
***Policy YAML***: [require_probes.yaml](best_practices/require_probes.yaml)
## Read-only root filesystem
A read-only root file system helps to enforce an immutable infrastrucutre strategy, the container only need to write on mounted volume that persist the state. An immutable root filesystem can also prevent malicious binaries from writing to the host system.
A read-only root file system helps to enforce an immutable infrastructure strategy; the container only needs to write on the mounted volume that persists the state. An immutable root filesystem can also prevent malicious binaries from writing to the host system.
***Policy YAML***: [require_readonly_rootfilesystem.yaml](best_practices/require_readonly_rootfilesystem.yaml)
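The linked policy is not shown in this diff. A minimal sketch that requires a read-only root filesystem for every container; names are illustrative:

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: require-readonly-rootfilesystem   # illustrative name
spec:
  validationFailureAction: "audit"
  rules:
  - name: validate-readonly-rootfilesystem
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Root filesystem must be read-only"
      pattern:
        spec:
          containers:
          # every container must set a read-only root filesystem
          - securityContext:
              readOnlyRootFilesystem: true
````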
## Trusted image registries
## Disallow unknown image registries
Images from unrecognized registry can introduce complexity to maintain the application. By specifying trusted registries help reducing such complexity. Follow instructoin [here](https://github.com/nirmata/kyverno/blob/master/documentation/writing-policies-validate.md#operators) to add allowed registries using `OR` operator.
Images from unknown registries may not be scanned and secured. Requiring use of known registries helps reduce threat exposure. You can customize this policy to allow image registries that you trust.
***Policy YAML***: [trusted_image_registries.yaml](best_practices/trusted_image_registries.yaml)
# Additional Policies
Additional policies list some policies that can also assist in maintaing kubernetes clusters.
# More Policies
The policies listed here provide additional best practices that should be considered for production use. These policies may require workload-specific configuration.
## Assign Linux capabilities inside Pod
Linux divides the privileges traditionally, associated with superuser into distinct units, known as capabilities, which can be independently enabled or disabled by listing them in `securityContext.capabilites`.
Linux divides the privileges traditionally associated with superuser into distinct units, known as capabilities, which can be independently enabled or disabled by listing them in `securityContext.capabilities`.
***Policy YAML***: [policy_validate_container_capabilities.yaml](best_practices/policy_validate_container_capabilities.yaml)
***Policy YAML***: [policy_validate_container_capabilities.yaml](more/policy_validate_container_capabilities.yaml)
**Additional Information**
* [List of linux capabilities](https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h)
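A minimal sketch of such a rule; `NET_ADMIN` is only a placeholder for whatever capabilities you choose to allow, and the names are illustrative:

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-container-capabilities   # illustrative name
spec:
  validationFailureAction: "audit"
  rules:
  - name: validate-container-capabilities
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Allow certain linux capability"
      pattern:
        spec:
          containers:
          # restrict added capabilities to the allowed list (placeholder: NET_ADMIN)
          - securityContext:
              capabilities:
                add: ["NET_ADMIN"]
````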
## Check userID, groupIP & fsgroup used inside a Pod
All processes inside the pod can be made to run with specific user and groupID by setting runAsUser and runAsGroup respectively. fsGroup can be specified to make sure any file created in the volume with have the specified groupID. These options can be used validate the IDs used for user and group.
***Policy YAML***: [policy_validate_container_capabilities.yaml](best_practices/policy_validate_user_group_fsgroup_id.yaml)
## Check userID, groupID & fsGroup used inside a Pod
All processes inside the pod can be made to run with a specific user ID and group ID by setting `runAsUser` and `runAsGroup` respectively. `fsGroup` can be specified to make sure any file created in the volume will have the specified group ID. These options can be used to validate the IDs used for user and group.
***Policy YAML***: [policy_validate_user_group_fsgroup_id.yaml](more/policy_validate_user_group_fsgroup_id.yaml)
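A minimal sketch combining a user ID and an fsGroup check; the IDs 1000 and 2000 mirror example messages used elsewhere in this changeset and are illustrative:

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-userid-groupid-fsgroup   # illustrative name
spec:
  validationFailureAction: "audit"
  rules:
  - name: validate-userid
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "User ID should be 1000"
      pattern:
        spec:
          securityContext:
            runAsUser: 1000
  - name: validate-fsgroup
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "directory should have group ID 2000"
      pattern:
        spec:
          securityContext:
            fsGroup: 2000
````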
## Configure kernel parameters inside pod
Sysctl interface allows to modify kernel parameters at runtime and in the pod can be specified under `securityContext.sysctls`. If kernel parameters in the pod are to be modified should be handled cautiosly, and a policy with rules restricting these options will be helpful. We can control minimum and maximum port that a network connection can use as its source(local) port by checking net.ipv4.ip_local_port_range
The sysctl interface allows kernel parameters to be modified at runtime; for a pod, they can be specified under `securityContext.sysctls`. Modifying kernel parameters from a pod should be handled cautiously, and a policy with rules restricting these options is helpful. For example, we can control the minimum and maximum port that a network connection can use as its source (local) port by checking `net.ipv4.ip_local_port_range`.
***Policy YAML***: [policy_validate_container_capabilities.yaml](best_practices/policy_validate_user_group_fsgroup_id.yaml)
***Policy YAML***: [policy_validate_sysctl_configs.yaml](more/policy_validate_sysctl_configs.yaml)
**Additional Information**
* [List of supported namespaced sysctl interfaces](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
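A minimal sketch of such a rule; the port range value is a placeholder and the names are illustrative:

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-sysctl-configs   # illustrative name
spec:
  validationFailureAction: "audit"
  rules:
  - name: validate-ip-local-port-range
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "net.ipv4.ip_local_port_range must stay within the allowed range"
      pattern:
        spec:
          securityContext:
            sysctls:
            # the value below is a placeholder; set the range you allow
            - name: net.ipv4.ip_local_port_range
              value: "1024 65535"
````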

View file

@ -13,7 +13,7 @@ spec:
kinds:
- Pod
validate:
message: "A none 'default' namespace is required"
message: "Using 'default' namespace is restricted"
pattern:
metadata:
namespace: "!default"

View file

@ -13,7 +13,7 @@ spec:
kinds:
- Pod
validate:
message: "hostNetwork and hostPort are not allowed"
message: "Defining hostNetwork and hostPort are not allowed."
pattern:
spec:
(hostNetwork): false

View file

@ -8,7 +8,6 @@ metadata:
spec:
rules:
- name: deny-privileged-priviligedescalation
exclude:
match:
resources:
kinds:

View file

@ -6,10 +6,6 @@ spec:
validationFailureAction: "audit"
rules:
- name: deny-runasrootuser
exclude:
resources:
namespaces:
- kube-system
match:
resources:
kinds:

View file

@ -21,4 +21,4 @@ spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress
- Ingress

View file

@ -13,7 +13,7 @@ spec:
kinds:
- Pod
validate:
message: "image tag not specified"
message: "Image tag not specified"
pattern:
spec:
containers:
@ -24,7 +24,7 @@ spec:
kinds:
- Pod
validate:
message: "set image tag to a specific version"
message: "Using 'latest' image tag is restricted. Set image tag to a specific version"
pattern:
spec:
containers:

View file

@ -1,5 +1,5 @@
apiVersion: kyverno.io/v1alpha1
kind: Policy
kind: ClusterPolicy
metadata:
name: check-resource
annotations:

View file

@ -14,7 +14,7 @@ spec:
kinds:
- Pod
validate:
message: "Allow certain capability to be added"
message: "Allow certain linux capability"
pattern:
spec:
containers:

View file

@ -1,10 +1,11 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-hostpid-hostipc
name: validate-fsgroup
spec:
validationFailureAction: "audit"
rules:
- name: validate-hostpid-hostipc
- name: validate-fsgroup
exclude:
resources:
namespaces:
@ -14,8 +15,8 @@ spec:
kinds:
- Pod
validate:
message: "Disallow use of host's pid namespace and host's ipc namespace"
message: "directory should have group ID 2000"
pattern:
spec:
(hostPID): "!true"
hostIPC: false
securityContext:
fsGroup: 2000

View file

@ -13,7 +13,7 @@ spec:
kinds:
- Pod
validate:
message: "user ID should be 1000"
message: "User ID should be 1000"
pattern:
spec:
securityContext:
@ -24,7 +24,7 @@ spec:
kinds:
- Pod
validate:
message: "group ID should be 3000"
message: "Group ID should be 3000"
pattern:
spec:
securityContext:

View file

@ -1,27 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-cpu-memory
spec:
rules:
- name: check-pod-resources
match:
resources:
kinds:
- Pod
validate:
message: "CPU and memory resource requests and limits are required"
pattern:
spec:
containers:
# 'name: *' selects all containers in the pod
- name: "*"
resources:
limits:
# '?' requires 1 alphanumeric character and '*' means that there can be 0 or more characters.
# Using them together e.g. '?*' requires at least one character.
memory: "?*"
cpu: "?*"
requests:
memory: "?*"
cpu: "?*"

View file

@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: PolicyViolation
metadata:
name: pv1
spec:
policy: check-cpu-memory
resource:
kind: Pod
namespace: ""
name: pod1
rules:
- name: r1
type: Mutation
status: Failed
message: test mesaage for rule failure
---
apiVersion: kyverno.io/v1alpha1
kind: PolicyViolation
metadata:
name: pv2
spec:
policy: check-cpu-memory
resource:
kind: Pod
namespace: ""
name: pod1
rules:
- name: r1
type: Mutation
status: Failed
message: test mesaage for rule failure
---

View file

@ -1,25 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
cli: test
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nirmata/nginx:green
# imagePullPolicy: Always
ports:
- containerPort: 80
- name: nginx1
image: launcher.gcr.io/google/nginx1

View file

@ -1,86 +0,0 @@
apiVersion : kyverno.io/v1alpha1
kind : ClusterPolicy
metadata :
name : policy-deployment
spec :
rules:
- name: add-label
match:
resources:
kinds :
- Deployment
selector :
matchLabels :
cli: test
mutate:
patches:
- path: /metadata/labels/isMutated
op: add
value: "true"
overlay:
spec:
template:
spec:
containers:
# if the image nginx, set the imagePullPolicy to Always
- (image): "*nginx*"
imagePullPolicy: "Always"
- name: add-label2
match:
resources:
kinds :
- Deployment
selector :
matchLabels :
cli: test
mutate:
patches:
- path: /metadata/labels/app1
op: replace
value: "nginx_is_mutated"
- name: add-label3
match:
resources:
kinds :
- Deployment
selector :
matchLabels :
cli: test
mutate:
patches:
- path: /metadata/labels/app2
op: add
value: "nginx_is_mutated2"
- name: check-image
match:
resources:
kinds :
- Deployment
selector :
matchLabels :
cli: test
validate:
message: "The imagePullPolicy must be Always when using image nginx"
pattern:
spec:
template:
spec:
containers:
- (image): "*nginx*"
imagePullPolicy: "Always"
- name: check-registries
match:
resources:
kinds:
- Deployment
- StatefulSet
validate:
message: "Registry is not allowed"
pattern:
spec:
template:
spec:
containers:
- name: "*"
# Check allowed registries
image: "*nirmata/* | launcher.gcr.io/*"

View file

@ -1,7 +1,7 @@
# file path relative to project root
input:
policy: examples/cli/policy_deployment.yaml
resource: examples/cli/nginx.yaml
policy: test/scenarios/cli/policy_deployment.yaml
resource: test/scenarios/cli/nginx.yaml
expected:
passes: true
mutation:
@ -19,8 +19,8 @@ expected:
- "Rule check-image: Validation succesfully."
---
input:
policy: examples/cli/policy_deployment.yaml
resource: examples/cli/ghost.yaml
policy: test/scenarios/cli/policy_deployment.yaml
resource: test/scenarios/cli/ghost.yaml
expected:
passes: true
mutation:

View file

@ -1,28 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: defaultgeneratenetworkpolicy
spec:
rules:
- name: "default-networkpolicy"
match:
resources:
kinds:
- Namespace
name: "devtest"
generate:
kind: NetworkPolicy
name: defaultnetworkpolicy
data:
spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress
- Egress
# allow all ingress traffic from pods within this namespace
ingress:
- {}
# allow all egress traffic
egress:
- {}

View file

@ -1,27 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: check-container-security-context
spec:
# validationFailureAction: "audit"
rules:
- name: check-root-user
exclude:
resources:
namespaces:
- kube-system
match:
resources:
kinds:
- Pod
validate:
message: "Root user is not allowed. Set runAsNonRoot to true."
anyPattern:
- spec:
securityContext:
runAsNonRoot: true
- spec:
containers:
- name: "*"
securityContext:
runAsNonRoot: true

View file

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: lb1
namespace: test
spec:
selector:
app: app
ports:
- port: 8765 # random
targetPort: 9376 # random
type: LoadBalancer

View file

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: lb2
namespace: test
spec:
selector:
app: app
ports:
- port: 8765 # random
targetPort: 9376 # random
type: LoadBalancer

View file

@ -1,4 +0,0 @@
kind: Namespace
apiVersion: v1
metadata:
name: "test"

View file

@ -0,0 +1,11 @@
apiVersion: v1
kind: Pod
metadata:
name: check-privileged-cfg
spec:
containers:
- name: check-privileged-cfg
image: nginxinc/nginx-unprivileged
securityContext:
allowPrivilegeEscalation: true
privileged: false

View file

@ -1,21 +0,0 @@
# file path relative to project root
input:
policy: examples/cli/policy_deployment.yaml
resource: examples/cli/nginx.yaml
expected:
mutation:
patchedresource: test/output/nginx.yaml
policyresponse:
policy: policy-deployment
resource:
kind: Deployment
apiVersion: 'apps/v1'
namespace: ''
name: nginx-deployment
rules:
- name: add-label
type: Mutation
success: true
message: succesfully process JSON patches
# patches: `[{"path":"/metadata/labels/isMutated","op":"add","value":"true"},
# {"path":"/metadata/labels/app","op":"replace","value":"nginx_is_mutated"}]`

View file

@ -1,7 +1,7 @@
# file path relative to project root
input:
policy: samples/best_practices/require_default_network_policy.yaml
resource: test/manifest/require_default_network_policy.yaml
resource: test/scenarios/resources/require_default_network_policy.yaml
expected:
generation:
generatedResources:

View file

@ -1,6 +1,6 @@
# file path relative to project root
input:
policy: test/scenarios/policy_mutate_endpoint.yaml
policy: test/scenarios/mutate/policy_mutate_endpoint.yaml
resource: test/scenarios/resources/resource_mutate_endpoint.yaml
expected:
mutation:

View file

@ -1,6 +1,6 @@
# file path relative to project root
input:
policy: test/scenarios/policy_mutate_imagePullPolicy.yaml
policy: test/scenarios/mutate/policy_mutate_imagePullPolicy.yaml
resource: test/scenarios/resources/resource_mutate_imagePullPolicy.yaml
expected:
mutation:

View file

@ -0,0 +1,19 @@
# file path relative to project root
input:
policy: test/scenarios/mutate/policy_mutate_pod_disable_automountingapicred.yaml
resource: test/scenarios/mutate/resource_mutate_pod_disable_automountingapicred.yaml
expected:
mutation:
patchedresource: test/output/output_mutate_pod_disable_automoutingapicred.yaml
policyresponse:
policy: mutate-pod-disable-automoutingapicred
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: myapp-pod
rules:
- name: pod-disable-automoutingapicred
type: Mutation
message: "succesfully process overlay"
success: true

View file

@ -1,6 +1,6 @@
# file path relative to project root
input:
policy: test/scenarios/policy_mutate_validate_qos.yaml
policy: test/scenarios/mutate/policy_mutate_validate_qos.yaml
resource: test/scenarios/resources/resource_mutate_validate_qos.yaml
expected:
mutation:

View file

@ -1,20 +0,0 @@
# file path relative to project root
input:
policy: test/scenarios/policy_validate_loadblancer.yaml
resource: test/scenarios/resource_lb1.yaml
loadresources:
- examples/query/resource_ns_test.yaml
expected:
query:
policyresponse:
policy: query1
resource:
kind: Service
apiVersion: v1
namespace: test
name: lb1
rules:
- name: Max one service of type LoadBalancer in namespace test
type: Query
success: true
message: "Query rule Max one service of type LoadBalancer in namespace test success. (recieved) 1 == (expected) <=1"

View file

@ -1,20 +0,0 @@
# file path relative to project root
input:
policy: test/scenarios/policy_validate_no_loadblancer.yaml
resource: test/scenarios/resource_lb1.yaml
loadresources:
- test/scenarios/resource_ns_test.yaml
expected:
query:
policyresponse:
policy: query1
resource:
kind: Service
apiVersion: v1
namespace: test
name: lb1
rules:
- name: No service of type LoadBalancer in namespace test
type: Query
success: false
message: "Query rule No service of type LoadBalancer in namespace test failed, (recieved) 1!=(expected) 0"

View file

@ -1,7 +1,7 @@
# file path relative to project root
input:
policy: samples/best_practices/require_image_tag_not_latest.yaml
resource: test/manifest/require_image_tag_not_latest_deny.yaml
resource: test/scenarios/resources/require_image_tag_not_latest_deny.yaml
expected:
validation:
policyresponse:
@ -18,5 +18,5 @@ expected:
success: true
- name: image-tag-not-latest
type: Validation
message: "Validation rule 'image-tag-not-latest' failed at '/spec/containers/0/image/' for resource Pod//myapp-pod. set image tag to a specific version"
message: "Validation rule 'image-tag-not-latest' failed at '/spec/containers/0/image/' for resource Pod//myapp-pod. Using 'latest' image tag is restricted. Set image tag to a specific version"
success: false

View file

@ -1,7 +1,7 @@
# file path relative to project root
input:
policy: samples/best_practices/require_image_tag_not_latest.yaml
resource: test/manifest/require_image_tag_not_latest_notag.yaml
resource: test/scenarios/resources/require_image_tag_not_latest_notag.yaml
expected:
validation:
policyresponse:
@ -14,7 +14,7 @@ expected:
rules:
- name: image-tag-notspecified
type: Validation
message: Validation rule 'image-tag-notspecified' failed at '/spec/containers/0/image/' for resource Pod//myapp-pod. image tag not specified
message: Validation rule 'image-tag-notspecified' failed at '/spec/containers/0/image/' for resource Pod//myapp-pod. Image tag not specified
success: false
- name: image-tag-not-latest
type: Validation

View file

@ -1,7 +1,7 @@
# file path relative to project root
input:
policy: samples/best_practices/require_image_tag_not_latest.yaml
resource: examples/best_practices/resources/resource_validate_image_tag_latest_pass.yaml
resource: test/scenarios/resources/resource_validate_image_tag_latest_pass.yaml
expected:
validation:
policyresponse:

View file

@ -1,6 +1,6 @@
# file path relative to project root
input:
policy: test/scenarios/policy_validate_containerSecurityContext.yaml
policy: test/scenarios/validate/policy_validate_containerSecurityContext.yaml
resource: test/scenarios/resources/resource_validate_containerSecurityContext.yaml
expected:
validation:

View file

@ -1,8 +1,8 @@
# file path relative to project root
input:
policy: samples/additional/policy_validate_container_capabilities.yaml
resource: examples/best_practices/resources/resource_validate_container_capabilities.yaml
policy: samples/more/policy_validate_container_capabilities.yaml
resource: test/scenarios/resources/resource_validate_container_capabilities.yaml
expected:
validation:
policyresponse:
@ -15,5 +15,5 @@ expected:
rules:
- name: validate-container-capablities
type: Validation
message: "Validation rule 'validate-container-capablities' failed at '/spec/containers/0/securityContext/capabilities/add/0/' for resource Pod//add-capabilities. Allow certain capability to be added"
message: "Validation rule 'validate-container-capablities' failed at '/spec/containers/0/securityContext/capabilities/add/0/' for resource Pod//add-capabilities. Allow certain linux capability"
success: false

Some files were not shown because too many files have changed in this diff.