Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-28 10:28:36 +00:00)
Remove sample Dir and Remove testcases from test_runner (#1686)
* remove sample Dir and remove testcases from test_runner
* change git URL for test
* fix fmt issue
* remove unused policy and test yamls
* fix yaml path issue

Signed-off-by: vyankatesh <vyankatesh@neualto.com>
Co-authored-by: vyankatesh <vyankatesh@neualto.com>
This commit is contained in: parent 60653eb620, commit 04dc3ddfe3
112 changed files with 30 additions and 2243 deletions
.github/workflows/e2e.yaml (vendored): 8 changes

@@ -5,7 +5,6 @@ on:
       - 'main'
     paths-ignore:
       - 'README.md'
-      - 'samples/**'
       - 'charts/**'
       - 'docs/**'
   pull_request:
@@ -13,7 +12,6 @@ on:
       - 'main'
     paths-ignore:
       - 'README.md'
-      - 'samples/**'
       - 'charts/**'
       - 'docs/**'

@@ -27,7 +25,7 @@ jobs:
     - name: Unshallow
       run: git fetch --prune --unshallow

-    - name: Set up Go
+    - name: Set up Go
       uses: actions/setup-go@v2
       with:
         go-version: 1.14
@@ -39,6 +37,10 @@ jobs:
         key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
         restore-keys: |
           ${{ runner.os }}-go-

+    - name: Test Policy
+      run: |
+        make run_testcmd_policy
+
     - name: gofmt check
       run: |
Makefile: 5 changes

@@ -174,6 +174,11 @@ test-e2e:
 	go test ./test/e2e/... -v
 	$(eval export E2E="")

+#Test TestCmd Policy
+run_testcmd_policy:
+	go build -o kyvernoctl cmd/cli/kubectl-kyverno/main.go
+	./kyvernoctl test https://github.com/kyverno/policies/main
+
 # godownloader create downloading script for kyverno-cli
 godownloader:
 	godownloader .goreleaser.yml --repo kyverno/kyverno -o ./scripts/install-cli.sh --source="raw"
@@ -16,7 +16,7 @@ func Test_Apply(t *testing.T) {

 	testcases := []TestCase{
 		{
-			PolicyPaths:   []string{"../../../samples/best_practices/disallow_latest_tag.yaml"},
+			PolicyPaths:   []string{"../../../test/best_practices/disallow_latest_tag.yaml"},
 			ResourcePaths: []string{"../../../test/resources/pod_with_version_tag.yaml"},
 			expectedPolicyReports: []preport.PolicyReport{
 				{
@@ -31,7 +31,7 @@ func Test_Apply(t *testing.T) {
 			},
 		},
 		{
-			PolicyPaths:   []string{"../../../samples/best_practices/require_pod_requests_limits.yaml"},
+			PolicyPaths:   []string{"../../../test/best_practices/require_pod_requests_limits.yaml"},
 			ResourcePaths: []string{"../../../test/resources/pod_with_latest_tag.yaml"},
 			expectedPolicyReports: []preport.PolicyReport{
 				{
@@ -26,7 +26,7 @@ func Test_Exclude(t *testing.T) {
 	dir, err := os.Getwd()
 	baseDir := filepath.Dir(filepath.Dir(dir))
 	assert.NilError(t, err)
-	file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
+	file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
 	if err != nil {
 		t.Log(err)
 	}
@@ -57,7 +57,7 @@ func Test_CronJobOnly(t *testing.T) {
 	dir, err := os.Getwd()
 	baseDir := filepath.Dir(filepath.Dir(dir))
 	assert.NilError(t, err)
-	file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
+	file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
 	if err != nil {
 		t.Log(err)
 	}
@@ -90,7 +90,7 @@ func Test_CronJob_hasExclude(t *testing.T) {
 	baseDir := filepath.Dir(filepath.Dir(dir))
 	assert.NilError(t, err)

-	file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
+	file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
 	if err != nil {
 		t.Log(err)
 	}
@@ -126,7 +126,7 @@ func Test_CronJobAndDeployment(t *testing.T) {
 	dir, err := os.Getwd()
 	baseDir := filepath.Dir(filepath.Dir(dir))
 	assert.NilError(t, err)
-	file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
+	file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
 	if err != nil {
 		t.Log(err)
 	}
@@ -10,10 +10,6 @@ func Test_Mutate_Validate_qos(t *testing.T) {
 	testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
 }

-func Test_disallow_root_user(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/disallow_root_user.yaml")
-}
-
 func Test_disallow_priviledged(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/disallow_priviledged.yaml")
 }
@@ -22,18 +18,6 @@ func Test_validate_healthChecks(t *testing.T) {
 	testScenario(t, "/test/scenarios/other/scenario_validate_healthChecks.yaml")
 }

-func Test_validate_disallow_latest_tag(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/disallow_latest_tag.yaml")
-}
-
-func Test_validate_require_image_tag_not_latest_pass(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/disallow_latest_tag_pass.yaml")
-}
-
-func Test_validate_disallow_default_namespace(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/disallow_default_namespace.yaml")
-}
-
 func Test_validate_host_network_port(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/disallow_host_network_port.yaml")
 }
@@ -42,10 +26,6 @@ func Test_validate_host_PID_IPC(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/disallow_host_pid_ipc.yaml")
 }

-func Test_validate_ro_rootfs(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/require_ro_rootfs.yaml")
-}
-
 //TODO: support generate
 // func Test_add_ns_quota(t *testing.T) {
 // 	testScenario(t, "test/scenarios/samples/best_practices/add_ns_quota.yaml")
@@ -67,14 +47,6 @@ func Test_validate_volume_whitelist(t *testing.T) {
 	testScenario(t, "test/scenarios/other/scenario_validate_volume_whiltelist.yaml")
 }

-func Test_require_pod_requests_limits(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/require_pod_requests_limits.yaml")
-}
-
-func Test_require_probes(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/require_probes.yaml")
-}
-
 func Test_validate_disallow_bind_mounts_fail(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/disallow_bind_mounts_fail.yaml")
 }
@@ -83,22 +55,10 @@ func Test_validate_disallow_bind_mounts_pass(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/disallow_bind_mounts_pass.yaml")
 }

-func Test_validate_disallow_new_capabilities(t *testing.T) {
-	testScenario(t, "/test/scenarios/samples/best_practices/disallow_new_capabilities.yaml")
-}
-
-func Test_disallow_sysctls(t *testing.T) {
-	testScenario(t, "/test/scenarios/samples/best_practices/disallow_sysctls.yaml")
-}
-
-func Test_disallow_docker_sock_mount(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/best_practices/disallow_docker_sock_mount.yaml")
-}
-
 func Test_validate_disallow_helm_tiller(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_helm_tiller.yaml")
 }

 func Test_add_safe_to_evict(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/add_safe_to_evict.yaml")
 }
@@ -115,14 +75,6 @@ func Test_validate_restrict_automount_sa_token_pass(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/more/restrict_automount_sa_token.yaml")
 }

-func Test_restrict_node_port(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/more/restrict_node_port.yaml")
-}
-
-func Test_validate_restrict_image_registries(t *testing.T) {
-	testScenario(t, "test/scenarios/samples/more/restrict_image_registries.yaml")
-}
-
 func Test_known_ingress(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/more/restrict_ingress_classes.yaml")
 }
@@ -1,31 +0,0 @@
# Add default labels to objects

Labels are important pieces of metadata that can be attached to just about anything in Kubernetes. They are often used to tag various resources as being associated in some way. Kubernetes has no ability to assign a series of "default" labels to incoming objects. This sample policy shows you how to assign one or multiple labels by default to any object you wish. Here it shows adding a label called `custom-foo-label` with value `my-bar-default` to resources of type `Pod`, `Service`, and `Namespace`, but others can be added or removed as desired.

Alternatively, you may wish to only add the `custom-foo-label` if it is not already present in the creation request. For example, if a user/process submits a request for a new `Namespace` object and the manifest already includes the label `custom-foo-label` with a value of `custom-value`, Kyverno can leave this label untouched, which results in the newly-created object having the label `custom-foo-label=custom-value` instead of `my-bar-default`. In order to do this, enclose the label in the sample manifest in `+()` so the key name becomes `+(custom-foo-label)`. This conditional instructs Kyverno to only add the label if absent.

## Policy YAML

[add_default_labels.yaml](more/add_default_labels.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-default-labels
spec:
  background: false
  rules:
  - name: add-default-labels
    match:
      resources:
        kinds:
        - Pod
        - Service
        - Namespace
    mutate:
      patchStrategicMerge:
        metadata:
          labels:
            custom-foo-label: my-bar-default
```
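For reference, a minimal sketch of the `mutate` block with the conditional anchor described above applied (only the `labels` key changes; the rest of the policy is unchanged):

```yaml
mutate:
  patchStrategicMerge:
    metadata:
      labels:
        # Only added when the incoming object does not already set this label.
        +(custom-foo-label): my-bar-default
```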
@@ -1,42 +0,0 @@
# Default deny all ingress traffic

By default, Kubernetes allows communications across all pods within a cluster. Network policies, and a CNI that supports network policies, must be used to restrict communications.

A default `NetworkPolicy` should be configured for each namespace to default deny all ingress traffic to the pods in the namespace. Application teams can then configure additional `NetworkPolicy` resources to allow desired traffic to application pods from select sources.

## Policy YAML

[add_network_policy.yaml](best_practices/add_network_policy.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-networkpolicy
spec:
  rules:
  - name: default-deny-ingress
    match:
      resources:
        kinds:
        - Namespace
        name: "*"
    exclude:
      resources:
        namespaces:
        - "kube-system"
        - "default"
        - "kube-public"
        - "kyverno"
    generate:
      kind: NetworkPolicy
      name: default-deny-ingress
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          # select all pods in the namespace
          podSelector: {}
          policyTypes:
          - Ingress
````
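For a namespace named `team-a` (an illustrative name), the resource produced by the `generate` rule would look roughly like this, reconstructed from the `data` block above:

````yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
  namespace: team-a   # filled in from {{request.object.metadata.name}}
spec:
  # select all pods in the namespace
  podSelector: {}
  policyTypes:
  - Ingress
````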
@@ -1,62 +0,0 @@
# Configure namespace limits and quotas

To limit the amount of resources like CPU and memory, as well as the number of objects that may be consumed by workloads in a namespace, it is important to configure resource limits and quotas for each namespace. The generated default `LimitRange` sets the default requests and limits for a container.

## Additional Information

* [Resource Quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/)

## Policy YAML

[add_ns_quota.yaml](best_practices/add_ns_quota.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-ns-quota
spec:
  rules:
  - name: generate-resourcequota
    match:
      resources:
        kinds:
        - Namespace
    exclude:
      resources:
        namespaces:
        - "kube-system"
        - "default"
        - "kube-public"
        - "kyverno"
    generate:
      kind: ResourceQuota
      name: default-resourcequota
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          hard:
            requests.cpu: '4'
            requests.memory: '16Gi'
            limits.cpu: '4'
            limits.memory: '16Gi'
  - name: generate-limitrange
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: LimitRange
      name: default-limitrange
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          limits:
          - default:
              cpu: 500m
              memory: 1Gi
            defaultRequest:
              cpu: 200m
              memory: 256Mi
            type: Container
````
@@ -1,50 +0,0 @@
# Mutate pods with `emptyDir` and `hostPath` with `safe-to-evict`

The Kubernetes cluster autoscaler does not evict pods that use `hostPath` or `emptyDir` volumes. To allow eviction of these pods, the following annotation must be added to the pods:

````yaml
cluster-autoscaler.kubernetes.io/safe-to-evict: true
````

This policy matches and mutates pods with `emptyDir` and `hostPath` volumes to add the `safe-to-evict` annotation if it is not specified.

## Policy YAML

[add_safe_to_evict_annotation.yaml](best_practices/add_safe_to_evict.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-safe-to-evict
spec:
  rules:
  - name: "annotate-empty-dir"
    match:
      resources:
        kinds:
        - Pod
    mutate:
      patchStrategicMerge:
        metadata:
          annotations:
            +(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
        spec:
          volumes:
          - (emptyDir): {}
  - name: annotate-host-path
    match:
      resources:
        kinds:
        - Pod
    mutate:
      patchStrategicMerge:
        metadata:
          annotations:
            +(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
        spec:
          volumes:
          - (hostPath):
              path: "*"
````
@@ -1,39 +0,0 @@
# Add RuntimeDefault Seccomp Profile Security Context to pods

Seccomp profiles restrict the system calls that can be made from a process. The Linux kernel has a few hundred system calls, but most of them are not needed by any given process. If a process can be compromised and tricked into making other system calls, though, it may lead to a security vulnerability that could result in the compromise of the whole system. By restricting what system calls can be made, seccomp is a key component for building application sandboxes. See https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp.

## Policy YAML

[add_pod_default_seccompprofile.yaml](more/add_pod_default_seccompprofile.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-pod-default-seccompprofile
  annotations:
    policies.kyverno.io/category: Security
spec:
  background: false
  validationFailureAction: audit
  rules:
  - name: add-pod-default-seccompprofile
    match:
      resources:
        kinds:
        - Pod
    exclude:
      resources:
        namespaces:
        - "kube-system"
        - "kube-public"
        - "default"
        - "kyverno"
    mutate:
      patchStrategicMerge:
        spec:
          securityContext:
            seccompProfile:
              type: RuntimeDefault
```
@@ -1,49 +0,0 @@
# Check userID, groupID & fsGroup

All processes inside the pod can be made to run with a specific user ID and group ID by setting `runAsUser` and `runAsGroup`, respectively. `fsGroup` can be specified to make sure any file created in the volume will have the specified group ID. These options can be used to validate the IDs used for user and group.

## Policy YAML

[policy_validate_user_group_fsgroup_id.yaml](more/restrict_usergroup_fsgroup_id.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: validate-userid-groupid-fsgroup
spec:
  rules:
  - name: validate-userid
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "User ID should be 1000"
      pattern:
        spec:
          securityContext:
            runAsUser: '1000'
  - name: validate-groupid
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Group ID should be 3000"
      pattern:
        spec:
          securityContext:
            runAsGroup: '3000'
  - name: validate-fsgroup
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "fsgroup should be 2000"
      pattern:
        spec:
          securityContext:
            fsGroup: '2000'
````
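An illustrative Pod that would pass all three rules sets the matching IDs in its `securityContext` (the name and image below are placeholders):

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: id-checked-pod   # illustrative name
spec:
  securityContext:
    runAsUser: 1000
    runAsGroup: 3000
    fsGroup: 2000
  containers:
  - name: app
    image: busybox:1.28
````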
@@ -1,78 +0,0 @@
# Create Pod Anti-Affinity

In cases where you wish to run applications with multiple replicas, it may be required to ensure those Pods are separated from each other for availability purposes. While a `DaemonSet` resource would accomplish similar goals, your `Deployment` object may need fewer replicas than there are nodes. Pod anti-affinity rules ensure that Pods are separated from each other. Inversely, affinity rules ensure they are co-located.

This sample policy configures all Deployments with Pod anti-affinity rules with the `preferredDuringSchedulingIgnoredDuringExecution` option. It requires the topology key exists on all nodes with the key name of `kubernetes.io/hostname` and requires that the label `app` is applied to the Deployment.

In order to test the policy, you can use the sample Deployment manifest below.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: busybox
    distributed: required
  name: busybox
spec:
  replicas: 2
  selector:
    matchLabels:
      app: busybox
      distributed: required
  template:
    metadata:
      labels:
        app: busybox
        distributed: required
    spec:
      containers:
      - image: busybox:1.28
        name: busybox
        command: ["sleep", "9999"]
```

## More Information

* [Inter-pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity)

## Policy YAML

[create_pod_antiaffinity.yaml](more/create_pod_antiaffinity.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: insert-podantiaffinity
spec:
  rules:
  - name: insert-podantiaffinity
    match:
      resources:
        kinds:
        - Deployment
    preconditions:
    # This precondition ensures that the label `app` is applied to Pods within the Deployment resource.
    - key: "{{request.object.metadata.labels.app}}"
      operator: NotEquals
      value: ""
    mutate:
      patchStrategicMerge:
        spec:
          template:
            spec:
              # Add the `affinity` key and others if not already specified in the Deployment manifest.
              +(affinity):
                +(podAntiAffinity):
                  +(preferredDuringSchedulingIgnoredDuringExecution):
                  - weight: 1
                    podAffinityTerm:
                      topologyKey: "kubernetes.io/hostname"
                      labelSelector:
                        matchExpressions:
                        - key: app
                          operator: In
                          values:
                          - "{{request.object.metadata.labels.app}}"
```
@@ -1,28 +0,0 @@
# Disallow use of bind mounts (`hostPath` volumes)

The volume of type `hostPath` allows pods to use host bind mounts (i.e. directories and volumes mounted to a host path) in containers. Host resources can be used to access shared data or escalate privileges. Also, this couples pods to a specific host, and data persisted in the `hostPath` volume is coupled to the life of the node, leading to potential pod scheduling failures. It is highly recommended that applications are designed to be decoupled from the underlying infrastructure (in this case, nodes).

## Policy YAML

[disallow_bind_mounts.yaml](best_practices/disallow_bind_mounts.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-bind-mounts
spec:
  validationFailureAction: audit
  rules:
  - name: validate-hostPath
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Host path volumes are not allowed"
      pattern:
        spec:
          =(volumes):
          - X(hostPath): "null"
````
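As an illustration, a Pod like the following (names and paths are hypothetical) would be flagged by this policy because it declares a `hostPath` volume:

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: host-mount-pod   # illustrative name
spec:
  containers:
  - name: app
    image: busybox:1.28
    volumeMounts:
    - name: host-data
      mountPath: /data
  volumes:
  - name: host-data
    hostPath:
      path: /var/local/data   # any host path triggers the rule
````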
@@ -1,71 +0,0 @@
# Disallow use of default namespace

Kubernetes namespaces are an optional feature that provide a way to segment and isolate cluster resources across multiple applications and users. As a best practice, workloads should be isolated with namespaces. Namespaces should be required and the default (empty) namespace should not be used.

## Policy YAML

[disallow_default_namespace.yaml](best_practices/disallow_default_namespace.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-default-namespace
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
    policies.kyverno.io/category: Workload Isolation
    policies.kyverno.io/description: Kubernetes namespaces are an optional feature
      that provide a way to segment and isolate cluster resources across multiple
      applications and users. As a best practice, workloads should be isolated with
      namespaces. Namespaces should be required and the default (empty) namespace
      should not be used.
spec:
  validationFailureAction: audit
  rules:
  - name: validate-namespace
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Using 'default' namespace is not allowed"
      pattern:
        metadata:
          namespace: "!default"
  - name: require-namespace
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "A namespace is required"
      pattern:
        metadata:
          namespace: "?*"
  - name: validate-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "Using 'default' namespace is not allowed for podcontrollers"
      pattern:
        metadata:
          namespace: "!default"
  - name: require-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "A namespace is required for podcontrollers"
      pattern:
        metadata:
          namespace: "?*"
````
@@ -1,29 +0,0 @@
# Disallow Docker socket bind mount

The Docker socket bind mount allows access to the Docker daemon on the node. This access can be used for privilege escalation and to manage containers outside of Kubernetes, and hence should not be allowed.

## Policy YAML

[disallow_docker_sock_mount.yaml](best_practices/disallow_docker_sock_mount.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-docker-sock-mount
spec:
  validationFailureAction: audit
  rules:
  - name: validate-docker-sock-mount
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Use of the Docker Unix socket is not allowed"
      pattern:
        spec:
          =(volumes):
          - =(hostPath):
              path: "!/var/run/docker.sock"
````
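For example, a Pod mounting the socket as below (illustrative names) would violate the rule, since the `hostPath` matches `/var/run/docker.sock`:

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: docker-sock-pod   # illustrative name
spec:
  containers:
  - name: app
    image: busybox:1.28
    volumeMounts:
    - name: dockersock
      mountPath: /var/run/docker.sock
  volumes:
  - name: dockersock
    hostPath:
      path: /var/run/docker.sock
````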
@@ -1,29 +0,0 @@
# Disallow Helm Tiller

Tiller, in the [now-deprecated Helm v2](https://helm.sh/blog/helm-v2-deprecation-timeline/), has known security challenges. It requires administrative privileges and acts as a shared resource accessible to any authenticated user. Tiller can lead to privilege escalation, as restricted users can impact other users.

## Policy YAML

[disallow_helm_tiller.yaml](best_practices/disallow_helm_tiller.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-helm-tiller
spec:
  validationFailureAction: audit
  rules:
  - name: validate-helm-tiller
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Helm Tiller is not allowed"
      pattern:
        spec:
          containers:
          - name: "*"
            image: "!*tiller*"
````
@@ -1,41 +0,0 @@
# Disallow `hostNetwork` and `hostPort`

Using `hostPort` and `hostNetwork` allows pods to share the host networking stack, allowing potential snooping of network traffic across application pods.

## Policy YAML

[disallow_host_network_port.yaml](best_practices/disallow_host_network_port.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-host-network-port
spec:
  validationFailureAction: audit
  rules:
  - name: validate-host-network
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Use of hostNetwork is not allowed"
      pattern:
        spec:
          =(hostNetwork): false
  - name: validate-host-port
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Use of hostPort is not allowed"
      pattern:
        spec:
          containers:
          - name: "*"
            =(ports):
            - X(hostPort): "null"
````
@@ -1,30 +0,0 @@
# Disallow `hostPID` and `hostIPC`

Sharing the host's PID namespace allows an application pod to gain visibility of processes on the host, potentially exposing sensitive information. Sharing the host's IPC namespace also allows the container process to communicate with processes on the host.

To prevent pod containers from having visibility into the host process space, validate that `hostPID` and `hostIPC` are set to `false`.

## Policy YAML

[disallow_host_pid_ipc.yaml](best_practices/disallow_host_pid_ipc.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-host-pid-ipc
spec:
  validationFailureAction: audit
  rules:
  - name: validate-hostPID-hostIPC
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Use of host PID and IPC namespaces is not allowed"
      pattern:
        spec:
          =(hostPID): "false"
          =(hostIPC): "false"
````
@@ -1,39 +0,0 @@
# Disallow latest image tag

The `:latest` tag is mutable and can lead to unexpected errors if the upstream image changes. A best practice is to use an immutable tag that maps to a specific and tested version of an application image.

## Policy YAML

[disallow_latest_tag.yaml](best_practices/disallow_latest_tag.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-latest-tag
spec:
  validationFailureAction: audit
  rules:
  - name: require-image-tag
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "An image tag is required"
      pattern:
        spec:
          containers:
          - image: "*:*"
  - name: validate-image-tag
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Using a mutable image tag e.g. 'latest' is not allowed"
      pattern:
        spec:
          containers:
          - image: "!*:latest"
````
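An illustrative Pod that satisfies both rules pins an explicit, non-`latest` tag (name and image version are placeholders):

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: pinned-tag-pod   # illustrative name
spec:
  containers:
  - name: app
    image: nginx:1.19.3   # explicit tag: passes both rules
````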
@@ -1,33 +0,0 @@
# Disallow new capabilities

Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities that escalate the level of kernel access and allow other potentially dangerous behaviors. This policy enforces that containers cannot add new capabilities. Other policies can be used to set default capabilities.

## Policy YAML

[disallow_new_capabilities.yaml](best_practices/disallow_new_capabilities.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-new-capabilities
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
spec:
  validationFailureAction: audit
  rules:
  - name: validate-add-capabilities
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "New capabilities cannot be added"
      pattern:
        spec:
          containers:
          - name: "*"
            =(securityContext):
              =(capabilities):
                X(add): null
````
@@ -1,46 +0,0 @@
# Disallow privileged containers

Privileged containers are defined as any container where the container uid 0 is mapped to the host's uid 0. A process within a privileged container can get unrestricted host access. With `securityContext.allowPrivilegeEscalation` enabled, a process can gain privileges from its parent.

To disallow privileged containers and privilege escalation, run pod containers with `securityContext.privileged` set to `false` and `securityContext.allowPrivilegeEscalation` set to `false`.

## Policy YAML

[disallow_privileged.yaml](best_practices/disallow_privileged.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-privileged
spec:
  validationFailureAction: audit
  rules:
  - name: validate-privileged
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Privileged mode is not allowed. Set privileged to false"
      pattern:
        spec:
          containers:
          - =(securityContext):
              # https://github.com/kubernetes/api/blob/7dc09db16fb8ff2eee16c65dc066c85ab3abb7ce/core/v1/types.go#L5707-L5711
              # k8s default to false
              =(privileged): false
  - name: validate-allowPrivilegeEscalation
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Privileged mode is not allowed. Set allowPrivilegeEscalation to false"
      pattern:
        spec:
          containers:
          - securityContext:
              # https://github.com/kubernetes/api/blob/7dc09db16fb8ff2eee16c65dc066c85ab3abb7ce/core/v1/types.go#L5754
              allowPrivilegeEscalation: false
````
@@ -1,43 +0,0 @@
# Run as non-root user

By default, all processes in a container run as the root user (uid 0). To prevent potential compromise of container hosts, specify a non-root user and least privileged user ID when building the container image and require that application containers run as non-root users (i.e. set `runAsNonRoot` to `true`).

## Additional Information

* [Pod Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)

## Policy YAML

[disallow_root_user.yaml](best_practices/disallow_root_user.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-root-user
spec:
  validationFailureAction: audit
  rules:
  - name: validate-runAsNonRoot
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Running as root user is not allowed. Set runAsNonRoot to true"
      anyPattern:
      - spec:
          securityContext:
            runAsNonRoot: true
      - spec:
          securityContext:
            runAsUser: ">0"
      - spec:
          containers:
          - securityContext:
              runAsNonRoot: true
      - spec:
          containers:
          - securityContext:
              runAsUser: ">0"
````
@@ -1,38 +0,0 @@
# Disallow Secrets from environment variables

Secrets in Kubernetes are often sensitive pieces of information whose content should be protected. Although they can be used in many ways, when mounting them as environment variables, some applications can write their values to STDOUT, revealing this sensitive information in log files and potentially creating other exposure. As a best practice, Kubernetes Secrets should be mounted as volumes instead.

This sample policy checks any incoming Pod manifests and ensures that Secrets are not mounted as environment variables.

## More Information

* [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/)

## Policy YAML

[disallow_secrets_from_env_vars.yaml](more/disallow_secrets_from_env_vars.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: secrets-not-from-env-vars
spec:
  background: false
  validationFailureAction: audit
  rules:
  - name: secrets-not-from-env-vars
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Secrets must be mounted as volumes, not as environment variables."
      pattern:
        spec:
          containers:
          - name: "*"
            =(env):
            - =(valueFrom):
                X(secretKeyRef): "null"
```
@@ -1,32 +0,0 @@
# Disallow changes to kernel parameters

The Sysctl interface allows modifications to kernel parameters at runtime. In a Kubernetes pod, these parameters can be specified under `securityContext.sysctls`. Kernel parameter modifications can be used for exploits and should be restricted.

## Additional Information

* [List of supported namespaced sysctl interfaces](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)

## Policy YAML

[disallow_sysctls.yaml](best_practices/disallow_sysctls.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-sysctls
spec:
  validationFailureAction: audit
  rules:
  - name: validate-sysctls
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Changes to kernel parameters are not allowed"
      pattern:
        spec:
          securityContext:
            X(sysctls): null
````
@@ -1,88 +0,0 @@
# Require `livenessProbe` and `readinessProbe` are different

Pod liveness and readiness probes are often used as a check to ensure either the health of an already running Pod or when one is ready to receive traffic. For a sample policy with more information, which contains a validation rule that both are present, see [require_probes.yaml](RequirePodProbes.md).

This sample checks to ensure that `livenessProbe` and `readinessProbe` are configured differently. When these two probes are configured but are set up the same way, race conditions can result as Kubernetes continues to kill and recreate a Pod, never letting it enter a running state. This sample satisfies a common best practice in which these probes, if present, do not overlap and potentially cause this condition.

In this sample policy, a series of `deny` rules exist, one per container, to compare the `livenessProbe` map to the `readinessProbe`. If any container in a Pod (which may have several) is found to have identical probes, its creation will be blocked. Note that in this sample policy the `validationFailureAction` is set to `enforce` due to the use of a `deny` rule rather than a `validate` rule. By using the annotation `pod-policies.kyverno.io/autogen-controllers`, it modifies the default behavior and ensures that only Pods originating from DaemonSet, Deployment, and StatefulSet objects are validated.

If you may potentially have more than four containers in a Pod against which this policy should operate, duplicate one of the rules found within and change the array member of the `containers` key in fields `key` and `value`. For example, to match against a potential fifth container, duplicate a rule and change `containers[3]` to `containers[4]`, as shown in the sketch after the policy below.

## More Information

* [Kyverno Deny Rules](https://kyverno.io/docs/writing-policies/validate/#deny-rules)
* [Kyverno Auto-Gen Rules for Pod Controllers](https://kyverno.io/docs/writing-policies/autogen/)
* [Configure Liveness, Readiness and Startup Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)

## Policy YAML

[ensure_probes_different.yaml](more/ensure_probes_different.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: validate-probes
  annotations:
    # Only applies to pods originating from DaemonSet, Deployment, or StatefulSet.
    pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,StatefulSet
spec:
  validationFailureAction: enforce
  background: false
  rules:
  # Checks the first container in a Pod.
  - name: validate-probes-c0
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Liveness and readiness probes cannot be the same."
      # A `deny` rule is different in structure than a `validate` rule and inverts the check. It uses `conditions` written in JMESPath notation upon which to base its decisions.
      deny:
        conditions:
        # In this condition, it checks the entire map structure of the `readinessProbe` against that of the `livenessProbe`. If both are found to be equal, the Pod creation
        # request will be denied.
        - key: "{{ request.object.spec.containers[0].readinessProbe }}"
          operator: Equals
          value: "{{ request.object.spec.containers[0].livenessProbe }}"
  # Checks the second container in a Pod.
  - name: validate-probes-c1
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Liveness and readiness probes cannot be the same."
      deny:
        conditions:
        - key: "{{ request.object.spec.containers[1].readinessProbe }}"
          operator: Equals
          value: "{{ request.object.spec.containers[1].livenessProbe }}"
  # Checks the third container in a Pod.
  - name: validate-probes-c2
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Liveness and readiness probes cannot be the same."
      deny:
        conditions:
        - key: "{{ request.object.spec.containers[2].readinessProbe }}"
          operator: Equals
          value: "{{ request.object.spec.containers[2].livenessProbe }}"
  # Checks the fourth container in a Pod.
  - name: validate-probes-c3
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Liveness and readiness probes cannot be the same."
      deny:
        conditions:
        - key: "{{ request.object.spec.containers[3].readinessProbe }}"
          operator: Equals
          value: "{{ request.object.spec.containers[3].livenessProbe }}"
```
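Following the guidance above, a duplicated rule covering a hypothetical fifth container would look like this (only the rule name and array index change):

```yaml
# Checks the fifth container in a Pod.
- name: validate-probes-c4
  match:
    resources:
      kinds:
      - Pod
  validate:
    message: "Liveness and readiness probes cannot be the same."
    deny:
      conditions:
      - key: "{{ request.object.spec.containers[4].readinessProbe }}"
        operator: Equals
        value: "{{ request.object.spec.containers[4].livenessProbe }}"
```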
@@ -1,88 +0,0 @@
# Sample Policies

Sample policies are designed to be applied to your Kubernetes clusters with minimal changes.

The policies are mostly validation rules in `audit` mode (i.e. your existing workloads will not be impacted, but will be audited for policy compliance). It is recommended that all policies be tested and observed in a non-production environment before setting `enforce` mode.

## Best Practice Policies

These policies are highly recommended.

1. [Disallow root user](DisallowRootUser.md)
1. [Disallow privileged containers](DisallowPrivilegedContainers.md)
1. [Disallow new capabilities](DisallowNewCapabilities.md)
1. [Disallow kernel parameter changes](DisallowSysctls.md)
1. [Disallow use of bind mounts (`hostPath` volumes)](DisallowBindMounts.md)
1. [Disallow docker socket bind mount](DisallowDockerSockMount.md)
1. [Disallow `hostNetwork` and `hostPort`](DisallowHostNetworkPort.md)
1. [Disallow `hostPID` and `hostIPC`](DisallowHostPIDIPC.md)
1. [Disallow use of default namespace](DisallowDefaultNamespace.md)
1. [Disallow latest image tag](DisallowLatestTag.md)
1. [Disallow Helm Tiller](DisallowHelmTiller.md)
1. [Require read-only root filesystem](RequireReadOnlyRootFS.md)
1. [Require pod resource requests and limits](RequirePodRequestsLimits.md)
1. [Require pod `livenessProbe` and `readinessProbe`](RequirePodProbes.md)
1. [Add default network policy](AddDefaultNetworkPolicy.md)
1. [Add namespace quotas](AddNamespaceQuotas.md)
1. [Add `safe-to-evict` for pods with `emptyDir` and `hostPath` volumes](AddSafeToEvict.md)

## Additional Policies

These policies provide additional best practices and are worthy of close consideration. These policies may require specific changes for your workloads and environments.

1. [Restrict image registries](RestrictImageRegistries.md)
1. [Restrict `NodePort` services](RestrictNodePort.md)
1. [Restrict `LoadBalancer` services](RestrictLoadBalancer.md)
1. [Restrict auto-mount of service account credentials](RestrictAutomountSAToken.md)
1. [Restrict ingress classes](RestrictIngressClasses.md)
1. [Restrict User Group](CheckUserGroup.md)
1. [Require pods are labeled](RequireLabels.md)
1. [Require pods have certain labels](RequireCertainLabels.md)
1. [Require Deployments have multiple replicas](RequireDeploymentsHaveReplicas.md)
1. [Spread Pods across topology](SpreadPodsAcrossTopology.md)
1. [Create Pod Anti-Affinity](CreatePodAntiAffinity.md)
1. [Ensure Pod `livenessProbe` and `readinessProbe` are different](EnsurePodProbesDifferent.md)
1. [Disallow mounting Secrets as environment variables](DisallowSecretsFromEnvVars.md)
1. [Add default labels](AddDefaultLabels.md)
1. [Require all Pods drop all capabilities](RequirePodsDropAll.md)
1. [Add seccompProfile securityContext](AddSeccompProfile.md)

## Miscellaneous Policies

Policies in this group are either highly-specific, involve third-party CRDs, or may be variations on standard Best Practice or Additional policies.

1. [Require `imagePullPolicy` of `Always` for images not using `latest` tags](RequireImagePullPolicyAlways.md)
1. [Require images using `latest` tag not use `imagePullPolicy` of `Always`](RequireLatestImagesNotUseAlways.md)

## Applying the sample policies

To apply these policies to your cluster, install Kyverno and import the policies as follows:

### Install Kyverno

````sh
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/definitions/release/install.yaml
````

<small>[(installation docs)](../documentation/installation.md)</small>

### Apply Kyverno Policies

To start applying policies to your cluster, first clone the repo:

````bash
git clone https://github.com/kyverno/kyverno.git
cd kyverno
````

Import best practices from [here](best_practices):

````bash
kubectl create -f samples/best_practices
````

Import additional policies from [here](more):

````bash
kubectl create -f samples/more/
````
@@ -1,31 +0,0 @@
# Require certain labels

In many cases, you may require that at least a certain number of labels are assigned to each Pod from a select list of approved labels. This sample policy demonstrates the [`anyPattern`](https://kyverno.io/docs/writing-policies/validate/#anypattern---logical-or-across-multiple-validation-patterns) option in a policy by requiring any of the two possible labels defined within. A pod must either have the label `app.kubernetes.io/name` or `app.kubernetes.io/component` defined. If you would rather validate that all Pods have multiple labels in an AND fashion rather than OR, check out the [require_labels](RequireLabels.md) example.

## Policy YAML

[require_certain_labels.yaml](best_practices/require_certain_labels.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-certain-labels
spec:
  validationFailureAction: audit
  rules:
  - name: validate-certain-labels
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The label `app.kubernetes.io/name` or `app.kubernetes.io/component` is required."
      anyPattern:
      - metadata:
          labels:
            app.kubernetes.io/name: "?*"
      - metadata:
          labels:
            app.kubernetes.io/component: "?*"
```
@@ -1,40 +0,0 @@
# Require deployments have multiple replicas

Deployments with only a single replica produce availability concerns should that single replica fail. In most cases, you would want Deployment objects to have more than one replica to ensure continued availability, if not scalability.

This sample policy requires that Deployments have more than one replica, excluding a list of system namespaces.

## More Information

* [Kubernetes Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)

## Policy YAML

[require_deployments_have_multiple_replicas.yaml](more/require_deployments_have_multiple_replicas.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: deployment-has-multiple-replicas
spec:
  validationFailureAction: audit
  rules:
  - name: deployment-has-multiple-replicas
    match:
      resources:
        kinds:
        - Deployment
    exclude:
      resources:
        namespaces:
        - kyverno
        - kube-system
        - kube-node-lease
        - kube-public
    validate:
      message: "Deployments must have more than one replica to ensure availability."
      pattern:
        spec:
          replicas: ">1"
```
@@ -1,29 +0,0 @@
# Require `imagePullPolicy` is set to `Always` for images not using `latest` tags

By default, Kubernetes sets the `imagePullPolicy` for images which specify a tag to be `IfNotPresent`. In some cases, this may not be desired where the image could be rebuilt upstream. This sample policy ensures that all containers have their `imagePullPolicy` set to `Always`.

## Policy YAML

[imagepullpolicy-always.yaml](misc/imagepullpolicy-always.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: imagepullpolicy-always
spec:
  validationFailureAction: audit
  background: false
  rules:
  - name: imagepullpolicy-always
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The imagePullPolicy must be set to `Always` for all containers when a tag other than `latest` is used."
      pattern:
        spec:
          containers:
          - imagePullPolicy: Always
```
@@ -1,36 +0,0 @@
# Require labels

Labels are a fundamental and important way to assign descriptive metadata to Kubernetes resources, especially Pods. Labels are especially important as the number of applications grows and they are composed in different ways.

This sample policy requires that the label `app.kubernetes.io/name` be defined on all Pods. If you wish to require that all Pods have multiple labels defined (as opposed to [any labels from an approved list](RequireCertainLabels.md)), this policy can be altered by adding more labels.

## More Information

* [Common labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/)

## Policy YAML

[require_labels.yaml](best_practices/require_labels.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-labels
spec:
  validationFailureAction: audit
  rules:
  - name: check-for-labels
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The label `app.kubernetes.io/name` is required."
      pattern:
        metadata:
          labels:
            app.kubernetes.io/name: "?*"
            # You can add more labels if you wish the policy to validate more than just one is present. Uncomment the below line, or add new ones.
            #app.kubernetes.io/component: "?*"
```
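An illustrative Pod that passes this policy simply carries the required label (name, value, and image are placeholders):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: labeled-pod   # illustrative name
  labels:
    app.kubernetes.io/name: my-app   # any non-empty value satisfies "?*"
spec:
  containers:
  - name: app
    image: busybox:1.28
```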
@@ -1,32 +0,0 @@
# Require images using the `latest` tag to not set `imagePullPolicy` to `Always`

When using the `latest` tag for images, although generally [not a best practice](DisallowLatestTag.md), Kubernetes defaults its `imagePullPolicy` to `Always`. Since Docker Hub has instituted a [rate-limiting policy](https://www.docker.com/blog/what-you-need-to-know-about-upcoming-docker-hub-rate-limiting/), this could result in reaching that limit faster than anticipated, which could mean errors for other Pods in the cluster or across the enterprise. Ensuring those `latest`-tagged images do not use the default of `Always` is one way to ensure pulls happen only when needed.

This sample policy checks the `image` value and ensures that if `:latest` is defined, the `imagePullPolicy` must use something other than the value of `Always`. Note that if no tag is defined, Kyverno will not see that as a violation of the policy.

## Policy YAML

[latestimage-notalways.yaml](misc/latestimage-notalways.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: latestimage-notalways
spec:
  validationFailureAction: audit
  background: false
  rules:
  - name: latestimage-notalways
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "When using the `latest` tag, the `imagePullPolicy` must not use `Always`."
      pattern:
        spec:
          containers:
          - (image): "*:latest"
            imagePullPolicy: "!Always"
```
@@ -1,42 +0,0 @@
# Require `livenessProbe` and `readinessProbe`

Liveness and readiness probes need to be configured to correctly manage a pod's lifecycle during deployments, restarts, and upgrades.

For each pod, a periodic `livenessProbe` is performed by the kubelet to determine if the pod's containers are running or need to be restarted. A `readinessProbe` is used by services and deployments to determine if the pod is ready to receive network traffic.

In this sample policy, a validation rule checks to ensure that all Pods have both a liveness and a readiness probe defined by looking at the `periodSeconds` field. By using the annotation `pod-policies.kyverno.io/autogen-controllers`, it modifies the default behavior and ensures that only Pods originating from DaemonSet, Deployment, and StatefulSet objects are validated.

## More Information

* [Kyverno Auto-Gen Rules for Pod Controllers](https://kyverno.io/docs/writing-policies/autogen/)
* [Configure Liveness, Readiness and Startup Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)

## Policy YAML

[require_probes.yaml](best_practices/require_probes.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-pod-probes
  annotations:
    pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,StatefulSet
spec:
  validationFailureAction: audit
  rules:
  - name: validate-livenessProbe-readinessProbe
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Liveness and readiness probes are required"
      pattern:
        spec:
          containers:
          - livenessProbe:
              periodSeconds: ">0"
            readinessProbe:
              periodSeconds: ">0"
```
@@ -1,35 +0,0 @@
# Require pod resource requests and limits

Application workloads share cluster resources. Hence, it is important to manage resources assigned to each pod. It is recommended that `resources.requests.cpu`, `resources.requests.memory` and `resources.limits.memory` are configured per pod. Other resources such as GPUs may also be specified as needed.

If a namespace level request or limit is specified, defaults will automatically be applied to each pod based on the `LimitRange` configuration.

## Policy YAML

[require_pod_requests_limits.yaml](best_practices/require_pod_requests_limits.yaml)

````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-pod-requests-limits
spec:
  validationFailureAction: audit
  rules:
  - name: validate-resources
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "CPU and memory resource requests and limits are required"
      pattern:
        spec:
          containers:
          - resources:
              requests:
                memory: "?*"
                cpu: "?*"
              limits:
                memory: "?*"
````
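A Pod that satisfies this policy declares CPU and memory requests plus a memory limit, for example (names and values are illustrative):

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: sized-pod   # illustrative name
spec:
  containers:
  - name: app
    image: busybox:1.28
    resources:
      requests:
        memory: "64Mi"
        cpu: "100m"
      limits:
        memory: "128Mi"
````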
@@ -1,49 +0,0 @@
# Require Pods Drop All Capabilities

Containers may optionally ask for specific Linux capabilities without requiring root on the node. As a security best practice, containers should only specify exactly which capabilities they need. This starts with dropping all capabilities and only selectively adding ones back.

This example policy requires that all containers drop all capabilities.

## More information

* [Set Capabilities for a Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container)

## Policy YAML

[require_drop_all.yaml](more/require_drop_all.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: drop-all-capabilities
spec:
  validationFailureAction: audit
  rules:
  - name: drop-all-containers
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Drop all must be defined for every container in the Pod."
      pattern:
        spec:
          containers:
          - securityContext:
              capabilities:
                drop: ["ALL"]
  - name: drop-all-initcontainers
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Drop all must be defined for every container in the Pod."
      pattern:
        spec:
          initContainers:
          - securityContext:
              capabilities:
                drop: ["ALL"]
```
|
|
@ -1,29 +0,0 @@

# Require read-only root filesystem

A read-only root filesystem helps to enforce an immutable infrastructure strategy; the container only needs to write to mounted volumes that can persist state even if the container exits. An immutable root filesystem can also prevent malicious binaries from writing to the host system.
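
For context, a compliant container marks its root filesystem read-only and writes only to an explicitly mounted volume. A minimal sketch (pod name, image, and mount path are illustrative assumptions):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: ro-rootfs-demo    # hypothetical
spec:
  containers:
  - name: app             # hypothetical
    image: nginx
    securityContext:
      readOnlyRootFilesystem: true    # satisfies the policy below
    volumeMounts:
    - name: scratch
      mountPath: /tmp                 # writable scratch space
  volumes:
  - name: scratch
    emptyDir: {}
```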

## Policy YAML

[require_ro_rootfs.yaml](best_practices/require_ro_rootfs.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-ro-rootfs
spec:
  validationFailureAction: audit
  rules:
  - name: validate-readOnlyRootFilesystem
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Root filesystem must be read-only"
      pattern:
        spec:
          containers:
          - securityContext:
              readOnlyRootFilesystem: true
```

@ -1,27 +0,0 @@

# Restrict auto-mount of Service Account tokens

Kubernetes automatically mounts service account credentials in each pod. The service account may be assigned roles allowing pods to access API resources. To restrict access, opt out of auto-mounting tokens by setting `automountServiceAccountToken` to `false`.
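
The field can be set on the Pod spec (which is what the policy below checks) or on the ServiceAccount itself. A minimal sketch, with hypothetical names:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: no-sa-token    # hypothetical
spec:
  automountServiceAccountToken: false    # satisfies the policy below
  containers:
  - name: app          # hypothetical
    image: nginx
```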

## Policy YAML

[restrict_automount_sa_token.yaml](more/restrict_automount_sa_token.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-automount-sa-token
spec:
  validationFailureAction: audit
  rules:
  - name: validate-automountServiceAccountToken
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Auto-mounting of Service Account tokens is not allowed"
      pattern:
        spec:
          automountServiceAccountToken: false
```

@ -1,31 +0,0 @@

# Disallow unknown image registries

Images from unknown registries may not be scanned and secured. Requiring the use of trusted registries helps reduce threat exposure and is considered a common Kubernetes best practice.

This sample policy requires that all images come from either `k8s.gcr.io` or `gcr.io`. You can customize this policy to allow other or different image registries that you trust. Alternatively, you can invert the check to allow images from all other registries except one (or a list) by changing the `image` field to `image: "!k8s.gcr.io"`, as sketched below.
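
A minimal sketch of that inverted variation (only the `validate` block of the rule changes; the message text here is an assumption):

```yaml
validate:
  message: "Images from k8s.gcr.io are not allowed."   # hypothetical message
  pattern:
    spec:
      containers:
      # Allows images from any registry except k8s.gcr.io.
      - image: "!k8s.gcr.io"
```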

## Policy YAML

[restrict_image_registries.yaml](more/restrict_image_registries.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-image-registries
spec:
  validationFailureAction: audit
  rules:
  - name: validate-registries
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Unknown image registry."
      pattern:
        spec:
          containers:
          # Allows images from either k8s.gcr.io or gcr.io.
          - image: "k8s.gcr.io/* | gcr.io/*"
```

@ -1,28 +0,0 @@

# Restrict ingress classes

It can be useful to restrict Ingress resources to a set of known ingress classes that are allowed in the cluster. You can customize this policy to allow the ingress classes that are configured in your cluster.
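
For testing, an Ingress that carries one of the allowed classes in its annotation might look like this (the resource name and backend service are illustrative assumptions):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: test-ingress    # hypothetical
  annotations:
    kubernetes.io/ingress.class: nginx    # allowed by the policy below
spec:
  rules:
  - http:
      paths:
      - path: /
        pathType: Prefix
        backend:
          service:
            name: my-service    # hypothetical
            port:
              number: 80
```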

## Policy YAML

[restrict_ingress_classes.yaml](more/restrict_ingress_classes.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-ingress-classes
spec:
  validationFailureAction: audit
  rules:
  - name: validate-ingress
    match:
      resources:
        kinds:
        - Ingress
    validate:
      message: "Unknown ingress class"
      pattern:
        metadata:
          annotations:
            kubernetes.io/ingress.class: "F5 | nginx"
```

@ -1,29 +0,0 @@

# Restrict use of `LoadBalancer` services

A Kubernetes service of type `LoadBalancer` typically requires the use of a cloud provider to realize the infrastructure on the backend. Doing so has the side effect of increased cost and potentially bypassing existing `Ingress` resource(s), which are the preferred method of issuing traffic to a Kubernetes cluster. The use of Services of type `LoadBalancer` should therefore be carefully controlled or restricted across the cluster.

This sample policy checks for any services of type `LoadBalancer`. Change `validationFailureAction` to `enforce` to block their creation.
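
A Service that this policy would flag, for testing (the name and ports are illustrative assumptions):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: lb-service        # hypothetical
spec:
  type: LoadBalancer      # reported by the policy below
  ports:
  - port: 80
    targetPort: 8080
    protocol: TCP
```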

## Policy YAML

[restrict_loadbalancer.yaml](more/restrict_loadbalancer.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: no-loadbalancers
spec:
  validationFailureAction: audit
  rules:
  - name: no-LoadBalancer
    match:
      resources:
        kinds:
        - Service
    validate:
      message: "Service of type LoadBalancer is not allowed."
      pattern:
        spec:
          type: "!LoadBalancer"
```

@ -1,32 +0,0 @@

# Restrict use of `NodePort` services

A Kubernetes service of type `NodePort` uses a host port (on every node in the cluster) to receive traffic from any source.

Kubernetes Network Policies cannot be used to control traffic to host ports.

Although NodePort services can be useful, their use should be limited to services with additional upstream security checks.
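
For testing, a `NodePort` Service that the policy would flag (the name and port numbers are illustrative assumptions):

```yaml
apiVersion: v1
kind: Service
metadata:
  name: nodeport-service    # hypothetical
spec:
  type: NodePort        # reported by the policy below
  ports:
  - port: 80
    targetPort: 8080
    nodePort: 31080     # host port opened on every node
    protocol: TCP
```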

## Policy YAML

[restrict_node_port.yaml](more/restrict_node_port.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-nodeport
spec:
  validationFailureAction: audit
  rules:
  - name: validate-nodeport
    match:
      resources:
        kinds:
        - Service
    validate:
      message: "Services of type NodePort are not allowed"
      pattern:
        spec:
          type: "!NodePort"
```

@ -1,75 +0,0 @@

# Spread pods across topology

When a Kubernetes cluster spans multiple availability zones, it is often desirable to spread your Pods out among them in a way which controls where they land. This can be advantageous in ensuring that, should one of those zones fail, your application continues to run in a more predictable way and with less potential loss.

This sample policy configures all Deployments having the label `distributed: required` to be spread amongst hosts which are labeled with the key name of `zone`. It does this only to Deployments which do not already have the field `topologySpreadConstraints` set.

**NOTE:** When deploying this policy to a Kubernetes cluster older than version 1.19, some feature gate flags will need to be enabled. Please see the [More Information](#more-information) section below.

In order to test the policy, you can use this sample Deployment manifest below.

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: busybox
    distributed: required
  name: busybox
spec:
  replicas: 2
  selector:
    matchLabels:
      app: busybox
      distributed: required
  template:
    metadata:
      labels:
        app: busybox
        distributed: required
    spec:
      containers:
      - image: busybox:1.28
        name: busybox
        command: ["sleep", "9999"]
```

## More Information

* [Pod Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/)

## Policy YAML

[spread_pods_across_topology.yaml](more/spread_pods_across_topology.yaml)

```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: spread-pods
spec:
  rules:
  - name: spread-pods-across-nodes
    # Matches any Deployment with the label `distributed=required`
    match:
      resources:
        kinds:
        - Deployment
        selector:
          matchLabels:
            distributed: required
    # Mutates the incoming Deployment.
    mutate:
      patchStrategicMerge:
        spec:
          template:
            spec:
              # Adds the topologySpreadConstraints field if non-existent in the request.
              +(topologySpreadConstraints):
              - maxSkew: 1
                topologyKey: zone
                whenUnsatisfiable: DoNotSchedule
                labelSelector:
                  matchLabels:
                    distributed: required
```

@ -1,62 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-default-namespace
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
    policies.kyverno.io/category: Workload Isolation
    policies.kyverno.io/description: Kubernetes namespaces are an optional feature
      that provides a way to segment and isolate cluster resources across multiple
      applications and users. As a best practice, workloads should be isolated with
      namespaces. Namespaces should be required and the default (empty) namespace
      should not be used.
spec:
  validationFailureAction: audit
  rules:
  - name: validate-namespace
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Using 'default' namespace is not allowed"
      pattern:
        metadata:
          namespace: "!default"
  - name: require-namespace
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "A namespace is required"
      pattern:
        metadata:
          namespace: "?*"
  - name: validate-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "Using 'default' namespace is not allowed for podcontrollers"
      pattern:
        metadata:
          namespace: "!default"
  - name: require-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "A namespace is required for podcontrollers"
      pattern:
        metadata:
          namespace: "?*"

@ -1,24 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-docker-sock-mount
  annotations:
    policies.kyverno.io/category: Security
    policies.kyverno.io/description: The Docker socket bind mount allows access to the
      Docker daemon on the node. This access can be used for privilege escalation and
      to manage containers outside of Kubernetes, and hence should not be allowed.
spec:
  validationFailureAction: audit
  rules:
  - name: validate-docker-sock-mount
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Use of the Docker Unix socket is not allowed"
      pattern:
        spec:
          =(volumes):
          - =(hostPath):
              path: "!/var/run/docker.sock"

@ -1,23 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-helm-tiller
  annotations:
    policies.kyverno.io/category: Security
    policies.kyverno.io/description: Tiller has known security challenges. It requires administrative privileges and acts as a shared
      resource accessible to any authenticated user. Tiller can lead to privilege escalation as restricted users can impact other users.
spec:
  validationFailureAction: audit
  rules:
  - name: validate-helm-tiller
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Helm Tiller is not allowed"
      pattern:
        spec:
          containers:
          - name: "*"
            image: "!*tiller*"

@ -1,29 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-new-capabilities
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
    policies.kyverno.io/category: Security
    policies.kyverno.io/description: Linux allows defining fine-grained permissions using
      capabilities. With Kubernetes, it is possible to add capabilities that escalate the
      level of kernel access and allow other potentially dangerous behaviors. This policy
      enforces that containers cannot add new capabilities. Other policies can be used to set
      default capabilities.
spec:
  validationFailureAction: audit
  rules:
  - name: validate-add-capabilities
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "New capabilities cannot be added"
      pattern:
        spec:
          containers:
          - name: "*"
            =(securityContext):
              =(capabilities):
                X(add): null

|
@ -1,35 +0,0 @@
|
|||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: disallow-root-user
|
||||
annotations:
|
||||
policies.kyverno.io/category: Security
|
||||
policies.kyverno.io/description: By default, processes in a container run as a
|
||||
root user (uid 0). To prevent potential compromise of container hosts, specify a
|
||||
least privileged user ID when building the container image and require that
|
||||
application containers run as non root users.
|
||||
spec:
|
||||
validationFailureAction: audit
|
||||
rules:
|
||||
- name: validate-runAsNonRoot
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
message: "Running as root is not allowed. Set runAsNonRoot to true, or use runAsUser"
|
||||
anyPattern:
|
||||
- spec:
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
- spec:
|
||||
securityContext:
|
||||
runAsUser: ">0"
|
||||
- spec:
|
||||
containers:
|
||||
- securityContext:
|
||||
runAsNonRoot: true
|
||||
- spec:
|
||||
containers:
|
||||
- securityContext:
|
||||
runAsUser: ">0"
|
|
@ -1,30 +0,0 @@
|
|||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: require-pod-probes
|
||||
annotations:
|
||||
pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,StatefulSet
|
||||
policies.kyverno.io/category: Workload Management
|
||||
policies.kyverno.io/description: Liveness and readiness probes need to be configured to
|
||||
correctly manage a pods lifecycle during deployments, restarts, and upgrades. For each
|
||||
pod, a periodic `livenessProbe` is performed by the kubelet to determine if the pod's
|
||||
containers are running or need to be restarted. A `readinessProbe` is used by services
|
||||
and deployments to determine if the pod is ready to receive network traffic.
|
||||
spec:
|
||||
validationFailureAction: audit
|
||||
rules:
|
||||
- name: validate-livenessProbe-readinessProbe
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
message: "Liveness and readiness probes are required"
|
||||
pattern:
|
||||
spec:
|
||||
containers:
|
||||
- livenessProbe:
|
||||
periodSeconds: ">0"
|
||||
readinessProbe:
|
||||
periodSeconds: ">0"
|
||||
|
|
@ -1,25 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-ro-rootfs
  annotations:
    policies.kyverno.io/category: Security
    policies.kyverno.io/description: A read-only root file system helps to enforce an immutable
      infrastructure strategy; the container only needs to write to the mounted volume that
      persists the state. An immutable root filesystem can also prevent malicious binaries from
      writing to the host system.
spec:
  validationFailureAction: audit
  rules:
  - name: validate-readOnlyRootFilesystem
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Root filesystem must be read-only"
      pattern:
        spec:
          containers:
          - securityContext:
              readOnlyRootFilesystem: true

@ -1,19 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: imagepullpolicy-always
spec:
  validationFailureAction: audit
  background: false
  rules:
  - name: imagepullpolicy-always
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The imagePullPolicy must be set to `Always` for all containers when a tag other than `latest` is used."
      pattern:
        spec:
          containers:
          - imagePullPolicy: Always

@ -1,22 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-image-registries
  annotations:
    policies.kyverno.io/category: Workload Management
    policies.kyverno.io/description: Images from unknown registries may not be scanned and secured.
      Requiring use of known registries helps reduce threat exposure.
spec:
  validationFailureAction: audit
  rules:
  - name: validate-registries
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Unknown image registry."
      pattern:
        spec:
          containers:
          - image: "k8s.gcr.io/* | gcr.io/*"

@ -1,23 +0,0 @@

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-nodeport
  annotations:
    policies.kyverno.io/category: Workload Isolation
    policies.kyverno.io/description: A Kubernetes service of type NodePort uses a
      host port to receive traffic from any source. A 'NetworkPolicy' resource cannot be used
      to control traffic to host ports. Although 'NodePort' services can be useful, their use
      must be limited to services with additional upstream security checks.
spec:
  rules:
  - name: validate-nodeport
    match:
      resources:
        kinds:
        - Service
    validate:
      message: "Services of type NodePort are not allowed"
      pattern:
        spec:
          type: "!NodePort"

@ -1,12 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: check-root-user
spec:
#  securityContext:
#    runAsNonRoot: true
  containers:
  - name: check-root-user
    image: nginxinc/nginx-unprivileged
    securityContext:
      runAsNonRoot: true

@ -1,11 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  namespace: default
  labels:
    app: myapp
spec:
  containers:
  - name: nginx
    image: nginx

@ -1,15 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: pod-with-docker-sock-mount
spec:
  containers:
  - name: myshell
    image: "ubuntu:18.04"
    command:
    - /bin/sleep
    - "300"
  volumes:
  - name: dockersock
    hostPath:
      path: /var/run/docker.sock

@ -1,8 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: pod-helm-tiller
spec:
  containers:
  - name: helm-tiller
    image: docker.io/tiller:latest

@ -1,15 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: add-new-capabilities
spec:
  containers:
  - name: add-new-capabilities
    image: "ubuntu:18.04"
    command:
    - /bin/sleep
    - "300"
    securityContext:
      capabilities:
        add:
        - NET_ADMIN

@ -1,12 +0,0 @@

apiVersion: v1
kind: Service
metadata:
  name: my-service
spec:
  ports:
  - name: http
    nodePort: 31080
    port: 80
    protocol: TCP
    targetPort: 8080
  type: NodePort

@ -1,10 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  containers:
  - name: nginx
    image: nginx:latest

@ -1,16 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  containers:
  - name: nginx
    image: nginx
    resources:
      requests:
        memory: "256Mi"
        cpu: "0.5"
      limits:
        memory: "256Mi"

@ -1,16 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: myapp-pod
  labels:
    app: myapp
spec:
  containers:
  - name: goproxy
    image: k8s.gcr.io/goproxy:0.1
    ports:
    - containerPort: 8080
    readinessProbe:
      tcpSocket:
        port: 8080
      periodSeconds: 10

@ -1,10 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: ghost-with-readonly-rootfilesystem
spec:
  containers:
  - name: ghost
    image: ghost
    securityContext:
      readOnlyRootFilesystem: false

@ -1,8 +0,0 @@

apiVersion: v1
kind: Pod
metadata:
  name: k8s-nginx
spec:
  containers:
  - name: k8s-nginx
    image: k8s.gcr.io/nginx:1.7.9

@ -1,6 +1,6 @@

# file path relative to project root
input:
-  policy: samples/best_practices/add_network_policy.yaml
+  policy: test/best_practices/add_network_policy.yaml
  resource: test/resources/require_default_network_policy.yaml
expected:
  generation:

@ -1,6 +1,6 @@

# file path relative to project root
input:
-  policy: samples/best_practices/add_ns_quota.yaml
+  policy: test/best_practices/add_ns_quota.yaml
  resource: test/resources/require_namespace_quota.yaml
expected:
  generation:

@ -1,6 +1,6 @@

# file path is relative to project root
input:
-  policy: samples/best_practices/add_safe_to_evict.yaml
+  policy: test/best_practices/add_safe_to_evict.yaml
  resource: test/resources/pod-with-emptydir.yaml
expected:
  mutation:

@ -1,6 +1,6 @@

# file path is relative to project root
input:
-  policy: samples/best_practices/add_safe_to_evict.yaml
+  policy: test/best_practices/add_safe_to_evict.yaml
  resource: test/resources/pod-with-hostpath.yaml
expected:
  mutation:

@ -1,6 +1,6 @@

# file path is relative to project root
input:
-  policy: samples/best_practices/add_safe_to_evict.yaml
+  policy: test/best_practices/add_safe_to_evict.yaml
  resource: test/resources/pod-with-default-volume.yaml
expected:
  mutation:

@ -1,6 +1,6 @@

# file path relative to project root
input:
-  policy: samples/best_practices/disallow_bind_mounts.yaml
+  policy: test/best_practices/disallow_bind_mounts.yaml
  resource: test/resources/disallow_host_filesystem.yaml
expected:
  validation:

@ -1,6 +1,6 @@

# file path relative to project root
input:
-  policy: samples/best_practices/disallow_bind_mounts.yaml
+  policy: test/best_practices/disallow_bind_mounts.yaml
  resource: test/resources/disallow_host_filesystem_pass.yaml
expected:
  validation:

@ -1,23 +0,0 @@

# file path relative to project root
input:
  policy: samples/best_practices/disallow_default_namespace.yaml
  resource: test/resources/disallow_default_namespace.yaml
expected:
  validation:
    policyresponse:
      policy: disallow-default-namespace
      resource:
        kind: Pod
        apiVersion: v1
        # this is set to pass resource NS check
        # actual validation is defined through rule success=false
        namespace: 'default'
        name: myapp-pod
      rules:
      - name: validate-namespace
        type: Validation
        success: false
      - name: require-namespace
        type: Validation
        success: true

@ -1,18 +0,0 @@

# file path relative to project root
input:
  policy: samples/best_practices/disallow_docker_sock_mount.yaml
  resource: test/resources/disallow_docker_sock_mount.yaml
expected:
  validation:
    policyresponse:
      policy: disallow-docker-sock-mount
      resource:
        kind: Pod
        apiVersion: v1
        namespace: ''
        name: pod-with-docker-sock-mount
      rules:
      - name: validate-docker-sock-mount
        type: Validation
        message: "validation error: Use of the Docker Unix socket is not allowed. Rule validate-docker-sock-mount failed at path /spec/volumes/0/hostPath/path/"
        success: false

@ -1,6 +1,6 @@

# file path relative to project root
input:
-  policy: samples/best_practices/disallow_host_network_port.yaml
+  policy: test/best_practices/disallow_host_network_port.yaml
  resource: test/resources/disallow_host_network_hostport.yaml
expected:
  validation:

@ -1,6 +1,6 @@

# file path relative to project root
input:
-  policy: samples/best_practices/disallow_host_pid_ipc.yaml
+  policy: test/best_practices/disallow_host_pid_ipc.yaml
  resource: test/resources/disallow_hostpid_hostipc.yaml
expected:
  validation:

@ -1,20 +0,0 @@

# file path relative to project root
input:
  policy: samples/best_practices/disallow_latest_tag.yaml
  resource: test/resources/pod_with_latest_tag.yaml
expected:
  validation:
    policyresponse:
      policy: disallow-latest-tag
      resource:
        kind: Pod
        apiVersion: v1
        namespace: ''
        name: myapp-pod
      rules:
      - name: require-image-tag
        type: Validation
        success: true
      - name: validate-image-tag
        type: Validation
        success: false

@ -1,20 +0,0 @@

# file path relative to project root
input:
  policy: samples/best_practices/disallow_latest_tag.yaml
  resource: test/resources/pod_with_version_tag.yaml
expected:
  validation:
    policyresponse:
      policy: disallow-latest-tag
      resource:
        kind: Pod
        apiVersion: v1
        namespace: ''
        name: myapp-pod
      rules:
      - name: require-image-tag
        type: Validation
        success: true
      - name: validate-image-tag
        type: Validation
        success: true

@ -1,17 +0,0 @@

# file path relative to project root
input:
  policy: samples/best_practices/disallow_new_capabilities.yaml
  resource: test/resources/disallow_new_capabilities.yaml
expected:
  validation:
    policyresponse:
      policy: disallow-new-capabilities
      resource:
        kind: Pod
        apiVersion: v1
        namespace: ''
        name: "add-new-capabilities"
      rules:
      - name: validate-add-capabilities
        type: Validation
        success: false
