Mirror of https://github.com/kyverno/kyverno.git (synced 2025-04-08 10:04:25 +00:00)

Merge commit '37c25daa17ad046f739e74d803cb78d887805bb4' into 346_validate_policy

# Conflicts:
#	pkg/api/kyverno/v1alpha1/utils.go

Commit 7239b4d9b7: 241 changed files with 1780 additions and 2918 deletions
Changed files (tree view, truncated):

.travis.yml
README.md
definitions/
documentation/
examples/
  best_practices/
    README.md
    policy_validate_container_disallow_priviledgedprivelegesecalation.yaml
    policy_validate_default_network_policy.yaml
    policy_validate_disallow_node_port.yaml
    policy_validate_host_network_port.yaml
    policy_validate_host_path.yaml
    policy_validate_hostpid_hosipc.yaml
    policy_validate_image_latest_ifnotpresent_deny.yaml
    policy_validate_image_pullpolicy_notalways_deny.yaml
    policy_validate_image_registries.yaml
    policy_validate_image_tag.yaml
    policy_validate_image_tag_latest_deny.yaml
    policy_validate_image_tag_notspecified_deny.yaml
    policy_validate_not_readonly_rootfilesystem.yaml
    policy_validate_pod_probes.yaml
    resources/
  cli/
    policy_generate_networkPolicy.yaml
    policy_mutate_imagePullPolicy.yaml
    policy_validate_containerSecurityContext.yaml
    policy_validate_imageRegistries.yaml
    policy_validate_nonRootUser.yaml
    resources/
  test/
pkg/
  api/kyverno/v1alpha1/
  engine/
  namespace/
  policy/
  policyviolation/
  testrunner/
  webhooks/
samples/
  README.md
  best_practices/
    deny_runasrootuser.yaml
    disallow_automountingapicred.yaml
    disallow_default_namespace.yaml
    disallow_host_filesystem.yaml
    disallow_host_network_hostport.yaml
    disallow_hostpid_hostipc.yaml
    disallow_node_port.yaml
    disallow_priviledged_priviligedescalation.yaml
    policy_validate_deny_runasrootuser.yaml
    require_default_network_policy.yaml
    require_image_tag_not_latest.yaml
    require_namespace_quota.yaml
    require_pod_requests_limits.yaml
    require_probes.yaml
    require_readonly_rootfilesystem.yaml
    trusted_image_registries.yaml
  more/
test/
  ConfigMapGenerator-SecretGenerator/
  CronJob/
  DaemonSet/
  Deployment/
  Endpoint/
  HorizontalPodAutoscaler/
  Ingress/
  Job/
  LimitRange/
  Namespace/
  NetworkPolicy/
.travis.yml (18 changed lines)

@@ -6,7 +6,14 @@ go:
 branches:
   only:
   - master

+before_install:
+- |
+  if ! git diff --name-only $TRAVIS_COMMIT_RANGE | grep -qvE '(.md)|^(LICENSE)'
+  then
+    echo "Not running CI since only docs were changed."
+    exit
+  fi
+
 # Skip the install step. Don't `go get` dependencies. Only build with the code
 # in vendor/
 install: true
@@ -18,6 +25,11 @@ script:
 after_script:
 - curl -d "repo=https://github.com/nirmata/kyverno" https://goreportcard.com/checks

 # only push images if the branch is master
 after_success:
-- docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-- make docker-publish
+- |
+  if [ $TRAVIS_PULL_REQUEST == 'false' ]
+  then
+    docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
+    make docker-publish
+  fi
@@ -115,7 +115,7 @@ spec:

 ### 4. More examples

 Additional examples are available in [examples](/examples).
 Refer to a curated list of [kyverno policies](/samples/README.md) to follow kubernetes best practices.

 ## License

@@ -149,7 +149,6 @@ Tools like [Kustomize](https://github.com/kubernetes-sigs/kustomize) can be used
 * [Testing Policies](documentation/testing-policies.md)
   * [Using kubectl](documentation/testing-policies.md#Test-using-kubectl)
   * [Using the Kyverno CLI](documentation/testing-policies.md#Test-using-the-Kyverno-CLI)
 * [Examples](examples/)

 ## Roadmap
@@ -269,7 +269,7 @@ subjects:
   name: kyverno-service-account
   namespace: kyverno
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   namespace: kyverno
@@ -277,6 +277,9 @@ metadata:
   labels:
     app: kyverno
 spec:
+  selector:
+    matchLabels:
+      app: kyverno
   replicas: 1
   template:
     metadata:
@@ -209,7 +209,7 @@ subjects:
   name: kyverno-service-account
   namespace: kyverno
 ---
-apiVersion: extensions/v1beta1
+apiVersion: apps/v1
 kind: Deployment
 metadata:
   namespace: kyverno
@@ -217,6 +217,9 @@ metadata:
   labels:
     app: kyverno
 spec:
+  selector:
+    matchLabels:
+      app: kyverno
   replicas: 1
   template:
     metadata:
@@ -36,9 +36,11 @@ There is no operator for `equals` as providing a field value in the pattern requ
 ## Anchors
 | Anchor | Tag | Behavior |
 |--------|-----|----------|
-| Conditional | () | If tag with the given value is specified, then following resource elements must satisfy the conditions.<br>e.g. <br><code> (image):"*:latest" <br>imagePullPolicy: "!IfNotPresent"</code><br> If image has tag latest then, imagePullPolicy cannot be IfNotPresent. |
-| Equality | =() | if tag is specified, then it should have the provided value.<br>e.g.<br><code> =(hostPath):<br> path: "!/var/lib" </code><br> If hostPath is defined then the path cannot be /var/lib |
-| Existence | ^() | It can be specified on the list/array type only. If there exists at least one resource in the list that satisfies the pattern.<br>e.g. <br><code> ^(containers):<br> - image: nginx:latest </code><br> There must exist at least one container with image nginx:latest. |
+| Conditional | () | If tag with the given value is specified, then following resource elements must satisfy the conditions.<br/>e.g. <br/><code> (image):"*:latest" <br/> imagePullPolicy: "!IfNotPresent"</code> <br/> If image has tag latest then, imagePullPolicy cannot be IfNotPresent. |
+| Equality | =() | If tag is specified, then it should have the provided value.<br/>e.g.<br/><code> =(hostPath):<br/> path: "!/var/lib" </code><br/> If hostPath is defined then the path cannot be /var/lib |
+| Existence | ^() | It can be specified on the list/array type only. If there exists at least one resource in the list that satisfies the pattern.<br/>e.g. <br/><code> ^(containers):<br/> - image: nginx:latest </code><br/> There must exist at least one container with image nginx:latest. |
+| Negation | X() | A tag with a negation anchor cannot be present in the resource. The value of the tag is never evaluated, as the tag is not expected to be there. <br/>e.g. <br/><code> X(hostPath):</code><br/> The hostPath tag cannot be defined. |

 ## Example
 The next rule prevents the creation of Deployment, StatefulSet and DaemonSet resources without the label 'app' in the selector:
 ````yaml
@@ -98,13 +100,13 @@ spec :
     validate:
       pattern:
         spec:
-          containers:
-          - ^(name): "*"
+          ^(containers):
+          - (name): "*"
             resources:
               requests:
                 memory: "$(<=./../../limits/memory)"
               limits:
                 memory: "2048Mi"
 ````

 ### Allow OR across overlay pattern
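The negation anchor added in this change composes with the rest of the pattern grammar. A minimal policy sketch using it (the policy and rule names are illustrative; the `X(hostPath)` pattern mirrors the tests added later in this diff):

````yaml
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: disallow-host-path    # illustrative name
spec:
  rules:
  - name: disallow-host-path
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Host path volumes are not allowed"
      pattern:
        spec:
          volumes:
          # X() fails validation whenever the anchored tag is present;
          # its value is never evaluated
          - name: "*"
            X(hostPath): null
````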
@@ -1,21 +0,0 @@
-# Best Practice Policies
-
-| Best practice | Policy |
-|---------------|--------|
-| Run as non-root user | [policy_validate_deny_runasrootuser.yaml](policy_validate_deny_runasrootuser.yaml) |
-| Disallow privileged and privilege escalation | [policy_validate_container_disallow_priviledgedprivelegesecalation.yaml](policy_validate_container_disallow_priviledgedprivelegesecalation.yaml) |
-| Disallow use of host networking and ports | [policy_validate_host_network_port.yaml](policy_validate_host_network_port.yaml) |
-| Disallow use of host filesystem | [policy_validate_host_path.yaml](policy_validate_host_path.yaml) |
-| Disallow hostPID and hostIPC | [policy_validate_hostpid_hosipc.yaml](policy_validate_hostpid_hosipc.yaml) |
-| Require read only root filesystem | [policy_validate_not_readonly_rootfilesystem.yaml](policy_validate_not_readonly_rootfilesystem.yaml) |
-| Disallow node ports | [policy_validate_disallow_node_port.yaml](policy_validate_disallow_node_port.yaml) |
-| Allow trusted registries | [policy_validate_image_registries.yaml](policy_validate_image_registries.yaml) |
-| Require resource requests and limits | [policy_validate_pod_resources.yaml](policy_validate_pod_resources.yaml) |
-| Require pod liveness and readiness probes | [policy_validate_pod_probes.yaml](policy_validate_pod_probes.yaml) |
-| Require an image tag | [policy_validate_image_tag_notspecified_deny.yaml](policy_validate_image_tag_notspecified_deny.yaml) |
-| Disallow latest tag and pull IfNotPresent | [policy_validate_image_latest_ifnotpresent_deny.yaml](policy_validate_image_latest_ifnotpresent_deny.yaml) |
-| Require a namespace (disallow default) | [policy_validate_default_namespace.yaml](policy_validate_default_namespace.yaml) |
-| Disallow use of kube-system namespace | |
-| Prevent mounting of default service account | [policy_validate_disallow_default_serviceaccount.yaml](policy_validate_disallow_default_serviceaccount.yaml) |
-| Require a default network policy | [policy_validate_default_network_policy.yaml](policy_validate_default_network_policy.yaml) |
-| Require namespace quotas and limit ranges | [policy_validate_namespace_quota.yaml](policy_validate_namespace_quota.yaml) |
@@ -1,29 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-deny-privileged-disallowpriviligedescalation
-spec:
-  validationFailureAction: "audit"
-  rules:
-  - name: deny-privileged-disallowpriviligedescalation
-    exclude:
-      resources:
-        namespaces:
-        - kube-system
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false"
-      anyPattern:
-      - spec:
-          securityContext:
-            allowPrivilegeEscalation: false
-            privileged: false
-      - spec:
-          containers:
-          - name: "*"
-            securityContext:
-              allowPrivilegeEscalation: false
-              privileged: false
@@ -1,28 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: defaultgeneratenetworkpolicy
-spec:
-  rules:
-  - name: "default-networkpolicy"
-    match:
-      resources:
-        kinds:
-        - Namespace
-        name: "devtest"
-    generate:
-      kind: NetworkPolicy
-      name: defaultnetworkpolicy
-      data:
-        spec:
-          # select all pods in the namespace
-          podSelector: {}
-          policyTypes:
-          - Ingress
-          - Egress
-          # allow all ingress traffic from pods within this namespace
-          ingress:
-          - {}
-          # allow all egress traffic
-          egress:
-          - {}
@@ -1,21 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: disallow-node-port
-spec:
-  rules:
-  - name: disallow-node-port
-    exclude:
-      resources:
-        namespaces:
-        - kube-system
-    match:
-      resources:
-        kinds:
-        - Service
-    validate:
-      message: "Disallow service of type NodePort"
-      pattern:
-        spec:
-          type: "!NodePort"
-
@@ -1,20 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-host-network-port
-spec:
-  rules:
-  - name: validate-host-network-port
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "Host network and port are not allowed"
-      pattern:
-        spec:
-          hostNetwork: false
-          containers:
-          - name: "*"
-            ports:
-            - hostPort: null
@@ -1,18 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-host-path
-spec:
-  rules:
-  - name: validate-host-path
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "Host path '/var/lib/' is not allowed"
-      pattern:
-        spec:
-          volumes:
-          - =(hostPath):
-              path: "!/var/lib"
@@ -1,21 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-hostpid-hostipc
-spec:
-  rules:
-  - name: validate-hostpid-hostipc
-    exclude:
-      resources:
-        namespaces:
-        - kube-system
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "Disallow use of host's pid namespace and host's ipc namespace"
-      pattern:
-        spec:
-          hostPID: false
-          hostIPC: false
@@ -1,18 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-image
-spec:
-  rules:
-  - name: validate-tag
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "imagePullPolicy 'IfNotPresent' forbidden with image tag 'latest'"
-      pattern:
-        spec:
-          containers:
-          - (image): "*:latest"
-            imagePullPolicy: "!IfNotPresent"
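The conditional anchor in the deleted policy above reads: if a container's image matches `*:latest`, then its imagePullPolicy must not be `IfNotPresent`. A hypothetical container list illustrating both branches:

````yaml
containers:
- name: web                 # fails: image tag is 'latest' and pull policy is 'IfNotPresent'
  image: nginx:latest
  imagePullPolicy: IfNotPresent
- name: sidecar             # passes: the (image) condition does not match, so the check is not applied
  image: busybox:1.30
  imagePullPolicy: IfNotPresent
````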
@@ -1,17 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-image-pullpolicy-notalways
-spec:
-  rules:
-  - name: image-pullpolicy-notalways
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "image pull policy 'Always' forbidden"
-      pattern:
-        spec:
-          containers:
-          - imagePullPolicy: "!Always"
@@ -1,19 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-image-registry
-spec:
-  rules:
-  - name: validate-image-registry
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "Image registry is not allowed"
-      pattern:
-        spec:
-          containers:
-          - name: "*"
-            # Check allowed registries
-            image: "*nirmata* | https://private.registry.io/*"
@@ -1,29 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-image
-spec:
-  rules:
-  - name: validate-tag
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "An image tag is required"
-      pattern:
-        spec:
-          containers:
-          - image: "*:*"
-  - name: validate-latest
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "imagePullPolicy 'Always' required with tag 'latest'"
-      pattern:
-        spec:
-          containers:
-          - (image): "*latest"
-            imagePullPolicy: Always
@@ -1,17 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-image-tag-notlatest
-spec:
-  rules:
-  - name: image-tag-notlatest
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "image tag 'latest' forbidden"
-      pattern:
-        spec:
-          containers:
-          - image: "!*:latest"
@@ -1,17 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-image-tag-notspecified
-spec:
-  rules:
-  - name: image-tag-notspecified
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "image tag not specified"
-      pattern:
-        spec:
-          containers:
-          - image: "*:*"
@@ -1,22 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-not-readonly-rootfilesystem
-spec:
-  rules:
-  - name: validate-not-readonly-rootfilesystem
-    exclude:
-      resources:
-        namespaces:
-        - kube-system
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "Container should not have read-only rootfilesystem"
-      pattern:
-        spec:
-          containers:
-          - securityContext:
-              readOnlyRootFilesystem: false
@@ -1,25 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: validate-probes
-spec:
-  validationFailureAction: "audit"
-  rules:
-  - name: check-probes
-    match:
-      resources:
-        kinds:
-        - Pod
-    # exclude:
-    #   namespaces:
-    #   - kube-system
-    validate:
-      message: "Liveness and readiness probes are required"
-      pattern:
-        spec:
-          containers:
-          - livenessProbe:
-              periodSeconds: ">0"
-            readinessProbe:
-              periodSeconds: ">0"
-
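The `">0"` values in the deleted probes policy above use the pattern operator syntax: the rule passes only when both probes are defined with a positive period. A hypothetical conforming container (the probe handlers are illustrative):

````yaml
containers:
- name: app
  image: nginx:1.17
  livenessProbe:
    httpGet:
      path: /healthz
      port: 8080
    periodSeconds: 10    # satisfies ">0"
  readinessProbe:
    httpGet:
      path: /ready
      port: 8080
    periodSeconds: 5     # satisfies ">0"
````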
@@ -1,19 +0,0 @@
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  name: nginx
-  labels:
-    app: "nirmata-nginx"
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - name: nginx
-        image: nginx:latest
@@ -1,26 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: check-resources
-spec:
-  # validationFailureAction: "audit"
-  rules:
-  - name: check-pod-resources
-    message: "CPU and memory resource requests and limits are required"
-    match:
-      resources:
-        kinds:
-        - Pod
-        name: myapp-pod
-    validate:
-      pattern:
-        spec:
-          containers:
-          - name: "*"
-            resources:
-              requests:
-                memory: "?*"
-                cpu: "?*"
-              limits:
-                memory: "?*"
-                cpu: "?*"
@@ -1,18 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: image-with-hostpath
-  labels:
-    app.type: prod
-    namespace: "my-namespace"
-spec:
-  containers:
-  - name: image-with-hostpath
-    image: docker.io/nautiker/curl
-    volumeMounts:
-    - name: var-lib-etcd
-      mountPath: /var/lib
-  volumes:
-  - name: var-lib-etcd
-    hostPath:
-      path: /var/lib
@@ -1,27 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: Policy
-metadata:
-  name: check-cpu-memory
-spec:
-  rules:
-  - name: check-pod-resources
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "CPU and memory resource requests and limits are required"
-      pattern:
-        spec:
-          containers:
-          # 'name: *' selects all containers in the pod
-          - name: "*"
-            resources:
-              limits:
-                # '?' requires 1 alphanumeric character and '*' means that there can be 0 or more characters.
-                # Using them together e.g. '?*' requires at least one character.
-                memory: "?*"
-                cpu: "?*"
-              requests:
-                memory: "?*"
-                cpu: "?*"
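As the comments in the policy above explain, `?` matches exactly one character and `*` matches zero or more, so `"?*"` effectively means "any non-empty value". A hypothetical pod fragment that satisfies the pattern:

````yaml
containers:
- name: app
  image: nginx
  resources:
    requests:
      memory: "64Mi"    # non-empty, matches "?*"
      cpu: "250m"
    limits:
      memory: "128Mi"
      cpu: "500m"       # an empty or missing value here would fail the rule
````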
@@ -1,32 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: PolicyViolation
-metadata:
-  name: pv1
-spec:
-  policy: check-cpu-memory
-  resource:
-    kind: Pod
-    namespace: ""
-    name: pod1
-  rules:
-  - name: r1
-    type: Mutation
-    status: Failed
-    message: test mesaage for rule failure
----
-apiVersion: kyverno.io/v1alpha1
-kind: PolicyViolation
-metadata:
-  name: pv2
-spec:
-  policy: check-cpu-memory
-  resource:
-    kind: Pod
-    namespace: ""
-    name: pod1
-  rules:
-  - name: r1
-    type: Mutation
-    status: Failed
-    message: test mesaage for rule failure
----
@@ -1,28 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: defaultgeneratenetworkpolicy
-spec:
-  rules:
-  - name: "default-networkpolicy"
-    match:
-      resources:
-        kinds:
-        - Namespace
-        name: "devtest"
-    generate:
-      kind: NetworkPolicy
-      name: defaultnetworkpolicy
-      data:
-        spec:
-          # select all pods in the namespace
-          podSelector: {}
-          policyTypes:
-          - Ingress
-          - Egress
-          # allow all ingress traffic from pods within this namespace
-          ingress:
-          - {}
-          # allow all egress traffic
-          egress:
-          - {}
@@ -1,28 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: image-pull-policy
-spec:
-  rules:
-  - name: image-pull-policy
-    match:
-      resources:
-        kinds:
-        - Deployment
-        selector:
-          matchLabels:
-            app : nginxlatest
-    exclude:
-      resources:
-        kinds:
-        - DaemonSet
-    mutate:
-      overlay:
-        spec:
-          template:
-            spec:
-              containers:
-              # select images which end with :latest
-              - (image): "*latest"
-                # require that the imagePullPolicy is "IfNotPresent"
-                imagePullPolicy: IfNotPresent
@@ -1,27 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: container-security-context
-spec:
-  rules:
-  - name: validate-user-privilege
-    match:
-      resources:
-        kinds:
-        - Deployment
-        selector :
-          matchLabels:
-            app.type: prod
-    validate:
-      message: "validate container security contexts"
-      pattern:
-        spec:
-          template:
-            spec:
-              containers:
-              - securityContext:
-                  runAsNonRoot: true
-                  allowPrivilegeEscalation: false
-                  # fields can be customized
-                  # privileged: false
-                  # readOnlyRootFilesystem: true
@@ -1,25 +0,0 @@
-apiVersion : kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: check-registries
-spec:
-  rules:
-  - name: check-registries
-    match:
-      resources:
-        kinds:
-        - Deployment
-        - StatefulSet
-        selector:
-          matchLabels:
-            app: nirmata-nginx
-    validate:
-      message: "Registry is not allowed"
-      pattern:
-        spec:
-          template:
-            spec:
-              containers:
-              - name: "*"
-                # Check allowed registries
-                image: "*nirmata* | https://private.registry.io/*"
@@ -1,27 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: ClusterPolicy
-metadata:
-  name: check-container-security-context
-spec:
-  # validationFailureAction: "audit"
-  rules:
-  - name: check-root-user
-    exclude:
-      resources:
-        namespaces:
-        - kube-system
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "Root user is not allowed. Set runAsNonRoot to true."
-      anyPattern:
-      - spec:
-          securityContext:
-            runAsNonRoot: true
-      - spec:
-          containers:
-          - name: "*"
-            securityContext:
-              runAsNonRoot: true
@@ -1,10 +0,0 @@
-apiVersion: v1
-kind: Pod
-metadata:
-  name: sec-ctx-unprivileged
-spec:
-  # securityContext:
-  #   runAsNonRoot: true
-  containers:
-  - name: imagen-with-hostpath
-    image: nginxinc/nginx-unprivileged
@@ -1,28 +0,0 @@
-apiVersion: kyverno.io/v1alpha1
-kind: Policy
-metadata:
-  name: check-resources
-spec:
-  validationFailureAction: "audit"
-  rules:
-  - name: check-pod-resources
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      message: "CPU and memory resource requests and limits are required"
-      pattern:
-        spec:
-          containers:
-          # 'name: *' selects all containers in the pod
-          - name: "*"
-            resources:
-              requests:
-                # '?' requires 1 alphanumeric character and '*' means that there can be 0 or more characters.
-                # Using them together e.g. '?*' requires at least one character.
-                memory: "?*"
-                cpu: "?*"
-              limits:
-                memory: "?*"
-                cpu: "?*"
@@ -179,3 +179,12 @@ func joinAnchors(anchorPatterns []anchor) string {

 	return strings.Join(res, " || ")
 }
+
+func hasNegationAnchor(str string) (bool, string) {
+	left := "X("
+	right := ")"
+	if len(str) < len(left)+len(right) {
+		return false, str
+	}
+	return (str[:len(left)] == left && str[len(str)-len(right):] == right), str[len(left) : len(str)-len(right)]
+}
@@ -21,11 +21,41 @@ func CreateElementHandler(element string, pattern interface{}, path string) Vali
 		return NewExistanceHandler(element, pattern, path)
 	case isEqualityAnchor(element):
 		return NewEqualityHandler(element, pattern, path)
+	case isNegationAnchor(element):
+		return NewNegationHandler(element, pattern, path)
 	default:
 		return NewDefaultHandler(element, pattern, path)
 	}
 }
+
+func NewNegationHandler(anchor string, pattern interface{}, path string) ValidationHandler {
+	return NegationHandler{
+		anchor:  anchor,
+		pattern: pattern,
+		path:    path,
+	}
+}
+
+//NegationHandler provides handler for check if the tag in anchor is not defined
+type NegationHandler struct {
+	anchor  string
+	pattern interface{}
+	path    string
+}
+
+//Handle process negation handler
+func (nh NegationHandler) Handle(resourceMap map[string]interface{}, originPattern interface{}) (string, error) {
+	anchorKey := removeAnchor(nh.anchor)
+	currentPath := nh.path + anchorKey + "/"
+	// if anchor is present in the resource then fail
+	if _, ok := resourceMap[anchorKey]; ok {
+		// no need to process elements in value as key cannot be present in resource
+		return currentPath, fmt.Errorf("Validation rule failed at %s, field %s is disallowed", currentPath, anchorKey)
+	}
+	// key is not defined in the resource
+	return "", nil
+}
+
 func NewEqualityHandler(anchor string, pattern interface{}, path string) ValidationHandler {
 	return EqualityHandler{
 		anchor:  anchor,
@@ -150,7 +180,7 @@ func (eh ExistanceHandler) Handle(resourceMap map[string]interface{}, originPatt
 	case []interface{}:
 		typedPattern, ok := eh.pattern.([]interface{})
 		if !ok {
-			return currentPath, fmt.Errorf("Invalid pattern type %T: Pattern has to be of lis to compare against resource", eh.pattern)
+			return currentPath, fmt.Errorf("Invalid pattern type %T: Pattern has to be of list to compare against resource", eh.pattern)
 		}
 		// get the first item in the pattern array
 		patternMap := typedPattern[0]
@@ -187,7 +217,7 @@ func getAnchorsResourcesFromMap(patternMap map[string]interface{}) (map[string]i
 	anchors := map[string]interface{}{}
 	resources := map[string]interface{}{}
 	for key, value := range patternMap {
-		if isConditionAnchor(key) || isExistanceAnchor(key) || isEqualityAnchor(key) {
+		if isConditionAnchor(key) || isExistanceAnchor(key) || isEqualityAnchor(key) || isNegationAnchor(key) {
 			anchors[key] = value
 			continue
 		}
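To make the new handler's behavior concrete, here are two hypothetical resource fragments checked against the pattern `X(hostPath): null` (they mirror the deny and pass test cases added later in this diff):

````yaml
# Fails: the anchored key is present, so Handle returns
# "... field hostPath is disallowed" at /spec/volumes/0/hostPath/
volumes:
- name: var-lib-etcd
  hostPath:
    path: /var/lib
---
# Passes: hostPath is absent; the tag's value is never inspected
volumes:
- name: cache
  emptyDir: {}
````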
@@ -16,7 +16,7 @@ import (
 )

 //Generate apply generation rules on a resource
-func Generate(client *client.Client, policy kyverno.ClusterPolicy, ns unstructured.Unstructured) (response EngineResponseNew) {
+func Generate(client *client.Client, policy kyverno.ClusterPolicy, ns unstructured.Unstructured) (response EngineResponse) {
 	startTime := time.Now()
 	// policy information
 	func() {
@@ -10,7 +10,7 @@ import (
 )

 // Mutate performs mutation. Overlay first and then mutation patches
-func Mutate(policy kyverno.ClusterPolicy, resource unstructured.Unstructured) (response EngineResponseNew) {
+func Mutate(policy kyverno.ClusterPolicy, resource unstructured.Unstructured) (response EngineResponse) {
 	startTime := time.Now()
 	// policy information
 	func() {
@@ -7,8 +7,8 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )

-//EngineResponseNew engine response to the action
-type EngineResponseNew struct {
+//EngineResponse engine response to the action
+type EngineResponse struct {
 	// Resource patched with the engine action changes
 	PatchedResource unstructured.Unstructured
 	// Policy Response
@@ -74,7 +74,7 @@ type RuleStats struct {
 }

 //IsSuccesful checks if any rule has failed or not
-func (er EngineResponseNew) IsSuccesful() bool {
+func (er EngineResponse) IsSuccesful() bool {
 	for _, r := range er.PolicyResponse.Rules {
 		if !r.Success {
 			return false
@@ -84,7 +84,7 @@ func (er EngineResponseNew) IsSuccesful() bool {
 }

 //GetPatches returns all the patches joined
-func (er EngineResponseNew) GetPatches() [][]byte {
+func (er EngineResponse) GetPatches() [][]byte {
 	var patches [][]byte
 	for _, r := range er.PolicyResponse.Rules {
 		if r.Patches != nil {
@@ -96,16 +96,16 @@ func (er EngineResponseNew) GetPatches() [][]byte {
 }

 //GetFailedRules returns failed rules
-func (er EngineResponseNew) GetFailedRules() []string {
+func (er EngineResponse) GetFailedRules() []string {
 	return er.getRules(false)
 }

 //GetSuccessRules returns success rules
-func (er EngineResponseNew) GetSuccessRules() []string {
+func (er EngineResponse) GetSuccessRules() []string {
 	return er.getRules(true)
 }

-func (er EngineResponseNew) getRules(success bool) []string {
+func (er EngineResponse) getRules(success bool) []string {
 	var rules []string
 	for _, r := range er.PolicyResponse.Rules {
 		if r.Success == success {
@@ -305,6 +305,16 @@ func isEqualityAnchor(str string) bool {
 	return (str[:len(left)] == left && str[len(str)-len(right):] == right)
 }

+func isNegationAnchor(str string) bool {
+	left := "X("
+	right := ")"
+	if len(str) < len(left)+len(right) {
+		return false
+	}
+	//TODO: trim spaces ?
+	return (str[:len(left)] == left && str[len(str)-len(right):] == right)
+}
+
 func isAddingAnchor(key string) bool {
 	const left = "+("
 	const right = ")"
@@ -340,7 +350,7 @@ func removeAnchor(key string) string {
 		return key[1 : len(key)-1]
 	}

-	if isExistanceAnchor(key) || isAddingAnchor(key) || isEqualityAnchor(key) {
+	if isExistanceAnchor(key) || isAddingAnchor(key) || isEqualityAnchor(key) || isNegationAnchor(key) {
 		return key[2 : len(key)-1]
 	}
@@ -15,7 +15,7 @@ import (
 )

 //Validate applies validation rules from policy on the resource
-func Validate(policy kyverno.ClusterPolicy, resource unstructured.Unstructured) (response EngineResponseNew) {
+func Validate(policy kyverno.ClusterPolicy, resource unstructured.Unstructured) (response EngineResponse) {
 	startTime := time.Now()
 	// policy information
 	func() {
@@ -2767,3 +2767,176 @@ func TestValidate_existenceAnchor_pass(t *testing.T) {
 	}
 	assert.Assert(t, er.IsSuccesful())
 }
+
+func TestValidate_negationAnchor_deny(t *testing.T) {
+	rawPolicy := []byte(`
+	{
+		"apiVersion": "kyverno.io/v1alpha1",
+		"kind": "ClusterPolicy",
+		"metadata": {
+			"name": "validate-host-path"
+		},
+		"spec": {
+			"rules": [
+				{
+					"name": "validate-host-path",
+					"match": {
+						"resources": {
+							"kinds": [
+								"Pod"
+							]
+						}
+					},
+					"validate": {
+						"message": "Host path is not allowed",
+						"pattern": {
+							"spec": {
+								"volumes": [
+									{
+										"name": "*",
+										"X(hostPath)": null
+									}
+								]
+							}
+						}
+					}
+				}
+			]
+		}
+	}
+	`)
+
+	rawResource := []byte(`
+	{
+		"apiVersion": "v1",
+		"kind": "Pod",
+		"metadata": {
+			"name": "image-with-hostpath",
+			"labels": {
+				"app.type": "prod",
+				"namespace": "my-namespace"
+			}
+		},
+		"spec": {
+			"containers": [
+				{
+					"name": "image-with-hostpath",
+					"image": "docker.io/nautiker/curl",
+					"volumeMounts": [
+						{
+							"name": "var-lib-etcd",
+							"mountPath": "/var/lib"
+						}
+					]
+				}
+			],
+			"volumes": [
+				{
+					"name": "var-lib-etcd",
+					"hostPath": {
+						"path": "/var/lib1"
+					}
+				}
+			]
+		}
+	} `)
+
+	var policy kyverno.ClusterPolicy
+	json.Unmarshal(rawPolicy, &policy)
+
+	resourceUnstructured, err := ConvertToUnstructured(rawResource)
+	assert.NilError(t, err)
+	er := Validate(policy, *resourceUnstructured)
+	msgs := []string{"Validation rule 'validate-host-path' failed at '/spec/volumes/0/hostPath/' for resource Pod//image-with-hostpath. Host path is not allowed"}
+
+	for index, r := range er.PolicyResponse.Rules {
+		assert.Equal(t, r.Message, msgs[index])
+	}
+	assert.Assert(t, !er.IsSuccesful())
+}
+
+func TestValidate_negationAnchor_pass(t *testing.T) {
+	rawPolicy := []byte(`
+	{
+		"apiVersion": "kyverno.io/v1alpha1",
+		"kind": "ClusterPolicy",
+		"metadata": {
+			"name": "validate-host-path"
+		},
+		"spec": {
+			"rules": [
+				{
+					"name": "validate-host-path",
+					"match": {
+						"resources": {
+							"kinds": [
+								"Pod"
+							]
+						}
+					},
+					"validate": {
+						"message": "Host path is not allowed",
+						"pattern": {
+							"spec": {
+								"volumes": [
+									{
+										"name": "*",
+										"X(hostPath)": null
+									}
+								]
+							}
+						}
+					}
+				}
+			]
+		}
+	}
+	`)
+
+	rawResource := []byte(`
+	{
+		"apiVersion": "v1",
+		"kind": "Pod",
+		"metadata": {
+			"name": "image-with-hostpath",
+			"labels": {
+				"app.type": "prod",
+				"namespace": "my-namespace"
+			}
+		},
+		"spec": {
+			"containers": [
+				{
+					"name": "image-with-hostpath",
+					"image": "docker.io/nautiker/curl",
+					"volumeMounts": [
+						{
+							"name": "var-lib-etcd",
+							"mountPath": "/var/lib"
+						}
+					]
+				}
+			],
+			"volumes": [
+				{
+					"name": "var-lib-etcd",
+					"emptyDir": {}
+				}
+			]
+		}
+	}
+	`)
+
+	var policy kyverno.ClusterPolicy
+	json.Unmarshal(rawPolicy, &policy)
+
+	resourceUnstructured, err := ConvertToUnstructured(rawResource)
+	assert.NilError(t, err)
+	er := Validate(policy, *resourceUnstructured)
+	msgs := []string{"Validation rule 'validate-host-path' succesfully validated"}
+
+	for index, r := range er.PolicyResponse.Rules {
+		assert.Equal(t, r.Message, msgs[index])
+	}
+	assert.Assert(t, er.IsSuccesful())
+}
@@ -85,7 +85,7 @@ func buildKey(policy, pv, kind, ns, name, rv string) string {
 	return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
 }

-func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []engine.EngineResponseNew {
+func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []engine.EngineResponse {
 	// convert to unstructured
 	unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&namespace)
 	if err != nil {
@@ -99,7 +99,7 @@ func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []e
 	// get all the policies that have a generate rule and resource description satifies the namespace
 	// apply policy on resource
 	policies := listpolicies(ns, nsc.pLister)
-	var engineResponses []engine.EngineResponseNew
+	var engineResponses []engine.EngineResponse
 	for _, policy := range policies {
 		// pre-processing, check if the policy and resource version has been processed before
 		if !nsc.rm.ProcessResource(policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion()) {
@@ -185,13 +185,13 @@ func listpolicies(ns unstructured.Unstructured, pLister kyvernolister.ClusterPol
 	return filteredpolicies
 }

-func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.ClusterPolicy, policyStatus policyctr.PolicyStatusInterface) engine.EngineResponseNew {
+func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.ClusterPolicy, policyStatus policyctr.PolicyStatusInterface) engine.EngineResponse {
 	var policyStats []policyctr.PolicyStat
 	// gather stats from the engine response
 	gatherStat := func(policyName string, policyResponse engine.PolicyResponse) {
 		ps := policyctr.PolicyStat{}
 		ps.PolicyName = policyName
 		ps.Stats.MutationExecutionTime = policyResponse.ProcessingTime
 		ps.Stats.GenerationExecutionTime = policyResponse.ProcessingTime
 		ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
 		// capture rule level stats
 		for _, rule := range policyResponse.Rules {
@@ -9,7 +9,7 @@ import (
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 )

-func (nsc *NamespaceController) report(engineResponses []engine.EngineResponseNew) {
+func (nsc *NamespaceController) report(engineResponses []engine.EngineResponse) {
 	// generate events
 	// generate policy violations
 	for _, er := range engineResponses {
@@ -25,7 +25,7 @@ func (nsc *NamespaceController) report(engineResponses []engine.EngineResponseNe
 }

 //reportEvents generates events for the failed resources
-func reportEvents(engineResponse engine.EngineResponseNew, eventGen event.Interface) {
+func reportEvents(engineResponse engine.EngineResponse, eventGen event.Interface) {
 	if engineResponse.IsSuccesful() {
 		return
 	}
@@ -15,7 +15,7 @@ import (

 // applyPolicy applies policy on a resource
 //TODO: generation rules
-func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) (responses []engine.EngineResponseNew) {
+func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) (responses []engine.EngineResponse) {
 	startTime := time.Now()
 	var policyStats []PolicyStat
 	glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
@@ -54,8 +54,8 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
 		policyStatus.SendStat(stat)
 	}
 }
-	var engineResponses []engine.EngineResponseNew
-	var engineResponse engine.EngineResponseNew
+	var engineResponses []engine.EngineResponse
+	var engineResponse engine.EngineResponse
 	var err error

 	//MUTATION
@@ -79,7 +79,7 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
 	//TODO: GENERATION
 	return engineResponses
 }
-func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) (engine.EngineResponseNew, error) {
+func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) (engine.EngineResponse, error) {
 	engineResponse := engine.Mutate(policy, resource)
 	if !engineResponse.IsSuccesful() {
 		glog.V(4).Infof("mutation had errors reporting them")
@@ -95,11 +95,11 @@ func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured,
 }

 // getFailedOverallRuleInfo gets detailed info for over-all mutation failure
-func getFailedOverallRuleInfo(resource unstructured.Unstructured, engineResponse engine.EngineResponseNew) (engine.EngineResponseNew, error) {
+func getFailedOverallRuleInfo(resource unstructured.Unstructured, engineResponse engine.EngineResponse) (engine.EngineResponse, error) {
 	rawResource, err := resource.MarshalJSON()
 	if err != nil {
 		glog.V(4).Infof("unable to marshal resource: %v\n", err)
-		return engine.EngineResponseNew{}, err
+		return engine.EngineResponse{}, err
 	}

 	// resource does not match so there was a mutation rule violated
@@ -112,14 +112,14 @@ func getFailedOverallRuleInfo(resource unstructured.Unstructured, engineResponse
 	patch, err := jsonpatch.DecodePatch(utils.JoinPatches(rule.Patches))
 	if err != nil {
 		glog.V(4).Infof("unable to decode patch %s: %v", rule.Patches, err)
-		return engine.EngineResponseNew{}, err
+		return engine.EngineResponse{}, err
 	}

 	// apply the patches returned by mutate to the original resource
 	patchedResource, err := patch.Apply(rawResource)
 	if err != nil {
 		glog.V(4).Infof("unable to apply patch %s: %v", rule.Patches, err)
-		return engine.EngineResponseNew{}, err
+		return engine.EngineResponse{}, err
 	}

 	if !jsonpatch.Equal(patchedResource, rawResource) {
@@ -198,6 +198,14 @@ func (pc *PolicyController) addPolicyViolation(obj interface{}) {
 	// them to see if anyone wants to adopt it.
 	ps := pc.getPolicyForPolicyViolation(pv)
 	if len(ps) == 0 {
+		// there is no cluster policy for this violation, so we can delete this cluster policy violation
+		glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", pv.Name)
+		if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {
+			glog.Errorf("Failed to deleted policy violation %s: %v", pv.Name, err)
+			return
+		}
+		glog.V(4).Infof("PolicyViolation %s deleted", pv.Name)
+		return
 	}
 	glog.V(4).Infof("Orphan Policy Violation %s added.", pv.Name)
@@ -246,6 +254,13 @@ func (pc *PolicyController) updatePolicyViolation(old, cur interface{}) {
 	if labelChanged || controllerRefChanged {
 		ps := pc.getPolicyForPolicyViolation(curPV)
 		if len(ps) == 0 {
+			// there is no cluster policy for this violation, so we can delete this cluster policy violation
+			glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", curPV.Name)
+			if err := pc.pvControl.DeletePolicyViolation(curPV.Name); err != nil {
+				glog.Errorf("Failed to deleted policy violation %s: %v", curPV.Name, err)
+				return
+			}
+			glog.V(4).Infof("PolicyViolation %s deleted", curPV.Name)
+			return
 		}
 		glog.V(4).Infof("Orphan PolicyViolation %s updated", curPV.Name)
@@ -316,8 +331,8 @@ func (pc *PolicyController) getPolicyForPolicyViolation(pv *kyverno.ClusterPolic
 	if err != nil || len(policies) == 0 {
 		return nil
 	}
-	// Because all ReplicaSet's belonging to a deployment should have a unique label key,
-	// there should never be more than one deployment returned by the above method.
+	// Because all PolicyViolations's belonging to a Policy should have a unique label key,
+	// there should never be more than one Policy returned by the above method.
 	// If that happens we should probably dynamically repair the situation by ultimately
 	// trying to clean up one of the controllers, for now we just return the older one
 	if len(policies) > 1 {
@@ -741,6 +756,7 @@ func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(met
 //PVControlInterface provides interface to operate on policy violation resource
 type PVControlInterface interface {
 	PatchPolicyViolation(name string, data []byte) error
+	DeletePolicyViolation(name string) error
 }

 // RealPVControl is the default implementation of PVControlInterface.
@@ -755,6 +771,11 @@ func (r RealPVControl) PatchPolicyViolation(name string, data []byte) error {
 	return err
 }

+//DeletePolicyViolation deletes the policy violation
+func (r RealPVControl) DeletePolicyViolation(name string) error {
+	return r.Client.KyvernoV1alpha1().ClusterPolicyViolations().Delete(name, &metav1.DeleteOptions{})
+}
+
 // RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
 //
 // The CanAdopt() function calls getObject() to fetch the latest value,
@@ -16,11 +16,11 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 )

-func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolicy) []engine.EngineResponseNew {
+func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolicy) []engine.EngineResponse {
 	// Parse through all the resources
 	// drops the cache after configured rebuild time
 	pc.rm.Drop()
-	var engineResponses []engine.EngineResponseNew
+	var engineResponses []engine.EngineResponse
 	// get resource that are satisfy the resource description defined in the rules
 	resourceMap := listResources(pc.client, policy, pc.filterK8Resources)
 	for _, resource := range resourceMap {
@@ -9,7 +9,7 @@ import (
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 )

-func (pc *PolicyController) report(engineResponses []engine.EngineResponseNew) {
+func (pc *PolicyController) report(engineResponses []engine.EngineResponse) {
 	// generate events
 	// generate policy violations
 	for _, policyInfo := range engineResponses {
@@ -26,7 +26,7 @@ func (pc *PolicyController) report(engineResponses []engine.EngineResponseNew) {
 }

 //reportEvents generates events for the failed resources
-func reportEvents(engineResponse engine.EngineResponseNew, eventGen event.Interface) {
+func reportEvents(engineResponse engine.EngineResponse, eventGen event.Interface) {
 	if engineResponse.IsSuccesful() {
 		return
 	}
@@ -28,31 +28,7 @@ func BuildPolicyViolation(policy string, resource kyverno.ResourceSpec, fRules [
 	return pv
 }

-// buildPolicyViolationsForAPolicy returns a policy violation object if there are any rules that fail
-// func buildPolicyViolationsForAPolicy(pi info.PolicyInfo) kyverno.PolicyViolation {
-// 	var fRules []kyverno.ViolatedRule
-// 	var pv kyverno.PolicyViolation
-// 	for _, r := range pi.Rules {
-// 		if !r.IsSuccessful() {
-// 			fRules = append(fRules, kyverno.ViolatedRule{Name: r.Name, Message: r.GetErrorString(), Type: r.RuleType.String()})
-// 		}
-// 	}
-// 	if len(fRules) > 0 {
-// 		glog.V(4).Infof("building policy violation for policy %s on resource %s/%s/%s", pi.Name, pi.RKind, pi.RNamespace, pi.RName)
-// 		// there is an error
-// 		pv = BuildPolicyViolation(pi.Name, kyverno.ResourceSpec{
-// 			Kind:      pi.RKind,
-// 			Namespace: pi.RNamespace,
-// 			Name:      pi.RName,
-// 		},
-// 			fRules,
-// 		)
-
-// 	}
-// 	return pv
-// }
-
-func buildPVForPolicy(er engine.EngineResponseNew) kyverno.ClusterPolicyViolation {
+func buildPVForPolicy(er engine.EngineResponse) kyverno.ClusterPolicyViolation {
 	var violatedRules []kyverno.ViolatedRule
 	glog.V(4).Infof("building policy violation for engine response %v", er)
 	for _, r := range er.PolicyResponse.Rules {
@@ -78,7 +54,7 @@ func buildPVForPolicy(er engine.EngineResponseNew) kyverno.ClusterPolicyViolatio
 }

 //CreatePV creates policy violation resource based on the engine responses
-func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset, engineResponses []engine.EngineResponseNew) {
+func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset, engineResponses []engine.EngineResponse) {
 	var pvs []kyverno.ClusterPolicyViolation
 	for _, er := range engineResponses {
 		// ignore creation of PV for resoruces that are yet to be assigned a name
@@ -130,53 +106,6 @@ func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyver
 	}
 }

-// //GeneratePolicyViolations generate policyViolation resources for the rules that failed
-// //TODO: check if pvListerSynced is needed
-// func GeneratePolicyViolations(pvListerSynced cache.InformerSynced, pvLister kyvernolister.PolicyViolationLister, client *kyvernoclient.Clientset, policyInfos []info.PolicyInfo) {
-// 	var pvs []kyverno.PolicyViolation
-// 	for _, policyInfo := range policyInfos {
-// 		if !policyInfo.IsSuccessful() {
-// 			if pv := buildPolicyViolationsForAPolicy(policyInfo); !reflect.DeepEqual(pv, kyverno.PolicyViolation{}) {
-// 				pvs = append(pvs, pv)
-// 			}
-// 		}
-// 	}
-
-// 	if len(pvs) > 0 {
-// 		for _, newPv := range pvs {
-// 			// generate PolicyViolation objects
-// 			glog.V(4).Infof("creating policyViolation resource for policy %s and resource %s/%s/%s", newPv.Spec.Policy, newPv.Spec.Kind, newPv.Spec.Namespace, newPv.Spec.Name)
-
-// 			// check if there was a previous violation for policy & resource combination
-// 			curPv, err := getExistingPolicyViolationIfAny(pvListerSynced, pvLister, newPv)
-// 			if err != nil {
-// 				continue
-// 			}
-// 			if curPv == nil {
-// 				// no existing policy violation, create a new one
-// 				_, err := client.KyvernoV1alpha1().PolicyViolations().Create(&newPv)
-// 				if err != nil {
-// 					glog.Error(err)
-// 				}
-// 				continue
-// 			}
-// 			// compare the policyviolation spec for existing resource if present else
-// 			if reflect.DeepEqual(curPv.Spec, newPv.Spec) {
-// 				// if they are equal there has been no change so dont update the polivy violation
-// 				glog.Infof("policy violation spec %v did not change so not updating it", newPv.Spec)
-// 				continue
-// 			}
-// 			// spec changed so update the policyviolation
-// 			//TODO: wont work, as name is not defined yet
-// 			_, err = client.KyvernoV1alpha1().PolicyViolations().Update(&newPv)
-// 			if err != nil {
-// 				glog.Error(err)
-// 				continue
-// 			}
-// 		}
-// 	}
-// }
-
 //TODO: change the name
 func getExistingPolicyViolationIfAny(pvListerSynced cache.InformerSynced, pvLister kyvernolister.ClusterPolicyViolationLister, newPv kyverno.ClusterPolicyViolation) (*kyverno.ClusterPolicyViolation, error) {
 	// TODO: check for existing ov using label selectors on resource and policy
@@ -137,10 +137,18 @@ func runTestCase(t *testing.T, tc scaseT) bool {
 	// apply policy
 	// convert policy -> kyverno.Policy
 	policy := loadPolicy(t, tc.Input.Policy)
+	if policy == nil {
+		t.Error("Policy no loaded")
+		t.FailNow()
+	}
 	// convert resource -> unstructured.Unstructured
 	resource := loadPolicyResource(t, tc.Input.Resource)
+	if resource == nil {
+		t.Error("Resources no loaded")
+		t.FailNow()
+	}

-	var er engine.EngineResponseNew
+	var er engine.EngineResponse
 	// Mutation
 	er = engine.Mutate(*policy, *resource)
 	// validate te response
@@ -3,111 +3,115 @@ package testrunner
 import "testing"

 func Test_Mutate_EndPoint(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_mutate_endPpoint.yaml")
-}
-
-func Test_Mutate_imagePullPolicy(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_mutate_imagePullPolicy.yaml")
+	testScenario(t, "/test/scenarios/other/scenario_mutate_endpoint.yaml")
 }

 func Test_Mutate_Validate_qos(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_mutate_validate_qos.yaml")
-}
-
-func Test_validate_containerSecurityContext(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_validate_containerSecurityContext.yaml")
+	testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
 }

 func Test_validate_deny_runasrootuser(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_deny_runasrootuser.yaml")
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_deny_runasrootuser.yaml")
 }

 func Test_validate_disallow_priviledgedprivelegesecalation(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_container_disallow_priviledgedprivelegesecalation.yaml")
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_priviledged_privelegesecalation.yaml")
 }

 func Test_validate_healthChecks(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_validate_healthChecks.yaml")
-}
-
-func Test_validate_imageRegistries(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_validate_imageRegistries.yaml")
+	testScenario(t, "/test/scenarios/other/scenario_validate_healthChecks.yaml")
 }

 func Test_validate_nonRootUsers(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_validate_nonRootUser.yaml")
+	testScenario(t, "/test/scenarios/samples/best_practices/scenario_validate_nonRootUser.yaml")
 }

 func Test_generate_networkPolicy(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_generate_networkPolicy.yaml")
+	testScenario(t, "/test/scenarios/samples/best_practices/scenario_generate_networkPolicy.yaml")
 }

 // namespace is blank, not "default" as testrunner evaulates the policyengine, but the "default" is added by kubeapiserver
-func Test_validate_image_latest_ifnotpresent_deny(t *testing.T) {
-	testScenario(t, "/test/scenarios/test/scenario_validate_image_latest_ifnotpresent_deny.yaml")
+func Test_validate_require_image_tag_not_latest_deny(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_valiadate_require_image_tag_not_latest_deny.yaml")
 }

-func Test_validate_image_latest_ifnotpresent_pass(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_image_latest_ifnotpresent_pass.yaml")
+func Test_validate_require_image_tag_not_latest_pass(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_valiadate_require_image_tag_not_latest_pass.yaml")
 }

-func Test_validate_image_tag_notspecified_deny(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_image_tag_notspecified_deny.yaml")
+func Test_validate_disallow_automoutingapicred_pass(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_automountingapicred.yaml")
 }

-func Test_validate_image_tag_notspecified_pass(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_image_tag_notspecified_pass.yaml")
-}
-
-func Test_validate_image_pullpolicy_notalways_deny(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_image_pullpolicy_notalways_deny.yaml")
-}
-
-func Test_validate_image_pullpolicy_notalways_pass(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_image_pullpolicy_notalways_pass.yaml")
-}
-
-func Test_validate_image_tag_latest_deny(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_image_tag_latest_deny.yaml")
-}
-
-func Test_validate_image_tag_latest_pass(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_image_tag_latest_pass.yaml")
-}
-
 func Test_mutate_pod_disable_automoutingapicred_pass(t *testing.T) {
 	testScenario(t, "test/scenarios/test/scenario_mutate_pod_disable_automountingapicred.yaml")
 }

-func Test_validate_default_namespace(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_default_namespace.yaml")
-}
-
-func Test_validate_host_path(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_host_path.yaml")
+func Test_validate_disallow_default_namespace(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_default_namespace.yaml")
 }

 func Test_validate_host_network_port(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_host_network_port.yaml")
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_host_network_hostport.yaml")
 }

 func Test_validate_hostPID_hostIPC(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_hostpid_hostipc.yaml")
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_hostpid_hostipc.yaml")
 }

 func Test_validate_not_readonly_rootfilesystem(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_not_readonly_rootfilesystem.yaml")
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_require_readonly_rootfilesystem.yaml")
 }

-func Test_validate_namespace_quota(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_namespace_quota.yaml")
+func Test_validate_require_namespace_quota(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_require_namespace_quota.yaml")
 }

 func Test_validate_disallow_node_port(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_disallow_node_port.yaml")
+	testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_node_port.yaml")
 }

 func Test_validate_disallow_default_serviceaccount(t *testing.T) {
-	testScenario(t, "test/scenarios/test/scenario_validate_disallow_default_serviceaccount.yaml")
+	testScenario(t, "test/scenarios/other/scenario_validate_disallow_default_serviceaccount.yaml")
 }
+
+func Test_validate_fsgroup(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/more/scenario_validate_fsgroup.yaml")
+}
+
+func Test_validate_selinux_context(t *testing.T) {
+	testScenario(t, "test/scenarios/other/scenario_validate_selinux_context.yaml")
+}
+
+func Test_validate_proc_mount(t *testing.T) {
+	testScenario(t, "test/scenarios/other/scenario_validate_default_proc_mount.yaml")
+}
+
+func Test_validate_container_capabilities(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/more/scenario_validate_container_capabilities.yaml")
+}
+
+func Test_validate_disallow_sysctl(t *testing.T) {
+	testScenario(t, "test/scenarios/samples/more/scenario_validate_sysctl_configs.yaml")
+}
+
+func Test_validate_volume_whitelist(t *testing.T) {
+	testScenario(t, "test/scenarios/other/scenario_validate_volume_whiltelist.yaml")
|
||||
}
|
||||
|
||||
func Test_validate_trusted_image_registries(t *testing.T) {
|
||||
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_trusted_image_registries.yaml")
|
||||
}
|
||||
|
||||
func Test_require_pod_requests_limits(t *testing.T) {
|
||||
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_require_pod_requests_limits.yaml")
|
||||
}
|
||||
|
||||
func Test_require_probes(t *testing.T) {
|
||||
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_probes.yaml")
|
||||
}
|
||||
|
||||
func Test_validate_disallow_host_filesystem_fail(t *testing.T) {
|
||||
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_host_filesystem.yaml")
|
||||
}
|
||||
|
||||
func Test_validate_disallow_host_filesystem_pass(t *testing.T) {
|
||||
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_host_filesystem_pass.yaml")
|
||||
}
|
||||
|
|
|
@@ -26,6 +26,7 @@ var kindToResource = map[string]string{
    "Endpoints":     "endpoints",
    "Namespace":     "namespaces",
    "Secret":        "secrets",
    "Service":       "services",
    "Deployment":    "deployments",
    "NetworkPolicy": "networkpolicies",
}

|
@ -10,13 +10,11 @@ import (
|
|||
)
|
||||
|
||||
const (
|
||||
policyAnnotation = "policies.kyverno.io"
|
||||
// lastAppliedPatches = policyAnnotation + "last-applied-patches"
|
||||
policyAnnotation = "policies.kyverno.io/patches"
|
||||
)
|
||||
|
||||
type policyPatch struct {
|
||||
PolicyName string `json:"policyname"`
|
||||
// RulePatches []string `json:"patches"`
|
||||
PolicyName string `json:"policyname"`
|
||||
RulePatches interface{} `json:"patches"`
|
||||
}
|
||||
|
||||
|
@@ -32,28 +30,32 @@ type response struct {
    Value interface{} `json:"value"`
}

func generateAnnotationPatches(annotations map[string]string, engineResponses []engine.EngineResponse) []byte {
    if annotations == nil {
        annotations = make(map[string]string)
    }

    var patchResponse response
    value := annotationFromEngineResponses(engineResponses)
    if value == nil {
        // no patches or error while processing patches
        return nil
    }

    if _, ok := annotations[policyAnnotation]; ok {
        // create update patch string
        patchResponse = response{
            Op:    "replace",
            Path:  "/metadata/annotations/" + policyAnnotation,
            Value: string(value),
        }
    } else {
        // insert 'policies.kyverno.io' entry in annotation map
        annotations[policyAnnotation] = string(value)
        patchResponse = response{
            Op:    "add",
            Path:  "/metadata/annotations",
            Value: annotations,
        }
    }

@@ -68,109 +70,59 @@ func generateAnnotationPatches(annotations map[string]string, policyResponse eng
    return patchByte
}

func annotationFromEngineResponses(engineResponses []engine.EngineResponse) []byte {
    var policyPatches []policyPatch
    for _, engineResponse := range engineResponses {
        if !engineResponse.IsSuccesful() {
            glog.V(3).Infof("Policy %s failed, skip preparing annotation\n", engineResponse.PolicyResponse.Policy)
            continue
        }

        var pp policyPatch
        rulePatches := annotationFromPolicyResponse(engineResponse.PolicyResponse)
        if rulePatches == nil {
            continue
        }

        pp.RulePatches = rulePatches
        pp.PolicyName = engineResponse.PolicyResponse.Policy
        policyPatches = append(policyPatches, pp)
    }

    // return nil if there are no patches
    // otherwise result = null, len(result) = 4
    if policyPatches == nil {
        return nil
    }

    result, _ := json.Marshal(policyPatches)

    return result
}

func annotationFromPolicyResponse(policyResponse engine.PolicyResponse) []rulePatch {
    var rulePatches []rulePatch
    for _, ruleInfo := range policyResponse.Rules {
        // generate annotation for each mutation JSON patch to be applied on the resource
        for _, patch := range ruleInfo.Patches {
            var patchmap map[string]interface{}
            if err := json.Unmarshal(patch, &patchmap); err != nil {
                glog.Errorf("Failed to parse patch bytes, err: %v\n", err)
                continue
            }

            rp := rulePatch{
                RuleName: ruleInfo.Name,
                Op:       patchmap["op"].(string),
                Path:     patchmap["path"].(string)}

            rulePatches = append(rulePatches, rp)
            glog.V(4).Infof("Annotation value prepared: %v\n", rulePatches)
        }
    }

    if len(rulePatches) == 0 {
        return nil
    }

    return rulePatches
}

pkg/webhooks/annotations_test.go (new file, 92 lines)
@@ -0,0 +1,92 @@
package webhooks

import (
    "testing"

    "github.com/nirmata/kyverno/pkg/engine"
    "gotest.tools/assert"
)

func newPolicyResponse(policy, rule string, patchesStr []string, success bool) engine.PolicyResponse {
    var patches [][]byte
    for _, p := range patchesStr {
        patches = append(patches, []byte(p))
    }

    return engine.PolicyResponse{
        Policy: policy,
        Rules: []engine.RuleResponse{
            engine.RuleResponse{
                Name:    rule,
                Patches: patches,
                Success: success},
        },
    }
}

func newEngineResponse(policy, rule string, patchesStr []string, success bool) engine.EngineResponse {
    return engine.EngineResponse{
        PolicyResponse: newPolicyResponse(policy, rule, patchesStr, success),
    }
}

func Test_empty_annotation(t *testing.T) {
    patchStr := `{ "op": "replace", "path": "/spec/containers/0/imagePullPolicy", "value": "IfNotPresent" }`
    engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true)

    annPatches := generateAnnotationPatches(nil, []engine.EngineResponse{engineResponse})
    expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"[{\"policyname\":\"mutate-container\",\"patches\":[{\"rulename\":\"default-imagepullpolicy\",\"op\":\"replace\",\"path\":\"/spec/containers/0/imagePullPolicy\"}]}]"}}`
    assert.Assert(t, string(annPatches) == expectedPatches)
}

func Test_exist_annotation(t *testing.T) {
    annotation := map[string]string{
        "test": "annotation",
    }

    patchStr := `{ "op": "replace", "path": "/spec/containers/0/imagePullPolicy", "value": "IfNotPresent" }`
    engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true)
    annPatches := generateAnnotationPatches(annotation, []engine.EngineResponse{engineResponse})

    expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"[{\"policyname\":\"mutate-container\",\"patches\":[{\"rulename\":\"default-imagepullpolicy\",\"op\":\"replace\",\"path\":\"/spec/containers/0/imagePullPolicy\"}]}]","test":"annotation"}}`
    assert.Assert(t, string(annPatches) == expectedPatches)
}

func Test_exist_kyverno_annotation(t *testing.T) {
    annotation := map[string]string{
        "policies.kyverno.io/patches": "old-annotation",
    }

    patchStr := `{ "op": "replace", "path": "/spec/containers/0/imagePullPolicy", "value": "IfNotPresent" }`
    engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true)
    annPatches := generateAnnotationPatches(annotation, []engine.EngineResponse{engineResponse})

    expectedPatches := `{"op":"replace","path":"/metadata/annotations/policies.kyverno.io/patches","value":"[{\"policyname\":\"mutate-container\",\"patches\":[{\"rulename\":\"default-imagepullpolicy\",\"op\":\"replace\",\"path\":\"/spec/containers/0/imagePullPolicy\"}]}]"}`
    assert.Assert(t, string(annPatches) == expectedPatches)
}

func Test_annotation_nil_patch(t *testing.T) {
    annotation := map[string]string{
        "policies.kyverno.io/patches": "old-annotation",
    }

    engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", nil, true)
    annPatches := generateAnnotationPatches(annotation, []engine.EngineResponse{engineResponse})

    assert.Assert(t, annPatches == nil)

    engineResponseNew := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{""}, true)
    annPatchesNew := generateAnnotationPatches(annotation, []engine.EngineResponse{engineResponseNew})
    assert.Assert(t, annPatchesNew == nil)
}

func Test_annotation_failed_Patch(t *testing.T) {
    annotation := map[string]string{
        "policies.kyverno.io/patches": "old-annotation",
    }

    engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", nil, false)
    annPatches := generateAnnotationPatches(annotation, []engine.EngineResponse{engineResponse})

    assert.Assert(t, annPatches == nil)
}

@@ -70,7 +70,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) (bool
        return true, nil, ""
    }

    var engineResponses []engine.EngineResponse
    for _, policy := range policies {

        // check if policy has a rule for the admission request kind

@@ -91,15 +91,17 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) (bool
        }
        // gather patches
        patches = append(patches, engineResponse.GetPatches()...)

        glog.V(4).Infof("Mutation from policy %s has applied successfully to %s %s/%s", policy.Name, request.Kind.Kind, resource.GetNamespace(), resource.GetName())
        //TODO: check if there is an order to policy application on resource
        // resource = &engineResponse.PatchedResource
    }

    // generate annotations
    if annPatches := generateAnnotationPatches(resource.GetAnnotations(), engineResponses); annPatches != nil {
        patches = append(patches, annPatches)
    }

    // ADD EVENTS
    events := generateEvents(engineResponses, (request.Operation == v1beta1.Update))
    ws.eventGen.Add(events...)

@@ -11,7 +11,7 @@ import (
)

// generateEvents generates event info for the engine responses
func generateEvents(engineResponses []engine.EngineResponse, onUpdate bool) []event.Info {
    var events []event.Info
    if !isResponseSuccesful(engineResponses) {
        for _, er := range engineResponses {

@@ -9,7 +9,7 @@ import (
    "github.com/nirmata/kyverno/pkg/engine"
)

func isResponseSuccesful(engineReponses []engine.EngineResponse) bool {
    for _, er := range engineReponses {
        if !er.IsSuccesful() {
            return false

@@ -20,7 +20,7 @@ func isResponseSuccesful(engineReponses []engine.EngineResponseNew) bool {

// returns true -> if there is even one policy that blocks the resource request
// returns false -> if all the policies are report-only; we don't block the resource request
func toBlockResource(engineReponses []engine.EngineResponse) bool {
    for _, er := range engineReponses {
        if er.PolicyResponse.ValidationFailureAction == Enforce {
            glog.V(4).Infof("ValidationFailureAction set to enforce for policy %s, blocking resource request", er.PolicyResponse.Policy)

@@ -31,7 +31,7 @@ func toBlockResource(engineReponses []engine.EngineResponseNew) bool {
    return false
}

func getErrorMsg(engineReponses []engine.EngineResponse) string {
    var str []string
    for _, er := range engineReponses {
        if !er.IsSuccesful() {

@@ -86,7 +86,7 @@ func processResourceWithPatches(patch []byte, resource []byte) []byte {
    if patch == nil {
        return nil
    }
    glog.Info(string(resource))

    resource, err := engine.ApplyPatchNew(resource, patch)
    if err != nil {
        glog.Errorf("failed to patch resource: %v", err)

@@ -76,7 +76,7 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pat
        return true, ""
    }

    var engineResponses []engine.EngineResponse
    for _, policy := range policies {

        if !utils.ContainsString(getApplicableKindsForPolicy(policy), request.Kind.Kind) {

samples/README.md (new file, 157 lines)
@@ -0,0 +1,157 @@
# Best Practice Policies

Best practice policies are designed to be applied to your Kubernetes clusters with minimal changes. To import these policies, [install Kyverno](../documentation/installation.md) and import the resources as follows:

````bash
kubectl create -f https://github.com/nirmata/kyverno/raw/master/samples/best_practices/
````

More information on each best-practice policy is provided below:

## Run as non-root user

By default, processes in a container run as the root user (uid 0). To prevent potential compromise of container hosts, specify a least-privileged user ID when building the container image, and require that application containers run as non-root users, i.e. set `runAsNonRoot` to `true`.

***Policy YAML***: [deny_runasrootuser.yaml](best_practices/deny_runasrootuser.yaml)

**Additional Information**
* [Pod Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)

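For reference, a minimal pod spec that satisfies this policy sets the flag at the pod level (a sketch; the pod and image names here are illustrative, not part of the policy):

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: non-root-app          # illustrative name
spec:
  securityContext:
    runAsNonRoot: true        # matches the policy's first anyPattern
  containers:
  - name: app
    image: nginx:1.7.9
````
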
## Disallow automount of Service Account credentials

Kubernetes automounts default service account credentials in each pod. To restrict access, opt out of automounting credentials by setting `automountServiceAccountToken` to `false`.

***Policy YAML***: [disallow_automountingapicred.yaml](best_practices/disallow_automountingapicred.yaml)

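A compliant pod opts out explicitly (an illustrative sketch, not part of the policy):

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: no-token-app          # illustrative name
spec:
  automountServiceAccountToken: false
  containers:
  - name: app
    image: nginx:1.7.9
````
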
## Disallow use of default namespace

With many users spread across multiple teams, restricting use of the default namespace and subdividing the cluster by namespace isolates workloads.

***Policy YAML***: [disallow_default_namespace.yaml](best_practices/disallow_default_namespace.yaml)

## Disallow use of host filesystem

A volume of type `hostPath` binds pods to a specific host, and data persisted in the volume is dependent on the life of the node. In a shared cluster, it is recommended that applications are independent of hosts.

***Policy YAML***: [disallow_host_filesystem.yaml](best_practices/disallow_host_filesystem.yaml)

## Disallow `hostNetwork` and `hostPort`

Using `hostPort` and `hostNetwork` allows pods to share the host network stack, allowing potential snooping of network traffic from an application pod.

***Policy YAML***: [disallow_host_network_hostport.yaml](best_practices/disallow_host_network_hostport.yaml)

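A compliant pod leaves `hostNetwork` disabled and defines container ports without a `hostPort` (an illustrative sketch):

````yaml
apiVersion: v1
kind: Pod
metadata:
  name: no-host-net-app       # illustrative name
spec:
  hostNetwork: false
  containers:
  - name: app
    image: nginx:1.7.9
    ports:
    - containerPort: 80       # no hostPort set
````
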
## Disallow `hostPID` and `hostIPC`

Sharing the host's PID namespace allows visibility of processes on the host, potentially exposing process information.
Sharing the host's IPC namespace allows the container process to communicate with processes on the host. To prevent pod containers from having visibility into the host process space, validate that `hostPID` and `hostIPC` are set to `false`.

***Policy YAML***: [disallow_hostpid_hostipc.yaml](best_practices/disallow_hostpid_hostipc.yaml)

## Restrict service type `NodePort`

A Kubernetes service of type `NodePort` uses a host port to receive traffic from any source. A `NetworkPolicy` resource cannot be used to control traffic to host ports. Although `NodePort` services can be useful, their use must be limited to services with additional upstream security checks.

***Policy YAML***: [disallow_node_port.yaml](best_practices/disallow_node_port.yaml)

## Disable privileged containers

Privileged containers are defined as any container where the container uid 0 is mapped to the host's uid 0. A process within a privileged container can get unrestricted host access. With `securityContext.allowPrivilegeEscalation` enabled, a process can gain privileges from its parent.
To disallow privileged containers and the escalation of privileges, it is recommended to run pod containers with `securityContext.privileged` set to `false` and `allowPrivilegeEscalation` set to `false`.

***Policy YAML***: [disallow_priviledged_priviligedescalation.yaml](best_practices/disallow_priviledged_priviligedescalation.yaml)

## Default deny all ingress traffic

By default, Kubernetes allows all ingress and egress traffic to and from pods within a cluster. A "default" `NetworkPolicy` resource for a namespace should be used to deny all ingress traffic to the pods in that namespace. Additional `NetworkPolicy` resources can then be configured to allow desired traffic to application pods.

***Policy YAML***: [require_default_network_policy.yaml](best_practices/require_default_network_policy.yaml)

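The policy's generate rule creates a default-deny `NetworkPolicy` in each new namespace; the generated resource is equivalent to the sketch below (the `apiVersion` shown is the standard one for `NetworkPolicy`; the policy itself specifies only the kind, name, and spec):

````yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-ingress
spec:
  # select all pods in the namespace
  podSelector: {}
  policyTypes:
  - Ingress
````
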
## Disallow latest image tag

The `:latest` tag is mutable and can lead to unexpected errors if the image changes. A best practice is to use an immutable tag that maps to a specific version of an application pod.

***Policy YAML***: [require_image_tag_not_latest.yaml](best_practices/require_image_tag_not_latest.yaml)

## Configure namespace limits and quotas

To limit the number of objects, as well as the total amount of compute that may be consumed by an application, it is important to create resource limits and quotas for each namespace.

***Policy YAML***: [require_namespace_quota.yaml](best_practices/require_namespace_quota.yaml)

**Additional Information**
* [Resource Quota](https://kubernetes.io/docs/concepts/policy/resource-quotas/)

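A namespace-level `ResourceQuota` of the kind this policy expects could look like the following sketch (the name, namespace, and quota values are illustrative):

````yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: compute-quota         # illustrative name
  namespace: team-a           # illustrative namespace
spec:
  hard:
    requests.cpu: "4"
    requests.memory: 8Gi
    limits.cpu: "8"
    limits.memory: 16Gi
````
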
## Require pod resource requests and limits

As application workloads share cluster resources, it is important to limit the resources requested and consumed by each pod. It is recommended to require `resources.requests` and `resources.limits` per pod. If a namespace-level request or limit is specified, defaults are automatically applied to each pod based on the `LimitRange` configuration.

***Policy YAML***: [require_pod_requests_limits.yaml](best_practices/require_pod_requests_limits.yaml)

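A container spec fragment that satisfies the policy declares both requests and limits (the values are illustrative):

````yaml
containers:
- name: app                   # illustrative name
  image: nginx:1.7.9
  resources:
    requests:
      memory: "128Mi"
      cpu: "100m"
    limits:
      memory: "256Mi"
      cpu: "200m"
````
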
## Require `livenessProbe` and `readinessProbe`

For each pod, a `livenessProbe` is carried out by the kubelet to determine when to restart a container. A `readinessProbe` is used by services and deployments to determine if the pod is ready to receive network traffic.
Both liveness and readiness probes need to be configured to manage the pod lifecycle during restarts and upgrades.

***Policy YAML***: [require_probes.yaml](best_practices/require_probes.yaml)

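The policy checks that each container sets a `periodSeconds` greater than zero on both probes; a minimal compliant container fragment might look like this (the probe endpoints are illustrative):

````yaml
containers:
- name: app                   # illustrative name
  image: nginx:1.7.9
  livenessProbe:
    httpGet:
      path: /healthz          # illustrative endpoint
      port: 80
    periodSeconds: 10
  readinessProbe:
    httpGet:
      path: /ready            # illustrative endpoint
      port: 80
    periodSeconds: 5
````
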
## Read-only root filesystem

A read-only root file system helps to enforce an immutable infrastructure strategy; the container only needs to write to mounted volumes that persist state. An immutable root filesystem can also prevent malicious binaries from writing to the host system.

***Policy YAML***: [require_readonly_rootfilesystem.yaml](best_practices/require_readonly_rootfilesystem.yaml)

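Compliance is a single field on the container's security context (an illustrative container fragment):

````yaml
containers:
- name: app                   # illustrative name
  image: nginx:1.7.9
  securityContext:
    readOnlyRootFilesystem: true
````
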
## Disallow unknown image registries

Images from unknown registries may not be scanned and secured. Requiring the use of known registries helps reduce threat exposure. You can customize this policy to allow the image registries that you trust.

***Policy YAML***: [trusted_image_registries.yaml](best_practices/trusted_image_registries.yaml)

# More Policies

The policies listed here provide additional best practices that should be considered for production use. These policies may require workload-specific configuration.

## Assign Linux capabilities inside Pod

Linux divides the privileges traditionally associated with superuser into distinct units, known as capabilities, which can be independently enabled or disabled by listing them in `securityContext.capabilities`.

***Policy YAML***: [policy_validate_container_capabilities.yaml](more/policy_validate_container_capabilities.yaml)

**Additional Information**
* [List of linux capabilities](https://github.com/torvalds/linux/blob/master/include/uapi/linux/capability.h)

## Check userID, groupIP & fsgroup used inside a Pod
|
||||
All processes inside the pod can be made to run with specific user and groupID by setting `runAsUser` and `runAsGroup` respectively. `fsGroup` can be specified to make sure any file created in the volume with have the specified groupID. These options can be used to validate the IDs used for user and group.
|
||||
|
||||
***Policy YAML***: [policy_validate_user_group_fsgroup_id.yaml](more/policy_validate_user_group_fsgroup_id.yaml)
|
||||
|
||||
|
||||
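A pod security context fragment matching the IDs the sample policy expects (1000/3000/2000, as defined in the policy):

````yaml
spec:
  securityContext:
    runAsUser: 1000
    runAsGroup: 3000
    fsGroup: 2000
````
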
## Configure kernel parameters inside pod

The sysctl interface allows kernel parameters to be modified at runtime; for a pod they can be specified under `securityContext.sysctls`. If kernel parameters in the pod are to be modified, this should be handled cautiously, and a policy with rules restricting these options will be helpful. For example, we can control the minimum and maximum port that a network connection can use as its source (local) port by checking `net.ipv4.ip_local_port_range`.

***Policy YAML***: [policy_validate_sysctl_configs.yaml](more/policy_validate_sysctl_configs.yaml)

**Additional Information**
* [List of supported namespaced sysctl interfaces](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)

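A pod spec fragment that the sample policy accepts declares the port range under its security context (matching the "1024 65535" range checked by the policy):

````yaml
spec:
  securityContext:
    sysctls:
    - name: net.ipv4.ip_local_port_range
      value: "1024 65535"
````
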
## Check userID, groupIP & fsgroup used inside a Pod
|
||||
|
||||
All processes inside the pod can be made to run with specific user and groupID by setting `runAsUser` and `runAsGroup` respectively. `fsGroup` can be specified to make sure any file created in the volume with have the specified groupID. These options can be used to validate the IDs used for user and group.
|
||||
|
||||
***Policy YAML***: [policy_validate_user_group_fsgroup_id.yaml](more/policy_validate_user_group_fsgroup_id.yaml)
|
samples/best_practices/deny_runasrootuser.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-deny-runasrootuser
  annotations:
    policies.kyverno.io/category: Security Context
    policies.kyverno.io/description: By default, processes in a container run as a
      root user (uid 0). To prevent potential compromise of container hosts, specify a
      least privileged user ID when building the container image and require that
      application containers run as non root users.
spec:
  rules:
  - name: deny-runasrootuser
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Root user is not allowed. Set runAsNonRoot to true."
      anyPattern:
      - spec:
          securityContext:
            runAsNonRoot: true
      - spec:
          containers:
          - name: "*"
            securityContext:
              runAsNonRoot: true

samples/best_practices/disallow_automountingapicred.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-disallow-automoutingapicred
  annotations:
    policies.kyverno.io/category: API Server Access Control
    policies.kyverno.io/description: Kubernetes automounts default service account credentials in each pod.
      To restrict access, opt out of automounting credentials by setting 'automountServiceAccountToken' to 'false'.
spec:
  rules:
  - name: disallow-automoutingapicred
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Deny automounting API credentials"
      pattern:
        spec:
          =(serviceAccountName): "*"
          automountServiceAccountToken: false

@@ -2,6 +2,10 @@ apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-namespace
  annotations:
    policies.kyverno.io/category: Workload Isolation
    policies.kyverno.io/description: With many users spread across multiple teams, restricting
      use of the default namespace and subdividing the cluster by namespace isolates workloads.
spec:
  rules:
  - name: check-default-namespace

@@ -10,7 +14,7 @@ spec:
        kinds:
        - Pod
    validate:
      message: "Using 'default' namespace is restricted"
      pattern:
        metadata:
          namespace: "!default"

samples/best_practices/disallow_host_filesystem.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
apiVersion: "kyverno.io/v1alpha1"
kind: "ClusterPolicy"
metadata:
  name: "deny-use-of-host-fs"
  annotations:
    policies.kyverno.io/category: Data Protection
    policies.kyverno.io/description: The volume of type 'hostPath' binds pods to a specific host,
      and data persisted in the volume is dependent on the life of the node. In a shared cluster,
      it is recommended that applications are independent of hosts.
spec:
  rules:
  - name: "deny-use-of-host-fs"
    match:
      resources:
        kinds:
        - "Pod"
    validate:
      message: "Host path is not allowed"
      pattern:
        spec:
          volumes:
          - X(hostPath): null

samples/best_practices/disallow_host_network_hostport.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-host-network-hostport
  annotations:
    policies.kyverno.io/category: Security
    policies.kyverno.io/description: Using 'hostPort' and 'hostNetwork' allows pods to share
      the host network stack, allowing potential snooping of network traffic from an application pod.
spec:
  rules:
  - name: validate-host-network-hostport
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Defining hostNetwork and hostPort are not allowed."
      pattern:
        spec:
          (hostNetwork): false
          containers:
          - name: "*"
            ports:
            - hostPort: null

samples/best_practices/disallow_hostpid_hostipc.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-hostpid-hostipc
  annotations:
    policies.kyverno.io/category: Security
    policies.kyverno.io/description: Sharing the host's PID namespace allows visibility of processes
      on the host, potentially exposing process information. Sharing the host's IPC namespace allows
      the container process to communicate with processes on the host. To prevent pod containers from
      having visibility into the host process space, validate that 'hostPID' and 'hostIPC' are set to 'false'.
spec:
  rules:
  - name: validate-hostpid-hostipc
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Disallow use of host's pid namespace and host's ipc namespace"
      pattern:
        spec:
          (hostPID): "!true"
          hostIPC: false

samples/best_practices/disallow_node_port.yaml (new file, 23 lines)
@@ -0,0 +1,23 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: disallow-node-port
  annotations:
    policies.kyverno.io/category: Security
    policies.kyverno.io/description: A Kubernetes service of type NodePort uses a
      host port to receive traffic from any source. A 'NetworkPolicy' resource cannot be used
      to control traffic to host ports. Although 'NodePort' services can be useful, their use
      must be limited to services with additional upstream security checks.
spec:
  rules:
  - name: disallow-node-port
    match:
      resources:
        kinds:
        - Service
    validate:
      message: "Disallow service of type NodePort"
      pattern:
        spec:
          type: "!NodePort"

@@ -0,0 +1,32 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-deny-privileged-priviligedescalation
  annotations:
    policies.kyverno.io/category: Security Context
    policies.kyverno.io/description: Privileged containers are defined as any container
      where the container uid 0 is mapped to the host's uid 0. A process within privileged
      containers can get unrestricted host access. With 'securityContext.allowPrivilegeEscalation'
      enabled a process can gain privileges from its parent. To disallow privileged containers
      and the escalation of privileges it is recommended to run pod containers with
      'securityContext.privileged' as 'false' and 'allowPrivilegeEscalation' as 'false'.
spec:
  rules:
  - name: deny-privileged-priviligedescalation
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false"
      anyPattern:
      - spec:
          securityContext:
            allowPrivilegeEscalation: false
            privileged: false
      - spec:
          containers:
          - name: "*"
            securityContext:
              allowPrivilegeEscalation: false
              privileged: false

@@ -3,13 +3,8 @@ kind: ClusterPolicy
metadata:
  name: validate-deny-runasrootuser
spec:
  rules:
  - name: deny-runasrootuser
    match:
      resources:
        kinds:

samples/best_practices/require_default_network_policy.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: default-deny-ingress-networkpolicy
  annotations:
    policies.kyverno.io/category: NetworkPolicy
    policies.kyverno.io/description: By default, Kubernetes allows all ingress and egress traffic
      to and from pods within a cluster. A "default" NetworkPolicy resource for a namespace should
      be used to deny all ingress traffic to the pods in that namespace. Additional NetworkPolicy
      resources can then be configured to allow desired traffic to application pods.
spec:
  rules:
  - name: "default-deny-ingress"
    match:
      resources:
        kinds:
        - Namespace
        name: "*"
    generate:
      kind: NetworkPolicy
      name: default-deny-ingress
      data:
        spec:
          # select all pods in the namespace
          podSelector: {}
          policyTypes:
          - Ingress

samples/best_practices/require_image_tag_not_latest.yaml (new file, 33 lines)
@@ -0,0 +1,33 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-image-tag
  annotations:
    policies.kyverno.io/category: Image
    policies.kyverno.io/description: The ':latest' tag is mutable and can lead to
      unexpected errors if the image changes. A best practice is to use an immutable
      tag that maps to a specific version of an application pod.
spec:
  rules:
  - name: image-tag-notspecified
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Image tag not specified"
      pattern:
        spec:
          containers:
          - image: "*:*"
  - name: image-tag-not-latest
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Using 'latest' image tag is restricted. Set image tag to a specific version"
      pattern:
        spec:
          containers:
          - image: "!*:latest"

@@ -2,6 +2,11 @@ apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-namespace-quota
  annotations:
    policies.kyverno.io/category: Resource Quota
    policies.kyverno.io/description: To limit the number of objects, as well as the
      total amount of compute that may be consumed by an application, it is important
      to create resource limits and quotas for each namespace.
spec:
  rules:
  - name: validate-namespace-quota

samples/best_practices/require_pod_requests_limits.yaml (new file, 30 lines)
@@ -0,0 +1,30 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: check-resource
  annotations:
    policies.kyverno.io/category: Resource Quota
    policies.kyverno.io/description: As application workloads share cluster resources, it is important
      to limit resources requested and consumed by each pod. It is recommended to require
      'resources.requests' and 'resources.limits' per pod. If a namespace level request or limit is
      specified, defaults will automatically be applied to each pod based on the 'LimitRange' configuration.
spec:
  validationFailureAction: "audit"
  rules:
  - name: check-resource-request-limit
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "CPU and memory resource requests and limits are required"
      pattern:
        spec:
          containers:
          - resources:
              requests:
                memory: "?*"
                cpu: "?*"
              limits:
                memory: "?*"
                cpu: "?*"

samples/best_practices/require_probes.yaml (new file, 27 lines)
@@ -0,0 +1,27 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-probes
  annotations:
    policies.kyverno.io/category: Health Check
    policies.kyverno.io/description: For each pod, a 'livenessProbe' is carried out by the kubelet to
      determine when to restart a container. A 'readinessProbe' is used by services and deployments to
      determine if the pod is ready to receive network traffic. Both liveness and readiness probes
      need to be configured to manage the pod lifecycle during restarts and upgrades.
spec:
  rules:
  - name: check-probes
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Liveness and readiness probes are required"
      pattern:
        spec:
          containers:
          - livenessProbe:
              periodSeconds: ">0"
            readinessProbe:
              periodSeconds: ">0"

samples/best_practices/require_readonly_rootfilesystem.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-readonly-rootfilesystem
  annotations:
    policies.kyverno.io/category: Security Context
    policies.kyverno.io/description: A read-only root file system helps to enforce an immutable
      infrastructure strategy; the container only needs to write on the mounted volume that
      persists the state. An immutable root filesystem can also prevent malicious binaries from
      writing to the host system.
spec:
  rules:
  - name: validate-readonly-rootfilesystem
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Container requires a read-only root filesystem"
      pattern:
        spec:
          containers:
          - securityContext:
              readOnlyRootFilesystem: true

samples/best_practices/trusted_image_registries.yaml (new file, 22 lines)
@@ -0,0 +1,22 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: trusted-registries
  annotations:
    policies.kyverno.io/category: Image
    policies.kyverno.io/description: Images from unknown registries may not be scanned and secured.
      Requiring use of known registries helps reduce threat exposure. You can customize this policy
      to allow image registries that you trust.
spec:
  rules:
  - name: trusted-registries
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Deny untrusted registries"
      pattern:
        spec:
          containers:
          - image: "k8s.gcr.io/* | gcr.io/*"

samples/more/policy_validate_container_capabilities.yaml (new file, 24 lines)
@@ -0,0 +1,24 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-container-capablities
  annotations:
    policies.kyverno.io/category: Security Context
    policies.kyverno.io/description: Linux divides the privileges traditionally associated with
      superuser into distinct units, known as capabilities, which can be independently enabled
      or disabled by listing them in 'securityContext.capabilities'.
spec:
  rules:
  - name: validate-container-capablities
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Allow certain linux capability"
      pattern:
        spec:
          containers:
          - securityContext:
              capabilities:
                add: ["NET_ADMIN"]

samples/more/policy_validate_sysctl_configs.yaml (new file, 26 lines)
@@ -0,0 +1,26 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-allow-portrange-with-sysctl
  annotations:
    policies.kyverno.io/category: Security Context
    policies.kyverno.io/description: The sysctl interface allows kernel parameters to be modified
      at runtime; for a pod they can be specified under 'securityContext.sysctls'. Modifying kernel
      parameters in a pod should be handled cautiously, and a policy with rules restricting these
      options will be helpful. We can control the minimum and maximum port that a network connection
      can use as its source (local) port by checking 'net.ipv4.ip_local_port_range'.
spec:
  rules:
  - name: allow-portrange-with-sysctl
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Allowed port range is from 1024 to 65535"
      pattern:
        spec:
          securityContext:
            sysctls:
            - name: net.ipv4.ip_local_port_range
              value: "1024 65535"

samples/more/policy_validate_user_group_fsgroup_id.yaml (new file, 48 lines)
@@ -0,0 +1,48 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: validate-userid-groupid-fsgroup
  annotations:
    policies.kyverno.io/category: Security Context
    policies.kyverno.io/description: All processes inside the pod can be made to run with specific user
      and groupID by setting 'runAsUser' and 'runAsGroup' respectively. 'fsGroup' can be specified
      to make sure any file created in the volume will have the specified groupID. These options can be
      used to validate the IDs used for user and group.
spec:
  rules:
  - name: validate-userid
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "User ID should be 1000"
      pattern:
        spec:
          securityContext:
            runAsUser: '1000'
  - name: validate-groupid
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "Group ID should be 3000"
      pattern:
        spec:
          securityContext:
            runAsGroup: '3000'
  - name: validate-fsgroup
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "fsgroup should be 2000"
      pattern:
        spec:
          securityContext:
            fsGroup: '2000'

# All processes inside the pod can be made to run with a specific user and groupID by setting runAsUser and runAsGroup respectively.
# fsGroup can be specified to make sure any file created in the volume will have the specified groupID.
# The above parameters can also be used in a validate policy to restrict user & group IDs.

@@ -1,20 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-config
  namespace: default
  labels:
    originalLabel: isHere
data:
  ui.properties: |
    color.good=green
    color.bad=red

  game.properties: |
    enemies=predators
    lives=3

  configmap.data: |
    ns=default
    labels=originalLabel
    labelscount=1

@@ -1,7 +0,0 @@
kind: Namespace
apiVersion: v1
metadata:
  name: "ns2"
  labels:
    LabelForSelector: "namespace2"

@@ -1,20 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: "policy-configmapgenerator-test"
spec:
  rules:
  - name: "copyCM"
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            LabelForSelector: "namespace2"
    generate:
      kind: ConfigMap
      name: copied-cm
      clone:
        namespace: default
        name: game-config

@@ -1,93 +0,0 @@
# This is a test-policy with patch, configMapGenerator with and without "copyFrom" option,
# secretGenerator with and without "copyFrom" option.
# To apply this policy you need to create secret and configMap in "default" namespace
# and then create a namespace

apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: "policy-ns-patch-cmg-sg"
spec:
  rules:
  - name: "patchNamespace2"
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            LabelForSelector: "namespace2"
    mutate:
      patches:
      - path: "/metadata/labels/isMutatedByPolicy"
        op: add
        value: "true"

  - name: "copyCM"
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            LabelForSelector: "namespace2"
    generate:
      kind: ConfigMap
      name: copied-cm
      clone:
        namespace: default
        name: game-config

  - name: "generateCM"
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            LabelForSelector: "namespace2"
    generate:
      kind: ConfigMap
      name: generated-cm
      data:
        data:
          secretData: "very sensitive data from cmg"
          database: mongodb
          database_uri: mongodb://localhost:27017
          keys: |
            image.public.key=771
            rsa.public.key=42

  - name: "generateSecret"
    match:
      resources:
        kinds:
        - Namespace
        name: ns2
    generate:
      kind: Secret
      name: generated-secrets
      data:
        foo: bar
        app.properties: /
          foo1=bar1
          foo2=bar2
        ui.properties: /
          foo1=bar1
          foo2=bar2

  - name: "copySecret"
    match:
      resources:
        kinds:
        - Namespace
        name: ns2
    generate:
      kind: Secret
      name: copied-secrets
      copyFrom:
        namespace: default
        name: mysecret
      data:
        foo: bar
        secretData: "data from sg"

@@ -1,10 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
  labels:
    originalLabel: isHere
type: Opaque
data:
  username: dXNlcg==
  password: cGFzc3dvcmQ=

@@ -1,62 +0,0 @@
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello
  labels:
    label: "original"
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hellow
  labels:
    label: "original"
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: 12hello
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: hello23
  labels:
    label: "original"
spec:
  schedule: "*/1 * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: hel32lo
            image: busybox
            args:
            - /bin/sh
            - -c
            - date; echo Hello from the Kubernetes cluster
          restartPolicy: OnFailure

@@ -1,39 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-cronjob
spec:
  rules:
  - name: pCJ
    match:
      resources:
        kinds:
        - CronJob
        name: "?ell*"
    mutate:
      patches:
      - path: "/metadata/labels/isMutated"
        op: add
        value: "true"
      - path: "/spec/schedule"
        op: replace
        value: "* */1 * * *"
      - path: "/metadata/labels/label"
        op: add
        value: "not_original"
      - path: "/metadata/labels/label234e3"
        op: remove
    validate:
      message: "This resource is broken"
      pattern:
        metadata:
          labels:
            label: "not_original"
        spec:
          jobTemplate:
            spec:
              template:
                spec:
                  containers:
                  - name: "h*"
                    image: busybox

@@ -1,43 +0,0 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: fluentd-elasticsearch
  namespace: kube-system
  labels:
    k8s-app: fluentd-logging
    originalLabel: isHere
spec:
  selector:
    matchLabels:
      name: fluentd-elasticsearch
  template:
    metadata:
      labels:
        name: fluentd-elasticsearch
    spec:
      tolerations:
      - key: node-role.kubernetes.io/master
        effect: NoSchedule
      containers:
      - name: fluentd-elasticsearch
        image: k8s.gcr.io/fluentd-elasticsearch:1.20
        resources:
          limits:
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 200Mi
        volumeMounts:
        - name: varlog
          mountPath: /var/log
        - name: varlibdockercontainers
          mountPath: /var/lib/docker/containers
          readOnly: true
      terminationGracePeriodSeconds: 30
      volumes:
      - name: varlog
        hostPath:
          path: /var/log
      - name: varlibdockercontainers
        hostPath:
          path: /var/lib/docker/containers

@ -1,29 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-daemonset
spec:
  rules:
  - name: "Patch and Volume validation"
    match:
      resources:
        kinds:
        - DaemonSet
        name: fluentd-elasticsearch
    mutate:
      patches:
      - path: "/metadata/labels/isMutated"
        op: add
        value: "true"
      - path: "/metadata/labels/originalLabel"
        op: remove
    validate:
      message: "This daemonset is broken"
      pattern:
        spec:
          template:
            spec:
              containers:
              - volumeMounts:
                - name: varlibdockercontainers
                  readOnly: false
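Worth noting: the sample DaemonSet mounts `varlibdockercontainers` with `readOnly: true`, while the pattern above requires `readOnly: false`. This pairing is presumably a negative test, where validation is expected to fail with "This daemonset is broken". The conflicting fragment, for reference:

volumeMounts:
- name: varlibdockercontainers
  mountPath: /var/lib/docker/containers
  readOnly: true  # the validate pattern demands false, so this resource fails the check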
@ -1,21 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 3
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.7.9
        ports:
        - containerPort: 80
@ -1,25 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-deployment
spec:
  rules:
  - name: "First policy v2"
    match:
      resources:
        kinds:
        - Deployment
    mutate:
      patches:
      - path: /metadata/labels/isMutated
        op: add
        value: "true"
      - path: /metadata/labels/app
        op: replace
        value: "nginx_is_mutated"
    validate:
      message: "Because I like only mutated resources"
      pattern:
        metadata:
          labels:
            app: "*mutated"
@ -1,13 +0,0 @@
apiVersion: v1
kind: Endpoints
metadata:
  name: test-endpoint
  labels:
    label: test
subsets:
- addresses:
  - ip: 192.168.10.171
  ports:
  - name: secure-connection
    port: 443
    protocol: TCP
@ -1,34 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-endpoints
spec:
  rules:
  - name: pEP
    match:
      resources:
        kinds:
        - Endpoints
        selector:
          matchLabels:
            label: test
    mutate:
      patches:
      - path: "/subsets/0/ports/0/port"
        op: replace
        value: 9663
      - path: "/subsets/0"
        op: add
        value:
          addresses:
          - ip: "192.168.10.172"
          ports:
          - name: load-balancer-connection
            port: 80
            protocol: UDP
    validate:
      message: "This resource has wrong IP"
      pattern:
        subsets:
        - addresses:
          - ip: "192.168.10.171|192.168.10.172"
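The validate pattern uses `|` alternation: `192.168.10.171|192.168.10.172` is assumed to accept either address, so both the original subset and the subset inserted by the `add` patch should pass. The idiom, isolated:

pattern:
  subsets:
  - addresses:
    - ip: "192.168.10.171|192.168.10.172"  # a value matches if it equals either alternative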
@ -1,22 +0,0 @@
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
  name: wildfly-example
  labels:
    originalLabel: isHere
spec:
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: wildfly-example
  minReplicas: 1
  maxReplicas: 5
  metrics:
  - type: Resource
    resource:
      name: cpu
      targetAverageUtilization: 80
  - type: Resource
    resource:
      name: memory
      targetAverageValue: 1000Mi
@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-hpa
spec:
  rules:
  - name: hpa1
    match:
      resources:
        kinds:
        - HorizontalPodAutoscaler
        selector:
          matchLabels:
            originalLabel: isHere
    mutate:
      patches:
      - path: "/metadata/labels/isMutated"
        op: add
        value: "true"
      - op: replace
        path: "/spec/metrics/1/resource/targetAverageValue"
        value: "959Mi"
    validate:
      message: "There is a wrong resource request or apiVersion"
      pattern:
        spec:
          scaleTargetRef:
            apiVersion: extensions/v1beta1
          # metrics:
          # - type: Resource
          #   resource:
          #     name: cpu|memory
@ -1,16 +0,0 @@
apiVersion: extensions/v1beta1
kind: Ingress
metadata:
  name: test-ingress
  labels:
    originalLabel: isHere
  annotations:
    nginx.ingress.kubernetes.io/rewrite-target: /
spec:
  rules:
  - http:
      paths:
      - path: /testpath
        backend:
          serviceName: testprod
          servicePort: 80
@ -1,32 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-ingress
spec:
  rules:
  - name: ingress1
    match:
      resources:
        kinds:
        - Ingress
        selector:
          matchLabels:
            originalLabel: isHere
    mutate:
      patches:
      - path: "/metadata/labels/isMutated"
        op: add
        value: "true"
      - path: "/spec/rules/0/http/paths/0/path"
        op: replace
        value: "/mutatedpath"
    validate:
      message: "Ingress allowed only for prod services"
      pattern:
        spec:
          rules:
          - http:
              paths:
              - path: "*"
                backend:
                  serviceName: "*prod"
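The validate pattern here leans on glob-style value matching: `path: "*"` accepts any path, while `serviceName: "*prod"` accepts only backends whose service name ends in `prod`, which `testprod` above satisfies. A hypothetical non-compliant backend, for contrast:

backend:
  serviceName: teststaging  # would be rejected: does not match "*prod"
  servicePort: 80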
@ -1,22 +0,0 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: piv0
        image: perl
        command: ["perl"]
        ports:
        - containerPort: 90
          protocol: TCP
      - name: pi
        image: perl
        command: ["perl"]
      - name: piv1
        image: perl
        command: ["perl"]
      restartPolicy: Never
  backoffLimit: 4
@ -1,55 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-job-perl-bigint
spec:
  rules:
  - name: job2
    match:
      resources:
        kinds:
        - Job
        name: pi
    mutate:
      overlay:
        spec:
          template:
            spec:
              containers:
              - (name): piv0
                ports:
                - containerPort: 80
                  protocol: TCP
  - name: job1
    match:
      resources:
        kinds:
        - Job
        name: pi
    mutate:
      overlay:
        metadata:
          labels:
            isOverlayed: "true"
        spec:
          template:
            spec:
              containers:
              - name: "pi1"
                image: "vasylev.perl"
              - name: "pi2"
                image: "maxov.perl"
      patches:
      - path: "/spec/template/spec/containers/0/command"
        op: add
        value: [ "-Mbignum=bpi", "-wle", "print bpi(2000)" ]
      - path: "/spec/backoffLimit"
        op: add
        value: 10
    validate:
      message: "This job should not be restarted"
      pattern:
        spec:
          template:
            spec:
              restartPolicy: Never
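In the `job2` rule, the parenthesized key is kyverno's conditional anchor: `(name): piv0` is understood to select the existing container named `piv0` for merging, rather than appending a new list entry, so only that container receives the `ports` overlay. The idiom in isolation:

containers:
# (name) is a condition, not a field to set: the overlay is applied only to
# list entries whose name equals piv0
- (name): piv0
  ports:
  - containerPort: 80
    protocol: TCP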
@ -1,14 +0,0 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: test-mem-limit-range
  labels:
    containerSize: minimal
spec:
  limits:
  - default:
      memory: 512Mi
      cpu: 10m
    defaultRequest:
      memory: 256Mi
    type: Container
@ -1,26 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-limitrange
spec:
  rules:
  - name: "rule"
    match:
      resources:
        kinds:
        - LimitRange
        selector:
          matchLabels:
            containerSize: minimal
    mutate:
      patches:
      - path: "/spec/limits/0/default/memory"
        op: add
        value: 384Mi
    validate:
      message: "The CPU value is incorrect"
      pattern:
        spec:
          limits:
          - default:
              cpu: 10m
@ -1,7 +0,0 @@
kind: Namespace
apiVersion: v1
metadata:
  name: namespace-not-modified
  labels:
    LabelForSelector: "namespace"
    replaced: "no"
@ -1,23 +0,0 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
  name: policy-namespace
spec:
  rules:
  - name: ns1
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            LabelForSelector: "namespace"
    mutate:
      patches:
      - path: "/metadata/labels/replaced"
        op: add
        value: "yes"
      - path: "/metadata/name"
        op: replace
        value: "modified-namespace"
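The second patch rewrites `/metadata/name` itself; paired with a resource named `namespace-not-modified`, this reads as a test that mutation can rename a resource during admission. A sketch of the expected post-mutation metadata (inferred from the patches above):

metadata:
  name: modified-namespace
  labels:
    LabelForSelector: "namespace"
    replaced: "yes"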
@ -1,36 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: test-network-policy
  labels:
    originalLabel: isHere
  namespace: default
spec:
  podSelector:
    matchLabels:
      role: db
  policyTypes:
  - Ingress
  - Egress
  ingress:
  - from:
    - ipBlock:
        cidr: 172.17.0.0/16
        except:
        - 172.17.129.0/24
    - namespaceSelector:
        matchLabels:
          project: myproject
    - podSelector:
        matchLabels:
          role: frontend
    ports:
    - protocol: TCP
      port: 6379
  egress:
  - to:
    - ipBlock:
        cidr: 10.0.0.0/24
    ports:
    - protocol: TCP
      port: 5978
Some files were not shown because too many files have changed in this diff.