
Merge branch 'main' into schema_gen

# Conflicts:
#	pkg/generate/cleanup/controller.go
#	pkg/generate/controller.go
Shuting Zhao 2020-11-13 17:46:34 -08:00
commit 781fbfb82c
17 changed files with 238 additions and 45 deletions

OWNERS.md Normal file

@@ -0,0 +1,11 @@
approvers:
- JimBugwadia
- realshuting
- NoSkillGirl
- evalsocket
reviewers:
- JimBugwadia
- realshuting
- NoSkillGirl
- evalsocket

go.sum

@@ -341,6 +341,7 @@ github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSN
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -1114,6 +1115,7 @@ k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
mvdan.cc/interfacer v0.0.0-20180901003855-c20040233aed/go.mod h1:Xkxe497xwlCKkIaQYRfC7CSLworTXY9RMqwhhCm+8Nc=
mvdan.cc/lint v0.0.0-20170908181259-adc824a0674b/go.mod h1:2odslEg/xrtNQqCYg2/jCoyKnw3vv5biOc3JnIcYfL4=
@@ -1137,6 +1139,7 @@ sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1
sigs.k8s.io/structured-merge-diff v1.0.1 h1:LOs1LZWMsz1xs77Phr/pkB4LFaavH7IVq/3+WTN9XTA=
sigs.k8s.io/structured-merge-diff v1.0.1/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=


@@ -51,7 +51,7 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
// add configmap json data to context
if err := AddResourceToContext(log, rule.Context, resCache, jsonContext); err != nil {
-log.Info("cannot add configmaps to context", "reason", err.Error())
+log.V(4).Info("cannot add configmaps to context", "reason", err.Error())
return nil
}


@@ -160,7 +160,7 @@ func Test_variableSubstitutionPathNotExist(t *testing.T) {
Context: ctx,
NewResource: *resourceUnstructured}
er := Mutate(policyContext)
-expectedErrorStr := "could not find variable request.object.metadata.name1 at path /spec/name"
+expectedErrorStr := "variable request.object.metadata.name1 not found (path: /spec/name)"
t.Log(er.PolicyResponse.Rules[0].Message)
assert.Equal(t, er.PolicyResponse.Rules[0].Message, expectedErrorStr)
}


@@ -1330,7 +1330,7 @@ func Test_VariableSubstitutionPathNotExistInPattern(t *testing.T) {
NewResource: *resourceUnstructured}
er := Validate(policyContext)
assert.Assert(t, !er.PolicyResponse.Rules[0].Success)
-assert.Equal(t, er.PolicyResponse.Rules[0].Message, "Validation error: ; Validation rule 'test-path-not-exist' failed. 'could not find variable request.object.metadata.name1 at path /spec/containers/0/name'")
+assert.Equal(t, er.PolicyResponse.Rules[0].Message, "Validation error: ; Validation rule 'test-path-not-exist' failed. 'variable request.object.metadata.name1 not found (path: /spec/containers/0/name)'")
}
func Test_VariableSubstitutionPathNotExistInAnyPattern_OnePatternStatisfies(t *testing.T) {
@@ -1512,7 +1512,7 @@ func Test_VariableSubstitutionPathNotExistInAnyPattern_AllPathNotPresent(t *test
NewResource: *resourceUnstructured}
er := Validate(policyContext)
assert.Assert(t, !er.PolicyResponse.Rules[0].Success)
-assert.Equal(t, er.PolicyResponse.Rules[0].Message, "Substitutions failed: [could not find variable request.object.metadata.name1 at path /spec/template/spec/containers/0/name could not find variable request.object.metadata.name2 at path /spec/template/spec/containers/0/name]")
+assert.Equal(t, er.PolicyResponse.Rules[0].Message, "Substitutions failed: [variable request.object.metadata.name1 not found (path: /spec/template/spec/containers/0/name) variable request.object.metadata.name2 not found (path: /spec/template/spec/containers/0/name)]")
}
func Test_VariableSubstitutionPathNotExistInAnyPattern_AllPathPresent_NonePatternSatisfy(t *testing.T) {


@@ -76,7 +76,7 @@ type NotFoundVariableErr struct {
}
func (n NotFoundVariableErr) Error() string {
-return fmt.Sprintf("could not find variable %v at path %v", n.variable, n.path)
+return fmt.Sprintf("variable %v not found (path: %v)", n.variable, n.path)
}
// subValR resolves the variables if defined


@@ -268,7 +268,7 @@ func (c *Controller) syncGenerateRequest(key string) error {
startTime := time.Now()
logger.V(4).Info("started syncing generate request", "startTime", startTime)
defer func() {
logger.V(4).Info("finished syncying generate request", "processingTIme", time.Since(startTime).String())
logger.V(4).Info("finished syncing generate request", "processingTIme", time.Since(startTime).String())
}()
_, grName, err := cache.SplitMetaNamespaceKey(key)
if errors.IsNotFound(err) {


@@ -30,7 +30,7 @@ func (c *Controller) processGR(gr *kyverno.GenerateRequest) error {
resource, err = getResource(c.client, gr.Spec.Resource)
if err != nil {
// Dont update status
logger.Error(err, "resource does not exist or is yet to be created, requeueing")
logger.V(3).Info("resource does not exist or is pending creation, re-queueing", "details", err.Error())
return err
}
@@ -58,7 +58,8 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern
for _, e := range gr.Status.GeneratedResources {
resp, err := c.client.GetResource(e.APIVersion, e.Kind, e.Namespace, e.Name)
if err != nil {
logger.Error(err, "Generated resource failed to get", "Resource", e.Name)
logger.Error(err, "failed to find generated resource", "name", e.Name)
continue
}
labels := resp.GetLabels()
@@ -68,9 +69,11 @@ }
}
}
}
return nil, nil
}
logger.Error(err, "error in getting policy")
logger.Error(err, "error in fetching policy")
return nil, err
}
@@ -117,8 +120,10 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern
if !r.Success {
grList, err := c.kyvernoClient.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).List(contextdefault.TODO(), metav1.ListOptions{})
if err != nil {
logger.Error(err, "failed to list generate requests")
continue
}
for _, v := range grList.Items {
if engineResponse.PolicyResponse.Policy == v.Spec.Policy && engineResponse.PolicyResponse.Resource.Name == v.Spec.Resource.Name && engineResponse.PolicyResponse.Resource.Kind == v.Spec.Resource.Kind && engineResponse.PolicyResponse.Resource.Namespace == v.Spec.Resource.Namespace {
err := c.kyvernoClient.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).Delete(contextdefault.TODO(), v.GetName(), metav1.DeleteOptions{})
@@ -161,8 +166,8 @@ func (c *Controller) applyGeneratePolicy(log logr.Logger, policyContext engine.P
if !rule.HasGenerate() {
continue
}
startTime := time.Now()
processExisting := false
if len(rule.MatchResources.Kinds) > 0 {
@@ -183,6 +188,8 @@ func (c *Controller) applyGeneratePolicy(log logr.Logger, policyContext engine.P
genResource, err := applyRule(log, c.client, rule, resource, ctx, policy.Name, gr, processExisting)
if err != nil {
+log.Error(err, "failed to apply generate rule", "policy", policy.Name,
+	"rule", rule.Name, "resource", resource.GetName())
return nil, err
}


@@ -65,7 +65,7 @@ func (ws *WebhookServer) HandleMutation(
err := ws.openAPIController.ValidateResource(*engineResponse.PatchedResource.DeepCopy(), engineResponse.PatchedResource.GetKind())
if err != nil {
logger.Error(err, "validation error", "policy", policy.Name)
logger.V(4).Info("validation error", "policy", policy.Name, "error", err)
continue
}


@@ -22,11 +22,12 @@ spec:
- Namespace
name: "*"
exclude:
-  namespaces:
-  - "kube-system"
-  - "default"
-  - "kube-public"
-  - "kyverno"
+  resources:
+    namespaces:
+    - "kube-system"
+    - "default"
+    - "kube-public"
+    - "kyverno"
generate:
kind: NetworkPolicy
name: default-deny-ingress


@@ -1,6 +1,6 @@
# Sample Policies
Sample policies are designed to be applied to your Kubernetes clusters with minimal changes.
The policies are mostly validation rules in `audit` mode, i.e., your existing workloads will not be impacted, but will be audited for policy compliance.
@@ -9,45 +9,49 @@ The policies are mostly validation rules in `audit` mode i.e. your existing work
These policies are highly recommended.
1. [Disallow root user](DisallowRootUser.md)
-2. [Disallow privileged containers](DisallowPrivilegedContainers.md)
-3. [Disallow new capabilities](DisallowNewCapabilities.md)
-4. [Disallow kernel parameter changes](DisallowSysctls.md)
-5. [Disallow use of bind mounts (`hostPath` volumes)](DisallowBindMounts.md)
-6. [Disallow docker socket bind mount](DisallowDockerSockMount.md)
-7. [Disallow `hostNetwork` and `hostPort`](DisallowHostNetworkPort.md)
-8. [Disallow `hostPID` and `hostIPC`](DisallowHostPIDIPC.md)
-9. [Disallow use of default namespace](DisallowDefaultNamespace.md)
-10. [Disallow latest image tag](DisallowLatestTag.md)
-11. [Disallow Helm Tiller](DisallowHelmTiller.md)
-12. [Require read-only root filesystem](RequireReadOnlyRootFS.md)
-13. [Require pod resource requests and limits](RequirePodRequestsLimits.md)
-14. [Require pod `livenessProbe` and `readinessProbe`](RequirePodProbes.md)
-15. [Add default network policy](AddDefaultNetworkPolicy.md)
-16. [Add namespace quotas](AddNamespaceQuotas.md)
-17. [Add `safe-to-evict` for pods with `emptyDir` and `hostPath` volumes](AddSafeToEvict.md)
+1. [Disallow privileged containers](DisallowPrivilegedContainers.md)
+1. [Disallow new capabilities](DisallowNewCapabilities.md)
+1. [Disallow kernel parameter changes](DisallowSysctls.md)
+1. [Disallow use of bind mounts (`hostPath` volumes)](DisallowBindMounts.md)
+1. [Disallow docker socket bind mount](DisallowDockerSockMount.md)
+1. [Disallow `hostNetwork` and `hostPort`](DisallowHostNetworkPort.md)
+1. [Disallow `hostPID` and `hostIPC`](DisallowHostPIDIPC.md)
+1. [Disallow use of default namespace](DisallowDefaultNamespace.md)
+1. [Disallow latest image tag](DisallowLatestTag.md)
+1. [Disallow Helm Tiller](DisallowHelmTiller.md)
+1. [Require read-only root filesystem](RequireReadOnlyRootFS.md)
+1. [Require pod resource requests and limits](RequirePodRequestsLimits.md)
+1. [Require pod `livenessProbe` and `readinessProbe`](RequirePodProbes.md)
+1. [Add default network policy](AddDefaultNetworkPolicy.md)
+1. [Add namespace quotas](AddNamespaceQuotas.md)
+1. [Add `safe-to-evict` for pods with `emptyDir` and `hostPath` volumes](AddSafeToEvict.md)
## Additional Policies
These policies provide additional best practices and are worthy of close consideration. These policies may require specific changes for your workloads and environments.
-17. [Restrict image registries](RestrictImageRegistries.md)
-18. [Restrict `NodePort` services](RestrictNodePort.md)
-19. [Restrict auto-mount of service account credentials](RestrictAutomountSAToken.md)
-20. [Restrict ingress classes](RestrictIngressClasses.md)
-21. [Restrict User Group](CheckUserGroup.md)
+1. [Restrict image registries](RestrictImageRegistries.md)
+1. [Restrict `NodePort` services](RestrictNodePort.md)
+1. [Restrict auto-mount of service account credentials](RestrictAutomountSAToken.md)
+1. [Restrict ingress classes](RestrictIngressClasses.md)
+1. [Restrict User Group](CheckUserGroup.md)
+1. [Require pods are labeled](RequireLabels.md)
+1. [Require pods have certain labels](RequireCertainLabels.md)
+1. [Require Deployments have multiple replicas](RequireDeploymentsHaveReplicas.md)
## Applying the sample policies
To apply these policies to your cluster, install Kyverno and import the policies as follows:
-**Install Kyverno**
+### Install Kyverno
````sh
-kubectl create -f https://github.com/kyverno/kyverno/raw/master/definitions/install.yaml
+kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/definitions/release/install.yaml
````
<small>[(installation docs)](../documentation/installation.md)</small>
-**Apply Kyverno Policies**
+### Apply Kyverno Policies
To start applying policies to your cluster, first clone the repo:
@@ -56,15 +60,14 @@ git clone https://github.com/kyverno/kyverno.git
cd kyverno
````
-Import best_practices from [here](best_pratices):
+Import best practices from [here](best_pratices):
````bash
kubectl create -f samples/best_practices
````
-Import addition policies from [here](more):
+Import additional policies from [here](more):
````bash
kubectl create -f samples/more/
````


@@ -0,0 +1,31 @@
# Require certain labels
In many cases, you may require that each Pod is assigned at least one label from a select list of approved labels. This sample policy demonstrates the [`anyPattern`](https://kyverno.io/docs/writing-policies/validate/#anypattern---logical-or-across-multiple-validation-patterns) option, which applies a logical OR across multiple validation patterns: a Pod must define either the `app.kubernetes.io/name` or the `app.kubernetes.io/component` label.
## Policy YAML
[require_certain_labels.yaml](best_practices/require_certain_labels.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-certain-labels
spec:
  validationFailureAction: audit
  rules:
  - name: validate-certain-labels
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The label `app.kubernetes.io/name` or `app.kubernetes.io/component` is required."
      anyPattern:
      - metadata:
          labels:
            app.kubernetes.io/name: "?*"
      - metadata:
          labels:
            app.kubernetes.io/component: "?*"
```
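For illustration, a minimal sketch of a Pod that would satisfy this policy. The name, image, and label value below are placeholders, not taken from the repository; either one of the two labels on its own is enough to match the `anyPattern`:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-app                      # placeholder name
  labels:
    app.kubernetes.io/component: server  # matches the second pattern; "?*" accepts any non-empty value
spec:
  containers:
  - name: app
    image: nginx:1.21                    # placeholder image
```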


@@ -0,0 +1,40 @@
# Require deployments have multiple replicas
Deployments with only a single replica pose an availability risk: if that one replica fails, the application becomes unavailable. In most cases, you would want Deployment objects to have more than one replica to ensure continued availability, if not scalability.
This sample policy requires that Deployments have more than one replica, excluding those in a list of system namespaces.
## More Information
* [Kubernetes Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
## Policy YAML
[require_deployments_have_multiple_replicas.yaml](more/require_deployments_have_multiple_replicas.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: deployment-has-multiple-replicas
spec:
  validationFailureAction: audit
  rules:
  - name: deployment-has-multiple-replicas
    match:
      resources:
        kinds:
        - Deployment
    exclude:
      resources:
        namespaces:
        - kyverno
        - kube-system
        - kube-node-lease
        - kube-public
    validate:
      message: "Deployments must have more than one replica to ensure availability."
      pattern:
        spec:
          replicas: ">1"
```
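As a sketch, a Deployment that would pass this rule, assuming it runs in a namespace outside the exclude list. The names and image are placeholders; any `replicas` value greater than 1 satisfies the `">1"` pattern:
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: example-deployment   # placeholder name
  namespace: default         # any namespace not in the exclude list
spec:
  replicas: 2                # greater than 1, so the rule passes
  selector:
    matchLabels:
      app: example
  template:
    metadata:
      labels:
        app: example
    spec:
      containers:
      - name: app
        image: nginx:1.21    # placeholder image
```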

samples/RequireLabels.md Normal file

@@ -0,0 +1,34 @@
# Require labels
Labels are a fundamental and important way to assign descriptive metadata to Kubernetes resources, especially Pods. They become especially important as the number of applications grows and they are composed in different ways.
This sample policy requires that the label `app.kubernetes.io/name` be defined on all Pods. If you wish to require that all Pods have multiple labels defined (as opposed to [any label from an approved list](RequireCertainLabels.md)), this policy can be altered by adding an additional rule block that checks for a second (or third, etc.) label name.
## More Information
* [Common labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/)
## Policy YAML
[require_labels.yaml](best_practices/require_labels.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-labels
spec:
  validationFailureAction: audit
  rules:
  - name: check-for-labels
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The label `app.kubernetes.io/name` is required."
      pattern:
        metadata:
          labels:
            app.kubernetes.io/name: "?*"
```
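For illustration, a minimal Pod that this policy would admit. The name, image, and label value are placeholders; the `"?*"` wildcard only requires that the label be present with a non-empty value:
```yaml
apiVersion: v1
kind: Pod
metadata:
  name: example-pod                 # placeholder name
  labels:
    app.kubernetes.io/name: my-app  # any non-empty value matches "?*"
spec:
  containers:
  - name: app
    image: nginx:1.21               # placeholder image
```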


@@ -0,0 +1,21 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-certain-labels
spec:
  validationFailureAction: audit
  rules:
  - name: validate-certain-labels
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The label `app.kubernetes.io/name` or `app.kubernetes.io/component` is required."
      anyPattern:
      - metadata:
          labels:
            app.kubernetes.io/name: "?*"
      - metadata:
          labels:
            app.kubernetes.io/component: "?*"


@@ -0,0 +1,18 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: require-labels
spec:
  validationFailureAction: audit
  rules:
  - name: check-for-labels
    match:
      resources:
        kinds:
        - Pod
    validate:
      message: "The label `app.kubernetes.io/name` is required."
      pattern:
        metadata:
          labels:
            app.kubernetes.io/name: "?*"


@@ -0,0 +1,24 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: deployment-has-multiple-replicas
spec:
  validationFailureAction: audit
  rules:
  - name: deployment-has-multiple-replicas
    match:
      resources:
        kinds:
        - Deployment
    exclude:
      resources:
        namespaces:
        - kyverno
        - kube-system
        - kube-node-lease
        - kube-public
    validate:
      message: "Deployments must have more than one replica to ensure availability."
      pattern:
        spec:
          replicas: ">1"