Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-31 03:45:17 +00:00)

commit 26ae7e2052: merge master changes

38 changed files with 498 additions and 212 deletions
.github/workflows/release.yaml (vendored) | 4
@@ -1,11 +1,9 @@
 name: prereleaser

 on:
   push:
     tags:
       - '*'

 jobs:
   releaser:
     runs-on: ubuntu-latest
@@ -36,6 +34,8 @@ jobs:
           access-token: ${{ secrets.ACCESS_TOKEN }}
           deploy-branch: gh-pages
           charts-folder: charts
+      - name: Update new version in krew-index
+        uses: rajatjindal/krew-release-bot@v0.0.38

@@ -14,7 +14,6 @@ builds:
       - windows
     goarch:
       - amd64
     goarm: [6, 7]
 archives:
   - id: kyverno-cli-archive
     name_template: |-
@@ -26,12 +25,12 @@ archives:
       {{- end -}}
     builds:
       - kyverno-cli
-    replacements:
-      386: i386
-      amd64: x86_64
     format_overrides:
       - goos: windows
         format: zip
+    replacements:
+      386: i386
+      amd64: x86_64
     files: ["LICENSE"]
 checksum:
   name_template: "checksums.txt"
.krew.yaml (new file) | 46
@@ -0,0 +1,46 @@
+apiVersion: krew.googlecontainertools.github.com/v1alpha2
+kind: Plugin
+metadata:
+  name: kyverno
+spec:
+  version: {{ .TagName }}
+  homepage: https://github.com/nirmata/kyverno
+  platforms:
+  - selector:
+      matchLabels:
+        os: linux
+        arch: amd64
+    {{addURIAndSha "https://github.com/nirmata/kyverno/releases/download/{{ .TagName }}/kyverno-cli_{{ .TagName }}_linux_x86_64.tar.gz" .TagName }}
+    files:
+    - from: kyverno
+      to: .
+    - from: LICENSE
+      to: .
+    bin: kyverno
+  - selector:
+      matchLabels:
+        os: darwin
+        arch: amd64
+    {{addURIAndSha "https://github.com/nirmata/kyverno/releases/download/{{ .TagName }}/kyverno-cli_{{ .TagName }}_darwin_x86_64.tar.gz" .TagName }}
+    files:
+    - from: kyverno
+      to: .
+    - from: LICENSE
+      to: .
+    bin: kyverno
+  - selector:
+      matchLabels:
+        os: windows
+        arch: amd64
+    {{addURIAndSha "https://github.com/nirmata/kyverno/releases/download/{{ .TagName }}/kyverno-cli_{{ .TagName }}_windows_x86_64.zip" .TagName }}
+    files:
+    - from: kyverno.exe
+      to: .
+    - from: LICENSE
+      to: .
+    bin: kyverno.exe
+  shortDescription: Kyverno is a policy engine for kubernetes
+  description: |+2
+    Kyverno is used to test kyverno policies and apply policies to resources files
+  caveats: |
+    The plugin requires access to create Policy and CustomResources

@@ -209,7 +209,6 @@ func main() {
 		pInformer.Kyverno().V1().ClusterPolicies(),
 		pInformer.Kyverno().V1().GenerateRequests(),
 		eventGenerator,
-		pvgen,
 		kubedynamicInformer,
 		statusSync.Listener,
 		log.Log.WithName("GenerateController"),
@@ -231,6 +230,16 @@ func main() {
 		log.Log.WithName("PolicyCacheController"),
 	)

+	auditHandler := webhooks.NewValidateAuditHandler(
+		pCacheController.Cache,
+		eventGenerator,
+		statusSync.Listener,
+		pvgen,
+		kubeInformer.Rbac().V1().RoleBindings(),
+		kubeInformer.Rbac().V1().ClusterRoleBindings(),
+		log.Log.WithName("ValidateAuditHandler"),
+	)
+
 	// CONFIGURE CERTIFICATES
 	tlsPair, err := client.InitTLSPemPair(clientConfig, fqdncn)
 	if err != nil {
@@ -282,6 +291,7 @@ func main() {
 		pvgen,
 		grgen,
 		rWebhookWatcher,
+		auditHandler,
 		supportMudateValidate,
 		cleanUp,
 		log.Log.WithName("WebhookServer"),
@@ -307,6 +317,7 @@ func main() {
 	go pvgen.Run(1, stopCh)
 	go statusSync.Run(1, stopCh)
 	go pCacheController.Run(1, stopCh)
+	go auditHandler.Run(10, stopCh)
 	openAPISync.Run(1, stopCh)

 	// verifies if the admission control is enabled and active
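The wiring above follows the repository's controller startup convention: each component exposes `Run(workers int, stopCh <-chan struct{})`, is launched on its own goroutine, and closing the single stop channel broadcasts shutdown to every worker at once. A minimal, self-contained sketch of that pattern (illustrative names, not code from this commit):

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// component mimics the Run(workers, stopCh) contract used by the
// controllers started in main(): it fans out N workers and exits
// when stopCh is closed.
type component struct{ name string }

func (c component) Run(workers int, stopCh <-chan struct{}) {
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			for {
				select {
				case <-stopCh: // closing the channel unblocks every worker at once
					return
				case <-time.After(100 * time.Millisecond):
					fmt.Printf("%s worker %d: tick\n", c.name, id)
				}
			}
		}(i)
	}
	wg.Wait()
}

func main() {
	stopCh := make(chan struct{})
	go component{name: "auditHandler"}.Run(10, stopCh)
	go component{name: "statusSync"}.Run(1, stopCh)

	time.Sleep(300 * time.Millisecond)
	close(stopCh) // broadcast shutdown to all components
	time.Sleep(50 * time.Millisecond)
}
```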

@@ -1,13 +1,10 @@
-<small>_[documentation](/README.md#documentation) / kyverno-cli_</small>
+<small>*[documentation](/README.md#documentation) / kyverno-cli*</small>

 # Kyverno CLI

 The Kyverno Command Line Interface (CLI) is designed to validate policies and test the behavior of applying policies to resources before adding the policy to a cluster. It can be used as a kubectl plugin and as a standalone CLI.

 ## Install the CLI

 The Kyverno CLI binary is distributed with each release. You can install the CLI for your platform from the [releases](https://github.com/nirmata/kyverno/releases) site.

 ## Build the CLI

 You can build the CLI binary locally, then move the binary into a directory in your PATH.
@@ -19,10 +16,14 @@ make cli
 mv ./cmd/cli/kubectl-kyverno/kyverno /usr/local/bin/kyverno
 ```

-You can also use curl to install kyverno-cli
-
+You can also use [Krew](https://github.com/kubernetes-sigs/krew)
 ```bash
-curl -L https://raw.githubusercontent.com/nirmata/kyverno/master/scripts/install-cli.sh | bash
+# Install kyverno using krew plugin manager
+kubectl krew install kyverno
+
+# example
+kubectl kyverno version
+
 ```

 ## Install via AUR (archlinux)
@@ -40,54 +41,38 @@ yay -S kyverno-git
 Prints the version of kyverno used by the CLI.

 Example:

 ```
 kyverno version
 ```

 #### Validate

 Validates a policy; it can validate multiple policy resource description files or even an entire folder containing policy resource description
-files. Currently supports files with resource description in YAML.
+files. Currently supports files with resource description in yaml.

 Example:

 ```
 kyverno validate /path/to/policy1.yaml /path/to/policy2.yaml /path/to/folderFullOfPolicies
 ```

 #### Apply

 Applies policies on resources, and supports applying multiple policies on multiple resources in a single command.
 Also supports applying the given policies to an entire cluster. The current kubectl context will be used to access the cluster.
 Will return results to stdout.

 Apply to a resource:

-```bash
+```
 kyverno apply /path/to/policy.yaml --resource /path/to/resource.yaml
 ```

 Apply to all matching resources in a cluster:

-```bash
+```
 kyverno apply /path/to/policy.yaml --cluster > policy-results.txt
 ```

 Apply multiple policies to multiple resources:

-```bash
+```
 kyverno apply /path/to/policy1.yaml /path/to/folderFullOfPolicies --resource /path/to/resource1.yaml --resource /path/to/resource2.yaml --cluster
 ```

 ##### Exit Codes

 The CLI exits with different exit codes:

 | Message                               | Exit Code |
 | ------------------------------------- | --------- |
 | executes successfully                 | 0         |
 | one or more policy rules are violated | 1         |
 | policy validation failed              | 2         |

-<small>_Read Next >> [Sample Policies](/samples/README.md)_</small>
+<small>*Read Next >> [Sample Policies](/samples/README.md)*</small>
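The exit-code table makes the CLI scriptable. A hedged sketch of driving it from Go (assumes a `kyverno` binary on PATH; `policy.yaml` and `resource.yaml` are placeholder file names):

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	cmd := exec.Command("kyverno", "apply", "policy.yaml", "--resource", "resource.yaml")
	out, err := cmd.CombinedOutput()
	fmt.Print(string(out))

	code := 0
	if exitErr, ok := err.(*exec.ExitError); ok {
		code = exitErr.ExitCode() // non-zero exit from the CLI
	} else if err != nil {
		panic(err) // binary missing or not runnable
	}

	switch code {
	case 0:
		fmt.Println("executed successfully")
	case 1:
		fmt.Println("one or more policy rules are violated")
	case 2:
		fmt.Println("policy validation failed")
	}
}
```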
go.mod | 1
@@ -16,6 +16,7 @@ require (
 	github.com/julienschmidt/httprouter v1.3.0
 	github.com/minio/minio v0.0.0-20200114012931-30922148fbb5
 	github.com/ory/go-acc v0.2.1 // indirect
+	github.com/pkg/errors v0.8.1
 	github.com/prometheus/common v0.4.1
 	github.com/rogpeppe/godef v1.1.2 // indirect
 	github.com/spf13/cobra v0.0.5
go.sum | 2
@@ -606,6 +606,7 @@ github.com/minio/highwayhash v1.0.0/go.mod h1:xQboMTeM9nY9v/LlAOxFctujiv5+Aq2hR5
 github.com/minio/lsync v1.0.1/go.mod h1:tCFzfo0dlvdGl70IT4IAK/5Wtgb0/BrTmo/jE8pArKA=
 github.com/minio/minio v0.0.0-20200114012931-30922148fbb5 h1:CjDeQ78sVdDrENJff3EUwVMUv9GfTL4NyLvjE/Bvrd8=
 github.com/minio/minio v0.0.0-20200114012931-30922148fbb5/go.mod h1:HH1U0HOUzfjsCGlGCncWDh8L3zPejMhMDDsLAETqXs0=
 github.com/minio/minio-go/v6 v6.0.44 h1:CVwVXw+uCOcyMi7GvcOhxE8WgV+Xj8Vkf2jItDf/EGI=
 github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg=
 github.com/minio/parquet-go v0.0.0-20191231003236-20b3c07bcd2c/go.mod h1:sl82d+TnCE7qeaNJazHdNoG9Gpyl9SZYfleDAQWrsls=
 github.com/minio/sha256-simd v0.1.1/go.mod h1:B5e1o+1/KgNmWrSQK08Y6Z1Vb5pwIktudl0J58iy0KM=
@@ -748,6 +749,7 @@ github.com/segmentio/analytics-go v3.0.1+incompatible/go.mod h1:C7CYBtQWk4vRk2Ry
 github.com/segmentio/backo-go v0.0.0-20160424052352-204274ad699c/go.mod h1:kJ9mm9YmoWSkk+oQ+5Cj8DEoRCX2JT6As4kEtIIOp1M=
 github.com/serenize/snaker v0.0.0-20171204205717-a683aaf2d516/go.mod h1:Yow6lPLSAXx2ifx470yD/nUe22Dv5vBvxK/UK9UUTVs=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
 github.com/shirou/gopsutil v2.18.12+incompatible h1:1eaJvGomDnH74/5cF4CTmTbLHAriGFsTZppLXDX93OM=
 github.com/shirou/gopsutil v2.18.12+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA=
 github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
 github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=

@@ -55,6 +55,9 @@ func mutateResourceWithOverlay(resource unstructured.Unstructured, overlay inter
 // ForceMutate does not check any conditions, it simply mutates the given resource
 func ForceMutate(ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured) (unstructured.Unstructured, error) {
 	var err error
+	logger := log.Log.WithName("EngineForceMutate").WithValues("policy", policy.Name, "kind", resource.GetKind(),
+		"namespace", resource.GetNamespace(), "name", resource.GetName())
+
 	for _, rule := range policy.Spec.Rules {
 		if !rule.HasMutate() {
 			continue
@@ -81,7 +84,7 @@ func ForceMutate(ctx context.EvalInterface, policy kyverno.ClusterPolicy, resour

 	if rule.Mutation.Patches != nil {
 		var resp response.RuleResponse
-		resp, resource = mutate.ProcessPatches(log.Log, rule, resource)
+		resp, resource = mutate.ProcessPatches(logger.WithValues("rule", rule.Name), rule, resource)
 		if !resp.Success {
 			return unstructured.Unstructured{}, fmt.Errorf(resp.Message)
 		}

@@ -29,7 +29,7 @@ func ProcessOverlay(log logr.Logger, ruleName string, overlay interface{}, resou
 	resp.Type = utils.Mutation.String()
 	defer func() {
 		resp.RuleStats.ProcessingTime = time.Since(startTime)
-		logger.V(4).Info("finished applying overlay rule", "processingTime", resp.RuleStats.ProcessingTime)
+		logger.V(4).Info("finished applying overlay rule", "processingTime", resp.RuleStats.ProcessingTime.String())
 	}()

 	patches, overlayerr := processOverlayPatches(logger, resource.UnstructuredContent(), overlay)

@@ -27,9 +27,9 @@ func checkConditions(log logr.Logger, resource, overlay interface{}, path string
 	// condition never be true in this case
 	if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
 		if hasNestedAnchors(overlay) {
-			log.V(4).Info(fmt.Sprintf("Found anchor on different types of element at path %s: overlay %T, resource %T", path, overlay, resource))
+			log.V(4).Info(fmt.Sprintf("element type mismatch at path %s: overlay %T, resource %T", path, overlay, resource))
 			return path, newOverlayError(conditionFailure,
-				fmt.Sprintf("Found anchor on different types of element at path %s: overlay %T %v, resource %T %v", path, overlay, overlay, resource, resource))
+				fmt.Sprintf("element type mismatch at path %s: overlay %T %v, resource %T %v", path, overlay, overlay, resource, resource))
 		}
 	}
 	return "", overlayError{}
@@ -112,8 +112,8 @@ func validateConditionAnchorMap(resourceMap, anchors map[string]interface{}, pat
 // resource - A: B2
 func compareOverlay(resource, overlay interface{}, path string) (string, overlayError) {
 	if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
-		log.Log.V(4).Info("Found anchor on different types of element", "overlay", overlay, "resource", resource)
-		return path, newOverlayError(conditionFailure, fmt.Sprintf("Found anchor on different types of element: overlay %T, resource %T", overlay, resource))
+		log.Log.V(4).Info("element type mismatch", "overlay", overlay, "resource", resource)
+		return path, newOverlayError(conditionFailure, fmt.Sprintf("element type mismatch: overlay %T, resource %T", overlay, resource))
 	}

 	switch typedOverlay := overlay.(type) {

@@ -150,7 +150,7 @@ func TestMeetConditions_DifferentTypes(t *testing.T) {
 	// anchor exist

 	_, err = meetConditions(log.Log, resource, overlay)
-	assert.Assert(t, strings.Contains(err.Error(), "Found anchor on different types of element at path /subsets/"))
+	assert.Assert(t, strings.Contains(err.Error(), "element type mismatch at path /subsets/"))
 }

 func TestMeetConditions_anchosInSameObject(t *testing.T) {

@@ -28,7 +28,7 @@ func ProcessPatches(log logr.Logger, rule kyverno.Rule, resource unstructured.Un
 	resp.Type = utils.Mutation.String()
 	defer func() {
 		resp.RuleStats.ProcessingTime = time.Since(startTime)
-		logger.V(4).Info("finished JSON patch", "processingTime", resp.RuleStats.ProcessingTime)
+		logger.V(4).Info("applied JSON patch", "processingTime", resp.RuleStats.ProcessingTime.String())
 	}()

 	// convert to RAW

@@ -22,23 +22,24 @@ const (
 	PodControllersAnnotation = "pod-policies.kyverno.io/autogen-controllers"
 	//PodTemplateAnnotation defines the annotation key for Pod-Template
 	PodTemplateAnnotation = "pod-policies.kyverno.io/autogen-applied"
 	PodControllerRuleName = "autogen-pod-ctrl-annotation"
 )

 // Mutate performs mutation. Overlay first and then mutation patches
 func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
 	startTime := time.Now()
 	policy := policyContext.Policy
-	resource := policyContext.NewResource
+	patchedResource := policyContext.NewResource
 	ctx := policyContext.Context
-	logger := log.Log.WithName("Mutate").WithValues("policy", policy.Name, "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
+	logger := log.Log.WithName("EngineMutate").WithValues("policy", policy.Name, "kind", patchedResource.GetKind(),
+		"namespace", patchedResource.GetNamespace(), "name", patchedResource.GetName())

 	logger.V(4).Info("start policy processing", "startTime", startTime)
-	startMutateResultResponse(&resp, policy, resource)
+	startMutateResultResponse(&resp, policy, patchedResource)
 	defer endMutateResultResponse(logger, &resp, startTime)

-	patchedResource := policyContext.NewResource
-
-	if autoGenAnnotationApplied(patchedResource) && policy.HasAutoGenAnnotation() {
+	if policy.HasAutoGenAnnotation() && excludePod(patchedResource) {
 		logger.V(5).Info("Skip applying policy, Pod has ownerRef set", "policy", policy.GetName())
 		resp.PatchedResource = patchedResource
 		return
 	}
@@ -47,14 +48,14 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
 		var ruleResponse response.RuleResponse
 		logger := logger.WithValues("rule", rule.Name)
 		//TODO: to be checked before calling the resources as well
-		if !rule.HasMutate() && !strings.Contains(PodControllers, resource.GetKind()) {
+		if !rule.HasMutate() && !strings.Contains(PodControllers, patchedResource.GetKind()) {
 			continue
 		}

 		// check if the resource satisfies the filter conditions defined in the rule
 		//TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
 		// dont satisfy a policy rule resource description
-		if err := MatchesResourceDescription(resource, rule, policyContext.AdmissionInfo); err != nil {
+		if err := MatchesResourceDescription(patchedResource, rule, policyContext.AdmissionInfo); err != nil {
 			logger.V(3).Info("resource not matched", "reason", err.Error())
 			continue
 		}
@@ -112,22 +113,6 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
 		return resp
 	}

-	// skip inserting on existing resource
-	if policy.HasAutoGenAnnotation() && strings.Contains(PodControllers, resource.GetKind()) {
-		if !patchedResourceHasPodControllerAnnotation(patchedResource) {
-			var ruleResponse response.RuleResponse
-			ruleResponse, patchedResource = mutate.ProcessOverlay(logger, PodControllerRuleName, podTemplateRule.Mutation.Overlay, patchedResource)
-			if !ruleResponse.Success {
-				logger.Info("failed to insert annotation for podTemplate", "error", ruleResponse.Message)
-			} else {
-				if ruleResponse.Success && ruleResponse.Patches != nil {
-					logger.V(3).Info("inserted annotation for podTemplate")
-					resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResponse)
-				}
-			}
-		}
-	}
-
 	// send the patched resource
 	resp.PatchedResource = patchedResource
 	return resp
@@ -170,23 +155,5 @@ func startMutateResultResponse(resp *response.EngineResponse, policy kyverno.Clu

 func endMutateResultResponse(logger logr.Logger, resp *response.EngineResponse, startTime time.Time) {
 	resp.PolicyResponse.ProcessingTime = time.Since(startTime)
-	logger.V(4).Info("finished processing policy", "processingTime", resp.PolicyResponse.ProcessingTime, "mutationRulesApplied", resp.PolicyResponse.RulesAppliedCount)
-}
-
-// podTemplateRule mutates the pod template with annotation
-// pod-policies.kyverno.io/autogen-applied=true
-var podTemplateRule = kyverno.Rule{
-	Mutation: kyverno.Mutation{
-		Overlay: map[string]interface{}{
-			"spec": map[string]interface{}{
-				"template": map[string]interface{}{
-					"metadata": map[string]interface{}{
-						"annotations": map[string]interface{}{
-							"+(" + PodTemplateAnnotation + ")": "true",
-						},
-					},
-				},
-			},
-		},
-	},
-}
+	logger.V(4).Info("finished processing policy", "processingTime", resp.PolicyResponse.ProcessingTime.String(), "mutationRulesApplied", resp.PolicyResponse.RulesAppliedCount)
 }

@@ -201,11 +201,10 @@ func copyConditions(original []kyverno.Condition) []kyverno.Condition {
 	return copy
 }

-// autoGenAnnotationApplied checks if a Pod has annotation "pod-policies.kyverno.io/autogen-applied"
-func autoGenAnnotationApplied(resource unstructured.Unstructured) bool {
+// excludePod checks if a Pod has ownerRef set
+func excludePod(resource unstructured.Unstructured) bool {
 	if resource.GetKind() == "Pod" {
-		ann := resource.GetAnnotations()
-		if _, ok := ann[PodTemplateAnnotation]; ok {
+		if len(resource.GetOwnerReferences()) > 0 {
 			return true
 		}
 	}

@@ -125,6 +125,7 @@ func validateArray(log logr.Logger, resourceArray, patternArray []interface{}, o
 	}
 	default:
 		// In all other cases - detect type and handle each array element with validateResourceElement
+		if len(resourceArray) >= len(patternArray) {
 		for i, patternElement := range patternArray {
 			currentPath := path + strconv.Itoa(i) + "/"
 			path, err := validateResourceElement(log, resourceArray[i], patternElement, originPattern, currentPath)
@@ -132,8 +133,10 @@ func validateArray(log logr.Logger, resourceArray, patternArray []interface{}, o
 				return path, err
 			}
 		}
+		} else {
+			return "", fmt.Errorf("Validate Array failed, array length mismatch, resource Array len is %d and pattern Array len is %d", len(resourceArray), len(patternArray))
+		}
 	}

 	return "", nil
 }

@@ -24,7 +24,7 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) {
 	oldR := policyContext.OldResource
 	ctx := policyContext.Context
 	admissionInfo := policyContext.AdmissionInfo
-	logger := log.Log.WithName("Validate").WithValues("policy", policy.Name)
+	logger := log.Log.WithName("EngineValidate").WithValues("policy", policy.Name)

 	if reflect.DeepEqual(newR, unstructured.Unstructured{}) {
 		logger = logger.WithValues("kind", oldR.GetKind(), "namespace", oldR.GetNamespace(), "name", oldR.GetName())
@@ -93,7 +93,7 @@ func startResultResponse(resp *response.EngineResponse, policy kyverno.ClusterPo

 func endResultResponse(log logr.Logger, resp *response.EngineResponse, startTime time.Time) {
 	resp.PolicyResponse.ProcessingTime = time.Since(startTime)
-	log.V(4).Info("finished processing", "processingTime", resp.PolicyResponse.ProcessingTime, "validationRulesApplied", resp.PolicyResponse.RulesAppliedCount)
+	log.V(4).Info("finished processing", "processingTime", resp.PolicyResponse.ProcessingTime.String(), "validationRulesApplied", resp.PolicyResponse.RulesAppliedCount)
 }

 func incrementAppliedCount(resp *response.EngineResponse) {
@@ -103,6 +103,10 @@ func incrementAppliedCount(resp *response.EngineResponse) {

 func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.EngineResponse {
 	resp := &response.EngineResponse{}
+	if policy.HasAutoGenAnnotation() && excludePod(resource) {
+		log.V(5).Info("Skip applying policy, Pod has ownerRef set", "policy", policy.GetName())
+		return resp
+	}

 	for _, rule := range policy.Spec.Rules {
 		if !rule.HasValidate() {
@@ -142,7 +146,8 @@ func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.
 func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.EngineResponse {
 	resp := &response.EngineResponse{}

-	if autoGenAnnotationApplied(resource) && policy.HasAutoGenAnnotation() {
+	if policy.HasAutoGenAnnotation() && excludePod(resource) {
+		log.V(5).Info("Skip applying policy, Pod has ownerRef set", "policy", policy.GetName())
 		return resp
 	}

@@ -226,7 +231,7 @@ func validatePatterns(log logr.Logger, ctx context.EvalInterface, resource unstr
 	resp.Type = utils.Validation.String()
 	defer func() {
 		resp.RuleStats.ProcessingTime = time.Since(startTime)
-		logger.V(4).Info("finished processing rule", "processingTime", resp.RuleStats.ProcessingTime)
+		logger.V(4).Info("finished processing rule", "processingTime", resp.RuleStats.ProcessingTime.String())
 	}()

 	// work on a copy of validation rule

@@ -248,7 +248,7 @@ func (c *Controller) syncGenerateRequest(key string) error {
 	startTime := time.Now()
 	logger.Info("started syncing generate request", "startTime", startTime)
 	defer func() {
-		logger.V(4).Info("finished syncing generate request", "processingTime", time.Since(startTime))
+		logger.V(4).Info("finished syncing generate request", "processingTime", time.Since(startTime).String())
 	}()
 	_, grName, err := cache.SplitMetaNamespaceKey(key)
 	if errors.IsNotFound(err) {

@@ -13,7 +13,6 @@ import (
 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	"github.com/nirmata/kyverno/pkg/event"
 	"github.com/nirmata/kyverno/pkg/policystatus"
-	"github.com/nirmata/kyverno/pkg/policyviolation"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
@@ -52,8 +51,6 @@ type Controller struct {
 	pSynced cache.InformerSynced
 	// grSynced returns true if the Generate Request store has been synced at least once
 	grSynced cache.InformerSynced
-	// policy violation generator
-	pvGenerator policyviolation.GeneratorInterface
 	// dynamic sharedinformer factory
 	dynamicInformer dynamicinformer.DynamicSharedInformerFactory
 	//TODO: list of generic informers
@@ -70,7 +67,6 @@ func NewController(
 	pInformer kyvernoinformer.ClusterPolicyInformer,
 	grInformer kyvernoinformer.GenerateRequestInformer,
 	eventGen event.Interface,
-	pvGenerator policyviolation.GeneratorInterface,
 	dynamicInformer dynamicinformer.DynamicSharedInformerFactory,
 	policyStatus policystatus.Listener,
 	log logr.Logger,
@@ -79,7 +75,6 @@ func NewController(
 		client:        client,
 		kyvernoClient: kyvernoclient,
 		eventGen:      eventGen,
-		pvGenerator:   pvGenerator,
 		//TODO: do the math for worst case back off and make sure cleanup runs after that
 		// as we dont want a deleted GR to be re-queued
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
@@ -268,7 +263,7 @@ func (c *Controller) syncGenerateRequest(key string) error {
 	startTime := time.Now()
 	logger.Info("started sync", "key", key, "startTime", startTime)
 	defer func() {
-		logger.V(4).Info("finished sync", "key", key, "processingTime", time.Since(startTime))
+		logger.V(4).Info("finished sync", "key", key, "processingTime", time.Since(startTime).String())
 	}()
 	_, grName, err := cache.SplitMetaNamespaceKey(key)
 	if err != nil {

@@ -28,7 +28,7 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
 			name = ns + "/" + name
 		}

-		logger.V(3).Info("applyPolicy", "resource", name, "processingTime", time.Since(startTime))
+		logger.V(3).Info("applyPolicy", "resource", name, "processingTime", time.Since(startTime).String())
 	}()

 	var engineResponses []response.EngineResponse
@@ -81,10 +81,6 @@ func getFailedOverallRuleInfo(resource unstructured.Unstructured, engineResponse
 	for index, rule := range engineResponse.PolicyResponse.Rules {
 		log.V(4).Info("verifying if policy rule was applied before", "rule", rule.Name)

-		if rule.Name == engine.PodControllerRuleName {
-			continue
-		}
-
 		patches := rule.Patches

 		patch, err := jsonpatch.DecodePatch(utils.JoinPatches(patches))

@@ -304,7 +304,7 @@ func (pc *PolicyController) syncPolicy(key string) error {
 	startTime := time.Now()
 	logger.V(4).Info("started syncing policy", "key", key, "startTime", startTime)
 	defer func() {
-		logger.V(4).Info("finished syncing policy", "key", key, "processingTime", time.Since(startTime))
+		logger.V(4).Info("finished syncing policy", "key", key, "processingTime", time.Since(startTime).String())
 	}()

 	policy, err := pc.pLister.Get(key)

@@ -109,6 +109,7 @@ func (pc *PolicyController) getPolicyForNamespacedPolicyViolation(pv *kyverno.Po
 	logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
 	policies, err := pc.pLister.GetPolicyForNamespacedPolicyViolation(pv)
 	if err != nil || len(policies) == 0 {
+		logger.V(4).Info("missing policy for namespaced policy violation", "reason", err.Error())
 		return nil
 	}
 	// Because all PolicyViolations belonging to a Policy should have a unique label key,

@@ -12,6 +12,9 @@ import (
 type pMap struct {
 	sync.RWMutex
 	dataMap map[PolicyType][]*kyverno.ClusterPolicy
+
+	// nameCacheMap stores the names of all existing policies in dataMap
+	nameCacheMap map[PolicyType]map[string]bool
 }

 // policyCache ...
@@ -29,9 +32,17 @@ type Interface interface {

 // newPolicyCache ...
 func newPolicyCache(log logr.Logger) Interface {
+	namesCache := map[PolicyType]map[string]bool{
+		Mutate:          make(map[string]bool),
+		ValidateEnforce: make(map[string]bool),
+		ValidateAudit:   make(map[string]bool),
+		Generate:        make(map[string]bool),
+	}
+
 	return &policyCache{
 		pMap{
 			dataMap:      make(map[PolicyType][]*kyverno.ClusterPolicy),
+			nameCacheMap: namesCache,
 		},
 		log,
 	}
@@ -54,29 +65,15 @@ func (pc *policyCache) Remove(policy *kyverno.ClusterPolicy) {
 	pc.Logger.V(4).Info("policy is removed from cache", "name", policy.GetName())
 }

-// buildCacheMap builds the map to store the names of all existing
-// policies in the cache, it is used to avoid adding duplicate policies
-func buildCacheMap(policies []*kyverno.ClusterPolicy) map[string]bool {
-	cacheMap := make(map[string]bool)
-
-	for _, p := range policies {
-		name := p.GetName()
-		if !cacheMap[name] {
-			cacheMap[p.GetName()] = true
-		}
-	}
-
-	return cacheMap
-}
-
 func (m *pMap) add(policy *kyverno.ClusterPolicy) {
 	m.Lock()
 	defer m.Unlock()

 	enforcePolicy := policy.Spec.ValidationFailureAction == "enforce"
-	mutateMap := buildCacheMap(m.dataMap[Mutate])
-	validateMap := buildCacheMap(m.dataMap[ValidateEnforce])
-	generateMap := buildCacheMap(m.dataMap[Generate])
+	mutateMap := m.nameCacheMap[Mutate]
+	validateEnforceMap := m.nameCacheMap[ValidateEnforce]
+	validateAuditMap := m.nameCacheMap[ValidateAudit]
+	generateMap := m.nameCacheMap[Generate]

 	pName := policy.GetName()
 	for _, rule := range policy.Spec.Rules {
@@ -90,9 +87,10 @@ func (m *pMap) add(policy *kyverno.ClusterPolicy) {
 			continue
 		}

-		if rule.HasValidate() && enforcePolicy {
-			if !validateMap[pName] {
-				validateMap[pName] = true
+		if rule.HasValidate() {
+			if enforcePolicy {
+				if !validateEnforceMap[pName] {
+					validateEnforceMap[pName] = true

 				validatePolicy := m.dataMap[ValidateEnforce]
 				m.dataMap[ValidateEnforce] = append(validatePolicy, policy)
@@ -100,6 +98,16 @@ func (m *pMap) add(policy *kyverno.ClusterPolicy) {
 				continue
 			}

+			// ValidateAudit
+			if !validateAuditMap[pName] {
+				validateAuditMap[pName] = true
+
+				validatePolicy := m.dataMap[ValidateAudit]
+				m.dataMap[ValidateAudit] = append(validatePolicy, policy)
+			}
+			continue
+		}
+
 		if rule.HasGenerate() {
 			if !generateMap[pName] {
 				generateMap[pName] = true
@@ -110,6 +118,11 @@ func (m *pMap) add(policy *kyverno.ClusterPolicy) {
 			continue
 		}
 	}
+
+	m.nameCacheMap[Mutate] = mutateMap
+	m.nameCacheMap[ValidateEnforce] = validateEnforceMap
+	m.nameCacheMap[ValidateAudit] = validateAuditMap
+	m.nameCacheMap[Generate] = generateMap
 }

 func (m *pMap) get(key PolicyType) []*kyverno.ClusterPolicy {
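The change above replaces the per-add `buildCacheMap` rebuild with a persistent `nameCacheMap`, so duplicate detection is an O(1) map lookup instead of an O(n) scan of `dataMap` on every add. A stripped-down sketch of the same idea (illustrative types, not the kyverno ones):

```go
package main

import "fmt"

type policy struct{ name string }

// cache keeps a slice for ordered access plus a name set for O(1)
// duplicate checks, mirroring dataMap + nameCacheMap in the commit.
type cache struct {
	items []policy
	seen  map[string]bool
}

func (c *cache) add(p policy) {
	if c.seen[p.name] {
		return // duplicate: adding the same policy twice is a no-op
	}
	c.seen[p.name] = true
	c.items = append(c.items, p)
}

func main() {
	c := &cache{seen: map[string]bool{}}
	c.add(policy{"require-labels"})
	c.add(policy{"require-labels"}) // ignored
	fmt.Println(len(c.items))       // 1
}
```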

@@ -55,6 +55,26 @@ func Test_Add_Duplicate_Policy(t *testing.T) {
 	}
 }

+func Test_Add_Validate_Audit(t *testing.T) {
+	pCache := newPolicyCache(log.Log)
+	policy := newPolicy(t)
+
+	pCache.Add(policy)
+	pCache.Add(policy)
+
+	policy.Spec.ValidationFailureAction = "audit"
+	pCache.Add(policy)
+	pCache.Add(policy)
+
+	if len(pCache.Get(ValidateEnforce)) != 1 {
+		t.Errorf("expected 1 validate enforce policy, found %v", len(pCache.Get(ValidateEnforce)))
+	}
+
+	if len(pCache.Get(ValidateAudit)) != 1 {
+		t.Errorf("expected 1 validate audit policy, found %v", len(pCache.Get(ValidateAudit)))
+	}
+}
+
 func Test_Remove_From_Empty_Cache(t *testing.T) {
 	pCache := newPolicyCache(log.Log)
 	policy := newPolicy(t)

@@ -5,5 +5,6 @@ type PolicyType uint8
 const (
 	Mutate PolicyType = 1 << iota
 	ValidateEnforce
+	ValidateAudit
 	Generate
 )
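PolicyType is a bit-flag enum built with `1 << iota`, so inserting ValidateAudit between ValidateEnforce and Generate changes Generate's numeric value (from 4 to 8) while keeping every flag a distinct power of two that can be OR-combined. A quick sketch of the resulting values (mirrors the declaration above):

```go
package main

import "fmt"

type PolicyType uint8

const (
	Mutate PolicyType = 1 << iota // 1
	ValidateEnforce               // 2
	ValidateAudit                 // 4 (new in this commit)
	Generate                      // 8 (was 4 before the insertion)
)

func main() {
	combined := ValidateEnforce | ValidateAudit
	fmt.Println(combined&ValidateAudit != 0) // true: flag membership test
	fmt.Println(Mutate, ValidateEnforce, ValidateAudit, Generate)
}
```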

@@ -29,7 +29,6 @@ func (d dummyStatusUpdater) PolicyName() string {
 	return "policy1"
 }

-
 type dummyLister struct {
 }

@@ -159,7 +159,7 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
 }

 func (gen *Generator) runWorker() {
-	for gen.processNextWorkitem() {
+	for gen.processNextWorkItem() {
 	}
 }

@@ -186,7 +186,7 @@ func (gen *Generator) handleErr(err error, key interface{}) {
 	logger.Error(err, "dropping key out of the queue", "key", key)
 }

-func (gen *Generator) processNextWorkitem() bool {
+func (gen *Generator) processNextWorkItem() bool {
 	logger := gen.log
 	obj, shutdown := gen.queue.Get()
 	if shutdown {
@@ -197,11 +197,13 @@ func (gen *Generator) processNextWorkItem() bool {
 		defer gen.queue.Done(obj)
 		var keyHash string
 		var ok bool

 		if keyHash, ok = obj.(string); !ok {
 			gen.queue.Forget(obj)
 			logger.Info("incorrect type; expecting type 'string'", "obj", obj)
 			return nil
 		}

 		// lookup data store
 		info := gen.dataStore.lookup(keyHash)
 		if reflect.DeepEqual(info, Info{}) {
@@ -210,14 +212,17 @@ func (gen *Generator) processNextWorkItem() bool {
 			logger.Info("empty key")
 			return nil
 		}

 		err := gen.syncHandler(info)
 		gen.handleErr(err, obj)
 		return nil
 	}(obj)

+	if err != nil {
+		logger.Error(err, "failed to process item")
+		return true
+	}

 	return true
 }
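The processNextWorkItem rename matches the canonical client-go workqueue consumer loop: `Get` blocks until an item arrives or the queue shuts down, `Done` must always be deferred, and `Forget` clears retry state for items that should not be requeued. A minimal self-contained sketch of that loop (assuming client-go is available; the item handling itself is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func processNextWorkItem(queue workqueue.RateLimitingInterface) bool {
	obj, shutdown := queue.Get() // blocks until an item arrives or the queue shuts down
	if shutdown {
		return false
	}
	defer queue.Done(obj) // always mark the item as processed

	key, ok := obj.(string)
	if !ok {
		queue.Forget(obj) // drop items of the wrong type instead of retrying them
		fmt.Println("incorrect type; expecting type 'string'")
		return true
	}

	fmt.Println("processing", key)
	queue.Forget(obj) // success: clear any rate-limit history for this key
	return true
}

func main() {
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "demo")
	queue.Add("policy/example")
	for processNextWorkItem(queue) && queue.Len() > 0 {
	}
	queue.ShutDown()
}
```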

@@ -6,9 +6,9 @@ func Test_Mutate_EndPoint(t *testing.T) {
 	testScenario(t, "/test/scenarios/other/scenario_mutate_endpoint.yaml")
 }

-// func Test_Mutate_Validate_qos(t *testing.T) {
-// 	testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
-// }
+func Test_Mutate_Validate_qos(t *testing.T) {
+	testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
+}

 func Test_disallow_root_user(t *testing.T) {
 	testScenario(t, "test/scenarios/samples/best_practices/disallow_root_user.yaml")

@@ -251,7 +251,7 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
 	startTime := time.Now()
 	wrc.log.Info("removing prior webhook configurations")
 	defer func() {
-		wrc.log.V(4).Info("removed webhook configurations", "processingTime", time.Since(startTime))
+		wrc.log.V(4).Info("removed webhook configurations", "processingTime", time.Since(startTime).String())
 	}()

 	var wg sync.WaitGroup

@@ -29,11 +29,11 @@ func isResponseSuccesful(engineReponses []response.EngineResponse) bool {
 func toBlockResource(engineReponses []response.EngineResponse, log logr.Logger) bool {
 	for _, er := range engineReponses {
 		if !er.IsSuccessful() && er.PolicyResponse.ValidationFailureAction == Enforce {
-			log.Info("spec.ValidationFailureAction set to enforcel blocking resource request", "policy", er.PolicyResponse.Policy)
+			log.Info("spec.ValidationFailureAction set to enforce, blocking resource request", "policy", er.PolicyResponse.Policy)
 			return true
 		}
 	}
-	log.V(4).Info("spec.ValidationFailureAction set to audit for all applicable policies; allowing resource request; reporting with policy violation")
+	log.V(4).Info("spec.ValidationFailureAction set to audit for all applicable policies, won't block resource operation")
 	return false
 }

@@ -1,8 +1,10 @@
 package webhooks

 import (
+	"fmt"
 	"reflect"
 	"sort"
+	"strings"
 	"time"

 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
@@ -11,25 +13,27 @@ import (
 	"github.com/nirmata/kyverno/pkg/engine/context"
 	"github.com/nirmata/kyverno/pkg/engine/response"
 	"github.com/nirmata/kyverno/pkg/engine/utils"
+	"github.com/nirmata/kyverno/pkg/event"
 	"github.com/nirmata/kyverno/pkg/webhooks/generate"
 	v1beta1 "k8s.io/api/admission/v1beta1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )

 //HandleGenerate handles admission-requests for policies with generate rules
-func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo) (bool, string) {
+func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo) {
 	logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
 	logger.V(4).Info("incoming request")
 	var engineResponses []response.EngineResponse

 	if len(policies) == 0 {
-		return true, ""
+		return
 	}
 	// convert RAW to unstructured
 	resource, err := utils.ConvertToUnstructured(request.Object.Raw)
 	if err != nil {
 		//TODO: skip applying the admission control ?
 		logger.Error(err, "failed to convert RAW resource to unstructured format")
-		return true, ""
+		return
 	}

 	// CREATE resources, do not have name, assigned in admission-request
@@ -52,10 +56,14 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
 			})
 		}
 	}

 	// Adds Generate Request to a channel(queue size 1000) to generators
-	if err := applyGenerateRequest(ws.grGenerator, userRequestInfo, request.Operation, engineResponses...); err != nil {
-		//TODO: send appropriate error
-		return false, "Kyverno blocked: failed to create Generate Requests"
+	if failedResponse := applyGenerateRequest(ws.grGenerator, userRequestInfo, request.Operation, engineResponses...); err != nil {
+		// report failure event
+		for _, failedGR := range failedResponse {
+			events := failedEvents(fmt.Errorf("failed to create Generate Request: %v", failedGR.err), failedGR.gr, *resource)
+			ws.eventGen.Add(events...)
+		}
 	}

 	// Generate Stats wont be used here, as we delegate the generate rule
@@ -66,16 +74,19 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
 	// HandleGeneration always returns success

 	// Filter Policies
-	return true, ""
+	return
 }

-func applyGenerateRequest(gnGenerator generate.GenerateRequests, userRequestInfo kyverno.RequestInfo, action v1beta1.Operation, engineResponses ...response.EngineResponse) error {
+func applyGenerateRequest(gnGenerator generate.GenerateRequests, userRequestInfo kyverno.RequestInfo,
+	action v1beta1.Operation, engineResponses ...response.EngineResponse) (failedGenerateRequest []generateRequestResponse) {
+
 	for _, er := range engineResponses {
-		if err := gnGenerator.Apply(transform(userRequestInfo, er), action); err != nil {
-			return err
+		gr := transform(userRequestInfo, er)
+		if err := gnGenerator.Apply(gr, action); err != nil {
+			failedGenerateRequest = append(failedGenerateRequest, generateRequestResponse{gr: gr, err: err})
 		}
 	}
-	return nil
+	return
 }

 func transform(userRequestInfo kyverno.RequestInfo, er response.EngineResponse) kyverno.GenerateRequestSpec {
@@ -162,3 +173,41 @@ func updateAverageTime(newTime time.Duration, oldAverageTimeString string, avera
 	newAverageTimeInNanoSeconds := numerator / denominator
 	return time.Duration(newAverageTimeInNanoSeconds) * time.Nanosecond
 }
+
+type generateRequestResponse struct {
+	gr  v1.GenerateRequestSpec
+	err error
+}
+
+func (resp generateRequestResponse) info() string {
+	return strings.Join([]string{resp.gr.Resource.Kind, resp.gr.Resource.Namespace, resp.gr.Resource.Name}, "/")
+}
+
+func (resp generateRequestResponse) error() string {
+	return resp.err.Error()
+}
+
+func failedEvents(err error, gr kyverno.GenerateRequestSpec, resource unstructured.Unstructured) []event.Info {
+	var events []event.Info
+	// Cluster Policy
+	pe := event.Info{}
+	pe.Kind = "ClusterPolicy"
+	// clusterwide-resource
+	pe.Name = gr.Policy
+	pe.Reason = event.PolicyFailed.String()
+	pe.Source = event.GeneratePolicyController
+	pe.Message = fmt.Sprintf("policy failed to apply on resource %s/%s/%s: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
+	events = append(events, pe)
+
+	// Resource
+	re := event.Info{}
+	re.Kind = resource.GetKind()
+	re.Namespace = resource.GetNamespace()
+	re.Name = resource.GetName()
+	re.Reason = event.PolicyFailed.String()
+	re.Source = event.GeneratePolicyController
+	re.Message = fmt.Sprintf("policy %s failed to apply: %v", gr.Policy, err)
+	events = append(events, re)
+
+	return events
+}

@@ -10,8 +10,11 @@ import (

 //HandlePolicyValidation performs the validation check on policy resource
 func (ws *WebhookServer) policyValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+	logger := ws.log.WithValues("action", "policyvalidation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
+
 	//TODO: can this happen? wont this be picked by OpenAPI spec schema ?
 	if err := policyvalidate.Validate(request.Object.Raw, ws.client, false, ws.openAPIController); err != nil {
+		logger.Error(err, "failed to validate policy")
 		return &v1beta1.AdmissionResponse{
 			Allowed: false,
 			Result: &metav1.Status{

@@ -7,11 +7,12 @@ import (
 	"errors"
 	"fmt"
 	"io/ioutil"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"net/http"
 	"time"

+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+
 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
 	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
@@ -22,6 +23,7 @@ import (
 	"github.com/nirmata/kyverno/pkg/config"
 	client "github.com/nirmata/kyverno/pkg/dclient"
 	context2 "github.com/nirmata/kyverno/pkg/engine/context"
+	enginutils "github.com/nirmata/kyverno/pkg/engine/utils"
 	"github.com/nirmata/kyverno/pkg/event"
 	"github.com/nirmata/kyverno/pkg/openapi"
 	"github.com/nirmata/kyverno/pkg/policycache"
@@ -30,7 +32,6 @@ import (
 	tlsutils "github.com/nirmata/kyverno/pkg/tls"
 	userinfo "github.com/nirmata/kyverno/pkg/userinfo"
 	"github.com/nirmata/kyverno/pkg/utils"
-	enginutils "github.com/nirmata/kyverno/pkg/engine/utils"
 	"github.com/nirmata/kyverno/pkg/webhookconfig"
 	"github.com/nirmata/kyverno/pkg/webhooks/generate"
 	v1beta1 "k8s.io/api/admission/v1beta1"
@@ -98,6 +99,9 @@ type WebhookServer struct {
 	grGenerator *generate.Generator

 	resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
+
+	auditHandler AuditHandler
+
 	log               logr.Logger
 	openAPIController *openapi.Controller
@@ -123,6 +127,7 @@ func NewWebhookServer(
 	pvGenerator policyviolation.GeneratorInterface,
 	grGenerator *generate.Generator,
 	resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
+	auditHandler AuditHandler,
 	supportMudateValidate bool,
 	cleanUp chan<- struct{},
 	log logr.Logger,
@@ -161,6 +166,7 @@ func NewWebhookServer(
 		pvGenerator:            pvGenerator,
 		grGenerator:            grGenerator,
 		resourceWebhookWatcher: resourceWebhookWatcher,
+		auditHandler:           auditHandler,
 		log:                    log,
 		openAPIController:      openAPIController,
 		supportMudateValidate:  supportMudateValidate,
@@ -226,7 +232,7 @@ func (ws *WebhookServer) handlerFunc(handler func(request *v1beta1.AdmissionRequ

 	admissionReview.Response = handler(request)
 	writeResponse(rw, admissionReview)
-	logger.V(4).Info("request processed", "processingTime", time.Since(startTime).Milliseconds())
+	logger.V(4).Info("request processed", "processingTime", time.Since(startTime).String())

 	return
 }
@@ -258,7 +264,6 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
 		}
 	}

-
 	mutatePolicies := ws.pCache.Get(policycache.Mutate)
 	validatePolicies := ws.pCache.Get(policycache.ValidateEnforce)
 	generatePolicies := ws.pCache.Get(policycache.Generate)
@@ -290,7 +295,7 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
 	userRequestInfo := v1.RequestInfo{
 		Roles:        roles,
 		ClusterRoles: clusterRoles,
-		AdmissionUserInfo: request.UserInfo}
+		AdmissionUserInfo: *request.UserInfo.DeepCopy()}

 	// build context
 	ctx := context2.NewContext()
@@ -325,8 +330,11 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
 	logger.V(6).Info("", "patchedResource", string(patchedResource))

 	if ws.resourceWebhookWatcher != nil && ws.resourceWebhookWatcher.RunValidationInMutatingWebhook == "true" {
+		// push admission request to audit handler, this won't block the admission request
+		ws.auditHandler.Add(request.DeepCopy())
+
 		// VALIDATION
-		ok, msg := ws.HandleValidation(request, validatePolicies, patchedResource, ctx, userRequestInfo)
+		ok, msg := HandleValidation(request, validatePolicies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log)
 		if !ok {
 			logger.Info("admission request denied")
 			return &v1beta1.AdmissionResponse{
@@ -344,25 +352,14 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1

 	// GENERATE
 	// Only applied during resource creation and update
-	// Success -> Generate Request CR created successsfully
+	// Success -> Generate Request CR created successfully
 	// Failed -> Failed to create Generate Request CR

 	if request.Operation == v1beta1.Create || request.Operation == v1beta1.Update {
-
-		ok, msg := ws.HandleGenerate(request, generatePolicies, ctx, userRequestInfo)
-		if !ok {
-			logger.Info("admission request denied")
-			return &v1beta1.AdmissionResponse{
-				Allowed: false,
-				Result: &metav1.Status{
-					Status:  "Failure",
-					Message: msg,
-				},
-			}
-		}
+		go ws.HandleGenerate(request.DeepCopy(), generatePolicies, ctx, userRequestInfo)
 	}

-	// Succesfful processing of mutation & validation rules in policy
+	// Successful processing of mutation & validation rules in policy
 	patchType := v1beta1.PatchTypeJSONPatch
 	return &v1beta1.AdmissionResponse{
 		Allowed: true,
@@ -405,7 +402,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
 		}
 	}

-	oldResource, err := ws.client.GetResource(resource.GetKind(),resource.GetNamespace(),resource.GetName());
+	oldResource, err := ws.client.GetResource(resource.GetKind(), resource.GetNamespace(), resource.GetName())
 	if err != nil {
 		if !apierrors.IsNotFound(err) {
 			logger.Error(err, "failed to get resource")
@@ -452,6 +449,9 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
 		}
 	}

+	// push admission request to audit handler, this won't block the admission request
+	ws.auditHandler.Add(request.DeepCopy())
+
 	policies := ws.pCache.Get(policycache.ValidateEnforce)
 	if len(policies) == 0 {
 		logger.V(4).Info("No enforce Validation policy found, returning")
@@ -463,8 +463,14 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
 	if containRBACinfo(policies) {
 		roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request)
 		if err != nil {
-			// TODO(shuting): continue apply policy if error getting roleRef?
 			logger.Error(err, "failed to get RBAC information for request")
+			return &v1beta1.AdmissionResponse{
+				Allowed: false,
+				Result: &metav1.Status{
+					Status:  "Failure",
+					Message: err.Error(),
+				},
+			}
 		}
 	}

@@ -489,7 +495,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
 		logger.Error(err, "failed to load service account in context")
 	}

-	ok, msg := ws.HandleValidation(request, policies, nil, ctx, userRequestInfo)
+	ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log)
 	if !ok {
 		logger.Info("admission request denied")
 		return &v1beta1.AdmissionResponse{
pkg/webhooks/validate_audit.go (new file) | 170
@@ -0,0 +1,170 @@
+package webhooks
+
+import (
+	"github.com/go-logr/logr"
+	"github.com/minio/minio/cmd/logger"
+	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
+	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
+	"github.com/nirmata/kyverno/pkg/constant"
+	enginectx "github.com/nirmata/kyverno/pkg/engine/context"
+	"github.com/nirmata/kyverno/pkg/event"
+	"github.com/nirmata/kyverno/pkg/policycache"
+	"github.com/nirmata/kyverno/pkg/policystatus"
+	"github.com/nirmata/kyverno/pkg/policyviolation"
+	"github.com/nirmata/kyverno/pkg/userinfo"
+	"github.com/pkg/errors"
+	"k8s.io/api/admission/v1beta1"
+	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
+	"k8s.io/apimachinery/pkg/util/wait"
+	rbacinformer "k8s.io/client-go/informers/rbac/v1"
+	rbaclister "k8s.io/client-go/listers/rbac/v1"
+	"k8s.io/client-go/tools/cache"
+	"k8s.io/client-go/util/workqueue"
+)
+
+const (
+	workQueueName       = "validate-audit-handler"
+	workQueueRetryLimit = 3
+)
+
+// AuditHandler applies validate audit policies to the admission request.
+// The handler adds the request to the work queue and returns immediately;
+// the request is then processed in the background, with the exact same logic
+// that is used when processing the admission request in the webhook.
+type AuditHandler interface {
+	Add(request *v1beta1.AdmissionRequest)
+	Run(workers int, stopCh <-chan struct{})
+}
+
+type auditHandler struct {
+	client         *kyvernoclient.Clientset
+	queue          workqueue.RateLimitingInterface
+	pCache         policycache.Interface
+	eventGen       event.Interface
+	statusListener policystatus.Listener
+	pvGenerator    policyviolation.GeneratorInterface
+
+	rbLister  rbaclister.RoleBindingLister
+	rbSynced  cache.InformerSynced
+	crbLister rbaclister.ClusterRoleBindingLister
+	crbSynced cache.InformerSynced
+
+	log logr.Logger
+}
+
+// NewValidateAuditHandler returns a new instance of audit policy handler
+func NewValidateAuditHandler(pCache policycache.Interface,
+	eventGen event.Interface,
+	statusListener policystatus.Listener,
+	pvGenerator policyviolation.GeneratorInterface,
+	rbInformer rbacinformer.RoleBindingInformer,
+	crbInformer rbacinformer.ClusterRoleBindingInformer,
+	log logr.Logger) AuditHandler {
+
+	return &auditHandler{
+		pCache:         pCache,
+		queue:          workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
+		eventGen:       eventGen,
+		statusListener: statusListener,
+		pvGenerator:    pvGenerator,
+		rbLister:       rbInformer.Lister(),
+		rbSynced:       rbInformer.Informer().HasSynced,
+		crbLister:      crbInformer.Lister(),
+		crbSynced:      crbInformer.Informer().HasSynced,
+		log:            log,
+	}
+}
+
+func (h *auditHandler) Add(request *v1beta1.AdmissionRequest) {
+	h.log.V(4).Info("admission request added", "uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
+	h.queue.Add(request)
+}
+
+func (h *auditHandler) Run(workers int, stopCh <-chan struct{}) {
+	h.log.V(4).Info("starting")
+
+	defer func() {
+		utilruntime.HandleCrash()
+		h.log.V(4).Info("shutting down")
+	}()
+
+	if !cache.WaitForCacheSync(stopCh, h.rbSynced, h.crbSynced) {
+		logger.Info("failed to sync informer cache")
+	}
+
+	for i := 0; i < workers; i++ {
+		go wait.Until(h.runWorker, constant.GenerateControllerResync, stopCh)
+	}
+
+	<-stopCh
+}
+
+func (h *auditHandler) runWorker() {
+	for h.processNextWorkItem() {
+	}
+}
+
+func (h *auditHandler) processNextWorkItem() bool {
+	obj, shutdown := h.queue.Get()
+	if shutdown {
+		return false
+	}
+
+	defer h.queue.Done(obj)
+
+	request, ok := obj.(*v1beta1.AdmissionRequest)
+	if !ok {
+		h.queue.Forget(obj)
+		logger.Info("incorrect type: expecting type 'AdmissionRequest'", "object", obj)
+		return false
+	}
+
+	err := h.process(request)
+	h.handleErr(err)
+
+	return true
+}
+
+func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
+	var roles, clusterRoles []string
+	var err error
+
+	logger := h.log.WithName("process")
+	policies := h.pCache.Get(policycache.ValidateAudit)
+
+	// getRoleRef only if policy has roles/clusterroles defined
+	if containRBACinfo(policies) {
+		roles, clusterRoles, err = userinfo.GetRoleRef(h.rbLister, h.crbLister, request)
+		if err != nil {
+			logger.Error(err, "failed to get RBAC information for request")
+		}
+	}
+
+	userRequestInfo := v1.RequestInfo{
+		Roles:             roles,
+		ClusterRoles:      clusterRoles,
+		AdmissionUserInfo: request.UserInfo}
+
+	// build context
+	ctx := enginectx.NewContext()
+	err = ctx.AddRequest(request)
+	if err != nil {
+		return errors.Wrap(err, "failed to load incoming request in context")
+	}
+
+	err = ctx.AddUserInfo(userRequestInfo)
+	if err != nil {
+		return errors.Wrap(err, "failed to load userInfo in context")
+	}
+	err = ctx.AddSA(userRequestInfo.AdmissionUserInfo.Username)
+	if err != nil {
+		return errors.Wrap(err, "failed to load service account in context")
+	}
+
+	HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.pvGenerator, logger)
+	return nil
+}
+
+func (h *auditHandler) handleErr(err error) {
+
+}
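The new handler moves audit-mode validation off the admission hot path: the webhook only enqueues a deep copy of the AdmissionRequest and returns, while background workers replay the same HandleValidation logic later. A tiny sketch of that decoupling (illustrative types; the real code uses a client-go workqueue rather than a bare channel):

```go
package main

import (
	"fmt"
	"time"
)

type admissionRequest struct{ name string }

// auditQueue decouples request latency from audit processing:
// Add never blocks the caller for the duration of validation.
type auditQueue struct{ ch chan admissionRequest }

func (q auditQueue) Add(r admissionRequest) { q.ch <- r }

func (q auditQueue) Run(workers int, stopCh <-chan struct{}) {
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case r := <-q.ch:
					time.Sleep(10 * time.Millisecond) // stand-in for HandleValidation
					fmt.Println("audited", r.name)
				case <-stopCh:
					return
				}
			}
		}()
	}
}

func main() {
	stopCh := make(chan struct{})
	q := auditQueue{ch: make(chan admissionRequest, 1000)} // buffered: admission returns immediately
	q.Run(10, stopCh)

	q.Add(admissionRequest{name: "nginx-deploy"}) // what the webhook does inline
	time.Sleep(50 * time.Millisecond)
	close(stopCh)
}
```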
@@ -5,6 +5,9 @@ import (
 	"sort"
 	"time"

+	"github.com/go-logr/logr"
+	"github.com/nirmata/kyverno/pkg/event"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	"github.com/nirmata/kyverno/pkg/utils"

 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
@@ -21,12 +24,16 @@ import (
 // HandleValidation handles validating webhook admission request
 // If there are no errors in validating rule we apply generation rules
 // patchedResource is the (resource + patches) after applying mutation rules
-func (ws *WebhookServer) HandleValidation(
+func HandleValidation(
 	request *v1beta1.AdmissionRequest,
 	policies []*kyverno.ClusterPolicy,
 	patchedResource []byte,
 	ctx *context.Context,
-	userRequestInfo kyverno.RequestInfo) (bool, string) {
+	userRequestInfo kyverno.RequestInfo,
+	statusListener policystatus.Listener,
+	eventGen event.Interface,
+	pvGenerator policyviolation.GeneratorInterface,
+	log logr.Logger) (bool, string) {

 	if len(policies) == 0 {
 		return true, ""
@@ -37,7 +44,7 @@ func HandleValidation(
 		resourceName = request.Namespace + "/" + resourceName
 	}

-	logger := ws.log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation)
+	logger := log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation)

 	// Get new and old resource
 	newR, oldR, err := utils.ExtractResources(patchedResource, request)
@@ -76,7 +83,7 @@ func HandleValidation(
 			continue
 		}
 		engineResponses = append(engineResponses, engineResponse)
-		ws.statusListener.Send(validateStats{
+		statusListener.Send(validateStats{
 			resp: engineResponse,
 		})
 		if !engineResponse.IsSuccessful() {
@@ -101,7 +108,7 @@ func HandleValidation(
 	// all policies were applied successfully.
 	// create an event on the resource
 	events := generateEvents(engineResponses, blocked, (request.Operation == v1beta1.Update), logger)
-	ws.eventGen.Add(events...)
+	eventGen.Add(events...)
 	if blocked {
 		logger.V(4).Info("resource blocked")
 		return false, getEnforceFailureErrorMsg(engineResponses)
@@ -110,8 +117,8 @@ func HandleValidation(
 	// ADD POLICY VIOLATIONS
 	// violations are created with resource on "audit"
 	pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
-	ws.pvGenerator.Add(pvInfos...)
-	// report time end
+	pvGenerator.Add(pvInfos...)

 	return true, ""
 }
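Turning HandleValidation from a WebhookServer method into a free function with explicitly passed status, event, and violation generators is what lets the new auditHandler reuse it without owning a WebhookServer. A hedged sketch of the refactor shape (toy interfaces, not the kyverno ones):

```go
package main

import "fmt"

// server stands in for WebhookServer; before the refactor the
// validation logic was a method bound to this concrete type.
type server struct{ events []string }

func (s *server) Add(msg string) { s.events = append(s.events, msg) }

// eventSink is the narrow dependency the free function needs,
// so any caller (webhook server or audit handler) can supply one.
type eventSink interface{ Add(msg string) }

func handleValidation(resource string, sink eventSink) bool {
	sink.Add("validated " + resource) // both callers share one implementation
	return true
}

func main() {
	s := &server{}
	handleValidation("pod/nginx", s) // webhook path
	handleValidation("pod/redis", s) // audit path could pass a different sink
	fmt.Println(s.events)
}
```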