kyverno/pkg/policy/existing.go
shuting 5e07ecc5f3
Add Policy Report (#1229)
* add report in cli

* policy report crd added

* policy report added

* configmap added

* added jobs

* added jobs

* bug fixed

* added logic for cli

* common function added

* sub command added for policy report

* subcommand added for report

* common package changed

* configmap added

* added logic for kyverno cli

* added logic for jobs

* added logic for jobs

* added logic for jobs

* added logic for cli

* bug fix

* cli changes

* count bug fix

* docs added for command

* go fmt

* refactor codebase

* remove policy controller for policyreport

* policy report removed

* bug fixes

* bug fixes

* added job trigger if needed

* job deletion logic added

* build failure fixed

* fixed e2e test

* remove hard coded variables

* packages added

* improvement added in jobs scheduler

* policy report yaml added

* cronjob added

* small fixes

* remove background sync

* documentation added for report command

* remove extra log

* small improvement

* tested policy report

* revert hardcoded changes

* changes for demo

* demo changes

* resource aggregation added

* More changes

* More changes

* - resolve PR comments; - refactor jobs controller

* set rbac for jobs

* add clean up in job controller

* add short names

* remove application scope for policyreport

* move job controller to policyreport

* add report logic in command apply

* - update policy report types;  - upgrade k8s library; - update code gen

* temporarily comment out code to pass CI build

* generate / update policyreport to cluster

* add unit test for CLI report

* add test for apply - generate policy report

* fix unit test

* - remove job controller; - remove in-memory configmap; - clean up kustomize manifest

* remove dependency

* add reportRequest / clusterReportRequest

* clean up policy report

* generate report request

* update crd clusterReportRequest

* - update json tag of report summary; - update definition manifests; -  fix dclient creation

* aggregate reportRequest into policy report

* fix unit tests

* - update report summary to optional; - generate clusterPolicyReport; - remove reportRequests after merged to report

* remove

* generate reportRequest in kyverno namespace

* update resource filter in helm chart

* - rename reportRequest to reportChangeRequest; - rename clusterReportRequest to clusterReportChangeRequest

* generate policy report in background scan

* skip generating report change request if there are existing entry results

* fix results entry removal when policy / rule gets deleted

* rename apiversion from policy.kubernetes.io to policy.k8s.io

* update summary.* to lower case

* move reportChangeRequest to kyverno.io/v1alpha1

* remove policy report flag

* fix report update

* clean up policy violation CRD

* remove violation CRD from manifest

* clean up policy violation code - remove pvGenerator

* change severity fields to lower case

* update import library

* set report category

Co-authored-by: Yuvraj <yuvraj.yad001@gmail.com>
Co-authored-by: Yuvraj <10830562+evalsocket@users.noreply.github.com>
Co-authored-by: Jim Bugwadia <jim@nirmata.com>
2020-11-09 11:26:12 -08:00


package policy

import (
	"sync"
	"time"

	"github.com/go-logr/logr"
	kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
	"github.com/kyverno/kyverno/pkg/engine"
	"github.com/kyverno/kyverno/pkg/engine/response"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func (pc *PolicyController) processExistingResources(policy *kyverno.ClusterPolicy) []response.EngineResponse {
	logger := pc.log.WithValues("policy", policy.Name)

	// drop the cache if the configured rebuild interval has elapsed
	pc.rm.Drop()

	var engineResponses []response.EngineResponse

	// get the resources that satisfy the resource descriptions defined in the rules
	resourceMap := pc.listResources(policy)

	for _, resource := range resourceMap {
		// pre-processing: skip if this policy and resource version pair has already been processed
		if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {
			logger.V(4).Info("policy and resource already processed", "policyResourceVersion", policy.ResourceVersion, "resourceResourceVersion", resource.GetResourceVersion(), "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
			continue
		}

		// apply the policy to each resource; the engine responses cover mutation and validation independently
		engineResponse := applyPolicy(*policy, resource, logger, pc.configHandler.GetExcludeGroupRole(), pc.resCache)
		engineResponses = append(engineResponses, engineResponse...)

		// post-processing: register the resource as processed
		pc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion())
	}

	return engineResponses
}

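// Illustrative sketch (not part of the original file): a caller such as the
// background scan might feed the engine responses into report generation
// roughly like this (reportGenerator is a hypothetical name):
//
//	responses := pc.processExistingResources(policy)
//	for _, resp := range responses {
//		reportGenerator.Add(resp)
//	}
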
func (pc *PolicyController) listResources(policy *kyverno.ClusterPolicy) map[string]unstructured.Unstructured {
	pc.log.V(4).Info("list resources to be processed")

	// the map is keyed by resource UID
	resourceMap := map[string]unstructured.Unstructured{}

	for _, rule := range policy.Spec.Rules {
		for _, k := range rule.MatchResources.Kinds {
			resourceSchema, _, err := pc.client.DiscoveryClient.FindResource("", k)
			if err != nil {
				pc.log.Error(err, "failed to find resource", "kind", k)
				continue
			}

			if !resourceSchema.Namespaced {
				// cluster-scoped kinds are listed once, without a namespace
				rMap := GetResourcesPerNamespace(k, pc.client, "", rule, pc.configHandler, pc.log)
				MergeResources(resourceMap, rMap)
			} else {
				namespaces := GetNamespacesForRule(&rule, pc.nsLister, pc.log)
				for _, ns := range namespaces {
					rMap := GetResourcesPerNamespace(k, pc.client, ns, rule, pc.configHandler, pc.log)
					MergeResources(resourceMap, rMap)
				}
			}
		}
	}

	return excludeAutoGenResources(*policy, resourceMap, pc.log)
}

// excludeAutoGenResources filters out the pods / jobs that have an ownerReference
func excludeAutoGenResources(policy kyverno.ClusterPolicy, resourceMap map[string]unstructured.Unstructured, log logr.Logger) map[string]unstructured.Unstructured {
	for uid, r := range resourceMap {
		if engine.SkipPolicyApplication(policy, r) {
			log.V(4).Info("exclude resource", "namespace", r.GetNamespace(), "kind", r.GetKind(), "name", r.GetName())
			delete(resourceMap, uid)
		}
	}

	return resourceMap
}

// Condition defines the condition evaluation type
type Condition int

const (
	// NotEvaluate indicates the condition should not be evaluated
	NotEvaluate Condition = 0

	// Process indicates the condition should be evaluated
	Process Condition = 1

	// Skip indicates the condition should be ignored/skipped
	Skip Condition = 2
)

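// Illustrative sketch (not part of the original file): code that consumes a
// Condition might branch on it like this (cond and evaluate are hypothetical):
//
//	switch cond {
//	case Process:
//		evaluate(resource)
//	case Skip:
//		// condition explicitly skipped
//	case NotEvaluate:
//		// nothing to evaluate
//	}
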
// mergeResources merges map b into map a
func mergeResources(a, b map[string]unstructured.Unstructured) {
	for k, v := range b {
		a[k] = v
	}
}

// NewResourceManager returns a new ResourceManager with the build time set to now
func NewResourceManager(rebuildTime int64) *ResourceManager {
	rm := ResourceManager{
		data:        make(map[string]interface{}),
		time:        time.Now(), // record when the cache was built
		rebuildTime: rebuildTime,
	}

	return &rm
}

// ResourceManager caches the details of already processed resources
type ResourceManager struct {
	// the cache is dropped and rebuilt periodically
	// to bound the memory consumed by the map
	data        map[string]interface{}
	mux         sync.RWMutex
	time        time.Time
	rebuildTime int64 // number of seconds after which the cache is rebuilt
}

type resourceManager interface {
	ProcessResource(policy, pv, kind, ns, name, rv string) bool
	// TODO: removeResource(kind, ns, name string) error
	RegisterResource(policy, pv, kind, ns, name, rv string)
	// Drop reloads (clears) the cache
	Drop()
}

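// Illustrative sketch (not part of the original file): the resourceManager
// interface allows tests to substitute a fake cache, e.g.:
//
//	type fakeResourceManager struct{}
//
//	func (fakeResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool { return true }
//	func (fakeResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string)     {}
//	func (fakeResourceManager) Drop()                                                      {}
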
// Drop clears the cache once the configured rebuild interval (in seconds) has elapsed
// TODO: also drop based on the size of the cache
func (rm *ResourceManager) Drop() {
	timeSince := time.Since(rm.time)
	if timeSince > time.Duration(rm.rebuildTime)*time.Second {
		rm.mux.Lock()
		defer rm.mux.Unlock()
		rm.data = map[string]interface{}{}
		rm.time = time.Now()
	}
}

var empty struct{} // zero-size value stored for each cache key

// RegisterResource records that the policy was processed on this resource version
func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
	rm.mux.Lock()
	defer rm.mux.Unlock()

	key := buildKey(policy, pv, kind, ns, name, rv)
	rm.data[key] = empty
}

// ProcessResource returns true if the policy has not yet been applied to this resource version
func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
	rm.mux.RLock()
	defer rm.mux.RUnlock()

	key := buildKey(policy, pv, kind, ns, name, rv)
	_, ok := rm.data[key]
	return !ok
}

// buildKey builds the cache key from the policy name and version, and the
// resource kind, namespace, name, and version
func buildKey(policy, pv, kind, ns, name, rv string) string {
	return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
}
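
// Illustrative example (not part of the original file): how the ResourceManager
// de-duplicates work across rebuild intervals. The policy name, resource
// coordinates, and versions below are hypothetical.
func exampleResourceManagerUsage() {
	rm := NewResourceManager(300) // drop the cache every 300 seconds

	// first sighting of this policy/resource version pair: not processed yet
	if rm.ProcessResource("check-labels", "1001", "Pod", "default", "nginx", "42") {
		// ... apply the policy, then record the pair so it is skipped next time
		rm.RegisterResource("check-labels", "1001", "Pod", "default", "nginx", "42")
	}

	// a repeat sighting of the same versions now reports already-processed
	_ = rm.ProcessResource("check-labels", "1001", "Pod", "default", "nginx", "42") // returns false

	// once the rebuild interval elapses, Drop clears the cache and the same
	// pair would be processed again
	rm.Drop()
}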