From 1613434c46c5215174476abc4a916d6f5a462b25 Mon Sep 17 00:00:00 2001
From: Shivkumar Dudhani
Date: Fri, 8 Nov 2019 20:45:26 -0800
Subject: [PATCH] 458 cleanup (#464)

* cleanup of policy violation on policy spec changes + refactoring

* remove unused code

* remove duplicate types

* cleanup references

* fix info log and clean code

* code clean

* remove dead code
---
 main.go                           |   4 +-
 pkg/api/kyverno/v1alpha1/utils.go |  10 +++
 pkg/config/dynamicconfig.go       |  11 ++-
 pkg/policy/cleanup.go             | 127 ++++++++++++++++++++++++++++++
 pkg/policy/controller.go          |   6 +-
 pkg/policy/report.go              |  30 ++++---
 pkg/policyviolation/controller.go |   2 +-
 pkg/policyviolation/helpers.go    |  66 ++++++----------
 8 files changed, 189 insertions(+), 67 deletions(-)
 create mode 100644 pkg/policy/cleanup.go

diff --git a/main.go b/main.go
index 60ea2d6688..643b8d3c91 100644
--- a/main.go
+++ b/main.go
@@ -151,8 +151,8 @@ func main() {
 	// Start the components
 	pInformer.Start(stopCh)
 	kubeInformer.Start(stopCh)
-	if err := configData.Run(kubeInformer.Core().V1().ConfigMaps(), stopCh); err != nil {
-		glog.Fatalf("Unable loading dynamic configuration: %v\n", err)
+	if err := configData.Run(stopCh); err != nil {
+		glog.Fatalf("Unable to load dynamic configuration: %v\n", err)
 	}
 	go pc.Run(1, stopCh)
 	go pvc.Run(1, stopCh)
diff --git a/pkg/api/kyverno/v1alpha1/utils.go b/pkg/api/kyverno/v1alpha1/utils.go
index f5c247fde4..2385ff4136 100644
--- a/pkg/api/kyverno/v1alpha1/utils.go
+++ b/pkg/api/kyverno/v1alpha1/utils.go
@@ -62,3 +62,13 @@ func (rs ResourceSpec) ToKey() string {
 	}
 	return rs.Kind + "." + rs.Namespace + "." + rs.Name
 }
+
+//BuildResourceKey builds the key for the given resource
+func BuildResourceKey(kind, namespace, name string) string {
+	resource := ResourceSpec{
+		Kind:      kind,
+		Namespace: namespace,
+		Name:      name,
+	}
+	return resource.ToKey()
+}
diff --git a/pkg/config/dynamicconfig.go b/pkg/config/dynamicconfig.go
index ce95288a22..46a51cf351 100644
--- a/pkg/config/dynamicconfig.go
+++ b/pkg/config/dynamicconfig.go
@@ -29,6 +29,8 @@ type ConfigData struct {
 	mux sync.RWMutex
 	// configuration data
 	filters []k8Resource
+	// hasSynced for the configmap informer
+	cmListerSycned cache.InformerSynced
 }
 
 // ToFilter checks if the given resource is set to be filtered in the configuration
@@ -55,8 +57,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
 		glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
 	}
 	cd := ConfigData{
-		client: rclient,
-		cmName: os.Getenv(cmNameEnv),
+		client:         rclient,
+		cmName:         os.Getenv(cmNameEnv),
+		cmListerSycned: cmInformer.Informer().HasSynced,
 	}
 	//TODO: this has been added to backward support command line arguments
 	// will be removed in future and the configuration will be set only via configmaps
@@ -73,9 +76,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
 	return &cd
 }
 
-func (cd *ConfigData) Run(cmInformer informers.ConfigMapInformer, stopCh <-chan struct{}) error {
+func (cd *ConfigData) Run(stopCh <-chan struct{}) error {
 	// wait for cache to populate first time
-	if !cache.WaitForCacheSync(stopCh, cmInformer.Informer().HasSynced) {
+	if !cache.WaitForCacheSync(stopCh, cd.cmListerSycned) {
 		return fmt.Errorf("Configuration: Failed to sync informer cache")
 	}
 	return nil
diff --git a/pkg/policy/cleanup.go b/pkg/policy/cleanup.go
new file mode 100644
index 0000000000..768dad8d25
--- /dev/null
+++ b/pkg/policy/cleanup.go
@@ -0,0 +1,127 @@
+package policy
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/golang/glog"
+	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
+	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
+	dclient "github.com/nirmata/kyverno/pkg/dclient"
+	"github.com/nirmata/kyverno/pkg/engine"
+	"github.com/nirmata/kyverno/pkg/policyviolation"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/labels"
+)
+
+func (pc *PolicyController) cleanUpPolicyViolation(pResponse engine.PolicyResponse) {
+	// 1- check if there is a violation on the resource (label selector)
+	// 2- check if there is a violation on the owner
+	//    - recursively get the owners by querying the api server for the owner information of the resource
+
+	// there can be multiple violations as a resource can have multiple owners
+	pvs, err := getPv(pc.pvLister, pc.client, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Namespace, pResponse.Resource.Name)
+	if err != nil {
+		glog.Errorf("failed to clean up violations: %v", err)
+	}
+	for _, pv := range pvs {
+		if reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) {
+			continue
+		}
+		glog.V(4).Infof("cleanup violations %s, on %s/%s/%s", pv.Name, pv.Spec.Kind, pv.Spec.Namespace, pv.Spec.Name)
+		if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {
+			glog.Errorf("failed to delete policy violation: %v", err)
+			continue
+		}
+	}
+}
+
+func getPv(pvLister kyvernolister.ClusterPolicyViolationLister, client *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.ClusterPolicyViolation, error) {
+	var pvs []kyverno.ClusterPolicyViolation
+	var err error
+	// Check Violation on resource
+	pv, err := getPVOnResource(pvLister, policyName, kind, namespace, name)
+	if err != nil {
+		glog.V(4).Infof("error while fetching pv: %v", err)
+		return []kyverno.ClusterPolicyViolation{}, err
+	}
+	if !reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) {
+		// found a pv on resource
+		pvs = append(pvs, pv)
+		return pvs, nil
+	}
+	// Check Violations on owner
+	pvs, err = getPVonOwnerRef(pvLister, client, policyName, kind, namespace, name)
+	if err != nil {
+		glog.V(4).Infof("error while fetching pv: %v", err)
+		return []kyverno.ClusterPolicyViolation{}, err
+	}
+	return pvs, nil
+}
+
+func getPVonOwnerRef(pvLister kyvernolister.ClusterPolicyViolationLister, dclient *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.ClusterPolicyViolation, error) {
+	var pvs []kyverno.ClusterPolicyViolation
+	// get resource
+	resource, err := dclient.GetResource(kind, namespace, name)
+	if err != nil {
+		glog.V(4).Infof("error while fetching the resource: %v", err)
+		return pvs, err
+	}
+	// get owners
+	// GetOwners returns nil if there is any error
+	owners := policyviolation.GetOwners(dclient, *resource)
+	// as we can have multiple top level owners to a resource
+	// check if pv exists on each one
+	// does not check for cycles
+	for _, owner := range owners {
+		pv, err := getPVOnResource(pvLister, policyName, owner.Kind, owner.Namespace, owner.Name)
+		if err != nil {
+			glog.Errorf("error while fetching resource owners: %v", err)
+			continue
+		}
+		pvs = append(pvs, pv)
+	}
+	return pvs, nil
+}
+
+// Won't do the claiming of objects, just lookup based on selectors and owner references
+func getPVOnResource(pvLister kyvernolister.ClusterPolicyViolationLister, policyName, kind, namespace, name string) (kyverno.ClusterPolicyViolation, error) {
+	resourceKey := kyverno.BuildResourceKey(kind, namespace, name)
+	labelMap := map[string]string{"policy": policyName, "resource": resourceKey}
+	pvSelector, err := converLabelToSelector(labelMap)
+	if err != nil {
+		glog.Errorf("failed to generate label selector for policy %s and resource %s", policyName, resourceKey)
+		return kyverno.ClusterPolicyViolation{}, fmt.Errorf("failed to generate label selector for policy %s and resource %s", policyName, resourceKey)
+	}
+
+	pvs, err := pvLister.List(pvSelector)
+	if err != nil {
+		glog.Errorf("unable to list policy violations with label selector %v: %v", pvSelector, err)
+		return kyverno.ClusterPolicyViolation{}, err
+	}
+	if len(pvs) > 1 {
+		glog.Errorf("more than one policy violation exists with labels %v", labelMap)
+		return kyverno.ClusterPolicyViolation{}, fmt.Errorf("more than one policy violation exists with labels %v", labelMap)
+	}
+	if len(pvs) == 0 {
+		glog.V(4).Infof("policy violation does not exist with labels %v", labelMap)
+		return kyverno.ClusterPolicyViolation{}, nil
+	}
+	// return a copy of pv
+	return *pvs[0], nil
+}
+
+func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
+	ls := &metav1.LabelSelector{}
+	err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
+	if err != nil {
+		return nil, err
+	}
+
+	policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
+	if err != nil {
+		return nil, fmt.Errorf("invalid label selector: %v", err)
+	}
+
+	return policyViolationSelector, nil
+}
diff --git a/pkg/policy/controller.go b/pkg/policy/controller.go
index 32f62e3cb3..b838c1ea9a 100644
--- a/pkg/policy/controller.go
+++ b/pkg/policy/controller.go
@@ -387,7 +387,6 @@ func (pc *PolicyController) processNextWorkItem() bool {
 		return false
 	}
 	defer pc.queue.Done(key)
-
 	err := pc.syncHandler(key.(string))
 	pc.handleErr(err, key)
 
@@ -451,11 +450,10 @@ func (pc *PolicyController) syncPolicy(key string) error {
 		return err
 	}
 	// process policies on existing resources
-	policyInfos := pc.processExistingResources(*p)
+	engineResponses := pc.processExistingResources(*p)
 	// report errors
-	pc.report(policyInfos)
+	pc.cleanupAndReport(engineResponses)
 	// fetch the policy again via the aggreagator to remain consistent
-	// return pc.statusAggregator.UpdateViolationCount(p.Name, pvList)
 	return pc.syncStatusOnly(p, pvList)
 }
 
diff --git a/pkg/policy/report.go b/pkg/policy/report.go
index d126b225e2..1b15311f73 100644
--- a/pkg/policy/report.go
+++ b/pkg/policy/report.go
@@ -9,20 +9,24 @@ import (
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 )
 
-func (pc *PolicyController) report(engineResponses []engine.EngineResponse) {
-	// generate events
-	// generate policy violations
-	for _, policyInfo := range engineResponses {
-		// events
-		// success - policy applied on resource
-		// failure - policy/rule failed to apply on the resource
-		reportEvents(policyInfo, pc.eventGen)
-		// policy violations
-		// failure - policy/rule failed to apply on the resource
+// for each policy-resource response
+// - has violation -> report
+// - no violation -> cleanup policy violations (resource or resource owner)
+func (pc *PolicyController) cleanupAndReport(engineResponses []engine.EngineResponse) {
+	for _, eResponse := range engineResponses {
+		if !eResponse.IsSuccesful() {
+			// failure - policy/rule failed to apply on the resource
+			reportEvents(eResponse, pc.eventGen)
+			// generate policy violation
+			// Only created on resource, not resource owners
+			policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
+		} else {
+			// cleanup existing violations if any
+			// if there is any error in clean up, we don't re-queue the resource
+			// it will be re-tried in the next controller cache resync
+			pc.cleanUpPolicyViolation(eResponse.PolicyResponse)
+		}
 	}
-
-	// generate policy violation
-	policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
 }
 
 //reportEvents generates events for the failed resources
diff --git a/pkg/policyviolation/controller.go b/pkg/policyviolation/controller.go
index deeee4d5f1..b10062da3d 100644
--- a/pkg/policyviolation/controller.go
+++ b/pkg/policyviolation/controller.go
@@ -269,7 +269,7 @@ func (pvc *PolicyViolationController) syncBlockedResource(curPv *kyverno.Cluster
 
 	for _, resource := range resources.Items {
 		glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
-		owners := getOwners(pvc.client, resource)
+		owners := GetOwners(pvc.client, resource)
 		// owner of resource matches violation resourceSpec
 		// remove policy violation as the blocked request got created
 		if containsOwner(owners, curPv) {
diff --git a/pkg/policyviolation/helpers.go b/pkg/policyviolation/helpers.go
index 4191686a97..1741bc9a34 100644
--- a/pkg/policyviolation/helpers.go
+++ b/pkg/policyviolation/helpers.go
@@ -133,7 +133,7 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
 	violatedRules := newViolatedRules(er, msg)
 
 	// create violation on resource owner (if exist) when action is set to enforce
-	owners := getOwners(dclient, er.PatchedResource)
+	owners := GetOwners(dclient, er.PatchedResource)
 
 	// standaloneresource, set pvResourceSpec with resource itself
 	if len(owners) == 0 {
@@ -146,13 +146,7 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
 	}
 
 	for _, owner := range owners {
-		// resource has owner, set pvResourceSpec with owner info
-		pvResourceSpec := kyverno.ResourceSpec{
-			Namespace: owner.namespace,
-			Kind:      owner.kind,
-			Name:      owner.name,
-		}
-		pvs = append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, pvResourceSpec, violatedRules))
+		pvs = append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, owner, violatedRules))
 	}
 	return
 }
@@ -208,32 +202,18 @@ func converLabelToSelector(labelMap map[string]string) (labels.Selector, error)
 	return policyViolationSelector, nil
 }
 
-type pvResourceOwner struct {
-	kind      string
-	namespace string
-	name      string
-}
-
-func (o pvResourceOwner) toKey() string {
-	if o.namespace == "" {
-		return o.kind + "." + o.name
-	}
-	return o.kind + "." + o.namespace + "." + o.name
-}
-
-// pass in unstr rather than using the client to get the unstr
+//GetOwners returns the owners of the given resource; pass in unstr rather than using the client to get the unstr
 // as if name is empty then GetResource panic as it returns a list
-func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvResourceOwner {
+func GetOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []kyverno.ResourceSpec {
 	resourceOwners := unstr.GetOwnerReferences()
 	if len(resourceOwners) == 0 {
-		return []pvResourceOwner{pvResourceOwner{
-			kind:      unstr.GetKind(),
-			namespace: unstr.GetNamespace(),
-			name:      unstr.GetName(),
+		return []kyverno.ResourceSpec{kyverno.ResourceSpec{
+			Kind:      unstr.GetKind(),
+			Namespace: unstr.GetNamespace(),
+			Name:      unstr.GetName(),
 		}}
 	}
-
-	var owners []pvResourceOwner
+	var owners []kyverno.ResourceSpec
 	for _, resourceOwner := range resourceOwners {
 		unstrParent, err := dclient.GetResource(resourceOwner.Kind, unstr.GetNamespace(), resourceOwner.Name)
 		if err != nil {
@@ -241,7 +221,7 @@ func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvRes
 			return nil
 		}
 
-		owners = append(owners, getOwners(dclient, *unstrParent)...)
+		owners = append(owners, GetOwners(dclient, *unstrParent)...)
 	}
 	return owners
 }
@@ -274,11 +254,11 @@ func newViolatedRules(er engine.EngineResponse, msg string) (violatedRules []kyv
 	return
 }
 
-func containsOwner(owners []pvResourceOwner, pv *kyverno.ClusterPolicyViolation) bool {
-	curOwner := pvResourceOwner{
-		kind:      pv.Spec.ResourceSpec.Kind,
-		name:      pv.Spec.ResourceSpec.Name,
-		namespace: pv.Spec.ResourceSpec.Namespace,
+func containsOwner(owners []kyverno.ResourceSpec, pv *kyverno.ClusterPolicyViolation) bool {
+	curOwner := kyverno.ResourceSpec{
+		Kind:      pv.Spec.ResourceSpec.Kind,
+		Namespace: pv.Spec.ResourceSpec.Namespace,
+		Name:      pv.Spec.ResourceSpec.Name,
 	}
 
 	for _, targetOwner := range owners {
@@ -301,26 +281,26 @@ func validDependantForDeployment(client appsv1.AppsV1Interface, curPv kyverno.Cl
 		return false
 	}
 
-	owner := pvResourceOwner{
-		kind:      curPv.Spec.ResourceSpec.Kind,
-		namespace: curPv.Spec.ResourceSpec.Namespace,
-		name:      curPv.Spec.ResourceSpec.Name,
+	owner := kyverno.ResourceSpec{
+		Kind:      curPv.Spec.ResourceSpec.Kind,
+		Namespace: curPv.Spec.ResourceSpec.Namespace,
+		Name:      curPv.Spec.ResourceSpec.Name,
 	}
 
-	deploy, err := client.Deployments(owner.namespace).Get(owner.name, metav1.GetOptions{})
+	deploy, err := client.Deployments(owner.Namespace).Get(owner.Name, metav1.GetOptions{})
 	if err != nil {
-		glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
+		glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
 		return false
 	}
 
 	expectReplicaset, err := deployutil.GetNewReplicaSet(deploy, client)
 	if err != nil {
-		glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
+		glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
 		return false
 	}
 
 	if reflect.DeepEqual(expectReplicaset, v1.ReplicaSet{}) {
-		glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.namespace, owner.kind, owner.name)
+		glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.Namespace, owner.Kind, owner.Name)
 		return false
 	}
 	var actualReplicaset *v1.ReplicaSet