
458 cleanup (#464)

* cleanup of policy violation on policy spec changes + refactoring

* remove unused code

* remove duplicate types

* cleanup references

* fix info log and clean code

* code clean

* remove dead code
Author: Shivkumar Dudhani (committed by GitHub)
Date: 2019-11-08 20:45:26 -08:00
Parent: 687c0c6470
Commit: 1613434c46
8 changed files with 189 additions and 67 deletions


@@ -151,8 +151,8 @@ func main() {
// Start the components
pInformer.Start(stopCh)
kubeInformer.Start(stopCh)
if err := configData.Run(kubeInformer.Core().V1().ConfigMaps(), stopCh); err != nil {
glog.Fatalf("Unable loading dynamic configuration: %v\n", err)
if err := configData.Run(stopCh); err != nil {
glog.Fatalf("Unable to load dynamic configuration: %v\n", err)
}
go pc.Run(1, stopCh)
go pvc.Run(1, stopCh)


@@ -62,3 +62,13 @@ func (rs ResourceSpec) ToKey() string {
}
return rs.Kind + "." + rs.Namespace + "." + rs.Name
}
//BuildKey builds the key
func BuildResourceKey(kind, namespace, name string) string {
resource := ResourceSpec{
Kind: kind,
Namespace: namespace,
Name: name,
}
return resource.ToKey()
}
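
BuildResourceKey is a thin wrapper over ResourceSpec.ToKey, exposed so callers (such as the new cleanup.go below) can build the `Kind.Namespace.Name` key without assembling the struct themselves. A minimal usage sketch, assuming the import path shown in cleanup.go:

```go
package main

import (
	"fmt"

	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)

func main() {
	// For a namespaced resource, ToKey joins kind, namespace and name with dots.
	key := kyverno.BuildResourceKey("Deployment", "default", "nginx")
	fmt.Println(key) // expected: Deployment.default.nginx
}
```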


@@ -29,6 +29,8 @@ type ConfigData struct {
mux sync.RWMutex
// configuration data
filters []k8Resource
// hasynced
cmListerSycned cache.InformerSynced
}
// ToFilter checks if the given resource is set to be filtered in the configuration
@@ -55,8 +57,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
}
cd := ConfigData{
client: rclient,
cmName: os.Getenv(cmNameEnv),
client: rclient,
cmName: os.Getenv(cmNameEnv),
cmListerSycned: cmInformer.Informer().HasSynced,
}
//TODO: this has been added to backward support command line arguments
// will be removed in future and the configuration will be set only via configmaps
@@ -73,9 +76,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
return &cd
}
func (cd *ConfigData) Run(cmInformer informers.ConfigMapInformer, stopCh <-chan struct{}) error {
func (cd *ConfigData) Run(stopCh <-chan struct{}) error {
// wait for cache to populate first time
if !cache.WaitForCacheSync(stopCh, cmInformer.Informer().HasSynced) {
if !cache.WaitForCacheSync(stopCh, cd.cmListerSycned) {
return fmt.Errorf("Configuration: Failed to sync informer cache")
}
return nil
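
The refactor captures the ConfigMap informer's HasSynced function on the struct at construction time, so Run only needs the stop channel. A minimal sketch of the same pattern with a hypothetical Controller type (names are illustrative, not the commit's):

```go
package main

import (
	"fmt"

	"k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// Controller illustrates the pattern used by ConfigData: capture HasSynced
// while the informer is in scope, then wait on it later in Run.
type Controller struct {
	client         kubernetes.Interface
	cmListerSynced cache.InformerSynced
}

func NewController(client kubernetes.Interface, factory informers.SharedInformerFactory) *Controller {
	return &Controller{
		client:         client,
		cmListerSynced: factory.Core().V1().ConfigMaps().Informer().HasSynced,
	}
}

// Run no longer takes the informer; the sync check uses the stored function.
func (c *Controller) Run(stopCh <-chan struct{}) error {
	if !cache.WaitForCacheSync(stopCh, c.cmListerSynced) {
		return fmt.Errorf("failed to sync informer cache")
	}
	return nil
}
```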

pkg/policy/cleanup.go (new file, 127 lines)

@@ -0,0 +1,127 @@
package policy
import (
"fmt"
"reflect"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/policyviolation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
func (pc *PolicyController) cleanUpPolicyViolation(pResponse engine.PolicyResponse) {
// 1- check if there is violation on resource (label:Selector)
// 2- check if there is violation on owner
// - recursively get owner by queries the api server for owner information of the resource
// there can be multiple violations as a resource can have multiple owners
pvs, err := getPv(pc.pvLister, pc.client, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Namespace, pResponse.Resource.Name)
if err != nil {
glog.Errorf("failed to cleanUp violations: %v", err)
}
for _, pv := range pvs {
if reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) {
continue
}
glog.V(4).Infof("cleanup violations %s, on %s/%s/%s", pv.Name, pv.Spec.Kind, pv.Spec.Namespace, pv.Spec.Name)
if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {
glog.Errorf("failed to delete policy violation: %v", err)
continue
}
}
}
func getPv(pvLister kyvernolister.ClusterPolicyViolationLister, client *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.ClusterPolicyViolation, error) {
var pvs []kyverno.ClusterPolicyViolation
var err error
// Check Violation on resource
pv, err := getPVOnResource(pvLister, policyName, kind, namespace, name)
if err != nil {
glog.V(4).Infof("error while fetching pv: %v", err)
return []kyverno.ClusterPolicyViolation{}, err
}
if !reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) {
// found a pv on resource
pvs = append(pvs, pv)
return pvs, nil
}
// Check Violations on owner
pvs, err = getPVonOwnerRef(pvLister, client, policyName, kind, namespace, name)
if err != nil {
glog.V(4).Infof("error while fetching pv: %v", err)
return []kyverno.ClusterPolicyViolation{}, err
}
return pvs, nil
}
func getPVonOwnerRef(pvLister kyvernolister.ClusterPolicyViolationLister, dclient *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.ClusterPolicyViolation, error) {
var pvs []kyverno.ClusterPolicyViolation
// get resource
resource, err := dclient.GetResource(kind, namespace, name)
if err != nil {
glog.V(4).Infof("error while fetching the resource: %v", err)
return pvs, err
}
// get owners
// getOwners returns nil if there is any error
owners := policyviolation.GetOwners(dclient, *resource)
// as we can have multiple top level owners to a resource
// check if pv exists on each one
// does not check for cycles
for _, owner := range owners {
pv, err := getPVOnResource(pvLister, policyName, owner.Kind, owner.Namespace, owner.Name)
if err != nil {
glog.Errorf("error while fetching resource owners: %v", err)
continue
}
pvs = append(pvs, pv)
}
return pvs, nil
}
// Wont do the claiming of objects, just lookup based on selectors and owner references
func getPVOnResource(pvLister kyvernolister.ClusterPolicyViolationLister, policyName, kind, namespace, name string) (kyverno.ClusterPolicyViolation, error) {
resourceKey := kyverno.BuildResourceKey(kind, namespace, name)
labelMap := map[string]string{"policy": policyName, "resource": resourceKey}
pvSelector, err := converLabelToSelector(labelMap)
if err != nil {
glog.Errorf("failed to generate label sector for policy %s and resourcr %s", policyName, resourceKey)
return kyverno.ClusterPolicyViolation{}, fmt.Errorf("failed to generate label sector for policy %s and resourcr %s", policyName, resourceKey)
}
pvs, err := pvLister.List(pvSelector)
if err != nil {
glog.Errorf("unable to list policy violations with label selector %v: %v", pvSelector, err)
return kyverno.ClusterPolicyViolation{}, err
}
if len(pvs) > 1 {
glog.Errorf("more than one policy violation exists with labels %v", labelMap)
return kyverno.ClusterPolicyViolation{}, fmt.Errorf("more than one policy violation exists with labels %v", labelMap)
}
if len(pvs) == 0 {
glog.V(4).Infof("policy violation does not exist with labels %v", labelMap)
return kyverno.ClusterPolicyViolation{}, nil
}
// return a copy of pv
return *pvs[0], nil
}
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
ls := &metav1.LabelSelector{}
err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
if err != nil {
return nil, err
}
policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
return nil, fmt.Errorf("invalid label selector: %v", err)
}
return policyViolationSelector, nil
}
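
getPVOnResource locates violations purely by their labels: a `policy` label and a `resource` label carrying the key from BuildResourceKey. A small sketch of the resulting selector, with hypothetical label values (it uses labels.SelectorFromSet as a shortcut instead of the metav1 conversion above):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// Same label keys as cleanup.go; the values here are hypothetical.
	sel := labels.SelectorFromSet(labels.Set{
		"policy":   "check-labels",
		"resource": "Deployment.default.nginx", // output of BuildResourceKey
	})
	// A selector like this is what pvLister.List receives in getPVOnResource.
	fmt.Println(sel) // policy=check-labels,resource=Deployment.default.nginx
}
```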


@@ -387,7 +387,6 @@ func (pc *PolicyController) processNextWorkItem() bool {
return false
}
defer pc.queue.Done(key)
err := pc.syncHandler(key.(string))
pc.handleErr(err, key)
@@ -451,11 +450,10 @@ func (pc *PolicyController) syncPolicy(key string) error {
return err
}
// process policies on existing resources
policyInfos := pc.processExistingResources(*p)
engineResponses := pc.processExistingResources(*p)
// report errors
pc.report(policyInfos)
pc.cleanupAndReport(engineResponses)
// fetch the policy again via the aggreagator to remain consistent
// return pc.statusAggregator.UpdateViolationCount(p.Name, pvList)
return pc.syncStatusOnly(p, pvList)
}


@@ -9,20 +9,24 @@ import (
"github.com/nirmata/kyverno/pkg/policyviolation"
)
func (pc *PolicyController) report(engineResponses []engine.EngineResponse) {
// generate events
// generate policy violations
for _, policyInfo := range engineResponses {
// events
// success - policy applied on resource
// failure - policy/rule failed to apply on the resource
reportEvents(policyInfo, pc.eventGen)
// policy violations
// failure - policy/rule failed to apply on the resource
// for each policy-resource response
// - has violation -> report
// - no violation -> cleanup policy violations(resource or resource owner)
func (pc *PolicyController) cleanupAndReport(engineResponses []engine.EngineResponse) {
for _, eResponse := range engineResponses {
if !eResponse.IsSuccesful() {
// failure - policy/rule failed to apply on the resource
reportEvents(eResponse, pc.eventGen)
// generate policy violation
// Only created on resource, not resource owners
policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
} else {
// cleanup existing violations if any
// if there is any error in clean up, we dont re-queue the resource
// it will be re-tried in the next controller cache resync
pc.cleanUpPolicyViolation(eResponse.PolicyResponse)
}
}
// generate policy violation
policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
}
//reportEvents generates events for the failed resources
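
The control flow above replaces the old unconditional report: each engine response is now dispatched on its result. A simplified, self-contained sketch of that dispatch (stand-in types only, not the real engine.EngineResponse):

```go
package main

import "fmt"

// response is a stand-in for engine.EngineResponse, illustrative only.
type response struct {
	policy, resource string
	successful       bool
}

// dispatch mirrors cleanupAndReport: failed responses are reported and turned
// into policy violations; successful ones trigger cleanup of stale violations.
func dispatch(responses []response) {
	for _, r := range responses {
		if !r.successful {
			fmt.Printf("report + create violation: policy=%s resource=%s\n", r.policy, r.resource)
			continue
		}
		fmt.Printf("clean up stale violations: policy=%s resource=%s\n", r.policy, r.resource)
	}
}

func main() {
	dispatch([]response{
		{policy: "check-labels", resource: "Deployment.default.nginx", successful: false},
		{policy: "check-labels", resource: "Deployment.default.redis", successful: true},
	})
}
```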


@@ -269,7 +269,7 @@ func (pvc *PolicyViolationController) syncBlockedResource(curPv *kyverno.Cluster
for _, resource := range resources.Items {
glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
owners := getOwners(pvc.client, resource)
owners := GetOwners(pvc.client, resource)
// owner of resource matches violation resourceSpec
// remove policy violation as the blocked request got created
if containsOwner(owners, curPv) {


@@ -133,7 +133,7 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
violatedRules := newViolatedRules(er, msg)
// create violation on resource owner (if exist) when action is set to enforce
owners := getOwners(dclient, er.PatchedResource)
owners := GetOwners(dclient, er.PatchedResource)
// standaloneresource, set pvResourceSpec with resource itself
if len(owners) == 0 {
@@ -146,13 +146,7 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
}
for _, owner := range owners {
// resource has owner, set pvResourceSpec with owner info
pvResourceSpec := kyverno.ResourceSpec{
Namespace: owner.namespace,
Kind: owner.kind,
Name: owner.name,
}
pvs = append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, pvResourceSpec, violatedRules))
pvs = append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, owner, violatedRules))
}
return
}
@@ -208,32 +202,18 @@ func converLabelToSelector(labelMap map[string]string) (labels.Selector, error)
return policyViolationSelector, nil
}
type pvResourceOwner struct {
kind string
namespace string
name string
}
func (o pvResourceOwner) toKey() string {
if o.namespace == "" {
return o.kind + "." + o.name
}
return o.kind + "." + o.namespace + "." + o.name
}
// pass in unstr rather than using the client to get the unstr
//GetOwners pass in unstr rather than using the client to get the unstr
// as if name is empty then GetResource panic as it returns a list
func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvResourceOwner {
func GetOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []kyverno.ResourceSpec {
resourceOwners := unstr.GetOwnerReferences()
if len(resourceOwners) == 0 {
return []pvResourceOwner{pvResourceOwner{
kind: unstr.GetKind(),
namespace: unstr.GetNamespace(),
name: unstr.GetName(),
return []kyverno.ResourceSpec{kyverno.ResourceSpec{
Kind: unstr.GetKind(),
Namespace: unstr.GetNamespace(),
Name: unstr.GetName(),
}}
}
var owners []pvResourceOwner
var owners []kyverno.ResourceSpec
for _, resourceOwner := range resourceOwners {
unstrParent, err := dclient.GetResource(resourceOwner.Kind, unstr.GetNamespace(), resourceOwner.Name)
if err != nil {
@@ -241,7 +221,7 @@ func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvRes
return nil
}
owners = append(owners, getOwners(dclient, *unstrParent)...)
owners = append(owners, GetOwners(dclient, *unstrParent)...)
}
return owners
}
@@ -274,11 +254,11 @@ func newViolatedRules(er engine.EngineResponse, msg string) (violatedRules []kyv
return
}
func containsOwner(owners []pvResourceOwner, pv *kyverno.ClusterPolicyViolation) bool {
curOwner := pvResourceOwner{
kind: pv.Spec.ResourceSpec.Kind,
name: pv.Spec.ResourceSpec.Name,
namespace: pv.Spec.ResourceSpec.Namespace,
func containsOwner(owners []kyverno.ResourceSpec, pv *kyverno.ClusterPolicyViolation) bool {
curOwner := kyverno.ResourceSpec{
Kind: pv.Spec.ResourceSpec.Kind,
Namespace: pv.Spec.ResourceSpec.Namespace,
Name: pv.Spec.ResourceSpec.Name,
}
for _, targetOwner := range owners {
@@ -301,26 +281,26 @@ func validDependantForDeployment(client appsv1.AppsV1Interface, curPv kyverno.Cl
return false
}
owner := pvResourceOwner{
kind: curPv.Spec.ResourceSpec.Kind,
namespace: curPv.Spec.ResourceSpec.Namespace,
name: curPv.Spec.ResourceSpec.Name,
owner := kyverno.ResourceSpec{
Kind: curPv.Spec.ResourceSpec.Kind,
Namespace: curPv.Spec.ResourceSpec.Namespace,
Name: curPv.Spec.ResourceSpec.Name,
}
deploy, err := client.Deployments(owner.namespace).Get(owner.name, metav1.GetOptions{})
deploy, err := client.Deployments(owner.Namespace).Get(owner.Name, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
return false
}
expectReplicaset, err := deployutil.GetNewReplicaSet(deploy, client)
if err != nil {
glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
return false
}
if reflect.DeepEqual(expectReplicaset, v1.ReplicaSet{}) {
glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.namespace, owner.kind, owner.name)
glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.Namespace, owner.Kind, owner.Name)
return false
}
var actualReplicaset *v1.ReplicaSet