From 9656975b5a659b4fc0d06264e1e33e66b96e8f9d Mon Sep 17 00:00:00 2001
From: shravan
Date: Sat, 7 Mar 2020 12:53:37 +0530
Subject: [PATCH] 527 renamed package and send listener instead of entire sync object

---
 cmd/kyverno/main.go                        | 11 ++++---
 pkg/generate/controller.go                 | 12 ++++----
 pkg/generate/generate.go                   |  4 +--
 pkg/{policyStatus => policystatus}/main.go | 10 +++++--
 pkg/policyviolation/clusterpv.go           | 18 ++++++------
 pkg/policyviolation/generator.go           | 34 +++++++++++-----------
 pkg/policyviolation/namespacedpv.go        | 18 ++++++------
 pkg/webhooks/generation.go                 |  4 +--
 pkg/webhooks/mutation.go                   |  4 +--
 pkg/webhooks/server.go                     |  8 ++---
 pkg/webhooks/validation.go                 |  4 +--
 11 files changed, 65 insertions(+), 62 deletions(-)
 rename pkg/{policyStatus => policystatus}/main.go (94%)

diff --git a/cmd/kyverno/main.go b/cmd/kyverno/main.go
index 6a396b8f2e..5066591e1e 100644
--- a/cmd/kyverno/main.go
+++ b/cmd/kyverno/main.go
@@ -5,8 +5,6 @@ import (
 	"flag"
 	"time"
 
-	"github.com/nirmata/kyverno/pkg/policyStatus"
-
 	"github.com/golang/glog"
 	"github.com/nirmata/kyverno/pkg/checker"
 	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@@ -17,6 +15,7 @@ import (
 	"github.com/nirmata/kyverno/pkg/generate"
 	generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup"
 	"github.com/nirmata/kyverno/pkg/policy"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	"github.com/nirmata/kyverno/pkg/policystore"
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 	"github.com/nirmata/kyverno/pkg/signal"
@@ -138,7 +137,7 @@ func main() {
 		pInformer.Kyverno().V1().ClusterPolicies())
 
 	// Policy Status Handler - deals with all logic related to policy status
-	statusSync := policyStatus.NewSync(
+	statusSync := policystatus.NewSync(
 		pclient,
 		policyMetaStore)
 
@@ -148,7 +147,7 @@ func main() {
 		client,
 		pInformer.Kyverno().V1().ClusterPolicyViolations(),
 		pInformer.Kyverno().V1().PolicyViolations(),
-		statusSync)
+		statusSync.Listener)
 
 	// POLICY CONTROLLER
 	// - reconciliation policy and policy violation
@@ -182,7 +181,7 @@ func main() {
 		egen,
 		pvgen,
 		kubedynamicInformer,
-		statusSync,
+		statusSync.Listener,
 	)
 	// GENERATE REQUEST CLEANUP
 	// -- cleans up the generate requests that have not been processed(i.e. state = [Pending, Failed]) for more than defined timeout
@@ -224,7 +223,7 @@ func main() {
 		kubeInformer.Rbac().V1().ClusterRoleBindings(),
 		egen,
 		webhookRegistrationClient,
-		statusSync,
+		statusSync.Listener,
 		configData,
 		policyMetaStore,
 		pvgen,
diff --git a/pkg/generate/controller.go b/pkg/generate/controller.go
index 6d877d9448..d40ed2af0d 100644
--- a/pkg/generate/controller.go
+++ b/pkg/generate/controller.go
@@ -11,7 +11,7 @@ import (
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	"github.com/nirmata/kyverno/pkg/event"
-	"github.com/nirmata/kyverno/pkg/policyStatus"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -59,7 +59,7 @@ type Controller struct {
 	// only support Namespaces for re-evalutation on resource updates
 	nsInformer informers.GenericInformer
 
-	policyStatus *policyStatus.Sync
+	policyStatusListener policystatus.Listener
 }
 
 //NewController returns an instance of the Generate-Request Controller
@@ -71,7 +71,7 @@ func NewController(
 	eventGen event.Interface,
 	pvGenerator policyviolation.GeneratorInterface,
 	dynamicInformer dynamicinformer.DynamicSharedInformerFactory,
-	policyStatus *policyStatus.Sync,
+	policyStatus policystatus.Listener,
 ) *Controller {
 	c := Controller{
 		client: client,
@@ -80,9 +80,9 @@ func NewController(
 		pvGenerator: pvGenerator,
 		//TODO: do the math for worst case back off and make sure cleanup runs after that
 		// as we dont want a deleted GR to be re-queue
-		queue:           workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
-		dynamicInformer: dynamicInformer,
-		policyStatus:    policyStatus,
+		queue:                workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
+		dynamicInformer:      dynamicInformer,
+		policyStatusListener: policyStatus,
 	}
 
 	c.statusControl = StatusControl{client: kyvernoclient}
diff --git a/pkg/generate/generate.go b/pkg/generate/generate.go
index 512c76bcdf..bff1dbbca6 100644
--- a/pkg/generate/generate.go
+++ b/pkg/generate/generate.go
@@ -125,10 +125,10 @@ func (c *Controller) applyGeneratePolicy(policyContext engine.PolicyContext, gr
 	}
 
 	if gr.Status.State == "" {
-		c.policyStatus.Listener <- generateSyncStats{
+		c.policyStatusListener.Send(generateSyncStats{
 			policyName:               policy.Name,
 			ruleNameToProcessingTime: ruleNameToProcessingTime,
-		}
+		})
 	}
 
 	return genResources, nil
diff --git a/pkg/policyStatus/main.go b/pkg/policystatus/main.go
similarity index 94%
rename from pkg/policyStatus/main.go
rename to pkg/policystatus/main.go
index 498ab2a0fc..ff3caaa7cc 100644
--- a/pkg/policyStatus/main.go
+++ b/pkg/policystatus/main.go
@@ -1,4 +1,4 @@
-package policyStatus
+package policystatus
 
 import (
 	"sync"
@@ -22,9 +22,15 @@ type policyStore interface {
 	Get(policyName string) (*v1.ClusterPolicy, error)
 }
 
+type Listener chan statusUpdater
+
+func (l Listener) Send(s statusUpdater) {
+	l <- s
+}
+
 type Sync struct {
 	cache       *cache
-	Listener    chan statusUpdater
+	Listener    Listener
 	client      *versioned.Clientset
 	policyStore policyStore
 }
diff --git a/pkg/policyviolation/clusterpv.go b/pkg/policyviolation/clusterpv.go
index d041c32828..c9336f8059 100644
--- a/pkg/policyviolation/clusterpv.go
+++ b/pkg/policyviolation/clusterpv.go
@@ -9,7 +9,7 @@ import (
 	kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
 	client "github.com/nirmata/kyverno/pkg/dclient"
-	"github.com/nirmata/kyverno/pkg/policyStatus"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -22,19 +22,19 @@ type clusterPV struct {
 	// policy violation interface
 	kyvernoInterface kyvernov1.KyvernoV1Interface
 	// update policy stats with violationCount
-	policyStatus *policyStatus.Sync
+	policyStatusListener policystatus.Listener
 }
 
 func newClusterPV(dclient *client.Client,
 	cpvLister kyvernolister.ClusterPolicyViolationLister,
 	kyvernoInterface kyvernov1.KyvernoV1Interface,
-	policyStatus *policyStatus.Sync,
+	policyStatus policystatus.Listener,
 ) *clusterPV {
 	cpv := clusterPV{
-		dclient:          dclient,
-		cpvLister:        cpvLister,
-		kyvernoInterface: kyvernoInterface,
-		policyStatus:     policyStatus,
+		dclient:              dclient,
+		cpvLister:            cpvLister,
+		kyvernoInterface:     kyvernoInterface,
+		policyStatusListener: policyStatus,
 	}
 	return &cpv
 }
@@ -100,7 +100,7 @@ func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
 	}
 
 	if newPv.Annotations["fromSync"] != "true" {
-		cpv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
+		cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
 	}
 
 	glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
@@ -126,7 +126,7 @@ func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) err
 	glog.Infof("cluster policy violation updated for resource %v", newPv.Spec.ResourceSpec)
 
 	if newPv.Annotations["fromSync"] != "true" {
-		cpv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
+		cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
 	}
 	return nil
 }
diff --git a/pkg/policyviolation/generator.go b/pkg/policyviolation/generator.go
index 6c491112ee..db5ca63fcd 100644
--- a/pkg/policyviolation/generator.go
+++ b/pkg/policyviolation/generator.go
@@ -14,7 +14,7 @@ import (
 	kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
 	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
-	"github.com/nirmata/kyverno/pkg/policyStatus"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 
 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -38,10 +38,10 @@ type Generator struct {
 	// returns true if the cluster policy store has been synced at least once
 	pvSynced cache.InformerSynced
 	// returns true if the namespaced cluster policy store has been synced at at least once
-	nspvSynced   cache.InformerSynced
-	queue        workqueue.RateLimitingInterface
-	dataStore    *dataStore
-	policyStatus *policyStatus.Sync
+	nspvSynced           cache.InformerSynced
+	queue                workqueue.RateLimitingInterface
+	dataStore            *dataStore
+	policyStatusListener policystatus.Listener
 }
 
 //NewDataStore returns an instance of data store
@@ -107,17 +107,17 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
 	dclient *dclient.Client,
 	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
 	nspvInformer kyvernoinformer.PolicyViolationInformer,
-	policyStatus *policyStatus.Sync) *Generator {
+	policyStatus policystatus.Listener) *Generator {
 	gen := Generator{
-		kyvernoInterface: client.KyvernoV1(),
-		dclient:          dclient,
-		cpvLister:        pvInformer.Lister(),
-		pvSynced:         pvInformer.Informer().HasSynced,
-		nspvLister:       nspvInformer.Lister(),
-		nspvSynced:       nspvInformer.Informer().HasSynced,
-		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
-		dataStore:        newDataStore(),
-		policyStatus:     policyStatus,
+		kyvernoInterface:     client.KyvernoV1(),
+		dclient:              dclient,
+		cpvLister:            pvInformer.Lister(),
+		pvSynced:             pvInformer.Informer().HasSynced,
+		nspvLister:           nspvInformer.Lister(),
+		nspvSynced:           nspvInformer.Informer().HasSynced,
+		queue:                workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
+		dataStore:            newDataStore(),
+		policyStatusListener: policyStatus,
 	}
 	return &gen
 }
@@ -224,10 +224,10 @@ func (gen *Generator) syncHandler(info Info) error {
 	builder := newPvBuilder()
 	if info.Resource.GetNamespace() == "" {
 		// cluster scope resource generate a clusterpolicy violation
-		handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface, gen.policyStatus)
+		handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface, gen.policyStatusListener)
 	} else {
 		// namespaced resources generated a namespaced policy violation in the namespace of the resource
-		handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface, gen.policyStatus)
+		handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface, gen.policyStatusListener)
 	}
 
 	failure := false
diff --git a/pkg/policyviolation/namespacedpv.go b/pkg/policyviolation/namespacedpv.go
index 80496312c9..ff32748d0b 100644
--- a/pkg/policyviolation/namespacedpv.go
+++ b/pkg/policyviolation/namespacedpv.go
@@ -9,7 +9,7 @@ import (
 	kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
 	client "github.com/nirmata/kyverno/pkg/dclient"
-	"github.com/nirmata/kyverno/pkg/policyStatus"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
@@ -22,19 +22,19 @@ type namespacedPV struct {
 	// policy violation interface
 	kyvernoInterface kyvernov1.KyvernoV1Interface
 	// update policy status with violationCount
-	policyStatus *policyStatus.Sync
+	policyStatusListener policystatus.Listener
 }
 
 func newNamespacedPV(dclient *client.Client,
 	nspvLister kyvernolister.PolicyViolationLister,
 	kyvernoInterface kyvernov1.KyvernoV1Interface,
-	policyStatus *policyStatus.Sync,
+	policyStatus policystatus.Listener,
 ) *namespacedPV {
 	nspv := namespacedPV{
-		dclient:          dclient,
-		nspvLister:       nspvLister,
-		kyvernoInterface: kyvernoInterface,
-		policyStatus:     policyStatus,
+		dclient:              dclient,
+		nspvLister:           nspvLister,
+		kyvernoInterface:     kyvernoInterface,
+		policyStatusListener: policyStatus,
 	}
 	return &nspv
 }
@@ -99,7 +99,7 @@ func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
 	}
 
 	if newPv.Annotations["fromSync"] != "true" {
-		nspv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
+		nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
 	}
 	glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
 	return nil
@@ -122,7 +122,7 @@ func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error
 	}
 
 	if newPv.Annotations["fromSync"] != "true" {
-		nspv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
+		nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
 	}
 	glog.Infof("namespaced policy violation updated for resource %v", newPv.Spec.ResourceSpec)
 	return nil
diff --git a/pkg/webhooks/generation.go b/pkg/webhooks/generation.go
index 4e06dd9b09..956c568cac 100644
--- a/pkg/webhooks/generation.go
+++ b/pkg/webhooks/generation.go
@@ -66,9 +66,9 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
 		if len(engineResponse.PolicyResponse.Rules) > 0 {
 			// some generate rules do apply to the resource
 			engineResponses = append(engineResponses, engineResponse)
-			ws.status.Listener <- generateStats{
+			ws.statusListener.Send(generateStats{
 				resp: engineResponse,
-			}
+			})
 		}
 	}
 	// Adds Generate Request to a channel(queue size 1000) to generators
diff --git a/pkg/webhooks/mutation.go b/pkg/webhooks/mutation.go
index 09e4fe80ba..2723b00110 100644
--- a/pkg/webhooks/mutation.go
+++ b/pkg/webhooks/mutation.go
@@ -61,9 +61,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
 		policyContext.Policy = policy
 		engineResponse := engine.Mutate(policyContext)
 		engineResponses = append(engineResponses, engineResponse)
-		ws.status.Listener <- mutateStats{
-			resp: engineResponse,
-		}
+		ws.statusListener.Send(mutateStats{resp: engineResponse})
 		if !engineResponse.IsSuccesful() {
 			glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, resource.GetNamespace(), resource.GetName())
 			continue
diff --git a/pkg/webhooks/server.go b/pkg/webhooks/server.go
index e7733e5f72..b0ca988449 100644
--- a/pkg/webhooks/server.go
+++ b/pkg/webhooks/server.go
@@ -18,7 +18,7 @@ import (
 	"github.com/nirmata/kyverno/pkg/config"
 	client "github.com/nirmata/kyverno/pkg/dclient"
 	"github.com/nirmata/kyverno/pkg/event"
-	"github.com/nirmata/kyverno/pkg/policyStatus"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	"github.com/nirmata/kyverno/pkg/policystore"
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 	tlsutils "github.com/nirmata/kyverno/pkg/tls"
@@ -55,7 +55,7 @@ type WebhookServer struct {
 	// webhook registration client
 	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
 	// API to send policy stats for aggregation
-	status *policyStatus.Sync
+	statusListener policystatus.Listener
 	// helpers to validate against current loaded configuration
 	configHandler config.Interface
 	// channel for cleanup notification
@@ -82,7 +82,7 @@ func NewWebhookServer(
 	crbInformer rbacinformer.ClusterRoleBindingInformer,
 	eventGen event.Interface,
 	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
-	statusSync *policyStatus.Sync,
+	statusSync policystatus.Listener,
 	configHandler config.Interface,
 	pMetaStore policystore.LookupInterface,
 	pvGenerator policyviolation.GeneratorInterface,
@@ -112,7 +112,7 @@ func NewWebhookServer(
 		crbSynced: crbInformer.Informer().HasSynced,
 		eventGen: eventGen,
 		webhookRegistrationClient: webhookRegistrationClient,
-		status: statusSync,
+		statusListener: statusSync,
 		configHandler: configHandler,
 		cleanUp: cleanUp,
 		lastReqTime: resourceWebhookWatcher.LastReqTime,
diff --git a/pkg/webhooks/validation.go b/pkg/webhooks/validation.go
index 04f76a08f5..54a7fbdf3e 100644
--- a/pkg/webhooks/validation.go
+++ b/pkg/webhooks/validation.go
@@ -71,9 +71,9 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
 			continue
 		}
 		engineResponses = append(engineResponses, engineResponse)
-		ws.status.Listener <- validateStats{
+		ws.statusListener.Send(validateStats{
 			resp: engineResponse,
-		}
+		})
 		if !engineResponse.IsSuccesful() {
 			glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, newR.GetNamespace(), newR.GetName())
 			continue
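
Reviewer note: the heart of this change is the new Listener type in pkg/policystatus/main.go — a named channel type with a Send method — so producers (the webhook handlers, the policy violation generator, and the generate controller) now receive only statusSync.Listener instead of the whole *Sync. The standalone sketch below illustrates the pattern; names are simplified stand-ins, since the patch does not show the real statusUpdater method set or the Sync run loop (PolicyName and the demo Run here are illustrative assumptions, not the actual kyverno API):

package main

import "fmt"

// statusUpdater plays the role of kyverno's statusUpdater interface; its
// real method set is not shown in this patch, so PolicyName is a stand-in.
type statusUpdater interface {
	PolicyName() string
}

// Listener is a named channel type, as introduced in pkg/policystatus.
// Producers hold the channel by value and push through Send, so they never
// see the consumer (Sync) that owns the other end.
type Listener chan statusUpdater

// Send forwards one stats update to the consumer.
func (l Listener) Send(s statusUpdater) {
	l <- s
}

// Sync is the single consumer that drains the channel and aggregates
// per-policy status (trimmed down to the channel-handling skeleton here).
type Sync struct {
	Listener Listener
}

func NewSync() *Sync {
	return &Sync{Listener: make(Listener)}
}

// Run drains the listener until it is closed, then signals completion.
func (s *Sync) Run(done chan<- struct{}) {
	for u := range s.Listener {
		// In kyverno this is where the update would be folded into the
		// policy's status and written back via the clientset.
		fmt.Printf("aggregating status update for policy %q\n", u.PolicyName())
	}
	close(done)
}

// violationCount mirrors the producer-side message sent by the policy
// violation handlers in this patch.
type violationCount struct{ policyName string }

func (v violationCount) PolicyName() string { return v.policyName }

func main() {
	statusSync := NewSync()
	done := make(chan struct{})
	go statusSync.Run(done)

	// A producer such as the PV generator holds only statusSync.Listener.
	statusSync.Listener.Send(violationCount{policyName: "require-labels"})

	close(statusSync.Listener)
	<-done
}

The payoff is the same as in the diff above: HandleMutation and friends call ws.statusListener.Send(...) without depending on the Sync aggregator, and a test can substitute its own Listener to observe exactly which stats a handler emits.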