1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-29 10:55:05 +00:00

527 renamed package and send listener instead of entire sync object

This commit is contained in:
shravan 2020-03-07 12:53:37 +05:30
parent 6206852262
commit 9656975b5a
11 changed files with 65 additions and 62 deletions

View file

@ -5,8 +5,6 @@ import (
"flag"
"time"
"github.com/nirmata/kyverno/pkg/policyStatus"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/checker"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@ -17,6 +15,7 @@ import (
"github.com/nirmata/kyverno/pkg/generate"
generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/signal"
@ -138,7 +137,7 @@ func main() {
pInformer.Kyverno().V1().ClusterPolicies())
// Policy Status Handler - deals with all logic related to policy status
statusSync := policyStatus.NewSync(
statusSync := policystatus.NewSync(
pclient,
policyMetaStore)
@ -148,7 +147,7 @@ func main() {
client,
pInformer.Kyverno().V1().ClusterPolicyViolations(),
pInformer.Kyverno().V1().PolicyViolations(),
statusSync)
statusSync.Listener)
// POLICY CONTROLLER
// - reconciliation policy and policy violation
@ -182,7 +181,7 @@ func main() {
egen,
pvgen,
kubedynamicInformer,
statusSync,
statusSync.Listener,
)
// GENERATE REQUEST CLEANUP
// -- cleans up the generate requests that have not been processed(i.e. state = [Pending, Failed]) for more than defined timeout
@ -224,7 +223,7 @@ func main() {
kubeInformer.Rbac().V1().ClusterRoleBindings(),
egen,
webhookRegistrationClient,
statusSync,
statusSync.Listener,
configData,
policyMetaStore,
pvgen,

View file

@ -11,7 +11,7 @@ import (
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyStatus"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policyviolation"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -59,7 +59,7 @@ type Controller struct {
// only support Namespaces for re-evalutation on resource updates
nsInformer informers.GenericInformer
policyStatus *policyStatus.Sync
policyStatusListener policystatus.Listener
}
//NewController returns an instance of the Generate-Request Controller
@ -71,7 +71,7 @@ func NewController(
eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
dynamicInformer dynamicinformer.DynamicSharedInformerFactory,
policyStatus *policyStatus.Sync,
policyStatus policystatus.Listener,
) *Controller {
c := Controller{
client: client,
@ -80,9 +80,9 @@ func NewController(
pvGenerator: pvGenerator,
//TODO: do the math for worst case back off and make sure cleanup runs after that
// as we dont want a deleted GR to be re-queue
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
dynamicInformer: dynamicInformer,
policyStatus: policyStatus,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
dynamicInformer: dynamicInformer,
policyStatusListener: policyStatus,
}
c.statusControl = StatusControl{client: kyvernoclient}

View file

@ -125,10 +125,10 @@ func (c *Controller) applyGeneratePolicy(policyContext engine.PolicyContext, gr
}
if gr.Status.State == "" {
c.policyStatus.Listener <- generateSyncStats{
c.policyStatusListener.Send(generateSyncStats{
policyName: policy.Name,
ruleNameToProcessingTime: ruleNameToProcessingTime,
}
})
}
return genResources, nil

View file

@ -1,4 +1,4 @@
package policyStatus
package policystatus
import (
"sync"
@ -22,9 +22,15 @@ type policyStore interface {
Get(policyName string) (*v1.ClusterPolicy, error)
}
type Listener chan statusUpdater
func (l Listener) Send(s statusUpdater) {
l <- s
}
type Sync struct {
cache *cache
Listener chan statusUpdater
Listener Listener
client *versioned.Clientset
policyStore policyStore
}

View file

@ -9,7 +9,7 @@ import (
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/policyStatus"
"github.com/nirmata/kyverno/pkg/policystatus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -22,19 +22,19 @@ type clusterPV struct {
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// update policy stats with violationCount
policyStatus *policyStatus.Sync
policyStatusListener policystatus.Listener
}
func newClusterPV(dclient *client.Client,
cpvLister kyvernolister.ClusterPolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
policyStatus *policyStatus.Sync,
policyStatus policystatus.Listener,
) *clusterPV {
cpv := clusterPV{
dclient: dclient,
cpvLister: cpvLister,
kyvernoInterface: kyvernoInterface,
policyStatus: policyStatus,
dclient: dclient,
cpvLister: cpvLister,
kyvernoInterface: kyvernoInterface,
policyStatusListener: policyStatus,
}
return &cpv
}
@ -100,7 +100,7 @@ func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
}
if newPv.Annotations["fromSync"] != "true" {
cpv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
@ -126,7 +126,7 @@ func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) err
glog.Infof("cluster policy violation updated for resource %v", newPv.Spec.ResourceSpec)
if newPv.Annotations["fromSync"] != "true" {
cpv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
return nil
}

View file

@ -14,7 +14,7 @@ import (
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
"github.com/nirmata/kyverno/pkg/policyStatus"
"github.com/nirmata/kyverno/pkg/policystatus"
dclient "github.com/nirmata/kyverno/pkg/dclient"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -38,10 +38,10 @@ type Generator struct {
// returns true if the cluster policy store has been synced at least once
pvSynced cache.InformerSynced
// returns true if the namespaced cluster policy store has been synced at at least once
nspvSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
dataStore *dataStore
policyStatus *policyStatus.Sync
nspvSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
dataStore *dataStore
policyStatusListener policystatus.Listener
}
//NewDataStore returns an instance of data store
@ -107,17 +107,17 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
dclient *dclient.Client,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
nspvInformer kyvernoinformer.PolicyViolationInformer,
policyStatus *policyStatus.Sync) *Generator {
policyStatus policystatus.Listener) *Generator {
gen := Generator{
kyvernoInterface: client.KyvernoV1(),
dclient: dclient,
cpvLister: pvInformer.Lister(),
pvSynced: pvInformer.Informer().HasSynced,
nspvLister: nspvInformer.Lister(),
nspvSynced: nspvInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
dataStore: newDataStore(),
policyStatus: policyStatus,
kyvernoInterface: client.KyvernoV1(),
dclient: dclient,
cpvLister: pvInformer.Lister(),
pvSynced: pvInformer.Informer().HasSynced,
nspvLister: nspvInformer.Lister(),
nspvSynced: nspvInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
dataStore: newDataStore(),
policyStatusListener: policyStatus,
}
return &gen
}
@ -224,10 +224,10 @@ func (gen *Generator) syncHandler(info Info) error {
builder := newPvBuilder()
if info.Resource.GetNamespace() == "" {
// cluster scope resource generate a clusterpolicy violation
handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface, gen.policyStatus)
handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface, gen.policyStatusListener)
} else {
// namespaced resources generated a namespaced policy violation in the namespace of the resource
handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface, gen.policyStatus)
handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface, gen.policyStatusListener)
}
failure := false

View file

@ -9,7 +9,7 @@ import (
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/policyStatus"
"github.com/nirmata/kyverno/pkg/policystatus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -22,19 +22,19 @@ type namespacedPV struct {
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// update policy status with violationCount
policyStatus *policyStatus.Sync
policyStatusListener policystatus.Listener
}
func newNamespacedPV(dclient *client.Client,
nspvLister kyvernolister.PolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
policyStatus *policyStatus.Sync,
policyStatus policystatus.Listener,
) *namespacedPV {
nspv := namespacedPV{
dclient: dclient,
nspvLister: nspvLister,
kyvernoInterface: kyvernoInterface,
policyStatus: policyStatus,
dclient: dclient,
nspvLister: nspvLister,
kyvernoInterface: kyvernoInterface,
policyStatusListener: policyStatus,
}
return &nspv
}
@ -99,7 +99,7 @@ func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
}
if newPv.Annotations["fromSync"] != "true" {
nspv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
return nil
@ -122,7 +122,7 @@ func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error
}
if newPv.Annotations["fromSync"] != "true" {
nspv.policyStatus.Listener <- violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules}
nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
glog.Infof("namespaced policy violation updated for resource %v", newPv.Spec.ResourceSpec)
return nil

View file

@ -66,9 +66,9 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
if len(engineResponse.PolicyResponse.Rules) > 0 {
// some generate rules do apply to the resource
engineResponses = append(engineResponses, engineResponse)
ws.status.Listener <- generateStats{
ws.statusListener.Send(generateStats{
resp: engineResponse,
}
})
}
}
// Adds Generate Request to a channel(queue size 1000) to generators

View file

@ -61,9 +61,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
policyContext.Policy = policy
engineResponse := engine.Mutate(policyContext)
engineResponses = append(engineResponses, engineResponse)
ws.status.Listener <- mutateStats{
resp: engineResponse,
}
ws.statusListener.Send(mutateStats{resp: engineResponse})
if !engineResponse.IsSuccesful() {
glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, resource.GetNamespace(), resource.GetName())
continue

View file

@ -18,7 +18,7 @@ import (
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyStatus"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
tlsutils "github.com/nirmata/kyverno/pkg/tls"
@ -55,7 +55,7 @@ type WebhookServer struct {
// webhook registration client
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
// API to send policy stats for aggregation
status *policyStatus.Sync
statusListener policystatus.Listener
// helpers to validate against current loaded configuration
configHandler config.Interface
// channel for cleanup notification
@ -82,7 +82,7 @@ func NewWebhookServer(
crbInformer rbacinformer.ClusterRoleBindingInformer,
eventGen event.Interface,
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
statusSync *policyStatus.Sync,
statusSync policystatus.Listener,
configHandler config.Interface,
pMetaStore policystore.LookupInterface,
pvGenerator policyviolation.GeneratorInterface,
@ -112,7 +112,7 @@ func NewWebhookServer(
crbSynced: crbInformer.Informer().HasSynced,
eventGen: eventGen,
webhookRegistrationClient: webhookRegistrationClient,
status: statusSync,
statusListener: statusSync,
configHandler: configHandler,
cleanUp: cleanUp,
lastReqTime: resourceWebhookWatcher.LastReqTime,

View file

@ -71,9 +71,9 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
continue
}
engineResponses = append(engineResponses, engineResponse)
ws.status.Listener <- validateStats{
ws.statusListener.Send(validateStats{
resp: engineResponse,
}
})
if !engineResponse.IsSuccesful() {
glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, newR.GetNamespace(), newR.GetName())
continue