1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-07 00:17:13 +00:00
kyverno/pkg/namespace/controller.go

256 lines
7.9 KiB
Go
Raw Normal View History

2019-08-14 14:56:53 -07:00
package namespace
import (
"time"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/golang/glog"
2019-11-13 13:41:08 -08:00
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
2019-11-12 14:41:29 -08:00
"github.com/nirmata/kyverno/pkg/config"
2019-08-14 14:56:53 -07:00
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
2019-08-20 17:35:40 -07:00
"github.com/nirmata/kyverno/pkg/policy"
2019-11-12 14:41:29 -08:00
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
2019-08-14 14:56:53 -07:00
"k8s.io/apimachinery/pkg/api/errors"
2019-08-17 09:58:14 -07:00
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
2019-11-13 13:41:08 -08:00
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
2019-08-14 14:56:53 -07:00
v1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
v1Informer "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
	// maxRetries is the number of times a Namespace will be processed for a policy before it's dropped from the queue
	maxRetries = 15
)
//NamespaceController watches the 'Namespace' resource creation/update and applied the generation rules on them
type NamespaceController struct {
	client        *client.Client
	kyvernoClient *kyvernoclient.Clientset
	// syncHandler processes a single namespace key; held as a field so tests can stub it
	syncHandler func(nsKey string) error
	// enqueueNs adds a namespace to the work queue; held as a field so tests can stub it
	enqueueNs func(ns *v1.Namespace)
	//nsLister provides expansion to the namespace lister to inject GVK for the resource
	nsLister NamespaceListerExpansion
	// nsListerSynced returns true if the Namespace store has been synced at least once
	nsListerSynced cache.InformerSynced
	// pLister can list/get cluster policies from the shared informer's store
	pLister kyvernolister.ClusterPolicyLister
	// pvListerSynced returns true if the policy store has been synced at least once
	// NOTE(review): this is assigned from the policy informer, not the policy
	// violation informer — confirm the field name matches the intent.
	pvListerSynced cache.InformerSynced
	// pvLister can list/get policy violations from the shared informer's store
	pvLister kyvernolister.ClusterPolicyViolationLister
	// policyStatus is the API to send policy stats for aggregation
	policyStatus policy.PolicyStatusInterface
	// eventGen provides interface to generate events
	eventGen event.Interface
	// queue holds the namespace keys that need to be synced
	queue workqueue.RateLimitingInterface
	// rm is the resource manager, manages the mapping for already processed resources
	rm resourceManager
	// configHandler provides helpers to validate against the currently loaded configuration
	configHandler config.Interface
	// pMetaStore holds policy meta data for faster lookup
	pMetaStore policystore.LookupInterface
	// pvGenerator is the policy violation generator
	pvGenerator policyviolation.GeneratorInterface
}
//NewNamespaceController returns a new Controller to manage generation rules
func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
	client *client.Client,
	nsInformer v1Informer.NamespaceInformer,
	pInformer kyvernoinformer.ClusterPolicyInformer,
	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
	policyStatus policy.PolicyStatusInterface,
	eventGen event.Interface,
	configHandler config.Interface,
	pvGenerator policyviolation.GeneratorInterface,
	pMetaStore policystore.LookupInterface) *NamespaceController {
	//TODO: do we need to event recorder for this controller?
	// create the controller
	nsc := &NamespaceController{
		client:        client,
		kyvernoClient: kyvernoClient,
		eventGen:      eventGen,
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
		configHandler: configHandler,
		pMetaStore:    pMetaStore,
		pvGenerator:   pvGenerator,
	}
	// watch namespace lifecycle events
	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    nsc.addNamespace,
		UpdateFunc: nsc.updateNamespace,
		DeleteFunc: nsc.deleteNamespace,
	})
	// watch cluster policy add/update so generate rules get applied to existing namespaces
	pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    nsc.addPolicy,
		UpdateFunc: nsc.updatePolicy,
	})
	nsc.enqueueNs = nsc.enqueue
	nsc.syncHandler = nsc.syncNamespace
	nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
	nsc.nsListerSynced = nsInformer.Informer().HasSynced
	nsc.pLister = pInformer.Lister()
	// NOTE(review): pvListerSynced is wired to the policy informer, not the
	// policy violation informer — confirm this matches the intended semantics.
	nsc.pvListerSynced = pInformer.Informer().HasSynced
	nsc.pvLister = pvInformer.Lister()
	nsc.policyStatus = policyStatus
	// resource manager
	// rebuild after 300 seconds/ 5 mins
	nsc.rm = NewResourceManager(300)
	return nsc
}
2019-09-28 15:39:06 -07:00
func (nsc *NamespaceController) addPolicy(obj interface{}) {
p := obj.(*kyverno.ClusterPolicy)
// check if the policy has generate rule
if generateRuleExists(p) {
// process policy
nsc.processPolicy(p)
}
}
// updatePolicy handles the update event for cluster policies: the current
// version is processed against existing namespaces when it has a generate rule.
func (nsc *NamespaceController) updatePolicy(old, cur interface{}) {
	if p := cur.(*kyverno.ClusterPolicy); generateRuleExists(p) {
		nsc.processPolicy(p)
	}
}
2019-08-14 18:40:33 -07:00
2019-08-14 14:56:53 -07:00
func (nsc *NamespaceController) addNamespace(obj interface{}) {
ns := obj.(*v1.Namespace)
glog.V(4).Infof("Adding Namespace %s", ns.Name)
nsc.enqueueNs(ns)
}
// updateNamespace handles the update event for namespaces. Events produced
// by the informer's periodic resync (identical ResourceVersion) are ignored.
func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
	oldNs := old.(*v1.Namespace)
	curNs := cur.(*v1.Namespace)
	if curNs.ResourceVersion == oldNs.ResourceVersion {
		// Periodic resync will send update events for all known Namespace.
		// Two different versions of the same replica set will always have different RVs.
		return
	}
	// fix: log message previously misspelled "Namesapce"
	glog.V(4).Infof("Updating Namespace %s", curNs.Name)
	//TODO: anything to be done here?
}
// deleteNamespace handles the delete event for namespaces.
//
// fix: the original ignored the type-assertion result (`ns, _ :=`), so a
// cache.DeletedFinalStateUnknown tombstone (delivered when the watch missed
// the delete) would leave ns nil and panic on ns.Name. Recover the object
// from the tombstone instead.
func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
	ns, ok := obj.(*v1.Namespace)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Errorf("couldn't get object from tombstone %#v", obj)
			return
		}
		ns, ok = tombstone.Obj.(*v1.Namespace)
		if !ok {
			glog.Errorf("tombstone contained object that is not a Namespace %#v", obj)
			return
		}
	}
	glog.V(4).Infof("Deleting Namespace %s", ns.Name)
	//TODO: anything to be done here?
}
// enqueue derives the cache key for the namespace and adds it to the work
// queue; key derivation failures are logged and the event is dropped.
func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
	if key, err := cache.MetaNamespaceKeyFunc(ns); err != nil {
		glog.Error(err)
	} else {
		nsc.queue.Add(key)
	}
}
//Run to run the controller
// It waits for the namespace informer cache to sync, starts the requested
// number of worker goroutines, and blocks until stopCh is closed.
func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer nsc.queue.ShutDown()
	glog.Info("Starting namespace controller")
	defer glog.Info("Shutting down namespace controller")
	if ok := cache.WaitForCacheSync(stopCh, nsc.nsListerSynced); !ok {
		glog.Error("namespace controller: failed to sync informer cache")
		return
	}
	// fix: previously iterated over `workerCount` (not defined in this file)
	// while the `workers` parameter was ignored entirely; honor the argument.
	for i := 0; i < workers; i++ {
		go wait.Until(nsc.worker, time.Second, stopCh)
	}
	<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (nsc *NamespaceController) worker() {
	for {
		if !nsc.processNextWorkItem() {
			return
		}
	}
}
// processNextWorkItem pulls the next namespace key off the queue, runs the
// sync handler for it, and records success/failure for retry bookkeeping.
// It returns false only when the queue has been shut down.
func (nsc *NamespaceController) processNextWorkItem() bool {
	key, quit := nsc.queue.Get()
	if quit {
		return false
	}
	// mark the key as done so it can be re-queued if the sync failed
	defer nsc.queue.Done(key)
	err := nsc.syncHandler(key.(string))
	nsc.handleErr(err, key)
	return true
}
func (nsc *NamespaceController) handleErr(err error, key interface{}) {
if err == nil {
nsc.queue.Forget(key)
return
}
if nsc.queue.NumRequeues(key) < maxRetries {
2019-08-14 18:40:33 -07:00
glog.V(2).Infof("Error syncing namespace %v: %v", key, err)
2019-08-14 14:56:53 -07:00
nsc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
nsc.queue.Forget(key)
}
// syncNamespace processes a single namespace identified by its queue key:
// it fetches the namespace from the lister, skips it if it is covered by
// the configured resource filters, applies the generate rules, and reports
// the engine responses. A NotFound error is treated as success (the
// namespace was deleted after being queued).
func (nsc *NamespaceController) syncNamespace(key string) error {
	startTime := time.Now()
	glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
	defer func() {
		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
	}()
	namespace, err := nsc.nsLister.GetResource(key)
	if errors.IsNotFound(err) {
		// namespace deleted between enqueue and sync; nothing to do
		glog.V(2).Infof("namespace %v has been deleted", key)
		return nil
	}
	if err != nil {
		return err
	}
	// Deep-copy otherwise we are mutating our cache.
	// TODO: Deep-copy only when needed.
	n := namespace.DeepCopy()
	// skip processing namespace if its been filtered
	// exclude the filtered resources
	if nsc.configHandler.ToFilter("", namespace.Name, "") {
		//TODO: improve the text
		glog.V(4).Infof("excluding namespace %s as its a filtered resource", namespace.Name)
		return nil
	}
	// process generate rules
	engineResponses := nsc.processNamespace(*n)
	// report errors
	nsc.report(engineResponses)
	return nil
}