package policycontroller

import (
	"fmt"
	"log"
	"time"

	kubeClient "github.com/nirmata/kube-policy/kubeclient"
	types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
	policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
	infomertypes "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1"
	lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
	violation "github.com/nirmata/kube-policy/pkg/violation"
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)
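
// The constructor and worker loop below reference three package-level identifiers
// (policyWorkQueueName, policyControllerWorkerCount, policyWorkQueueRetryLimit)
// that are declared elsewhere in this package. A minimal sketch of those
// declarations is shown here for reference only; the values are assumptions,
// not the actual ones used by the project:
//
//	const (
//		policyWorkQueueName         = "policy-queue" // assumed queue name
//		policyControllerWorkerCount = 2              // assumed worker count
//		policyWorkQueueRetryLimit   = 5              // retry limit checked in handleErr
//	)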

// PolicyController watches the Policy CRD and processes policies from a
// rate-limited work queue.
type PolicyController struct {
	kubeClient       *kubeClient.KubeClient
	policyLister     lister.PolicyLister
	policyInterface  policyclientset.Interface
	policySynced     cache.InformerSynced
	violationBuilder violation.PolicyViolationGenerator
	logger           *log.Logger
	queue            workqueue.RateLimitingInterface
}

// NewPolicyController creates a controller for the Policy CRD from the provided
// clients, informer, violation builder, and logger, and registers the policy
// event handlers with the shared informer.
func NewPolicyController(policyInterface policyclientset.Interface,
	policyInformer infomertypes.PolicyInformer,
	violationBuilder violation.PolicyViolationGenerator,
	logger *log.Logger,
	kubeClient *kubeClient.KubeClient) *PolicyController {

	controller := &PolicyController{
		kubeClient:       kubeClient,
		policyLister:     policyInformer.Lister(),
		policyInterface:  policyInterface,
		policySynced:     policyInformer.Informer().HasSynced,
		violationBuilder: violationBuilder,
		logger:           logger,
		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), policyWorkQueueName),
		// TODO: add an event builder, used to record events when a policy cannot be
		// processed; using an event builder lets us restrict the event types emitted.
	}

	policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    controller.createPolicyHandler,
		UpdateFunc: controller.updatePolicyHandler,
		DeleteFunc: controller.deletePolicyHandler,
	})
	return controller
}

func (pc *PolicyController) createPolicyHandler(resource interface{}) {
	pc.enqueuePolicy(resource)
}

func (pc *PolicyController) updatePolicyHandler(oldResource, newResource interface{}) {
	newPolicy := newResource.(*types.Policy)
	oldPolicy := oldResource.(*types.Policy)
	// Periodic resyncs deliver updates with an unchanged ResourceVersion; skip them.
	if newPolicy.ResourceVersion == oldPolicy.ResourceVersion {
		return
	}
	pc.enqueuePolicy(newResource)
}

func (pc *PolicyController) deletePolicyHandler(resource interface{}) {
	var object metav1.Object
	var ok bool
	if object, ok = resource.(metav1.Object); !ok {
		utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
		return
	}
	pc.logger.Printf("policy deleted: %s", object.GetName())
}

// enqueuePolicy adds the policy's "namespace/name" key to the work queue;
// syncHandler splits the key back apart when the item is processed.
func (pc *PolicyController) enqueuePolicy(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		utilruntime.HandleError(err)
		return
	}
	pc.queue.Add(key)
}

// Run is the main controller thread: it waits for the informer caches to sync,
// then launches the worker goroutines and blocks until stopCh is closed.
func (pc *PolicyController) Run(stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	defer pc.queue.ShutDown()

	pc.logger.Printf("starting policy controller")

	pc.logger.Printf("waiting for informer caches to sync")
	if ok := cache.WaitForCacheSync(stopCh, pc.policySynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	pc.logger.Println("starting policy controller workers")
	for i := 0; i < policyControllerWorkerCount; i++ {
		go wait.Until(pc.runWorker, time.Second, stopCh)
	}

	pc.logger.Println("started policy controller workers")
	<-stopCh
	pc.logger.Println("shutting down policy controller workers")
	return nil
}

// runWorker is a long-running function that continually calls
// processNextWorkItem to read and process messages from the workqueue.
func (pc *PolicyController) runWorker() {
	for pc.processNextWorkItem() {
	}
}

// processNextWorkItem reads a single work item off the workqueue and
// attempts to process it by calling the syncHandler.
func (pc *PolicyController) processNextWorkItem() bool {
	obj, shutdown := pc.queue.Get()
	if shutdown {
		return false
	}

	err := func(obj interface{}) error {
		// Always mark the item Done so the queue knows it has been processed,
		// even if syncHandler failed; handleErr decides whether to requeue it.
		defer pc.queue.Done(obj)
		err := pc.syncHandler(obj)
		pc.handleErr(err, obj)
		return nil
	}(obj)
	if err != nil {
		utilruntime.HandleError(err)
	}
	return true
}

// handleErr requeues the key while it still has retries left and drops it once
// the retry limit is reached.
func (pc *PolicyController) handleErr(err error, key interface{}) {
	if err == nil {
		pc.queue.Forget(key)
		return
	}

	// Retry up to policyWorkQueueRetryLimit times if something goes wrong.
	// After that, stop trying.
	if pc.queue.NumRequeues(key) < policyWorkQueueRetryLimit {
		pc.logger.Printf("error syncing policy %v: %v", key, err)

		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		pc.queue.AddRateLimited(key)
		return
	}

	pc.queue.Forget(key)
	// Report to an external entity that, even after several retries, we could not
	// successfully process this key.
	utilruntime.HandleError(err)
	pc.logger.Printf("dropping the key %q out of the queue: %v", key, err)
}

// syncHandler fetches the Policy identified by the workqueue key and processes it.
func (pc *PolicyController) syncHandler(obj interface{}) error {
	var key string
	var ok bool
	if key, ok = obj.(string); !ok {
		return fmt.Errorf("expected string in workqueue but got %#v", obj)
	}
	// Convert the namespace/name string into distinct namespace and name.
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key))
		return nil
	}

	// Get the Policy resource with this namespace/name.
	policy, err := pc.policyLister.Policies(namespace).Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			utilruntime.HandleError(fmt.Errorf("policy '%s' in work queue no longer exists", key))
			return nil
		}
		return err
	}
	// TODO: process the policy against existing resources, pass the violations to
	// the violation builder and the events to the event builder.
	fmt.Println(policy)
	return nil
}
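
// Example wiring from a caller such as main(): a minimal sketch only. The shared
// informer factory accessors (externalversions.NewSharedInformerFactory and the
// Nirmata().V1alpha1().Policies() chain) are assumptions based on typical
// code-generator output and may differ in this repository:
//
//	factory := externalversions.NewSharedInformerFactory(policyClientset, 30*time.Second)
//	policyInformer := factory.Nirmata().V1alpha1().Policies()
//	controller := NewPolicyController(policyClientset, policyInformer, violationBuilder, logger, kubeClient)
//	factory.Start(stopCh)
//	if err := controller.Run(stopCh); err != nil {
//		logger.Fatalf("error running policy controller: %v", err)
//	}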