
remove cluster and namespace PV controller

shivkumar dudhani 2019-11-26 18:21:09 -08:00
parent 678b7416c1
commit 2476940ddf
4 changed files with 0 additions and 780 deletions


@@ -142,27 +142,6 @@ func main() {
glog.Fatalf("error creating policy controller: %v\n", err)
}
// POLICY VIOLATION CONTROLLER
// policy violation cleanup if the corresponding resource is deleted
// status: lastUpdateTime
pvc, err := policyviolation.NewPolicyViolationController(
client,
pclient,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().ClusterPolicyViolations())
if err != nil {
glog.Fatalf("error creating cluster policy violation controller: %v\n", err)
}
nspvc, err := policyviolation.NewNamespacedPolicyViolationController(
client,
pclient,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().NamespacedPolicyViolations())
if err != nil {
glog.Fatalf("error creating namespaced policy violation controller: %v\n", err)
}
// GENERATE CONTROLLER
// - watches for Namespace resource and generates resource based on the policy generate rule
nsc := namespace.NewNamespaceController(
@@ -221,8 +200,6 @@ func main() {
go configData.Run(stopCh)
go policyMetaStore.Run(stopCh)
go pc.Run(1, stopCh)
go pvc.Run(1, stopCh)
go nspvc.Run(1, stopCh)
go egen.Run(1, stopCh)
go nsc.Run(1, stopCh)
go pvgen.Run(1, stopCh)


@@ -1,333 +0,0 @@
package policyviolation
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
const (
// maxRetries is the number of times a PolicyViolation will be retried before it is dropped out of the queue.
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
// a policy violation is going to be requeued:
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
)
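The backoff quoted above comes from client-go's DefaultControllerRateLimiter, which this controller wires into its work queue below via NewNamedRateLimitingQueue. As a rough illustration only (a standalone program, not part of this commit; the key name is hypothetical), printing the per-retry delay for a single item reproduces the progression in the comment:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/util/workqueue"
)

func main() {
	// The same limiter the policy violation queue is built with.
	rl := workqueue.DefaultControllerRateLimiter()
	key := "example-policy-violation" // hypothetical work-queue key

	for retry := 1; retry <= 15; retry++ {
		// When returns how long the queue waits before requeueing this
		// key after its retry-th consecutive failure: 5ms, 10ms, 20ms, ...
		fmt.Printf("retry %2d: requeue after %v\n", retry, rl.When(key))
	}

	// handleErr calls Forget once a sync succeeds, resetting the backoff.
	rl.Forget(key)
}
```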
var controllerKind = kyverno.SchemeGroupVersion.WithKind("ClusterPolicyViolation")
// PolicyViolationController manages the policy violation resource
// - sync the lastUpdateTime
// - check if the resource is active
type PolicyViolationController struct {
client *client.Client
kyvernoClient *kyvernoclient.Clientset
eventRecorder record.EventRecorder
syncHandler func(pKey string) error
enqueuePolicyViolation func(policy *kyverno.ClusterPolicyViolation)
// PolicyViolations that need to be synced
queue workqueue.RateLimitingInterface
// pvLister can list/get policy violation from the shared informer's store
pvLister kyvernolister.ClusterPolicyViolationLister
// pLister can list/get policy from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// pListerSynced returns true if the Policy store has been synced at least once
pListerSynced cache.InformerSynced
// pvListerSynced returns true if the PolicyViolation store has been synced at least once
pvListerSynced cache.InformerSynced
//pvControl is used for updating status/cleanup policy violation
pvControl PVControlInterface
}
//NewPolicyViolationController creates a new PolicyViolationController
func NewPolicyViolationController(client *client.Client, kyvernoClient *kyvernoclient.Clientset, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer) (*PolicyViolationController, error) {
// Event broadcaster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventInterface, err := client.GetEventsInterface()
if err != nil {
return nil, err
}
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
pvc := PolicyViolationController{
kyvernoClient: kyvernoClient,
client: client,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policyviolation_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policyviolation"),
}
pvc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pvc.eventRecorder}
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pvc.addPolicyViolation,
UpdateFunc: pvc.updatePolicyViolation,
DeleteFunc: pvc.deletePolicyViolation,
})
pvc.enqueuePolicyViolation = pvc.enqueue
pvc.syncHandler = pvc.syncPolicyViolation
pvc.pLister = pInformer.Lister()
pvc.pvLister = pvInformer.Lister()
pvc.pListerSynced = pInformer.Informer().HasSynced
pvc.pvListerSynced = pvInformer.Informer().HasSynced
return &pvc, nil
}
func (pvc *PolicyViolationController) addPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.ClusterPolicyViolation)
glog.V(4).Infof("Adding PolicyViolation %s", pv.Name)
pvc.enqueuePolicyViolation(pv)
}
func (pvc *PolicyViolationController) updatePolicyViolation(old, cur interface{}) {
oldPv := old.(*kyverno.ClusterPolicyViolation)
curPv := cur.(*kyverno.ClusterPolicyViolation)
glog.V(4).Infof("Updating Policy Violation %s", oldPv.Name)
if err := pvc.syncLastUpdateTimeStatus(curPv, oldPv); err != nil {
glog.Errorf("Failed to update lastUpdateTime in PolicyViolation %s status: %v", curPv.Name, err)
}
pvc.enqueuePolicyViolation(curPv)
}
func (pvc *PolicyViolationController) deletePolicyViolation(obj interface{}) {
pv, ok := obj.(*kyverno.ClusterPolicyViolation)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
pv, ok = tombstone.Obj.(*kyverno.ClusterPolicyViolation)
if !ok {
glog.Info(fmt.Errorf("Tombstone contained object that is not a PolicyViolation %#v", obj))
return
}
}
glog.V(4).Infof("Deleting PolicyViolation %s", pv.Name)
pvc.enqueuePolicyViolation(pv)
}
func (pvc *PolicyViolationController) enqueue(policyViolation *kyverno.ClusterPolicyViolation) {
key, err := cache.MetaNamespaceKeyFunc(policyViolation)
if err != nil {
glog.Error(err)
return
}
pvc.queue.Add(key)
}
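Work items are keyed with cache.MetaNamespaceKeyFunc. Cluster-scoped violations have no namespace, so the key is just the name (which is why syncPolicyViolation below can call pvLister.Get(key) directly), while the namespaced controller later splits its "namespace/name" keys with cache.SplitMetaNamespaceKey. A small standalone sketch of the key format (illustrative, with made-up names):

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/cache"
)

func main() {
	// Cluster-scoped object: no namespace, so the key is just the name.
	clusterScoped := &metav1.ObjectMeta{Name: "pv-cluster-1"}
	// Namespaced object: the key becomes "namespace/name".
	namespaced := &metav1.ObjectMeta{Namespace: "team-a", Name: "pv-ns-1"}

	k1, _ := cache.MetaNamespaceKeyFunc(clusterScoped)
	k2, _ := cache.MetaNamespaceKeyFunc(namespaced)
	fmt.Println(k1) // pv-cluster-1
	fmt.Println(k2) // team-a/pv-ns-1

	// The namespaced controller reverses the encoding in its sync handler.
	ns, name, _ := cache.SplitMetaNamespaceKey(k2)
	fmt.Println(ns, name) // team-a pv-ns-1
}
```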
// Run begins watching and syncing.
func (pvc *PolicyViolationController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer pvc.queue.ShutDown()
glog.Info("Starting policyviolation controller")
defer glog.Info("Shutting down policyviolation controller")
if !cache.WaitForCacheSync(stopCh, pvc.pListerSynced, pvc.pvListerSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(pvc.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (pvc *PolicyViolationController) worker() {
for pvc.processNextWorkItem() {
}
}
func (pvc *PolicyViolationController) processNextWorkItem() bool {
key, quit := pvc.queue.Get()
if quit {
return false
}
defer pvc.queue.Done(key)
err := pvc.syncHandler(key.(string))
pvc.handleErr(err, key)
return true
}
func (pvc *PolicyViolationController) handleErr(err error, key interface{}) {
if err == nil {
pvc.queue.Forget(key)
return
}
if pvc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing PolicyViolation %v: %v", key, err)
pvc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping policyviolation %q out of the queue: %v", key, err)
pvc.queue.Forget(key)
}
func (pvc *PolicyViolationController) syncPolicyViolation(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing policy violation %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing cluster policy violation %q (%v)", key, time.Since(startTime))
}()
policyViolation, err := pvc.pvLister.Get(key)
if errors.IsNotFound(err) {
glog.V(2).Infof("PolicyViolation %v has been deleted", key)
return nil
}
if err != nil {
return err
}
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
pv := policyViolation.DeepCopy()
// Check if the policy violation resource is active
if err := pvc.syncActiveResource(pv); err != nil {
return err
}
// If the policy violation is on a resource owner,
// check if the resource owner is active
if err := pvc.syncBlockedResource(pv); err != nil {
return err
}
return pvc.syncStatusOnly(pv)
}
func (pvc *PolicyViolationController) syncActiveResource(curPv *kyverno.ClusterPolicyViolation) error {
// check if the resource is still active
rspec := curPv.Spec.ResourceSpec
// get resource
_, err := pvc.client.GetResource(rspec.Kind, "", rspec.Name)
if errors.IsNotFound(err) {
if err := pvc.pvControl.RemovePolicyViolation(curPv.Name); err != nil {
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
return err
}
glog.V(4).Infof("removing policy violation %s as the corresponding resource %s/%s does not exist anymore", curPv.Name, rspec.Kind, rspec.Name)
return nil
}
if err != nil {
glog.V(4).Infof("error while retrieved resource %s/%s: %v", rspec.Kind, rspec.Name, err)
return err
}
return nil
}
// syncBlockedResource removes an inactive policy violation
// once the previously blocked (rejected) resource is created in the cluster
func (pvc *PolicyViolationController) syncBlockedResource(curPv *kyverno.ClusterPolicyViolation) error {
for _, violatedRule := range curPv.Spec.ViolatedRules {
if reflect.DeepEqual(violatedRule.ManagedResource, kyverno.ManagedResourceSpec{}) {
continue
}
// get resource
blockedResource := violatedRule.ManagedResource
resources, _ := pvc.client.ListResource(blockedResource.Kind, "", nil)
for _, resource := range resources.Items {
glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
owners := map[kyverno.ResourceSpec]interface{}{}
GetOwner(pvc.client, owners, resource)
// owner of resource matches violation resourceSpec
// remove policy violation as the blocked request got created
if _, ok := owners[curPv.Spec.ResourceSpec]; ok {
// pod -> replicaset1; deploy -> replicaset2
// if replicaset1 != replicaset2, the pod is
// no longer an active child of deploy, skip removing pv
if !validDependantForDeployment(pvc.client.GetAppsV1Interface(), curPv.Spec.ResourceSpec, resource) {
glog.V(4).Infof("")
continue
}
// resource created, remove policy violation
if err := pvc.pvControl.RemovePolicyViolation(curPv.Name); err != nil {
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
return err
}
glog.V(4).Infof("removed policy violation %s as the blocked resource %s successfully created, owner: %s",
curPv.Name, blockedResource.Kind, strings.ReplaceAll(curPv.Spec.ResourceSpec.ToKey(), ".", "/"))
}
}
}
return nil
}
//syncStatusOnly updates the policyviolation status subresource
// status:
func (pvc *PolicyViolationController) syncStatusOnly(curPv *kyverno.ClusterPolicyViolation) error {
// newStatus := calculateStatus(pv)
return nil
}
//TODO: think this through again
//syncLastUpdateTimeStatus updates the policyviolation lastUpdateTime if anything in ViolationSpec changed
// - lastUpdateTime : (time stamp when the policy violation changed)
func (pvc *PolicyViolationController) syncLastUpdateTimeStatus(curPv *kyverno.ClusterPolicyViolation, oldPv *kyverno.ClusterPolicyViolation) error {
// check if there is any change in policy violation information
if !updated(curPv, oldPv) {
return nil
}
// update the lastUpdateTime
newPolicyViolation := curPv
newPolicyViolation.Status = kyverno.PolicyViolationStatus{LastUpdateTime: metav1.Now()}
return pvc.pvControl.UpdateStatusPolicyViolation(newPolicyViolation)
}
func updated(curPv *kyverno.ClusterPolicyViolation, oldPv *kyverno.ClusterPolicyViolation) bool {
return !reflect.DeepEqual(curPv.Spec, oldPv.Spec)
//TODO: should we also update the lastUpdateTime when the owner reference changes?
}
type PVControlInterface interface {
UpdateStatusPolicyViolation(newPv *kyverno.ClusterPolicyViolation) error
RemovePolicyViolation(name string) error
}
// RealPVControl is the default implementation of PVControlInterface.
type RealPVControl struct {
Client kyvernoclient.Interface
Recorder record.EventRecorder
}
//UpdateStatusPolicyViolation updates the status for policy violation
func (r RealPVControl) UpdateStatusPolicyViolation(newPv *kyverno.ClusterPolicyViolation) error {
_, err := r.Client.KyvernoV1().ClusterPolicyViolations().UpdateStatus(newPv)
return err
}
//RemovePolicyViolation removes the policy violation
func (r RealPVControl) RemovePolicyViolation(name string) error {
return r.Client.KyvernoV1().ClusterPolicyViolations().Delete(name, &metav1.DeleteOptions{})
}
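The pvControl indirection (PVControlInterface above) keeps the API calls behind an interface so the sync logic can be exercised without a live cluster. A hypothetical in-memory fake (illustrative only; the type and field names are not part of this commit or the repository) would satisfy the interface like this:

```go
package policyviolation

import (
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

// fakePVControl records what the sync logic asked for instead of calling
// the Kyverno API server.
type fakePVControl struct {
	statusUpdated []string // names of violations whose status was updated
	removed       []string // names of violations that were deleted
}

func (f *fakePVControl) UpdateStatusPolicyViolation(newPv *kyverno.ClusterPolicyViolation) error {
	f.statusUpdated = append(f.statusUpdated, newPv.Name)
	return nil
}

func (f *fakePVControl) RemovePolicyViolation(name string) error {
	f.removed = append(f.removed, name)
	return nil
}
```

A package-level test could then set pvc.pvControl = &fakePVControl{} and assert on removed after driving syncActiveResource.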


@@ -1,91 +0,0 @@
package policyviolation
import (
"fmt"
"reflect"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
deployutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
ls := &metav1.LabelSelector{}
err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
if err != nil {
return nil, err
}
policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
return nil, fmt.Errorf("invalid label selector: %v", err)
}
return policyViolationSelector, nil
}
// validDependantForDeployment checks if resource (pod) matches the intent of the given deployment
// explicitly handles deployment-replicaset-pod relationship
func validDependantForDeployment(client appsv1.AppsV1Interface, pvResourceSpec kyverno.ResourceSpec, resource unstructured.Unstructured) bool {
if resource.GetKind() != "Pod" {
return false
}
// only handles the deployment-replicaset-pod relationship
if pvResourceSpec.Kind != "Deployment" {
return false
}
owner := kyverno.ResourceSpec{
Kind: pvResourceSpec.Kind,
Name: pvResourceSpec.Name,
}
start := time.Now()
deploy, err := client.Deployments(resource.GetNamespace()).Get(owner.Name, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.Kind, resource.GetNamespace(), owner.Name, err)
return false
}
glog.V(4).Infof("Time getting deployment %v", time.Since(start))
// TODO(shuting): replace typed client AppsV1Interface
expectReplicaset, err := deployutil.GetNewReplicaSet(deploy, client)
if err != nil {
glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.Kind, resource.GetNamespace(), owner.Name, err)
return false
}
if reflect.DeepEqual(expectReplicaset, v1.ReplicaSet{}) {
glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", resource.GetNamespace(), owner.Kind, owner.Name)
return false
}
var actualReplicaset *v1.ReplicaSet
for _, podOwner := range resource.GetOwnerReferences() {
if podOwner.Kind != "ReplicaSet" {
continue
}
actualReplicaset, err = client.ReplicaSets(resource.GetNamespace()).Get(podOwner.Name, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get replicaset from %s/%s/%s: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
return false
}
if reflect.DeepEqual(actualReplicaset, v1.ReplicaSet{}) {
glog.V(2).Infof("no replicaset found for Pod/%s/%s", resource.GetNamespace(), podOwner.Name)
return false
}
if expectReplicaset.Name == actualReplicaset.Name {
return true
}
}
return false
}
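validDependantForDeployment leans on the ownerReferences the Kubernetes controllers set: a Pod created through a Deployment is owned by a ReplicaSet, and that ReplicaSet is compared against the Deployment's current one. A standalone sketch (hypothetical object names) of the reference the function reads from the unstructured Pod:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func main() {
	// A Pod created via a hypothetical Deployment "nginx": its ownerReference
	// points at the managing ReplicaSet, which is the link compared with the
	// Deployment's current ReplicaSet.
	pod := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Pod",
		"metadata": map[string]interface{}{
			"name":      "nginx-7bb7cd8db5-abcde",
			"namespace": "default",
		},
	}}
	pod.SetOwnerReferences([]metav1.OwnerReference{{
		APIVersion: "apps/v1",
		Kind:       "ReplicaSet",
		Name:       "nginx-7bb7cd8db5",
	}})

	for _, ref := range pod.GetOwnerReferences() {
		fmt.Printf("Pod %s/%s is owned by %s %s\n",
			pod.GetNamespace(), pod.GetName(), ref.Kind, ref.Name)
	}
}
```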


@@ -1,333 +0,0 @@
package policyviolation
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
var nspvcontrollerKind = kyverno.SchemeGroupVersion.WithKind("NamespacedPolicyViolation")
// NamespacedPolicyViolationController manages the namespaced policy violation resource
// - sync the lastUpdateTime
// - check if the resource is active
type NamespacedPolicyViolationController struct {
client *client.Client
kyvernoClient *kyvernoclient.Clientset
eventRecorder record.EventRecorder
syncHandler func(pKey string) error
enqueuePolicyViolation func(policy *kyverno.NamespacedPolicyViolation)
// PolicyViolations that need to be synced
queue workqueue.RateLimitingInterface
// nspvLister can list/get policy violation from the shared informer's store
nspvLister kyvernolister.NamespacedPolicyViolationLister
// pLister can list/get policy from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// pListerSynced returns true if the Policy store has been synced at least once
pListerSynced cache.InformerSynced
// nspvListerSynced returns true if the NamespacedPolicyViolation store has been synced at least once
nspvListerSynced cache.InformerSynced
//pvControl is used for updating status/cleanup policy violation
pvControl NamespacedPVControlInterface
}
//NewNamespacedPolicyViolationController creates a new NamespacedPolicyViolationController
func NewNamespacedPolicyViolationController(client *client.Client, kyvernoClient *kyvernoclient.Clientset, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.NamespacedPolicyViolationInformer) (*NamespacedPolicyViolationController, error) {
// Event broadcaster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventInterface, err := client.GetEventsInterface()
if err != nil {
return nil, err
}
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
pvc := NamespacedPolicyViolationController{
kyvernoClient: kyvernoClient,
client: client,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ns_policyviolation_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ns_policyviolation"),
}
pvc.pvControl = RealNamespacedPVControl{Client: kyvernoClient, Recorder: pvc.eventRecorder}
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pvc.addPolicyViolation,
UpdateFunc: pvc.updatePolicyViolation,
DeleteFunc: pvc.deletePolicyViolation,
})
pvc.enqueuePolicyViolation = pvc.enqueue
pvc.syncHandler = pvc.syncPolicyViolation
pvc.pLister = pInformer.Lister()
pvc.nspvLister = pvInformer.Lister()
pvc.pListerSynced = pInformer.Informer().HasSynced
pvc.nspvListerSynced = pvInformer.Informer().HasSynced
return &pvc, nil
}
func (pvc *NamespacedPolicyViolationController) addPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.NamespacedPolicyViolation)
glog.V(4).Infof("Adding Namespaced Policy Violation %s", pv.Name)
pvc.enqueuePolicyViolation(pv)
}
func (pvc *NamespacedPolicyViolationController) updatePolicyViolation(old, cur interface{}) {
oldPv := old.(*kyverno.NamespacedPolicyViolation)
curPv := cur.(*kyverno.NamespacedPolicyViolation)
glog.V(4).Infof("Updating Namespaced Policy Violation %s", oldPv.Name)
if err := pvc.syncLastUpdateTimeStatus(curPv, oldPv); err != nil {
glog.Errorf("Failed to update lastUpdateTime in NamespacedPolicyViolation %s status: %v", curPv.Name, err)
}
pvc.enqueuePolicyViolation(curPv)
}
func (pvc *NamespacedPolicyViolationController) deletePolicyViolation(obj interface{}) {
pv, ok := obj.(*kyverno.NamespacedPolicyViolation)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
pv, ok = tombstone.Obj.(*kyverno.NamespacedPolicyViolation)
if !ok {
glog.Info(fmt.Errorf("Tombstone contained object that is not a NamespacedPolicyViolation %#v", obj))
return
}
}
glog.V(4).Infof("Deleting NamespacedPolicyViolation %s", pv.Name)
pvc.enqueuePolicyViolation(pv)
}
func (pvc *NamespacedPolicyViolationController) enqueue(policyViolation *kyverno.NamespacedPolicyViolation) {
key, err := cache.MetaNamespaceKeyFunc(policyViolation)
if err != nil {
glog.Error(err)
return
}
pvc.queue.Add(key)
}
// Run begins watching and syncing.
func (pvc *NamespacedPolicyViolationController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer pvc.queue.ShutDown()
glog.Info("Starting Namespaced policyviolation controller")
defer glog.Info("Shutting down Namespaced policyviolation controller")
if !cache.WaitForCacheSync(stopCh, pvc.pListerSynced, pvc.nspvListerSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(pvc.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (pvc *NamespacedPolicyViolationController) worker() {
for pvc.processNextWorkItem() {
}
}
func (pvc *NamespacedPolicyViolationController) processNextWorkItem() bool {
key, quit := pvc.queue.Get()
if quit {
return false
}
defer pvc.queue.Done(key)
err := pvc.syncHandler(key.(string))
pvc.handleErr(err, key)
return true
}
func (pvc *NamespacedPolicyViolationController) handleErr(err error, key interface{}) {
if err == nil {
pvc.queue.Forget(key)
return
}
if pvc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing PolicyViolation %v: %v", key, err)
pvc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping policyviolation %q out of the queue: %v", key, err)
pvc.queue.Forget(key)
}
func (pvc *NamespacedPolicyViolationController) syncPolicyViolation(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing policy violation %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing namespaced policy violation %q (%v)", key, time.Since(startTime))
}()
// key format: NAMESPACE/NAME
ns, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return fmt.Errorf("error getting namespaced policy violation key %v", key)
}
policyViolation, err := pvc.nspvLister.NamespacedPolicyViolations(ns).Get(name)
if errors.IsNotFound(err) {
glog.V(2).Infof("PolicyViolation %v has been deleted", key)
return nil
}
if err != nil {
return err
}
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
pv := policyViolation.DeepCopy()
if err := pvc.syncActiveResource(pv); err != nil {
glog.V(4).Infof("not syncing policy violation status")
return err
}
// cleanup pv with dependant
if err := pvc.syncBlockedResource(pv); err != nil {
return err
}
return pvc.syncStatusOnly(pv)
}
func (pvc *NamespacedPolicyViolationController) syncActiveResource(curPv *kyverno.NamespacedPolicyViolation) error {
// check if the resource is still active
rspec := curPv.Spec.ResourceSpec
// get resource
_, err := pvc.client.GetResource(rspec.Kind, curPv.Namespace, rspec.Name)
if errors.IsNotFound(err) {
// TODO: does it help to retry?
// resource is not found
// remove the violation
if err := pvc.pvControl.RemovePolicyViolation(curPv.Namespace, curPv.Name); err != nil {
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
return err
}
glog.V(4).Infof("removing policy violation %s as the corresponding resource %s/%s/%s does not exist anymore", curPv.Name, rspec.Kind, curPv.Namespace, rspec.Name)
return nil
}
if err != nil {
glog.V(4).Infof("error while retrieved resource %s/%s/%s: %v", rspec.Kind, curPv.Namespace, rspec.Name, err)
return err
}
return nil
}
// syncBlockedResource removes an inactive policy violation
// once the previously blocked (rejected) resource is created in the cluster
func (pvc *NamespacedPolicyViolationController) syncBlockedResource(curPv *kyverno.NamespacedPolicyViolation) error {
for _, violatedRule := range curPv.Spec.ViolatedRules {
if reflect.DeepEqual(violatedRule.ManagedResource, kyverno.ManagedResourceSpec{}) {
continue
}
// get resource
blockedResource := violatedRule.ManagedResource
resources, _ := pvc.client.ListResource(blockedResource.Kind, curPv.Namespace, nil)
for _, resource := range resources.Items {
glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
owners := map[kyverno.ResourceSpec]interface{}{}
GetOwner(pvc.client, owners, resource)
// owner of resource matches violation resourceSpec
// remove policy violation as the blocked request got created
if _, ok := owners[curPv.Spec.ResourceSpec]; ok {
// pod -> replicaset1; deploy -> replicaset2
// if replicaset1 != replicaset2, the pod is
// no longer an active child of deploy, skip removing pv
if !validDependantForDeployment(pvc.client.GetAppsV1Interface(), curPv.Spec.ResourceSpec, resource) {
glog.V(4).Infof("")
continue
}
// resource created, remove policy violation
if err := pvc.pvControl.RemovePolicyViolation(curPv.Namespace, curPv.Name); err != nil {
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
return err
}
glog.V(4).Infof("removed policy violation %s as the blocked resource %s/%s successfully created, owner: %s",
curPv.Name, blockedResource.Kind, curPv.Namespace, strings.ReplaceAll(curPv.Spec.ResourceSpec.ToKey(), ".", "/"))
}
}
}
return nil
}
//syncStatusOnly updates the policyviolation status subresource
// status:
func (pvc *NamespacedPolicyViolationController) syncStatusOnly(curPv *kyverno.NamespacedPolicyViolation) error {
// newStatus := calculateStatus(pv)
return nil
}
//TODO: think this through again
//syncLastUpdateTimeStatus updates the policyviolation lastUpdateTime if anything in ViolationSpec changed
// - lastUpdateTime : (time stamp when the policy violation changed)
func (pvc *NamespacedPolicyViolationController) syncLastUpdateTimeStatus(curPv *kyverno.NamespacedPolicyViolation, oldPv *kyverno.NamespacedPolicyViolation) error {
// check if there is any change in policy violation information
if !updatedNamespaced(curPv, oldPv) {
return nil
}
// update the lastUpdateTime
newPolicyViolation := curPv
newPolicyViolation.Status = kyverno.PolicyViolationStatus{LastUpdateTime: metav1.Now()}
return pvc.pvControl.UpdateStatusPolicyViolation(newPolicyViolation)
}
func updatedNamespaced(curPv *kyverno.NamespacedPolicyViolation, oldPv *kyverno.NamespacedPolicyViolation) bool {
return !reflect.DeepEqual(curPv.Spec, oldPv.Spec)
//TODO: should we also update the lastUpdateTime when the owner reference changes?
}
type NamespacedPVControlInterface interface {
UpdateStatusPolicyViolation(newPv *kyverno.NamespacedPolicyViolation) error
RemovePolicyViolation(ns, name string) error
}
// RealNamespacedPVControl is the default implementation of NamespacedPVControlInterface.
type RealNamespacedPVControl struct {
Client kyvernoclient.Interface
Recorder record.EventRecorder
}
//UpdateStatusPolicyViolation updates the status for policy violation
func (r RealNamespacedPVControl) UpdateStatusPolicyViolation(newPv *kyverno.NamespacedPolicyViolation) error {
_, err := r.Client.KyvernoV1().NamespacedPolicyViolations(newPv.Namespace).UpdateStatus(newPv)
return err
}
//RemovePolicyViolation removes the policy violation
func (r RealNamespacedPVControl) RemovePolicyViolation(ns, name string) error {
return r.Client.KyvernoV1().NamespacedPolicyViolations(ns).Delete(name, &metav1.DeleteOptions{})
}