
refactor: ur cleaner controller (#3974)

* fix: move ur controller filtering in reconciler

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
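In practice, "filtering in the reconciler" means the event handlers enqueue unconditionally and the sync function applies the filter against fresh lister data. A condensed sketch of the shape this takes in the diff below (identifiers follow the diff; error handling trimmed):

func (c *controller) syncUpdateRequest(key string) error {
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	ur, err := c.urLister.Get(name)
	if apierrors.IsNotFound(err) {
		return nil // already deleted, nothing to clean up
	}
	if err != nil {
		return err
	}
	// the filter now lives here, on up-to-date lister state,
	// rather than in the informer event handler
	if ur.Status.State == kyvernov1beta1.Pending {
		return nil
	}
	return c.processUR(*ur)
}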

* fix: mark ur retry on conflict

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
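"Mark ur retry on conflict" refers to re-trying a status write when the API server returns a 409. A minimal sketch using client-go's standard helper; markForRetry, the Pending state choice, and the client wiring are illustrative assumptions, not the commit's code:

import (
	"context"

	kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
	kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
	"github.com/kyverno/kyverno/pkg/config"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/util/retry"
)

// markForRetry re-reads the UpdateRequest on every attempt so that a
// conflicting write by another worker turns into a retry instead of
// an error.
func markForRetry(ctx context.Context, client kyvernoclient.Interface, name string) error {
	return retry.RetryOnConflict(retry.DefaultRetry, func() error {
		ur, err := client.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return err
		}
		ur.Status.State = kyvernov1beta1.Pending
		_, err = client.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).UpdateStatus(ctx, ur, metav1.UpdateOptions{})
		return err
	})
}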

* fix: test data

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix: add filter back in update ur handler

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix: added some logs about attempts and increased backoff

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
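Attempt logging and a larger backoff both live at the workqueue layer. A hedged sketch: the concrete delays are illustrative, not the values the commit picked, and maxRetries/logger are the package identifiers from the diffs below:

import (
	"time"

	"k8s.io/client-go/util/workqueue"
)

// A per-item exponential backoff: first retry after 1s, doubling up
// to a 5m cap, instead of the default millisecond-range limiter.
var queue = workqueue.NewNamedRateLimitingQueue(
	workqueue.NewItemExponentialFailureRateLimiter(time.Second, 5*time.Minute),
	"generate-request-cleanup",
)

func handleErr(err error, key interface{}) {
	if err == nil {
		queue.Forget(key)
		return
	}
	if queue.NumRequeues(key) < maxRetries {
		// surface how many attempts this key has already burned
		logger.V(3).Info("retrying update request", "key", key, "attempts", queue.NumRequeues(key))
		queue.AddRateLimited(key)
		return
	}
	logger.Error(err, "dropping update request from the queue", "key", key)
	queue.Forget(key)
}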

* fix: reconciliation logic

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix: Test_Generate_Synchronize_Flag

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix: small nits

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* refactor: interface and logger

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
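This bullet covers two related moves visible in the diffs below: the exported Controller type becomes a small interface over an unexported struct, and the logger leaves the struct for a package-level variable. The interface half, sketched:

// Controller is the only exported surface; callers can Run it but
// cannot reach into its internals.
type Controller interface {
	// Run starts workers and blocks until stopCh closes
	Run(int, <-chan struct{})
}

// controller is the unexported implementation.
type controller struct {
	queue workqueue.RateLimitingInterface
	// clients, informers, listers ...
}

// NewController cannot fail, so it returns the interface directly
// rather than (*Controller, error).
func NewController( /* dependencies */ ) Controller {
	return &controller{ /* ... */ }
}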

* fix: remove useless Control and ControlInterface

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* chore: use GetObjectWithTombstone helper

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
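The helper replaces the hand-rolled tombstone handling deleted further down. A plausible implementation, inferred from the call sites and the pkg/utils/kube import in the diff (the actual body may differ):

import "k8s.io/client-go/tools/cache"

// GetObjectWithTombstone unwraps the DeletedFinalStateUnknown wrapper
// that informers deliver when a watch missed the actual delete event,
// so callers need only a single type assertion.
func GetObjectWithTombstone(obj interface{}) interface{} {
	if tombstone, ok := obj.(cache.DeletedFinalStateUnknown); ok {
		return tombstone.Obj
	}
	return obj
}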

* chore: reorder methods

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix: is not found check

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix: move check in reconcile code

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix: stop mutating cached resource in ur controller (#4003)

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
(cherry picked from commit dac733755b)
Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

Co-authored-by: shuting <shuting@nirmata.com>
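The cherry-picked fix (#4003) targets a classic informer pitfall: listers return pointers into the shared cache, so mutating them in place corrupts state for every other consumer of that informer. The safe pattern, as a sketch (the mutation shown is illustrative):

// Objects from a lister are shared; deep-copy before changing
// anything, then send the copy to the API server.
ur, err := c.urLister.Get(name)
if err != nil {
	return err
}
ur = ur.DeepCopy()
ur.Status.State = kyvernov1beta1.Pending
_, err = c.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).UpdateStatus(context.TODO(), ur, metav1.UpdateOptions{})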
Charles-Edouard Brétéché 2022-05-24 15:30:00 +02:00 committed by GitHub
parent bea0b794d5
commit 73fdbd3e76
6 changed files with 105 additions and 179 deletions

View file

@@ -295,7 +295,7 @@ func main() {
configuration,
)
grcc, err := generatecleanup.NewController(
grcc := generatecleanup.NewController(
kubeClient,
kyvernoClient,
dynamicClient,
@@ -303,12 +303,7 @@ func main() {
kyvernoV1.Policies(),
kyvernoV1beta1.UpdateRequests(),
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("GenerateCleanUpController"),
)
if err != nil {
setupLog.Error(err, "Failed to create generate cleanup controller")
os.Exit(1)
}
policyCache := policycache.NewCache()
policyCacheController := policycachecontroller.NewController(policyCache, kyvernoV1.ClusterPolicies(), kyvernoV1.Policies())

View file

@@ -1,9 +1,10 @@
package cleanup
import (
"context"
"strconv"
"time"
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
@@ -14,7 +15,9 @@ import (
pkgCommon "github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/dclient"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -29,37 +32,29 @@ const (
maxRetries = 10
)
// Controller manages life-cycle of generate-requests
type Controller struct {
// dynamic client implementation
client dclient.Interface
type Controller interface {
// Run starts workers
Run(int, <-chan struct{})
}
// typed client for kyverno CRDs
// controller manages life-cycle of generate-requests
type controller struct {
// clients
client dclient.Interface
kyvernoClient kyvernoclient.Interface
// informers
pInformer kyvernov1informers.ClusterPolicyInformer
urInformer kyvernov1beta1informers.UpdateRequestInformer
// control is used to delete the UR
control ControlInterface
// ur that need to be synced
queue workqueue.RateLimitingInterface
// pLister can list/get cluster policy from the shared informer's store
pLister kyvernov1listers.ClusterPolicyLister
// npLister can list/get namespace policy from the shared informer's store
// listers
pLister kyvernov1listers.ClusterPolicyLister
npLister kyvernov1listers.PolicyLister
// urLister can list/get update request from the shared informer's store
urLister kyvernov1beta1listers.UpdateRequestNamespaceLister
// nsLister can list/get namespaces from the shared informer's store
nsLister corev1listers.NamespaceLister
// logger
log logr.Logger
// queue
queue workqueue.RateLimitingInterface
}
// NewController returns a new controller instance to manage generate-requests
@@ -71,41 +66,42 @@ func NewController(
npInformer kyvernov1informers.PolicyInformer,
urInformer kyvernov1beta1informers.UpdateRequestInformer,
namespaceInformer corev1informers.NamespaceInformer,
log logr.Logger,
) (*Controller, error) {
c := Controller{
kyvernoClient: kyvernoclient,
) Controller {
return &controller{
client: client,
kyvernoClient: kyvernoclient,
pInformer: pInformer,
urInformer: urInformer,
pLister: pInformer.Lister(),
npLister: npInformer.Lister(),
urLister: urInformer.Lister().UpdateRequests(config.KyvernoNamespace()),
nsLister: namespaceInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "generate-request-cleanup"),
log: log,
}
c.control = Control{client: kyvernoclient}
c.pLister = pInformer.Lister()
c.npLister = npInformer.Lister()
c.urLister = urInformer.Lister().UpdateRequests(config.KyvernoNamespace())
c.nsLister = namespaceInformer.Lister()
return &c, nil
}
func (c *Controller) deletePolicy(obj interface{}) {
logger := c.log
p, ok := obj.(*kyvernov1.ClusterPolicy)
func (c *controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
logger.Info("starting")
defer logger.Info("shutting down")
c.pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deletePolicy, // we only clean up if the policy is deleted
})
c.urInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deleteUR,
})
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
func (c *controller) deletePolicy(obj interface{}) {
p, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1.ClusterPolicy)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
logger.Info("couldn't get object from tombstone", "obj", obj)
return
}
p, ok = tombstone.Obj.(*kyvernov1.ClusterPolicy)
if !ok {
logger.Info("Tombstone contained object that is not a Update Request", "obj", obj)
return
}
logger.Info("Failed to get deleted object", "obj", obj)
return
}
logger.V(4).Info("deleting policy", "name", p.Name)
@@ -143,77 +139,36 @@ func (c *Controller) deletePolicy(obj interface{}) {
}
}
func (c *Controller) deleteUR(obj interface{}) {
logger := c.log
ur, ok := obj.(*kyvernov1beta1.UpdateRequest)
func (c *controller) deleteUR(obj interface{}) {
ur, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1beta1.UpdateRequest)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
ur, ok = tombstone.Obj.(*kyvernov1beta1.UpdateRequest)
if !ok {
logger.Info("ombstone contained object that is not a Update Request", "obj", obj)
return
}
logger.Info("Failed to get deleted object", "obj", obj)
return
}
if ur.Status.Handler != "" {
return
}
c.enqueue(ur)
}
func (c *Controller) enqueue(ur *kyvernov1beta1.UpdateRequest) {
// skip enqueueing Pending requests
if ur.Status.State == kyvernov1beta1.Pending {
return
}
logger := c.log
func (c *controller) enqueue(ur *kyvernov1beta1.UpdateRequest) {
key, err := cache.MetaNamespaceKeyFunc(ur)
if err != nil {
logger.Error(err, "failed to extract key")
return
}
logger.V(5).Info("enqueue update request", "name", ur.Name)
c.queue.Add(key)
}
// Run starts the update-request re-conciliation loop
func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
logger := c.log
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
logger.Info("starting")
defer logger.Info("shutting down")
c.pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deletePolicy, // we only clean up if the policy is deleted
})
c.urInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deleteUR,
})
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just de-queues items, processes them, and marks them done.
// It enforces that the syncUpdateRequest is never invoked concurrently with the same key.
func (c *Controller) worker() {
func (c *controller) worker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
func (c *controller) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
@@ -225,8 +180,7 @@ func (c *Controller) processNextWorkItem() bool {
return true
}
func (c *Controller) handleErr(err error, key interface{}) {
logger := c.log
func (c *controller) handleErr(err error, key interface{}) {
if err == nil {
c.queue.Forget(key)
return
@@ -248,8 +202,8 @@ func (c *Controller) handleErr(err error, key interface{}) {
c.queue.Forget(key)
}
func (c *Controller) syncUpdateRequest(key string) error {
logger := c.log.WithValues("key", key)
func (c *controller) syncUpdateRequest(key string) error {
logger := logger.WithValues("key", key)
var err error
startTime := time.Now()
logger.V(4).Info("started syncing update request", "startTime", startTime)
@@ -257,37 +211,70 @@ func (c *Controller) syncUpdateRequest(key string) error {
logger.V(4).Info("finished syncing update request", "processingTIme", time.Since(startTime).String())
}()
_, urName, err := cache.SplitMetaNamespaceKey(key)
if apierrors.IsNotFound(err) {
logger.Info("update request has been deleted")
return nil
}
if err != nil {
return err
}
ur, err := c.urLister.Get(urName)
if err != nil {
if apierrors.IsNotFound(err) {
logger.Info("update request has been deleted")
return nil
}
return err
}
if ur.Status.State == kyvernov1beta1.Pending {
return nil
}
pNamespace, pName, err := cache.SplitMetaNamespaceKey(ur.Spec.Policy)
if err != nil {
return err
}
if pNamespace == "" {
_, err = c.pLister.Get(pName)
} else {
_, err = c.npLister.Policies(pNamespace).Get(pName)
}
if err != nil {
if !apierrors.IsNotFound(err) {
return err
}
logger.Error(err, "failed to get policy, deleting the update request", "key", ur.Spec.Policy)
return c.control.Delete(ur.Name)
return c.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Delete(context.TODO(), ur.Name, metav1.DeleteOptions{})
}
return c.processUR(*ur)
}
func (c *controller) processUR(ur kyvernov1beta1.UpdateRequest) error {
logger := logger.WithValues("kind", ur.Kind, "namespace", ur.Namespace, "name", ur.Name)
// 1- Corresponding policy has been deleted
// then we don't delete the generated resources
// 2- The trigger resource is deleted, then delete the generated resources
if !ownerResourceExists(logger, c.client, ur) {
deleteUR := false
// check retry count in annotation
urAnnotations := ur.Annotations
if val, ok := urAnnotations["generate.kyverno.io/retry-count"]; ok {
retryCount, err := strconv.ParseUint(val, 10, 32)
if err != nil {
logger.Error(err, "unable to convert retry-count")
return err
}
if retryCount >= 5 {
deleteUR = true
}
}
if deleteUR {
if err := deleteGeneratedResources(logger, c.client, ur); err != nil {
return err
}
// - trigger-resource is deleted
// - generated-resources are deleted
// - > Now delete the UpdateRequest CR
return c.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Delete(context.TODO(), ur.Name, metav1.DeleteOptions{})
}
}
return nil
}
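The deletion gate in processUR above reads a retry-count annotation and only removes the generated resources and the UR itself once five attempts have failed. The same logic as a standalone helper, for clarity — a sketch, not code from the commit:

import (
	"strconv"

	kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
)

// shouldDeleteUR reports whether the UpdateRequest has exhausted its
// retry budget and can be cleaned up along with its generated
// resources.
func shouldDeleteUR(ur kyvernov1beta1.UpdateRequest) (bool, error) {
	val, ok := ur.Annotations["generate.kyverno.io/retry-count"]
	if !ok {
		return false, nil
	}
	retryCount, err := strconv.ParseUint(val, 10, 32)
	if err != nil {
		return false, err
	}
	return retryCount >= 5, nil
}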

View file

@@ -0,0 +1,5 @@
package cleanup
import "sigs.k8s.io/controller-runtime/pkg/log"
var logger = log.Log.WithName("cleanup")
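The new log.go pins a single package-level logger from controller-runtime, and call sites derive scoped loggers from it instead of receiving a logr.Logger through the constructor. A usage sketch, mirroring syncUpdateRequest above:

// Derive a per-key logger from the package-level one; no logger field
// on the struct and no logger parameter in NewController.
logger := logger.WithValues("key", key)
logger.V(4).Info("started syncing update request")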

View file

@@ -1,24 +0,0 @@
package cleanup
import (
"context"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
"github.com/kyverno/kyverno/pkg/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ControlInterface manages resource deletes
type ControlInterface interface {
Delete(gr string) error
}
// Control provides implementation to manage resource
type Control struct {
client kyvernoclient.Interface
}
// Delete deletes the specified resource
func (c Control) Delete(gr string) error {
return c.client.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Delete(context.TODO(), gr, metav1.DeleteOptions{})
}

View file

@@ -1,49 +1,12 @@
package cleanup
import (
"strconv"
"github.com/go-logr/logr"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/dclient"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
func (c *Controller) processUR(ur kyvernov1beta1.UpdateRequest) error {
logger := c.log.WithValues("kind", ur.Kind, "namespace", ur.Namespace, "name", ur.Name)
// 1- Corresponding policy has been deleted
// then we don't delete the generated resources
// 2- The trigger resource is deleted, then delete the generated resources
if !ownerResourceExists(logger, c.client, ur) {
deleteUR := false
// check retry count in annotation
urAnnotations := ur.Annotations
if val, ok := urAnnotations[kyvernov1beta1.URGenerateRetryCountAnnotation]; ok {
retryCount, err := strconv.ParseUint(val, 10, 32)
if err != nil {
logger.Error(err, "unable to convert retry-count")
return err
}
if retryCount >= 5 {
deleteUR = true
}
}
if deleteUR {
if err := deleteGeneratedResources(logger, c.client, ur); err != nil {
return err
}
// - trigger-resource is deleted
// - generated-resources are deleted
// - > Now delete the UpdateRequest CR
return c.control.Delete(ur.Name)
}
}
return nil
}
func ownerResourceExists(log logr.Logger, client dclient.Interface, ur kyvernov1beta1.UpdateRequest) bool {
_, err := client.GetResource("", ur.Spec.Resource.Kind, ur.Spec.Resource.Namespace, ur.Spec.Resource.Name)
// trigger resource has been deleted

View file

@@ -35,6 +35,7 @@ const (
)
type Controller interface {
// Run starts workers
Run(int, <-chan struct{})
}
@@ -96,7 +97,6 @@ func NewController(
return &c
}
// Run starts workers
func (c *controller) Run(workers int, stopCh <-chan struct{}) {
defer runtime.HandleCrash()
defer c.queue.ShutDown()