refactor: to remove generate cleanup controller (#4041)

- refactored cleanup controller
- handle delete policy

Signed-off-by: Mritunjay Sharma <mritunjaysharma394@gmail.com>

parent c1b1cbb7da
commit c07f6bd8a8

5 changed files with 59 additions and 344 deletions
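Context for the hunks below: the standalone generate-request cleanup controller under pkg/background/generate/cleanup is removed from the command entrypoint, and its policy-deletion handling moves into the remaining background update-request controller. Both controllers rely on the standard client-go informer/workqueue pattern: a DeleteFunc handler enqueues a key, and workers drain the queue with rate-limited retries. The following is a minimal, generic sketch of that pattern only; the names (cleanupWatcher, newCleanupWatcher) and the ConfigMap informer used as a stand-in resource are illustrative assumptions, not Kyverno code.

// Sketch of the informer/workqueue cleanup pattern used by the controllers in this diff.
// All identifiers here are hypothetical; only the client-go APIs are real.
package main

import (
    "fmt"
    "time"

    "k8s.io/apimachinery/pkg/util/wait"
    "k8s.io/client-go/informers"
    "k8s.io/client-go/kubernetes/fake"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

const maxRetries = 5

type cleanupWatcher struct {
    queue workqueue.RateLimitingInterface
}

func newCleanupWatcher(factory informers.SharedInformerFactory) *cleanupWatcher {
    w := &cleanupWatcher{
        queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "cleanup"),
    }
    // Only deletions are interesting: cleanup runs when the watched object goes away.
    factory.Core().V1().ConfigMaps().Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        DeleteFunc: func(obj interface{}) {
            if key, err := cache.DeletionHandlingMetaNamespaceKeyFunc(obj); err == nil {
                w.queue.Add(key)
            }
        },
    })
    return w
}

func (w *cleanupWatcher) run(workers int, stopCh <-chan struct{}) {
    defer w.queue.ShutDown()
    for i := 0; i < workers; i++ {
        go wait.Until(w.worker, time.Second, stopCh)
    }
    <-stopCh
}

func (w *cleanupWatcher) worker() {
    for {
        key, quit := w.queue.Get()
        if quit {
            return
        }
        err := w.cleanup(key.(string))
        if err == nil {
            w.queue.Forget(key)
        } else if w.queue.NumRequeues(key) < maxRetries {
            w.queue.AddRateLimited(key) // transient failure: retry with backoff
        } else {
            w.queue.Forget(key) // give up after maxRetries
        }
        w.queue.Done(key)
    }
}

func (w *cleanupWatcher) cleanup(key string) error {
    fmt.Println("cleaning up after deleted object", key)
    return nil
}

func main() {
    stopCh := make(chan struct{})
    defer close(stopCh)
    factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(), 0)
    w := newCleanupWatcher(factory)
    factory.Start(stopCh)
    go w.run(1, stopCh)
    time.Sleep(time.Second)
}

In this commit the same pattern survives only in the background update-request controller, which now also registers the policy DeleteFunc that previously lived in the deleted cleanup package.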
@@ -12,7 +12,6 @@ import (
    "time"

    "github.com/kyverno/kyverno/pkg/background"
    generatecleanup "github.com/kyverno/kyverno/pkg/background/generate/cleanup"
    kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
    "github.com/kyverno/kyverno/pkg/clients/dclient"
    kyvernoclient "github.com/kyverno/kyverno/pkg/clients/wrappers"
@@ -358,15 +357,6 @@ func main() {
        configuration,
    )

    grcc := generatecleanup.NewController(
        kyvernoClient,
        dynamicClient,
        kyvernoV1.ClusterPolicies(),
        kyvernoV1.Policies(),
        kyvernoV1beta1.UpdateRequests(),
        kubeInformer.Core().V1().Namespaces(),
    )

    policyCache := policycache.NewCache()
    policyCacheController := policycachecontroller.NewController(policyCache, kyvernoV1.ClusterPolicies(), kyvernoV1.Policies())
@@ -491,7 +481,6 @@ func main() {
        go certManager.Run(stopCh)
        go policyCtrl.Run(2, prgen.ReconcileCh, reportReqGen.CleanupChangeRequest, stopCh)
        go prgen.Run(1, stopCh)
        go grcc.Run(1, stopCh)
    }

    kubeClientLeaderElection, err := kubernetes.NewForConfig(clientConfig)
@@ -1,288 +0,0 @@
package cleanup

import (
    "context"
    "strconv"
    "time"

    kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
    kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
    kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
    kyvernov1beta1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1beta1"
    kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
    kyvernov1beta1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1"
    "github.com/kyverno/kyverno/pkg/clients/dclient"
    kyvernoclient "github.com/kyverno/kyverno/pkg/clients/wrappers"
    pkgCommon "github.com/kyverno/kyverno/pkg/common"
    "github.com/kyverno/kyverno/pkg/config"
    kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    utilruntime "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    corev1informers "k8s.io/client-go/informers/core/v1"
    corev1listers "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

const (
    maxRetries = 10
)

type Controller interface {
    // Run starts workers
    Run(int, <-chan struct{})
}

// controller manages life-cycle of generate-requests
type controller struct {
    // clients
    client        dclient.Interface
    kyvernoClient kyvernoclient.Interface

    // informers
    pInformer  kyvernov1informers.ClusterPolicyInformer
    urInformer kyvernov1beta1informers.UpdateRequestInformer

    // listers
    pLister  kyvernov1listers.ClusterPolicyLister
    npLister kyvernov1listers.PolicyLister
    urLister kyvernov1beta1listers.UpdateRequestNamespaceLister
    nsLister corev1listers.NamespaceLister

    informersSynced []cache.InformerSynced

    // queue
    queue workqueue.RateLimitingInterface
}

// NewController returns a new controller instance to manage generate-requests
func NewController(
    kyvernoclient kyvernoclient.Interface,
    client dclient.Interface,
    pInformer kyvernov1informers.ClusterPolicyInformer,
    npInformer kyvernov1informers.PolicyInformer,
    urInformer kyvernov1beta1informers.UpdateRequestInformer,
    namespaceInformer corev1informers.NamespaceInformer,
) Controller {
    c := &controller{
        client:        client,
        kyvernoClient: kyvernoclient,
        pInformer:     pInformer,
        urInformer:    urInformer,
        pLister:       pInformer.Lister(),
        npLister:      npInformer.Lister(),
        urLister:      urInformer.Lister().UpdateRequests(config.KyvernoNamespace()),
        nsLister:      namespaceInformer.Lister(),
        queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "generate-request-cleanup"),
    }

    c.informersSynced = []cache.InformerSynced{pInformer.Informer().HasSynced, npInformer.Informer().HasSynced, urInformer.Informer().HasSynced, namespaceInformer.Informer().HasSynced}
    return c
}

func (c *controller) Run(workers int, stopCh <-chan struct{}) {
    defer utilruntime.HandleCrash()
    defer c.queue.ShutDown()
    logger.Info("starting")
    defer logger.Info("shutting down")

    if !cache.WaitForNamedCacheSync("generate-request-cleanup", stopCh, c.informersSynced...) {
        return
    }

    c.pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        DeleteFunc: c.deletePolicy, // we only clean up if the policy is deleted
    })
    c.urInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        DeleteFunc: c.deleteUR,
    })
    for i := 0; i < workers; i++ {
        go wait.Until(c.worker, time.Second, stopCh)
    }
    <-stopCh
}

func (c *controller) deletePolicy(obj interface{}) {
    p, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1.ClusterPolicy)
    if !ok {
        logger.Info("Failed to get deleted object", "obj", obj)
        return
    }

    logger.V(4).Info("deleting policy", "name", p.Name)

    generatePolicyWithClone := pkgCommon.ProcessDeletePolicyForCloneGenerateRule(p, c.client, c.kyvernoClient, c.urLister, p.GetName(), logger)

    // get the generated resource name from update request for log
    selector := labels.SelectorFromSet(labels.Set(map[string]string{
        kyvernov1beta1.URGeneratePolicyLabel: p.Name,
    }))

    urList, err := c.urLister.List(selector)
    if err != nil {
        logger.Error(err, "failed to get update request for the resource", "label", kyvernov1beta1.URGeneratePolicyLabel)
        return
    }

    for _, ur := range urList {
        for _, generatedResource := range ur.Status.GeneratedResources {
            logger.V(4).Info("retaining resource", "apiVersion", generatedResource.APIVersion, "kind", generatedResource.Kind, "name", generatedResource.Name, "namespace", generatedResource.Namespace)
        }
    }

    if !generatePolicyWithClone {
        urs, err := c.urLister.GetUpdateRequestsForClusterPolicy(p.Name)
        if err != nil {
            logger.Error(err, "failed to update request for the policy", "name", p.Name)
            return
        }

        for _, ur := range urs {
            logger.V(4).Info("enqueue the ur for cleanup", "ur name", ur.Name)
            c.enqueue(ur)
        }
    }
}

func (c *controller) deleteUR(obj interface{}) {
    ur, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1beta1.UpdateRequest)
    if !ok {
        logger.Info("Failed to get deleted object", "obj", obj)
        return
    }
    if ur.Status.Handler != "" {
        return
    }
    c.enqueue(ur)
}

func (c *controller) enqueue(ur *kyvernov1beta1.UpdateRequest) {
    key, err := cache.MetaNamespaceKeyFunc(ur)
    if err != nil {
        logger.Error(err, "failed to extract key")
        return
    }
    logger.V(5).Info("enqueue update request", "name", ur.Name)
    c.queue.Add(key)
}

// worker runs a worker thread that just de-queues items, processes them, and marks them done.
// It enforces that the syncUpdateRequest is never invoked concurrently with the same key.
func (c *controller) worker() {
    for c.processNextWorkItem() {
    }
}

func (c *controller) processNextWorkItem() bool {
    key, quit := c.queue.Get()
    if quit {
        return false
    }
    defer c.queue.Done(key)
    err := c.syncUpdateRequest(key.(string))
    c.handleErr(err, key)

    return true
}

func (c *controller) handleErr(err error, key interface{}) {
    if err == nil {
        c.queue.Forget(key)
        return
    }

    if apierrors.IsNotFound(err) {
        logger.V(4).Info("dropping update request", "key", key, "error", err.Error())
        c.queue.Forget(key)
        return
    }

    if c.queue.NumRequeues(key) < maxRetries {
        logger.V(3).Info("retrying update request", "key", key, "error", err.Error())
        c.queue.AddRateLimited(key)
        return
    }

    logger.Error(err, "failed to cleanup update request", "key", key)
    c.queue.Forget(key)
}

func (c *controller) syncUpdateRequest(key string) error {
    logger := logger.WithValues("key", key)
    var err error
    startTime := time.Now()
    logger.V(4).Info("started syncing update request", "startTime", startTime)
    defer func() {
        logger.V(4).Info("finished syncing update request", "processingTime", time.Since(startTime).String())
    }()
    _, urName, err := cache.SplitMetaNamespaceKey(key)
    if err != nil {
        return err
    }
    ur, err := c.urLister.Get(urName)
    if err != nil {
        if apierrors.IsNotFound(err) {
            logger.Info("update request has been deleted")
            return nil
        }
        return err
    }
    if ur.Status.State == kyvernov1beta1.Pending {
        return nil
    }
    pNamespace, pName, err := cache.SplitMetaNamespaceKey(ur.Spec.Policy)
    if err != nil {
        return err
    }
    if pNamespace == "" {
        _, err = c.pLister.Get(pName)
    } else {
        _, err = c.npLister.Policies(pNamespace).Get(pName)
    }
    if err != nil {
        if !apierrors.IsNotFound(err) {
            return err
        }
        logger.Error(err, "failed to get policy, deleting the update request", "key", ur.Spec.Policy)
        return c.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Delete(context.TODO(), ur.Name, metav1.DeleteOptions{})
    }
    return c.processUR(*ur)
}

func (c *controller) processUR(ur kyvernov1beta1.UpdateRequest) error {
    logger := logger.WithValues("kind", ur.Kind, "namespace", ur.Namespace, "name", ur.Name)
    // 1 - the corresponding policy has been deleted:
    // then we don't delete the generated resources

    // 2 - the trigger resource is deleted: then delete the generated resources
    if !ownerResourceExists(logger, c.client, ur) {
        deleteUR := false
        // check retry count in annotation
        urAnnotations := ur.Annotations
        if val, ok := urAnnotations["generate.kyverno.io/retry-count"]; ok {
            retryCount, err := strconv.ParseUint(val, 10, 32)
            if err != nil {
                logger.Error(err, "unable to convert retry-count")
                return err
            }

            if retryCount >= 5 {
                deleteUR = true
            }
        }

        if deleteUR {
            if err := deleteGeneratedResources(logger, c.client, ur); err != nil {
                return err
            }
            // - trigger resource is deleted
            // - generated resources are deleted
            // -> now delete the UpdateRequest CR
            return c.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Delete(context.TODO(), ur.Name, metav1.DeleteOptions{})
        }
    }
    return nil
}
@@ -1,5 +0,0 @@
package cleanup

import "sigs.k8s.io/controller-runtime/pkg/log"

var logger = log.Log.WithName("cleanup")
@@ -1,34 +0,0 @@
package cleanup

import (
    "github.com/go-logr/logr"
    kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
    "github.com/kyverno/kyverno/pkg/clients/dclient"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
)

func ownerResourceExists(log logr.Logger, client dclient.Interface, ur kyvernov1beta1.UpdateRequest) bool {
    _, err := client.GetResource("", ur.Spec.Resource.Kind, ur.Spec.Resource.Namespace, ur.Spec.Resource.Name)
    // the trigger resource has been deleted
    if apierrors.IsNotFound(err) {
        return false
    }
    if err != nil {
        log.Error(err, "failed to get resource", "genKind", ur.Spec.Resource.Kind, "genNamespace", ur.Spec.Resource.Namespace, "genName", ur.Spec.Resource.Name)
    }
    // if there was an error while querying the resource we don't delete the generated resources
    // but expect the deletion in the next reconciliation loop
    return true
}

func deleteGeneratedResources(log logr.Logger, client dclient.Interface, ur kyvernov1beta1.UpdateRequest) error {
    for _, genResource := range ur.Status.GeneratedResources {
        err := client.DeleteResource("", genResource.Kind, genResource.Namespace, genResource.Name, false)
        if err != nil && !apierrors.IsNotFound(err) {
            return err
        }

        log.V(3).Info("generated resource deleted", "genKind", ur.Spec.Resource.Kind, "genNamespace", ur.Spec.Resource.Namespace, "genName", ur.Spec.Resource.Name)
    }
    return nil
}
@@ -16,11 +16,13 @@ import (
    kyvernov1beta1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1"
    "github.com/kyverno/kyverno/pkg/clients/dclient"
    kyvernoclient "github.com/kyverno/kyverno/pkg/clients/wrappers"
    pkgCommon "github.com/kyverno/kyverno/pkg/common"
    "github.com/kyverno/kyverno/pkg/config"
    "github.com/kyverno/kyverno/pkg/event"
    kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/apimachinery/pkg/util/runtime"
    "k8s.io/apimachinery/pkg/util/wait"
    corev1informers "k8s.io/client-go/informers/core/v1"
@@ -268,6 +270,13 @@ func (c *controller) updatePolicy(_, obj interface{}) {
}

func (c *controller) deletePolicy(obj interface{}) {
    p, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1.ClusterPolicy)
    if !ok {
        logger.Info("Failed to get deleted object", "obj", obj)
        return
    }

    logger.V(4).Info("deleting policy", "name", p.Name)
    key, err := cache.MetaNamespaceKeyFunc(kubeutils.GetObjectWithTombstone(obj))
    if err != nil {
        logger.Error(err, "failed to compute policy key")
@@ -278,8 +287,41 @@ func (c *controller) deletePolicy(obj interface{}) {
        logger.Error(err, "failed to list update requests for policy", "key", key)
        return
    }

    generatePolicyWithClone := pkgCommon.ProcessDeletePolicyForCloneGenerateRule(p, c.client, c.kyvernoClient, c.urLister, p.GetName(), logger)

    // get the generated resource name from update request for log
    selector := labels.SelectorFromSet(labels.Set(map[string]string{
        kyvernov1beta1.URGeneratePolicyLabel: p.Name,
    }))

    urList, err := c.urLister.List(selector)
    if err != nil {
        logger.Error(err, "failed to get update request for the resource", "label", kyvernov1beta1.URGeneratePolicyLabel)
        return
    }

    for _, ur := range urList {
        for _, generatedResource := range ur.Status.GeneratedResources {
            logger.V(4).Info("retaining resource", "apiVersion", generatedResource.APIVersion, "kind", generatedResource.Kind, "name", generatedResource.Name, "namespace", generatedResource.Namespace)
        }
    }

    if !generatePolicyWithClone {
        urs, err := c.urLister.GetUpdateRequestsForClusterPolicy(p.Name)
        if err != nil {
            logger.Error(err, "failed to update request for the policy", "name", p.Name)
            return
        }
        // re-evaluate the UR as the policy was updated
        for _, ur := range urs {
            logger.V(4).Info("enqueue the ur for cleanup", "ur name", ur.Name)
            c.enqueueUpdateRequest(ur)
        }
    }
    // re-evaluate the UR as the policy was updated
    for _, ur := range urs {
        logger.V(4).Info("enqueue the ur for cleanup", "ur name", ur.Name)
        c.enqueueUpdateRequest(ur)
    }
}
@@ -296,13 +338,24 @@ func (c *controller) updateUR(_, cur interface{}) {
}

func (c *controller) deleteUR(obj interface{}) {
    ur, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1beta1.UpdateRequest)
    if ok {
        // sync Handler will remove it from the queue
        c.enqueueUpdateRequest(ur)
    } else {
        logger.Info("Failed to get deleted object", "obj", obj)
    ur, ok := obj.(*kyvernov1beta1.UpdateRequest)
    if !ok {
        tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
        if !ok {
            logger.Info("Couldn't get object from tombstone", "obj", obj)
            return
        }
        ur, ok = tombstone.Obj.(*kyvernov1beta1.UpdateRequest)
        if !ok {
            logger.Info("tombstone contained object that is not a Update Request CR", "obj", obj)
            return
        }
    }
    if ur.Status.Handler != "" {
        return
    }
    // sync Handler will remove it from the queue
    c.enqueueUpdateRequest(ur)
}

func (c *controller) processUR(ur *kyvernov1beta1.UpdateRequest) error {