From a6aaffded3a23f29574a2da614a6061a10bfa844 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Charles-Edouard=20Br=C3=A9t=C3=A9ch=C3=A9?=
Date: Wed, 7 Dec 2022 11:30:47 +0100
Subject: [PATCH] feat: add cleanup handler (#5576)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* feat: add cleanup handler

Signed-off-by: Charles-Edouard Brétéché

* cleanup handler

Signed-off-by: Charles-Edouard Brétéché

* cleanup

Signed-off-by: Charles-Edouard Brétéché

* cleanup

Signed-off-by: Charles-Edouard Brétéché

* service

Signed-off-by: Charles-Edouard Brétéché

* cleanup

Signed-off-by: Charles-Edouard Brétéché

* fix

Signed-off-by: Charles-Edouard Brétéché

* codegen

Signed-off-by: Charles-Edouard Brétéché

Signed-off-by: Charles-Edouard Brétéché
Signed-off-by: Charles-Edouard Brétéché
---
 Makefile                                  |   3 +-
 charts/kyverno/README.md                  |   1 +
 .../cleanup-controller/clusterrole.yaml   |   2 +
 .../cleanup-controller/deployment.yaml    |   4 +
 charts/kyverno/values.yaml                |   9 ++
 .../{ => handlers/admission}/handlers.go  |  10 +-
 .../handlers/cleanup/handlers.go          |  73 ++++++++++++
 cmd/cleanup-controller/main.go            |  19 ++-
 cmd/cleanup-controller/server.go          |  45 ++++++--
 pkg/controllers/cleanup/controller.go     | 108 +++++++++++++++---
 pkg/controllers/cleanup/utils.go          |  54 ---------
 .../report/aggregate/controller.go        |   4 +-
 pkg/utils/controller/metadata.go          |   9 ++
 pkg/utils/controller/selector.go          |  16 +++
 scripts/kyverno.yaml                      |  16 +++
 15 files changed, 284 insertions(+), 89 deletions(-)
 rename cmd/cleanup-controller/{ => handlers/admission}/handlers.go (70%)
 create mode 100644 cmd/cleanup-controller/handlers/cleanup/handlers.go
 delete mode 100644 pkg/controllers/cleanup/utils.go
 create mode 100644 pkg/utils/controller/selector.go
 create mode 100644 scripts/kyverno.yaml

diff --git a/Makefile b/Makefile
index 6d8c3e952c..d0d62af521 100644
--- a/Makefile
+++ b/Makefile
@@ -730,8 +730,7 @@ kind-deploy-kyverno: $(HELM) kind-load-all ## Build images, load them in kind cl
 		--set image.tag=$(IMAGE_TAG_DEV) \
 		--set initImage.repository=$(LOCAL_KYVERNOPRE_IMAGE) \
 		--set initImage.tag=$(IMAGE_TAG_DEV) \
-		--set initContainer.extraArgs={--loggingFormat=text} \
-		--set "extraArgs={--loggingFormat=text}"
+		--values ./scripts/kyverno.yaml
 	@echo Restart kyverno pods... >&2
 	@kubectl rollout restart deployment -n kyverno

diff --git a/charts/kyverno/README.md b/charts/kyverno/README.md
index 4f32bf7487..002ddc76db 100644
--- a/charts/kyverno/README.md
+++ b/charts/kyverno/README.md
@@ -223,6 +223,7 @@ The command removes all the Kubernetes components associated with the chart and
 | cleanupController.image.tag | string | `nil` | Image tag Defaults to appVersion in Chart.yaml if omitted |
 | cleanupController.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
 | cleanupController.image.pullSecrets | list | `[]` | Image pull secrets |
+| cleanupController.args | list | `["--cleanupService=https://{{ template \"kyverno.cleanup-controller.deploymentName\" . }}.{{ template \"kyverno.namespace\" . }}.svc"]` | Arguments passed to the container on the command line |
 | cleanupController.service.port | int | `443` | Service port. |
 | cleanupController.service.type | string | `"ClusterIP"` | Service type. |
 | cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. |

diff --git a/charts/kyverno/templates/cleanup-controller/clusterrole.yaml b/charts/kyverno/templates/cleanup-controller/clusterrole.yaml
index 1d0bfb26af..e36bf93c61 100644
--- a/charts/kyverno/templates/cleanup-controller/clusterrole.yaml
+++ b/charts/kyverno/templates/cleanup-controller/clusterrole.yaml
@@ -35,6 +35,7 @@ rules:
       - update
       - watch
   {{- with .Values.cleanupController.rbac.clusterRole.extraResources }}
+  {{- range . }}
   - apiGroups:
       {{- toYaml .apiGroups | nindent 6 }}
     resources:
@@ -43,5 +44,6 @@
       - delete
      - list
   {{- end }}
+  {{- end }}
 {{- end }}
 {{- end }}

diff --git a/charts/kyverno/templates/cleanup-controller/deployment.yaml b/charts/kyverno/templates/cleanup-controller/deployment.yaml
index 316089e11b..999df35a1d 100644
--- a/charts/kyverno/templates/cleanup-controller/deployment.yaml
+++ b/charts/kyverno/templates/cleanup-controller/deployment.yaml
@@ -39,6 +39,10 @@ spec:
             - containerPort: 8000
               name: metrics
               protocol: TCP
+          {{- with .Values.cleanupController.args }}
+          args:
+            {{- tpl (toYaml .) $ | nindent 12 }}
+          {{- end }}
           env:
             - name: KYVERNO_NAMESPACE
               valueFrom:

diff --git a/charts/kyverno/values.yaml b/charts/kyverno/values.yaml
index 462e5c1af3..4a32d5c8b9 100644
--- a/charts/kyverno/values.yaml
+++ b/charts/kyverno/values.yaml
@@ -488,6 +488,11 @@ cleanupController:
     clusterRole:
       # -- Extra resource permissions to add in the cluster role
       extraResources: []
+      # - apiGroups:
+      #     - ''
+      #   resources:
+      #     - pods
+
   # -- Enable cleanup controller.
   enabled: true
@@ -509,6 +514,10 @@ cleanupController:
     pullSecrets: []
     # - secretName

+  # -- Arguments passed to the container on the command line
+  args:
+    - --cleanupService=https://{{ template "kyverno.cleanup-controller.deploymentName" . }}.{{ template "kyverno.namespace" . }}.svc
+
   service:
     # -- Service port.
     port: 443

diff --git a/cmd/cleanup-controller/handlers.go b/cmd/cleanup-controller/handlers/admission/handlers.go
similarity index 70%
rename from cmd/cleanup-controller/handlers.go
rename to cmd/cleanup-controller/handlers/admission/handlers.go
index b557642586..d2400ccdb2 100644
--- a/cmd/cleanup-controller/handlers.go
+++ b/cmd/cleanup-controller/handlers/admission/handlers.go
@@ -1,4 +1,4 @@
-package main
+package admission

 import (
 	"context"
@@ -11,17 +11,17 @@ import (
 	admissionv1 "k8s.io/api/admission/v1"
 )

-type cleanupPolicyHandlers struct {
+type handlers struct {
 	client dclient.Interface
 }

-func NewHandlers(client dclient.Interface) CleanupPolicyHandlers {
-	return &cleanupPolicyHandlers{
+func New(client dclient.Interface) *handlers {
+	return &handlers{
 		client: client,
 	}
 }

-func (h *cleanupPolicyHandlers) Validate(ctx context.Context, logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
+func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
 	policy, _, err := admissionutils.GetCleanupPolicies(request)
 	if err != nil {
 		logger.Error(err, "failed to unmarshal policies from admission request")

diff --git a/cmd/cleanup-controller/handlers/cleanup/handlers.go b/cmd/cleanup-controller/handlers/cleanup/handlers.go
new file mode 100644
index 0000000000..7ef628bcb5
--- /dev/null
+++ b/cmd/cleanup-controller/handlers/cleanup/handlers.go
@@ -0,0 +1,73 @@
+package cleanup
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-logr/logr"
+	kyvernov1alpha1 "github.com/kyverno/kyverno/api/kyverno/v1alpha1"
+	kyvernov1alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1alpha1"
+	"github.com/kyverno/kyverno/pkg/clients/dclient"
+	controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/cache"
+)
+
+type handlers struct {
+	client     dclient.Interface
+	cpolLister kyvernov1alpha1listers.ClusterCleanupPolicyLister
+	polLister  kyvernov1alpha1listers.CleanupPolicyLister
+}
+
+func New(
+	client dclient.Interface,
+	cpolLister kyvernov1alpha1listers.ClusterCleanupPolicyLister,
+	polLister kyvernov1alpha1listers.CleanupPolicyLister,
+) *handlers {
+	return &handlers{
+		client:     client,
+		cpolLister: cpolLister,
+		polLister:  polLister,
+	}
+}
+
+func (h *handlers) Cleanup(ctx context.Context, logger logr.Logger, name string, _ time.Time) error {
+	logger.Info("cleaning up...")
+	namespace, name, err := cache.SplitMetaNamespaceKey(name)
+	if err != nil {
+		return err
+	}
+	policy, err := h.lookupPolicy(namespace, name)
+	if err != nil {
+		return err
+	}
+	return h.executePolicy(ctx, logger, policy)
+}
+
+func (h *handlers) lookupPolicy(namespace, name string) (kyvernov1alpha1.CleanupPolicyInterface, error) {
+	if namespace == "" {
+		return h.cpolLister.Get(name)
+	} else {
+		return h.polLister.CleanupPolicies(namespace).Get(name)
+	}
+}
+
+func (h *handlers) executePolicy(ctx context.Context, logger logr.Logger, policy kyvernov1alpha1.CleanupPolicyInterface) error {
+	spec := policy.GetSpec()
+	kinds := sets.NewString(spec.MatchResources.GetKinds()...)
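+	// Editor's note (these comment lines are not in the original patch): the
+	// loop below only lists the resources matched by the policy and logs the
+	// candidates that are not managed by Kyverno; no deletion is issued yet
+	// at this stage of the feature.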
+	for kind := range kinds {
+		logger := logger.WithValues("kind", kind)
+		logger.Info("processing...")
+		list, err := h.client.ListResource(ctx, "", kind, policy.GetNamespace(), nil)
+		if err != nil {
+			return err
+		}
+		for i := range list.Items {
+			if !controllerutils.IsManagedByKyverno(&list.Items[i]) {
+				logger := logger.WithValues("name", list.Items[i].GetName(), "namespace", list.Items[i].GetNamespace())
+				logger.Info("item...")
+			}
+		}
+	}
+	return nil
+}

diff --git a/cmd/cleanup-controller/main.go b/cmd/cleanup-controller/main.go
index 119c0d6779..ba22ab5897 100644
--- a/cmd/cleanup-controller/main.go
+++ b/cmd/cleanup-controller/main.go
@@ -1,10 +1,13 @@
 package main

 import (
+	"flag"
 	"os"
 	"sync"
 	"time"

+	admissionhandlers "github.com/kyverno/kyverno/cmd/cleanup-controller/handlers/admission"
+	cleanuphandlers "github.com/kyverno/kyverno/cmd/cleanup-controller/handlers/cleanup"
 	"github.com/kyverno/kyverno/cmd/internal"
 	kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
 	dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
@@ -22,12 +25,16 @@ const (
 )

 func main() {
+	var cleanupService string
+	flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
+	flagset.StringVar(&cleanupService, "cleanupService", "https://cleanup-controller.kyverno.svc", "The url to join the cleanup service.")
 	// config
 	appConfig := internal.NewConfiguration(
 		internal.WithProfiling(),
 		internal.WithMetrics(),
 		internal.WithTracing(),
 		internal.WithKubeconfig(),
+		internal.WithFlagSets(flagset),
 	)
 	// parse flags
 	internal.ParseFlags(appConfig)
@@ -56,18 +63,24 @@
 			kyvernoInformer.Kyverno().V1alpha1().ClusterCleanupPolicies(),
 			kyvernoInformer.Kyverno().V1alpha1().CleanupPolicies(),
 			kubeInformer.Batch().V1().CronJobs(),
+			cleanupService,
 		),
 		cleanup.Workers,
 	)
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
+	cpolLister := kyvernoInformer.Kyverno().V1alpha1().ClusterCleanupPolicies().Lister()
+	polLister := kyvernoInformer.Kyverno().V1alpha1().CleanupPolicies().Lister()
 	// start informers and wait for cache sync
 	if !internal.StartInformersAndWaitForCacheSync(ctx, kubeKyvernoInformer, kubeInformer, kyvernoInformer) {
 		os.Exit(1)
 	}
 	var wg sync.WaitGroup
 	controller.Run(ctx, logger.WithName("cleanup-controller"), &wg)
+	// create handlers
+	admissionHandlers := admissionhandlers.New(dClient)
+	cleanupHandlers := cleanuphandlers.New(dClient, cpolLister, polLister)
+	// create server
 	server := NewServer(
-		NewHandlers(dClient),
 		func() ([]byte, []byte, error) {
 			secret, err := secretLister.Secrets(config.KyvernoNamespace()).Get("cleanup-controller-tls")
 			if err != nil {
@@ -75,8 +88,10 @@
 			}
 			return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
 		},
+		admissionHandlers.Validate,
+		cleanupHandlers.Cleanup,
 	)
-	// start webhooks server
+	// start server
 	server.Run(ctx.Done())
 	// wait for termination signal
 	wg.Wait()

diff --git a/cmd/cleanup-controller/server.go b/cmd/cleanup-controller/server.go
index 1e76cb1b1c..51a2226f6c 100644
--- a/cmd/cleanup-controller/server.go
+++ b/cmd/cleanup-controller/server.go
@@ -8,13 +8,17 @@ import (

 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
+	"github.com/kyverno/kyverno/pkg/controllers/cleanup"
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
 	admissionv1 "k8s.io/api/admission/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 )

-// ValidatingWebhookServicePath is the path for validation webhook
-const ValidatingWebhookServicePath = "/validate"
+const (
+	// validatingWebhookServicePath is the path for validation webhook
+	validatingWebhookServicePath = "/validate"
+)

 type Server interface {
 	// Run TLS server in separate thread and returns control immediately
@@ -23,32 +27,51 @@ type Server interface {
 	Stop(context.Context)
 }

-type CleanupPolicyHandlers interface {
-	// Validate performs the validation check on policy resources
-	Validate(context.Context, logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
-}
-
 type server struct {
 	server *http.Server
 }

-type TlsProvider func() ([]byte, []byte, error)
+type (
+	TlsProvider       = func() ([]byte, []byte, error)
+	ValidationHandler = func(context.Context, logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
+	CleanupHandler    = func(context.Context, logr.Logger, string, time.Time) error
+)

 // NewServer creates new instance of server accordingly to given configuration
 func NewServer(
-	policyHandlers CleanupPolicyHandlers,
 	tlsProvider TlsProvider,
+	validationHandler ValidationHandler,
+	cleanupHandler CleanupHandler,
 ) Server {
 	policyLogger := logging.WithName("cleanup-policy")
+	cleanupLogger := logging.WithName("cleanup")
 	mux := httprouter.New()
 	mux.HandlerFunc(
 		"POST",
-		ValidatingWebhookServicePath,
-		handlers.FromAdmissionFunc("VALIDATE", policyHandlers.Validate).
+		validatingWebhookServicePath,
+		handlers.FromAdmissionFunc("VALIDATE", validationHandler).
 			WithSubResourceFilter().
 			WithAdmission(policyLogger.WithName("validate")).
 			ToHandlerFunc(),
 	)
+	mux.HandlerFunc(
+		"GET",
+		cleanup.CleanupServicePath,
+		func(w http.ResponseWriter, r *http.Request) {
+			policy := r.URL.Query().Get("policy")
+			logger := cleanupLogger.WithValues("policy", policy)
+			err := cleanupHandler(r.Context(), logger, policy, time.Now())
+			if err == nil {
+				w.WriteHeader(http.StatusOK)
+			} else {
+				if apierrors.IsNotFound(err) {
+					w.WriteHeader(http.StatusNotFound)
+				} else {
+					w.WriteHeader(http.StatusInternalServerError)
+				}
+			}
+		},
+	)
 	return &server{
 		server: &http.Server{
 			Addr: ":9443",

diff --git a/pkg/controllers/cleanup/controller.go b/pkg/controllers/cleanup/controller.go
index bd42ae3396..0d04208d3d 100644
--- a/pkg/controllers/cleanup/controller.go
+++ b/pkg/controllers/cleanup/controller.go
@@ -2,6 +2,7 @@ package cleanup

 import (
 	"context"
+	"fmt"
 	"time"

 	"github.com/go-logr/logr"
@@ -12,14 +13,21 @@ import (
 	"github.com/kyverno/kyverno/pkg/controllers"
 	controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
 	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	batchv1informers "k8s.io/client-go/informers/batch/v1"
 	"k8s.io/client-go/kubernetes"
 	batchv1listers "k8s.io/client-go/listers/batch/v1"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 )

+const (
+	// CleanupServicePath is the path for triggering cleanup
+	CleanupServicePath = "/cleanup"
+)
+
 type controller struct {
 	// clients
 	client kubernetes.Interface
@@ -33,6 +41,9 @@ type controller struct {
 	queue       workqueue.RateLimitingInterface
 	cpolEnqueue controllerutils.EnqueueFuncT[*kyvernov1alpha1.ClusterCleanupPolicy]
 	polEnqueue  controllerutils.EnqueueFuncT[*kyvernov1alpha1.CleanupPolicy]
+
+	// config
+	cleanupService string
 }

 const (
@@ -46,16 +57,18 @@ func NewController(
 	client kubernetes.Interface,
 	cpolInformer kyvernov1alpha1informers.ClusterCleanupPolicyInformer,
 	polInformer kyvernov1alpha1informers.CleanupPolicyInformer,
 	cjInformer batchv1informers.CronJobInformer,
+	cleanupService string,
 ) controllers.Controller {
 	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName)
 	c := &controller{
-		client:      client,
-		cpolLister:  cpolInformer.Lister(),
-		polLister:   polInformer.Lister(),
-		cjLister:    cjInformer.Lister(),
-		queue:       queue,
-		cpolEnqueue: controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.ClusterCleanupPolicy](logger, cpolInformer.Informer(), queue),
-		polEnqueue:  controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.CleanupPolicy](logger, polInformer.Informer(), queue),
+		client:         client,
+		cpolLister:     cpolInformer.Lister(),
+		polLister:      polInformer.Lister(),
+		cjLister:       cjInformer.Lister(),
+		queue:          queue,
+		cpolEnqueue:    controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.ClusterCleanupPolicy](logger, cpolInformer.Informer(), queue),
+		polEnqueue:     controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.CleanupPolicy](logger, polInformer.Informer(), queue),
+		cleanupService: cleanupService,
 	}
 	controllerutils.AddEventHandlersT(
 		cjInformer.Informer(),
@@ -121,6 +134,64 @@ func (c *controller) getCronjob(namespace, name string) (*batchv1.CronJob, error
 	return cj, nil
 }

+func (c *controller) buildCronJob(cronJob *batchv1.CronJob, pol kyvernov1alpha1.CleanupPolicyInterface) error {
+	// TODO: find a better way to do that, it looks like resources returned by WATCH don't have the GVK
+	apiVersion := "kyverno.io/v1alpha1"
+	kind := "CleanupPolicy"
+	if pol.GetNamespace() == "" {
+		kind = "ClusterCleanupPolicy"
+	}
+	policyName, err := cache.MetaNamespaceKeyFunc(pol)
+	if err != nil {
+		return err
+	}
+	// set owner reference
+	cronJob.OwnerReferences = []metav1.OwnerReference{
+		{
+			APIVersion: apiVersion,
+			Kind:       kind,
+			Name:       pol.GetName(),
+			UID:        pol.GetUID(),
+		},
+	}
+	var successfulJobsHistoryLimit int32 = 0
+	var failedJobsHistoryLimit int32 = 1
+	// set spec
+	cronJob.Spec = batchv1.CronJobSpec{
+		Schedule:                   pol.GetSpec().Schedule,
+		SuccessfulJobsHistoryLimit: &successfulJobsHistoryLimit,
+		FailedJobsHistoryLimit:     &failedJobsHistoryLimit,
+		ConcurrencyPolicy:          batchv1.ForbidConcurrent,
+		JobTemplate: batchv1.JobTemplateSpec{
+			Spec: batchv1.JobSpec{
+				Template: corev1.PodTemplateSpec{
+					Spec: corev1.PodSpec{
+						RestartPolicy: corev1.RestartPolicyOnFailure,
+						Containers: []corev1.Container{
+							{
+								Name:  "cleanup",
+								Image: "curlimages/curl:7.86.0",
+								Args: []string{
+									"-k",
+									// TODO: ca
+									// "--cacert",
+									// "/tmp/ca.crt",
+									fmt.Sprintf("%s%s?policy=%s", c.cleanupService, CleanupServicePath, policyName),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	// set labels
+	controllerutils.SetManagedByKyvernoLabel(cronJob)
+	controllerutils.SetManagedByKyvernoLabel(&cronJob.Spec.JobTemplate)
+	controllerutils.SetManagedByKyvernoLabel(&cronJob.Spec.JobTemplate.Spec.Template)
+	return nil
+}
+
 func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, namespace, name string) error {
 	policy, err := c.getPolicy(namespace, name)
 	if err != nil {
@@ -134,17 +205,28 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, nam
 	if namespace == "" {
 		cronjobNs = config.KyvernoNamespace()
 	}
-	if cronjob, err := c.getCronjob(cronjobNs, string(policy.GetUID())); err != nil {
+	observed, err := c.getCronjob(cronjobNs, string(policy.GetUID()))
+	if err != nil {
 		if !apierrors.IsNotFound(err) {
 			return err
 		}
-		cronjob := getCronJobForTriggerResource(policy)
-		_, err = c.client.BatchV1().CronJobs(cronjobNs).Create(ctx, cronjob, metav1.CreateOptions{})
+		observed = &batchv1.CronJob{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      string(policy.GetUID()),
+				Namespace: cronjobNs,
+			},
+		}
+	}
+	if observed.ResourceVersion == "" {
+		err := c.buildCronJob(observed, policy)
+		if err != nil {
+			return err
+		}
+		_, err = c.client.BatchV1().CronJobs(cronjobNs).Create(ctx, observed, metav1.CreateOptions{})
 		return err
 	} else {
-		_, err = controllerutils.Update(ctx, cronjob, c.client.BatchV1().CronJobs(cronjobNs), func(cronjob *batchv1.CronJob) error {
-			cronjob.Spec.Schedule = policy.GetSpec().Schedule
-			return nil
+		_, err = controllerutils.Update(ctx, observed, c.client.BatchV1().CronJobs(cronjobNs), func(observed *batchv1.CronJob) error {
+			return c.buildCronJob(observed, policy)
 		})
 		return err
 	}

diff --git a/pkg/controllers/cleanup/utils.go b/pkg/controllers/cleanup/utils.go
deleted file mode 100644
index 429d140fa4..0000000000
--- a/pkg/controllers/cleanup/utils.go
+++ /dev/null
@@ -1,54 +0,0 @@
-package cleanup
-
-import (
-	kyvernov1alpha1 "github.com/kyverno/kyverno/api/kyverno/v1alpha1"
-	batchv1 "k8s.io/api/batch/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func getCronJobForTriggerResource(pol kyvernov1alpha1.CleanupPolicyInterface) *batchv1.CronJob {
-	// TODO: find a better way to do that, it looks like resources returned by WATCH don't have the GVK
-	apiVersion := "kyverno.io/v1alpha1"
-	kind := "CleanupPolicy"
-	if pol.GetNamespace() == "" {
-		kind = "ClusterCleanupPolicy"
-	}
-	cronjob := &batchv1.CronJob{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: string(pol.GetUID()),
-			OwnerReferences: []metav1.OwnerReference{
-				{
-					APIVersion: apiVersion,
-					Kind:       kind,
-					Name:       pol.GetName(),
-					UID:        pol.GetUID(),
-				},
-			},
-		},
-		Spec: batchv1.CronJobSpec{
-			Schedule: pol.GetSpec().Schedule,
-			JobTemplate: batchv1.JobTemplateSpec{
-				Spec: batchv1.JobSpec{
-					Template: corev1.PodTemplateSpec{
-						Spec: corev1.PodSpec{
-							RestartPolicy: corev1.RestartPolicyOnFailure,
-							Containers: []corev1.Container{
-								{
-									Name:  "cleanup",
-									Image: "bitnami/kubectl:latest",
-									Args: []string{
-										"/bin/sh",
-										"-c",
-										`echo "Hello World"`,
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-	return cronjob
-}

diff --git a/pkg/controllers/report/aggregate/controller.go b/pkg/controllers/report/aggregate/controller.go
index 15ddf5909c..80ed0d54ab 100644
--- a/pkg/controllers/report/aggregate/controller.go
+++ b/pkg/controllers/report/aggregate/controller.go
@@ -344,7 +344,7 @@ func (c *controller) getPolicyReports(ctx context.Context, namespace string) ([]
 		return nil, err
 	}
 	for i := range list.Items {
-		if controllerutils.CheckLabel(&list.Items[i], kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp) {
+		if controllerutils.IsManagedByKyverno(&list.Items[i]) {
 			reports = append(reports, &list.Items[i])
 		}
 	}
@@ -354,7 +354,7 @@ func (c *controller) getPolicyReports(ctx context.Context, namespace string) ([]
 		return nil, err
 	}
 	for i := range list.Items {
-		if controllerutils.CheckLabel(&list.Items[i], kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp) {
+		if controllerutils.IsManagedByKyverno(&list.Items[i]) {
 			reports = append(reports, &list.Items[i])
 		}
 	}

diff --git a/pkg/utils/controller/metadata.go b/pkg/utils/controller/metadata.go
index 0a8cf70d42..5fdc77d71e 100644
--- a/pkg/utils/controller/metadata.go
+++ b/pkg/utils/controller/metadata.go
@@ -1,6 +1,7 @@
 package controller

 import (
+	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
@@ -23,6 +24,14 @@ func CheckLabel(obj metav1.Object, key, value string) bool {
 	return labels[key] == value
 }

+func SetManagedByKyvernoLabel(obj metav1.Object) {
+	SetLabel(obj, kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp)
+}
+
+func IsManagedByKyverno(obj metav1.Object) bool {
+	return CheckLabel(obj, kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp)
+}
+
 func HasLabel(obj metav1.Object, key string) bool {
 	labels := obj.GetLabels()
 	if labels == nil {

diff --git a/pkg/utils/controller/selector.go b/pkg/utils/controller/selector.go
new file mode 100644
index 0000000000..e3a8f0c29a
--- /dev/null
+++ b/pkg/utils/controller/selector.go
@@ -0,0 +1,16 @@
+package controller
+
+import (
+	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+)
+
+func SelectorNotManagedByKyverno() (labels.Selector, error) {
+	selector := labels.Everything()
+	requirement, err := labels.NewRequirement(kyvernov1.LabelAppManagedBy, selection.NotEquals, []string{kyvernov1.ValueKyvernoApp})
+	if err == nil {
+		selector = selector.Add(*requirement)
+	}
+	return selector, err
+}

diff --git a/scripts/kyverno.yaml b/scripts/kyverno.yaml
new file mode 100644
index 0000000000..4340598fd7
--- /dev/null
+++ b/scripts/kyverno.yaml
@@ -0,0 +1,16 @@
+initContainer:
+  extraArgs:
+    - --loggingFormat=text
+
+extraArgs:
+  - --loggingFormat=text
+
+cleanupController:
+  rbac:
+    clusterRole:
+      extraResources:
+        - apiGroups:
+            - ''
+          resources:
+            - pods
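
Editor's note: the following is a minimal sketch, not part of the patch, showing how the CronJob built by buildCronJob is expected to exercise the new endpoint. The server registers cleanup.CleanupServicePath ("/cleanup") for GET requests whose "policy" query parameter carries the <namespace>/<name> cache key produced by cache.MetaNamespaceKeyFunc (just <name> for a ClusterCleanupPolicy), and answers 200 on success, 404 when the policy is unknown, 500 otherwise. The base URL is the default of the --cleanupService flag; the policy key is a hypothetical example, and InsecureSkipVerify mirrors the temporary curl -k flag used in the job container until the "--cacert" TODO lands.

    package main

    import (
    	"crypto/tls"
    	"fmt"
    	"net/http"
    	"net/url"
    )

    func main() {
    	// Default value of the --cleanupService flag (in-cluster service DNS name).
    	base := "https://cleanup-controller.kyverno.svc"
    	// Hypothetical namespaced policy, encoded as a <namespace>/<name> cache key.
    	policy := url.QueryEscape("default/remove-stale-pods")
    	// Equivalent of curl -k: certificate verification is skipped, matching the
    	// generated CronJob container until CA wiring is implemented.
    	client := &http.Client{
    		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
    	}
    	resp, err := client.Get(fmt.Sprintf("%s/cleanup?policy=%s", base, policy))
    	if err != nil {
    		panic(err)
    	}
    	defer resp.Body.Close()
    	fmt.Println(resp.Status) // 200 OK, 404 Not Found, or 500 Internal Server Error
    }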