From 0fb45ed53abf4cb8adce410f65c2bd258cdbe5d0 Mon Sep 17 00:00:00 2001
From: Nikhil Sharma <nikhilsharma230303@gmail.com>
Date: Wed, 16 Nov 2022 18:41:33 +0530
Subject: [PATCH] feat: add CleanupPolicy validation code to
 CleanupPolicyHandler  (#5338)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* add validation code for cleanupPolicyHandler

Signed-off-by: Nikhil Sharma <nikhilsharma230303@gmail.com>

* update

Signed-off-by: Nikhil Sharma <nikhilsharma230303@gmail.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* register kuttl tests for cleanup policy

Signed-off-by: Nikhil Sharma <nikhilsharma230303@gmail.com>

Signed-off-by: Nikhil Sharma <nikhilsharma230303@gmail.com>
Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Co-authored-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Co-authored-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
---
 charts/kyverno/README.md                      |   2 +-
 .../templates/cleanup-controller/secret.yaml  |   2 +-
 charts/kyverno/values.yaml                    |   2 +-
 cmd/cleanup-controller/handlers.go            |  39 +++++++
 cmd/cleanup-controller/logger/log.go          |   2 +-
 cmd/cleanup-controller/main.go                | 101 ++++++++++++++++--
 cmd/cleanup-controller/server.go              |  21 ++--
 cmd/cleanup-controller/validate/validate.go   |  18 ++--
 .../utils/admission/cleanup.go                |  26 ++---
 pkg/utils/admission/policy.go                 |  42 ++++++++
 pkg/utils/admission/utils.go                  |  37 -------
 test/conformance/kuttl/kuttl-test.yaml        |   1 +
 .../validate/cleanuppolicy/00-assert.yaml     |  17 +++
 .../cleanuppolicy/00-cleanuppolicy.yaml       |  17 +++
 .../validate/cleanuppolicy/01-assert.yaml     |  16 +++
 .../01-clustercleanuppolicy.yaml              |  16 +++
 .../cleanuppolicy/02-create-task.yaml         |  14 +++
 .../cleanuppolicy/badcleanuppolicy.yaml       |  17 +++
 18 files changed, 307 insertions(+), 83 deletions(-)
 create mode 100644 cmd/cleanup-controller/handlers.go
 rename cmd/cleanup-controller/utils/utils.go => pkg/utils/admission/cleanup.go (60%)
 create mode 100644 pkg/utils/admission/policy.go
 create mode 100644 test/conformance/kuttl/validate/cleanuppolicy/00-assert.yaml
 create mode 100644 test/conformance/kuttl/validate/cleanuppolicy/00-cleanuppolicy.yaml
 create mode 100644 test/conformance/kuttl/validate/cleanuppolicy/01-assert.yaml
 create mode 100644 test/conformance/kuttl/validate/cleanuppolicy/01-clustercleanuppolicy.yaml
 create mode 100644 test/conformance/kuttl/validate/cleanuppolicy/02-create-task.yaml
 create mode 100644 test/conformance/kuttl/validate/cleanuppolicy/badcleanuppolicy.yaml

diff --git a/charts/kyverno/README.md b/charts/kyverno/README.md
index 103ad86c34..14950d2c4a 100644
--- a/charts/kyverno/README.md
+++ b/charts/kyverno/README.md
@@ -214,7 +214,7 @@ The command removes all the Kubernetes components associated with the chart and
 | grafana.enabled | bool | `false` | Enable grafana dashboard creation. |
 | grafana.namespace | string | `nil` | Namespace to create the grafana dashboard configmap. If not set, it will be created in the same namespace where the chart is deployed. |
 | grafana.annotations | object | `{}` | Grafana dashboard configmap annotations. |
-| cleanupController.enabled | bool | `false` | Enable cleanup controller. |
+| cleanupController.enabled | bool | `true` | Enable cleanup controller. |
 | cleanupController.image.registry | string | `nil` | Image registry |
 | cleanupController.image.repository | string | `"ghcr.io/kyverno/cleanup-controller"` | Image repository |
 | cleanupController.image.tag | string | `nil` | Image tag Defaults to appVersion in Chart.yaml if omitted |
diff --git a/charts/kyverno/templates/cleanup-controller/secret.yaml b/charts/kyverno/templates/cleanup-controller/secret.yaml
index 7fe08adcde..7761a14f9a 100644
--- a/charts/kyverno/templates/cleanup-controller/secret.yaml
+++ b/charts/kyverno/templates/cleanup-controller/secret.yaml
@@ -40,7 +40,7 @@ webhooks:
     service:
       name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
       namespace: {{ template "kyverno.namespace" . }}
-      path: /todo
+      path: /validate
       port: 443
   failurePolicy: Fail
   matchPolicy: Equivalent
diff --git a/charts/kyverno/values.yaml b/charts/kyverno/values.yaml
index cff94a67df..a19cb342f9 100644
--- a/charts/kyverno/values.yaml
+++ b/charts/kyverno/values.yaml
@@ -478,7 +478,7 @@ grafana:
 
 cleanupController:
   # -- Enable cleanup controller.
-  enabled: false
+  enabled: true
 
   image:
     # -- Image registry
diff --git a/cmd/cleanup-controller/handlers.go b/cmd/cleanup-controller/handlers.go
new file mode 100644
index 0000000000..2a1bc7f85e
--- /dev/null
+++ b/cmd/cleanup-controller/handlers.go
@@ -0,0 +1,39 @@
+package main
+
+import (
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/kyverno/kyverno/cmd/cleanup-controller/validate"
+	"github.com/kyverno/kyverno/pkg/clients/dclient"
+	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
+	admissionv1 "k8s.io/api/admission/v1"
+)
+
+type cleanupPolicyHandlers struct {
+	client dclient.Interface
+}
+
+func NewHandlers(client dclient.Interface) CleanupPolicyHandlers {
+	return &cleanupPolicyHandlers{
+		client: client,
+	}
+}
+
+func (h *cleanupPolicyHandlers) Validate(logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
+	if request.SubResource != "" {
+		logger.V(4).Info("skip policy validation on status update")
+		return admissionutils.ResponseSuccess()
+	}
+	policy, _, err := admissionutils.GetCleanupPolicies(request)
+	if err != nil {
+		logger.Error(err, "failed to unmarshal policies from admission request")
+		return admissionutils.Response(err)
+	}
+	err = validate.ValidateCleanupPolicy(policy, h.client, false)
+	if err != nil {
+		logger.Error(err, "policy validation errors")
+		return admissionutils.Response(err)
+	}
+	return admissionutils.Response(err)
+}
diff --git a/cmd/cleanup-controller/logger/log.go b/cmd/cleanup-controller/logger/log.go
index e9dd032237..a770e60ea5 100644
--- a/cmd/cleanup-controller/logger/log.go
+++ b/cmd/cleanup-controller/logger/log.go
@@ -2,4 +2,4 @@ package logger
 
 import "github.com/kyverno/kyverno/pkg/logging"
 
-var Logger = logging.WithName("cleanupwebhooks")
+var Logger = logging.WithName("cleanuppolicywebhooks")
diff --git a/cmd/cleanup-controller/main.go b/cmd/cleanup-controller/main.go
index d867d27642..34ce9d7974 100644
--- a/cmd/cleanup-controller/main.go
+++ b/cmd/cleanup-controller/main.go
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"net/http"
 	"os"
 	"os/signal"
 	"strconv"
@@ -11,8 +12,12 @@ import (
 	"time"
 
 	"github.com/go-logr/logr"
+	"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
+	"github.com/kyverno/kyverno/pkg/clients/dclient"
+	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/wrappers"
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/logging"
+	"github.com/kyverno/kyverno/pkg/metrics"
 	corev1 "k8s.io/api/core/v1"
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
@@ -24,10 +29,16 @@ var (
 	clientRateLimitQPS   float64
 	clientRateLimitBurst int
 	logFormat            string
+	otel                 string
+	otelCollector        string
+	metricsPort          string
+	transportCreds       string
+	disableMetricsExport bool
 )
 
 const (
-	resyncPeriod = 15 * time.Minute
+	resyncPeriod         = 15 * time.Minute
+	metadataResyncPeriod = 15 * time.Minute
 )
 
 func parseFlags() error {
@@ -36,6 +47,11 @@ func parseFlags() error {
 	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
 	flag.Float64Var(&clientRateLimitQPS, "clientRateLimitQPS", 20, "Configure the maximum QPS to the Kubernetes API server from Kyverno. Uses the client default if zero.")
 	flag.IntVar(&clientRateLimitBurst, "clientRateLimitBurst", 50, "Configure the maximum burst for throttle. Uses the client default if zero.")
+	flag.StringVar(&otel, "otelConfig", "prometheus", "Set this flag to 'grpc', to enable exporting metrics to an Opentelemetry Collector. The default collector is set to \"prometheus\"")
+	flag.StringVar(&otelCollector, "otelCollector", "opentelemetrycollector.kyverno.svc.cluster.local", "Set this flag to the OpenTelemetry Collector Service Address. Kyverno will try to connect to this on the metrics port.")
+	flag.StringVar(&transportCreds, "transportCreds", "", "Set this flag to the CA secret containing the certificate which is used by our Opentelemetry Metrics Client. If empty string is set, means an insecure connection will be used")
+	flag.StringVar(&metricsPort, "metricsPort", "8000", "Expose prometheus metrics at the given port, default to 8000.")
+	flag.BoolVar(&disableMetricsExport, "disableMetrics", false, "Set this flag to 'true' to disable metrics.")
 	if err := flag.Set("v", "2"); err != nil {
 		return err
 	}
@@ -57,6 +73,59 @@ func createKubeClients(logger logr.Logger) (*rest.Config, *kubernetes.Clientset,
 	return clientConfig, kubeClient, nil
 }
 
+func createInstrumentedClients(ctx context.Context, logger logr.Logger, clientConfig *rest.Config, kubeClient *kubernetes.Clientset, metricsConfig *metrics.MetricsConfig) (versioned.Interface, dclient.Interface, error) {
+	logger = logger.WithName("instrumented-clients")
+	logger.Info("create instrumented clients...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+	kyvernoClient, err := kyvernoclient.NewForConfig(clientConfig, metricsConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+	dynamicClient, err := dclient.NewClient(ctx, clientConfig, kubeClient, metricsConfig, metadataResyncPeriod)
+	if err != nil {
+		return nil, nil, err
+	}
+	return kyvernoClient, dynamicClient, nil
+}
+
+func setupMetrics(logger logr.Logger, kubeClient kubernetes.Interface) (*metrics.MetricsConfig, context.CancelFunc, error) {
+	logger = logger.WithName("metrics")
+	logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
+	metricsConfigData, err := config.NewMetricsConfigData(kubeClient)
+	if err != nil {
+		return nil, nil, err
+	}
+	metricsAddr := ":" + metricsPort
+	metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
+		disableMetricsExport,
+		otel,
+		metricsAddr,
+		otelCollector,
+		metricsConfigData,
+		transportCreds,
+		kubeClient,
+		logging.WithName("metrics"),
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+	var cancel context.CancelFunc
+	if otel == "grpc" {
+		cancel = func() {
+			ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+			defer cancel()
+			metrics.ShutDownController(ctx, metricsPusher)
+		}
+	}
+	if otel == "prometheus" {
+		go func() {
+			if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
+				logger.Error(err, "failed to enable metrics", "address", metricsAddr)
+			}
+		}()
+	}
+	return metricsConfig, cancel, nil
+}
+
 func setupSignals() (context.Context, context.CancelFunc) {
 	return signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
 }
@@ -79,17 +148,38 @@ func main() {
 	}
 	logger := logging.WithName("setup")
 	// create client config and kube clients
-	_, kubeClient, err := createKubeClients(logger)
+	clientConfig, kubeClient, err := createKubeClients(logger)
 	if err != nil {
 		os.Exit(1)
 	}
 	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
-
 	// setup signals
 	signalCtx, signalCancel := setupSignals()
 	defer signalCancel()
+	metricsConfig, metricsShutdown, err := setupMetrics(logger, kubeClient)
+	if err != nil {
+		logger.Error(err, "failed to setup metrics")
+		os.Exit(1)
+	}
+	if metricsShutdown != nil {
+		defer metricsShutdown()
+	}
+	_, dynamicClient, err := createInstrumentedClients(signalCtx, logger, clientConfig, kubeClient, metricsConfig)
+	if err != nil {
+		logger.Error(err, "failed to create instrumented clients")
+		os.Exit(1)
+	}
+	policyHandlers := NewHandlers(
+		dynamicClient,
+	)
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
+	// start informers and wait for cache sync
+	// we need to call start again because we potentially registered new informers
+	if !startInformersAndWaitForCacheSync(signalCtx, kubeKyvernoInformer) {
+		os.Exit(1)
+	}
 	server := NewServer(
+		policyHandlers,
 		func() ([]byte, []byte, error) {
 			secret, err := secretLister.Secrets(config.KyvernoNamespace()).Get("cleanup-controller-tls")
 			if err != nil {
@@ -98,11 +188,6 @@ func main() {
 			return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
 		},
 	)
-	// start informers and wait for cache sync
-	// we need to call start again because we potentially registered new informers
-	if !startInformersAndWaitForCacheSync(signalCtx, kubeKyvernoInformer) {
-		os.Exit(1)
-	}
 	// start webhooks server
 	server.Run(signalCtx.Done())
 	// wait for termination signal
diff --git a/cmd/cleanup-controller/server.go b/cmd/cleanup-controller/server.go
index 8deb64aa56..db650c3961 100644
--- a/cmd/cleanup-controller/server.go
+++ b/cmd/cleanup-controller/server.go
@@ -8,12 +8,15 @@ import (
 
 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
+	"github.com/kyverno/kyverno/cmd/cleanup-controller/logger"
 	"github.com/kyverno/kyverno/pkg/logging"
-	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
 	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
 	admissionv1 "k8s.io/api/admission/v1"
 )
 
+// ValidatingWebhookServicePath is the path for validation webhook
+const ValidatingWebhookServicePath = "/validate"
+
 type Server interface {
 	// Run TLS server in separate thread and returns control immediately
 	Run(<-chan struct{})
@@ -21,26 +24,28 @@ type Server interface {
 	Stop(context.Context)
 }
 
+type CleanupPolicyHandlers interface {
+	// Validate performs the validation check on policy resources
+	Validate(logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
+}
+
 type server struct {
 	server *http.Server
 }
 
 type TlsProvider func() ([]byte, []byte, error)
 
-func TODO(logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse {
-	return admissionutils.ResponseSuccess()
-}
-
 // NewServer creates new instance of server accordingly to given configuration
 func NewServer(
+	policyHandlers CleanupPolicyHandlers,
 	tlsProvider TlsProvider,
 ) Server {
 	mux := httprouter.New()
 	mux.HandlerFunc(
 		"POST",
-		"/todo",
-		handlers.AdmissionHandler(TODO).
-			WithAdmission(logging.WithName("todo")),
+		ValidatingWebhookServicePath,
+		handlers.AdmissionHandler(policyHandlers.Validate).
+			WithAdmission(logger.Logger.WithName("validate")),
 	)
 	return &server{
 		server: &http.Server{
diff --git a/cmd/cleanup-controller/validate/validate.go b/cmd/cleanup-controller/validate/validate.go
index b9c53fe1f5..7640add1c4 100644
--- a/cmd/cleanup-controller/validate/validate.go
+++ b/cmd/cleanup-controller/validate/validate.go
@@ -8,8 +8,6 @@ import (
 	"github.com/kyverno/kyverno/cmd/cleanup-controller/logger"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"
 	"github.com/kyverno/kyverno/pkg/engine/variables"
-	"github.com/kyverno/kyverno/pkg/logging"
-	"github.com/kyverno/kyverno/pkg/openapi"
 	"github.com/kyverno/kyverno/pkg/policy/generate"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -59,8 +57,8 @@ func (c *Cleanup) CanIDelete(kind, namespace string) error {
 }
 
 // Validate checks the policy and rules declarations for required configurations
-func ValidateCleanupPolicy(cleanuppolicy kyvernov1alpha1.CleanupPolicyInterface, client dclient.Interface, mock bool, openApiManager openapi.Manager) error {
-	namespace := cleanuppolicy.GetNamespace()
+func ValidateCleanupPolicy(cleanuppolicy kyvernov1alpha1.CleanupPolicyInterface, client dclient.Interface, mock bool) error {
+	// namespace := cleanuppolicy.GetNamespace()
 	var res []*metav1.APIResourceList
 	clusterResources := sets.NewString()
 
@@ -88,11 +86,11 @@ func ValidateCleanupPolicy(cleanuppolicy kyvernov1alpha1.CleanupPolicyInterface,
 		return errs.ToAggregate()
 	}
 
-	for kind := range clusterResources {
-		checker := NewCleanup(client, *cleanuppolicy.GetSpec(), logging.GlobalLogger())
-		if err := checker.CanIDelete(kind, namespace); err != nil {
-			return fmt.Errorf("cannot delete kind %s in namespace %s", kind, namespace)
-		}
-	}
+	// for kind := range clusterResources {
+	// 	checker := NewCleanup(client, *cleanuppolicy.GetSpec(), logging.GlobalLogger())
+	// 	if err := checker.CanIDelete(kind, namespace); err != nil {
+	// 		return fmt.Errorf("cannot delete kind %s in namespace %s", kind, namespace)
+	// 	}
+	// }
 	return nil
 }
diff --git a/cmd/cleanup-controller/utils/utils.go b/pkg/utils/admission/cleanup.go
similarity index 60%
rename from cmd/cleanup-controller/utils/utils.go
rename to pkg/utils/admission/cleanup.go
index 8c1c711551..bbaa29335c 100644
--- a/cmd/cleanup-controller/utils/utils.go
+++ b/pkg/utils/admission/cleanup.go
@@ -1,4 +1,4 @@
-package utils
+package admission
 
 import (
 	"encoding/json"
@@ -9,18 +9,20 @@ import (
 )
 
 func UnmarshalCleanupPolicy(kind string, raw []byte) (kyvernov1alpha1.CleanupPolicyInterface, error) {
-	var policy kyvernov1alpha1.CleanupPolicyInterface
 	if kind == "CleanupPolicy" {
+		var policy *kyvernov1alpha1.CleanupPolicy
 		if err := json.Unmarshal(raw, &policy); err != nil {
-			return policy, err
+			return nil, err
+		}
+		return policy, nil
+	} else if kind == "ClusterCleanupPolicy" {
+		var policy *kyvernov1alpha1.ClusterCleanupPolicy
+		if err := json.Unmarshal(raw, &policy); err != nil {
+			return nil, err
 		}
 		return policy, nil
 	}
-	return policy, fmt.Errorf("admission request does not contain a cleanuppolicy")
-}
-
-func GetCleanupPolicy(request *admissionv1.AdmissionRequest) (kyvernov1alpha1.CleanupPolicyInterface, error) {
-	return UnmarshalCleanupPolicy(request.Kind.Kind, request.Object.Raw)
+	return nil, fmt.Errorf("admission request does not contain a cleanuppolicy")
 }
 
 func GetCleanupPolicies(request *admissionv1.AdmissionRequest) (kyvernov1alpha1.CleanupPolicyInterface, kyvernov1alpha1.CleanupPolicyInterface, error) {
@@ -35,11 +37,3 @@ func GetCleanupPolicies(request *admissionv1.AdmissionRequest) (kyvernov1alpha1.
 	}
 	return policy, emptypolicy, nil
 }
-
-func GetResourceName(request *admissionv1.AdmissionRequest) string {
-	resourceName := request.Kind.Kind + "/" + request.Name
-	if request.Namespace != "" {
-		resourceName = request.Namespace + "/" + resourceName
-	}
-	return resourceName
-}
diff --git a/pkg/utils/admission/policy.go b/pkg/utils/admission/policy.go
new file mode 100644
index 0000000000..6327d78869
--- /dev/null
+++ b/pkg/utils/admission/policy.go
@@ -0,0 +1,42 @@
+package admission
+
+import (
+	"encoding/json"
+	"fmt"
+
+	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
+	admissionv1 "k8s.io/api/admission/v1"
+)
+
+func UnmarshalPolicy(kind string, raw []byte) (kyvernov1.PolicyInterface, error) {
+	if kind == "ClusterPolicy" {
+		var policy *kyvernov1.ClusterPolicy
+		if err := json.Unmarshal(raw, &policy); err != nil {
+			return nil, err
+		}
+		return policy, nil
+	} else if kind == "Policy" {
+		var policy *kyvernov1.Policy
+		if err := json.Unmarshal(raw, &policy); err != nil {
+			return nil, err
+		}
+		return policy, nil
+	}
+	return nil, fmt.Errorf("admission request does not contain a policy")
+}
+
+func GetPolicy(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, error) {
+	return UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
+}
+
+func GetPolicies(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, kyvernov1.PolicyInterface, error) {
+	policy, err := UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
+	if err != nil {
+		return policy, nil, err
+	}
+	if request.Operation == admissionv1.Update {
+		oldPolicy, err := UnmarshalPolicy(request.Kind.Kind, request.OldObject.Raw)
+		return policy, oldPolicy, err
+	}
+	return policy, nil, nil
+}
diff --git a/pkg/utils/admission/utils.go b/pkg/utils/admission/utils.go
index f2a3e8f81b..a8e5f29cbc 100644
--- a/pkg/utils/admission/utils.go
+++ b/pkg/utils/admission/utils.go
@@ -1,46 +1,9 @@
 package admission
 
 import (
-	"encoding/json"
-	"fmt"
-
-	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	admissionv1 "k8s.io/api/admission/v1"
 )
 
-func UnmarshalPolicy(kind string, raw []byte) (kyvernov1.PolicyInterface, error) {
-	if kind == "ClusterPolicy" {
-		var policy *kyvernov1.ClusterPolicy
-		if err := json.Unmarshal(raw, &policy); err != nil {
-			return nil, err
-		}
-		return policy, nil
-	} else if kind == "Policy" {
-		var policy *kyvernov1.Policy
-		if err := json.Unmarshal(raw, &policy); err != nil {
-			return nil, err
-		}
-		return policy, nil
-	}
-	return nil, fmt.Errorf("admission request does not contain a policy")
-}
-
-func GetPolicy(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, error) {
-	return UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
-}
-
-func GetPolicies(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, kyvernov1.PolicyInterface, error) {
-	policy, err := UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
-	if err != nil {
-		return policy, nil, err
-	}
-	if request.Operation == admissionv1.Update {
-		oldPolicy, err := UnmarshalPolicy(request.Kind.Kind, request.OldObject.Raw)
-		return policy, oldPolicy, err
-	}
-	return policy, nil, nil
-}
-
 func GetResourceName(request *admissionv1.AdmissionRequest) string {
 	resourceName := request.Kind.Kind + "/" + request.Name
 	if request.Namespace != "" {
diff --git a/test/conformance/kuttl/kuttl-test.yaml b/test/conformance/kuttl/kuttl-test.yaml
index e7051c9a04..6e6a9c1ba8 100644
--- a/test/conformance/kuttl/kuttl-test.yaml
+++ b/test/conformance/kuttl/kuttl-test.yaml
@@ -17,6 +17,7 @@ testDirs:
 - ./test/conformance/kuttl/validate/clusterpolicy/standard/audit
 - ./test/conformance/kuttl/validate/clusterpolicy/standard/enforce
 - ./test/conformance/kuttl/validate/clusterpolicy/cornercases
+- ./test/conformance/kuttl/validate/cleanuppolicy
 # Verify image tests
 - ./test/conformance/kuttl/verifyImages/clusterpolicy/standard
 # Report tests
diff --git a/test/conformance/kuttl/validate/cleanuppolicy/00-assert.yaml b/test/conformance/kuttl/validate/cleanuppolicy/00-assert.yaml
new file mode 100644
index 0000000000..2f4bc970f6
--- /dev/null
+++ b/test/conformance/kuttl/validate/cleanuppolicy/00-assert.yaml
@@ -0,0 +1,17 @@
+apiVersion: kyverno.io/v1alpha1
+kind: CleanupPolicy
+metadata:
+  name: cleanuppolicy
+  namespace: default
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/cleanuppolicy/00-cleanuppolicy.yaml b/test/conformance/kuttl/validate/cleanuppolicy/00-cleanuppolicy.yaml
new file mode 100644
index 0000000000..2f4bc970f6
--- /dev/null
+++ b/test/conformance/kuttl/validate/cleanuppolicy/00-cleanuppolicy.yaml
@@ -0,0 +1,17 @@
+apiVersion: kyverno.io/v1alpha1
+kind: CleanupPolicy
+metadata:
+  name: cleanuppolicy
+  namespace: default
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/cleanuppolicy/01-assert.yaml b/test/conformance/kuttl/validate/cleanuppolicy/01-assert.yaml
new file mode 100644
index 0000000000..7997b95618
--- /dev/null
+++ b/test/conformance/kuttl/validate/cleanuppolicy/01-assert.yaml
@@ -0,0 +1,16 @@
+apiVersion: kyverno.io/v1alpha1
+kind: ClusterCleanupPolicy
+metadata:
+  name: cleanuppolicy
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/cleanuppolicy/01-clustercleanuppolicy.yaml b/test/conformance/kuttl/validate/cleanuppolicy/01-clustercleanuppolicy.yaml
new file mode 100644
index 0000000000..7997b95618
--- /dev/null
+++ b/test/conformance/kuttl/validate/cleanuppolicy/01-clustercleanuppolicy.yaml
@@ -0,0 +1,16 @@
+apiVersion: kyverno.io/v1alpha1
+kind: ClusterCleanupPolicy
+metadata:
+  name: cleanuppolicy
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/cleanuppolicy/02-create-task.yaml b/test/conformance/kuttl/validate/cleanuppolicy/02-create-task.yaml
new file mode 100644
index 0000000000..9623c1a599
--- /dev/null
+++ b/test/conformance/kuttl/validate/cleanuppolicy/02-create-task.yaml
@@ -0,0 +1,14 @@
+## Checks that the badcleanuppolicy.yaml file CANNOT be successfully applied. If it can, fail the test as this is incorrect.
+
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+    if kubectl apply -f badcleanuppolicy.yaml
+    then 
+      echo "Test failed. Policy was created when it shouldn't have been."
+      exit 1 
+    else 
+      echo "Test succeeded. Policy was not created as intended."
+      exit 0
+    fi
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/cleanuppolicy/badcleanuppolicy.yaml b/test/conformance/kuttl/validate/cleanuppolicy/badcleanuppolicy.yaml
new file mode 100644
index 0000000000..63be5c6f2e
--- /dev/null
+++ b/test/conformance/kuttl/validate/cleanuppolicy/badcleanuppolicy.yaml
@@ -0,0 +1,17 @@
+apiVersion: kyverno.io/v1alpha1
+kind: CleanupPolicy
+metadata:
+  name: cleanuppolicy
+  namespace: default
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "invalid-schedule"
\ No newline at end of file