From 99d988e98cafad6a630418bc357b8ed6ece1a845 Mon Sep 17 00:00:00 2001
From: Vyom Yadav <73882557+Vyom-Yadav@users.noreply.github.com>
Date: Fri, 9 Dec 2022 22:15:23 +0530
Subject: [PATCH] feat: add support for subresources to validating and mutating
 policies (#4916)

* feat: add support for subresources to validating and mutating policies

Signed-off-by: Vyom-Yadav <jackhammervyom@gmail.com>

* Add CLI test cases with subresources for validating policies

Signed-off-by: Vyom-Yadav <jackhammervyom@gmail.com>

* Fix existing e2e tests for validating policies and remove tests migrated to kuttl

Signed-off-by: Vyom-Yadav <jackhammervyom@gmail.com>

* Add kuttl e2e tests for validating policies with subresources

Signed-off-by: Vyom-Yadav <jackhammervyom@gmail.com>

* Add kuttl e2e tests for mutating policies with subresources

Signed-off-by: Vyom-Yadav <jackhammervyom@gmail.com>

* Add kuttl e2e tests for validating policy bypass by manipulating preconditions

Signed-off-by: Vyom-Yadav <jackhammervyom@gmail.com>

Signed-off-by: Vyom-Yadav <jackhammervyom@gmail.com>
---
 CHANGELOG.md                                  |   7 +
 Makefile                                      |   2 +
 .../kubectl-kyverno/apply/apply_command.go    |  17 +-
 cmd/cli/kubectl-kyverno/test/test_command.go  |  17 +-
 .../kubectl-kyverno/utils/common/common.go    |  98 +++++-
 .../utils/common/common_test.go               |  19 +-
 cmd/cli/kubectl-kyverno/utils/common/fetch.go | 146 +++++---
 cmd/kyverno/main.go                           |   5 +-
 pkg/background/generate/generate.go           |   2 +-
 pkg/background/mutate/mutate.go               |  21 +-
 pkg/clients/dclient/client.go                 |   8 +-
 pkg/clients/dclient/discovery.go              | 289 +++++++++++----
 pkg/clients/dclient/discovery_test.go         | 201 ++++++++++-
 pkg/clients/dclient/fake.go                   |   8 +-
 pkg/clients/dclient/utils.go                  |   6 +-
 pkg/controllers/policycache/controller.go     |  33 +-
 pkg/controllers/report/resource/controller.go |   2 +-
 pkg/controllers/webhook/controller.go         |  43 +--
 pkg/controllers/webhook/utils.go              |   6 -
 pkg/engine/background.go                      |   7 +-
 pkg/engine/common.go                          |  62 ++++
 pkg/engine/common_test.go                     |  78 ++++
 pkg/engine/imageVerify.go                     |  24 +-
 pkg/engine/imageVerifyValidate.go             |  10 +-
 pkg/engine/k8smanifest.go                     |   4 +-
 pkg/engine/loadtargets.go                     | 109 ++++--
 pkg/engine/mutate/patch/patchesUtils.go       |   6 +-
 pkg/engine/mutate/patch/patchesUtils_test.go  |   2 +-
 pkg/engine/mutation.go                        |  47 ++-
 pkg/engine/policyContext.go                   |  49 ++-
 pkg/engine/response/response.go               |   9 +
 pkg/engine/utils.go                           |  81 +++--
 pkg/engine/utils_test.go                      |  79 ++++-
 pkg/engine/validation.go                      | 104 ++----
 pkg/policy/common.go                          |   2 +-
 pkg/policy/existing.go                        |   2 +-
 pkg/policy/validate.go                        | 117 +++++-
 pkg/policycache/cache.go                      |   6 +-
 pkg/policycache/cache_test.go                 |  10 +-
 pkg/policycache/store.go                      |  23 +-
 pkg/utils/kube/kind.go                        |  79 +++--
 pkg/utils/kube/kind_test.go                   |  63 +++-
 pkg/webhooks/resource/handlers_test.go        |  23 +-
 .../resource/validation/validation.go         |   4 +-
 .../deny-exec-by-pod-label.yaml               |  41 +++
 .../test/exec-subresource/kyverno-test.yaml   |  13 +
 test/cli/test/exec-subresource/resource.yaml  |  13 +
 test/cli/test/exec-subresource/values.yaml    |  17 +
 ...enforce-replicas-for-scale-subresource.yml |  24 ++
 .../test/scale-subresource/kyverno-test.yaml  |  13 +
 test/cli/test/scale-subresource/resource.yaml |   7 +
 test/cli/test/scale-subresource/values.yaml   |  11 +
 .../01-assert.yaml                            |   9 +
 .../01-manifests.yaml                         |  41 +++
 .../02-assert.yaml                            |   7 +
 .../02-script.yaml                            |   6 +
 .../99-cleanup.yaml                           |   7 +
 .../mutate-existing-node-status/README.md     |  22 ++
 .../clear-modified-node-status.sh             |  33 ++
 .../modify-resource-filters.sh                |  22 ++
 .../mutate-node-status/01-assert.yaml         |   9 +
 .../mutate-node-status/01-manifests.yaml      |  17 +
 .../mutate-node-status/02-assert.yaml         |   7 +
 .../mutate-node-status/02-script.yaml         |   5 +
 .../mutate-node-status/99-cleanup.yaml        |   6 +
 .../standard/mutate-node-status/README.md     |  22 ++
 .../clear-modified-node-status.sh             |  33 ++
 .../modify-resource-filters.sh                |  22 ++
 .../send-request-to-status-subresource.sh     |  38 ++
 .../api-initiated-pod-eviction/01-assert.yaml |   9 +
 .../01-manifests.yaml                         |  45 +++
 .../api-initiated-pod-eviction/02-script.yaml |   4 +
 .../99-cleanup.yaml                           |   6 +
 .../api-initiated-pod-eviction/README.md      |   3 +
 .../api-initiated-eviction.sh                 |  35 ++
 .../api-initiated-pod-eviction/eviction.json  |   8 +
 .../block-pod-exec-requests/01-assert.yaml    |   9 +
 .../block-pod-exec-requests/01-manifests.yaml |  59 ++++
 .../block-pod-exec-requests/02-script.yaml    |  12 +
 .../block-pod-exec-requests/99-cleanup.yaml   |   6 +
 .../enforce/block-pod-exec-requests/README.md |   3 +
 .../scaling-with-kubectl-scale/01-assert.yaml |  19 +
 .../01-manifests.yaml                         |  49 +++
 .../scaling-with-kubectl-scale/02-script.yaml |  12 +
 .../99-cleanup.yaml                           |   6 +
 .../scaling-with-kubectl-scale/README.md      |   3 +
 .../adding-key-to-config-map/01-assert.yaml   |  10 +
 .../01-manifests.yaml                         |  39 ++
 .../adding-key-to-config-map/02-script.yaml   |  12 +
 .../adding-key-to-config-map/99-cleanup.yaml  |   6 +
 .../e2e/adding-key-to-config-map/README.md    |  21 ++
 test/e2e/validate/config.go                   | 163 ++-------
 test/e2e/validate/resources.go                | 333 +-----------------
 test/e2e/validate/validate_test.go            | 248 +++++++------
 94 files changed, 2451 insertions(+), 1044 deletions(-)
 create mode 100644 pkg/engine/common.go
 create mode 100644 pkg/engine/common_test.go
 create mode 100644 test/cli/test/exec-subresource/deny-exec-by-pod-label.yaml
 create mode 100644 test/cli/test/exec-subresource/kyverno-test.yaml
 create mode 100644 test/cli/test/exec-subresource/resource.yaml
 create mode 100644 test/cli/test/exec-subresource/values.yaml
 create mode 100644 test/cli/test/scale-subresource/enforce-replicas-for-scale-subresource.yml
 create mode 100644 test/cli/test/scale-subresource/kyverno-test.yaml
 create mode 100644 test/cli/test/scale-subresource/resource.yaml
 create mode 100644 test/cli/test/scale-subresource/values.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-assert.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-manifests.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-assert.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-script.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/99-cleanup.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/README.md
 create mode 100755 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/clear-modified-node-status.sh
 create mode 100755 test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/modify-resource-filters.sh
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-assert.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-manifests.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-assert.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-script.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/99-cleanup.yaml
 create mode 100644 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/README.md
 create mode 100755 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/clear-modified-node-status.sh
 create mode 100755 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/modify-resource-filters.sh
 create mode 100755 test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/send-request-to-status-subresource.sh
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-assert.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-manifests.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/02-script.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/99-cleanup.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/README.md
 create mode 100755 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/api-initiated-eviction.sh
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/eviction.json
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-assert.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-manifests.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/02-script.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/99-cleanup.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/README.md
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-assert.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-manifests.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/02-script.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/99-cleanup.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/README.md
 create mode 100644 test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-assert.yaml
 create mode 100644 test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-manifests.yaml
 create mode 100644 test/conformance/kuttl/validate/e2e/adding-key-to-config-map/02-script.yaml
 create mode 100644 test/conformance/kuttl/validate/e2e/adding-key-to-config-map/99-cleanup.yaml
 create mode 100644 test/conformance/kuttl/validate/e2e/adding-key-to-config-map/README.md

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 928f4db93c..69c01bf816 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,10 @@
+## v1.9.0-rc.2
+
+### Note
+
+- The webhook is no longer updated to match `pods/ephemeralcontainers` when a policy only specifies `pods`. To match `pods/ephemeralcontainers`, policies must specify it explicitly.
+- The webhook is no longer updated to match `services/status` when a policy only specifies `services`. To match `services/status`, policies must specify it explicitly.
+
 ## v1.9.0-rc.1
 
 ### Note
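
To illustrate the note above: after this change, a policy that should keep applying to ephemeral containers has to list the subresource explicitly in its match block. A minimal sketch, assuming a hypothetical policy name and rule (the validate logic is illustrative only; the kind string follows the ParentKind/SubresourceName convention described later in this patch):

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: restrict-ephemeral-containers   # hypothetical name
spec:
  validationFailureAction: Enforce
  rules:
    - name: deny-ephemeral-containers
      match:
        any:
          - resources:
              kinds:
                - Pod                        # no longer implies the subresource
                - Pod/ephemeralcontainers    # must now be listed explicitly
      validate:
        message: "adding ephemeral containers is not allowed"
        deny: {}   # empty conditions: deny every matching request
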
diff --git a/Makefile b/Makefile
index 9dfc70c922..af7d0cbe21 100644
--- a/Makefile
+++ b/Makefile
@@ -614,6 +614,7 @@ test-e2e:
 	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/metrics -v
 	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/mutate -v
 	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/generate -v
+	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/validate -v
 
 test-e2e-local:
 	kubectl apply -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/github/rbac.yaml
@@ -622,6 +623,7 @@ test-e2e-local:
 	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/metrics -v
 	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/mutate -v
 	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/generate -v
+	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/validate -v
 	kill  $!
 
 helm-test-values:
diff --git a/cmd/cli/kubectl-kyverno/apply/apply_command.go b/cmd/cli/kubectl-kyverno/apply/apply_command.go
index 1badf1b3cc..32dd833e4f 100644
--- a/cmd/cli/kubectl-kyverno/apply/apply_command.go
+++ b/cmd/cli/kubectl-kyverno/apply/apply_command.go
@@ -25,7 +25,7 @@ import (
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
-	log "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 	yaml1 "sigs.k8s.io/yaml"
 )
 
@@ -131,6 +131,16 @@ To apply policy with variables:
 			- name: <namespace2 name>
 			labels:
 				<label key>: <label value>
+        # Required if the policy matches on a Kind/Subresource
+        subresources:
+          - subresource:
+              name: <name of subresource>
+              kind: <kind of subresource>
+              version: <version of subresource>
+            parentResource:
+              name: <name of parent resource>
+              kind: <kind of parent resource>
+              version: <version of parent resource>
 
 More info: https://kyverno.io/docs/kyverno-cli/
 `
@@ -192,7 +202,7 @@ func (c *ApplyCommandConfig) applyCommandHelper() (rc *common.ResultCounts, reso
 		return rc, resources, skipInvalidPolicies, pvInfos, sanitizederror.NewWithError("pass the values either using set flag or values_file flag", err)
 	}
 
-	variables, globalValMap, valuesMap, namespaceSelectorMap, err := common.GetVariable(c.VariablesString, c.ValuesFile, fs, false, "")
+	variables, globalValMap, valuesMap, namespaceSelectorMap, subresources, err := common.GetVariable(c.VariablesString, c.ValuesFile, fs, false, "")
 	if err != nil {
 		if !sanitizederror.IsErrorSanitized(err) {
 			return rc, resources, skipInvalidPolicies, pvInfos, sanitizederror.NewWithError("failed to decode yaml", err)
@@ -391,7 +401,7 @@ func (c *ApplyCommandConfig) applyCommandHelper() (rc *common.ResultCounts, reso
 			}
 		}
 
-		kindOnwhichPolicyIsApplied := common.GetKindsFromPolicy(policy)
+		kindOnwhichPolicyIsApplied := common.GetKindsFromPolicy(policy, subresources, dClient)
 
 		for _, resource := range resources {
 			thisPolicyResourceValues, err := common.CheckVariableForPolicy(valuesMap, globalValMap, policy.GetName(), resource.GetName(), resource.GetKind(), variables, kindOnwhichPolicyIsApplied, variable)
@@ -412,6 +422,7 @@ func (c *ApplyCommandConfig) applyCommandHelper() (rc *common.ResultCounts, reso
 				PrintPatchResource:   true,
 				Client:               dClient,
 				AuditWarn:            c.AuditWarn,
+				Subresources:         subresources,
 			}
 			_, info, err := common.ApplyPolicyOnResource(applyPolicyConfig)
 			if err != nil {
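
As a concrete instance of the `subresources` section documented in the help text above, a values file entry mirroring the `pods/eviction` case exercised in the unit test later in this patch might look like the following sketch (only the `subresources` section is shown; field values are illustrative):

subresources:
  - subresource:
      name: "pods/eviction"
      kind: "Eviction"
      group: "policy"
      version: "v1"
    parentResource:
      name: "pods"
      kind: "Pod"
      version: "v1"

With such an entry the CLI can resolve a policy kind like `Pod/eviction` to the `Eviction` kind without querying a live cluster (see `getSubresourceKind` below).
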
diff --git a/cmd/cli/kubectl-kyverno/test/test_command.go b/cmd/cli/kubectl-kyverno/test/test_command.go
index 9c512af3e5..909669d933 100644
--- a/cmd/cli/kubectl-kyverno/test/test_command.go
+++ b/cmd/cli/kubectl-kyverno/test/test_command.go
@@ -36,7 +36,7 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/util/yaml"
-	log "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log"
 )
 
 var longHelp = `
@@ -140,6 +140,16 @@ policies:
   - name: <resource_name_2>
     values:
       foo: bin
+# Required if the policy matches on a Kind/Subresource
+subresources:
+  - subresource:
+      name: <name of subresource>
+      kind: <kind of subresource>
+      version: <version of subresource>
+    parentResource:
+      name: <name of parent resource>
+      kind: <kind of parent resource>
+      version: <version of parent resource>
 
 **RESULT DESCRIPTIONS**:
 
@@ -748,7 +758,7 @@ func applyPoliciesFromPath(fs billy.Filesystem, policyBytes []byte, isGit bool,
 	valuesFile := values.Variables
 	userInfoFile := values.UserInfo
 
-	variables, globalValMap, valuesMap, namespaceSelectorMap, err := common.GetVariable(variablesString, values.Variables, fs, isGit, policyResourcePath)
+	variables, globalValMap, valuesMap, namespaceSelectorMap, subresources, err := common.GetVariable(variablesString, values.Variables, fs, isGit, policyResourcePath)
 	if err != nil {
 		if !sanitizederror.IsErrorSanitized(err) {
 			return sanitizederror.NewWithError("failed to decode yaml", err)
@@ -911,7 +921,7 @@ func applyPoliciesFromPath(fs billy.Filesystem, policyBytes []byte, isGit bool,
 			}
 		}
 
-		kindOnwhichPolicyIsApplied := common.GetKindsFromPolicy(policy)
+		kindOnwhichPolicyIsApplied := common.GetKindsFromPolicy(policy, subresources, dClient)
 
 		for _, resource := range noDuplicateResources {
 			thisPolicyResourceValues, err := common.CheckVariableForPolicy(valuesMap, globalValMap, policy.GetName(), resource.GetName(), resource.GetKind(), variables, kindOnwhichPolicyIsApplied, variable)
@@ -929,6 +939,7 @@ func applyPoliciesFromPath(fs billy.Filesystem, policyBytes []byte, isGit bool,
 				Rc:                        &resultCounts,
 				RuleToCloneSourceResource: ruleToCloneSourceResource,
 				Client:                    dClient,
+				Subresources:              subresources,
 			}
 			ers, info, err := common.ApplyPolicyOnResource(applyPolicyConfig)
 			if err != nil {
diff --git a/cmd/cli/kubectl-kyverno/utils/common/common.go b/cmd/cli/kubectl-kyverno/utils/common/common.go
index b16e6b4ca5..ca6aa54421 100644
--- a/cmd/cli/kubectl-kyverno/utils/common/common.go
+++ b/cmd/cli/kubectl-kyverno/utils/common/common.go
@@ -27,8 +27,10 @@ import (
 	ut "github.com/kyverno/kyverno/pkg/engine/utils"
 	"github.com/kyverno/kyverno/pkg/engine/variables"
 	"github.com/kyverno/kyverno/pkg/registryclient"
+	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
 	yamlutils "github.com/kyverno/kyverno/pkg/utils/yaml"
 	yamlv2 "gopkg.in/yaml.v2"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/util/yaml"
@@ -58,6 +60,7 @@ type Values struct {
 	Policies           []Policy            `json:"policies"`
 	GlobalValues       map[string]string   `json:"globalValues"`
 	NamespaceSelectors []NamespaceSelector `json:"namespaceSelector"`
+	Subresources       []Subresource       `json:"subresources"`
 }
 
 type Resource struct {
@@ -65,6 +68,11 @@ type Resource struct {
 	Values map[string]interface{} `json:"values"`
 }
 
+type Subresource struct {
+	APIResource    metav1.APIResource `json:"subresource"`
+	ParentResource metav1.APIResource `json:"parentResource"`
+}
+
 type NamespaceSelector struct {
 	Name   string            `json:"name"`
 	Labels map[string]string `json:"labels"`
@@ -85,6 +93,7 @@ type ApplyPolicyConfig struct {
 	RuleToCloneSourceResource map[string]string
 	Client                    dclient.Interface
 	AuditWarn                 bool
+	Subresources              []Subresource
 }
 
 // HasVariables - check for variables in the policy
@@ -215,11 +224,12 @@ func RemoveDuplicateAndObjectVariables(matches [][]string) string {
 	return variableStr
 }
 
-func GetVariable(variablesString, valuesFile string, fs billy.Filesystem, isGit bool, policyResourcePath string) (map[string]string, map[string]string, map[string]map[string]Resource, map[string]map[string]string, error) {
+func GetVariable(variablesString, valuesFile string, fs billy.Filesystem, isGit bool, policyResourcePath string) (map[string]string, map[string]string, map[string]map[string]Resource, map[string]map[string]string, []Subresource, error) {
 	valuesMapResource := make(map[string]map[string]Resource)
 	valuesMapRule := make(map[string]map[string]Rule)
 	namespaceSelectorMap := make(map[string]map[string]string)
 	variables := make(map[string]string)
+	subresources := make([]Subresource, 0)
 	globalValMap := make(map[string]string)
 	reqObjVars := ""
 
@@ -259,17 +269,17 @@ func GetVariable(variablesString, valuesFile string, fs billy.Filesystem, isGit
 		}
 
 		if err != nil {
-			return variables, globalValMap, valuesMapResource, namespaceSelectorMap, sanitizederror.NewWithError("unable to read yaml", err)
+			return variables, globalValMap, valuesMapResource, namespaceSelectorMap, subresources, sanitizederror.NewWithError("unable to read yaml", err)
 		}
 
 		valuesBytes, err := yaml.ToJSON(yamlFile)
 		if err != nil {
-			return variables, globalValMap, valuesMapResource, namespaceSelectorMap, sanitizederror.NewWithError("failed to convert json", err)
+			return variables, globalValMap, valuesMapResource, namespaceSelectorMap, subresources, sanitizederror.NewWithError("failed to convert json", err)
 		}
 
 		values := &Values{}
 		if err := json.Unmarshal(valuesBytes, values); err != nil {
-			return variables, globalValMap, valuesMapResource, namespaceSelectorMap, sanitizederror.NewWithError("failed to decode yaml", err)
+			return variables, globalValMap, valuesMapResource, namespaceSelectorMap, subresources, sanitizederror.NewWithError("failed to decode yaml", err)
 		}
 
 		if values.GlobalValues == nil {
@@ -321,15 +331,16 @@ func GetVariable(variablesString, valuesFile string, fs billy.Filesystem, isGit
 		for _, n := range values.NamespaceSelectors {
 			namespaceSelectorMap[n.Name] = n.Labels
 		}
+
+		subresources = values.Subresources
 	}
 
 	if reqObjVars != "" {
-		fmt.Printf(("\nNOTICE: request.object.* variables are automatically parsed from the supplied resource. Ignoring value of variables `%v`.\n"), reqObjVars)
+		fmt.Printf("\nNOTICE: request.object.* variables are automatically parsed from the supplied resource. Ignoring value of variables `%v`.\n", reqObjVars)
 	}
 
 	if globalValMap != nil {
-		_, exist := globalValMap["request.operation"]
-		if !exist {
+		if _, ok := globalValMap["request.operation"]; !ok {
 			globalValMap["request.operation"] = "CREATE"
 			log.Log.V(3).Info("Defaulting request.operation to CREATE")
 		}
@@ -355,7 +366,7 @@ func GetVariable(variablesString, valuesFile string, fs billy.Filesystem, isGit
 		Policies: storePolicies,
 	})
 
-	return variables, globalValMap, valuesMapResource, namespaceSelectorMap, nil
+	return variables, globalValMap, valuesMapResource, namespaceSelectorMap, subresources, nil
 }
 
 // ApplyPolicyOnResource - function to apply policy on resource
@@ -451,12 +462,30 @@ OuterLoop:
 		log.Log.Error(err, "failed to add image variables to context")
 	}
 
+	subresources := make([]struct {
+		APIResource    metav1.APIResource
+		ParentResource metav1.APIResource
+	}, 0)
+
+	// If the --cluster flag is not set, subresources must come from the values file and be added to the context
+	if c.Client == nil {
+		for _, subresource := range c.Subresources {
+			subresources = append(subresources, struct {
+				APIResource    metav1.APIResource
+				ParentResource metav1.APIResource
+			}{
+				APIResource: subresource.APIResource, ParentResource: subresource.ParentResource,
+			})
+		}
+	}
+
 	policyContext := engine.NewPolicyContextWithJsonContext(ctx).
 		WithPolicy(c.Policy).
 		WithNewResource(*updatedResource).
 		WithNamespaceLabels(namespaceLabels).
 		WithAdmissionInfo(c.UserInfo).
-		WithClient(c.Client)
+		WithClient(c.Client).
+		WithSubresourcesInPolicy(subresources)
 
 	mutateResponse := engine.Mutate(context.Background(), registryclient.NewOrDie(), policyContext)
 	if mutateResponse != nil {
@@ -942,19 +971,64 @@ func CheckVariableForPolicy(valuesMap map[string]map[string]Resource, globalValM
 	return thisPolicyResourceValues, nil
 }
 
-func GetKindsFromPolicy(policy kyvernov1.PolicyInterface) map[string]struct{} {
+func GetKindsFromPolicy(policy kyvernov1.PolicyInterface, subresources []Subresource, dClient dclient.Interface) map[string]struct{} {
 	kindOnwhichPolicyIsApplied := make(map[string]struct{})
 	for _, rule := range autogen.ComputeRules(policy) {
 		for _, kind := range rule.MatchResources.ResourceDescription.Kinds {
-			kindOnwhichPolicyIsApplied[kind] = struct{}{}
+			k, err := getKind(kind, subresources, dClient)
+			if err != nil {
+				fmt.Printf("Error: %s", err.Error())
+				continue
+			}
+			kindOnwhichPolicyIsApplied[k] = struct{}{}
 		}
 		for _, kind := range rule.ExcludeResources.ResourceDescription.Kinds {
-			kindOnwhichPolicyIsApplied[kind] = struct{}{}
+			k, err := getKind(kind, subresources, dClient)
+			if err != nil {
+				fmt.Printf("Error: %s", err.Error())
+				continue
+			}
+			kindOnwhichPolicyIsApplied[k] = struct{}{}
 		}
 	}
 	return kindOnwhichPolicyIsApplied
 }
 
+func getKind(kind string, subresources []Subresource, dClient dclient.Interface) (string, error) {
+	gv, k := kubeutils.GetKindFromGVK(kind)
+	parentKind, subresource := kubeutils.SplitSubresource(k)
+	var err error
+	if subresource != "" {
+		if dClient != nil {
+			var apiResource *metav1.APIResource
+			apiResource, _, _, err = dClient.Discovery().FindResource(gv, k)
+			if err == nil {
+				k = apiResource.Kind
+			}
+		} else {
+			k, err = getSubresourceKind(gv, parentKind, subresource, subresources)
+		}
+	}
+	return k, err
+}
+
+func getSubresourceKind(groupVersion, parentKind, subresourceName string, subresources []Subresource) (string, error) {
+	for _, subresource := range subresources {
+		parentResourceGroupVersion := metav1.GroupVersion{
+			Group:   subresource.ParentResource.Group,
+			Version: subresource.ParentResource.Version,
+		}.String()
+		if groupVersion == "" || kubeutils.GroupVersionMatches(groupVersion, parentResourceGroupVersion) {
+			if parentKind == subresource.ParentResource.Kind {
+				if strings.ToLower(subresourceName) == strings.Split(subresource.APIResource.Name, "/")[1] {
+					return subresource.APIResource.Kind, nil
+				}
+			}
+		}
+	}
+	return "", sanitizederror.NewWithError(fmt.Sprintf("subresource %s not found for parent resource %s", subresourceName, parentKind), nil)
+}
+
 // GetResourceFromPath - get patchedResource and generatedResource from given path
 func GetResourceFromPath(fs billy.Filesystem, path string, isGit bool, policyResourcePath string, resourceType string) (unstructured.Unstructured, error) {
 	var resourceBytes []byte
diff --git a/cmd/cli/kubectl-kyverno/utils/common/common_test.go b/cmd/cli/kubectl-kyverno/utils/common/common_test.go
index fb1fdaba47..4297e7084b 100644
--- a/cmd/cli/kubectl-kyverno/utils/common/common_test.go
+++ b/cmd/cli/kubectl-kyverno/utils/common/common_test.go
@@ -3,9 +3,10 @@ package common
 import (
 	"testing"
 
-	v1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
+	"github.com/kyverno/kyverno/api/kyverno/v1beta1"
 	yamlutils "github.com/kyverno/kyverno/pkg/utils/yaml"
 	"gotest.tools/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 var policyNamespaceSelector = []byte(`{
@@ -187,3 +188,19 @@ func Test_GetGitBranchOrPolicyPaths(t *testing.T) {
 		}
 	}
 }
+
+func Test_getSubresourceKind(t *testing.T) {
+	podAPIResource := metav1.APIResource{Name: "pods", SingularName: "", Namespaced: true, Kind: "Pod"}
+	podEvictionAPIResource := metav1.APIResource{Name: "pods/eviction", SingularName: "", Namespaced: true, Group: "policy", Version: "v1", Kind: "Eviction"}
+
+	subresources := []Subresource{
+		{
+			APIResource:    podEvictionAPIResource,
+			ParentResource: podAPIResource,
+		},
+	}
+
+	subresourceKind, err := getSubresourceKind("", "Pod", "eviction", subresources)
+	assert.NilError(t, err)
+	assert.Equal(t, subresourceKind, "Eviction")
+}
diff --git a/cmd/cli/kubectl-kyverno/utils/common/fetch.go b/cmd/cli/kubectl-kyverno/utils/common/fetch.go
index be460d2e52..b1ad2eb813 100644
--- a/cmd/cli/kubectl-kyverno/utils/common/fetch.go
+++ b/cmd/cli/kubectl-kyverno/utils/common/fetch.go
@@ -15,9 +15,8 @@ import (
 	"github.com/kyverno/kyverno/pkg/autogen"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"
 	engineutils "github.com/kyverno/kyverno/pkg/engine/utils"
+	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
 	yamlutils "github.com/kyverno/kyverno/pkg/utils/yaml"
-	"golang.org/x/text/cases"
-	"golang.org/x/text/language"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes/scheme"
@@ -29,27 +28,33 @@ import (
 // the resources are fetched from
 // - local paths to resources, if given
 // - the k8s cluster, if given
-func GetResources(policies []kyvernov1.PolicyInterface, resourcePaths []string, dClient dclient.Interface, cluster bool, namespace string, policyReport bool) ([]*unstructured.Unstructured, error) {
+func GetResources(
+	policies []kyvernov1.PolicyInterface, resourcePaths []string, dClient dclient.Interface, cluster bool,
+	namespace string, policyReport bool,
+) ([]*unstructured.Unstructured, error) {
 	resources := make([]*unstructured.Unstructured, 0)
 	var err error
-	resourceTypesMap := make(map[string]bool)
-	var resourceTypes []string
-
-	for _, policy := range policies {
-		for _, rule := range autogen.ComputeRules(policy) {
-			resourceTypesInRule := GetKindsFromRule(rule)
-			for resourceKind := range resourceTypesInRule {
-				resourceTypesMap[resourceKind] = true
-			}
-		}
-	}
-
-	for kind := range resourceTypesMap {
-		resourceTypes = append(resourceTypes, kind)
-	}
 
 	if cluster && dClient != nil {
-		resources, err = whenClusterIsTrue(resourceTypes, dClient, namespace, resourcePaths, policyReport)
+		resourceTypesMap := make(map[schema.GroupVersionKind]bool)
+		var resourceTypes []schema.GroupVersionKind
+		var subresourceMap map[schema.GroupVersionKind]Subresource
+
+		for _, policy := range policies {
+			for _, rule := range autogen.ComputeRules(policy) {
+				var resourceTypesInRule map[schema.GroupVersionKind]bool
+				resourceTypesInRule, subresourceMap = GetKindsFromRule(rule, dClient)
+				for resourceKind := range resourceTypesInRule {
+					resourceTypesMap[resourceKind] = true
+				}
+			}
+		}
+
+		for kind := range resourceTypesMap {
+			resourceTypes = append(resourceTypes, kind)
+		}
+
+		resources, err = whenClusterIsTrue(resourceTypes, subresourceMap, dClient, namespace, resourcePaths, policyReport)
 		if err != nil {
 			return resources, err
 		}
@@ -62,9 +67,9 @@ func GetResources(policies []kyvernov1.PolicyInterface, resourcePaths []string,
 	return resources, err
 }
 
-func whenClusterIsTrue(resourceTypes []string, dClient dclient.Interface, namespace string, resourcePaths []string, policyReport bool) ([]*unstructured.Unstructured, error) {
+func whenClusterIsTrue(resourceTypes []schema.GroupVersionKind, subresourceMap map[schema.GroupVersionKind]Subresource, dClient dclient.Interface, namespace string, resourcePaths []string, policyReport bool) ([]*unstructured.Unstructured, error) {
 	resources := make([]*unstructured.Unstructured, 0)
-	resourceMap, err := getResourcesOfTypeFromCluster(resourceTypes, dClient, namespace)
+	resourceMap, err := getResourcesOfTypeFromCluster(resourceTypes, subresourceMap, dClient, namespace)
 	if err != nil {
 		return nil, err
 	}
@@ -189,24 +194,53 @@ func GetResource(resourceBytes []byte) ([]*unstructured.Unstructured, error) {
 	return resources, nil
 }
 
-func getResourcesOfTypeFromCluster(resourceTypes []string, dClient dclient.Interface, namespace string) (map[string]*unstructured.Unstructured, error) {
+func getResourcesOfTypeFromCluster(resourceTypes []schema.GroupVersionKind, subresourceMap map[schema.GroupVersionKind]Subresource, dClient dclient.Interface, namespace string) (map[string]*unstructured.Unstructured, error) {
 	r := make(map[string]*unstructured.Unstructured)
 
 	for _, kind := range resourceTypes {
-		resourceList, err := dClient.ListResource(context.TODO(), "", kind, namespace, nil)
+		resourceList, err := dClient.ListResource(context.TODO(), kind.GroupVersion().String(), kind.Kind, namespace, nil)
 		if err != nil {
 			continue
 		}
 
-		version := resourceList.GetAPIVersion()
+		gvk := resourceList.GroupVersionKind()
 		for _, resource := range resourceList.Items {
-			key := kind + "-" + resource.GetNamespace() + "-" + resource.GetName()
-			r[key] = resource.DeepCopy()
+			key := kind.Kind + "-" + resource.GetNamespace() + "-" + resource.GetName()
 			resource.SetGroupVersionKind(schema.GroupVersionKind{
-				Group:   "",
-				Version: version,
-				Kind:    kind,
+				Group:   gvk.Group,
+				Version: gvk.Version,
+				Kind:    kind.Kind,
 			})
+			r[key] = resource.DeepCopy()
+		}
+	}
+
+	for _, subresource := range subresourceMap {
+		parentGV := schema.GroupVersion{Group: subresource.ParentResource.Group, Version: subresource.ParentResource.Version}
+		resourceList, err := dClient.ListResource(context.TODO(), parentGV.String(), subresource.ParentResource.Kind, namespace, nil)
+		if err != nil {
+			continue
+		}
+
+		parentResourceNames := make([]string, 0)
+		for _, resource := range resourceList.Items {
+			parentResourceNames = append(parentResourceNames, resource.GetName())
+		}
+
+		for _, parentResourceName := range parentResourceNames {
+			subresourceName := strings.Split(subresource.APIResource.Name, "/")[1]
+			resource, err := dClient.GetResource(context.TODO(), parentGV.String(), subresource.ParentResource.Kind, namespace, parentResourceName, subresourceName)
+			if err != nil {
+				fmt.Printf("Error: %s", err.Error())
+				continue
+			}
+			key := subresource.APIResource.Kind + "-" + resource.GetNamespace() + "-" + resource.GetName()
+			resource.SetGroupVersionKind(schema.GroupVersionKind{
+				Group:   subresource.APIResource.Group,
+				Version: subresource.APIResource.Version,
+				Kind:    subresource.APIResource.Kind,
+			})
+			r[key] = resource.DeepCopy()
 		}
 	}
 	return r, nil
@@ -280,7 +314,7 @@ func convertResourceToUnstructured(resourceYaml []byte) (*unstructured.Unstructu
 	return resource, nil
 }
 
-// GetPatchedResource converts raw bytes to unstructured object
+// GetPatchedAndGeneratedResource converts raw bytes to unstructured object
 func GetPatchedAndGeneratedResource(resourceBytes []byte) (unstructured.Unstructured, error) {
 	getResource, err := GetResource(resourceBytes)
 	if err != nil {
@@ -291,24 +325,17 @@ func GetPatchedAndGeneratedResource(resourceBytes []byte) (unstructured.Unstruct
 }
 
 // GetKindsFromRule will return the kinds from policy match block
-func GetKindsFromRule(rule kyvernov1.Rule) map[string]bool {
-	resourceTypesMap := make(map[string]bool)
+func GetKindsFromRule(rule kyvernov1.Rule, client dclient.Interface) (map[schema.GroupVersionKind]bool, map[schema.GroupVersionKind]Subresource) {
+	resourceTypesMap := make(map[schema.GroupVersionKind]bool)
+	subresourceMap := make(map[schema.GroupVersionKind]Subresource)
 	for _, kind := range rule.MatchResources.Kinds {
-		if strings.Contains(kind, "/") {
-			lastElement := kind[strings.LastIndex(kind, "/")+1:]
-			resourceTypesMap[cases.Title(language.Und, cases.NoLower).String(lastElement)] = true
-		}
-		resourceTypesMap[cases.Title(language.Und, cases.NoLower).String(kind)] = true
+		addGVKToResourceTypesMap(kind, resourceTypesMap, subresourceMap, client)
 	}
 
 	if rule.MatchResources.Any != nil {
 		for _, resFilter := range rule.MatchResources.Any {
 			for _, kind := range resFilter.ResourceDescription.Kinds {
-				if strings.Contains(kind, "/") {
-					lastElement := kind[strings.LastIndex(kind, "/")+1:]
-					resourceTypesMap[cases.Title(language.Und, cases.NoLower).String(lastElement)] = true
-				}
-				resourceTypesMap[kind] = true
+				addGVKToResourceTypesMap(kind, resourceTypesMap, subresourceMap, client)
 			}
 		}
 	}
@@ -316,13 +343,36 @@ func GetKindsFromRule(rule kyvernov1.Rule) map[string]bool {
 	if rule.MatchResources.All != nil {
 		for _, resFilter := range rule.MatchResources.All {
 			for _, kind := range resFilter.ResourceDescription.Kinds {
-				if strings.Contains(kind, "/") {
-					lastElement := kind[strings.LastIndex(kind, "/")+1:]
-					resourceTypesMap[cases.Title(language.Und, cases.NoLower).String(lastElement)] = true
-				}
-				resourceTypesMap[cases.Title(language.Und, cases.NoLower).String(kind)] = true
+				addGVKToResourceTypesMap(kind, resourceTypesMap, subresourceMap, client)
 			}
 		}
 	}
-	return resourceTypesMap
+	return resourceTypesMap, subresourceMap
+}
+
+func addGVKToResourceTypesMap(kind string, resourceTypesMap map[schema.GroupVersionKind]bool, subresourceMap map[schema.GroupVersionKind]Subresource, client dclient.Interface) {
+	gvString, k := kubeutils.GetKindFromGVK(kind)
+	apiResource, parentApiResource, _, err := client.Discovery().FindResource(gvString, k)
+	if err != nil {
+		log.Log.Info("failed to find resource", "kind", kind, "error", err)
+		return
+	}
+
+	// The resource is not a subresource
+	if parentApiResource == nil {
+		gvk := schema.GroupVersionKind{
+			Group:   apiResource.Group,
+			Version: apiResource.Version,
+			Kind:    apiResource.Kind,
+		}
+		resourceTypesMap[gvk] = true
+	} else {
+		gvk := schema.GroupVersionKind{
+			Group: apiResource.Group, Version: apiResource.Version, Kind: apiResource.Kind,
+		}
+		subresourceMap[gvk] = Subresource{
+			APIResource:    *apiResource,
+			ParentResource: *parentApiResource,
+		}
+	}
 }
diff --git a/cmd/kyverno/main.go b/cmd/kyverno/main.go
index 0066425d7b..4c0874cfb8 100644
--- a/cmd/kyverno/main.go
+++ b/cmd/kyverno/main.go
@@ -34,7 +34,7 @@ import (
 	webhookcontroller "github.com/kyverno/kyverno/pkg/controllers/webhook"
 	"github.com/kyverno/kyverno/pkg/cosign"
 	"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
-	event "github.com/kyverno/kyverno/pkg/event"
+	"github.com/kyverno/kyverno/pkg/event"
 	"github.com/kyverno/kyverno/pkg/leaderelection"
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/metrics"
@@ -110,7 +110,6 @@ func createNonLeaderControllers(
 	kubeInformer kubeinformers.SharedInformerFactory,
 	kubeKyvernoInformer kubeinformers.SharedInformerFactory,
 	kyvernoInformer kyvernoinformer.SharedInformerFactory,
-	kubeClient kubernetes.Interface,
 	kyvernoClient versioned.Interface,
 	dynamicClient dclient.Interface,
 	rclient registryclient.Client,
@@ -120,6 +119,7 @@ func createNonLeaderControllers(
 	manager openapi.Manager,
 ) ([]internal.Controller, func() error) {
 	policyCacheController := policycachecontroller.NewController(
+		dynamicClient,
 		policyCache,
 		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
 		kyvernoInformer.Kyverno().V1().Policies(),
@@ -479,7 +479,6 @@ func main() {
 		kubeInformer,
 		kubeKyvernoInformer,
 		kyvernoInformer,
-		kubeClient,
 		kyvernoClient,
 		dClient,
 		rclient,
diff --git a/pkg/background/generate/generate.go b/pkg/background/generate/generate.go
index 401ade590e..04ad0828aa 100644
--- a/pkg/background/generate/generate.go
+++ b/pkg/background/generate/generate.go
@@ -845,7 +845,7 @@ func (c *GenerateController) GetUnstrResource(genResourceSpec kyvernov1.Resource
 
 func deleteGeneratedResources(log logr.Logger, client dclient.Interface, ur kyvernov1beta1.UpdateRequest) error {
 	for _, genResource := range ur.Status.GeneratedResources {
-		err := client.DeleteResource(context.TODO(), "", genResource.Kind, genResource.Namespace, genResource.Name, false)
+		err := client.DeleteResource(context.TODO(), genResource.APIVersion, genResource.Kind, genResource.Namespace, genResource.Name, false)
 		if err != nil && !apierrors.IsNotFound(err) {
 			return err
 		}
diff --git a/pkg/background/mutate/mutate.go b/pkg/background/mutate/mutate.go
index b35707d211..bd1f8ebfbc 100644
--- a/pkg/background/mutate/mutate.go
+++ b/pkg/background/mutate/mutate.go
@@ -20,7 +20,8 @@ import (
 	"go.uber.org/multierr"
 	yamlv2 "gopkg.in/yaml.v2"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-	cache "k8s.io/client-go/tools/cache"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/client-go/tools/cache"
 )
 
 var ErrEmptyPatch error = fmt.Errorf("empty resource to patch")
@@ -96,6 +97,7 @@ func (c *MutateExistingController) ProcessUR(ur *kyvernov1beta1.UpdateRequest) e
 		er := engine.Mutate(context.TODO(), c.rclient, policyContext)
 		for _, r := range er.PolicyResponse.Rules {
 			patched := r.PatchedTarget
+			patchedTargetSubresourceName := r.PatchedTargetSubresourceName
 			switch r.Status {
 			case response.RuleStatusFail, response.RuleStatusError, response.RuleStatusWarn:
 				err := fmt.Errorf("failed to mutate existing resource, rule response%v: %s", r.Status, r.Message)
@@ -123,7 +125,22 @@ func (c *MutateExistingController) ProcessUR(ur *kyvernov1beta1.UpdateRequest) e
 
 				if r.Status == response.RuleStatusPass {
 					patchedNew.SetResourceVersion("")
-					_, updateErr := c.client.UpdateResource(context.TODO(), patchedNew.GetAPIVersion(), patchedNew.GetKind(), patchedNew.GetNamespace(), patchedNew.Object, false)
+					var updateErr error
+					if patchedTargetSubresourceName == "status" {
+						_, updateErr = c.client.UpdateStatusResource(context.TODO(), patchedNew.GetAPIVersion(), patchedNew.GetKind(), patchedNew.GetNamespace(), patchedNew.Object, false)
+					} else if patchedTargetSubresourceName != "" {
+						parentResourceGVR := r.PatchedTargetParentResourceGVR
+						parentResourceGV := schema.GroupVersion{Group: parentResourceGVR.Group, Version: parentResourceGVR.Version}
+						parentResourceGVK, err := c.client.Discovery().GetGVKFromGVR(parentResourceGV.String(), parentResourceGVR.Resource)
+						if err != nil {
+							logger.Error(err, "failed to get GVK from GVR", "GVR", parentResourceGVR)
+							errs = append(errs, err)
+							continue
+						}
+						_, updateErr = c.client.UpdateResource(context.TODO(), parentResourceGV.String(), parentResourceGVK.Kind, patchedNew.GetNamespace(), patchedNew.Object, false, patchedTargetSubresourceName)
+					} else {
+						_, updateErr = c.client.UpdateResource(context.TODO(), patchedNew.GetAPIVersion(), patchedNew.GetKind(), patchedNew.GetNamespace(), patchedNew.Object, false)
+					}
 					if updateErr != nil {
 						errs = append(errs, updateErr)
 						logger.WithName(rule.Name).Error(updateErr, "failed to update target resource", "namespace", patchedNew.GetNamespace(), "name", patchedNew.GetName())
diff --git a/pkg/clients/dclient/client.go b/pkg/clients/dclient/client.go
index ecfb987ace..fe34512686 100644
--- a/pkg/clients/dclient/client.go
+++ b/pkg/clients/dclient/client.go
@@ -42,7 +42,7 @@ type Interface interface {
 	// CreateResource creates object for the specified resource/namespace
 	CreateResource(ctx context.Context, apiVersion string, kind string, namespace string, obj interface{}, dryRun bool) (*unstructured.Unstructured, error)
 	// UpdateResource updates object for the specified resource/namespace
-	UpdateResource(ctx context.Context, apiVersion string, kind string, namespace string, obj interface{}, dryRun bool) (*unstructured.Unstructured, error)
+	UpdateResource(ctx context.Context, apiVersion string, kind string, namespace string, obj interface{}, dryRun bool, subresources ...string) (*unstructured.Unstructured, error)
 	// UpdateStatusResource updates the resource "status" subresource
 	UpdateStatusResource(ctx context.Context, apiVersion string, kind string, namespace string, obj interface{}, dryRun bool) (*unstructured.Unstructured, error)
 }
@@ -69,7 +69,7 @@ func NewClient(
 		rest: disco.RESTClient(),
 	}
 	// Set discovery client
-	discoveryClient := &serverPreferredResources{
+	discoveryClient := &serverResources{
 		cachedClient: memory.NewMemCacheClient(disco),
 	}
 	// client will invalidate registered resources cache every x seconds,
@@ -175,14 +175,14 @@ func (c *client) CreateResource(ctx context.Context, apiVersion string, kind str
 }
 
 // UpdateResource updates object for the specified resource/namespace
-func (c *client) UpdateResource(ctx context.Context, apiVersion string, kind string, namespace string, obj interface{}, dryRun bool) (*unstructured.Unstructured, error) {
+func (c *client) UpdateResource(ctx context.Context, apiVersion string, kind string, namespace string, obj interface{}, dryRun bool, subresources ...string) (*unstructured.Unstructured, error) {
 	options := metav1.UpdateOptions{}
 	if dryRun {
 		options = metav1.UpdateOptions{DryRun: []string{metav1.DryRunAll}}
 	}
 	// convert typed to unstructured obj
 	if unstructuredObj, err := kubeutils.ConvertToUnstructured(obj); err == nil && unstructuredObj != nil {
-		return c.getResourceInterface(apiVersion, kind, namespace).Update(ctx, unstructuredObj, options)
+		return c.getResourceInterface(apiVersion, kind, namespace).Update(ctx, unstructuredObj, options, subresources...)
 	}
 	return nil, fmt.Errorf("unable to update resource ")
 }
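
The variadic `subresources` parameter added to `UpdateResource` above lets callers address a subresource directly. A minimal, hypothetical Go sketch, assuming imports for `context`, `dclient`, and `unstructured` and an unstructured Deployment whose status carries the desired change:

// updateDeploymentStatus is a hypothetical helper showing the extended
// UpdateResource signature: it writes to the status subresource of a
// Deployment. Passing no subresource names keeps the previous behaviour
// of updating the resource itself.
func updateDeploymentStatus(ctx context.Context, dc dclient.Interface, obj *unstructured.Unstructured) error {
	_, err := dc.UpdateResource(ctx, "apps/v1", "Deployment", obj.GetNamespace(), obj.Object, false, "status")
	return err
}
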
diff --git a/pkg/clients/dclient/discovery.go b/pkg/clients/dclient/discovery.go
index 604a78eece..e957f1535b 100644
--- a/pkg/clients/dclient/discovery.go
+++ b/pkg/clients/dclient/discovery.go
@@ -16,37 +16,44 @@ import (
 
 // IDiscovery provides interface to mange Kind and GVR mapping
 type IDiscovery interface {
-	FindResource(apiVersion string, kind string) (*metav1.APIResource, schema.GroupVersionResource, error)
+	FindResource(groupVersion string, kind string) (apiResource, parentAPIResource *metav1.APIResource, gvr schema.GroupVersionResource, err error)
 	GetGVRFromKind(kind string) (schema.GroupVersionResource, error)
-	GetGVRFromAPIVersionKind(apiVersion string, kind string) schema.GroupVersionResource
+	GetGVRFromAPIVersionKind(groupVersion string, kind string) schema.GroupVersionResource
+	GetGVKFromGVR(apiVersion, resourceName string) (schema.GroupVersionKind, error)
 	GetServerVersion() (*version.Info, error)
 	OpenAPISchema() (*openapiv2.Document, error)
 	DiscoveryCache() discovery.CachedDiscoveryInterface
 	DiscoveryInterface() discovery.DiscoveryInterface
 }
 
-// serverPreferredResources stores the cachedClient instance for discovery client
-type serverPreferredResources struct {
+// apiResourceWithListGV is a wrapper for metav1.APIResource with the group-version of its metav1.APIResourceList
+type apiResourceWithListGV struct {
+	apiResource metav1.APIResource
+	listGV      string
+}
+
+// serverResources stores the cachedClient instance for discovery client
+type serverResources struct {
 	cachedClient discovery.CachedDiscoveryInterface
 }
 
 // DiscoveryCache gets the discovery client cache
-func (c serverPreferredResources) DiscoveryCache() discovery.CachedDiscoveryInterface {
+func (c serverResources) DiscoveryCache() discovery.CachedDiscoveryInterface {
 	return c.cachedClient
 }
 
 // DiscoveryInterface gets the discovery client
-func (c serverPreferredResources) DiscoveryInterface() discovery.DiscoveryInterface {
+func (c serverResources) DiscoveryInterface() discovery.DiscoveryInterface {
 	return c.cachedClient
 }
 
 // Poll will keep invalidate the local cache
-func (c serverPreferredResources) Poll(ctx context.Context, resync time.Duration) {
+func (c serverResources) Poll(ctx context.Context, resync time.Duration) {
 	logger := logger.WithName("Poll")
 	// start a ticker
 	ticker := time.NewTicker(resync)
 	defer func() { ticker.Stop() }()
-	logger.V(4).Info("starting registered resources sync", "period", resync)
+	logger.V(6).Info("starting registered resources sync", "period", resync)
 	for {
 		select {
 		case <-ctx.Done():
@@ -61,17 +68,17 @@ func (c serverPreferredResources) Poll(ctx context.Context, resync time.Duration
 }
 
 // OpenAPISchema returns the API server OpenAPI schema document
-func (c serverPreferredResources) OpenAPISchema() (*openapiv2.Document, error) {
+func (c serverResources) OpenAPISchema() (*openapiv2.Document, error) {
 	return c.cachedClient.OpenAPISchema()
 }
 
 // GetGVRFromKind get the Group Version Resource from kind
-func (c serverPreferredResources) GetGVRFromKind(kind string) (schema.GroupVersionResource, error) {
+func (c serverResources) GetGVRFromKind(kind string) (schema.GroupVersionResource, error) {
 	if kind == "" {
 		return schema.GroupVersionResource{}, nil
 	}
 	gv, k := kubeutils.GetKindFromGVK(kind)
-	_, gvr, err := c.FindResource(gv, k)
+	_, _, gvr, err := c.FindResource(gv, k)
 	if err != nil {
 		logger.Info("schema not found", "kind", k)
 		return schema.GroupVersionResource{}, err
@@ -81,8 +88,8 @@ func (c serverPreferredResources) GetGVRFromKind(kind string) (schema.GroupVersi
 }
 
 // GetGVRFromAPIVersionKind get the Group Version Resource from APIVersion and kind
-func (c serverPreferredResources) GetGVRFromAPIVersionKind(apiVersion string, kind string) schema.GroupVersionResource {
-	_, gvr, err := c.FindResource(apiVersion, kind)
+func (c serverResources) GetGVRFromAPIVersionKind(apiVersion string, kind string) schema.GroupVersionResource {
+	_, _, gvr, err := c.FindResource(apiVersion, kind)
 	if err != nil {
 		logger.Info("schema not found", "kind", kind, "apiVersion", apiVersion, "error : ", err)
 		return schema.GroupVersionResource{}
@@ -92,88 +99,244 @@ func (c serverPreferredResources) GetGVRFromAPIVersionKind(apiVersion string, ki
 }
 
 // GetServerVersion returns the server version of the cluster
-func (c serverPreferredResources) GetServerVersion() (*version.Info, error) {
+func (c serverResources) GetServerVersion() (*version.Info, error) {
 	return c.cachedClient.ServerVersion()
 }
 
-// FindResource finds an API resource that matches 'kind'. If the resource is not
-// found and the Cache is not fresh, the cache is invalidated and a retry is attempted
-func (c serverPreferredResources) FindResource(apiVersion string, kind string) (*metav1.APIResource, schema.GroupVersionResource, error) {
-	r, gvr, err := c.findResource(apiVersion, kind)
+// GetGVKFromGVR returns the GroupVersionKind for a GroupVersionResource. The groupVersion must be specified explicitly;
+// for example, for corev1.Pod it must be `v1`. Specifying an empty groupVersion will not work.
+func (c serverResources) GetGVKFromGVR(groupVersion, resourceName string) (schema.GroupVersionKind, error) {
+	gvk, err := c.findResourceFromResourceName(groupVersion, resourceName)
 	if err == nil {
-		return r, gvr, nil
+		return gvk, nil
 	}
 
 	if !c.cachedClient.Fresh() {
 		c.cachedClient.Invalidate()
-		if r, gvr, err = c.findResource(apiVersion, kind); err == nil {
-			return r, gvr, nil
+		if gvk, err := c.findResourceFromResourceName(groupVersion, resourceName); err == nil {
+			return gvk, nil
 		}
 	}
 
-	return nil, schema.GroupVersionResource{}, err
+	return schema.GroupVersionKind{}, err
 }
 
-func (c serverPreferredResources) findResource(apiVersion string, kind string) (*metav1.APIResource, schema.GroupVersionResource, error) {
-	var serverResources []*metav1.APIResourceList
-	var err error
-	if apiVersion == "" {
-		serverResources, err = c.cachedClient.ServerPreferredResources()
-	} else {
-		_, serverResources, err = c.cachedClient.ServerGroupsAndResources()
-	}
-
+// findResourceFromResourceName returns the GVK for a particular resourceName and groupVersion
+func (c serverResources) findResourceFromResourceName(groupVersion, resourceName string) (schema.GroupVersionKind, error) {
+	_, serverGroupsAndResources, err := c.cachedClient.ServerGroupsAndResources()
 	if err != nil && !strings.Contains(err.Error(), "Got empty response for") {
 		if discovery.IsGroupDiscoveryFailedError(err) {
-			logDiscoveryErrors(err, c)
-		} else if isMetricsServerUnavailable(kind, err) {
+			logDiscoveryErrors(err)
+		} else if isMetricsServerUnavailable(groupVersion, err) {
 			logger.V(3).Info("failed to find preferred resource version", "error", err.Error())
 		} else {
 			logger.Error(err, "failed to find preferred resource version")
-			return nil, schema.GroupVersionResource{}, err
+			return schema.GroupVersionKind{}, err
+		}
+	}
+	apiResource, err := findResourceFromResourceName(groupVersion, resourceName, serverGroupsAndResources)
+	if err != nil {
+		return schema.GroupVersionKind{}, err
+	}
+	return schema.GroupVersionKind{Group: apiResource.Group, Version: apiResource.Version, Kind: apiResource.Kind}, err
+}
+
+// FindResource finds an API resource that matches 'kind'. To find a subresource that shares its kind with the parent
+// resource, kind must be specified as 'ParentKind/SubresourceName'; for example, to match the status subresource of a
+// Pod, kind must be specified as `Pod/status`. If the resource is not found and the cache is not fresh, the cache is
+// invalidated and a retry is attempted.
+func (c serverResources) FindResource(groupVersion string, kind string) (apiResource, parentAPIResource *metav1.APIResource, gvr schema.GroupVersionResource, err error) {
+	r, pr, gvr, err := c.findResource(groupVersion, kind)
+	if err == nil {
+		return r, pr, gvr, nil
+	}
+
+	if !c.cachedClient.Fresh() {
+		c.cachedClient.Invalidate()
+		if r, pr, gvr, err = c.findResource(groupVersion, kind); err == nil {
+			return r, pr, gvr, nil
 		}
 	}
 
-	k, subresource := kubeutils.SplitSubresource(kind)
+	return nil, nil, schema.GroupVersionResource{}, err
+}
+
+func (c serverResources) findResource(groupVersion string, kind string) (apiResource, parentAPIResource *metav1.APIResource,
+	gvr schema.GroupVersionResource, err error,
+) {
+	serverPreferredResources, _ := c.cachedClient.ServerPreferredResources()
+	_, serverGroupsAndResources, err := c.cachedClient.ServerGroupsAndResources()
+
+	if err != nil && !strings.Contains(err.Error(), "Got empty response for") {
+		if discovery.IsGroupDiscoveryFailedError(err) {
+			logDiscoveryErrors(err)
+		} else if isMetricsServerUnavailable(groupVersion, err) {
+			logger.V(3).Info("failed to find preferred resource version", "error", err.Error())
+		} else {
+			logger.Error(err, "failed to find preferred resource version")
+			return nil, nil, schema.GroupVersionResource{}, err
+		}
+	}
+
+	kindWithoutSubresource, subresource := kubeutils.SplitSubresource(kind)
+
 	if subresource != "" {
-		kind = k
+		parentApiResource, _, _, err := c.findResource(groupVersion, kindWithoutSubresource)
+		if err != nil {
+			logger.Error(err, "Unable to find parent resource", "kind", kind)
+			return nil, nil, schema.GroupVersionResource{}, err
+		}
+		parentResourceName := parentApiResource.Name
+		resource, gvr, err := findSubresource(groupVersion, parentResourceName, subresource, kind, serverGroupsAndResources)
+		return resource, parentApiResource, gvr, err
 	}
 
-	for _, serverResource := range serverResources {
-		if apiVersion != "" && serverResource.GroupVersion != apiVersion {
-			continue
-		}
+	return findResource(groupVersion, kind, serverPreferredResources, serverGroupsAndResources)
+}
 
-		for _, resource := range serverResource.APIResources {
-			if resourceMatches(resource, kind, subresource) {
-				logger.V(6).Info("matched API resource to kind", "apiResource", resource, "kind", kind)
-				gv, err := schema.ParseGroupVersion(serverResource.GroupVersion)
-				if err != nil {
-					logger.Error(err, "failed to parse GV", "groupVersion", serverResource.GroupVersion)
-					return nil, schema.GroupVersionResource{}, err
+// findSubresource finds the subresource for the given parent resource, groupVersion and serverResourcesList
+func findSubresource(groupVersion, parentResourceName, subresource, kind string, serverResourcesList []*metav1.APIResourceList) (
+	apiResource *metav1.APIResource, gvr schema.GroupVersionResource, err error,
+) {
+	for _, serverResourceList := range serverResourcesList {
+		if groupVersion == "" || kubeutils.GroupVersionMatches(groupVersion, serverResourceList.GroupVersion) {
+			for _, serverResource := range serverResourceList.APIResources {
+				if serverResource.Name == parentResourceName+"/"+strings.ToLower(subresource) {
+					logger.V(6).Info("matched API resource to kind", "apiResource", serverResource, "kind", kind)
+
+					serverResourceGv := getServerResourceGroupVersion(serverResourceList.GroupVersion, serverResource.Group, serverResource.Version)
+					gv, _ := schema.ParseGroupVersion(serverResourceGv)
+
+					serverResource.Group = gv.Group
+					serverResource.Version = gv.Version
+
+					groupVersionResource := gv.WithResource(serverResource.Name)
+					logger.V(6).Info("gv with resource", "gvWithResource", groupVersionResource)
+					return &serverResource, groupVersionResource, nil
 				}
-				// We potentially need to fix Group and Version with what the list is for
-				if resource.Group == "" {
-					resource.Group = gv.Group
-				}
-				if resource.Version == "" {
-					resource.Version = gv.Version
-				}
-				return &resource, gv.WithResource(resource.Name), nil
 			}
 		}
 	}
 
-	return nil, schema.GroupVersionResource{}, fmt.Errorf("kind '%s' not found in apiVersion '%s'", kind, apiVersion)
+	return nil, schema.GroupVersionResource{}, fmt.Errorf("resource not found for kind %s", kind)
 }
 
-// resourceMatches checks the resource Kind, Name, SingularName and a subresource if specified
-// e.g. &apiResource{Name: "taskruns/status", Kind: "TaskRun"} will match "kind=TaskRun, subresource=Status"
-func resourceMatches(resource metav1.APIResource, kind, subresource string) bool {
-	if resource.Kind == kind || resource.Name == kind || resource.SingularName == kind {
-		_, s := kubeutils.SplitSubresource(resource.Name)
-		return strings.EqualFold(s, subresource)
+// findResource finds an API resource matching 'groupVersion' and 'kind' in the given server resource lists
+func findResource(groupVersion string, kind string, serverPreferredResources, serverGroupsAndResources []*metav1.APIResourceList) (
+	apiResource, parentAPIResource *metav1.APIResource, gvr schema.GroupVersionResource, err error,
+) {
+	matchingServerResources := getMatchingServerResources(groupVersion, kind, serverGroupsAndResources)
+
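+	// If every match is a subresource (e.g. kind 'Eviction' only matching 'pods/eviction'), return the first match
+	// along with its parent resource resolved from the server's preferred resources.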
+	onlySubresourcePresentInMatchingResources := len(matchingServerResources) > 0
+	for _, matchingServerResource := range matchingServerResources {
+		if !kubeutils.IsSubresource(matchingServerResource.apiResource.Name) {
+			onlySubresourcePresentInMatchingResources = false
+			break
+		}
 	}
 
-	return false
+	if onlySubresourcePresentInMatchingResources {
+		apiResourceWithListGV := matchingServerResources[0]
+		matchingServerResource := apiResourceWithListGV.apiResource
+		logger.V(6).Info("matched API resource to kind", "apiResource", matchingServerResource, "kind", kind)
+
+		groupVersionResource := schema.GroupVersionResource{
+			Resource: matchingServerResource.Name,
+			Group:    matchingServerResource.Group,
+			Version:  matchingServerResource.Version,
+		}
+		logger.V(6).Info("gv with resource", "gvWithResource", groupVersionResource)
+
+		parentAPIResource, err := findResourceFromResourceName(apiResourceWithListGV.listGV, strings.Split(matchingServerResource.Name, "/")[0], serverPreferredResources)
+		if err != nil {
+			return nil, nil, schema.GroupVersionResource{}, fmt.Errorf("failed to find parent resource for subresource %s: %v", matchingServerResource.Name, err)
+		}
+		logger.V(6).Info("parent API resource", "parentAPIResource", parentAPIResource)
+
+		return &matchingServerResource, parentAPIResource, groupVersionResource, nil
+	}
+
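+	// When no groupVersion is specified, prefer the server's preferred version of the matching kind; otherwise
+	// return the first non-subresource match for the requested groupVersion.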
+	if groupVersion == "" && len(matchingServerResources) > 0 {
+		for _, serverResourceList := range serverPreferredResources {
+			for _, serverResource := range serverResourceList.APIResources {
+				serverResourceGv := getServerResourceGroupVersion(serverResourceList.GroupVersion, serverResource.Group, serverResource.Version)
+				if serverResource.Kind == kind || serverResource.SingularName == kind {
+					gv, _ := schema.ParseGroupVersion(serverResourceGv)
+					serverResource.Group = gv.Group
+					serverResource.Version = gv.Version
+					groupVersionResource := gv.WithResource(serverResource.Name)
+
+					logger.V(6).Info("matched API resource to kind", "apiResource", serverResource, "kind", kind)
+					return &serverResource, nil, groupVersionResource, nil
+				}
+			}
+		}
+	} else {
+		for _, apiResourceWithListGV := range matchingServerResources {
+			matchingServerResource := apiResourceWithListGV.apiResource
+			if !kubeutils.IsSubresource(matchingServerResource.Name) {
+				logger.V(6).Info("matched API resource to kind", "apiResource", matchingServerResource, "kind", kind)
+
+				groupVersionResource := schema.GroupVersionResource{
+					Resource: matchingServerResource.Name,
+					Group:    matchingServerResource.Group,
+					Version:  matchingServerResource.Version,
+				}
+				logger.V(6).Info("gv with resource", "groupVersionResource", groupVersionResource)
+				return &matchingServerResource, nil, groupVersionResource, nil
+			}
+		}
+	}
+
+	return nil, nil, schema.GroupVersionResource{}, fmt.Errorf("kind '%s' not found in groupVersion '%s'", kind, groupVersion)
+}
+
+// getMatchingServerResources returns a list of API resources that match the given groupVersion and kind
+func getMatchingServerResources(groupVersion string, kind string, serverGroupsAndResources []*metav1.APIResourceList) []apiResourceWithListGV {
+	matchingServerResources := make([]apiResourceWithListGV, 0)
+	for _, serverResourceList := range serverGroupsAndResources {
+		for _, serverResource := range serverResourceList.APIResources {
+			serverResourceGv := getServerResourceGroupVersion(serverResourceList.GroupVersion, serverResource.Group, serverResource.Version)
+			if groupVersion == "" || kubeutils.GroupVersionMatches(groupVersion, serverResourceGv) {
+				if serverResource.Kind == kind || serverResource.SingularName == kind {
+					gv, _ := schema.ParseGroupVersion(serverResourceGv)
+					serverResource.Group = gv.Group
+					serverResource.Version = gv.Version
+					matchingServerResources = append(matchingServerResources, apiResourceWithListGV{apiResource: serverResource, listGV: serverResourceList.GroupVersion})
+				}
+			}
+		}
+	}
+	return matchingServerResources
+}
+
+// findResourceFromResourceName finds the API resource with the given 'groupVersion' and 'resourceName' in the given serverResourcesList
+func findResourceFromResourceName(groupVersion string, resourceName string, serverGroupsAndResources []*metav1.APIResourceList) (*metav1.APIResource, error) {
+	for _, serverResourceList := range serverGroupsAndResources {
+		for _, apiResource := range serverResourceList.APIResources {
+			serverResourceGroupVersion := getServerResourceGroupVersion(serverResourceList.GroupVersion, apiResource.Group, apiResource.Version)
+			if serverResourceGroupVersion == groupVersion && apiResource.Name == resourceName {
+				logger.V(6).Info("found preferred resource", "groupVersion", groupVersion, "resourceName", resourceName)
+				gv, _ := schema.ParseGroupVersion(serverResourceGroupVersion)
+				apiResource.Group = gv.Group
+				apiResource.Version = gv.Version
+				return &apiResource, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("resource %s not found in group %s", resourceName, groupVersion)
+}
+
+// getServerResourceGroupVersion returns the groupVersion of an API resource, falling back to the APIResourceList's groupVersion when the resource does not set its own group and version
+func getServerResourceGroupVersion(apiResourceListGroupVersion, apiResourceGroup, apiResourceVersion string) string {
+	var serverResourceGroupVersion string
+	if apiResourceGroup == "" && apiResourceVersion == "" {
+		serverResourceGroupVersion = apiResourceListGroupVersion
+	} else {
+		serverResourceGroupVersion = schema.GroupVersion{
+			Group:   apiResourceGroup,
+			Version: apiResourceVersion,
+		}.String()
+	}
+	return serverResourceGroupVersion
 }
diff --git a/pkg/clients/dclient/discovery_test.go b/pkg/clients/dclient/discovery_test.go
index 7f58ffb96d..4f053afe3e 100644
--- a/pkg/clients/dclient/discovery_test.go
+++ b/pkg/clients/dclient/discovery_test.go
@@ -3,20 +3,203 @@ package dclient
 import (
 	"testing"
 
+	"k8s.io/apimachinery/pkg/runtime/schema"
+
 	"gotest.tools/assert"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-func Test_resourceMatches(t *testing.T) {
-	ar := metav1.APIResource{Name: "taskruns/status", Kind: "TaskRun"}
-	assert.Equal(t, resourceMatches(ar, "TaskRun", "Status"), true)
+var (
+	networkPolicyAPIResource       = metav1.APIResource{Name: "networkpolicies", SingularName: "", Namespaced: true, Kind: "NetworkPolicy"}
+	networkPolicyStatusAPIResource = metav1.APIResource{Name: "networkpolicies/status", SingularName: "", Namespaced: true, Kind: "NetworkPolicy"}
 
-	ar = metav1.APIResource{Name: "taskruns/status", Kind: "TaskRun"}
-	assert.Equal(t, resourceMatches(ar, "TaskRun", ""), false)
+	podAPIResource         = metav1.APIResource{Name: "pods", SingularName: "", Namespaced: true, Kind: "Pod"}
+	podEvictionAPIResource = metav1.APIResource{Name: "pods/eviction", SingularName: "", Namespaced: true, Group: "policy", Version: "v1", Kind: "Eviction"}
+	podLogAPIResource      = metav1.APIResource{Name: "pods/log", SingularName: "", Namespaced: true, Kind: "Pod"}
 
-	ar = metav1.APIResource{Name: "taskruns", Kind: "TaskRun"}
-	assert.Equal(t, resourceMatches(ar, "TaskRun", ""), true)
+	cronJobAPIResource = metav1.APIResource{Name: "cronjobs", SingularName: "", Namespaced: true, Kind: "CronJob"}
+)
 
-	ar = metav1.APIResource{Name: "tasks/status", Kind: "Task"}
-	assert.Equal(t, resourceMatches(ar, "TaskRun", "Status"), false)
+func Test_findSubresource(t *testing.T) {
+	serverGroupsAndResources := []*metav1.APIResourceList{
+		{
+			GroupVersion: "networking.k8s.io/v1",
+			APIResources: []metav1.APIResource{
+				networkPolicyAPIResource,
+				networkPolicyStatusAPIResource,
+			},
+		},
+
+		{
+			GroupVersion: "v1",
+			APIResources: []metav1.APIResource{
+				podAPIResource,
+				podEvictionAPIResource,
+			},
+		},
+	}
+
+	apiResource, gvr, err := findSubresource("", "pods", "eviction", "Pod/eviction", serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, gvr, schema.GroupVersionResource{Resource: "pods/eviction", Group: "policy", Version: "v1"})
+
+	// Not comparing directly because actual apiResource also contains fields like 'ShortNames' which are not set in the expected apiResource
+	assert.Equal(t, apiResource.Name, podEvictionAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, podEvictionAPIResource.Kind)
+	assert.Equal(t, apiResource.Group, podEvictionAPIResource.Group)
+	assert.Equal(t, apiResource.Version, podEvictionAPIResource.Version)
+
+	apiResource, gvr, err = findSubresource("v1", "pods", "eviction", "Pod/eviction", serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, gvr, schema.GroupVersionResource{Resource: "pods/eviction", Group: "policy", Version: "v1"})
+
+	// Not comparing directly because actual apiResource also contains fields like 'ShortNames' which are not set in the expected apiResource
+	assert.Equal(t, apiResource.Name, podEvictionAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, podEvictionAPIResource.Kind)
+	assert.Equal(t, apiResource.Group, podEvictionAPIResource.Group)
+	assert.Equal(t, apiResource.Version, podEvictionAPIResource.Version)
+
+	apiResource, gvr, err = findSubresource("networking.k8s.io/*", "networkpolicies", "status", "NetworkPolicy/status", serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, gvr, schema.GroupVersionResource{Resource: "networkpolicies/status", Group: "networking.k8s.io", Version: "v1"})
+
+	// Not comparing directly because actual apiResource also contains fields like 'ShortNames' which are not set in the expected apiResource
+	assert.Equal(t, apiResource.Name, networkPolicyStatusAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, networkPolicyStatusAPIResource.Kind)
+
+	// Resources with empty GV use the GV of APIResourceList
+	assert.Equal(t, apiResource.Group, "networking.k8s.io")
+	assert.Equal(t, apiResource.Version, "v1")
+}
+
+func Test_findResource(t *testing.T) {
+	serverGroupsAndResources := []*metav1.APIResourceList{
+		{
+			GroupVersion: "v1",
+			APIResources: []metav1.APIResource{
+				podEvictionAPIResource,
+				podLogAPIResource,
+				podAPIResource,
+			},
+		},
+		{
+			GroupVersion: "batch/v1beta1",
+			APIResources: []metav1.APIResource{
+				cronJobAPIResource,
+			},
+		},
+		{
+			GroupVersion: "batch/v1",
+			APIResources: []metav1.APIResource{
+				cronJobAPIResource,
+			},
+		},
+	}
+
+	serverPreferredResourcesList := []*metav1.APIResourceList{
+		{
+			GroupVersion: "v1",
+			APIResources: []metav1.APIResource{
+				podAPIResource,
+			},
+		},
+		{
+			GroupVersion: "batch/v1",
+			APIResources: []metav1.APIResource{
+				cronJobAPIResource,
+			},
+		},
+	}
+
+	apiResource, parentAPIResource, gvr, err := findResource("", "Pod", serverPreferredResourcesList, serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, gvr, schema.GroupVersionResource{Resource: "pods", Group: "", Version: "v1"})
+	assert.Equal(t, parentAPIResource, (*metav1.APIResource)(nil))
+
+	// Not comparing directly because actual apiResource also contains fields like 'ShortNames' which are not set in the expected apiResource
+	assert.Equal(t, apiResource.Name, podAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, podAPIResource.Kind)
+
+	// Resources with empty GV use the GV of APIResourceList
+	assert.Equal(t, apiResource.Group, "")
+	assert.Equal(t, apiResource.Version, "v1")
+
+	apiResource, parentAPIResource, gvr, err = findResource("policy/v1", "Eviction", serverPreferredResourcesList, serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, gvr, schema.GroupVersionResource{Resource: "pods/eviction", Group: "policy", Version: "v1"})
+
+	assert.Equal(t, parentAPIResource.Name, podAPIResource.Name)
+	assert.Equal(t, parentAPIResource.Kind, podAPIResource.Kind)
+	assert.Equal(t, parentAPIResource.Group, "")
+	assert.Equal(t, parentAPIResource.Version, "v1")
+
+	// Not comparing directly because actual apiResource also contains fields like 'ShortNames' which are not set in the expected apiResource
+	assert.Equal(t, apiResource.Name, podEvictionAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, podEvictionAPIResource.Kind)
+	assert.Equal(t, apiResource.Group, podEvictionAPIResource.Group)
+	assert.Equal(t, apiResource.Version, podEvictionAPIResource.Version)
+
+	apiResource, parentAPIResource, gvr, err = findResource("", "CronJob", serverPreferredResourcesList, serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, gvr, schema.GroupVersionResource{Resource: "cronjobs", Group: "batch", Version: "v1"})
+
+	assert.Equal(t, parentAPIResource, (*metav1.APIResource)(nil))
+
+	// Not comparing directly because actual apiResource also contains fields like 'ShortNames' which are not set in the expected apiResource
+	assert.Equal(t, apiResource.Name, cronJobAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, cronJobAPIResource.Kind)
+	assert.Equal(t, apiResource.Group, "batch")
+	assert.Equal(t, apiResource.Version, "v1")
+
+	apiResource, parentAPIResource, gvr, err = findResource("batch/v1beta1", "CronJob", serverPreferredResourcesList, serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, gvr, schema.GroupVersionResource{Resource: "cronjobs", Group: "batch", Version: "v1beta1"})
+
+	assert.Equal(t, parentAPIResource, (*metav1.APIResource)(nil))
+
+	// Not comparing directly because actual apiResource also contains fields like 'ShortNames' which are not set in the expected apiResource
+	assert.Equal(t, apiResource.Name, cronJobAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, cronJobAPIResource.Kind)
+	assert.Equal(t, apiResource.Group, "batch")
+	assert.Equal(t, apiResource.Version, "v1beta1")
+}
+
+func Test_getServerResourceGroupVersion(t *testing.T) {
+	apiResource := &metav1.APIResource{Name: "pods", SingularName: "", Namespaced: true, Kind: "Pod"}
+	apiResourceListGV := "v1"
+	assert.Equal(t, getServerResourceGroupVersion(apiResourceListGV, apiResource.Group, apiResource.Version), "v1")
+
+	apiResource = &metav1.APIResource{Name: "horizontalpodautoscalers", SingularName: "", Namespaced: true, Kind: "HorizontalPodAutoscaler"}
+	apiResourceListGV = "autoscaling/v2beta1"
+	assert.Equal(t, getServerResourceGroupVersion(apiResourceListGV, apiResource.Group, apiResource.Version), "autoscaling/v2beta1")
+
+	apiResource = &metav1.APIResource{Name: "deployments/scale", SingularName: "", Namespaced: true, Group: "autoscaling", Version: "v1", Kind: "Scale"}
+	apiResourceListGV = "apps/v1"
+	assert.Equal(t, getServerResourceGroupVersion(apiResourceListGV, apiResource.Group, apiResource.Version), "autoscaling/v1")
+}
+
+func Test_findResourceFromResourceName(t *testing.T) {
+	serverGroupsAndResources := []*metav1.APIResourceList{
+		{
+			GroupVersion: "v1",
+			APIResources: []metav1.APIResource{
+				podAPIResource,
+				podEvictionAPIResource,
+			},
+		},
+	}
+
+	apiResource, err := findResourceFromResourceName("v1", "pods", serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, apiResource.Name, podAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, podAPIResource.Kind)
+	assert.Equal(t, apiResource.Group, "")
+	assert.Equal(t, apiResource.Version, "v1")
+
+	apiResource, err = findResourceFromResourceName("policy/v1", "pods/eviction", serverGroupsAndResources)
+	assert.NilError(t, err)
+	assert.Equal(t, apiResource.Name, podEvictionAPIResource.Name)
+	assert.Equal(t, apiResource.Kind, podEvictionAPIResource.Kind)
+	assert.Equal(t, apiResource.Group, podEvictionAPIResource.Group)
+	assert.Equal(t, apiResource.Version, podEvictionAPIResource.Version)
 }
diff --git a/pkg/clients/dclient/fake.go b/pkg/clients/dclient/fake.go
index 8ee5cf6907..fc6d2e5659 100644
--- a/pkg/clients/dclient/fake.go
+++ b/pkg/clients/dclient/fake.go
@@ -77,13 +77,17 @@ func (c *fakeDiscoveryClient) GetGVRFromKind(kind string) (schema.GroupVersionRe
 	return c.getGVR(resource), nil
 }
 
+func (c *fakeDiscoveryClient) GetGVKFromGVR(apiVersion, resourceName string) (schema.GroupVersionKind, error) {
+	return schema.GroupVersionKind{}, nil
+}
+
 func (c *fakeDiscoveryClient) GetGVRFromAPIVersionKind(apiVersion string, kind string) schema.GroupVersionResource {
 	resource := strings.ToLower(kind) + "s"
 	return c.getGVR(resource)
 }
 
-func (c *fakeDiscoveryClient) FindResource(apiVersion string, kind string) (*metav1.APIResource, schema.GroupVersionResource, error) {
-	return nil, schema.GroupVersionResource{}, fmt.Errorf("not implemented")
+func (c *fakeDiscoveryClient) FindResource(groupVersion string, kind string) (apiResource, parentAPIResource *metav1.APIResource, gvr schema.GroupVersionResource, err error) {
+	return nil, nil, schema.GroupVersionResource{}, fmt.Errorf("not implemented")
 }
 
 func (c *fakeDiscoveryClient) OpenAPISchema() (*openapiv2.Document, error) {
diff --git a/pkg/clients/dclient/utils.go b/pkg/clients/dclient/utils.go
index c0032e5071..3e3a05551a 100644
--- a/pkg/clients/dclient/utils.go
+++ b/pkg/clients/dclient/utils.go
@@ -6,7 +6,7 @@ import (
 	"k8s.io/client-go/discovery"
 )
 
-func logDiscoveryErrors(err error, c serverPreferredResources) {
+func logDiscoveryErrors(err error) {
 	discoveryError := err.(*discovery.ErrGroupDiscoveryFailed)
 	for gv, e := range discoveryError.Groups {
 		if gv.Group == "custom.metrics.k8s.io" || gv.Group == "metrics.k8s.io" || gv.Group == "external.metrics.k8s.io" {
@@ -19,9 +19,9 @@ func logDiscoveryErrors(err error, c serverPreferredResources) {
 	}
 }
 
-func isMetricsServerUnavailable(kind string, err error) bool {
+func isMetricsServerUnavailable(groupVersion string, err error) bool {
 	// error message is defined at:
 	// https://github.com/kubernetes/apimachinery/blob/2456ebdaba229616fab2161a615148884b46644b/pkg/api/errors/errors.go#L432
-	return strings.HasPrefix(kind, "metrics.k8s.io/") &&
+	return strings.HasPrefix(groupVersion, "metrics.k8s.io/") &&
 		strings.Contains(err.Error(), "the server is currently unable to handle the request")
 }
diff --git a/pkg/controllers/policycache/controller.go b/pkg/controllers/policycache/controller.go
index db2f295f05..3337aea85c 100644
--- a/pkg/controllers/policycache/controller.go
+++ b/pkg/controllers/policycache/controller.go
@@ -6,11 +6,14 @@ import (
 
 	"github.com/go-logr/logr"
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
+	"github.com/kyverno/kyverno/pkg/autogen"
 	kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
 	kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
+	"github.com/kyverno/kyverno/pkg/clients/dclient"
 	"github.com/kyverno/kyverno/pkg/controllers"
 	pcache "github.com/kyverno/kyverno/pkg/policycache"
 	controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
+	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
 	"k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -39,14 +42,18 @@ type controller struct {
 
 	// queue
 	queue workqueue.RateLimitingInterface
+
+	// client
+	client dclient.Interface
 }
 
-func NewController(pcache pcache.Cache, cpolInformer kyvernov1informers.ClusterPolicyInformer, polInformer kyvernov1informers.PolicyInformer) Controller {
+func NewController(client dclient.Interface, pcache pcache.Cache, cpolInformer kyvernov1informers.ClusterPolicyInformer, polInformer kyvernov1informers.PolicyInformer) Controller {
 	c := controller{
 		cache:      pcache,
 		cpolLister: cpolInformer.Lister(),
 		polLister:  polInformer.Lister(),
 		queue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName),
+		client:     client,
 	}
 	controllerutils.AddDefaultEventHandlers(logger, cpolInformer.Informer(), c.queue)
 	controllerutils.AddDefaultEventHandlers(logger, polInformer.Informer(), c.queue)
@@ -65,7 +72,8 @@ func (c *controller) WarmUp() error {
 		if key, err := cache.MetaNamespaceKeyFunc(policy); err != nil {
 			return err
 		} else {
-			c.cache.Set(key, policy)
+			subresourceGVKToKind := getSubresourceGVKToKindMap(policy, c.client)
+			c.cache.Set(key, policy, subresourceGVKToKind)
 		}
 	}
 	cpols, err := c.cpolLister.List(labels.Everything())
@@ -76,7 +84,8 @@ func (c *controller) WarmUp() error {
 		if key, err := cache.MetaNamespaceKeyFunc(policy); err != nil {
 			return err
 		} else {
-			c.cache.Set(key, policy)
+			subresourceGVKToKind := getSubresourceGVKToKindMap(policy, c.client)
+			c.cache.Set(key, policy, subresourceGVKToKind)
 		}
 	}
 	return nil
@@ -95,7 +104,8 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, nam
 		return err
 	}
 	// TODO: check resource version ?
-	c.cache.Set(key, policy)
+	subresourceGVKToKind := getSubresourceGVKToKindMap(policy, c.client)
+	c.cache.Set(key, policy, subresourceGVKToKind)
 	return nil
 }
 
@@ -106,3 +116,18 @@ func (c *controller) loadPolicy(namespace, name string) (kyvernov1.PolicyInterfa
 		return c.polLister.Policies(namespace).Get(name)
 	}
 }
+
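+// getSubresourceGVKToKindMap maps each subresource GVK referenced in the policy rules (e.g. 'Pod/status') to the
+// kind reported for it by API discovery.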
+func getSubresourceGVKToKindMap(policy kyvernov1.PolicyInterface, client dclient.Interface) map[string]string {
+	subresourceGVKToKind := make(map[string]string)
+	for _, rule := range autogen.ComputeRules(policy) {
+		for _, gvk := range rule.MatchResources.GetKinds() {
+			gv, k := kubeutils.GetKindFromGVK(gvk)
+			_, subresource := kubeutils.SplitSubresource(k)
+			if subresource != "" {
+				apiResource, _, _, _ := client.Discovery().FindResource(gv, k)
+				subresourceGVKToKind[gvk] = apiResource.Kind
+			}
+		}
+	}
+	return subresourceGVKToKind
+}
diff --git a/pkg/controllers/report/resource/controller.go b/pkg/controllers/report/resource/controller.go
index e9af0ef374..47190b099a 100644
--- a/pkg/controllers/report/resource/controller.go
+++ b/pkg/controllers/report/resource/controller.go
@@ -206,7 +206,7 @@ func (c *controller) updateDynamicWatchers(ctx context.Context) error {
 	gvrs := map[schema.GroupVersionKind]schema.GroupVersionResource{}
 	for _, kind := range kinds.List() {
 		apiVersion, kind := kubeutils.GetKindFromGVK(kind)
-		apiResource, gvr, err := c.client.Discovery().FindResource(apiVersion, kind)
+		apiResource, _, gvr, err := c.client.Discovery().FindResource(apiVersion, kind)
 		if err != nil {
 			logger.Error(err, "failed to get gvr from kind", "kind", kind)
 		} else {
diff --git a/pkg/controllers/webhook/controller.go b/pkg/controllers/webhook/controller.go
index 4387a2b408..a8f3f18a75 100644
--- a/pkg/controllers/webhook/controller.go
+++ b/pkg/controllers/webhook/controller.go
@@ -839,35 +839,24 @@ func (c *controller) mergeWebhook(dst *webhook, policy kyvernov1.PolicyInterface
 			gvkMap[gvk] = 1
 			// NOTE: webhook stores GVR in its rules while policy stores GVK in its rules definition
 			gv, k := kubeutils.GetKindFromGVK(gvk)
-			switch k {
-			case "Binding":
-				gvrList = append(gvrList, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods/binding"})
-			case "NodeProxyOptions":
-				gvrList = append(gvrList, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "nodes/proxy"})
-			case "PodAttachOptions":
-				gvrList = append(gvrList, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods/attach"})
-			case "PodExecOptions":
-				gvrList = append(gvrList, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods/exec"})
-			case "PodPortForwardOptions":
-				gvrList = append(gvrList, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods/portforward"})
-			case "PodProxyOptions":
-				gvrList = append(gvrList, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods/proxy"})
-			case "ServiceProxyOptions":
-				gvrList = append(gvrList, schema.GroupVersionResource{Group: "", Version: "v1", Resource: "services/proxy"})
-			default:
-				_, gvr, err := c.discoveryClient.FindResource(gv, k)
-				if err != nil {
-					logger.Error(err, "unable to convert GVK to GVR", "GVK", gvk)
-					continue
-				}
-				if strings.Contains(gvk, "*") {
-					group := kubeutils.GetGroupFromGVK(gvk)
-					gvrList = append(gvrList, schema.GroupVersionResource{Group: group, Version: "*", Resource: gvr.Resource})
-				} else {
-					logger.V(4).Info("configuring webhook", "GVK", gvk, "GVR", gvr)
-					gvrList = append(gvrList, gvr)
+			_, parentAPIResource, gvr, err := c.discoveryClient.FindResource(gv, k)
+			if err != nil {
+				logger.Error(err, "unable to convert GVK to GVR", "GVK", gvk)
+				continue
+			}
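+			// a non-nil parentAPIResource means the kind resolved to a subresource; register the webhook rule under
+			// the parent resource's group and version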
+			if parentAPIResource != nil {
+				gvr = schema.GroupVersionResource{
+					Group:    parentAPIResource.Group,
+					Version:  parentAPIResource.Version,
+					Resource: gvr.Resource,
 				}
 			}
+			if strings.Contains(gvk, "*") {
+				gvrList = append(gvrList, schema.GroupVersionResource{Group: gvr.Group, Version: "*", Resource: gvr.Resource})
+			} else {
+				logger.V(4).Info("configuring webhook", "GVK", gvk, "GVR", gvr)
+				gvrList = append(gvrList, gvr)
+			}
 		}
 	}
 	for _, gvr := range gvrList {
diff --git a/pkg/controllers/webhook/utils.go b/pkg/controllers/webhook/utils.go
index adb804459f..788965ba9a 100644
--- a/pkg/controllers/webhook/utils.go
+++ b/pkg/controllers/webhook/utils.go
@@ -31,12 +31,6 @@ func (wh *webhook) buildRulesWithOperations(ops ...admissionregistrationv1.Opera
 	var rules []admissionregistrationv1.RuleWithOperations
 	for gvr := range wh.rules {
 		resources := sets.NewString(gvr.Resource)
-		if resources.Has("pods") {
-			resources.Insert("pods/ephemeralcontainers")
-		}
-		if resources.Has("services") {
-			resources.Insert("services/status")
-		}
 		rules = append(rules, admissionregistrationv1.RuleWithOperations{
 			Rule: admissionregistrationv1.Rule{
 				APIGroups:   []string{gvr.Group},
diff --git a/pkg/engine/background.go b/pkg/engine/background.go
index fa3b2e3741..1fdc0d1f95 100644
--- a/pkg/engine/background.go
+++ b/pkg/engine/background.go
@@ -88,10 +88,13 @@ func filterRule(rclient registryclient.Client, rule kyvernov1.Rule, policyContex
 	logger := logging.WithName(string(ruleType)).WithValues("policy", policy.GetName(),
 		"kind", newResource.GetKind(), "namespace", newResource.GetNamespace(), "name", newResource.GetName())
 
-	if err = MatchesResourceDescription(newResource, rule, admissionInfo, excludeGroupRole, namespaceLabels, ""); err != nil {
+	kindsInPolicy := append(rule.MatchResources.GetKinds(), rule.ExcludeResources.GetKinds()...)
+	subresourceGVKToAPIResource := GetSubresourceGVKToAPIResourceMap(kindsInPolicy, policyContext)
+
+	if err = MatchesResourceDescription(subresourceGVKToAPIResource, newResource, rule, admissionInfo, excludeGroupRole, namespaceLabels, "", policyContext.subresource); err != nil {
 		if ruleType == response.Generation {
 			// if the oldResource matched, return "false" to delete GR for it
-			if err = MatchesResourceDescription(oldResource, rule, admissionInfo, excludeGroupRole, namespaceLabels, ""); err == nil {
+			if err = MatchesResourceDescription(subresourceGVKToAPIResource, oldResource, rule, admissionInfo, excludeGroupRole, namespaceLabels, "", policyContext.subresource); err == nil {
 				return &response.RuleResponse{
 					Name:   rule.Name,
 					Type:   ruleType,
diff --git a/pkg/engine/common.go b/pkg/engine/common.go
new file mode 100644
index 0000000000..4d43f6c0b8
--- /dev/null
+++ b/pkg/engine/common.go
@@ -0,0 +1,62 @@
+package engine
+
+import (
+	"strings"
+
+	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// GetSubresourceGVKToAPIResourceMap returns a map from each subresource GVK referenced in the policy to its APIResource.
+// It is used to determine whether a kind referenced in a policy is a subresource.
+func GetSubresourceGVKToAPIResourceMap(kindsInPolicy []string, ctx *PolicyContext) map[string]*metav1.APIResource {
+	subresourceGVKToAPIResource := make(map[string]*metav1.APIResource)
+	for _, gvk := range kindsInPolicy {
+		gv, k := kubeutils.GetKindFromGVK(gvk)
+		parentKind, subresource := kubeutils.SplitSubresource(k)
+		// subresourcesInPolicy is non-empty only when the validation request was sent from the CLI without connecting to a cluster.
+		if len(ctx.subresourcesInPolicy) != 0 {
+			if subresource != "" {
+				for _, subresourceInPolicy := range ctx.subresourcesInPolicy {
+					parentResourceGroupVersion := metav1.GroupVersion{
+						Group:   subresourceInPolicy.ParentResource.Group,
+						Version: subresourceInPolicy.ParentResource.Version,
+					}.String()
+					if gv == "" || kubeutils.GroupVersionMatches(gv, parentResourceGroupVersion) {
+						if parentKind == subresourceInPolicy.ParentResource.Kind {
+							if strings.ToLower(subresource) == strings.Split(subresourceInPolicy.APIResource.Name, "/")[1] {
+								subresourceGVKToAPIResource[gvk] = &(subresourceInPolicy.APIResource)
+								break
+							}
+						}
+					}
+				}
+			} else { // the kind itself may be a subresource kind, e.g. 'PodExecOptions'
+				for _, subresourceInPolicy := range ctx.subresourcesInPolicy {
+					// Subresources that can be specified by kind alone, e.g. 'PodExecOptions', have a different kind
+					// than their parent resource. Subresources that share their parent resource's kind must be
+					// specified as Kind/Subresource, e.g. 'Pod/status'.
+					if k == subresourceInPolicy.APIResource.Kind &&
+						k != subresourceInPolicy.ParentResource.Kind {
+						subresourceGroupVersion := metav1.GroupVersion{
+							Group:   subresourceInPolicy.APIResource.Group,
+							Version: subresourceInPolicy.APIResource.Version,
+						}.String()
+						if gv == "" || kubeutils.GroupVersionMatches(gv, subresourceGroupVersion) {
+							subresourceGVKToAPIResource[gvk] = subresourceInPolicy.APIResource.DeepCopy()
+							break
+						}
+					}
+				}
+			}
+		} else if ctx.client != nil {
+			// find the resource from API client
+			apiResource, _, _, err := ctx.client.Discovery().FindResource(gv, k)
+			if err == nil {
+				if kubeutils.IsSubresource(apiResource.Name) {
+					subresourceGVKToAPIResource[gvk] = apiResource
+				}
+			}
+		}
+	}
+	return subresourceGVKToAPIResource
+}
diff --git a/pkg/engine/common_test.go b/pkg/engine/common_test.go
new file mode 100644
index 0000000000..939a89a56f
--- /dev/null
+++ b/pkg/engine/common_test.go
@@ -0,0 +1,78 @@
+package engine
+
+import (
+	"testing"
+
+	"gotest.tools/assert"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func Test_GetSubresourceGVKToAPIResourceMap(t *testing.T) {
+
+	podAPIResource := metav1.APIResource{
+		Name:         "pods",
+		SingularName: "",
+		Namespaced:   true,
+		Kind:         "Pod",
+		Group:        "",
+		Version:      "v1",
+	}
+
+	podStatusAPIResource := metav1.APIResource{
+		Name:         "pods/status",
+		SingularName: "",
+		Namespaced:   true,
+		Kind:         "Pod",
+		Group:        "",
+		Version:      "v1",
+	}
+
+	podEvictAPIResource := metav1.APIResource{
+		Name:         "pods/eviction",
+		SingularName: "",
+		Namespaced:   true,
+		Kind:         "Eviction",
+		Group:        "policy",
+		Version:      "v1",
+	}
+
+	policyContext := NewPolicyContext().
+		WithSubresourcesInPolicy([]struct {
+			APIResource    metav1.APIResource
+			ParentResource metav1.APIResource
+		}{
+			{
+				APIResource:    podStatusAPIResource,
+				ParentResource: podAPIResource,
+			},
+			{
+				APIResource:    podEvictAPIResource,
+				ParentResource: podAPIResource,
+			},
+		})
+
+	kindsInPolicy := []string{"Pod", "Eviction", "Pod/status", "Pod/eviction"}
+
+	subresourceGVKToAPIResourceMap := GetSubresourceGVKToAPIResourceMap(kindsInPolicy, policyContext)
+
+	podStatusResourceFromMap := subresourceGVKToAPIResourceMap["Pod/status"]
+	assert.Equal(t, podStatusResourceFromMap.Name, podStatusAPIResource.Name)
+	assert.Equal(t, podStatusResourceFromMap.Kind, podStatusAPIResource.Kind)
+	assert.Equal(t, podStatusResourceFromMap.Group, podStatusAPIResource.Group)
+	assert.Equal(t, podStatusResourceFromMap.Version, podStatusAPIResource.Version)
+
+	podEvictResourceFromMap := subresourceGVKToAPIResourceMap["Pod/eviction"]
+	assert.Equal(t, podEvictResourceFromMap.Name, podEvictAPIResource.Name)
+	assert.Equal(t, podEvictResourceFromMap.Kind, podEvictAPIResource.Kind)
+	assert.Equal(t, podEvictResourceFromMap.Group, podEvictAPIResource.Group)
+	assert.Equal(t, podEvictResourceFromMap.Version, podEvictAPIResource.Version)
+
+	podEvictResourceFromMap = subresourceGVKToAPIResourceMap["Eviction"]
+	assert.Equal(t, podEvictResourceFromMap.Name, podEvictAPIResource.Name)
+	assert.Equal(t, podEvictResourceFromMap.Kind, podEvictAPIResource.Kind)
+	assert.Equal(t, podEvictResourceFromMap.Group, podEvictAPIResource.Group)
+	assert.Equal(t, podEvictResourceFromMap.Version, podEvictAPIResource.Version)
+
+	_, ok := subresourceGVKToAPIResourceMap["Pod"]
+	assert.Equal(t, ok, false)
+}
diff --git a/pkg/engine/imageVerify.go b/pkg/engine/imageVerify.go
index 6ea9793b7b..bd394c626f 100644
--- a/pkg/engine/imageVerify.go
+++ b/pkg/engine/imageVerify.go
@@ -149,7 +149,7 @@ func VerifyAndPatchImages(
 }
 
 func appendResponse(resp *response.EngineResponse, rule *kyvernov1.Rule, msg string, status response.RuleStatus) {
-	rr := ruleResponse(*rule, response.ImageVerify, msg, status, nil)
+	rr := ruleResponse(*rule, response.ImageVerify, msg, status)
 	resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, *rr)
 	incrementErrorCount(resp)
 }
@@ -196,7 +196,7 @@ func (iv *imageVerifier) verify(ctx context.Context, imageVerify kyvernov1.Image
 		if hasImageVerifiedAnnotationChanged(iv.policyContext, iv.logger) {
 			msg := imageVerifyAnnotationKey + " annotation cannot be changed"
 			iv.logger.Info("image verification error", "reason", msg)
-			ruleResp := ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusFail, nil)
+			ruleResp := ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusFail)
 			iv.resp.PolicyResponse.Rules = append(iv.resp.PolicyResponse.Rules, *ruleResp)
 			incrementAppliedCount(iv.resp)
 			continue
@@ -223,7 +223,7 @@ func (iv *imageVerifier) verify(ctx context.Context, imageVerify kyvernov1.Image
 				ruleResp = ruleError(iv.rule, response.ImageVerify, "failed to update digest", err)
 			} else if patch != nil {
 				if ruleResp == nil {
-					ruleResp = ruleResponse(*iv.rule, response.ImageVerify, "mutated image digest", response.RuleStatusPass, nil)
+					ruleResp = ruleResponse(*iv.rule, response.ImageVerify, "mutated image digest", response.RuleStatusPass)
 				}
 
 				ruleResp.Patches = append(ruleResp.Patches, patch)
@@ -310,7 +310,7 @@ func (iv *imageVerifier) verifyImage(
 	if err := iv.policyContext.jsonContext.AddImageInfo(imageInfo); err != nil {
 		iv.logger.Error(err, "failed to add image to context")
 		msg := fmt.Sprintf("failed to add image to context %s: %s", image, err.Error())
-		return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusError, nil), ""
+		return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusError), ""
 	}
 
 	if len(imageVerify.Attestors) > 0 {
@@ -346,10 +346,10 @@ func (iv *imageVerifier) verifyAttestors(
 			// handle registry network errors as a rule error (instead of a policy failure)
 			var netErr *net.OpError
 			if errors.As(err, &netErr) {
-				return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusError, nil), nil, nil
+				return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusError), nil, nil
 			}
 
-			return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusFail, nil), nil, nil
+			return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusFail), nil, nil
 		}
 		newAttestors = append(newAttestors, attestors[i])
 	}
@@ -359,7 +359,7 @@ func (iv *imageVerifier) verifyAttestors(
 	}
 
 	msg := fmt.Sprintf("verified image signatures for %s", image)
-	return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusPass, nil), cosignResponse, newAttestors
+	return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusPass), cosignResponse, newAttestors
 }
 
 func (iv *imageVerifier) verifyAttestations(
@@ -398,23 +398,23 @@ func (iv *imageVerifier) verifyAttestations(
 					// handle registry network errors as a rule error (instead of a policy failure)
 					var netErr *net.OpError
 					if errors.As(err, &netErr) {
-						return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusError, nil), ""
+						return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusError), ""
 					}
 
-					return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusFail, nil), ""
+					return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusFail), ""
 				}
 
 				verifiedCount++
 				attestationError = iv.verifyAttestation(cosignResp.Statements, attestation, imageInfo)
 				if attestationError != nil {
 					attestationError = errors.Wrapf(attestationError, entryPath+subPath)
-					return ruleResponse(*iv.rule, response.ImageVerify, attestationError.Error(), response.RuleStatusFail, nil), ""
+					return ruleResponse(*iv.rule, response.ImageVerify, attestationError.Error(), response.RuleStatusFail), ""
 				}
 
 				if verifiedCount >= requiredCount {
 					msg := fmt.Sprintf("image attestations verification succeeded, verifiedCount: %v, requiredCount: %v", verifiedCount, requiredCount)
 					iv.logger.V(2).Info(msg)
-					return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusPass, nil), ""
+					return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusPass), ""
 				}
 			}
 		}
@@ -423,7 +423,7 @@ func (iv *imageVerifier) verifyAttestations(
 
 	msg := fmt.Sprintf("verified image attestations for %s", image)
 	iv.logger.V(2).Info(msg)
-	return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusPass, nil), ""
+	return ruleResponse(*iv.rule, response.ImageVerify, msg, response.RuleStatusPass), ""
 }
 
 func (iv *imageVerifier) verifyAttestorSet(
diff --git a/pkg/engine/imageVerifyValidate.go b/pkg/engine/imageVerifyValidate.go
index 830eb4cb8b..d1d8515b2d 100644
--- a/pkg/engine/imageVerifyValidate.go
+++ b/pkg/engine/imageVerifyValidate.go
@@ -23,10 +23,10 @@ func processImageValidationRule(ctx context.Context, log logr.Logger, rclient re
 	log = log.WithValues("rule", rule.Name)
 	matchingImages, _, err := extractMatchingImages(enginectx, rule)
 	if err != nil {
-		return ruleResponse(*rule, response.Validation, err.Error(), response.RuleStatusError, nil)
+		return ruleResponse(*rule, response.Validation, err.Error(), response.RuleStatusError)
 	}
 	if len(matchingImages) == 0 {
-		return ruleResponse(*rule, response.Validation, "image verified", response.RuleStatusSkip, nil)
+		return ruleResponse(*rule, response.Validation, "image verified", response.RuleStatusSkip)
 	}
 	if err := LoadContext(ctx, log, rclient, rule.Context, enginectx, rule.Name); err != nil {
 		if _, ok := err.(gojmespath.NotFoundError); ok {
@@ -48,7 +48,7 @@ func processImageValidationRule(ctx context.Context, log logr.Logger, rclient re
 			return nil
 		}
 
-		return ruleResponse(*rule, response.Validation, "preconditions not met", response.RuleStatusSkip, nil)
+		return ruleResponse(*rule, response.Validation, "preconditions not met", response.RuleStatusSkip)
 	}
 
 	for _, v := range rule.VerifyImages {
@@ -65,14 +65,14 @@ func processImageValidationRule(ctx context.Context, log logr.Logger, rclient re
 
 				log.V(4).Info("validating image", "image", image)
 				if err := validateImage(enginectx, imageVerify, name, imageInfo, log); err != nil {
-					return ruleResponse(*rule, response.ImageVerify, err.Error(), response.RuleStatusFail, nil)
+					return ruleResponse(*rule, response.ImageVerify, err.Error(), response.RuleStatusFail)
 				}
 			}
 		}
 	}
 
 	log.V(4).Info("validated image", "rule", rule.Name)
-	return ruleResponse(*rule, response.Validation, "image verified", response.RuleStatusPass, nil)
+	return ruleResponse(*rule, response.Validation, "image verified", response.RuleStatusPass)
 }
 
 func validateImage(ctx *PolicyContext, imageVerify *kyvernov1.ImageVerification, name string, imageInfo apiutils.ImageInfo, log logr.Logger) error {
diff --git a/pkg/engine/k8smanifest.go b/pkg/engine/k8smanifest.go
index 086d091464..53fd9a1723 100644
--- a/pkg/engine/k8smanifest.go
+++ b/pkg/engine/k8smanifest.go
@@ -51,9 +51,9 @@ func handleVerifyManifest(ctx *PolicyContext, rule *kyvernov1.Rule, logger logr.
 	}
 	logger.V(3).Info("verifyManifest result", "verified", strconv.FormatBool(verified), "reason", reason)
 	if !verified {
-		return ruleResponse(*rule, response.Validation, reason, response.RuleStatusFail, nil)
+		return ruleResponse(*rule, response.Validation, reason, response.RuleStatusFail)
 	}
-	return ruleResponse(*rule, response.Validation, reason, response.RuleStatusPass, nil)
+	return ruleResponse(*rule, response.Validation, reason, response.RuleStatusPass)
 }
 
 func verifyManifest(policyContext *PolicyContext, verifyRule kyvernov1.Manifests, logger logr.Logger) (bool, string, error) {
diff --git a/pkg/engine/loadtargets.go b/pkg/engine/loadtargets.go
index 94ed0b49f3..7e17541166 100644
--- a/pkg/engine/loadtargets.go
+++ b/pkg/engine/loadtargets.go
@@ -3,17 +3,28 @@ package engine
 import (
 	"context"
 	"fmt"
+	"strings"
 
 	"github.com/go-logr/logr"
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	"github.com/kyverno/kyverno/pkg/engine/variables"
+	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
 	"github.com/kyverno/kyverno/pkg/utils/wildcard"
 	"go.uber.org/multierr"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
-func loadTargets(targets []kyvernov1.ResourceSpec, ctx *PolicyContext, logger logr.Logger) ([]unstructured.Unstructured, error) {
-	targetObjects := []unstructured.Unstructured{}
+// resourceInfo holds the unstructured target resource; when the target is a subresource, it also carries the
+// subresource name and the parent resource's GroupVersionResource
+type resourceInfo struct {
+	unstructured      unstructured.Unstructured
+	subresource       string
+	parentResourceGVR metav1.GroupVersionResource
+}
+
+func loadTargets(targets []kyvernov1.ResourceSpec, ctx *PolicyContext, logger logr.Logger) ([]resourceInfo, error) {
+	var targetObjects []resourceInfo
 	var errors []error
 
 	for i := range targets {
@@ -23,7 +34,7 @@ func loadTargets(targets []kyvernov1.ResourceSpec, ctx *PolicyContext, logger lo
 			continue
 		}
 
-		objs, err := getTargets(spec, ctx, logger)
+		objs, err := getTargets(spec, ctx)
 		if err != nil {
 			errors = append(errors, err)
 			continue
@@ -64,8 +75,8 @@ func resolveSpec(i int, target kyvernov1.ResourceSpec, ctx *PolicyContext, logge
 	}, nil
 }
 
-func getTargets(target kyvernov1.ResourceSpec, ctx *PolicyContext, logger logr.Logger) ([]unstructured.Unstructured, error) {
-	var targetObjects []unstructured.Unstructured
+func getTargets(target kyvernov1.ResourceSpec, ctx *PolicyContext) ([]resourceInfo, error) {
+	var targetObjects []resourceInfo
 	namespace := target.Namespace
 	name := target.Name
 
@@ -74,26 +85,82 @@ func getTargets(target kyvernov1.ResourceSpec, ctx *PolicyContext, logger logr.L
 		namespace = ctx.policy.GetNamespace()
 	}
 
-	if namespace != "" && name != "" &&
-		!wildcard.ContainsWildcard(namespace) && !wildcard.ContainsWildcard(name) {
-		obj, err := ctx.client.GetResource(context.TODO(), target.APIVersion, target.Kind, namespace, name)
-		if err != nil {
-			return nil, fmt.Errorf("failed to get target %s/%s %s/%s : %v", target.APIVersion, target.Kind, namespace, name, err)
-		}
-
-		return []unstructured.Unstructured{*obj}, nil
-	}
-
-	// list all targets if wildcard is specified
-	objList, err := ctx.client.ListResource(context.TODO(), target.APIVersion, target.Kind, "", nil)
+	apiResource, parentAPIResource, _, err := ctx.client.Discovery().FindResource(target.APIVersion, target.Kind)
 	if err != nil {
 		return nil, err
 	}
 
-	for i := range objList.Items {
-		obj := objList.Items[i].DeepCopy()
-		if match(namespace, name, obj.GetNamespace(), obj.GetName()) {
-			targetObjects = append(targetObjects, *obj)
+	if namespace != "" && name != "" &&
+		!wildcard.ContainsWildcard(namespace) && !wildcard.ContainsWildcard(name) {
+		// If the target resource is a subresource
+		var obj *unstructured.Unstructured
+		var parentResourceGVR metav1.GroupVersionResource
+		subresourceName := ""
+		if kubeutils.IsSubresource(apiResource.Name) {
+			apiVersion := metav1.GroupVersion{
+				Group:   parentAPIResource.Group,
+				Version: parentAPIResource.Version,
+			}.String()
+			subresourceName = strings.Split(apiResource.Name, "/")[1]
+			obj, err = ctx.client.GetResource(context.TODO(), apiVersion, parentAPIResource.Kind, namespace, name, subresourceName)
+			parentResourceGVR = metav1.GroupVersionResource{
+				Group:    parentAPIResource.Group,
+				Version:  parentAPIResource.Version,
+				Resource: parentAPIResource.Name,
+			}
+		} else {
+			obj, err = ctx.client.GetResource(context.TODO(), target.APIVersion, target.Kind, namespace, name)
+		}
+		if err != nil {
+			return nil, fmt.Errorf("failed to get target %s/%s %s/%s : %v", target.APIVersion, target.Kind, namespace, name, err)
+		}
+
+		return []resourceInfo{{unstructured: *obj, subresource: subresourceName, parentResourceGVR: parentResourceGVR}}, nil
+	}
+
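+	// The target name or namespace is empty or contains wildcards, so list candidate objects and filter them by
+	// namespace and name match.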
+	if kubeutils.IsSubresource(apiResource.Name) {
+		apiVersion := metav1.GroupVersion{
+			Group:   parentAPIResource.Group,
+			Version: parentAPIResource.Version,
+		}.String()
+		objList, err := ctx.client.ListResource(context.TODO(), apiVersion, parentAPIResource.Kind, "", nil)
+		if err != nil {
+			return nil, err
+		}
+		var parentObjects []unstructured.Unstructured
+		for i := range objList.Items {
+			obj := objList.Items[i].DeepCopy()
+			if match(namespace, name, obj.GetNamespace(), obj.GetName()) {
+				parentObjects = append(parentObjects, *obj)
+			}
+		}
+
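+		// fetch the named subresource from each matching parent object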
+		for i := range parentObjects {
+			parentObj := parentObjects[i]
+			subresourceName := strings.Split(apiResource.Name, "/")[1]
+			obj, err := ctx.client.GetResource(context.TODO(), parentObj.GetAPIVersion(), parentAPIResource.Kind, parentObj.GetNamespace(), parentObj.GetName(), subresourceName)
+			if err != nil {
+				return nil, err
+			}
+			parentResourceGVR := metav1.GroupVersionResource{
+				Group:    parentAPIResource.Group,
+				Version:  parentAPIResource.Version,
+				Resource: parentAPIResource.Name,
+			}
+			targetObjects = append(targetObjects, resourceInfo{unstructured: *obj, subresource: subresourceName, parentResourceGVR: parentResourceGVR})
+		}
+	} else {
+		// list all targets if wildcard is specified
+		objList, err := ctx.client.ListResource(context.TODO(), target.APIVersion, target.Kind, "", nil)
+		if err != nil {
+			return nil, err
+		}
+
+		for i := range objList.Items {
+			obj := objList.Items[i].DeepCopy()
+			if match(namespace, name, obj.GetNamespace(), obj.GetName()) {
+				targetObjects = append(targetObjects, resourceInfo{unstructured: *obj})
+			}
 		}
 	}
 
diff --git a/pkg/engine/mutate/patch/patchesUtils.go b/pkg/engine/mutate/patch/patchesUtils.go
index ece74cbaea..574ba79bb6 100644
--- a/pkg/engine/mutate/patch/patchesUtils.go
+++ b/pkg/engine/mutate/patch/patchesUtils.go
@@ -5,7 +5,7 @@ import (
 	"regexp"
 	"strings"
 
-	wildcard "github.com/kyverno/kyverno/pkg/utils/wildcard"
+	"github.com/kyverno/kyverno/pkg/utils/wildcard"
 	"github.com/mattbaird/jsonpatch"
 )
 
@@ -126,10 +126,6 @@ func filterInvalidPatches(patches []jsonpatch.JsonPatchOperation) []jsonpatch.Js
 }
 
 func ignorePatch(path string) bool {
-	if strings.Contains(path, "/status") {
-		return true
-	}
-
 	if wildcard.Match("/spec/triggers/*/metadata/*", path) {
 		return false
 	}
diff --git a/pkg/engine/mutate/patch/patchesUtils_test.go b/pkg/engine/mutate/patch/patchesUtils_test.go
index c2182783d4..d03ff6a96e 100644
--- a/pkg/engine/mutate/patch/patchesUtils_test.go
+++ b/pkg/engine/mutate/patch/patchesUtils_test.go
@@ -195,7 +195,7 @@ func Test_ignorePath(t *testing.T) {
 		},
 		{
 			path:   "/status",
-			ignore: true,
+			ignore: false,
 		},
 		{
 			path:   "/spec",
diff --git a/pkg/engine/mutation.go b/pkg/engine/mutation.go
index e8e2c3cc2f..144c9271bb 100644
--- a/pkg/engine/mutation.go
+++ b/pkg/engine/mutation.go
@@ -15,6 +15,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/engine/response"
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/registryclient"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
@@ -54,7 +55,9 @@ func Mutate(ctx context.Context, rclient registryclient.Client, policyContext *P
 			excludeResource = policyContext.excludeGroupRole
 		}
 
-		if err = MatchesResourceDescription(matchedResource, rule, policyContext.admissionInfo, excludeResource, policyContext.namespaceLabels, policyContext.policy.GetNamespace()); err != nil {
+		kindsInPolicy := append(rule.MatchResources.GetKinds(), rule.ExcludeResources.GetKinds()...)
+		subresourceGVKToAPIResource := GetSubresourceGVKToAPIResourceMap(kindsInPolicy, policyContext)
+		if err = MatchesResourceDescription(subresourceGVKToAPIResource, matchedResource, rule, policyContext.admissionInfo, excludeResource, policyContext.namespaceLabels, policyContext.policy.GetNamespace(), policyContext.subresource); err != nil {
 			logger.V(4).Info("rule not matched", "reason", err.Error())
 			skippedRules = append(skippedRules, rule.Name)
 			continue
@@ -81,17 +84,23 @@ func Mutate(ctx context.Context, rclient registryclient.Client, policyContext *P
 		}
 
 		ruleCopy := rule.DeepCopy()
-		var patchedResources []unstructured.Unstructured
+		var patchedResources []resourceInfo
 		if !policyContext.admissionOperation && rule.IsMutateExisting() {
 			targets, err := loadTargets(ruleCopy.Mutation.Targets, policyContext, logger)
 			if err != nil {
-				rr := ruleResponse(rule, response.Mutation, err.Error(), response.RuleStatusError, nil)
+				rr := ruleResponse(rule, response.Mutation, err.Error(), response.RuleStatusError)
 				resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, *rr)
 			} else {
 				patchedResources = append(patchedResources, targets...)
 			}
 		} else {
-			patchedResources = append(patchedResources, matchedResource)
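+			// when the request targets a subresource, the policy context's requestResource holds the parent resource's GVR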
+			var parentResourceGVR metav1.GroupVersionResource
+			if policyContext.subresource != "" {
+				parentResourceGVR = policyContext.requestResource
+			}
+			patchedResources = append(patchedResources, resourceInfo{
+				unstructured: matchedResource, subresource: policyContext.subresource, parentResourceGVR: parentResourceGVR,
+			})
 		}
 
 		for _, patchedResource := range patchedResources {
@@ -101,21 +110,21 @@ func Mutate(ctx context.Context, rclient registryclient.Client, policyContext *P
 
 			if !policyContext.admissionOperation && rule.IsMutateExisting() {
 				policyContext := policyContext.Copy()
-				if err := policyContext.jsonContext.AddTargetResource(patchedResource.Object); err != nil {
+				if err := policyContext.jsonContext.AddTargetResource(patchedResource.unstructured.Object); err != nil {
 					logging.Error(err, "failed to add target resource to the context")
 					continue
 				}
 			}
 
-			logger.V(4).Info("apply rule to resource", "rule", rule.Name, "resource namespace", patchedResource.GetNamespace(), "resource name", patchedResource.GetName())
+			logger.V(4).Info("apply rule to resource", "rule", rule.Name, "resource namespace", patchedResource.unstructured.GetNamespace(), "resource name", patchedResource.unstructured.GetName())
 			var ruleResp *response.RuleResponse
 			if rule.Mutation.ForEachMutation != nil {
-				ruleResp, patchedResource = mutateForEach(ctx, rclient, ruleCopy, policyContext, patchedResource, logger)
+				ruleResp, patchedResource.unstructured = mutateForEach(ctx, rclient, ruleCopy, policyContext, patchedResource.unstructured, patchedResource.subresource, patchedResource.parentResourceGVR, logger)
 			} else {
-				ruleResp, patchedResource = mutateResource(ruleCopy, policyContext, patchedResource, logger)
+				ruleResp, patchedResource.unstructured = mutateResource(ruleCopy, policyContext, patchedResource.unstructured, patchedResource.subresource, patchedResource.parentResourceGVR, logger)
 			}
 
-			matchedResource = patchedResource
+			matchedResource = patchedResource.unstructured
 
 			if ruleResp != nil {
 				resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, *ruleResp)
@@ -145,22 +154,22 @@ func Mutate(ctx context.Context, rclient registryclient.Client, policyContext *P
 	return resp
 }
 
-func mutateResource(rule *kyvernov1.Rule, ctx *PolicyContext, resource unstructured.Unstructured, logger logr.Logger) (*response.RuleResponse, unstructured.Unstructured) {
+func mutateResource(rule *kyvernov1.Rule, ctx *PolicyContext, resource unstructured.Unstructured, subresourceName string, parentResourceGVR metav1.GroupVersionResource, logger logr.Logger) (*response.RuleResponse, unstructured.Unstructured) {
 	preconditionsPassed, err := checkPreconditions(logger, ctx, rule.GetAnyAllConditions())
 	if err != nil {
 		return ruleError(rule, response.Mutation, "failed to evaluate preconditions", err), resource
 	}
 
 	if !preconditionsPassed {
-		return ruleResponse(*rule, response.Mutation, "preconditions not met", response.RuleStatusSkip, &resource), resource
+		return ruleResponseWithPatchedTarget(*rule, response.Mutation, "preconditions not met", response.RuleStatusSkip, &resource, subresourceName, parentResourceGVR), resource
 	}
 
 	mutateResp := mutate.Mutate(rule, ctx.jsonContext, resource, logger)
-	ruleResp := buildRuleResponse(rule, mutateResp, &mutateResp.PatchedResource)
+	ruleResp := buildRuleResponse(rule, mutateResp, &mutateResp.PatchedResource, subresourceName, parentResourceGVR)
 	return ruleResp, mutateResp.PatchedResource
 }
 
-func mutateForEach(ctx context.Context, rclient registryclient.Client, rule *kyvernov1.Rule, enginectx *PolicyContext, resource unstructured.Unstructured, logger logr.Logger) (*response.RuleResponse, unstructured.Unstructured) {
+func mutateForEach(ctx context.Context, rclient registryclient.Client, rule *kyvernov1.Rule, enginectx *PolicyContext, resource unstructured.Unstructured, subresourceName string, parentResourceGVR metav1.GroupVersionResource, logger logr.Logger) (*response.RuleResponse, unstructured.Unstructured) {
 	foreachList := rule.Mutation.ForEachMutation
 	if foreachList == nil {
 		return nil, resource
@@ -182,7 +191,7 @@ func mutateForEach(ctx context.Context, rclient registryclient.Client, rule *kyv
 		}
 
 		if !preconditionsPassed {
-			return ruleResponse(*rule, response.Mutation, "preconditions not met", response.RuleStatusSkip, &patchedResource), resource
+			return ruleResponseWithPatchedTarget(*rule, response.Mutation, "preconditions not met", response.RuleStatusSkip, &patchedResource, subresourceName, parentResourceGVR), resource
 		}
 
 		elements, err := evaluateList(foreach.List, enginectx.jsonContext)
@@ -194,7 +203,7 @@ func mutateForEach(ctx context.Context, rclient registryclient.Client, rule *kyv
 		mutateResp := mutateElements(ctx, rclient, rule.Name, foreach, enginectx, elements, patchedResource, logger)
 		if mutateResp.Status == response.RuleStatusError {
 			logger.Error(err, "failed to mutate elements")
-			return buildRuleResponse(rule, mutateResp, nil), resource
+			return buildRuleResponse(rule, mutateResp, nil, "", metav1.GroupVersionResource{}), resource
 		}
 
 		if mutateResp.Status != response.RuleStatusSkip {
@@ -207,10 +216,10 @@ func mutateForEach(ctx context.Context, rclient registryclient.Client, rule *kyv
 	}
 
 	if applyCount == 0 {
-		return ruleResponse(*rule, response.Mutation, "0 elements processed", response.RuleStatusSkip, &resource), resource
+		return ruleResponseWithPatchedTarget(*rule, response.Mutation, "0 elements processed", response.RuleStatusSkip, &resource, subresourceName, parentResourceGVR), resource
 	}
 
-	r := ruleResponse(*rule, response.Mutation, fmt.Sprintf("%d elements processed", applyCount), response.RuleStatusPass, &patchedResource)
+	r := ruleResponseWithPatchedTarget(*rule, response.Mutation, fmt.Sprintf("%d elements processed", applyCount), response.RuleStatusPass, &patchedResource, subresourceName, parentResourceGVR)
 	r.Patches = allPatches
 	return r, patchedResource
 }
@@ -279,8 +288,8 @@ func mutateError(err error, message string) *mutate.Response {
 	}
 }
 
-func buildRuleResponse(rule *kyvernov1.Rule, mutateResp *mutate.Response, patchedResource *unstructured.Unstructured) *response.RuleResponse {
-	resp := ruleResponse(*rule, response.Mutation, mutateResp.Message, mutateResp.Status, patchedResource)
+func buildRuleResponse(rule *kyvernov1.Rule, mutateResp *mutate.Response, patchedResource *unstructured.Unstructured, patchedSubresourceName string, parentResourceGVR metav1.GroupVersionResource) *response.RuleResponse {
+	resp := ruleResponseWithPatchedTarget(*rule, response.Mutation, mutateResp.Message, mutateResp.Status, patchedResource, patchedSubresourceName, parentResourceGVR)
 	if resp.Status == response.RuleStatusPass {
 		resp.Patches = mutateResp.Patches
 		resp.Message = buildSuccessMessage(mutateResp.PatchedResource)
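
For orientation, a minimal sketch (not part of this patch) of what the extra parameters carry when a mutate rule targets pods/status; the object, rule, context, and logger variables are assumed to exist, only the parameter order mirrors the new mutateResource signature:

    // illustrative only: mutating the status subresource of a Pod passes the
    // subresource name and the parent resource's GVR through to the response,
    // so callers can tell which API endpoint was actually patched.
    parentGVR := metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
    ruleResp, patched := mutateResource(ruleCopy, policyContext, podStatus, "status", parentGVR, logger)
    _, _ = ruleResp, patched
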
diff --git a/pkg/engine/policyContext.go b/pkg/engine/policyContext.go
index 2931231f9d..eb97f12f5e 100644
--- a/pkg/engine/policyContext.go
+++ b/pkg/engine/policyContext.go
@@ -11,6 +11,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/utils"
 	"github.com/pkg/errors"
 	admissionv1 "k8s.io/api/admission/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
@@ -34,6 +35,16 @@ type PolicyContext struct {
 	// admissionInfo contains the admission request information
 	admissionInfo kyvernov1beta1.RequestInfo
 
+	// requestResource is the fully-qualified resource of the original API request (for example, v1.pods).
+	// If this is specified and differs from the value in "resource", an equivalent match and conversion was performed.
+	//
+	// For example, if deployments can be modified via apps/v1 and apps/v1beta1, and a webhook registered a rule of
+	// `apiGroups:["apps"], apiVersions:["v1"], resources: ["deployments"]` and `matchPolicy: Equivalent`,
+	// an API request to apps/v1beta1 deployments would be converted and sent to the webhook
+	// with `resource: {group:"apps", version:"v1", resource:"deployments"}` (matching the resource the webhook registered for),
+	// and `requestResource: {group:"apps", version:"v1beta1", resource:"deployments"}` (indicating the resource of the original API request).
+	requestResource metav1.GroupVersionResource
+
 	// Dynamic client - used for api lookups
 	client dclient.Interface
 
@@ -53,6 +64,17 @@ type PolicyContext struct {
 
 	// informerCacheResolvers - used to get resources from informer cache
 	informerCacheResolvers resolvers.ConfigmapResolver
+
+	// subresource is the subresource being requested, if any (for example, "status" or "scale")
+	subresource string
+
+	// subresourcesInPolicy lists the APIResources that are subresources, together with their parent resource.
+	// It is used to determine whether a resource is a subresource and is only populated when the policy context is
+	// built by the Kyverno CLI; when connected to a cluster it is left empty.
+	subresourcesInPolicy []struct {
+		APIResource    metav1.APIResource
+		ParentResource metav1.APIResource
+	}
 }
 
 // Getters
@@ -97,6 +119,12 @@ func (c *PolicyContext) WithAdmissionInfo(admissionInfo kyvernov1beta1.RequestIn
 	return copy
 }
 
+func (c *PolicyContext) WithRequestResource(requestResource metav1.GroupVersionResource) *PolicyContext {
+	copy := c.Copy()
+	copy.requestResource = requestResource
+	return copy
+}
+
 func (c *PolicyContext) WithNewResource(resource unstructured.Unstructured) *PolicyContext {
 	copy := c.Copy()
 	copy.newResource = resource
@@ -147,6 +175,22 @@ func (c *PolicyContext) WithInformerCacheResolver(informerCacheResolver resolver
 	return copy
 }
 
+func (c *PolicyContext) WithSubresource(subresource string) *PolicyContext {
+	copy := c.Copy()
+	copy.subresource = subresource
+	return copy
+}
+
+func (c *PolicyContext) WithSubresourcesInPolicy(subresourcesInPolicy []struct {
+	APIResource    metav1.APIResource
+	ParentResource metav1.APIResource
+},
+) *PolicyContext {
+	copy := c.Copy()
+	copy.subresourcesInPolicy = subresourcesInPolicy
+	return copy
+}
+
 // Constructors
 
 func NewPolicyContextWithJsonContext(jsonContext context.Interface) *PolicyContext {
@@ -181,6 +225,7 @@ func NewPolicyContextFromAdmissionRequest(
 	if err := ctx.AddImageInfos(&newResource); err != nil {
 		return nil, errors.Wrap(err, "failed to add image information to the policy rule context")
 	}
+	requestResource := request.RequestResource.DeepCopy()
 	policyContext := NewPolicyContextWithJsonContext(ctx).
 		WithNewResource(newResource).
 		WithOldResource(oldResource).
@@ -188,7 +233,9 @@ func NewPolicyContextFromAdmissionRequest(
 		WithConfiguration(configuration).
 		WithClient(client).
 		WithAdmissionOperation(true).
-		WithInformerCacheResolver(informerCacheResolver)
+		WithInformerCacheResolver(informerCacheResolver).
+		WithRequestResource(*requestResource).
+		WithSubresource(request.SubResource)
 	return policyContext, nil
 }
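
A minimal sketch of how a caller outside the admission path, such as the CLI, could populate the new fields; jsonCtx and resource are assumed to exist and the APIResource values are illustrative assumptions, only the builder methods come from this file:

    subresourcesInPolicy := []struct {
        APIResource    metav1.APIResource
        ParentResource metav1.APIResource
    }{{
        APIResource:    metav1.APIResource{Name: "pods/status", Kind: "Pod", Version: "v1", Namespaced: true},
        ParentResource: metav1.APIResource{Name: "pods", Kind: "Pod", Version: "v1", Namespaced: true},
    }}
    policyContext := NewPolicyContextWithJsonContext(jsonCtx).
        WithNewResource(resource).
        WithSubresource("status").
        WithSubresourcesInPolicy(subresourcesInPolicy)
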
 
diff --git a/pkg/engine/response/response.go b/pkg/engine/response/response.go
index 22cb77c1f5..47d777dbc6 100644
--- a/pkg/engine/response/response.go
+++ b/pkg/engine/response/response.go
@@ -7,6 +7,7 @@ import (
 
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	"github.com/kyverno/kyverno/pkg/utils/wildcard"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
 
@@ -114,6 +115,14 @@ type RuleResponse struct {
 
 	// PatchedTarget is the patched resource for mutate.targets
 	PatchedTarget *unstructured.Unstructured
+
+	// PatchedTargetSubresourceName is the name of the subresource that was patched; empty if the patched resource is
+	// not a subresource.
+	PatchedTargetSubresourceName string
+
+	// PatchedTargetParentResourceGVR is the GVR of the parent resource of the PatchedTarget. This is only populated
+	// when PatchedTarget is a subresource.
+	PatchedTargetParentResourceGVR metav1.GroupVersionResource
 }
 
 // ToString ...
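
A hedged sketch of how a consumer of RuleResponse could interpret the new fields; the branch bodies are placeholders, no client API from this patch is implied:

    if ruleResp.PatchedTarget != nil {
        if ruleResp.PatchedTargetSubresourceName != "" {
            // the patched target is a subresource such as "status" or "scale";
            // updates must go to the parent resource identified by
            // PatchedTargetParentResourceGVR plus the subresource path.
            parent := ruleResp.PatchedTargetParentResourceGVR
            _ = parent
        } else {
            // plain resource: update it directly
        }
    }
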
diff --git a/pkg/engine/utils.go b/pkg/engine/utils.go
index ec1a1eea0f..1238deae91 100644
--- a/pkg/engine/utils.go
+++ b/pkg/engine/utils.go
@@ -17,7 +17,8 @@ import (
 	"github.com/kyverno/kyverno/pkg/engine/wildcards"
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/utils"
-	wildcard "github.com/kyverno/kyverno/pkg/utils/wildcard"
+	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
+	"github.com/kyverno/kyverno/pkg/utils/wildcard"
 	"github.com/pkg/errors"
 	"golang.org/x/exp/slices"
 	"golang.org/x/text/cases"
@@ -39,32 +40,30 @@ type EngineStats struct {
 	RulesAppliedCount int
 }
 
-func checkKind(kinds []string, resourceKind string, gvk schema.GroupVersionKind) bool {
+func checkKind(subresourceGVKToAPIResource map[string]*metav1.APIResource, kinds []string, gvk schema.GroupVersionKind, subresourceInAdmnReview string) bool {
 	title := cases.Title(language.Und, cases.NoLower)
+	result := false
 	for _, k := range kinds {
-		parts := strings.Split(k, "/")
-		if len(parts) == 1 {
-			if k == "*" || resourceKind == title.String(k) {
-				return true
+		if k != "*" {
+			gv, kind := kubeutils.GetKindFromGVK(k)
+			apiResource, ok := subresourceGVKToAPIResource[k]
+			if ok {
+				result = apiResource.Group == gvk.Group && (apiResource.Version == gvk.Version || strings.Contains(gv, "*")) && apiResource.Kind == gvk.Kind
+			} else { // if the kind is not found in the subresourceGVKToAPIResource, then it is not a subresource
+				result = title.String(kind) == gvk.Kind && subresourceInAdmnReview == ""
+				if gv != "" {
+					result = result && kubeutils.GroupVersionMatches(gv, gvk.GroupVersion().String())
+				}
 			}
+		} else {
+			result = true
 		}
 
-		if len(parts) == 2 {
-			kindParts := strings.SplitN(parts[1], ".", 2)
-			if gvk.Kind == title.String(kindParts[0]) && gvk.Version == parts[0] {
-				return true
-			}
-		}
-
-		if len(parts) == 3 || len(parts) == 4 {
-			kindParts := strings.SplitN(parts[2], ".", 2)
-			if gvk.Group == parts[0] && (gvk.Version == parts[1] || parts[1] == "*") && gvk.Kind == title.String(kindParts[0]) {
-				return true
-			}
+		if result {
+			break
 		}
 	}
-
-	return false
+	return result
 }
 
 func checkName(name, resourceName string) bool {
@@ -142,11 +141,11 @@ func checkSelector(labelSelector *metav1.LabelSelector, resourceLabels map[strin
 // should be: AND across attributes but an OR inside attributes that of type list
 // To filter out the targeted resources with UserInfo, the check
 // should be: OR (across & inside) attributes
-func doesResourceMatchConditionBlock(conditionBlock kyvernov1.ResourceDescription, userInfo kyvernov1.UserInfo, admissionInfo kyvernov1beta1.RequestInfo, resource unstructured.Unstructured, dynamicConfig []string, namespaceLabels map[string]string) []error {
+func doesResourceMatchConditionBlock(subresourceGVKToAPIResource map[string]*metav1.APIResource, conditionBlock kyvernov1.ResourceDescription, userInfo kyvernov1.UserInfo, admissionInfo kyvernov1beta1.RequestInfo, resource unstructured.Unstructured, dynamicConfig []string, namespaceLabels map[string]string, subresourceInAdmnReview string) []error {
 	var errs []error
 
 	if len(conditionBlock.Kinds) > 0 {
-		if !checkKind(conditionBlock.Kinds, resource.GetKind(), resource.GroupVersionKind()) {
+		if !checkKind(subresourceGVKToAPIResource, conditionBlock.Kinds, resource.GroupVersionKind(), subresourceInAdmnReview) {
 			errs = append(errs, fmt.Errorf("kind does not match %v", conditionBlock.Kinds))
 		}
 	}
@@ -282,7 +281,7 @@ func matchSubjects(ruleSubjects []rbacv1.Subject, userInfo authenticationv1.User
 }
 
 // MatchesResourceDescription checks if the resource matches resource description of the rule or not
-func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef kyvernov1.Rule, admissionInfoRef kyvernov1beta1.RequestInfo, dynamicConfig []string, namespaceLabels map[string]string, policyNamespace string) error {
+func MatchesResourceDescription(subresourceGVKToAPIResource map[string]*metav1.APIResource, resourceRef unstructured.Unstructured, ruleRef kyvernov1.Rule, admissionInfoRef kyvernov1beta1.RequestInfo, dynamicConfig []string, namespaceLabels map[string]string, policyNamespace, subresourceInAdmnReview string) error {
 	rule := ruleRef.DeepCopy()
 	resource := *resourceRef.DeepCopy()
 	admissionInfo := *admissionInfoRef.DeepCopy()
@@ -298,7 +297,7 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
 		oneMatched := false
 		for _, rmr := range rule.MatchResources.Any {
 			// if there are no errors it means it was a match
-			if len(matchesResourceDescriptionMatchHelper(rmr, admissionInfo, resource, dynamicConfig, namespaceLabels)) == 0 {
+			if len(matchesResourceDescriptionMatchHelper(subresourceGVKToAPIResource, rmr, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)) == 0 {
 				oneMatched = true
 				break
 			}
@@ -309,17 +308,17 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
 	} else if len(rule.MatchResources.All) > 0 {
 		// include object if ALL of the criteria match
 		for _, rmr := range rule.MatchResources.All {
-			reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionMatchHelper(rmr, admissionInfo, resource, dynamicConfig, namespaceLabels)...)
+			reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionMatchHelper(subresourceGVKToAPIResource, rmr, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)...)
 		}
 	} else {
 		rmr := kyvernov1.ResourceFilter{UserInfo: rule.MatchResources.UserInfo, ResourceDescription: rule.MatchResources.ResourceDescription}
-		reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionMatchHelper(rmr, admissionInfo, resource, dynamicConfig, namespaceLabels)...)
+		reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionMatchHelper(subresourceGVKToAPIResource, rmr, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)...)
 	}
 
 	if len(rule.ExcludeResources.Any) > 0 {
 		// exclude the object if ANY of the criteria match
 		for _, rer := range rule.ExcludeResources.Any {
-			reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionExcludeHelper(rer, admissionInfo, resource, dynamicConfig, namespaceLabels)...)
+			reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionExcludeHelper(subresourceGVKToAPIResource, rer, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)...)
 		}
 	} else if len(rule.ExcludeResources.All) > 0 {
 		// exclude the object if ALL the criteria match
@@ -327,7 +326,7 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
 		for _, rer := range rule.ExcludeResources.All {
 			// we got no errors implying a resource did NOT exclude it
 			// "matchesResourceDescriptionExcludeHelper" returns errors if resource is excluded by a filter
-			if len(matchesResourceDescriptionExcludeHelper(rer, admissionInfo, resource, dynamicConfig, namespaceLabels)) == 0 {
+			if len(matchesResourceDescriptionExcludeHelper(subresourceGVKToAPIResource, rer, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)) == 0 {
 				excludedByAll = false
 				break
 			}
@@ -337,7 +336,7 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
 		}
 	} else {
 		rer := kyvernov1.ResourceFilter{UserInfo: rule.ExcludeResources.UserInfo, ResourceDescription: rule.ExcludeResources.ResourceDescription}
-		reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionExcludeHelper(rer, admissionInfo, resource, dynamicConfig, namespaceLabels)...)
+		reasonsForFailure = append(reasonsForFailure, matchesResourceDescriptionExcludeHelper(subresourceGVKToAPIResource, rer, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)...)
 	}
 
 	// creating final error
@@ -355,7 +354,7 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
 	return nil
 }
 
-func matchesResourceDescriptionMatchHelper(rmr kyvernov1.ResourceFilter, admissionInfo kyvernov1beta1.RequestInfo, resource unstructured.Unstructured, dynamicConfig []string, namespaceLabels map[string]string) []error {
+func matchesResourceDescriptionMatchHelper(subresourceGVKToAPIResource map[string]*metav1.APIResource, rmr kyvernov1.ResourceFilter, admissionInfo kyvernov1beta1.RequestInfo, resource unstructured.Unstructured, dynamicConfig []string, namespaceLabels map[string]string, subresourceInAdmnReview string) []error {
 	var errs []error
 	if reflect.DeepEqual(admissionInfo, kyvernov1.RequestInfo{}) {
 		rmr.UserInfo = kyvernov1.UserInfo{}
@@ -364,7 +363,7 @@ func matchesResourceDescriptionMatchHelper(rmr kyvernov1.ResourceFilter, admissi
 	// checking if resource matches the rule
 	if !reflect.DeepEqual(rmr.ResourceDescription, kyvernov1.ResourceDescription{}) ||
 		!reflect.DeepEqual(rmr.UserInfo, kyvernov1.UserInfo{}) {
-		matchErrs := doesResourceMatchConditionBlock(rmr.ResourceDescription, rmr.UserInfo, admissionInfo, resource, dynamicConfig, namespaceLabels)
+		matchErrs := doesResourceMatchConditionBlock(subresourceGVKToAPIResource, rmr.ResourceDescription, rmr.UserInfo, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)
 		errs = append(errs, matchErrs...)
 	} else {
 		errs = append(errs, fmt.Errorf("match cannot be empty"))
@@ -372,12 +371,12 @@ func matchesResourceDescriptionMatchHelper(rmr kyvernov1.ResourceFilter, admissi
 	return errs
 }
 
-func matchesResourceDescriptionExcludeHelper(rer kyvernov1.ResourceFilter, admissionInfo kyvernov1beta1.RequestInfo, resource unstructured.Unstructured, dynamicConfig []string, namespaceLabels map[string]string) []error {
+func matchesResourceDescriptionExcludeHelper(subresourceGVKToAPIResource map[string]*metav1.APIResource, rer kyvernov1.ResourceFilter, admissionInfo kyvernov1beta1.RequestInfo, resource unstructured.Unstructured, dynamicConfig []string, namespaceLabels map[string]string, subresourceInAdmnReview string) []error {
 	var errs []error
 	// checking if resource matches the rule
 	if !reflect.DeepEqual(rer.ResourceDescription, kyvernov1.ResourceDescription{}) ||
 		!reflect.DeepEqual(rer.UserInfo, kyvernov1.UserInfo{}) {
-		excludeErrs := doesResourceMatchConditionBlock(rer.ResourceDescription, rer.UserInfo, admissionInfo, resource, dynamicConfig, namespaceLabels)
+		excludeErrs := doesResourceMatchConditionBlock(subresourceGVKToAPIResource, rer.ResourceDescription, rer.UserInfo, admissionInfo, resource, dynamicConfig, namespaceLabels, subresourceInAdmnReview)
 		// it was a match so we want to exclude it
 		if len(excludeErrs) == 0 {
 			errs = append(errs, fmt.Errorf("resource excluded since one of the criteria excluded it"))
@@ -456,10 +455,20 @@ func evaluateList(jmesPath string, ctx context.EvalInterface) ([]interface{}, er
 
 func ruleError(rule *kyvernov1.Rule, ruleType response.RuleType, msg string, err error) *response.RuleResponse {
 	msg = fmt.Sprintf("%s: %s", msg, err.Error())
-	return ruleResponse(*rule, ruleType, msg, response.RuleStatusError, nil)
+	return ruleResponse(*rule, ruleType, msg, response.RuleStatusError)
 }
 
-func ruleResponse(rule kyvernov1.Rule, ruleType response.RuleType, msg string, status response.RuleStatus, patchedResource *unstructured.Unstructured) *response.RuleResponse {
+func ruleResponse(rule kyvernov1.Rule, ruleType response.RuleType, msg string, status response.RuleStatus) *response.RuleResponse {
+	resp := &response.RuleResponse{
+		Name:    rule.Name,
+		Type:    ruleType,
+		Message: msg,
+		Status:  status,
+	}
+	return resp
+}
+
+func ruleResponseWithPatchedTarget(rule kyvernov1.Rule, ruleType response.RuleType, msg string, status response.RuleStatus, patchedResource *unstructured.Unstructured, patchedSubresourceName string, parentResourceGVR metav1.GroupVersionResource) *response.RuleResponse {
 	resp := &response.RuleResponse{
 		Name:    rule.Name,
 		Type:    ruleType,
@@ -469,6 +478,8 @@ func ruleResponse(rule kyvernov1.Rule, ruleType response.RuleType, msg string, s
 
 	if rule.Mutation.Targets != nil {
 		resp.PatchedTarget = patchedResource
+		resp.PatchedTargetSubresourceName = patchedSubresourceName
+		resp.PatchedTargetParentResourceGVR = parentResourceGVR
 	}
 	return resp
 }
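
To make the new matching rules concrete, a few calls to the helpers checkKind now relies on; the results in the comments follow the implementations added to pkg/utils/kube/kind.go later in this patch:

    gv, kind := kubeutils.GetKindFromGVK("v1/Pod.status")             // gv == "v1", kind == "Pod/status"
    gv, kind = kubeutils.GetKindFromGVK("apps/v1/Deployment")         // gv == "apps/v1", kind == "Deployment"
    gv, kind = kubeutils.GetKindFromGVK("tekton.dev/v1beta1/TaskRun") // gv == "tekton.dev/v1beta1", kind == "TaskRun"
    _, _ = gv, kind

    // GroupVersionMatches allows a trailing wildcard on the version:
    kubeutils.GroupVersionMatches("tekton.dev/*", "tekton.dev/v1alpha1") // true
    kubeutils.GroupVersionMatches("v1", "v1")                            // true
    kubeutils.GroupVersionMatches("apps/v1", "apps/v1beta1")             // false
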
diff --git a/pkg/engine/utils_test.go b/pkg/engine/utils_test.go
index e8e466f1e8..4a7472324a 100644
--- a/pkg/engine/utils_test.go
+++ b/pkg/engine/utils_test.go
@@ -7,7 +7,7 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 
 	v1 "github.com/kyverno/kyverno/api/kyverno/v1"
-	v1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
+	"github.com/kyverno/kyverno/api/kyverno/v1beta1"
 	"github.com/kyverno/kyverno/pkg/autogen"
 	"github.com/kyverno/kyverno/pkg/engine/utils"
 	"gotest.tools/assert"
@@ -905,7 +905,7 @@ func TestMatchesResourceDescription(t *testing.T) {
 		resource, _ := utils.ConvertToUnstructured(tc.Resource)
 
 		for _, rule := range autogen.ComputeRules(&policy) {
-			err := MatchesResourceDescription(*resource, rule, tc.AdmissionInfo, []string{}, nil, "")
+			err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, tc.AdmissionInfo, []string{}, nil, "", "")
 			if err != nil {
 				if !tc.areErrorsExpected {
 					t.Errorf("Testcase %d Unexpected error: %v\nmsg: %s", i+1, err, tc.Description)
@@ -1810,7 +1810,7 @@ func TestMatchesResourceDescription_GenerateName(t *testing.T) {
 		resource, _ := utils.ConvertToUnstructured(tc.Resource)
 
 		for _, rule := range autogen.ComputeRules(&policy) {
-			err := MatchesResourceDescription(*resource, rule, tc.AdmissionInfo, []string{}, nil, "")
+			err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, tc.AdmissionInfo, []string{}, nil, "", "")
 			if err != nil {
 				if !tc.areErrorsExpected {
 					t.Errorf("Testcase %d Unexpected error: %v\nmsg: %s", i+1, err, tc.Description)
@@ -1878,7 +1878,7 @@ func TestResourceDescriptionMatch_MultipleKind(t *testing.T) {
 	}
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err != nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err != nil {
 		t.Errorf("Testcase has failed due to the following:%v", err)
 	}
 
@@ -1939,7 +1939,7 @@ func TestResourceDescriptionMatch_Name(t *testing.T) {
 	}
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err != nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err != nil {
 		t.Errorf("Testcase has failed due to the following:%v", err)
 	}
 }
@@ -1998,7 +1998,7 @@ func TestResourceDescriptionMatch_GenerateName(t *testing.T) {
 	}
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err != nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err != nil {
 		t.Errorf("Testcase has failed due to the following:%v", err)
 	}
 }
@@ -2058,7 +2058,7 @@ func TestResourceDescriptionMatch_Name_Regex(t *testing.T) {
 	}
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err != nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err != nil {
 		t.Errorf("Testcase has failed due to the following:%v", err)
 	}
 }
@@ -2117,7 +2117,7 @@ func TestResourceDescriptionMatch_GenerateName_Regex(t *testing.T) {
 	}
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err != nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err != nil {
 		t.Errorf("Testcase has failed due to the following:%v", err)
 	}
 }
@@ -2185,7 +2185,7 @@ func TestResourceDescriptionMatch_Label_Expression_NotMatch(t *testing.T) {
 	}
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err != nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err != nil {
 		t.Errorf("Testcase has failed due to the following:%v", err)
 	}
 }
@@ -2254,7 +2254,7 @@ func TestResourceDescriptionMatch_Label_Expression_Match(t *testing.T) {
 	}
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err != nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err != nil {
 		t.Errorf("Testcase has failed due to the following:%v", err)
 	}
 }
@@ -2334,7 +2334,7 @@ func TestResourceDescriptionExclude_Label_Expression_Match(t *testing.T) {
 	rule := v1.Rule{MatchResources: v1.MatchResources{ResourceDescription: resourceDescription},
 		ExcludeResources: v1.MatchResources{ResourceDescription: resourceDescriptionExclude}}
 
-	if err := MatchesResourceDescription(*resource, rule, v1beta1.RequestInfo{}, []string{}, nil, ""); err == nil {
+	if err := MatchesResourceDescription(make(map[string]*metav1.APIResource), *resource, rule, v1beta1.RequestInfo{}, []string{}, nil, "", ""); err == nil {
 		t.Errorf("Testcase has failed due to the following:\n Function has returned no error, even though it was supposed to fail")
 	}
 }
@@ -2480,21 +2480,66 @@ func TestManagedPodResource(t *testing.T) {
 }
 
 func Test_checkKind(t *testing.T) {
-	match := checkKind([]string{"*"}, "Deployment", schema.GroupVersionKind{Kind: "Deployment", Group: "", Version: "v1"})
+	subresourceGVKToAPIResource := make(map[string]*metav1.APIResource)
+	match := checkKind(subresourceGVKToAPIResource, []string{"*"}, schema.GroupVersionKind{Kind: "Deployment", Group: "", Version: "v1"}, "")
 	assert.Equal(t, match, true)
 
-	match = checkKind([]string{"Pod"}, "Pod", schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"})
+	match = checkKind(subresourceGVKToAPIResource, []string{"Pod"}, schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"}, "")
 	assert.Equal(t, match, true)
 
-	match = checkKind([]string{"v1/Pod"}, "Pod", schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"})
+	match = checkKind(subresourceGVKToAPIResource, []string{"v1/Pod"}, schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"}, "")
 	assert.Equal(t, match, true)
 
-	match = checkKind([]string{"tekton.dev/v1beta1/TaskRun"}, "TaskRun", schema.GroupVersionKind{Kind: "TaskRun", Group: "tekton.dev", Version: "v1beta1"})
+	match = checkKind(subresourceGVKToAPIResource, []string{"tekton.dev/v1beta1/TaskRun"}, schema.GroupVersionKind{Kind: "TaskRun", Group: "tekton.dev", Version: "v1beta1"}, "")
 	assert.Equal(t, match, true)
 
-	match = checkKind([]string{"tekton.dev/v1beta1/TaskRun/status"}, "TaskRun", schema.GroupVersionKind{Kind: "TaskRun", Group: "tekton.dev", Version: "v1beta1"})
+	match = checkKind(subresourceGVKToAPIResource, []string{"tekton.dev/*/TaskRun"}, schema.GroupVersionKind{Kind: "TaskRun", Group: "tekton.dev", Version: "v1alpha1"}, "")
 	assert.Equal(t, match, true)
 
-	match = checkKind([]string{"v1/pod.status"}, "Pod", schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"})
+	// Although 'pods' and 'pods/status' share the same kind ('Pod'), they are different resources; 'subresourceInAdmnReview' is used to tell them apart.
+	match = checkKind(subresourceGVKToAPIResource, []string{"v1/Pod"}, schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"}, "status")
+	assert.Equal(t, match, false)
+
+	// Although 'pods' and 'pods/status' share the same kind ('Pod'), they are different resources; 'subresourceInAdmnReview' is used to tell them apart.
+	match = checkKind(subresourceGVKToAPIResource, []string{"v1/Pod"}, schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"}, "ephemeralcontainers")
+	assert.Equal(t, match, false)
+
+	subresourceGVKToAPIResource["networking.k8s.io/v1/NetworkPolicy/status"] = &metav1.APIResource{
+		Name:         "networkpolicies/status",
+		SingularName: "",
+		Namespaced:   true,
+		Kind:         "NetworkPolicy",
+		Group:        "networking.k8s.io",
+		Version:      "v1",
+	}
+
+	subresourceGVKToAPIResource["v1/Pod.status"] = &metav1.APIResource{
+		Name:         "pods/status",
+		SingularName: "",
+		Namespaced:   true,
+		Kind:         "Pod",
+		Group:        "",
+		Version:      "v1",
+	}
+
+	subresourceGVKToAPIResource["*/Pod.eviction"] = &metav1.APIResource{
+		Name:         "pods/eviction",
+		SingularName: "",
+		Namespaced:   true,
+		Kind:         "Eviction",
+		Group:        "policy",
+		Version:      "v1",
+	}
+
+	match = checkKind(subresourceGVKToAPIResource, []string{"networking.k8s.io/v1/NetworkPolicy/status"}, schema.GroupVersionKind{Kind: "NetworkPolicy", Group: "networking.k8s.io", Version: "v1"}, "status")
 	assert.Equal(t, match, true)
+
+	match = checkKind(subresourceGVKToAPIResource, []string{"v1/Pod.status"}, schema.GroupVersionKind{Kind: "Pod", Group: "", Version: "v1"}, "status")
+	assert.Equal(t, match, true)
+
+	match = checkKind(subresourceGVKToAPIResource, []string{"*/Pod.eviction"}, schema.GroupVersionKind{Kind: "Eviction", Group: "policy", Version: "v1"}, "eviction")
+	assert.Equal(t, match, true)
+
+	match = checkKind(subresourceGVKToAPIResource, []string{"v1alpha1/Pod.eviction"}, schema.GroupVersionKind{Kind: "Eviction", Group: "policy", Version: "v1"}, "eviction")
+	assert.Equal(t, match, false)
 }
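
Note that the lookup map is keyed by the policy's own kind strings, which is why both the slash and the dot notation appear verbatim above; checkKind resolves each kind string against the map before falling back to plain kind matching, roughly:

    // rough sketch of the lookup checkKind performs for every kind string k
    if apiResource, ok := subresourceGVKToAPIResource["v1/Pod.status"]; ok {
        // apiResource.Name == "pods/status", apiResource.Kind == "Pod"
        _ = apiResource
    }
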
diff --git a/pkg/engine/validation.go b/pkg/engine/validation.go
index 2d857b02a0..c202eaf3f7 100644
--- a/pkg/engine/validation.go
+++ b/pkg/engine/validation.go
@@ -14,7 +14,6 @@ import (
 	"github.com/kyverno/kyverno/cmd/cli/kubectl-kyverno/utils/store"
 	"github.com/kyverno/kyverno/pkg/autogen"
 	"github.com/kyverno/kyverno/pkg/engine/common"
-	enginecontext "github.com/kyverno/kyverno/pkg/engine/context"
 	"github.com/kyverno/kyverno/pkg/engine/response"
 	"github.com/kyverno/kyverno/pkg/engine/validate"
 	"github.com/kyverno/kyverno/pkg/engine/variables"
@@ -148,22 +147,6 @@ func validateResource(ctx context.Context, log logr.Logger, rclient registryclie
 	return resp
 }
 
-func validateOldObject(ctx context.Context, log logr.Logger, rclient registryclient.Client, enginectx *PolicyContext, rule *kyvernov1.Rule) (*response.RuleResponse, error) {
-	ctxCopy := enginectx.Copy()
-	ctxCopy.newResource = *ctxCopy.oldResource.DeepCopy()
-	ctxCopy.oldResource = unstructured.Unstructured{}
-
-	if err := enginecontext.ReplaceResource(ctxCopy.jsonContext, ctxCopy.newResource.Object); err != nil {
-		return nil, errors.Wrapf(err, "failed to replace object in the JSON context")
-	}
-
-	if err := enginecontext.ReplaceOldResource(ctxCopy.jsonContext, ctxCopy.oldResource.Object); err != nil {
-		return nil, errors.Wrapf(err, "failed to replace old object in the JSON context")
-	}
-
-	return processValidationRule(ctx, log, rclient, ctxCopy, rule), nil
-}
-
 func processValidationRule(ctx context.Context, log logr.Logger, rclient registryclient.Client, enginectx *PolicyContext, rule *kyvernov1.Rule) *response.RuleResponse {
 	v := newValidator(log, rclient, enginectx, rule)
 	if rule.Validation.ForEachValidation != nil {
@@ -247,7 +230,7 @@ func (v *validator) validate(ctx context.Context) *response.RuleResponse {
 	}
 
 	if !preconditionsPassed {
-		return ruleResponse(*v.rule, response.Validation, "preconditions not met", response.RuleStatusSkip, nil)
+		return ruleResponse(*v.rule, response.Validation, "preconditions not met", response.RuleStatusSkip)
 	}
 
 	if v.deny != nil {
@@ -260,17 +243,6 @@ func (v *validator) validate(ctx context.Context) *response.RuleResponse {
 		}
 
 		ruleResponse := v.validateResourceWithRule()
-		if isUpdateRequest(v.ctx) {
-			priorResp, err := validateOldObject(ctx, v.log, v.rclient, v.ctx, v.rule)
-			if err != nil {
-				return ruleError(v.rule, response.Validation, "failed to validate old object", err)
-			}
-
-			if isSameRuleResponse(ruleResponse, priorResp) {
-				v.log.V(3).Info("skipping modified resource as validation results have not changed")
-				return nil
-			}
-		}
 
 		return ruleResponse
 	}
@@ -295,7 +267,7 @@ func (v *validator) validateForEach(ctx context.Context) *response.RuleResponse
 	if err != nil {
 		return ruleError(v.rule, response.Validation, "failed to evaluate preconditions", err)
 	} else if !preconditionsPassed {
-		return ruleResponse(*v.rule, response.Validation, "preconditions not met", response.RuleStatusSkip, nil)
+		return ruleResponse(*v.rule, response.Validation, "preconditions not met", response.RuleStatusSkip)
 	}
 
 	foreachList := v.rule.Validation.ForEachValidation
@@ -319,10 +291,10 @@ func (v *validator) validateForEach(ctx context.Context) *response.RuleResponse
 	}
 
 	if applyCount == 0 {
-		return ruleResponse(*v.rule, response.Validation, "rule skipped", response.RuleStatusSkip, nil)
+		return ruleResponse(*v.rule, response.Validation, "rule skipped", response.RuleStatusSkip)
 	}
 
-	return ruleResponse(*v.rule, response.Validation, "rule passed", response.RuleStatusPass, nil)
+	return ruleResponse(*v.rule, response.Validation, "rule passed", response.RuleStatusPass)
 }
 
 func (v *validator) validateElements(ctx context.Context, foreach kyvernov1.ForEachValidation, elements []interface{}, elementScope *bool) (*response.RuleResponse, int) {
@@ -357,16 +329,16 @@ func (v *validator) validateElements(ctx context.Context, foreach kyvernov1.ForE
 					continue
 				}
 				msg := fmt.Sprintf("validation failure: %v", r.Message)
-				return ruleResponse(*v.rule, response.Validation, msg, r.Status, nil), applyCount
+				return ruleResponse(*v.rule, response.Validation, msg, r.Status), applyCount
 			}
 			msg := fmt.Sprintf("validation failure: %v", r.Message)
-			return ruleResponse(*v.rule, response.Validation, msg, r.Status, nil), applyCount
+			return ruleResponse(*v.rule, response.Validation, msg, r.Status), applyCount
 		}
 
 		applyCount++
 	}
 
-	return ruleResponse(*v.rule, response.Validation, "", response.RuleStatusPass, nil), applyCount
+	return ruleResponse(*v.rule, response.Validation, "", response.RuleStatusPass), applyCount
 }
 
 func addElementToContext(ctx *PolicyContext, e interface{}, elementIndex int, elementScope *bool) error {
@@ -434,10 +406,10 @@ func (v *validator) validateDeny() *response.RuleResponse {
 
 	deny := variables.EvaluateConditions(v.log, v.ctx.jsonContext, denyConditions)
 	if deny {
-		return ruleResponse(*v.rule, response.Validation, v.getDenyMessage(deny), response.RuleStatusFail, nil)
+		return ruleResponse(*v.rule, response.Validation, v.getDenyMessage(deny), response.RuleStatusFail)
 	}
 
-	return ruleResponse(*v.rule, response.Validation, v.getDenyMessage(deny), response.RuleStatusPass, nil)
+	return ruleResponse(*v.rule, response.Validation, v.getDenyMessage(deny), response.RuleStatusPass)
 }
 
 func (v *validator) getDenyMessage(deny bool) string {
@@ -528,10 +500,10 @@ func (v *validator) validatePodSecurity() *response.RuleResponse {
 	}
 	if allowed {
 		msg := fmt.Sprintf("Validation rule '%s' passed.", v.rule.Name)
-		return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusPass, nil)
+		return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusPass)
 	} else {
 		msg := fmt.Sprintf(`Validation rule '%s' failed. It violates PodSecurity "%s:%s": %s`, v.rule.Name, v.podSecurity.Level, v.podSecurity.Version, pss.FormatChecksPrint(pssChecks))
-		return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusFail, nil)
+		return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusFail)
 	}
 }
 
@@ -554,11 +526,6 @@ func isDeleteRequest(ctx *PolicyContext) bool {
 	return isEmptyUnstructured(&ctx.newResource)
 }
 
-func isUpdateRequest(ctx *PolicyContext) bool {
-	// is the OldObject and NewObject are available, the request is an UPDATE
-	return !isEmptyUnstructured(&ctx.oldResource) && !isEmptyUnstructured(&ctx.newResource)
-}
-
 func isEmptyUnstructured(u *unstructured.Unstructured) bool {
 	if u == nil {
 		return true
@@ -573,13 +540,16 @@ func isEmptyUnstructured(u *unstructured.Unstructured) bool {
 
 // matches checks if either the new or old resource satisfies the filter conditions defined in the rule
 func matches(logger logr.Logger, rule *kyvernov1.Rule, ctx *PolicyContext) bool {
-	err := MatchesResourceDescription(ctx.newResource, *rule, ctx.admissionInfo, ctx.excludeGroupRole, ctx.namespaceLabels, "")
+	kindsInPolicy := append(rule.MatchResources.GetKinds(), rule.ExcludeResources.GetKinds()...)
+	subresourceGVKToAPIResource := GetSubresourceGVKToAPIResourceMap(kindsInPolicy, ctx)
+
+	err := MatchesResourceDescription(subresourceGVKToAPIResource, ctx.newResource, *rule, ctx.admissionInfo, ctx.excludeGroupRole, ctx.namespaceLabels, "", ctx.subresource)
 	if err == nil {
 		return true
 	}
 
-	if !reflect.DeepEqual(ctx.oldResource, unstructured.Unstructured{}) {
-		err := MatchesResourceDescription(ctx.oldResource, *rule, ctx.admissionInfo, ctx.excludeGroupRole, ctx.namespaceLabels, "")
+	if !reflect.DeepEqual(ctx.oldResource, unstructured.Unstructured{}) {
+		err := MatchesResourceDescription(subresourceGVKToAPIResource, ctx.oldResource, *rule, ctx.admissionInfo, ctx.excludeGroupRole, ctx.namespaceLabels, "", ctx.subresource)
 		if err == nil {
 			return true
 		}
@@ -589,26 +559,6 @@ func matches(logger logr.Logger, rule *kyvernov1.Rule, ctx *PolicyContext) bool
 	return false
 }
 
-func isSameRuleResponse(r1 *response.RuleResponse, r2 *response.RuleResponse) bool {
-	if r1.Name != r2.Name {
-		return false
-	}
-
-	if r1.Type != r2.Type {
-		return false
-	}
-
-	if r1.Message != r2.Message {
-		return false
-	}
-
-	if r1.Status != r2.Status {
-		return false
-	}
-
-	return true
-}
-
 // validatePatterns validate pattern and anyPattern
 func (v *validator) validatePatterns(resource unstructured.Unstructured) *response.RuleResponse {
 	if v.pattern != nil {
@@ -618,22 +568,22 @@ func (v *validator) validatePatterns(resource unstructured.Unstructured) *respon
 				v.log.V(3).Info("validation error", "path", pe.Path, "error", err.Error())
 
 				if pe.Skip {
-					return ruleResponse(*v.rule, response.Validation, pe.Error(), response.RuleStatusSkip, nil)
+					return ruleResponse(*v.rule, response.Validation, pe.Error(), response.RuleStatusSkip)
 				}
 
 				if pe.Path == "" {
-					return ruleResponse(*v.rule, response.Validation, v.buildErrorMessage(err, ""), response.RuleStatusError, nil)
+					return ruleResponse(*v.rule, response.Validation, v.buildErrorMessage(err, ""), response.RuleStatusError)
 				}
 
-				return ruleResponse(*v.rule, response.Validation, v.buildErrorMessage(err, pe.Path), response.RuleStatusFail, nil)
+				return ruleResponse(*v.rule, response.Validation, v.buildErrorMessage(err, pe.Path), response.RuleStatusFail)
 			}
 
-			return ruleResponse(*v.rule, response.Validation, v.buildErrorMessage(err, pe.Path), response.RuleStatusError, nil)
+			return ruleResponse(*v.rule, response.Validation, v.buildErrorMessage(err, pe.Path), response.RuleStatusError)
 		}
 
 		v.log.V(4).Info("successfully processed rule")
 		msg := fmt.Sprintf("validation rule '%s' passed.", v.rule.Name)
-		return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusPass, nil)
+		return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusPass)
 	}
 
 	if v.anyPattern != nil {
@@ -644,14 +594,14 @@ func (v *validator) validatePatterns(resource unstructured.Unstructured) *respon
 		anyPatterns, err := deserializeAnyPattern(v.anyPattern)
 		if err != nil {
 			msg := fmt.Sprintf("failed to deserialize anyPattern, expected type array: %v", err)
-			return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusError, nil)
+			return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusError)
 		}
 
 		for idx, pattern := range anyPatterns {
 			err := validate.MatchPattern(v.log, resource.Object, pattern)
 			if err == nil {
 				msg := fmt.Sprintf("validation rule '%s' anyPattern[%d] passed.", v.rule.Name, idx)
-				return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusPass, nil)
+				return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusPass)
 			}
 
 			if pe, ok := err.(*validate.PatternError); ok {
@@ -680,7 +630,7 @@ func (v *validator) validatePatterns(resource unstructured.Unstructured) *respon
 			}
 
 			v.log.V(4).Info(fmt.Sprintf("Validation rule '%s' skipped. %s", v.rule.Name, errorStr))
-			return ruleResponse(*v.rule, response.Validation, strings.Join(errorStr, " "), response.RuleStatusSkip, nil)
+			return ruleResponse(*v.rule, response.Validation, strings.Join(errorStr, " "), response.RuleStatusSkip)
 		} else if len(failedAnyPatternsErrors) > 0 {
 			var errorStr []string
 			for _, err := range failedAnyPatternsErrors {
@@ -689,11 +639,11 @@ func (v *validator) validatePatterns(resource unstructured.Unstructured) *respon
 
 			v.log.V(4).Info(fmt.Sprintf("Validation rule '%s' failed. %s", v.rule.Name, errorStr))
 			msg := buildAnyPatternErrorMessage(v.rule, errorStr)
-			return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusFail, nil)
+			return ruleResponse(*v.rule, response.Validation, msg, response.RuleStatusFail)
 		}
 	}
 
-	return ruleResponse(*v.rule, response.Validation, v.rule.Validation.Message, response.RuleStatusPass, nil)
+	return ruleResponse(*v.rule, response.Validation, v.rule.Validation.Message, response.RuleStatusPass)
 }
 
 func deserializeAnyPattern(anyPattern apiextensions.JSON) ([]interface{}, error) {
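
Because matches now threads ctx.subresource through, a rule that lists only Pod or v1/Pod no longer applies to a pods/status admission review. A rough sketch of the effect (rule, podObject, and admissionInfo are assumed; the rule is assumed to match kind "Pod"):

    // with no subresource entries in the map, a Pod rule does not cover pods/status
    err := MatchesResourceDescription(map[string]*metav1.APIResource{}, podObject, rule,
        admissionInfo, nil, nil, "", "status")
    // err != nil: the kinds in the rule do not match the status subresource
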
diff --git a/pkg/policy/common.go b/pkg/policy/common.go
index b4528d4a63..7798d0b5d9 100644
--- a/pkg/policy/common.go
+++ b/pkg/policy/common.go
@@ -11,7 +11,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/utils"
 	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
-	wildcard "github.com/kyverno/kyverno/pkg/utils/wildcard"
+	"github.com/kyverno/kyverno/pkg/utils/wildcard"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
diff --git a/pkg/policy/existing.go b/pkg/policy/existing.go
index e59c2cc2f3..23603e671a 100644
--- a/pkg/policy/existing.go
+++ b/pkg/policy/existing.go
@@ -209,7 +209,7 @@ func (pc *PolicyController) processExistingKinds(kinds []string, policy kyvernov
 		if err != nil {
 			gv, k := kubeutils.GetKindFromGVK(kind)
 			if !strings.Contains(k, "*") {
-				resourceSchema, _, err := pc.client.Discovery().FindResource(gv, k)
+				resourceSchema, _, _, err := pc.client.Discovery().FindResource(gv, k)
 				if err != nil {
 					logger.Error(err, "failed to find resource", "kind", k)
 					continue
diff --git a/pkg/policy/validate.go b/pkg/policy/validate.go
index 3b14237570..2c14bd6776 100644
--- a/pkg/policy/validate.go
+++ b/pkg/policy/validate.go
@@ -261,13 +261,23 @@ func Validate(policy kyvernov1.PolicyInterface, client dclient.Interface, mock b
 			}
 		}
 
-		podOnlyMap := make(map[string]bool) // Validate that Kind is only Pod
-		podOnlyMap["Pod"] = true
-		if reflect.DeepEqual(common.GetKindsFromRule(rule), podOnlyMap) && podControllerAutoGenExclusion(policy) {
-			msg := "Policies that match Pods apply to all Pods including those created and managed by controllers " +
-				"excluded from autogen. Use preconditions to exclude the Pods managed by controllers which are " +
-				"excluded from autogen. Refer to https://kyverno.io/docs/writing-policies/autogen/ for details."
-			warnings = append(warnings, msg)
+		kindsFromRule := rule.MatchResources.GetKinds()
+		resourceTypesMap := make(map[string]bool)
+		for _, kind := range kindsFromRule {
+			_, k := kubeutils.GetKindFromGVK(kind)
+			k, _ = kubeutils.SplitSubresource(k)
+			resourceTypesMap[k] = true
+		}
+		if len(resourceTypesMap) == 1 {
+			for k := range resourceTypesMap {
+				if k == "Pod" && podControllerAutoGenExclusion(policy) {
+					msg := "Policies that match Pods apply to all Pods including those created and managed by controllers " +
+						"excluded from autogen. Use preconditions to exclude the Pods managed by controllers which are " +
+						"excluded from autogen. Refer to https://kyverno.io/docs/writing-policies/autogen/ for details."
+
+					warnings = append(warnings, msg)
+				}
+			}
 		}
 
 		// Validate Kind with match resource kinds
@@ -279,7 +289,7 @@ func Validate(policy kyvernov1.PolicyInterface, client dclient.Interface, mock b
 				return warnings, wildcardErr
 			}
 			if !slices.Contains(value.ResourceDescription.Kinds, "*") {
-				err := validateKinds(value.ResourceDescription.Kinds, mock, client, policy)
+				err := validateKinds(value.ResourceDescription.Kinds, mock, background, rule.HasValidate(), client)
 				if err != nil {
 					return warnings, errors.Wrapf(err, "the kind defined in the any match resource is invalid")
 				}
@@ -291,7 +301,7 @@ func Validate(policy kyvernov1.PolicyInterface, client dclient.Interface, mock b
 				return warnings, wildcardErr
 			}
 			if !slices.Contains(value.ResourceDescription.Kinds, "*") {
-				err := validateKinds(value.ResourceDescription.Kinds, mock, client, policy)
+				err := validateKinds(value.ResourceDescription.Kinds, mock, background, rule.HasValidate(), client)
 				if err != nil {
 					return warnings, errors.Wrapf(err, "the kind defined in the all match resource is invalid")
 				}
@@ -303,7 +313,7 @@ func Validate(policy kyvernov1.PolicyInterface, client dclient.Interface, mock b
 				return warnings, wildcardErr
 			}
 			if !slices.Contains(value.ResourceDescription.Kinds, "*") {
-				err := validateKinds(value.ResourceDescription.Kinds, mock, client, policy)
+				err := validateKinds(value.ResourceDescription.Kinds, mock, background, rule.HasValidate(), client)
 				if err != nil {
 					return warnings, errors.Wrapf(err, "the kind defined in the any exclude resource is invalid")
 				}
@@ -315,7 +325,7 @@ func Validate(policy kyvernov1.PolicyInterface, client dclient.Interface, mock b
 				return warnings, wildcardErr
 			}
 			if !slices.Contains(value.ResourceDescription.Kinds, "*") {
-				err := validateKinds(value.ResourceDescription.Kinds, mock, client, policy)
+				err := validateKinds(value.ResourceDescription.Kinds, mock, background, rule.HasValidate(), client)
 				if err != nil {
 					return warnings, errors.Wrapf(err, "the kind defined in the all exclude resource is invalid")
 				}
@@ -323,11 +333,11 @@ func Validate(policy kyvernov1.PolicyInterface, client dclient.Interface, mock b
 		}
 
 		if !slices.Contains(rule.MatchResources.Kinds, "*") {
-			err := validateKinds(rule.MatchResources.Kinds, mock, client, policy)
+			err := validateKinds(rule.MatchResources.Kinds, mock, background, rule.HasValidate(), client)
 			if err != nil {
 				return warnings, errors.Wrapf(err, "match resource kind is invalid")
 			}
-			err = validateKinds(rule.ExcludeResources.Kinds, mock, client, policy)
+			err = validateKinds(rule.ExcludeResources.Kinds, mock, background, rule.HasValidate(), client)
 			if err != nil {
 				return warnings, errors.Wrapf(err, "exclude resource kind is invalid")
 			}
@@ -383,6 +393,35 @@ func Validate(policy kyvernov1.PolicyInterface, client dclient.Interface, mock b
 				}
 			}
 		}
+
+		matchKinds := match.GetKinds()
+		excludeKinds := exclude.GetKinds()
+		allKinds := make([]string, 0, len(matchKinds)+len(excludeKinds))
+		allKinds = append(allKinds, matchKinds...)
+		allKinds = append(allKinds, excludeKinds...)
+		if rule.HasValidate() {
+			validationJson, err := json.Marshal(rule.Validation)
+			if err != nil {
+				return nil, err
+			}
+			checkForScaleSubresource(validationJson, allKinds, &warnings)
+			checkForStatusSubresource(validationJson, allKinds, &warnings)
+			checkForEphemeralContainersSubresource(validationJson, allKinds, &warnings)
+		}
+
+		if rule.HasMutate() {
+			mutationJson, err := json.Marshal(rule.Mutation)
+			if err != nil {
+				return nil, err
+			}
+			for _, target := range rule.Mutation.Targets {
+				allKinds = append(allKinds, target.GetKind())
+			}
+			checkForScaleSubresource(mutationJson, allKinds, &warnings)
+			checkForStatusSubresource(mutationJson, allKinds, &warnings)
+			checkForEphemeralContainersSubresource(mutationJson, allKinds, &warnings)
+		}
 	}
 	if !mock && (spec.SchemaValidation == nil || *spec.SchemaValidation) {
 		if err := openApiManager.ValidatePolicyMutation(policy); err != nil {
@@ -1209,16 +1248,20 @@ func validateWildcard(kinds []string, spec *kyvernov1.Spec, rule kyvernov1.Rule)
 }
 
 // validateKinds verifies if an API resource that matches 'kind' is valid kind
-// and found in the cache, returns error if not found
-func validateKinds(kinds []string, mock bool, client dclient.Interface, p kyvernov1.PolicyInterface) error {
+// and found in the cache; it returns an error if the resource is not found or if background scanning
+// is enabled for a subresource.
+func validateKinds(kinds []string, mock, backgroundScanningEnabled, isValidationPolicy bool, client dclient.Interface) error {
 	for _, kind := range kinds {
-		gv, k := kubeutils.GetKindFromGVK(kind)
-
-		if !mock && !kubeutils.SkipSubResources(k) && !strings.Contains(kind, "*") {
-			_, _, err := client.Discovery().FindResource(gv, k)
+		if !mock && !strings.Contains(kind, "*") {
+			gv, k := kubeutils.GetKindFromGVK(kind)
+			_, _, gvr, err := client.Discovery().FindResource(gv, k)
 			if err != nil {
 				return fmt.Errorf("unable to convert GVK to GVR for kinds %s, err: %s", k, err)
 			}
+			_, subresource := kubeutils.SplitSubresource(gvr.Resource)
+			if subresource != "" && isValidationPolicy && backgroundScanningEnabled {
+				return fmt.Errorf("background scan enabled with subresource %s", subresource)
+			}
 		}
 	}
 	return nil
@@ -1280,3 +1323,39 @@ func validateNamespaces(s *kyvernov1.Spec, path *field.Path) error {
 
 	return nil
 }
+
+func checkForScaleSubresource(ruleTypeJson []byte, allKinds []string, warnings *[]string) {
+	if strings.Contains(string(ruleTypeJson), "replicas") {
+		for _, kind := range allKinds {
+			if strings.Contains(strings.ToLower(kind), "scale") {
+				return
+			}
+		}
+		msg := "You are matching on replicas but not including the scale subresource in the policy."
+		*warnings = append(*warnings, msg)
+	}
+}
+
+func checkForStatusSubresource(ruleTypeJson []byte, allKinds []string, warnings *[]string) {
+	if strings.Contains(string(ruleTypeJson), "status") {
+		for _, kind := range allKinds {
+			if strings.Contains(strings.ToLower(kind), "status") {
+				return
+			}
+		}
+		msg := "You are matching on status but not including the status subresource in the policy."
+		*warnings = append(*warnings, msg)
+	}
+}
+
+func checkForEphemeralContainersSubresource(ruleTypeJson []byte, allKinds []string, warnings *[]string) {
+	if strings.Contains(string(ruleTypeJson), "ephemeralcontainers") {
+		for _, kind := range allKinds {
+			if strings.Contains(strings.ToLower(kind), "ephemeralcontainers") {
+				return
+			}
+		}
+		msg := "You are matching on ephemeralcontainers but not including the ephemeralcontainers subresource in the policy."
+		*warnings = append(*warnings, msg)
+	}
+}
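
For illustration, how the scale check above behaves (a sketch, not a test from this patch): a validate rule that references replicas but whose kinds never mention a scale subresource collects a warning, while listing the subresource silences it.

    var warnings []string
    validationJSON := []byte(`{"pattern":{"spec":{"replicas":"<=3"}}}`)

    // no kind mentions "scale": the warning is appended
    checkForScaleSubresource(validationJSON, []string{"apps/v1/Deployment"}, &warnings)
    // warnings[0] == "You are matching on replicas but not including the scale subresource in the policy."

    // the kind list includes the scale subresource: no warning is added
    checkForScaleSubresource(validationJSON, []string{"apps/v1/Deployment/scale"}, &warnings)
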
diff --git a/pkg/policycache/cache.go b/pkg/policycache/cache.go
index 0318cd2416..435e42fa81 100644
--- a/pkg/policycache/cache.go
+++ b/pkg/policycache/cache.go
@@ -8,7 +8,7 @@ import (
 // Cache get method use for to get policy names and mostly use to test cache testcases
 type Cache interface {
 	// Set inserts a policy in the cache
-	Set(string, kyvernov1.PolicyInterface)
+	Set(string, kyvernov1.PolicyInterface, map[string]string)
 	// Unset removes a policy from the cache
 	Unset(string)
 	// GetPolicies returns all policies that apply to a namespace, including cluster-wide policies
@@ -27,8 +27,8 @@ func NewCache() Cache {
 	}
 }
 
-func (c *cache) Set(key string, policy kyvernov1.PolicyInterface) {
-	c.store.set(key, policy)
+func (c *cache) Set(key string, policy kyvernov1.PolicyInterface, subresourceGVKToKind map[string]string) {
+	c.store.set(key, policy, subresourceGVKToKind)
 }
 
 func (c *cache) Unset(key string) {
diff --git a/pkg/policycache/cache_test.go b/pkg/policycache/cache_test.go
index c57611aab2..27eda85ac8 100644
--- a/pkg/policycache/cache_test.go
+++ b/pkg/policycache/cache_test.go
@@ -12,7 +12,7 @@ import (
 
 func setPolicy(store store, policy kyvernov1.PolicyInterface) {
 	key, _ := kubecache.MetaNamespaceKeyFunc(policy)
-	store.set(key, policy)
+	store.set(key, policy, make(map[string]string))
 }
 
 func unsetPolicy(store store, policy kyvernov1.PolicyInterface) {
@@ -1164,7 +1164,7 @@ func Test_Get_Policies(t *testing.T) {
 	cache := NewCache()
 	policy := newPolicy(t)
 	key, _ := kubecache.MetaNamespaceKeyFunc(policy)
-	cache.Set(key, policy)
+	cache.Set(key, policy, make(map[string]string))
 
 	validateAudit := cache.GetPolicies(ValidateAudit, "Namespace", "")
 	if len(validateAudit) != 0 {
@@ -1197,7 +1197,7 @@ func Test_Get_Policies_Ns(t *testing.T) {
 	cache := NewCache()
 	policy := newNsPolicy(t)
 	key, _ := kubecache.MetaNamespaceKeyFunc(policy)
-	cache.Set(key, policy)
+	cache.Set(key, policy, make(map[string]string))
 	nspace := policy.GetNamespace()
 
 	validateAudit := cache.GetPolicies(ValidateAudit, "Pod", nspace)
@@ -1226,9 +1226,9 @@ func Test_Get_Policies_Validate_Failure_Action_Overrides(t *testing.T) {
 	policy1 := newValidateAuditPolicy(t)
 	policy2 := newValidateEnforcePolicy(t)
 	key1, _ := kubecache.MetaNamespaceKeyFunc(policy1)
-	cache.Set(key1, policy1)
+	cache.Set(key1, policy1, make(map[string]string))
 	key2, _ := kubecache.MetaNamespaceKeyFunc(policy2)
-	cache.Set(key2, policy2)
+	cache.Set(key2, policy2, make(map[string]string))
 
 	validateAudit := cache.GetPolicies(ValidateAudit, "Pod", "")
 	if len(validateAudit) != 1 {
diff --git a/pkg/policycache/store.go b/pkg/policycache/store.go
index 2e9268b963..9ee666a980 100644
--- a/pkg/policycache/store.go
+++ b/pkg/policycache/store.go
@@ -12,7 +12,7 @@ import (
 
 type store interface {
 	// set inserts a policy in the cache
-	set(string, kyvernov1.PolicyInterface)
+	set(string, kyvernov1.PolicyInterface, map[string]string)
 	// unset removes a policy from the cache
 	unset(string)
 	// get finds policies that match a given type, gvk and namespace
@@ -30,10 +30,10 @@ func newPolicyCache() store {
 	}
 }
 
-func (pc *policyCache) set(key string, policy kyvernov1.PolicyInterface) {
+func (pc *policyCache) set(key string, policy kyvernov1.PolicyInterface, subresourceGVKToKind map[string]string) {
 	pc.lock.Lock()
 	defer pc.lock.Unlock()
-	pc.store.set(key, policy)
+	pc.store.set(key, policy, subresourceGVKToKind)
 	logger.V(4).Info("policy is added to cache", "key", key)
 }
 
@@ -93,7 +93,7 @@ func set(set sets.String, item string, value bool) sets.String {
 	}
 }
 
-func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface) {
+func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface, subresourceGVKToKind map[string]string) {
 	enforcePolicy := computeEnforcePolicy(policy.GetSpec())
 	m.policies[key] = policy
 	type state struct {
@@ -102,13 +102,16 @@ func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface) {
 	kindStates := map[string]state{}
 	for _, rule := range autogen.ComputeRules(policy) {
 		for _, gvk := range rule.MatchResources.GetKinds() {
-			kind := computeKind(gvk)
+			kind, ok := subresourceGVKToKind[gvk]
+			if !ok {
+				kind = computeKind(gvk)
+			}
 			entry := kindStates[kind]
-			entry.hasMutate = (entry.hasMutate || rule.HasMutate())
-			entry.hasValidate = (entry.hasValidate || rule.HasValidate())
-			entry.hasGenerate = (entry.hasGenerate || rule.HasGenerate())
-			entry.hasVerifyImages = (entry.hasVerifyImages || rule.HasVerifyImages())
-			entry.hasImagesValidationChecks = (entry.hasImagesValidationChecks || rule.HasImagesValidationChecks())
+			entry.hasMutate = entry.hasMutate || rule.HasMutate()
+			entry.hasValidate = entry.hasValidate || rule.HasValidate()
+			entry.hasGenerate = entry.hasGenerate || rule.HasGenerate()
+			entry.hasVerifyImages = entry.hasVerifyImages || rule.HasVerifyImages()
+			entry.hasImagesValidationChecks = entry.hasImagesValidationChecks || rule.HasImagesValidationChecks()
 			kindStates[kind] = entry
 		}
 	}
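
A minimal sketch of the intended use of the new parameter; the mapped value shown is an assumption about what the policy-cache controller supplies elsewhere in this patch, only the precedence over computeKind comes from the code above:

    // when a policy matches "v1/Pod.status", the caller can pass a mapping so the
    // cache indexes the rule under a subresource-aware kind instead of whatever
    // computeKind would derive from the raw string (assumed value shown).
    subresourceGVKToKind := map[string]string{"v1/Pod.status": "Pod/status"}
    cache.Set(key, policy, subresourceGVKToKind)
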
diff --git a/pkg/utils/kube/kind.go b/pkg/utils/kube/kind.go
index c5f5e09161..405da5e65d 100644
--- a/pkg/utils/kube/kind.go
+++ b/pkg/utils/kube/kind.go
@@ -1,27 +1,33 @@
 package kube
 
-import "strings"
+import (
+	"regexp"
+	"strings"
+
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
 
 // GetKindFromGVK - get kind and APIVersion from GVK
-func GetKindFromGVK(str string) (apiVersion string, kind string) {
+func GetKindFromGVK(str string) (groupVersion string, kind string) {
 	parts := strings.Split(str, "/")
 	count := len(parts)
+	versionRegex := regexp.MustCompile(`v\d((alpha|beta)\d)?`)
+
 	if count == 2 {
-		return parts[0], formatSubresource(parts[1])
-	}
-
-	if count == 3 {
-		if parts[1] == "*" {
-			return "", formatSubresource(parts[2])
+		if versionRegex.MatchString(parts[0]) || parts[0] == "*" {
+			return parts[0], formatSubresource(parts[1])
+		} else {
+			return "", parts[0] + "/" + parts[1]
 		}
-
-		return parts[0] + "/" + parts[1], formatSubresource(parts[2])
-	}
-
-	if count == 4 {
+	} else if count == 3 {
+		if versionRegex.MatchString(parts[0]) || parts[0] == "*" {
+			return parts[0], parts[1] + "/" + parts[2]
+		} else {
+			return parts[0] + "/" + parts[1], formatSubresource(parts[2])
+		}
+	} else if count == 4 {
 		return parts[0] + "/" + parts[1], parts[2] + "/" + parts[3]
 	}
-
 	return "", formatSubresource(str)
 }
 
@@ -29,21 +35,9 @@ func formatSubresource(s string) string {
 	return strings.Replace(s, ".", "/", 1)
 }
 
-// GetGroupFromGVK - get group GVK
-func GetGroupFromGVK(str string) (group string) {
-	parts := strings.Split(str, "/")
-	count := len(parts)
-	if count == 3 {
-		if parts[1] == "*" {
-			return parts[0]
-		}
-	}
-	return ""
-}
-
+// SplitSubresource - split subresource from kind
 func SplitSubresource(s string) (kind string, subresource string) {
-	normalized := strings.Replace(s, ".", "/", 1)
-	parts := strings.Split(normalized, "/")
+	parts := strings.Split(s, "/")
 	if len(parts) == 2 {
 		return parts[0], parts[1]
 	}
@@ -51,17 +45,36 @@ func SplitSubresource(s string) (kind string, subresource string) {
 	return s, ""
 }
 
+// ContainsKind - check if kind is in list
 func ContainsKind(list []string, kind string) bool {
 	for _, e := range list {
-		if _, k := GetKindFromGVK(e); k == kind {
+		_, k := GetKindFromGVK(e)
+		k, _ = SplitSubresource(k)
+		if k == kind {
 			return true
 		}
 	}
 	return false
 }
 
-// SkipSubResources skip list of resources which don't have an API group.
-func SkipSubResources(kind string) bool {
-	s := []string{"PodExecOptions", "PodAttachOptions", "PodProxyOptions", "ServiceProxyOptions", "NodeProxyOptions"}
-	return ContainsKind(s, kind)
+// GroupVersionMatches - check if the given group version matches the server resource group version.
+// If the group version contains a wildcard, it matches any version within that group, but the group itself must match.
+// Returns false if the supplied group version is empty; callers should check for that condition before calling this function.
+func GroupVersionMatches(groupVersion, serverResourceGroupVersion string) bool {
+	if strings.Contains(groupVersion, "*") {
+		return strings.HasPrefix(serverResourceGroupVersion, strings.TrimSuffix(groupVersion, "*"))
+	}
+
+	gv, err := schema.ParseGroupVersion(groupVersion)
+	if err == nil {
+		serverResourceGV, _ := schema.ParseGroupVersion(serverResourceGroupVersion)
+		return gv.Group == serverResourceGV.Group && gv.Version == serverResourceGV.Version
+	}
+
+	return false
+}
+
+// IsSubresource returns true if the resource is a subresource
+func IsSubresource(resourceName string) bool {
+	return strings.Contains(resourceName, "/")
 }
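For orientation (not part of the patch): a minimal, hypothetical sketch of how the reworked helpers above compose when a policy matches a subresource such as `apps/v1/Deployment/scale`. The import alias and module path are assumptions, and the expected values mirror the unit tests added below.

	package main

	import (
		"fmt"

		kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
	)

	func main() {
		// "apps/v1/Deployment/scale" splits into a group/version and a kind/subresource pair.
		gv, kind := kubeutils.GetKindFromGVK("apps/v1/Deployment/scale") // "apps/v1", "Deployment/scale"
		parent, subresource := kubeutils.SplitSubresource(kind)          // "Deployment", "scale"
		fmt.Println(gv, parent, subresource)

		// A wildcard group/version matches any version within the same group.
		fmt.Println(kubeutils.GroupVersionMatches("apps/*", "apps/v1"))  // true
		fmt.Println(kubeutils.GroupVersionMatches("apps/*", "batch/v1")) // false
	}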
diff --git a/pkg/utils/kube/kind_test.go b/pkg/utils/kube/kind_test.go
index c0aa0b9da6..cc2173cf62 100644
--- a/pkg/utils/kube/kind_test.go
+++ b/pkg/utils/kube/kind_test.go
@@ -21,7 +21,7 @@ func Test_GetKindFromGVK(t *testing.T) {
 	assert.Equal(t, "Pod", kind)
 
 	apiVersion, kind = GetKindFromGVK("batch/*/CronJob")
-	assert.Equal(t, "", apiVersion)
+	assert.Equal(t, "batch/*", apiVersion)
 	assert.Equal(t, "CronJob", kind)
 
 	apiVersion, kind = GetKindFromGVK("storage.k8s.io/v1/CSIDriver")
@@ -39,6 +39,34 @@ func Test_GetKindFromGVK(t *testing.T) {
 	apiVersion, kind = GetKindFromGVK("Pod.Status")
 	assert.Equal(t, "", apiVersion)
 	assert.Equal(t, "Pod/Status", kind)
+
+	apiVersion, kind = GetKindFromGVK("apps/v1/Deployment/Scale")
+	assert.Equal(t, "apps/v1", apiVersion)
+	assert.Equal(t, "Deployment/Scale", kind)
+
+	apiVersion, kind = GetKindFromGVK("v1/ReplicationController/Scale")
+	assert.Equal(t, "v1", apiVersion)
+	assert.Equal(t, "ReplicationController/Scale", kind)
+
+	apiVersion, kind = GetKindFromGVK("*/ReplicationController/Scale")
+	assert.Equal(t, "*", apiVersion)
+	assert.Equal(t, "ReplicationController/Scale", kind)
+
+	apiVersion, kind = GetKindFromGVK("*/Deployment/scale")
+	assert.Equal(t, "*", apiVersion)
+	assert.Equal(t, "Deployment/scale", kind)
+
+	apiVersion, kind = GetKindFromGVK("*/Deployment.scale")
+	assert.Equal(t, "*", apiVersion)
+	assert.Equal(t, "Deployment/scale", kind)
+
+	apiVersion, kind = GetKindFromGVK("*/Deployment/scale")
+	assert.Equal(t, "*", apiVersion)
+	assert.Equal(t, "Deployment/scale", kind)
+
+	apiVersion, kind = GetKindFromGVK("apps/v1/Deployment.scale")
+	assert.Equal(t, "apps/v1", apiVersion)
+	assert.Equal(t, "Deployment/scale", kind)
 }
 
 func Test_SplitSubresource(t *testing.T) {
@@ -50,16 +78,25 @@ func Test_SplitSubresource(t *testing.T) {
 	kind, subresource = SplitSubresource("TaskRun/status")
 	assert.Equal(t, kind, "TaskRun")
 	assert.Equal(t, subresource, "status")
-
-	kind, subresource = SplitSubresource("Pod.Status")
-	assert.Equal(t, kind, "Pod")
-	assert.Equal(t, subresource, "Status")
-
-	kind, subresource = SplitSubresource("v1/Pod/Status")
-	assert.Equal(t, kind, "v1/Pod/Status")
-	assert.Equal(t, subresource, "")
-
-	kind, subresource = SplitSubresource("v1/Pod.Status")
-	assert.Equal(t, kind, "v1/Pod.Status")
-	assert.Equal(t, subresource, "")
+}
+
+func Test_GroupVersionMatches(t *testing.T) {
+	groupVersion, serverResourceGroupVersion := "v1", "v1"
+	assert.Equal(t, GroupVersionMatches(groupVersion, serverResourceGroupVersion), true)
+
+	// If user does not specify a group, then it is considered as legacy group which is empty
+	groupVersion, serverResourceGroupVersion = "v1", "networking.k8s.io/v1"
+	assert.Equal(t, GroupVersionMatches(groupVersion, serverResourceGroupVersion), false)
+
+	groupVersion, serverResourceGroupVersion = "*", "v1"
+	assert.Equal(t, GroupVersionMatches(groupVersion, serverResourceGroupVersion), true)
+
+	groupVersion, serverResourceGroupVersion = "certificates.k8s.io/*", "certificates.k8s.io/v1"
+	assert.Equal(t, GroupVersionMatches(groupVersion, serverResourceGroupVersion), true)
+
+	groupVersion, serverResourceGroupVersion = "*", "certificates.k8s.io/v1"
+	assert.Equal(t, GroupVersionMatches(groupVersion, serverResourceGroupVersion), true)
+
+	groupVersion, serverResourceGroupVersion = "certificates.k8s.io/*", "networking.k8s.io/v1"
+	assert.Equal(t, GroupVersionMatches(groupVersion, serverResourceGroupVersion), false)
 }
diff --git a/pkg/webhooks/resource/handlers_test.go b/pkg/webhooks/resource/handlers_test.go
index 0ff9b4200a..588c88062a 100644
--- a/pkg/webhooks/resource/handlers_test.go
+++ b/pkg/webhooks/resource/handlers_test.go
@@ -271,7 +271,8 @@ func Test_AdmissionResponseValid(t *testing.T) {
 	assert.NilError(t, err)
 
 	key := makeKey(&validPolicy)
-	policyCache.Set(key, &validPolicy)
+	subresourceGVKToKind := make(map[string]string)
+	policyCache.Set(key, &validPolicy, subresourceGVKToKind)
 
 	request := &v1.AdmissionRequest{
 		Operation: v1.Create,
@@ -280,6 +281,7 @@ func Test_AdmissionResponseValid(t *testing.T) {
 		Object: runtime.RawExtension{
 			Raw: []byte(pod),
 		},
+		RequestResource: &metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
 	}
 
 	response := handlers.Mutate(ctx, logger, request, "", time.Now())
@@ -291,7 +293,7 @@ func Test_AdmissionResponseValid(t *testing.T) {
 	assert.Equal(t, len(response.Warnings), 0)
 
 	validPolicy.Spec.ValidationFailureAction = "Enforce"
-	policyCache.Set(key, &validPolicy)
+	policyCache.Set(key, &validPolicy, subresourceGVKToKind)
 
 	response = handlers.Validate(ctx, logger, request, "", time.Now())
 	assert.Equal(t, response.Allowed, false)
@@ -320,11 +322,13 @@ func Test_AdmissionResponseInvalid(t *testing.T) {
 		Object: runtime.RawExtension{
 			Raw: []byte(pod),
 		},
+		RequestResource: &metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
 	}
 
 	keyInvalid := makeKey(&invalidPolicy)
 	invalidPolicy.Spec.ValidationFailureAction = "Enforce"
-	policyCache.Set(keyInvalid, &invalidPolicy)
+	subresourceGVKToKind := make(map[string]string)
+	policyCache.Set(keyInvalid, &invalidPolicy, subresourceGVKToKind)
 
 	response := handlers.Validate(ctx, logger, request, "", time.Now())
 	assert.Equal(t, response.Allowed, false)
@@ -332,7 +336,7 @@ func Test_AdmissionResponseInvalid(t *testing.T) {
 
 	var ignore kyverno.FailurePolicyType = kyverno.Ignore
 	invalidPolicy.Spec.FailurePolicy = &ignore
-	policyCache.Set(keyInvalid, &invalidPolicy)
+	policyCache.Set(keyInvalid, &invalidPolicy, subresourceGVKToKind)
 
 	response = handlers.Validate(ctx, logger, request, "", time.Now())
 	assert.Equal(t, response.Allowed, true)
@@ -353,7 +357,8 @@ func Test_ImageVerify(t *testing.T) {
 	assert.NilError(t, err)
 
 	key := makeKey(&policy)
-	policyCache.Set(key, &policy)
+	subresourceGVKToKind := make(map[string]string)
+	policyCache.Set(key, &policy, subresourceGVKToKind)
 
 	request := &v1.AdmissionRequest{
 		Operation: v1.Create,
@@ -362,10 +367,11 @@ func Test_ImageVerify(t *testing.T) {
 		Object: runtime.RawExtension{
 			Raw: []byte(pod),
 		},
+		RequestResource: &metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
 	}
 
 	policy.Spec.ValidationFailureAction = "Enforce"
-	policyCache.Set(key, &policy)
+	policyCache.Set(key, &policy, subresourceGVKToKind)
 
 	response := handlers.Mutate(ctx, logger, request, "", time.Now())
 	assert.Equal(t, response.Allowed, false)
@@ -373,7 +379,7 @@ func Test_ImageVerify(t *testing.T) {
 
 	var ignore kyverno.FailurePolicyType = kyverno.Ignore
 	policy.Spec.FailurePolicy = &ignore
-	policyCache.Set(key, &policy)
+	policyCache.Set(key, &policy, subresourceGVKToKind)
 
 	response = handlers.Mutate(ctx, logger, request, "", time.Now())
 	assert.Equal(t, response.Allowed, false)
@@ -394,7 +400,7 @@ func Test_MutateAndVerify(t *testing.T) {
 	assert.NilError(t, err)
 
 	key := makeKey(&policy)
-	policyCache.Set(key, &policy)
+	policyCache.Set(key, &policy, make(map[string]string))
 
 	request := &v1.AdmissionRequest{
 		Operation: v1.Create,
@@ -403,6 +409,7 @@ func Test_MutateAndVerify(t *testing.T) {
 		Object: runtime.RawExtension{
 			Raw: []byte(resourceMutateAndVerify),
 		},
+		RequestResource: &metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
 	}
 
 	response := handlers.Mutate(ctx, logger, request, "", time.Now())
diff --git a/pkg/webhooks/resource/validation/validation.go b/pkg/webhooks/resource/validation/validation.go
index 857a3628a9..7f455eac42 100644
--- a/pkg/webhooks/resource/validation/validation.go
+++ b/pkg/webhooks/resource/validation/validation.go
@@ -172,8 +172,8 @@ func (v *validationHandler) handleAudit(
 	if request.DryRun != nil && *request.DryRun {
 		return
 	}
-	// we don't need reports for deletions and when it's about sub resources
-	if request.Operation == admissionv1.Delete || request.SubResource != "" {
+	// we don't need reports for deletions
+	if request.Operation == admissionv1.Delete {
 		return
 	}
 	// check if the resource supports reporting
diff --git a/test/cli/test/exec-subresource/deny-exec-by-pod-label.yaml b/test/cli/test/exec-subresource/deny-exec-by-pod-label.yaml
new file mode 100644
index 0000000000..98a869e08e
--- /dev/null
+++ b/test/cli/test/exec-subresource/deny-exec-by-pod-label.yaml
@@ -0,0 +1,41 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: deny-exec-by-pod-label
+  annotations:
+    policies.kyverno.io/title: Block Pod Exec by Pod Label
+    policies.kyverno.io/category: Sample
+    policies.kyverno.io/minversion: 1.4.2
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      The `exec` command may be used to gain shell access, or run other commands, in a Pod's container. While this can
+      be useful for troubleshooting purposes, it could represent an attack vector and is discouraged.
+      This policy blocks Pod exec commands to Pods having the label `exec=false`.      
+spec:
+  validationFailureAction: Enforce
+  background: false
+  rules:
+    - name: deny-exec-by-label
+      match:
+        all:
+          - resources:
+              kinds:
+                - PodExecOptions
+      context:
+        - name: podexeclabel
+          apiCall:
+            urlPath: "/api/v1/namespaces/{{request.namespace}}/pods/{{request.name}}"
+            jmesPath: "metadata.labels.exec"
+      preconditions:
+        all:
+          - key: "{{ request.operation }}"
+            operator: Equals
+            value: CONNECT
+      validate:
+        message: Exec'ing into Pods protected with the label `exec=false` is forbidden.
+        deny:
+          conditions:
+            all:
+              - key: "{{ podexeclabel }}"
+                operator: Equals
+                value: "false"
diff --git a/test/cli/test/exec-subresource/kyverno-test.yaml b/test/cli/test/exec-subresource/kyverno-test.yaml
new file mode 100644
index 0000000000..cc501d1de2
--- /dev/null
+++ b/test/cli/test/exec-subresource/kyverno-test.yaml
@@ -0,0 +1,13 @@
+name: deny-exec-by-pod-label
+policies:
+  - deny-exec-by-pod-label.yaml
+resources:
+  - resource.yaml
+variables: values.yaml
+results:
+  - policy: deny-exec-by-pod-label
+    rule: deny-exec-by-label
+    resource: execpod
+    namespace: default
+    kind: PodExecOptions
+    result: fail
\ No newline at end of file
diff --git a/test/cli/test/exec-subresource/resource.yaml b/test/cli/test/exec-subresource/resource.yaml
new file mode 100644
index 0000000000..3310cd33c2
--- /dev/null
+++ b/test/cli/test/exec-subresource/resource.yaml
@@ -0,0 +1,13 @@
+kind: PodExecOptions
+apiVersion: v1
+# PodExecOptions does not actually contain any metadata, but kyverno relies on the object's name and namespace
+# to run the CLI test, so we add them here as a workaround.
+metadata:
+  name: execpod
+  namespace: default
+stdin: true
+stdout: true
+tty: true
+container: nginx
+command:
+  - sh
\ No newline at end of file
diff --git a/test/cli/test/exec-subresource/values.yaml b/test/cli/test/exec-subresource/values.yaml
new file mode 100644
index 0000000000..950471dee8
--- /dev/null
+++ b/test/cli/test/exec-subresource/values.yaml
@@ -0,0 +1,17 @@
+policies:
+  - name: deny-exec-by-pod-label
+    rules:
+      - name: deny-exec-by-label
+        values:
+          podexeclabel: "false"
+globalValues:
+  request.operation: CONNECT
+subresources:
+  - subresource:
+      name: "pods/exec"
+      kind: "PodExecOptions"
+      version: "v1"
+    parentResource:
+      name: "pods"
+      kind: "Pod"
+      version: "v1"
\ No newline at end of file
diff --git a/test/cli/test/scale-subresource/enforce-replicas-for-scale-subresource.yml b/test/cli/test/scale-subresource/enforce-replicas-for-scale-subresource.yml
new file mode 100644
index 0000000000..efc1978eec
--- /dev/null
+++ b/test/cli/test/scale-subresource/enforce-replicas-for-scale-subresource.yml
@@ -0,0 +1,24 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: enforce-replicas-for-scale-subresource
+spec:
+  background: false
+  failurePolicy: Fail
+  rules:
+    - match:
+        all:
+          - resources:
+              kinds:
+                - "Deployment/scale"
+              names:
+                - nginx-test
+              namespaces:
+                - default
+      name: validate-nginx-test
+      validate:
+        message: 'nginx-test needs to have 2 replicas'
+        pattern:
+          spec:
+            replicas: 2
+  validationFailureAction: Enforce
diff --git a/test/cli/test/scale-subresource/kyverno-test.yaml b/test/cli/test/scale-subresource/kyverno-test.yaml
new file mode 100644
index 0000000000..efcd02f70f
--- /dev/null
+++ b/test/cli/test/scale-subresource/kyverno-test.yaml
@@ -0,0 +1,13 @@
+name: enforce-replicas-for-scale-subresource
+policies:
+  - enforce-replicas-for-scale-subresource.yml
+resources:
+  - resource.yaml
+variables: values.yaml
+results:
+  - policy: enforce-replicas-for-scale-subresource
+    rule: validate-nginx-test
+    resource: nginx-test
+    namespace: default
+    kind: Scale
+    result: fail
\ No newline at end of file
diff --git a/test/cli/test/scale-subresource/resource.yaml b/test/cli/test/scale-subresource/resource.yaml
new file mode 100644
index 0000000000..7f573e20f5
--- /dev/null
+++ b/test/cli/test/scale-subresource/resource.yaml
@@ -0,0 +1,7 @@
+kind: Scale
+apiVersion: autoscaling/v1
+metadata:
+  name: nginx-test
+  namespace: default
+spec:
+  replicas: 4
\ No newline at end of file
diff --git a/test/cli/test/scale-subresource/values.yaml b/test/cli/test/scale-subresource/values.yaml
new file mode 100644
index 0000000000..e7d98a3ad0
--- /dev/null
+++ b/test/cli/test/scale-subresource/values.yaml
@@ -0,0 +1,11 @@
+subresources:
+  - subresource:
+      name: "deployments/scale"
+      kind: "Scale"
+      group: "autoscaling"
+      version: "v1"
+    parentResource:
+      name: "deployments"
+      kind: "Deployment"
+      group: "apps"
+      version: "v1"
\ No newline at end of file
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-assert.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-assert.yaml
new file mode 100644
index 0000000000..48e2262e17
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-assert.yaml
@@ -0,0 +1,9 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: advertise-resource
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-manifests.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-manifests.yaml
new file mode 100644
index 0000000000..ee8ecb5f76
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/01-manifests.yaml
@@ -0,0 +1,41 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: advertise-resource
+spec:
+  background: false
+  rules:
+    - name: advertise-resource
+      match:
+        resources:
+          kinds:
+            - Node
+      mutate:
+        targets:
+          - apiVersion: v1
+            kind: Node/status
+            name: kind-control-plane
+            namespace: ""
+        patchStrategicMerge:
+          status:
+            capacity:
+              example.com/dongle: "41"
+---
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+  labels:
+    app.kubernetes.io/instance: kyverno
+    app.kubernetes.io/name: kyverno
+  name: kyverno:modify-nodes
+rules:
+  - apiGroups:
+      - ""
+    resources:
+      - nodes
+      - nodes/status
+    verbs:
+      - create
+      - update
+      - patch
+      - delete
\ No newline at end of file
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-assert.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-assert.yaml
new file mode 100644
index 0000000000..84bae92ab8
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-assert.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Node
+metadata:
+  name: kind-control-plane
+status:
+  capacity:
+    example.com/dongle: "41"
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-script.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-script.yaml
new file mode 100644
index 0000000000..7c3380506f
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/02-script.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: ./modify-resource-filters.sh removeNode
+  # Send an update request to the node
+  - command: kubectl label nodes kind-control-plane abc=xyz
\ No newline at end of file
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/99-cleanup.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/99-cleanup.yaml
new file mode 100644
index 0000000000..ff81f50820
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/99-cleanup.yaml
@@ -0,0 +1,7 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: kubectl delete cpol advertise-resource --force --wait=true --ignore-not-found=true
+  - script: ./modify-resource-filters.sh addNode
+  - script: ./clear-modified-node-status.sh
+  - command: kubectl label nodes kind-control-plane abc-
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/README.md b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/README.md
new file mode 100644
index 0000000000..f43fab4660
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/README.md
@@ -0,0 +1,22 @@
+## Description
+
+This test validates that an incoming request to `Node` triggers mutation of the existing `Node/status` subresource.
+
+## Expected Behavior
+
+The existing `Node/status` subresource is mutated.
+
+## Steps
+
+### Test Steps
+
+1. Create a `ClusterPolicy` that matches on `Node` and mutates the `Node/status` object.
+2. Create a `ClusterRole` that allows modifications to the `Node/status` subresource.
+3. Modify the kyverno `resourceFilters` to allow mutating requests for the `Node` resource.
+4. Send an update request to the `Node`.
+5. Kyverno mutates the existing `Node/status` subresource.
+6. Verify that the existing `Node/status` object was mutated.
+
+## Reference Issue(s)
+
+https://github.com/kyverno/kyverno/issues/2843
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/clear-modified-node-status.sh b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/clear-modified-node-status.sh
new file mode 100755
index 0000000000..818caada82
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/clear-modified-node-status.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+set -eu
+
+kubectl proxy &
+proxy_pid=$!
+echo $proxy_pid
+
+function cleanup {
+  echo "killing kubectl proxy" >&2
+  kill $proxy_pid
+}
+trap cleanup EXIT # ensure the proxy is stopped even if a later step fails
+
+attempt_counter=0
+max_attempts=5
+
+until curl --output /dev/null -fsSL http://localhost:8001/; do
+  if [ ${attempt_counter} -eq ${max_attempts} ]; then
+    echo "Max attempts reached"
+    exit 1
+  fi
+
+  attempt_counter=$((attempt_counter + 1))
+  sleep 5
+done
+
+curl --header "Content-Type: application/json-patch+json" \
+  --request PATCH \
+  --output /dev/null \
+  --data '[{"op": "remove", "path": "/status/capacity/example.com~1dongle"}]' \
+  http://localhost:8001/api/v1/nodes/kind-control-plane/status
+
+kubectl annotate node kind-control-plane policies.kyverno.io/last-applied-patches-
\ No newline at end of file
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/modify-resource-filters.sh b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/modify-resource-filters.sh
new file mode 100755
index 0000000000..efec3e70b1
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/existing/mutate-existing-node-status/modify-resource-filters.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+set -eu
+
+if [ $# -ne 1 ]; then
+  echo "Usage: $0 [addNode|removeNode]"
+  exit 1
+fi
+
+if [ "$1" = "removeNode" ]; then
+  resource_filters=$(kubectl get ConfigMap kyverno -n kyverno -o json | jq .data.resourceFilters)
+  resource_filters="${resource_filters//\[Node,\*,\*\]/}"
+
+  kubectl patch ConfigMap kyverno -n kyverno --type='json' -p="[{\"op\": \"replace\", \"path\": \"/data/resourceFilters\", \"value\":""$resource_filters""}]"
+fi
+
+if [ "$1" = "addNode" ]; then
+  resource_filters=$(kubectl get ConfigMap kyverno -n kyverno -o json | jq .data.resourceFilters)
+  resource_filters="${resource_filters%?}"
+
+  resource_filters="${resource_filters}""[Node,*,*]\""
+  kubectl patch ConfigMap kyverno -n kyverno --type='json' -p="[{\"op\": \"replace\", \"path\": \"/data/resourceFilters\", \"value\":""$resource_filters""}]"
+fi
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-assert.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-assert.yaml
new file mode 100644
index 0000000000..48e2262e17
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-assert.yaml
@@ -0,0 +1,9 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: advertise-resource
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-manifests.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-manifests.yaml
new file mode 100644
index 0000000000..3c858b373d
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/01-manifests.yaml
@@ -0,0 +1,17 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: advertise-resource
+spec:
+  background: false
+  rules:
+    - name: advertise-resource
+      match:
+        resources:
+          kinds:
+            - Node/status
+      mutate:
+        patchesJson6902: |-
+          - op: add
+            path: "/status/capacity/example.com~1dongle"
+            value: "4"
\ No newline at end of file
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-assert.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-assert.yaml
new file mode 100644
index 0000000000..9a493ab1c9
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-assert.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Node
+metadata:
+  name: kind-control-plane
+status:
+  capacity:
+    example.com/dongle: "4"
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-script.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-script.yaml
new file mode 100644
index 0000000000..71cf487980
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/02-script.yaml
@@ -0,0 +1,5 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: ./modify-resource-filters.sh removeNode
+  - script: ./send-request-to-status-subresource.sh
\ No newline at end of file
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/99-cleanup.yaml b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/99-cleanup.yaml
new file mode 100644
index 0000000000..159f32afe9
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/99-cleanup.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: kubectl delete cpol advertise-resource --force --wait=true --ignore-not-found=true
+  - script: ./modify-resource-filters.sh addNode
+  - script: ./clear-modified-node-status.sh
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/README.md b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/README.md
new file mode 100644
index 0000000000..0e463b339a
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/README.md
@@ -0,0 +1,22 @@
+## Description
+
+This test validates that an incoming request to `Node/status` is mutated by the mutation policy matching
+on `Node/status`.
+
+## Expected Behavior
+
+The request is mutated.
+
+## Steps
+
+### Test Steps
+
+1. Create a `ClusterPolicy` that matches on `Node/status` and mutates the request.
+2. Modify the kyverno `resourceFilters` to allow mutating requests for the `Node` resource.
+3. Send an update request to `Node/status`.
+4. Kyverno mutates the incoming request.
+5. Verify that the request was mutated.
+
+## Reference Issue(s)
+
+https://github.com/kyverno/kyverno/issues/2843
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/clear-modified-node-status.sh b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/clear-modified-node-status.sh
new file mode 100755
index 0000000000..818caada82
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/clear-modified-node-status.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+set -eu
+
+kubectl proxy &
+proxy_pid=$!
+echo $proxy_pid
+
+function cleanup {
+  echo "killing kubectl proxy" >&2
+  kill $proxy_pid
+}
+trap cleanup EXIT # ensure the proxy is stopped even if a later step fails
+
+attempt_counter=0
+max_attempts=5
+
+until curl --output /dev/null -fsSL http://localhost:8001/; do
+  if [ ${attempt_counter} -eq ${max_attempts} ]; then
+    echo "Max attempts reached"
+    exit 1
+  fi
+
+  attempt_counter=$((attempt_counter + 1))
+  sleep 5
+done
+
+curl --header "Content-Type: application/json-patch+json" \
+  --request PATCH \
+  --output /dev/null \
+  --data '[{"op": "remove", "path": "/status/capacity/example.com~1dongle"}]' \
+  http://localhost:8001/api/v1/nodes/kind-control-plane/status
+
+kubectl annotate node kind-control-plane policies.kyverno.io/last-applied-patches-
\ No newline at end of file
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/modify-resource-filters.sh b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/modify-resource-filters.sh
new file mode 100755
index 0000000000..efec3e70b1
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/modify-resource-filters.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+set -eu
+
+if [ $# -ne 1 ]; then
+  echo "Usage: $0 [addNode|removeNode]"
+  exit 1
+fi
+
+if [ "$1" = "removeNode" ]; then
+  resource_filters=$(kubectl get ConfigMap kyverno -n kyverno -o json | jq .data.resourceFilters)
+  resource_filters="${resource_filters//\[Node,\*,\*\]/}"
+
+  kubectl patch ConfigMap kyverno -n kyverno --type='json' -p="[{\"op\": \"replace\", \"path\": \"/data/resourceFilters\", \"value\":""$resource_filters""}]"
+fi
+
+if [ "$1" = "addNode" ]; then
+  resource_filters=$(kubectl get ConfigMap kyverno -n kyverno -o json | jq .data.resourceFilters)
+  resource_filters="${resource_filters%?}"
+
+  resource_filters="${resource_filters}""[Node,*,*]\""
+  kubectl patch ConfigMap kyverno -n kyverno --type='json' -p="[{\"op\": \"replace\", \"path\": \"/data/resourceFilters\", \"value\":""$resource_filters""}]"
+fi
diff --git a/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/send-request-to-status-subresource.sh b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/send-request-to-status-subresource.sh
new file mode 100755
index 0000000000..c2d69327cf
--- /dev/null
+++ b/test/conformance/kuttl/mutate/clusterpolicy/standard/mutate-node-status/send-request-to-status-subresource.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+set -eu
+
+kubectl proxy &
+proxy_pid=$!
+echo $proxy_pid
+
+function cleanup {
+  echo "killing kubectl proxy" >&2
+  kill $proxy_pid
+}
+
+attempt_counter=0
+max_attempts=5
+
+until curl --output /dev/null -fsSL http://localhost:8001/; do
+    if [ ${attempt_counter} -eq ${max_attempts} ];then
+      echo "Max attempts reached"
+      exit 1
+    fi
+
+    attempt_counter=$((attempt_counter+1))
+    sleep 5
+done
+
+if curl --header "Content-Type: application/json-patch+json" \
+   --request PATCH \
+   --output /dev/null \
+   --data '[{"op": "add", "path": "/status/capacity/example.com~1dongle", "value": "1"}]' \
+   http://localhost:8001/api/v1/nodes/kind-control-plane/status; then
+  echo "Successfully sent request to status subresource."
+  trap cleanup EXIT
+  exit 0
+else
+  echo "Failed to send request to status subresource."
+  trap cleanup EXIT
+  exit 1
+fi
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-assert.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-assert.yaml
new file mode 100644
index 0000000000..f2887a6ccb
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-assert.yaml
@@ -0,0 +1,9 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: deny-evict-by-pod-label
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-manifests.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-manifests.yaml
new file mode 100644
index 0000000000..a367c75615
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/01-manifests.yaml
@@ -0,0 +1,45 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: test-validate
+---
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: deny-evict-by-pod-label
+spec:
+  validationFailureAction: Enforce
+  background: false
+  rules:
+    - name: deny-evict-by-label
+      match:
+        resources:
+          kinds:
+            - Pod/eviction
+      context:
+        - name: podevictlabel
+          apiCall:
+            urlPath: "/api/v1/namespaces/{{request.namespace}}/pods/{{request.name}}"
+            jmesPath: "metadata.labels.evict"
+      validate:
+        message: Evicting Pods protected with the label 'evict=false' is forbidden.
+        deny:
+          conditions:
+            all:
+              - key: "{{ podevictlabel }}"
+                operator: Equals
+                value: "false"
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+    tier: frontend
+    evict: "false"
+  namespace: test-validate
+spec:
+  containers:
+    - name: nginx
+      image: nginx
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/02-script.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/02-script.yaml
new file mode 100644
index 0000000000..8d857aaf1a
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/02-script.yaml
@@ -0,0 +1,4 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: ./api-initiated-eviction.sh
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/99-cleanup.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/99-cleanup.yaml
new file mode 100644
index 0000000000..0a7469d0cc
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/99-cleanup.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: kubectl delete pod nginx -n test-validate --force --wait=true --ignore-not-found=true
+  - command: kubectl delete cpol deny-evict-by-pod-label -n test-validate --force --wait=true --ignore-not-found=true
+  - command: kubectl delete ns test-validate --force --ignore-not-found=true
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/README.md b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/README.md
new file mode 100644
index 0000000000..5d33f76a12
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/README.md
@@ -0,0 +1,3 @@
+# Evicting pod with label 'evict=false' is forbidden
+
+Validate test to check that a Pod with the label `evict=false` cannot be evicted. Related issue: https://github.com/kyverno/kyverno/issues/4313
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/api-initiated-eviction.sh b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/api-initiated-eviction.sh
new file mode 100755
index 0000000000..1faa823630
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/api-initiated-eviction.sh
@@ -0,0 +1,35 @@
+#!/usr/bin/env bash
+set -eu
+
+kubectl proxy &
+proxy_pid=$!
+echo $proxy_pid
+
+function cleanup {
+  echo "killing kubectl proxy" >&2
+  kill $proxy_pid
+}
+
+attempt_counter=0
+max_attempts=5
+
+until curl --output /dev/null -fsSL http://localhost:8001/; do
+    if [ ${attempt_counter} -eq ${max_attempts} ];then
+      echo "Max attempts reached"
+      exit 1
+    fi
+
+    attempt_counter=$((attempt_counter+1))
+    sleep 5
+done
+
+if curl -v -H 'Content-type: application/json' \
+  http://localhost:8001/api/v1/namespaces/test-validate/pods/nginx/eviction -d @eviction.json 2>&1 | grep -q "Evicting Pods protected with the label 'evict=false' is forbidden"; then
+  echo "Test succeeded. Resource was not evicted."
+  trap cleanup EXIT
+  exit 0
+else
+  echo "Tested failed. Resource was evicted."
+  trap cleanup EXIT
+  exit 1
+fi
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/eviction.json b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/eviction.json
new file mode 100644
index 0000000000..48976c7434
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/api-initiated-pod-eviction/eviction.json
@@ -0,0 +1,8 @@
+{
+  "apiVersion": "policy/v1",
+  "kind": "Eviction",
+  "metadata": {
+    "name": "nginx",
+    "namespace": "test-validate"
+  }
+}
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-assert.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-assert.yaml
new file mode 100644
index 0000000000..29794ca537
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-assert.yaml
@@ -0,0 +1,9 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: deny-exec-by-pod-label
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-manifests.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-manifests.yaml
new file mode 100644
index 0000000000..7218ad60dc
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/01-manifests.yaml
@@ -0,0 +1,59 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: test-validate
+---
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: deny-exec-by-pod-label
+  annotations:
+    policies.kyverno.io/title: Block Pod Exec by Pod Label
+    policies.kyverno.io/category: Sample
+    policies.kyverno.io/minversion: 1.4.2
+    policies.kyverno.io/subject: Pod
+    policies.kyverno.io/description: >-
+      The 'exec' command may be used to gain shell access, or run other commands, in a Pod's container. While this can
+      be useful for troubleshooting purposes, it could represent an attack vector and is discouraged.
+      This policy blocks Pod exec commands to Pods having the label 'exec=false'.
+spec:
+  validationFailureAction: Enforce
+  background: false
+  rules:
+    - name: deny-exec-by-label
+      match:
+        resources:
+          kinds:
+            - PodExecOptions
+      context:
+        - name: podexeclabel
+          apiCall:
+            urlPath: "/api/v1/namespaces/{{request.namespace}}/pods/{{request.name}}"
+            jmesPath: "metadata.labels.exec"
+      preconditions:
+        all:
+          - key: "{{ request.operation }}"
+            operator: Equals
+            value: CONNECT
+      validate:
+        message: Exec'ing into Pods protected with the label 'exec=false' is forbidden.
+        deny:
+          conditions:
+            all:
+              - key: "{{ podexeclabel }}"
+                operator: Equals
+                value: "false"
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: nginx
+  labels:
+    app: nginx
+    tier: frontend
+    exec: "false"
+  namespace: test-validate
+spec:
+  containers:
+    - name: nginx
+      image: nginx
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/02-script.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/02-script.yaml
new file mode 100644
index 0000000000..10e6dcd0e9
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/02-script.yaml
@@ -0,0 +1,12 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: |
+      if kubectl -n test-validate exec nginx -it -- sh 2>&1 | grep -q "Exec'ing into Pods protected with the label 'exec=false' is forbidden"
+      then
+        echo "Test succeeded. Exec request was blocked."
+        exit 0
+      else
+        echo "Test failed. Exec request was not blocked."
+        exit 1
+      fi
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/99-cleanup.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/99-cleanup.yaml
new file mode 100644
index 0000000000..e9f231d278
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/99-cleanup.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: kubectl delete pod nginx -n test-validate --force --wait=true --ignore-not-found=true
+  - command: kubectl delete cpol deny-exec-by-pod-label -n test-validate --force --wait=true --ignore-not-found=true
+  - command: kubectl delete ns test-validate --force --ignore-not-found=true
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/README.md b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/README.md
new file mode 100644
index 0000000000..21f5680036
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/block-pod-exec-requests/README.md
@@ -0,0 +1,3 @@
+# Exec'ing into a pod
+
+Validate test to ensure Pods with the label `exec=false` cannot be exec'd into.
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-assert.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-assert.yaml
new file mode 100644
index 0000000000..28ed7ef9d1
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-assert.yaml
@@ -0,0 +1,19 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: nginx-test-scaling-policy
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: nginx-test
+  name: nginx-test
+  namespace: test-validate
+status:
+  replicas: 2
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-manifests.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-manifests.yaml
new file mode 100644
index 0000000000..89a7eb6d8b
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/01-manifests.yaml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: test-validate
+---
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: nginx-test-scaling-policy
+spec:
+  background: false
+  failurePolicy: Fail
+  rules:
+    - match:
+        resources:
+          kinds:
+            - "Deployment/scale"
+          names:
+            - nginx-test
+          namespaces:
+            - test-validate
+      name: validate-nginx-test
+      validate:
+        message: 'nginx-test needs to have 2 replicas'
+        pattern:
+          spec:
+            replicas: 2
+  validationFailureAction: Enforce
+---
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  labels:
+    app: nginx-test
+  name: nginx-test
+  namespace: test-validate
+spec:
+  replicas: 2
+  selector:
+    matchLabels:
+      app: nginx-test
+  template:
+    metadata:
+      labels:
+        app: nginx-test
+    spec:
+      containers:
+        - image: nginx
+          name: nginx
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/02-script.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/02-script.yaml
new file mode 100644
index 0000000000..2c2e3f28c5
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/02-script.yaml
@@ -0,0 +1,12 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: |
+      if kubectl scale deployment nginx-test --replicas=1 -n test-validate 2>&1 | grep -q 'validation error: nginx-test needs to have 2 replicas' 
+      then 
+        echo "Test succeeded. Resource was blocked from scaling."
+        exit 0
+      else 
+        echo "Tested failed. Resource was allowed to scale."
+        exit 1 
+      fi
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/99-cleanup.yaml b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/99-cleanup.yaml
new file mode 100644
index 0000000000..bb1e136044
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/99-cleanup.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: kubectl delete deploy nginx-test -n test-validate --force --wait=true --ignore-not-found=true
+  - command: kubectl delete cpol nginx-test-scaling-policy -n test-validate --force --wait=true --ignore-not-found=true
+  - command: kubectl delete ns test-validate --force --ignore-not-found=true
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/README.md b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/README.md
new file mode 100644
index 0000000000..51806ff45e
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/standard/enforce/scaling-with-kubectl-scale/README.md
@@ -0,0 +1,3 @@
+# Scaling with kubectl scale
+
+Validate test to check that a resource cannot be scaled through the `kubectl scale` command. Related issue: https://github.com/kyverno/kyverno/issues/3118
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-assert.yaml b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-assert.yaml
new file mode 100644
index 0000000000..eb21b4d4fb
--- /dev/null
+++ b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-assert.yaml
@@ -0,0 +1,10 @@
+apiVersion: kyverno.io/v1
+kind: Policy
+metadata:
+  name: configmap-policy
+  namespace: test-validate-e2e-adding-key-to-config-map
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready
diff --git a/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-manifests.yaml b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-manifests.yaml
new file mode 100644
index 0000000000..cdb245e260
--- /dev/null
+++ b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/01-manifests.yaml
@@ -0,0 +1,39 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: test-validate-e2e-adding-key-to-config-map
+---
+apiVersion: kyverno.io/v1
+kind: Policy
+metadata:
+  name: configmap-policy
+  namespace: test-validate-e2e-adding-key-to-config-map
+spec:
+  background: false
+  failurePolicy: Fail
+  validationFailureAction: Enforce
+  rules:
+    - match:
+        all:
+          - resources:
+              kinds:
+                - ConfigMap
+      name: key-abc
+      preconditions:
+        all:
+          - key: "admin"
+            operator: Equals
+            value: "{{ request.object.data.lock || '' }}"
+      validate:
+        anyPattern:
+          - data:
+              key: "abc"
+        message: Configmap key must be "abc"
+---
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: test-configmap
+  namespace: test-validate-e2e-adding-key-to-config-map
+data:
+  key: xyz
diff --git a/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/02-script.yaml b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/02-script.yaml
new file mode 100644
index 0000000000..eb2d5cae1e
--- /dev/null
+++ b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/02-script.yaml
@@ -0,0 +1,12 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: |
+      if kubectl patch ConfigMap test-configmap -n test-validate-e2e-adding-key-to-config-map --type='json' -p="[{\"op\": \"add\", \"path\": \"/data/lock\", \"value\":""admin""}]" 2>&1 | grep -q 'validation error: Configmap key must be "abc"' 
+      then 
+        echo "Test succeeded. Resource was blocked from adding key."
+        exit 0
+      else 
+        echo "Tested failed. Resource was not blocked from adding key."
+        exit 1 
+      fi
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/99-cleanup.yaml b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/99-cleanup.yaml
new file mode 100644
index 0000000000..f115a929cc
--- /dev/null
+++ b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/99-cleanup.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - command: kubectl delete ConfigMap test-configmap -n test-validate-e2e-adding-key-to-config-map --force --wait=true --ignore-not-found=true
+  - command: kubectl delete pol configmap-policy -n test-validate-e2e-adding-key-to-config-map --force --wait=true --ignore-not-found=true
+  - command: kubectl delete ns test-validate-e2e-adding-key-to-config-map --force --ignore-not-found=true
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/README.md b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/README.md
new file mode 100644
index 0000000000..8fed6477c8
--- /dev/null
+++ b/test/conformance/kuttl/validate/e2e/adding-key-to-config-map/README.md
@@ -0,0 +1,21 @@
+## Description
+
+This test validates that an existing `ConfigMap` cannot be updated with a new key that results in a policy violation.
+
+## Expected Behavior
+
+The existing `ConfigMap` is not patched, and a policy violation is reported.
+
+## Steps
+
+### Test Steps
+
+1. Create a `Policy` that permits a particular key/value pair only in combination with another specific key/value pair.
+2. Create a `ConfigMap` that contains one of the keys.
+3. Try to patch the `ConfigMap` with a new key that is not permitted by the policy.
+4. Verify that the `ConfigMap` is not patched and a policy violation is reported.
+5. Delete the `Policy` and `ConfigMap`.
+
+## Reference Issue(s)
+
+https://github.com/kyverno/kyverno/issues/3253
\ No newline at end of file
diff --git a/test/e2e/validate/config.go b/test/e2e/validate/config.go
index 76466b7079..4175611776 100644
--- a/test/e2e/validate/config.go
+++ b/test/e2e/validate/config.go
@@ -5,42 +5,12 @@ import (
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
 
-// FluxValidateTests is E2E Test Config for validation
-var FluxValidateTests = []struct {
-	// TestName - Name of the Test
-	TestName string
-	// PolicyRaw - The Yaml file of the ClusterPolicy
-	PolicyRaw []byte
-	// ResourceRaw - The Yaml file of the ClusterPolicy
-	ResourceRaw []byte
-	// ResourceNamespace - Namespace of the Resource
-	ResourceNamespace string
-	// MustSucceed declares if test case must fail on validation
-	MustSucceed bool
-}{
-	{
-		TestName:          "test-validate-with-flux-and-variable-substitution-2043",
-		PolicyRaw:         kyverno_2043_policy,
-		ResourceRaw:       kyverno_2043_FluxKustomization,
-		ResourceNamespace: "test-validate",
-		MustSucceed:       false,
-	},
-	{
-		TestName:          "test-validate-with-flux-and-variable-substitution-2241",
-		PolicyRaw:         kyverno_2241_policy,
-		ResourceRaw:       kyverno_2241_FluxKustomization,
-		ResourceNamespace: "test-validate",
-		MustSucceed:       true,
-	},
-}
-
 var (
-	podGVR        = e2e.GetGVR("", "v1", "pods")
-	deploymentGVR = e2e.GetGVR("apps", "v1", "deployments")
-	configmapGVR  = e2e.GetGVR("", "v1", "configmaps")
+	podGVR       = e2e.GetGVR("", "v1", "pods")
+	kustomizeGVR = e2e.GetGVR("kustomize.toolkit.fluxcd.io", "v1beta1", "kustomizations")
 )
 
-var ValidateTests = []struct {
+type ValidationTest struct {
 	// TestDescription - Description of the Test
 	TestDescription string
 	// PolicyName - Name of the Policy
@@ -57,126 +27,63 @@ var ValidateTests = []struct {
 	ResourceRaw []byte
 	// MustSucceed - indicates if validation must succeed
 	MustSucceed bool
-}{
+}
+
+var FluxValidateTests = []ValidationTest{
+	{
+		TestDescription:   "test-validate-with-flux-and-variable-substitution-2043",
+		PolicyName:        "flux-multi-tenancy",
+		PolicyRaw:         kyverno2043Policy,
+		ResourceName:      "dev-team",
+		ResourceNamespace: "test-validate",
+		ResourceGVR:       kustomizeGVR,
+		ResourceRaw:       kyverno2043Fluxkustomization,
+		MustSucceed:       false,
+	},
+	{
+		TestDescription:   "test-validate-with-flux-and-variable-substitution-2241",
+		PolicyName:        "flux-multi-tenancy-2",
+		PolicyRaw:         kyverno2241Policy,
+		ResourceName:      "tenants",
+		ResourceNamespace: "test-validate",
+		ResourceGVR:       kustomizeGVR,
+		ResourceRaw:       kyverno2241Fluxkustomization,
+		MustSucceed:       true,
+	},
+}
+
+var ValidateTests = []ValidationTest{
 	{
 		// Case for https://github.com/kyverno/kyverno/issues/2345 issue
 		TestDescription:   "checks that contains function works properly with string list",
 		PolicyName:        "drop-cap-net-raw",
-		PolicyRaw:         kyverno_2345_policy,
+		PolicyRaw:         kyverno2345Policy,
 		ResourceName:      "test",
 		ResourceNamespace: "test-validate1",
 		ResourceGVR:       podGVR,
-		ResourceRaw:       kyverno_2345_resource,
-		MustSucceed:       false,
-	},
-	{
-		// Case for https://github.com/kyverno/kyverno/issues/2390 issue
-		TestDescription:   "checks that policy contains global anchor fields",
-		PolicyName:        "check-image-pull-secret",
-		PolicyRaw:         kyverno_global_anchor_validate_policy,
-		ResourceName:      "pod-with-nginx-allowed-registory",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       podGVR,
-		ResourceRaw:       kyverno_global_anchor_validate_resource_1,
-		MustSucceed:       true,
-	},
-	{
-		// Case for https://github.com/kyverno/kyverno/issues/2390 issue
-		TestDescription:   "checks that policy contains global anchor fields",
-		PolicyName:        "check-image-pull-secret",
-		PolicyRaw:         kyverno_global_anchor_validate_policy,
-		ResourceName:      "pod-with-nginx-disallowed-registory",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       podGVR,
-		ResourceRaw:       kyverno_global_anchor_validate_resource_2,
-		MustSucceed:       false,
-	},
-	{
-		// Case for image validation
-		TestDescription:   "checks that images are trustable",
-		PolicyName:        "check-trustable-images",
-		PolicyRaw:         kyverno_trustable_image_policy,
-		ResourceName:      "pod-with-trusted-registry",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       podGVR,
-		ResourceRaw:       kyverno_trusted_image_pod,
-		MustSucceed:       true,
-	},
-	{
-		// Case for image validation
-		TestDescription:   "checks that images are trustable",
-		PolicyName:        "check-trustable-images",
-		PolicyRaw:         kyverno_trustable_image_policy,
-		ResourceName:      "pod-with-root-user",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       podGVR,
-		ResourceRaw:       kyverno_pod_with_root_user,
+		ResourceRaw:       kyverno2345Resource,
 		MustSucceed:       false,
 	},
 	{
 		// Case for small image validation
 		TestDescription:   "checks that images are small",
 		PolicyName:        "check-small-images",
-		PolicyRaw:         kyverno_small_image_policy,
+		PolicyRaw:         kyvernoSmallImagePolicy,
 		ResourceName:      "pod-with-small-image",
 		ResourceNamespace: "test-validate",
 		ResourceGVR:       podGVR,
-		ResourceRaw:       kyverno_pod_with_small_image,
+		ResourceRaw:       kyvernoPodWithSmallImage,
 		MustSucceed:       true,
 	},
 	{
 		// Case for small image validation
 		TestDescription:   "checks that images are small",
 		PolicyName:        "check-large-images",
-		PolicyRaw:         kyverno_small_image_policy,
+		PolicyRaw:         kyvernoSmallImagePolicy,
 		ResourceName:      "pod-with-large-image",
 		ResourceNamespace: "test-validate",
 		ResourceGVR:       podGVR,
-		ResourceRaw:       kyverno_pod_with_large_image,
+		ResourceRaw:       kyvernoPodWithLargeImage,
 		MustSucceed:       false,
 	},
-	{
-		// Case for yaml signing validation
-		TestDescription:   "checks that unsigned yaml manifest is blocked",
-		PolicyName:        "check-yaml-signing",
-		PolicyRaw:         kyverno_yaml_signing_validate_policy,
-		ResourceName:      "test-deployment",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       deploymentGVR,
-		ResourceRaw:       kyverno_yaml_signing_validate_resource_1,
-		MustSucceed:       false,
-	},
-	{
-		// Case for yaml signing validation
-		TestDescription:   "checks that signed yaml manifest is created",
-		PolicyName:        "check-yaml-signing",
-		PolicyRaw:         kyverno_yaml_signing_validate_policy,
-		ResourceName:      "test-deployment",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       deploymentGVR,
-		ResourceRaw:       kyverno_yaml_signing_validate_resource_2,
-		MustSucceed:       true,
-	},
-	{
-		// Case for failing X.509 certificate decoding validation
-		TestDescription:   "checks if the public key modulus of base64 encoded x.509 certificate is same as the pem x.509 certificate",
-		PolicyName:        "check-x509-decode",
-		PolicyRaw:         kyverno_decode_x509_certificate_policy,
-		ResourceName:      "test-configmap",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       configmapGVR,
-		ResourceRaw:       kyverno_decode_x509_certificate_resource_fail,
-		MustSucceed:       false,
-	},
-	{
-		// Case for passing X.509 certificate decoding validation
-		TestDescription:   "checks if the public key modulus of base64 encoded x.509 certificate is same as the pem x.509 certificate",
-		PolicyName:        "check-x509-decode",
-		PolicyRaw:         kyverno_decode_x509_certificate_policy,
-		ResourceName:      "test-configmap",
-		ResourceNamespace: "test-validate",
-		ResourceGVR:       configmapGVR,
-		ResourceRaw:       kyverno_decode_x509_certificate_resource_pass,
-		MustSucceed:       true,
-	},
 }
diff --git a/test/e2e/validate/resources.go b/test/e2e/validate/resources.go
index 5c4e9a00c1..639655dc93 100644
--- a/test/e2e/validate/resources.go
+++ b/test/e2e/validate/resources.go
@@ -1,14 +1,8 @@
 package validate
 
-import "fmt"
-
-// Namespace Description
-var namespaceYaml = []byte(`
-apiVersion: v1
-kind: Namespace
-metadata:
-  name: test-validate
-`)
+import (
+	"fmt"
+)
 
 func newNamespaceYaml(name string) []byte {
 	ns := fmt.Sprintf(`
@@ -22,7 +16,7 @@ func newNamespaceYaml(name string) []byte {
 
 // Regression: https://github.com/kyverno/kyverno/issues/2043
 // Policy: https://github.com/fluxcd/flux2-multi-tenancy/blob/main/infrastructure/kyverno-policies/flux-multi-tenancy.yaml
-var kyverno_2043_policy = []byte(`
+var kyverno2043Policy = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
@@ -39,7 +33,6 @@ spec:
         resources:
           kinds:
             - Kustomization
-            - HelmRelease
       validate:
         message: ".spec.serviceAccountName is required"
         pattern:
@@ -54,7 +47,6 @@ spec:
         resources:
           kinds:
             - Kustomization
-            - HelmRelease
       validate:
         message: "spec.sourceRef.namespace must be the same as metadata.namespace"
         deny:
@@ -64,11 +56,11 @@ spec:
               value:  "{{request.object.metadata.namespace}}"
 `)
 
-var kyverno_2241_policy = []byte(`
+var kyverno2241Policy = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
-  name: flux-multi-tenancy
+  name: flux-multi-tenancy-2
 spec:
   validationFailureAction: enforce
   rules:
@@ -81,7 +73,6 @@ spec:
         resources:
           kinds:
             - Kustomization
-            - HelmRelease
       validate:
         message: ".spec.serviceAccountName is required"
         pattern:
@@ -96,10 +87,9 @@ spec:
         resources:
           kinds:
             - Kustomization
-            - HelmRelease
       preconditions:
         any:
-        - key: "{{request.object.spec.sourceRef.namespace}}"
+        - key: "{{request.object.spec.sourceRef.namespace || ''}}"
           operator: NotEquals
           value: ""
       validate:
@@ -111,7 +101,7 @@ spec:
               value:  "{{request.object.metadata.namespace}}"
 `)
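Besides the rename to flux-multi-tenancy-2 and dropping HelmRelease from the matched kinds, the notable change in this policy is the "|| ''" default in the precondition key. Under plain JMESPath semantics (sketched here with github.com/jmespath/go-jmespath; Kyverno's own variable substitution may differ in detail), a missing field resolves to null, while the fallback turns it into an empty string so the NotEquals "" precondition evaluates cleanly:

package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// A Kustomization whose spec.sourceRef has no namespace field
	// (evaluated here against the resource directly, for brevity).
	resource := map[string]interface{}{
		"spec": map[string]interface{}{
			"sourceRef": map[string]interface{}{"kind": "GitRepository"},
		},
	}

	// Without the fallback the missing field resolves to null (nil).
	v1, _ := jmespath.Search("spec.sourceRef.namespace", resource)
	// With the fallback it resolves to the empty string instead.
	v2, _ := jmespath.Search("spec.sourceRef.namespace || ''", resource)
	fmt.Printf("%#v %#v\n", v1, v2) // <nil> ""
}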
 
-var kyverno_2043_FluxCRD = []byte(`
+var kyverno2043Fluxcrd = []byte(`
 apiVersion: apiextensions.k8s.io/v1
 kind: CustomResourceDefinition
 metadata:
@@ -541,7 +531,7 @@ status:
   storedVersions: []
 `)
 
-var kyverno_2043_FluxKustomization = []byte(`
+var kyverno2043Fluxkustomization = []byte(`
 apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
 kind: Kustomization
 metadata:
@@ -557,7 +547,7 @@ spec:
   validation: client
 `)
 
-var kyverno_2241_FluxKustomization = []byte(`
+var kyverno2241Fluxkustomization = []byte(`
 apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
 kind: Kustomization
 metadata:
@@ -574,7 +564,7 @@ spec:
   validation: client
 `)
 
-var kyverno_2345_policy = []byte(`
+var kyverno2345Policy = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
@@ -597,7 +587,7 @@ spec:
             value: false
 `)
 
-var kyverno_2345_resource = []byte(`
+var kyverno2345Resource = []byte(`
 apiVersion: v1
 kind: Pod
 metadata:
@@ -632,115 +622,7 @@ spec:
         - CAP_SOMETHING
 `)
 
-var kyverno_trustable_image_policy = []byte(`
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: check-trustable-images
-spec:
-  validationFailureAction: enforce
-  rules:
-  - name: only-allow-trusted-images
-    match:
-      resources:
-        kinds:
-        - Pod
-    preconditions:
-      - key: "{{request.operation}}"
-        operator: NotEquals
-        value: DELETE
-    validate:
-      message: "images with root user are not allowed"
-      foreach:
-      - list: "request.object.spec.containers"
-        context:
-        - name: imageData
-          imageRegistry:
-            reference: "{{ element.image }}"
-            jmesPath: "{user: configData.config.User || '', registry: registry}"
-        deny:
-          conditions:
-            all:
-              - key: "{{ imageData.user }}"
-                operator: Equals
-                value: ""
-              - key: "{{ imageData.registry }}"
-                operator: NotEquals
-                value: "ghcr.io"
-`)
-
-var kyverno_global_anchor_validate_policy = []byte(`
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: sample
-spec:
-  validationFailureAction: enforce
-  rules:
-  - name: check-container-image
-    match:
-      resources:
-        kinds:
-        - Pod
-    validate:
-      pattern:
-        spec:
-          containers:
-          - name: "*"
-            <(image): "nginx"
-          imagePullSecrets:
-          - name: my-registry-secret
-`)
-
-var kyverno_global_anchor_validate_resource_1 = []byte(`
-apiVersion: v1
-kind: Pod
-metadata:
-  name: pod-with-nginx-allowed-registory
-spec:
-  containers:
-  - name: nginx
-    image: nginx
-  imagePullSecrets:
-  - name: my-registry-secret
-`)
-
-var kyverno_global_anchor_validate_resource_2 = []byte(`
-apiVersion: v1
-kind: Pod
-metadata:
-  name: pod-with-nginx-disallowed-registory
-spec:
-  containers:
-  - name: nginx
-    image: nginx
-  imagePullSecrets:
-  - name: other-registory-secret
-`)
-
-var kyverno_trusted_image_pod = []byte(`
-apiVersion: v1
-kind: Pod
-metadata:
-  name: pod-with-trusted-registry
-spec:
-  containers:
-  - name: kyverno
-    image: ghcr.io/kyverno/kyverno:latest
-`)
-
-var kyverno_pod_with_root_user = []byte(`
-apiVersion: v1
-kind: Pod
-metadata:
-  name: pod-with-root-user
-spec:
-  containers:
-  - name: ubuntu
-    image: ubuntu:bionic
-`)
-
-var kyverno_small_image_policy = []byte(`
+var kyvernoSmallImagePolicy = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
@@ -775,7 +657,7 @@ spec:
               value: "{{imageSize}}"
 `)
 
-var kyverno_pod_with_small_image = []byte(`
+var kyvernoPodWithSmallImage = []byte(`
 apiVersion: v1
 kind: Pod
 metadata:
@@ -786,7 +668,7 @@ spec:
     image: busybox:latest
 `)
 
-var kyverno_pod_with_large_image = []byte(`
+var kyvernoPodWithLargeImage = []byte(`
 apiVersion: v1
 kind: Pod
 metadata:
@@ -796,188 +678,3 @@ spec:
   - name: large-image
     image: nvidia/cuda:11.6.0-devel-ubi8
 `)
-
-var kyverno_yaml_signing_validate_policy = []byte(`
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: validate-resources
-spec:
-  validationFailureAction: enforce
-  background: false
-  webhookTimeoutSeconds: 30
-  failurePolicy: Fail  
-  rules:
-    - name: validate-resources
-      match:
-        any:
-        - resources:
-            kinds:
-              - Deployment
-              - Pod
-            name: test*
-      exclude:
-        any:
-        - resources:
-            kinds:
-              - Pod
-          subjects:
-          - kind: ServiceAccount
-            namespace: kube-system
-            name: replicaset-controller
-        - resources:
-            kinds:
-              - ReplicaSet
-          subjects:
-          - kind: ServiceAccount
-            namespace: kube-system
-            name: deployment-controller
-      validate:
-        manifests:
-          attestors:
-          - entries:
-            - keys: 
-                publicKeys:  |-
-                  -----BEGIN PUBLIC KEY-----
-                  MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEyQfmL5YwHbn9xrrgG3vgbU0KJxMY
-                  BibYLJ5L4VSMvGxeMLnBGdM48w5IE//6idUPj3rscigFdHs7GDMH4LLAng==
-                  -----END PUBLIC KEY-----
-`)
-
-var kyverno_yaml_signing_validate_resource_1 = []byte(`
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  labels:
-    app: nginx
-  name: test-deployment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-        - image: nginx:1.14.2
-          name: nginx
-          ports:
-            - containerPort: 80
-
-`)
-
-var kyverno_yaml_signing_validate_resource_2 = []byte(`
-apiVersion: apps/v1
-kind: Deployment
-metadata:
-  annotations:
-    cosign.sigstore.dev/message: H4sIAAAAAAAA/wBaAaX+H4sIAAAAAAAA/+ySz27bMAzGffZT8AUcSf6TpDrvuMMOw64DazOeEP2bxBZtn35wnXhegOW4oYB/F9rg930gQYlnTOIU7EApC/8mlDye7c9xqNk/Stc49902rn1ppZRy9OKr6IOLiXI2fqwYUzW+KXmQDw9tUx8FU+ZqoGjDqyPPu1d0tigm775t3+th371XWc//E12zL1Rbq042XacOhWzquusKkMU/4CkzpkLKdH4awh1dZjyd7vQvuyz1g4DRfKOUTfAaMMYsnlV5Nn7Q8Gk5Y+mIcUBGXQJYfCSbpy+YDBr8aPxLCeDRkYabF1DmSP0kThSt6TFrUCVAJks9hzTHOOT+x+dV7k0yk4sWmS7q1TAT9g/jjRXgOsBEHzyj8ZRW8gqMw5EuFq12qt3VS/e61u+8mRgSr0LmoCX+S0is4SjL/33djY2Njb/zKwAA//+MAMwjAAgAAAEAAP//7NcJ9loBAAA=
-    cosign.sigstore.dev/signature: MEUCICLCfb3LGKXcdKV3gTXl6qba3T2goZMbVX/54gyNR05UAiEAlvPuWVsCPuBx5wVqvtyT7hr/AfR9Fl7cNLDACaNIbx8=
-  labels:
-    app: nginx
-  name: test-deployment
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx
-  template:
-    metadata:
-      labels:
-        app: nginx
-    spec:
-      containers:
-      - image: nginx:1.14.2
-        name: nginx
-        ports:
-        - containerPort: 80
-`)
-
-var kyverno_decode_x509_certificate_policy = []byte(`
-apiVersion: kyverno.io/v1
-kind: ClusterPolicy
-metadata:
-  name: test-x509-decode
-spec:
-  validationFailureAction: enforce
-  rules:
-  - name: test-x509-decode
-    match:
-      any:
-      - resources:
-          kinds:
-          - ConfigMap
-    validate:
-      message: "public key modulus mismatch: \"{{ x509_decode('{{request.object.data.cert}}').PublicKey.N }}\" != \"{{ x509_decode('{{base64_decode('{{request.object.data.certB64}}')}}').PublicKey.N }}\""
-      deny:
-        conditions:
-          any:
-            - key: "{{ x509_decode('{{request.object.data.cert}}').PublicKey.N }}"
-              operator: NotEquals
-              value: "{{ x509_decode('{{base64_decode('{{request.object.data.certB64}}')}}').PublicKey.N }}"
-`)
-
-var kyverno_decode_x509_certificate_resource_fail = []byte(`
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: test-configmap
-  namespace: test-validate
-data:
-  cert: |
-    -----BEGIN CERTIFICATE-----
-    MIIDSjCCAjKgAwIBAgIUWxmj40l+TDVJq98Xy7c6Leo3np8wDQYJKoZIhvcNAQEL
-    BQAwPTELMAkGA1UEBhMCeHgxCjAIBgNVBAgTAXgxCjAIBgNVBAcTAXgxCjAIBgNV
-    BAoTAXgxCjAIBgNVBAsTAXgwHhcNMTgwMjAyMTIzODAwWhcNMjMwMjAxMTIzODAw
-    WjA9MQswCQYDVQQGEwJ4eDEKMAgGA1UECBMBeDEKMAgGA1UEBxMBeDEKMAgGA1UE
-    ChMBeDEKMAgGA1UECxMBeDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-    ANHkqOmVf23KMXdaZU2eFUx1h4wb09JINBB8x/HL7UE0KFJcnOoVnNQB0gRukUop
-    iYCzrzMFyGWWmB/pAEKool+ZiI2uMy6mcYBDtOi4pOm7U0TQQMV6L/5Yfi65xRz3
-    RTMd/tYAoFi4aCZbJAGjxU6UWNYDzTy8E/cP6ZnlNbVHRiA6/wHsoWcXtWTXYP5y
-    n9cf7EWQi1hOBM4BWmOIyB1f6LEgQipZWMOMPPHO3hsuSBn0rk7jovSt5XTlbgRr
-    txqAJiNjJUykWzIF+lLnZCioippGv5vkdGvE83JoACXvZTUwzA+MLu49fkw3bweq
-    kbhrer8kacjfGlw3aJN37eECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
-    EwEB/wQFMAMBAf8wHQYDVR0OBBYEFKXcb52bv6oqnD+D9fTNFHZL8IWxMA0GCSqG
-    SIb3DQEBCwUAA4IBAQADvKvv3ym0XAYwKxPLLl3Lc6sJYHDbTN0donduG7PXeb1d
-    huukJ2lfufUYp2IGSAxuLecTYeeByOVp1gaMb5LsIGt2BVDmlMMkiH29LUHsvbyi
-    85CpJo7A5RJG6AWW2VBCiDjz5v8JFM6pMkBRFfXH+pwIge65CE+MTSQcfb1/aIIo
-    Q226P7E/3uUGX4k4pDXG/O7GNvykF40v1DB5y7DDBTQ4JWiJfyGkT69TmdOGLFAm
-    jwxUjWyvEey4qJex/EGEm5RQcMv9iy7tba1wK7sykNGn5uDELGPGIIEAa5rIHm1F
-    UFOZZVoELaasWS559wy8og39Eq21dDMynb8Bndn/
-    -----END CERTIFICATE-----
-  certB64: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM3VENDQWRXZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFZTVJZd0ZBWURWUVFEREEwcUxtdDUKZG1WeWJtOHVjM1pqTUI0WERUSXlNREV4TVRFek1qWTBNMW9YRFRJek1ERXhNVEUwTWpZME0xb3dHREVXTUJRRwpBMVVFQXd3TktpNXJlWFpsY201dkxuTjJZekNDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DCmdnRUJBTXNBejg1K3lpbm8rTW1kS3NWdEh3Tmkzb0FWanVtelhIaUxmVUpLN3hpNUtVOEI3Z29QSEYvVkNlL1YKN1kyYzRhZnlmZ1kyZVB3NEx4U0RrQ1lOZ1l3cWpTd0dJYmNzcXY1WlJhekJkRHhSMDlyaTZQa25OeUJWR0xpNQpSbFBYSXJHUTNwc051ZjU1cXd4SnhMTzMxcUNadXZrdEtZNVl2dUlSNEpQbUJodVNGWE9ubjBaaVF3OHV4TWNRCjBRQTJseitQeFdDVk5rOXErMzFINURIMW9ZWkRMZlUzbWlqSU9BK0FKR1piQmIrWndCbXBWTDArMlRYTHhFNzQKV293ZEtFVitXVHNLb2pOVGQwVndjdVJLUktSLzZ5blhBQWlzMjF5MVg3VWk5RkpFNm1ESXlsVUQ0MFdYT0tHSgoxbFlZNDFrUm5ZaFZodlhZTjlKdE5ZZFkzSHNDQXdFQUFhTkNNRUF3RGdZRFZSMFBBUUgvQkFRREFnS2tNQThHCkExVWRFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRk9ubEFTVkQ5ZnUzVEFqcHRsVy9nQVhBNHFsK01BMEcKQ1NxR1NJYjNEUUVCQ3dVQUE0SUJBUUNJcHlSaUNoeHA5N2NyS2ZRMjRKdDd6OFArQUdwTGYzc1g0ZUw4N0VTYQo3UVJvVkp0WExtYXV0MXBVRW9ZTFFydUttaC8wWUZ0Wkc5V3hWZ1k2aXVLYldudTdiT2VNQi9JcitWL3lyWDNSCitYdlpPc3VYaUpuRWJKaUJXNmxKekxsZG9XNGYvNzFIK2oxV0Q0dEhwcW1kTXhxL3NMcVhmUEl1YzAvbTB5RkMKbitBREJXR0dCOE5uNjZ2eHR2K2NUNnArUklWb3RYUFFXYk1pbFdwNnBkNXdTdUI2OEZxckR3dFlMTkp0UHdGcwo5TVBWa3VhSmRZWjBlV2Qvck1jS0Q5NEhnZjg5Z3ZBMCtxek1WRmYrM0JlbVhza2pRUll5NkNLc3FveUM2alg0Cm5oWWp1bUFQLzdwc2J6SVRzbnBIdGZDRUVVKzJKWndnTTQwNmFpTWNzZ0xiCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
-`)
-
-var kyverno_decode_x509_certificate_resource_pass = []byte(`
-apiVersion: v1
-kind: ConfigMap
-metadata:
-  name: test-configmap
-  namespace: test-validate
-data:
-  cert: |
-    -----BEGIN CERTIFICATE-----
-    MIIDSjCCAjKgAwIBAgIUWxmj40l+TDVJq98Xy7c6Leo3np8wDQYJKoZIhvcNAQEL
-    BQAwPTELMAkGA1UEBhMCeHgxCjAIBgNVBAgTAXgxCjAIBgNVBAcTAXgxCjAIBgNV
-    BAoTAXgxCjAIBgNVBAsTAXgwHhcNMTgwMjAyMTIzODAwWhcNMjMwMjAxMTIzODAw
-    WjA9MQswCQYDVQQGEwJ4eDEKMAgGA1UECBMBeDEKMAgGA1UEBxMBeDEKMAgGA1UE
-    ChMBeDEKMAgGA1UECxMBeDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEB
-    ANHkqOmVf23KMXdaZU2eFUx1h4wb09JINBB8x/HL7UE0KFJcnOoVnNQB0gRukUop
-    iYCzrzMFyGWWmB/pAEKool+ZiI2uMy6mcYBDtOi4pOm7U0TQQMV6L/5Yfi65xRz3
-    RTMd/tYAoFi4aCZbJAGjxU6UWNYDzTy8E/cP6ZnlNbVHRiA6/wHsoWcXtWTXYP5y
-    n9cf7EWQi1hOBM4BWmOIyB1f6LEgQipZWMOMPPHO3hsuSBn0rk7jovSt5XTlbgRr
-    txqAJiNjJUykWzIF+lLnZCioippGv5vkdGvE83JoACXvZTUwzA+MLu49fkw3bweq
-    kbhrer8kacjfGlw3aJN37eECAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgEGMA8GA1Ud
-    EwEB/wQFMAMBAf8wHQYDVR0OBBYEFKXcb52bv6oqnD+D9fTNFHZL8IWxMA0GCSqG
-    SIb3DQEBCwUAA4IBAQADvKvv3ym0XAYwKxPLLl3Lc6sJYHDbTN0donduG7PXeb1d
-    huukJ2lfufUYp2IGSAxuLecTYeeByOVp1gaMb5LsIGt2BVDmlMMkiH29LUHsvbyi
-    85CpJo7A5RJG6AWW2VBCiDjz5v8JFM6pMkBRFfXH+pwIge65CE+MTSQcfb1/aIIo
-    Q226P7E/3uUGX4k4pDXG/O7GNvykF40v1DB5y7DDBTQ4JWiJfyGkT69TmdOGLFAm
-    jwxUjWyvEey4qJex/EGEm5RQcMv9iy7tba1wK7sykNGn5uDELGPGIIEAa5rIHm1F
-    UFOZZVoELaasWS559wy8og39Eq21dDMynb8Bndn/
-    -----END CERTIFICATE-----
-  certB64: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURTakNDQWpLZ0F3SUJBZ0lVV3htajQwbCtURFZKcTk4WHk3YzZMZW8zbnA4d0RRWUpLb1pJaHZjTkFRRUwKQlFBd1BURUxNQWtHQTFVRUJoTUNlSGd4Q2pBSUJnTlZCQWdUQVhneENqQUlCZ05WQkFjVEFYZ3hDakFJQmdOVgpCQW9UQVhneENqQUlCZ05WQkFzVEFYZ3dIaGNOTVRnd01qQXlNVEl6T0RBd1doY05Nak13TWpBeE1USXpPREF3CldqQTlNUXN3Q1FZRFZRUUdFd0o0ZURFS01BZ0dBMVVFQ0JNQmVERUtNQWdHQTFVRUJ4TUJlREVLTUFnR0ExVUUKQ2hNQmVERUtNQWdHQTFVRUN4TUJlRENDQVNJd0RRWUpLb1pJaHZjTkFRRUJCUUFEZ2dFUEFEQ0NBUW9DZ2dFQgpBTkhrcU9tVmYyM0tNWGRhWlUyZUZVeDFoNHdiMDlKSU5CQjh4L0hMN1VFMEtGSmNuT29Wbk5RQjBnUnVrVW9wCmlZQ3pyek1GeUdXV21CL3BBRUtvb2wrWmlJMnVNeTZtY1lCRHRPaTRwT203VTBUUVFNVjZMLzVZZmk2NXhSejMKUlRNZC90WUFvRmk0YUNaYkpBR2p4VTZVV05ZRHpUeThFL2NQNlpubE5iVkhSaUE2L3dIc29XY1h0V1RYWVA1eQpuOWNmN0VXUWkxaE9CTTRCV21PSXlCMWY2TEVnUWlwWldNT01QUEhPM2hzdVNCbjByazdqb3ZTdDVYVGxiZ1JyCnR4cUFKaU5qSlV5a1d6SUYrbExuWkNpb2lwcEd2NXZrZEd2RTgzSm9BQ1h2WlRVd3pBK01MdTQ5Zmt3M2J3ZXEKa2JocmVyOGthY2pmR2x3M2FKTjM3ZUVDQXdFQUFhTkNNRUF3RGdZRFZSMFBBUUgvQkFRREFnRUdNQThHQTFVZApFd0VCL3dRRk1BTUJBZjh3SFFZRFZSME9CQllFRktYY2I1MmJ2Nm9xbkQrRDlmVE5GSFpMOElXeE1BMEdDU3FHClNJYjNEUUVCQ3dVQUE0SUJBUUFEdkt2djN5bTBYQVl3S3hQTExsM0xjNnNKWUhEYlROMGRvbmR1RzdQWGViMWQKaHV1a0oybGZ1ZlVZcDJJR1NBeHVMZWNUWWVlQnlPVnAxZ2FNYjVMc0lHdDJCVkRtbE1Na2lIMjlMVUhzdmJ5aQo4NUNwSm83QTVSSkc2QVdXMlZCQ2lEano1djhKRk02cE1rQlJGZlhIK3B3SWdlNjVDRStNVFNRY2ZiMS9hSUlvClEyMjZQN0UvM3VVR1g0azRwRFhHL083R052eWtGNDB2MURCNXk3RERCVFE0SldpSmZ5R2tUNjlUbWRPR0xGQW0Kand4VWpXeXZFZXk0cUpleC9FR0VtNVJRY012OWl5N3RiYTF3SzdzeWtOR241dURFTEdQR0lJRUFhNXJJSG0xRgpVRk9aWlZvRUxhYXNXUzU1OXd5OG9nMzlFcTIxZERNeW5iOEJuZG4vCi0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K
-`)
diff --git a/test/e2e/validate/validate_test.go b/test/e2e/validate/validate_test.go
index a6e92b9f98..37d26d883a 100644
--- a/test/e2e/validate/validate_test.go
+++ b/test/e2e/validate/validate_test.go
@@ -1,7 +1,6 @@
 package validate
 
 import (
-	"errors"
 	"fmt"
 	"os"
 	"testing"
@@ -24,9 +23,6 @@ var (
 
 	// ClusterPolicy Namespace
 	policyNamespace = ""
-	// Namespace Name
-	// Hardcoded in YAML Definition
-	nspace = "test-validate"
 
 	crdName = "kustomizations.kustomize.toolkit.fluxcd.io"
 )
@@ -41,58 +37,32 @@ func Test_Validate_Flux_Sets(t *testing.T) {
 	e2eClient, err := e2e.NewE2EClient()
 	Expect(err).To(BeNil())
 
+	// Create Flux CRD
+	err = createKustomizationCRD(e2eClient)
+	Expect(err).NotTo(HaveOccurred())
+
+	// A created CRD does not guarantee that new resources of that kind can be created right away
+	time.Sleep(10 * time.Second)
+
 	for _, test := range FluxValidateTests {
-		By(fmt.Sprintf("Test to validate objects: \"%s\"", test.TestName))
+		By(fmt.Sprintf("Validate Test: %s", test.TestDescription))
 
-		// Clean up Resources
-		By(string("Cleaning Cluster Policies"))
-		e2eClient.CleanClusterPolicies(policyGVR)
-		// Clear Namespace
-		By(fmt.Sprintf("Deleting Namespace: \"%s\"", nspace))
-		e2eClient.DeleteClusteredResource(namespaceGVR, nspace)
-		//CleanUp CRDs
-		e2eClient.DeleteClusteredResource(crdGVR, crdName)
-
-		// Wait Till Deletion of Namespace
-		e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
-			_, err := e2eClient.GetClusteredResource(namespaceGVR, nspace)
-			if err != nil {
-				return nil
-			}
-			return errors.New("Deleting Namespace")
-		})
-
-		// Create Namespace
-		By(fmt.Sprintf("Creating namespace \"%s\"", nspace))
-		_, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, namespaceYaml)
+		err = deleteClusterPolicy(e2eClient)
 		Expect(err).NotTo(HaveOccurred())
 
-		// Create policy
-		By(fmt.Sprintf("Creating policy in \"%s\"", policyNamespace))
-		_, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, "", test.PolicyRaw)
+		err = deleteResource(e2eClient, test)
 		Expect(err).NotTo(HaveOccurred())
 
-		// Create Flux CRD
-		By(fmt.Sprintf("Creating Flux CRD in \"%s\"", nspace))
-		_, err = e2eClient.CreateClusteredResourceYaml(crdGVR, kyverno_2043_FluxCRD)
+		err = deleteNamespace(e2eClient, test)
 		Expect(err).NotTo(HaveOccurred())
 
-		// Wait till CRD is created
-		e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
-			_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
-			if err == nil {
-				return nil
-			}
-			return errors.New("Waiting for CRD to be created...")
-		})
+		err = createNamespace(e2eClient, test)
+		Expect(err).NotTo(HaveOccurred())
 
-		// Created CRD is not a garantee that we already can create new resources
-		time.Sleep(3 * time.Second)
+		err = createPolicy(e2eClient, test)
+		Expect(err).NotTo(HaveOccurred())
 
-		// Create Kustomize resource
-		kustomizeGVR := e2e.GetGVR("kustomize.toolkit.fluxcd.io", "v1beta1", "kustomizations")
-		By(fmt.Sprintf("Creating Kustomize resource in \"%s\"", nspace))
-		_, err = e2eClient.CreateNamespacedResourceYaml(kustomizeGVR, nspace, "", test.ResourceRaw)
+		err = createResource(e2eClient, test)
 
 		if test.MustSucceed {
 			Expect(err).NotTo(HaveOccurred())
@@ -100,25 +70,20 @@ func Test_Validate_Flux_Sets(t *testing.T) {
 			Expect(err).To(HaveOccurred())
 		}
 
-		//CleanUp Resources
-		e2eClient.CleanClusterPolicies(policyGVR)
+		err = deleteClusterPolicy(e2eClient)
+		Expect(err).NotTo(HaveOccurred())
 
-		//CleanUp CRDs
-		e2eClient.DeleteClusteredResource(crdGVR, crdName)
+		err = deleteResource(e2eClient, test)
+		Expect(err).NotTo(HaveOccurred())
 
-		// Clear Namespace
-		e2eClient.DeleteClusteredResource(namespaceGVR, nspace)
-		// Wait Till Deletion of Namespace
-		e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
-			_, err := e2eClient.GetClusteredResource(namespaceGVR, nspace)
-			if err != nil {
-				return nil
-			}
-			return errors.New("Deleting Namespace")
-		})
+		err = deleteNamespace(e2eClient, test)
+		Expect(err).NotTo(HaveOccurred())
 
-		By(fmt.Sprintf("Test %s Completed \n\n\n", test.TestName))
+		By("Test passed successfully:" + test.TestDescription)
 	}
+
+	err = deleteKustomizationCRD(e2eClient)
+	Expect(err).NotTo(HaveOccurred())
 }
 
 func TestValidate(t *testing.T) {
@@ -131,55 +96,27 @@ func TestValidate(t *testing.T) {
 	Expect(err).To(BeNil())
 
 	for _, test := range ValidateTests {
-		By(fmt.Sprintf("Mutation Test: %s", test.TestDescription))
+		By(fmt.Sprintf("Validate Test: %s", test.TestDescription))
 
-		By("Deleting Cluster Policies...")
-		_ = e2eClient.CleanClusterPolicies(policyGVR)
-
-		By("Deleting Resource...")
-		_ = e2eClient.DeleteNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)
-
-		By("Deleting Namespace...")
-		By(fmt.Sprintf("Deleting Namespace: %s...", test.ResourceNamespace))
-		_ = e2eClient.DeleteClusteredResource(namespaceGVR, test.ResourceNamespace)
-
-		By("Wait Till Deletion of Namespace...")
-		err = e2e.GetWithRetry(1*time.Second, 15, func() error {
-			_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
-			if err != nil {
-				return nil
-			}
-			return fmt.Errorf("failed to delete namespace: %v", err)
-		})
+		err = deleteClusterPolicy(e2eClient)
 		Expect(err).NotTo(HaveOccurred())
 
-		By(fmt.Sprintf("Creating Namespace: %s...", policyNamespace))
-		_, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(test.ResourceNamespace))
+		err = deleteResource(e2eClient, test)
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Wait Till Creation of Namespace...")
-		err = e2e.GetWithRetry(1*time.Second, 15, func() error {
-			_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
-			if err != nil {
-				return err
-			}
-
-			return nil
-		})
+		err = deleteNamespace(e2eClient, test)
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Creating Policy...")
-		_, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
+		err = createNamespace(e2eClient, test)
 		Expect(err).NotTo(HaveOccurred())
 
-		err = commonE2E.PolicyCreated(test.PolicyName)
+		err = createPolicy(e2eClient, test)
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Creating Resource...")
-		_, err = e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, test.PolicyName, test.ResourceRaw)
+		err = createResource(e2eClient, test)
 
 		statusErr, ok := err.(*k8sErrors.StatusError)
-		validationError := (ok && statusErr.ErrStatus.Code == 400) // Validation error is always Bad Request
+		validationError := ok && statusErr.ErrStatus.Code == 400 // Validation error is always Bad Request
 
 		if test.MustSucceed || !validationError {
 			Expect(err).NotTo(HaveOccurred())
@@ -187,28 +124,111 @@ func TestValidate(t *testing.T) {
 			Expect(err).To(HaveOccurred())
 		}
 
-		By("Deleting Cluster Policies...")
-		err = e2eClient.CleanClusterPolicies(policyGVR)
+		err = deleteClusterPolicy(e2eClient)
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Deleting Resource...") // if it is present, so ignore an error
-		e2eClient.DeleteNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)
-
-		By("Deleting Namespace...")
-		err = e2eClient.DeleteClusteredResource(namespaceGVR, test.ResourceNamespace)
+		err = deleteResource(e2eClient, test)
 		Expect(err).NotTo(HaveOccurred())
 
-		By("Wait Till Creation of Namespace...")
-		e2e.GetWithRetry(1*time.Second, 15, func() error {
-			_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
-			if err != nil {
-				return nil
-			}
-			return fmt.Errorf("failed to delete namespace: %v", err)
-		})
-
-		// Do not fail if waiting fails. Sometimes namespace needs time to be deleted.
+		err = deleteNamespace(e2eClient, test)
+		Expect(err).NotTo(HaveOccurred())
 
 		By("Done")
 	}
 }
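TestValidate above accepts a resource-creation error only when it is an HTTP 400 StatusError, since that is how an enforce-mode denial surfaces from the API server. A minimal sketch of that check factored into a helper (the name isValidationError is illustrative, not part of the patch):

import k8sErrors "k8s.io/apimachinery/pkg/api/errors"

// isValidationError reports whether err is the kind of admission rejection
// the test treats as a policy denial: a StatusError with code 400.
func isValidationError(err error) bool {
	statusErr, ok := err.(*k8sErrors.StatusError)
	return ok && statusErr.ErrStatus.Code == 400
}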
+
+func createNamespace(e2eClient *e2e.E2EClient, test ValidationTest) error {
+	By(fmt.Sprintf("Creating Namespace: %s...", test.ResourceNamespace))
+	_, err := e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(test.ResourceNamespace))
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Wait Till Creation of Namespace...")
+	err = e2e.GetWithRetry(1*time.Second, 240, func() error {
+		_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+	return err
+}
+
+func createPolicy(e2eClient *e2e.E2EClient, test ValidationTest) error {
+	By("Creating Policy...")
+	_, err := e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
+	Expect(err).NotTo(HaveOccurred())
+
+	err = commonE2E.PolicyCreated(test.PolicyName)
+	return err
+}
+
+func createResource(e2eClient *e2e.E2EClient, test ValidationTest) error {
+	By("Creating Resource...")
+	_, err := e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, test.ResourceName, test.ResourceRaw)
+	return err
+}
+
+func createKustomizationCRD(e2eClient *e2e.E2EClient) error {
+	By("Creating Flux CRD")
+	_, err := e2eClient.CreateClusteredResourceYaml(crdGVR, kyverno2043Fluxcrd)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Wait till CRD is created
+	By("Wait Till Creation of CRD...")
+	err = e2e.GetWithRetry(1*time.Second, 240, func() error {
+		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
+		if err == nil {
+			return nil
+		}
+		return fmt.Errorf("failed to create CRD: %v", err)
+	})
+	return err
+}
+
+func deleteClusterPolicy(e2eClient *e2e.E2EClient) error {
+	By("Deleting Cluster Policies...")
+	err := e2eClient.CleanClusterPolicies(policyGVR)
+	return err
+}
+
+func deleteResource(e2eClient *e2e.E2EClient, test ValidationTest) error {
+	By("Deleting Resource...")
+	err := e2eClient.DeleteNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)
+	if k8sErrors.IsNotFound(err) {
+		return nil
+	}
+	return err
+}
+
+func deleteNamespace(e2eClient *e2e.E2EClient, test ValidationTest) error {
+	By("Deleting Namespace...")
+	By(fmt.Sprintf("Deleting Namespace: %s...", test.ResourceNamespace))
+	_ = e2eClient.DeleteClusteredResource(namespaceGVR, test.ResourceNamespace)
+
+	By("Wait Till Deletion of Namespace...")
+	err := e2e.GetWithRetry(1*time.Second, 240, func() error {
+		_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
+		if err != nil {
+			return nil
+		}
+		return fmt.Errorf("failed to delete namespace: %v", err)
+	})
+	return err
+}
+
+func deleteKustomizationCRD(e2eClient *e2e.E2EClient) error {
+	By("Deleting Flux CRD")
+	_ = e2eClient.DeleteClusteredResource(crdGVR, crdName)
+
+	// Wait till CRD is deleted
+	By("Wait Till Deletion of CRD...")
+	err := e2e.GetWithRetry(1*time.Second, 240, func() error {
+		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
+		if err != nil {
+			return nil
+		}
+		return fmt.Errorf("failed to delete CRD: %v", err)
+	})
+	return err
+}
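All of the helpers above poll through e2e.GetWithRetry(1*time.Second, 240, ...). Its implementation is not shown in this patch; a minimal sketch of the assumed semantics (run the check up to count times, sleeping interval between attempts, stop early on success) is:

import "time"

// GetWithRetry is assumed to re-run check up to count times, sleeping
// interval between attempts, and to return the last non-nil error if the
// check never succeeds.
func GetWithRetry(interval time.Duration, count int, check func() error) error {
	var err error
	for i := 0; i < count; i++ {
		if err = check(); err == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return err
}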