From a48049aac22fe39a168d9556a670ebac36eda9b6 Mon Sep 17 00:00:00 2001
From: shuting <shuting@nirmata.com>
Date: Thu, 13 Apr 2023 15:00:50 +0800
Subject: [PATCH] apply policy on UPDATEs with deletionTimestamp set (#6878)

Signed-off-by: ShutingZhao <shuting@nirmata.com>
---
 .../resource/imageverification/handler.go     | 17 ++----------
 pkg/webhooks/resource/mutation/mutation.go    | 21 ++-------------
 .../resource/validation/validation.go         | 18 ++-----------
 .../apply-on-deletion/01-cluster-policy.yaml  |  6 +++++
 .../apply-on-deletion/02-manifests.yaml       |  7 +++++
 .../apply-on-deletion/03-path-service.yaml    |  7 +++++
 .../04-script-patch-svc-type.yaml             | 13 ++++++++++
 .../05-update-svc-label.yaml                  | 26 +++++++++++++++++++
 .../06-remove-finalizer.yaml                  |  6 +++++
 .../cornercases/apply-on-deletion/README.md   | 11 ++++++++
 .../cornercases/apply-on-deletion/ns.yaml     |  4 +++
 .../apply-on-deletion/policy-ready.yaml       |  9 +++++++
 .../cornercases/apply-on-deletion/policy.yaml | 19 ++++++++++++++
 .../apply-on-deletion/service.yaml            | 23 ++++++++++++++++
 14 files changed, 137 insertions(+), 50 deletions(-)
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/01-cluster-policy.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/02-manifests.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/03-path-service.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/04-script-patch-svc-type.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/05-update-svc-label.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/06-remove-finalizer.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/README.md
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/ns.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy-ready.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy.yaml
 create mode 100644 test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/service.yaml

diff --git a/pkg/webhooks/resource/imageverification/handler.go b/pkg/webhooks/resource/imageverification/handler.go
index 5b23649072..2522814b74 100644
--- a/pkg/webhooks/resource/imageverification/handler.go
+++ b/pkg/webhooks/resource/imageverification/handler.go
@@ -19,7 +19,6 @@ import (
 	webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
 	"go.opentelemetry.io/otel/trace"
 	admissionv1 "k8s.io/api/admission/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -101,10 +100,8 @@ func (h *imageVerificationHandler) handleVerifyImages(
 
 	failurePolicy := policies[0].GetSpec().GetFailurePolicy()
 	blocked := webhookutils.BlockRequest(engineResponses, failurePolicy, logger)
-	if !isResourceDeleted(policyContext) {
-		events := webhookutils.GenerateEvents(engineResponses, blocked)
-		h.eventGen.Add(events...)
-	}
+	events := webhookutils.GenerateEvents(engineResponses, blocked)
+	h.eventGen.Add(events...)
 
 	if blocked {
 		logger.V(4).Info("admission request blocked")
@@ -134,16 +131,6 @@ func hasAnnotations(context *engine.PolicyContext) bool {
 	return len(annotations) != 0
 }
 
-func isResourceDeleted(policyContext *engine.PolicyContext) bool {
-	var deletionTimeStamp *metav1.Time
-	if resource := policyContext.NewResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	} else if resource := policyContext.OldResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	}
-	return deletionTimeStamp != nil
-}
-
 func (v *imageVerificationHandler) handleAudit(
 	ctx context.Context,
 	resource unstructured.Unstructured,
diff --git a/pkg/webhooks/resource/mutation/mutation.go b/pkg/webhooks/resource/mutation/mutation.go
index dde466338e..370a88509c 100644
--- a/pkg/webhooks/resource/mutation/mutation.go
+++ b/pkg/webhooks/resource/mutation/mutation.go
@@ -19,7 +19,6 @@ import (
 	webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
 	"go.opentelemetry.io/otel/trace"
 	admissionv1 "k8s.io/api/admission/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	corev1listers "k8s.io/client-go/listers/core/v1"
 )
 
@@ -84,10 +83,6 @@ func (v *mutationHandler) applyMutations(
 		return nil, nil, nil
 	}
 
-	if isResourceDeleted(policyContext) && request.Operation == admissionv1.Update {
-		return nil, nil, nil
-	}
-
 	var patches [][]byte
 	var engineResponses []engineapi.EngineResponse
 
@@ -135,10 +130,8 @@ func (v *mutationHandler) applyMutations(
 		patches = append(patches, annPatches...)
 	}
 
-	if !isResourceDeleted(policyContext) {
-		events := webhookutils.GenerateEvents(engineResponses, false)
-		v.eventGen.Add(events...)
-	}
+	events := webhookutils.GenerateEvents(engineResponses, false)
+	v.eventGen.Add(events...)
 
 	logMutationResponse(patches, engineResponses, v.log)
 
@@ -178,13 +171,3 @@ func logMutationResponse(patches [][]byte, engineResponses []engineapi.EngineRes
 		logger.Error(fmt.Errorf(webhookutils.GetErrorMsg(engineResponses)), "failed to apply mutation rules on the resource, reporting policy violation")
 	}
 }
-
-func isResourceDeleted(policyContext *engine.PolicyContext) bool {
-	var deletionTimeStamp *metav1.Time
-	if resource := policyContext.NewResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	} else if resource := policyContext.OldResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	}
-	return deletionTimeStamp != nil
-}
diff --git a/pkg/webhooks/resource/validation/validation.go b/pkg/webhooks/resource/validation/validation.go
index 191e949357..aaa80b8a3b 100644
--- a/pkg/webhooks/resource/validation/validation.go
+++ b/pkg/webhooks/resource/validation/validation.go
@@ -21,7 +21,6 @@ import (
 	webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
 	"go.opentelemetry.io/otel/trace"
 	admissionv1 "k8s.io/api/admission/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -79,17 +78,6 @@ func (v *validationHandler) HandleValidation(
 	resourceName := admissionutils.GetResourceName(request.AdmissionRequest)
 	logger := v.log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind)
 
-	var deletionTimeStamp *metav1.Time
-	if resource := policyContext.NewResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	} else if resource := policyContext.OldResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	}
-
-	if deletionTimeStamp != nil && request.Operation == admissionv1.Update {
-		return true, "", nil
-	}
-
 	var engineResponses []engineapi.EngineResponse
 	failurePolicy := kyvernov1.Ignore
 	for _, policy := range policies {
@@ -124,10 +112,8 @@ func (v *validationHandler) HandleValidation(
 	}
 
 	blocked := webhookutils.BlockRequest(engineResponses, failurePolicy, logger)
-	if deletionTimeStamp == nil {
-		events := webhookutils.GenerateEvents(engineResponses, blocked)
-		v.eventGen.Add(events...)
-	}
+	events := webhookutils.GenerateEvents(engineResponses, blocked)
+	v.eventGen.Add(events...)
 
 	if blocked {
 		logger.V(4).Info("admission request blocked")
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/01-cluster-policy.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/01-cluster-policy.yaml
new file mode 100644
index 0000000000..57ffd5631d
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/01-cluster-policy.yaml
@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+apply:
+- policy.yaml
+assert:
+- policy-ready.yaml
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/02-manifests.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/02-manifests.yaml
new file mode 100644
index 0000000000..0f220f560b
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/02-manifests.yaml
@@ -0,0 +1,7 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+apply:
+- ns.yaml
+- service.yaml
+assert:
+- service.yaml
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/03-path-service.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/03-path-service.yaml
new file mode 100644
index 0000000000..8edc796b93
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/03-path-service.yaml
@@ -0,0 +1,7 @@
+# This step adds a bogus finalizer and deletes the Service so it remains in a deleting state (deletionTimestamp set); see https://github.com/kyverno/kyverno/issues/5101
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+  - script: |
+      kubectl patch service podinfo -p '{"metadata":{"finalizers":["bburky.com/hax"]}}' -n apply-on-deletion-ns
+      kubectl delete service podinfo --wait=false -n apply-on-deletion-ns
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/04-script-patch-svc-type.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/04-script-patch-svc-type.yaml
new file mode 100644
index 0000000000..5cb2583f22
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/04-script-patch-svc-type.yaml
@@ -0,0 +1,13 @@
+## Checks that the Service CANNOT be patched to type NodePort while it is being deleted. If the patch succeeds, fail the test as this is incorrect.
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+    if kubectl patch service podinfo -p '{"spec":{"type":"NodePort","ports":[{"port":9898,"nodePort":32000}]}}' -n apply-on-deletion-ns
+    then
+      echo "Test failed. The service type change to NodePort should have been blocked"
+      exit 1
+    else
+      echo "Test succeeded. The violating service update was blocked"
+      exit 0
+    fi
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/05-update-svc-label.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/05-update-svc-label.yaml
new file mode 100644
index 0000000000..120e731226
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/05-update-svc-label.yaml
@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo
+  namespace: apply-on-deletion-ns
+  labels:
+    name: podinfo
+    namespace: apply-on-deletion-ns
+spec:
+  internalTrafficPolicy: Cluster
+  ipFamilies:
+  - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+  - name: http
+    port: 9898
+    protocol: TCP
+    targetPort: http
+  - name: grpc
+    port: 9999
+    protocol: TCP
+    targetPort: grpc
+  selector:
+    app: podinfo
+  sessionAffinity: None
+  type: ClusterIP
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/06-remove-finalizer.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/06-remove-finalizer.yaml
new file mode 100644
index 0000000000..e41fe50398
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/06-remove-finalizer.yaml
@@ -0,0 +1,6 @@
+## Removes the bogus finalizer so the Service deletion can complete and the test can clean up.
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+    kubectl patch service podinfo -p '{"metadata":{"finalizers":null}}' -n apply-on-deletion-ns
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/README.md b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/README.md
new file mode 100644
index 0000000000..2492fd9c67
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/README.md
@@ -0,0 +1,11 @@
+## Description
+
+This test ensures that the policy is applied to a resource that is being deleted (its deletionTimestamp is set).
+
+## Expected Behavior
+
+With a bogus finalizer added to the Service, deletion is blocked because no controller removes the finalizer, so the resource stays in a deleting state with deletionTimestamp set. While in this state, a patch to the Service that violates the policy should be blocked, whereas a patch that does not cause a violation should be allowed.
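+
+For reference, the commands below sketch the same sequence the test steps run (03 through 06); the `kubectl label` call is only an illustrative stand-in for re-applying the Service manifest with extra labels as step 05 does:
+
+```sh
+# Hold the Service in a deleting state: add a bogus finalizer, then delete it.
+kubectl patch service podinfo -n apply-on-deletion-ns \
+  -p '{"metadata":{"finalizers":["bburky.com/hax"]}}'
+kubectl delete service podinfo -n apply-on-deletion-ns --wait=false
+
+# While deletionTimestamp is set, a patch that violates the policy must be rejected.
+kubectl patch service podinfo -n apply-on-deletion-ns \
+  -p '{"spec":{"type":"NodePort","ports":[{"port":9898,"nodePort":32000}]}}'  # expected to be blocked
+
+# A non-violating update (e.g. adding a label) must still be admitted.
+kubectl label service podinfo -n apply-on-deletion-ns name=podinfo
+
+# Clean up: drop the finalizer so the deletion can complete.
+kubectl patch service podinfo -n apply-on-deletion-ns -p '{"metadata":{"finalizers":null}}'
+```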
+
+## Reference Issue(s)
+
+N/A
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/ns.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/ns.yaml
new file mode 100644
index 0000000000..d749e1367a
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/ns.yaml
@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: apply-on-deletion-ns
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy-ready.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy-ready.yaml
new file mode 100644
index 0000000000..e652590157
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy-ready.yaml
@@ -0,0 +1,9 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: cpol-apply-on-deletion
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy.yaml
new file mode 100644
index 0000000000..daeb1b478d
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/policy.yaml
@@ -0,0 +1,19 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: cpol-apply-on-deletion
+spec:
+  validationFailureAction: Enforce
+  background: true
+  rules:
+  - name: validate-nodeport
+    match:
+      any:
+      - resources:
+          kinds:
+          - Service
+    validate:
+      message: "Services of type NodePort are not allowed."
+      pattern:
+        spec:
+          =(type): "!NodePort"
\ No newline at end of file
diff --git a/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/service.yaml b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/service.yaml
new file mode 100644
index 0000000000..7ccc93bf48
--- /dev/null
+++ b/test/conformance/kuttl/validate/clusterpolicy/cornercases/apply-on-deletion/service.yaml
@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo
+  namespace: apply-on-deletion-ns
+spec:
+  internalTrafficPolicy: Cluster
+  ipFamilies:
+  - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+  - name: http
+    port: 9898
+    protocol: TCP
+    targetPort: http
+  - name: grpc
+    port: 9999
+    protocol: TCP
+    targetPort: grpc
+  selector:
+    app: podinfo
+  sessionAffinity: None
+  type: ClusterIP
\ No newline at end of file