Mirror of https://github.com/kyverno/kyverno.git, synced 2025-03-28 02:18:15 +00:00

apply policy on UPDATEs with deletionTimestamp set (#6878)

Signed-off-by: ShutingZhao <shuting@nirmata.com>

Parent: c30934add2
Commit: a48049aac2

14 changed files with 137 additions and 50 deletions
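Before this change, the mutation and validation webhook handlers skipped policy evaluation (and event generation) for UPDATE requests on resources that already carry a deletionTimestamp; the hunks below remove that guard, including the per-handler isResourceDeleted helper, so policies now apply to such UPDATEs as well. A rough, self-contained sketch of the check being removed, using only apimachinery types; the demo object in main is hypothetical and not part of the commit:

package main

import (
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// isResourceDeleted mirrors the helper removed by this commit: a resource is
// treated as "being deleted" when either the incoming (new) or the stored (old)
// object carries a deletionTimestamp.
func isResourceDeleted(newObj, oldObj *unstructured.Unstructured) bool {
	var deletionTimeStamp *metav1.Time
	if newObj != nil && newObj.Object != nil {
		deletionTimeStamp = newObj.GetDeletionTimestamp()
	} else if oldObj != nil && oldObj.Object != nil {
		deletionTimeStamp = oldObj.GetDeletionTimestamp()
	}
	return deletionTimeStamp != nil
}

func main() {
	// Hypothetical terminating Service: the deletionTimestamp is set but a
	// finalizer keeps the object around -- the state the kuttl test below
	// creates with a bogus finalizer plus `kubectl delete --wait=false`.
	svc := &unstructured.Unstructured{Object: map[string]interface{}{
		"apiVersion": "v1",
		"kind":       "Service",
		"metadata":   map[string]interface{}{"name": "podinfo"},
	}}
	now := metav1.NewTime(time.Now())
	svc.SetDeletionTimestamp(&now)

	// Old behavior: an UPDATE on such a resource bypassed policy evaluation.
	// New behavior: the policy is applied regardless of this check.
	fmt.Println("pending deletion:", isResourceDeleted(svc, nil))
}

The kuttl test added at the bottom of the diff exercises the same state end to end: it pins a Service with a finalizer, deletes it with --wait=false so the deletionTimestamp is set, and then verifies that a policy-violating patch is still blocked.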
@@ -19,7 +19,6 @@ import (
 	webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
 	"go.opentelemetry.io/otel/trace"
 	admissionv1 "k8s.io/api/admission/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -101,10 +100,8 @@ func (h *imageVerificationHandler) handleVerifyImages(
 
 	failurePolicy := policies[0].GetSpec().GetFailurePolicy()
 	blocked := webhookutils.BlockRequest(engineResponses, failurePolicy, logger)
-	if !isResourceDeleted(policyContext) {
-		events := webhookutils.GenerateEvents(engineResponses, blocked)
-		h.eventGen.Add(events...)
-	}
+	events := webhookutils.GenerateEvents(engineResponses, blocked)
+	h.eventGen.Add(events...)
 
 	if blocked {
 		logger.V(4).Info("admission request blocked")
@@ -134,16 +131,6 @@ func hasAnnotations(context *engine.PolicyContext) bool {
 	return len(annotations) != 0
 }
 
-func isResourceDeleted(policyContext *engine.PolicyContext) bool {
-	var deletionTimeStamp *metav1.Time
-	if resource := policyContext.NewResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	} else if resource := policyContext.OldResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	}
-	return deletionTimeStamp != nil
-}
-
 func (v *imageVerificationHandler) handleAudit(
 	ctx context.Context,
 	resource unstructured.Unstructured,

@@ -19,7 +19,6 @@ import (
 	webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
 	"go.opentelemetry.io/otel/trace"
 	admissionv1 "k8s.io/api/admission/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	corev1listers "k8s.io/client-go/listers/core/v1"
 )
 
@@ -84,10 +83,6 @@ func (v *mutationHandler) applyMutations(
 		return nil, nil, nil
 	}
 
-	if isResourceDeleted(policyContext) && request.Operation == admissionv1.Update {
-		return nil, nil, nil
-	}
-
 	var patches [][]byte
 	var engineResponses []engineapi.EngineResponse
 
@@ -135,10 +130,8 @@ func (v *mutationHandler) applyMutations(
 		patches = append(patches, annPatches...)
 	}
 
-	if !isResourceDeleted(policyContext) {
-		events := webhookutils.GenerateEvents(engineResponses, false)
-		v.eventGen.Add(events...)
-	}
+	events := webhookutils.GenerateEvents(engineResponses, false)
+	v.eventGen.Add(events...)
 
 	logMutationResponse(patches, engineResponses, v.log)
 
@@ -178,13 +171,3 @@ func logMutationResponse(patches [][]byte, engineResponses []engineapi.EngineRes
 		logger.Error(fmt.Errorf(webhookutils.GetErrorMsg(engineResponses)), "failed to apply mutation rules on the resource, reporting policy violation")
 	}
 }
-
-func isResourceDeleted(policyContext *engine.PolicyContext) bool {
-	var deletionTimeStamp *metav1.Time
-	if resource := policyContext.NewResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	} else if resource := policyContext.OldResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	}
-	return deletionTimeStamp != nil
-}

@@ -21,7 +21,6 @@ import (
 	webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
 	"go.opentelemetry.io/otel/trace"
 	admissionv1 "k8s.io/api/admission/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -79,17 +78,6 @@ func (v *validationHandler) HandleValidation(
 	resourceName := admissionutils.GetResourceName(request.AdmissionRequest)
 	logger := v.log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind)
 
-	var deletionTimeStamp *metav1.Time
-	if resource := policyContext.NewResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	} else if resource := policyContext.OldResource(); resource.Object != nil {
-		deletionTimeStamp = resource.GetDeletionTimestamp()
-	}
-
-	if deletionTimeStamp != nil && request.Operation == admissionv1.Update {
-		return true, "", nil
-	}
-
 	var engineResponses []engineapi.EngineResponse
 	failurePolicy := kyvernov1.Ignore
 	for _, policy := range policies {
@@ -124,10 +112,8 @@ func (v *validationHandler) HandleValidation(
 	}
 
 	blocked := webhookutils.BlockRequest(engineResponses, failurePolicy, logger)
-	if deletionTimeStamp == nil {
-		events := webhookutils.GenerateEvents(engineResponses, blocked)
-		v.eventGen.Add(events...)
-	}
+	events := webhookutils.GenerateEvents(engineResponses, blocked)
+	v.eventGen.Add(events...)
 
 	if blocked {
 		logger.V(4).Info("admission request blocked")

@@ -0,0 +1,6 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+apply:
+- policy.yaml
+assert:
+- policy-ready.yaml

@@ -0,0 +1,7 @@
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+apply:
+- ns.yaml
+- service.yaml
+assert:
+- service.yaml

@@ -0,0 +1,7 @@
+# This clean-up stage is necessary because of https://github.com/kyverno/kyverno/issues/5101
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+    kubectl patch service podinfo -p '{"metadata":{"finalizers":["bburky.com/hax"]}}' -n apply-on-deletion-ns
+    kubectl delete service podinfo --wait=false -n apply-on-deletion-ns

@@ -0,0 +1,13 @@
+## Checks that the service CANNOT be patched to type NodePort while its deletion is pending. If the patch succeeds, fail the test.
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+    if kubectl patch service podinfo -p '{"spec":{"type":"NodePort","ports":[{"port":9898,"nodePort":32000}]}}' -n apply-on-deletion-ns
+    then
+      echo "Test failed. The service type cannot be changed to NodePort"
+      exit 1
+    else
+      echo "Test succeeded. The service update is blocked"
+      exit 0
+    fi

@@ -0,0 +1,26 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo
+  namespace: apply-on-deletion-ns
+  labels:
+    name: podinfo
+    namespace: apply-on-deletion-ns
+spec:
+  internalTrafficPolicy: Cluster
+  ipFamilies:
+  - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+  - name: http
+    port: 9898
+    protocol: TCP
+    targetPort: http
+  - name: grpc
+    port: 9999
+    protocol: TCP
+    targetPort: grpc
+  selector:
+    app: podinfo
+  sessionAffinity: None
+  type: ClusterIP

@@ -0,0 +1,6 @@
+## Removes the bogus finalizer so the pending deletion of the service can complete (cleanup).
+apiVersion: kuttl.dev/v1beta1
+kind: TestStep
+commands:
+- script: |
+    kubectl patch service podinfo -p '{"metadata":{"finalizers":null}}' -n apply-on-deletion-ns

@@ -0,0 +1,11 @@
+## Description
+
+This test ensures the policy is applied to a resource that is pending deletion (deletionTimestamp is set).
+
+## Expected Behavior
+
+A bogus finalizer is added to the service, so its deletion is blocked because no controller handles that finalizer. While the service is in this terminating state, a patch that violates the policy should be blocked, and a patch that does not cause a violation should be allowed.
+
+## Reference Issue(s)
+
+N/A

@@ -0,0 +1,4 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+  name: apply-on-deletion-ns

@@ -0,0 +1,9 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: cpol-apply-on-deletion
+status:
+  conditions:
+  - reason: Succeeded
+    status: "True"
+    type: Ready

@@ -0,0 +1,19 @@
+apiVersion: kyverno.io/v1
+kind: ClusterPolicy
+metadata:
+  name: cpol-apply-on-deletion
+spec:
+  validationFailureAction: Enforce
+  background: true
+  rules:
+  - name: validate-nodeport
+    match:
+      any:
+      - resources:
+          kinds:
+          - Service
+    validate:
+      message: "Services of type NodePort are not allowed."
+      pattern:
+        spec:
+          =(type): "!NodePort"

@@ -0,0 +1,23 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: podinfo
+  namespace: apply-on-deletion-ns
+spec:
+  internalTrafficPolicy: Cluster
+  ipFamilies:
+  - IPv4
+  ipFamilyPolicy: SingleStack
+  ports:
+  - name: http
+    port: 9898
+    protocol: TCP
+    targetPort: http
+  - name: grpc
+    port: 9999
+    protocol: TCP
+    targetPort: grpc
+  selector:
+    app: podinfo
+  sessionAffinity: None
+  type: ClusterIP