1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-13 19:28:55 +00:00

feat: apply generate rules on trigger events (#6508)

* - fire generation on trigger deletion, with condition rules;
- delete downstream if trigger no longer matches;
- delete downstream if trigger is deleted, with sync rule

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* trim condition key spaces

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* fix UR spec

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-create-on-trigger-deletion

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-create-on-trigger-deletion

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-data-sync-delete-trigger

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-data-nosync-delete-trigger

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-data-sync-update-trigger-no-match

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* rename policy

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-data-nosync-update-trigger-no-match

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* fix

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add debug logs

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-clone-create-on-trigger-deletion

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* update readme

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* fix

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-clone-sync-delete-trigger

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-clone-nosync-delete-trigger

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-clone-sync-update-trigger-no-match

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* update readme

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* add a kuttl test cpol-clone-nosync-update-trigger-no-match

Signed-off-by: ShutingZhao <shuting@nirmata.com>

---------

Signed-off-by: ShutingZhao <shuting@nirmata.com>
This commit is contained in:
shuting 2023-03-11 01:17:10 +08:00 committed by GitHub
parent 0810290f26
commit 637f830917
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
102 changed files with 1126 additions and 518 deletions

View file

@ -24,10 +24,12 @@ import (
enginecontext "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/variables"
"github.com/kyverno/kyverno/pkg/event"
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
datautils "github.com/kyverno/kyverno/pkg/utils/data"
engineutils "github.com/kyverno/kyverno/pkg/utils/engine"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"golang.org/x/exp/slices"
admissionv1 "k8s.io/api/admission/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -88,17 +90,12 @@ func NewGenerateController(
func (c *GenerateController) ProcessUR(ur *kyvernov1beta1.UpdateRequest) error {
logger := c.log.WithValues("name", ur.GetName(), "policy", ur.Spec.GetPolicyKey(), "resource", ur.Spec.GetResource().String())
var err error
var resource *unstructured.Unstructured
var genResources []kyvernov1.ResourceSpec
logger.Info("start processing UR", "ur", ur.Name, "resourceVersion", ur.GetResourceVersion())
// 1 - Check if the trigger exists
resource, err = common.GetResource(c.client, ur.Spec, c.log)
trigger, err := c.getTrigger(ur.Spec)
if err != nil {
// Don't update status
// re-queueing the UR by updating the annotation
// retry - 5 times
logger.V(3).Info("resource does not exist or is pending creation, re-queueing", "details", err.Error())
logger.V(3).Info("the trigger resource does not exist or is pending creation, re-queueing", "details", err.Error())
retry, urAnnotations, err := increaseRetryAnnotation(ur)
if err != nil {
return err
@ -109,24 +106,22 @@ func (c *GenerateController) ProcessUR(ur *kyvernov1beta1.UpdateRequest) error {
logger.Error(err, "exceeds retry limit, failed to delete the UR", "update request", ur.Name, "retry", retry, "resourceVersion", ur.GetResourceVersion())
return err
}
}
ur.SetAnnotations(urAnnotations)
_, err = c.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Update(context.TODO(), ur, metav1.UpdateOptions{})
if err != nil {
logger.Error(err, "failed to update annotation in update request for the resource", "update request", ur.Name, "resourceVersion", ur.GetResourceVersion(), "annotations", urAnnotations, "retry", retry)
return err
} else {
ur.SetAnnotations(urAnnotations)
_, err = c.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Update(context.TODO(), ur, metav1.UpdateOptions{})
if err != nil {
logger.Error(err, "failed to update annotation in update request for the resource", "update request", ur.Name, "resourceVersion", ur.GetResourceVersion(), "annotations", urAnnotations, "retry", retry)
return err
}
}
}
// trigger resource is being terminated
if resource == nil {
if trigger == nil {
return nil
}
// 2 - Apply the generate policy on the resource
namespaceLabels := engineutils.GetNamespaceSelectorsFromNamespaceLister(resource.GetKind(), resource.GetNamespace(), c.nsLister, logger)
genResources, err = c.applyGenerate(*resource, *ur, namespaceLabels)
namespaceLabels := engineutils.GetNamespaceSelectorsFromNamespaceLister(trigger.GetKind(), trigger.GetNamespace(), c.nsLister, logger)
genResources, err = c.applyGenerate(*trigger, *ur, namespaceLabels)
if err != nil {
// Need not update the status when policy doesn't apply on resource, because all the update requests are removed by the cleanup controller
if strings.Contains(err.Error(), doesNotApply) {
@ -134,17 +129,34 @@ func (c *GenerateController) ProcessUR(ur *kyvernov1beta1.UpdateRequest) error {
return nil
}
// 3 - Report failure Events
events := event.NewBackgroundFailedEvent(err, ur.Spec.Policy, "", event.GeneratePolicyController, resource)
events := event.NewBackgroundFailedEvent(err, ur.Spec.Policy, "", event.GeneratePolicyController, trigger)
c.eventGen.Add(events...)
}
// 4 - Update Status
return updateStatus(c.statusControl, *ur, err, genResources)
}
// doesNotApply is the error-message marker checked (via strings.Contains in
// ProcessUR) to detect that a policy no longer applies to the trigger
// resource; such errors are not recorded in the UR status because the
// cleanup controller removes the corresponding update requests.
const doesNotApply = "policy does not apply to resource"
// getTrigger resolves the trigger resource for an update request.
//
// For Delete admission operations the trigger is recovered from the admission
// request's old object: when that object does not carry the generate policy
// label it is the trigger itself and is returned directly; otherwise the
// deleted object is a downstream (non-trigger) resource, so the trigger is
// fetched from the UR spec instead. For every other operation the trigger is
// looked up from the cluster using the UR spec.
func (c *GenerateController) getTrigger(spec kyvernov1beta1.UpdateRequestSpec) (*unstructured.Unstructured, error) {
	// Non-delete operations: the trigger still exists in the cluster.
	if spec.Context.AdmissionRequestInfo.Operation != admissionv1.Delete {
		return common.GetResource(c.client, spec, c.log)
	}
	request := spec.Context.AdmissionRequestInfo.AdmissionRequest
	_, oldResource, err := admissionutils.ExtractResources(nil, request)
	if err != nil {
		return nil, fmt.Errorf("failed to load resource from context: %w", err)
	}
	labels := oldResource.GetLabels()
	if labels[common.GeneratePolicyLabel] != "" {
		// non-trigger deletion, get trigger from ur spec
		c.log.V(4).Info("non-trigger resource is deleted, fetching the trigger from the UR spec", "trigger", spec.Resource.String())
		return common.GetResource(c.client, spec, c.log)
	}
	return &oldResource, nil
}
func (c *GenerateController) applyGenerate(resource unstructured.Unstructured, ur kyvernov1beta1.UpdateRequest, namespaceLabels map[string]string) ([]kyvernov1.ResourceSpec, error) {
logger := c.log.WithValues("name", ur.GetName(), "policy", ur.Spec.GetPolicyKey(), "resource", ur.Spec.GetResource().String())
logger.V(3).Info("applying generate policy rule")
@ -324,7 +336,7 @@ func getResourceInfoForDataAndClone(rule kyvernov1.Rule) (kind, name, namespace,
return
}
func applyRule(log logr.Logger, client dclient.Interface, rule kyvernov1.Rule, resource unstructured.Unstructured, ctx enginecontext.EvalInterface, policy kyvernov1.PolicyInterface, ur kyvernov1beta1.UpdateRequest) ([]kyvernov1.ResourceSpec, error) {
func applyRule(log logr.Logger, client dclient.Interface, rule kyvernov1.Rule, trigger unstructured.Unstructured, ctx enginecontext.EvalInterface, policy kyvernov1.PolicyInterface, ur kyvernov1beta1.UpdateRequest) ([]kyvernov1.ResourceSpec, error) {
rdatas := []GenerateResponse{}
var cresp, dresp map[string]interface{}
var err error
@ -396,7 +408,7 @@ func applyRule(log logr.Logger, client dclient.Interface, rule kyvernov1.Rule, r
}
newResource.SetAPIVersion(rdata.GenAPIVersion)
common.ManageLabels(newResource, resource, policy, rule.Name)
common.ManageLabels(newResource, trigger, policy, rule.Name)
// Add Synchronize label
label := newResource.GetLabels()
@ -414,11 +426,8 @@ func applyRule(log logr.Logger, client dclient.Interface, rule kyvernov1.Rule, r
label[LabelSynchronize] = "disable"
}
// Reset resource version
newResource.SetResourceVersion("")
newResource.SetLabels(label)
// Create the resource
_, err = client.CreateResource(context.TODO(), rdata.GenAPIVersion, rdata.GenKind, rdata.GenNamespace, newResource, false)
if err != nil {
if !apierrors.IsAlreadyExists(err) {

View file

@ -16,6 +16,6 @@ const (
// checks.
RuleStatusError RuleStatus = "error"
// RuleStatusSkip indicates that the policy rule was not selected based on user inputs or applicability, for example
// when preconditions are not met, or when conditional or global anchors are not satistied.
// when preconditions are not met, or when conditional or global anchors are not satisfied.
RuleStatusSkip RuleStatus = "skip"
)

View file

@ -16,4 +16,6 @@ var (
RegexVariableInit = regexp.MustCompile(`^\{\{(\{[^{}]*\}|[^{}])*\}\}`)
RegexElementIndex = regexp.MustCompile(`{{\s*elementIndex\d*\s*}}`)
RegexVariableKey = regexp.MustCompile(`\{{(.*?)\}}`)
)

View file

@ -149,6 +149,17 @@ func NewPolicyExceptionEvents(engineResponse *engineapi.EngineResponse, ruleResp
return []Info{policyEvent, exceptionEvent}
}
// NewFailedEvent builds an event Info reporting that applying the given
// policy/rule to the resource failed with err.
func NewFailedEvent(err error, policy, rule string, source Source, resource kyvernov1.ResourceSpec) Info {
	message := fmt.Sprintf("policy %s/%s error: %v", policy, rule, err)
	info := Info{
		Kind:      resource.GetKind(),
		Namespace: resource.GetNamespace(),
		Name:      resource.GetName(),
		Source:    source,
		Reason:    PolicyError,
		Message:   message,
	}
	return info
}
func resourceKey(resource unstructured.Unstructured) string {
if resource.GetNamespace() != "" {
return strings.Join([]string{resource.GetKind(), resource.GetNamespace(), resource.GetName()}, "/")

View file

@ -27,7 +27,7 @@ import (
)
type GenerationHandler interface {
HandleNew(context.Context, *admissionv1.AdmissionRequest, []kyvernov1.PolicyInterface, *engine.PolicyContext)
Handle(context.Context, *admissionv1.AdmissionRequest, []kyvernov1.PolicyInterface, *engine.PolicyContext)
}
func NewGenerationHandler(
@ -75,7 +75,7 @@ type generationHandler struct {
metrics metrics.MetricsConfigManager
}
func (h *generationHandler) HandleNew(
func (h *generationHandler) Handle(
ctx context.Context,
request *admissionv1.AdmissionRequest,
policies []kyvernov1.PolicyInterface,
@ -89,16 +89,30 @@ func (h *generationHandler) HandleNew(
h.handleNonTrigger(ctx, policyContext, request)
}
// getAppliedRules returns the generate rules of the policy whose names match
// an applied Generation rule response.
//
// Fix: the inner loop variable previously shadowed the "applied" parameter,
// which made the code confusing to read; it is now named "response".
func getAppliedRules(policy kyvernov1.PolicyInterface, applied []engineapi.RuleResponse) []kyvernov1.Rule {
	rules := []kyvernov1.Rule{}
	for _, rule := range policy.GetSpec().Rules {
		// only generate rules are of interest here
		if !rule.HasGenerate() {
			continue
		}
		for _, response := range applied {
			if response.Name == rule.Name && response.Type == engineapi.Generation {
				rules = append(rules, rule)
			}
		}
	}
	return rules
}
func (h *generationHandler) handleTrigger(
ctx context.Context,
request *admissionv1.AdmissionRequest,
policies []kyvernov1.PolicyInterface,
policyContext *engine.PolicyContext,
) {
h.log.V(4).Info("handle trigger resource operation for generate")
var engineResponses []*engineapi.EngineResponse
h.log.V(4).Info("handle trigger resource operation for generate", "policies", len(policies))
for _, policy := range policies {
var appliedRules []engineapi.RuleResponse
var appliedRules, failedRules []engineapi.RuleResponse
policyContext := policyContext.WithPolicy(policy)
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
policyContext = policyContext.WithNamespaceLabels(engineutils.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, h.log))
@ -107,30 +121,17 @@ func (h *generationHandler) handleTrigger(
for _, rule := range engineResponse.PolicyResponse.Rules {
if rule.Status == engineapi.RuleStatusPass {
appliedRules = append(appliedRules, rule)
} else if rule.Status == engineapi.RuleStatusFail {
failedRules = append(failedRules, rule)
}
}
if len(appliedRules) > 0 {
engineResponse.PolicyResponse.Rules = appliedRules
// some generate rules do apply to the resource
engineResponses = append(engineResponses, engineResponse)
}
h.applyGeneration(ctx, request, policy, appliedRules, policyContext)
h.syncTriggerAction(ctx, request, policy, failedRules, policyContext)
// registering the kyverno_policy_results_total metric concurrently
go webhookutils.RegisterPolicyResultsMetricGeneration(ctx, h.log, h.metrics, string(request.Operation), policy, *engineResponse)
// registering the kyverno_policy_execution_duration_seconds metric concurrently
go webhookutils.RegisterPolicyExecutionDurationMetricGenerate(ctx, h.log, h.metrics, string(request.Operation), policy, *engineResponse)
}
if failedResponse := applyUpdateRequest(ctx, request, kyvernov1beta1.Generate, h.urGenerator, policyContext.AdmissionInfo(), request.Operation, engineResponses...); failedResponse != nil {
// report failure event
for _, failedUR := range failedResponse {
err := fmt.Errorf("failed to create Update Request: %v", failedUR.err)
newResource := policyContext.NewResource()
e := event.NewBackgroundFailedEvent(err, failedUR.ur.Policy, "", event.GeneratePolicyController, &newResource)
h.eventGen.Add(e...)
}
}
}
func (h *generationHandler) handleNonTrigger(
@ -148,6 +149,97 @@ func (h *generationHandler) handleNonTrigger(
}
}
// applyGeneration creates one update request per applied generate rule so the
// background controller generates the downstream resources for the trigger.
// UR creation failures are logged and reported as failure events against the
// policy; they do not abort the remaining rules.
func (h *generationHandler) applyGeneration(
	ctx context.Context,
	request *admissionv1.AdmissionRequest,
	policy kyvernov1.PolicyInterface,
	appliedRules []engineapi.RuleResponse,
	policyContext *engine.PolicyContext,
) {
	if len(appliedRules) == 0 {
		return
	}
	policyKey := common.PolicyKey(policy.GetNamespace(), policy.GetName())
	newResource := policyContext.NewResource()
	resource := kyvernov1.ResourceSpec{
		APIVersion: newResource.GetAPIVersion(),
		Kind:       newResource.GetKind(),
		Namespace:  newResource.GetNamespace(),
		Name:       newResource.GetName(),
	}
	for _, rule := range getAppliedRules(policy, appliedRules) {
		h.log.V(4).Info("creating the UR to generate downstream on trigger's operation", "operation", request.Operation, "rule", rule.Name)
		spec := buildURSpec(kyvernov1beta1.Generate, policyKey, rule.Name, resource, false)
		spec.Context = buildURContext(request, policyContext)
		if err := h.urGenerator.Apply(ctx, spec); err != nil {
			h.log.Error(err, "failed to create the UR to create downstream on trigger's operation", "operation", request.Operation, "rule", rule.Name)
			failure := event.NewFailedEvent(err, policyKey, rule.Name, event.GeneratePolicyController,
				kyvernov1.ResourceSpec{Kind: policy.GetKind(), Namespace: policy.GetNamespace(), Name: policy.GetName()})
			h.eventGen.Add(failure)
		}
	}
}
// syncTriggerAction syncs changes of the trigger to the downstream resources
// when a generate rule fails on the trigger. Two situations are handled:
//  1. the trigger is deleted and the rule has a matching
//     request.operation==DELETE precondition — fire generation;
//  2. the rule has sync enabled — create a UR that deletes the downstream
//     (trigger deleted, or trigger no longer matches).
func (h *generationHandler) syncTriggerAction(
	ctx context.Context,
	request *admissionv1.AdmissionRequest,
	policy kyvernov1.PolicyInterface,
	failedRules []engineapi.RuleResponse,
	policyContext *engine.PolicyContext,
) {
	if len(failedRules) == 0 {
		return
	}
	pKey := common.PolicyKey(policy.GetNamespace(), policy.GetName())
	// on deletion the trigger is only available as the old resource
	trigger := policyContext.OldResource()
	urSpec := kyvernov1.ResourceSpec{
		APIVersion: trigger.GetAPIVersion(),
		Kind:       trigger.GetKind(),
		Namespace:  trigger.GetNamespace(),
		Name:       trigger.GetName(),
	}
	rules := getAppliedRules(policy, failedRules)
	for _, rule := range rules {
		// fire generation on trigger deletion: the rule must explicitly
		// declare a precondition request.operation == DELETE
		if (request.Operation == admissionv1.Delete) && precondition(rule, kyvernov1.Condition{
			RawKey:   kyvernov1.ToJSON("request.operation"),
			Operator: "Equals",
			RawValue: kyvernov1.ToJSON("DELETE"),
		}) {
			h.log.V(4).Info("creating the UR to generate downstream on trigger's deletion", "operation", request.Operation, "rule", rule.Name)
			ur := buildURSpec(kyvernov1beta1.Generate, pKey, rule.Name, urSpec, false)
			ur.Context = buildURContext(request, policyContext)
			if err := h.urGenerator.Apply(ctx, ur); err != nil {
				h.log.Error(err, "failed to create the UR to generate downstream on trigger's deletion", "operation", request.Operation, "rule", rule.Name)
				e := event.NewFailedEvent(err, pKey, rule.Name, event.GeneratePolicyController,
					kyvernov1.ResourceSpec{Kind: policy.GetKind(), Namespace: policy.GetNamespace(), Name: policy.GetName()})
				h.eventGen.Add(e)
			}
			continue
		}
		// delete downstream on the trigger's event (deletion or no longer
		// matching), but only when the rule has synchronization enabled
		if rule.Generation.Synchronize {
			h.log.V(4).Info("creating the UR to delete downstream on trigger's event", "operation", request.Operation, "rule", rule.Name)
			ur := buildURSpec(kyvernov1beta1.Generate, pKey, rule.Name, urSpec, true)
			ur.Context = buildURContext(request, policyContext)
			if err := h.urGenerator.Apply(ctx, ur); err != nil {
				h.log.Error(err, "failed to create the UR to delete downstream on trigger's event", "operation", request.Operation, "rule", rule.Name)
				e := event.NewFailedEvent(err, pKey, rule.Name, event.GeneratePolicyController,
					kyvernov1.ResourceSpec{Kind: policy.GetKind(), Namespace: policy.GetNamespace(), Name: policy.GetName()})
				h.eventGen.Add(e)
			}
		}
	}
}
func (h *generationHandler) createUR(ctx context.Context, policyContext *engine.PolicyContext, request *admissionv1.AdmissionRequest) (err error) {
var policy kyvernov1.PolicyInterface
new := policyContext.NewResource()
@ -183,15 +275,8 @@ func (h *generationHandler) createUR(ctx context.Context, policyContext *engine.
pKey := common.PolicyKey(pNamespace, pName)
for _, rule := range policy.GetSpec().Rules {
if rule.Name == pRuleName && rule.Generation.Synchronize {
ur := kyvernov1beta1.UpdateRequestSpec{
Type: kyvernov1beta1.Generate,
Policy: pKey,
Rule: rule.Name,
Resource: generateutils.TriggerFromLabels(labels),
}
ur.DeleteDownstream = deleteDownstream
if err := h.urGenerator.Apply(ctx, ur, admissionv1.Update); err != nil {
ur := buildURSpec(kyvernov1beta1.Generate, pKey, rule.Name, generateutils.TriggerFromLabels(labels), deleteDownstream)
if err := h.urGenerator.Apply(ctx, ur); err != nil {
e := event.NewBackgroundFailedEvent(err, pKey, pRuleName, event.GeneratePolicyController, &new)
h.eventGen.Add(e...)
return err
@ -200,18 +285,3 @@ func (h *generationHandler) createUR(ctx context.Context, policyContext *engine.
}
return nil
}
// compareLabels reports whether the generate-bookkeeping labels on the new
// resource match those on the old one. A nil new label set is treated as
// unchanged and returns true.
//
// Fix: parameter "new" shadowed the builtin; renamed to newLabels/oldLabels.
func compareLabels(newLabels, oldLabels map[string]string) bool {
	if newLabels == nil {
		return true
	}
	if newLabels[common.GeneratePolicyLabel] != oldLabels[common.GeneratePolicyLabel] ||
		newLabels[common.GeneratePolicyNamespaceLabel] != oldLabels[common.GeneratePolicyNamespaceLabel] ||
		newLabels[common.GenerateRuleLabel] != oldLabels[common.GenerateRuleLabel] ||
		newLabels[common.GenerateTriggerNameLabel] != oldLabels[common.GenerateTriggerNameLabel] ||
		newLabels[common.GenerateTriggerNSLabel] != oldLabels[common.GenerateTriggerNSLabel] ||
		newLabels[common.GenerateTriggerKindLabel] != oldLabels[common.GenerateTriggerKindLabel] {
		return false
	}
	return true
}

View file

@ -1,130 +1,81 @@
package generation
import (
"context"
"reflect"
"strings"
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/background/generate"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
"github.com/kyverno/kyverno/pkg/background/common"
"github.com/kyverno/kyverno/pkg/engine"
utils "github.com/kyverno/kyverno/pkg/engine/utils"
"github.com/kyverno/kyverno/pkg/engine/variables/regex"
admissionv1 "k8s.io/api/admission/v1"
apiextv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
)
// stripNonPolicyFields - remove fields which get updated with each request by kyverno and are non-policy fields
func stripNonPolicyFields(obj, newRes map[string]interface{}, logger logr.Logger) (map[string]interface{}, map[string]interface{}) {
if metadata, found := obj["metadata"]; found {
requiredMetadataInObj := make(map[string]interface{})
if annotations, found := metadata.(map[string]interface{})["annotations"]; found {
delete(annotations.(map[string]interface{}), "kubectl.kubernetes.io/last-applied-configuration")
requiredMetadataInObj["annotations"] = annotations
}
if labels, found := metadata.(map[string]interface{})["labels"]; found {
delete(labels.(map[string]interface{}), generate.LabelClonePolicyName)
requiredMetadataInObj["labels"] = labels
}
obj["metadata"] = requiredMetadataInObj
func buildURSpec(requestType kyvernov1beta1.RequestType, policyKey, ruleName string, resource kyvernov1.ResourceSpec, deleteDownstream bool) kyvernov1beta1.UpdateRequestSpec {
return kyvernov1beta1.UpdateRequestSpec{
Type: requestType,
Policy: policyKey,
Rule: ruleName,
Resource: resource,
DeleteDownstream: deleteDownstream,
}
if metadata, found := newRes["metadata"]; found {
requiredMetadataInNewRes := make(map[string]interface{})
if annotations, found := metadata.(map[string]interface{})["annotations"]; found {
requiredMetadataInNewRes["annotations"] = annotations
}
if labels, found := metadata.(map[string]interface{})["labels"]; found {
requiredMetadataInNewRes["labels"] = labels
}
newRes["metadata"] = requiredMetadataInNewRes
}
delete(obj, "status")
if _, found := obj["spec"]; found {
delete(obj["spec"].(map[string]interface{}), "tolerations")
}
if dataMap, found := obj["data"]; found {
keyInData := make([]string, 0)
switch dataMap := dataMap.(type) {
case map[string]interface{}:
for k := range dataMap {
keyInData = append(keyInData, k)
}
}
if len(keyInData) > 0 {
for _, dataKey := range keyInData {
originalResourceData := dataMap.(map[string]interface{})[dataKey]
replaceData := strings.Replace(originalResourceData.(string), "\n", "", -1)
dataMap.(map[string]interface{})[dataKey] = replaceData
newResourceData := newRes["data"].(map[string]interface{})[dataKey]
replacenewResourceData := strings.Replace(newResourceData.(string), "\n", "", -1)
newRes["data"].(map[string]interface{})[dataKey] = replacenewResourceData
}
} else {
logger.V(4).Info("data is not of type map[string]interface{}")
}
}
return obj, newRes
}
// updateRequestResponse pairs an update request spec with the error (if any)
// returned while submitting it through the generator.
type updateRequestResponse struct {
	ur  kyvernov1beta1.UpdateRequestSpec
	err error
}
// applyUpdateRequest converts each engine response into an update request
// spec and submits it via the generator; specs that could not be applied are
// returned together with their errors (nil when all succeed).
func applyUpdateRequest(
	ctx context.Context,
	request *admissionv1.AdmissionRequest,
	ruleType kyvernov1beta1.RequestType,
	urGenerator updaterequest.Generator,
	userRequestInfo kyvernov1beta1.RequestInfo,
	action admissionv1.Operation,
	engineResponses ...*engineapi.EngineResponse,
) (failedUpdateRequest []updateRequestResponse) {
	admissionRequestInfo := kyvernov1beta1.AdmissionRequestInfoObject{
		AdmissionRequest: request,
		Operation:        action,
	}
	for _, response := range engineResponses {
		spec := transform(admissionRequestInfo, userRequestInfo, response, ruleType)
		if err := urGenerator.Apply(ctx, spec, action); err != nil {
			failedUpdateRequest = append(failedUpdateRequest, updateRequestResponse{ur: spec, err: err})
		}
	}
	return failedUpdateRequest
}
func transform(admissionRequestInfo kyvernov1beta1.AdmissionRequestInfoObject, userRequestInfo kyvernov1beta1.RequestInfo, er *engineapi.EngineResponse, ruleType kyvernov1beta1.RequestType) kyvernov1beta1.UpdateRequestSpec {
var PolicyNameNamespaceKey string
if er.Policy.GetNamespace() != "" {
PolicyNameNamespaceKey = er.Policy.GetNamespace() + "/" + er.Policy.GetName()
} else {
PolicyNameNamespaceKey = er.Policy.GetName()
}
ur := kyvernov1beta1.UpdateRequestSpec{
Type: ruleType,
Policy: PolicyNameNamespaceKey,
Resource: kyvernov1.ResourceSpec{
Kind: er.Resource.GetKind(),
Namespace: er.Resource.GetNamespace(),
Name: er.Resource.GetName(),
APIVersion: er.Resource.GetAPIVersion(),
},
Context: kyvernov1beta1.UpdateRequestSpecContext{
UserRequestInfo: userRequestInfo,
AdmissionRequestInfo: admissionRequestInfo,
// buildURContext builds the UpdateRequestSpecContext from the admission
// request and the policy context's admission user info.
//
// Fix: a stray "return ur" line (residue of the removed transform function)
// was interleaved into the visible span and would not compile; it is removed
// and the function restored to a complete definition.
func buildURContext(request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext) kyvernov1beta1.UpdateRequestSpecContext {
	return kyvernov1beta1.UpdateRequestSpecContext{
		UserRequestInfo: policyContext.AdmissionInfo(),
		AdmissionRequestInfo: kyvernov1beta1.AdmissionRequestInfoObject{
			AdmissionRequest: request,
			Operation:        request.Operation,
		},
	}
}
// precondition reports whether the rule declares a precondition that, after
// trimming whitespace in its key, deep-equals the expected condition. Both
// any- and all-conditions are inspected; conditions that fail to transform
// yield false.
//
// Fix: the local variable "copy" shadowed the builtin; renamed to "cond".
func precondition(rule kyvernov1.Rule, expected kyvernov1.Condition) bool {
	conditions, err := utils.TransformConditions(rule.GetAnyAllConditions())
	if err != nil {
		return false
	}
	var conditionsAll []kyvernov1.Condition
	switch typedConditions := conditions.(type) {
	case kyvernov1.AnyAllConditions:
		conditionsAll = append(typedConditions.AllConditions, typedConditions.AnyConditions...)
	case []kyvernov1.Condition:
		conditionsAll = typedConditions
	}
	for _, condition := range conditionsAll {
		cond := condition.DeepCopy()
		// normalize the key ({{ request.operation }} -> request.operation)
		// before comparing against the expected condition
		cond.RawKey = trimKeySpaces(condition.RawKey)
		if reflect.DeepEqual(*cond, expected) {
			return true
		}
	}
	return false
}
// trimKeySpaces extracts the first {{...}} variable from rawKey and returns
// its inner expression as JSON with surrounding whitespace removed; when no
// variable is present an empty JSON string is returned.
func trimKeySpaces(rawKey *apiextv1.JSON) *apiextv1.JSON {
	matches := regex.RegexVariableKey.FindAllStringSubmatch(string(rawKey.Raw), -1)
	if len(matches) == 0 {
		return kyvernov1.ToJSON("")
	}
	return kyvernov1.ToJSON(strings.TrimSpace(matches[0][1]))
}
// compareLabels reports whether the generate-bookkeeping labels on the new
// resource match those on the old one. A nil new label set is treated as
// unchanged and returns true.
//
// Fix: parameter "new" shadowed the builtin; renamed to newLabels/oldLabels.
func compareLabels(newLabels, oldLabels map[string]string) bool {
	if newLabels == nil {
		return true
	}
	if newLabels[common.GeneratePolicyLabel] != oldLabels[common.GeneratePolicyLabel] ||
		newLabels[common.GeneratePolicyNamespaceLabel] != oldLabels[common.GeneratePolicyNamespaceLabel] ||
		newLabels[common.GenerateRuleLabel] != oldLabels[common.GenerateRuleLabel] ||
		newLabels[common.GenerateTriggerNameLabel] != oldLabels[common.GenerateTriggerNameLabel] ||
		newLabels[common.GenerateTriggerNSLabel] != oldLabels[common.GenerateTriggerNSLabel] ||
		newLabels[common.GenerateTriggerKindLabel] != oldLabels[common.GenerateTriggerKindLabel] {
		return false
	}
	return true
}

View file

@ -1,276 +0,0 @@
package generation
import (
"reflect"
"testing"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/background/generate"
"gotest.tools/assert"
)
// Test_updateFeildsInSourceAndUpdatedResource verifies that
// stripNonPolicyFields removes kyverno-managed and server-populated fields
// (last-applied annotation, clone-policy label, creationTimestamp,
// managedFields, status, spec.tolerations) from the source object and the
// updated resource, and flattens newlines inside "data" values, so that the
// two can be compared for drift.
// NOTE(review): "Feilds" in the test name is a typo for "Fields".
func Test_updateFeildsInSourceAndUpdatedResource(t *testing.T) {
	type TestCase struct {
		obj            map[string]interface{} // source object fed to stripNonPolicyFields
		newRes         map[string]interface{} // updated resource fed to stripNonPolicyFields
		expectedObj    map[string]interface{} // expected stripped source
		expectedNewRes map[string]interface{} // expected stripped updated resource
	}
	testcases := []TestCase{
		// case 1: ConfigMap — last-applied annotation and clone-policy label
		// are removed from obj, and the "ca" data value has newlines flattened
		{
			obj: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"ca": "-----BEGIN CERTIFICATE-----\nMIID5zCCAs+gAwIBAgIUCl6BKlpe2QiS5IQby6QOW7vexMwwDQYJKoZIhvcNAQEL\nBQAwgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UEBwwEVG93bjEQ\n-----END CERTIFICATE-----",
				},
				"kind": "ConfigMap",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{
						"imageregistry": "https://hub.docker.com/",
						"kubectl.kubernetes.io/last-applied-configuration": `{"apiVersion":"v1","data":{"ca":"-----BEGIN CERTIFICATE-----\nMIID5zCCAs+gAwIBAgIUCl6BKlpe2QiS5IQby6QOW7vexMwwDQYJKoZIhvcNAQEL\nBQAwgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UEBwwEVG93bjEQ\n-----END CERTIFICATE-----"},"kind":"ConfigMap","metadata":{"annotations":{"imageregistry":"https://hub.docker.com/"},"name":"corp-ca-cert","namespace":"default"}}`,
					},
					"creationTimestamp": "2021-01-09T12:37:26Z",
					"labels":            map[string]interface{}{generate.LabelClonePolicyName: "generate-policy"},
					"managedFields": map[string]interface{}{
						"apiVersion": "v1",
						"fieldsType": "FieldsV1",
					},
				},
			},
			newRes: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"ca": "-----BEGIN CERTIFICATE-----\nMIID5zCCAs+gAwIBAgIUCl6BKlpe2QiS5IQby6QOW7vexMwwDQYJKoZIhvcNAQEL\nBQAwgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UEBwwEVG93bjEQ\n-----END CERTIFICATE-----",
				},
				"kind": "ConfigMap",
				"metadata": map[string]interface{}{
					"creationTimestamp": "2021-01-09T12:37:26Z",
					"managedFields": map[string]interface{}{
						"apiVersion": "v1",
						"fieldsType": "FieldsV1",
					},
				},
			},
			expectedObj: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"ca": "-----BEGIN CERTIFICATE-----MIID5zCCAs+gAwIBAgIUCl6BKlpe2QiS5IQby6QOW7vexMwwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UEBwwEVG93bjEQ-----END CERTIFICATE-----",
				},
				"kind": "ConfigMap",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{
						"imageregistry": "https://hub.docker.com/",
					},
					"labels": map[string]interface{}{},
				},
			},
			expectedNewRes: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"ca": "-----BEGIN CERTIFICATE-----MIID5zCCAs+gAwIBAgIUCl6BKlpe2QiS5IQby6QOW7vexMwwDQYJKoZIhvcNAQELBQAwgYIxCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJDQTENMAsGA1UEBwwEVG93bjEQ-----END CERTIFICATE-----",
				},
				"kind":     "ConfigMap",
				"metadata": map[string]interface{}{},
			},
		},
		// case 2: Secret — UR/data-policy labels on newRes survive stripping;
		// only annotations/creationTimestamp/managedFields are dropped
		{
			obj: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"tls.crt": "MIIC2DCCAcCgAwIBAgIBATANBgkqh",
					"tls.key": "MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ",
				},
				"kind": "Secret",
				"type": "kubernetes.io/tls",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{
						"kubectl.kubernetes.io/last-applied-configuration": `{"apiVersion":"v1","data":{"tls.crt":"MIIC2DCCAcCgAwIBAgIBATANBgkqh","tls.key": "MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ"},"type": "kubernetes.io/tls","kind":"Secret"}`,
					},
					"creationTimestamp": "2021-01-09T12:37:26Z",
					"labels":            map[string]interface{}{generate.LabelClonePolicyName: "generate-policy"},
					"managedFields": map[string]interface{}{
						"apiVersion": "v1",
						"fieldsType": "FieldsV1",
					},
				},
			},
			newRes: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"tls.crt": "MIIC2DCCAcCgAwIBAgIBATANBgkqh",
					"tls.key": "MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ",
				},
				"kind": "Secret",
				"type": "kubernetes.io/tls",
				"metadata": map[string]interface{}{
					"annotations":       map[string]interface{}{},
					"creationTimestamp": "2021-01-09T12:37:26Z",
					"labels": map[string]interface{}{
						generate.LabelURName:         "gr-qmjr9",
						generate.LabelDataPolicyName: "generate-policy",
					},
					"managedFields": map[string]interface{}{
						"apiVersion": "v1",
						"fieldsType": "FieldsV1",
					},
				},
			},
			expectedObj: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"tls.crt": "MIIC2DCCAcCgAwIBAgIBATANBgkqh",
					"tls.key": "MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ",
				},
				"kind": "Secret",
				"type": "kubernetes.io/tls",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{},
					"labels":      map[string]interface{}{},
				},
			},
			expectedNewRes: map[string]interface{}{
				"apiVersion": "v1",
				"data": map[string]interface{}{
					"tls.crt": "MIIC2DCCAcCgAwIBAgIBATANBgkqh",
					"tls.key": "MIIEpgIBAAKCAQEA7yn3bRHQ5FHMQ",
				},
				"kind": "Secret",
				"type": "kubernetes.io/tls",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{},
					"labels": map[string]interface{}{
						generate.LabelURName:         "gr-qmjr9",
						generate.LabelDataPolicyName: "generate-policy",
					},
				},
			},
		},
		// case 3: Pod — status is removed from obj but kept on newRes;
		// spec (minus tolerations) is preserved on both
		{
			obj: map[string]interface{}{
				"apiVersion": "v1",
				"kind":       "Pod",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{
						"kubectl.kubernetes.io/last-applied-configuration": `{"apiVersion":"v1","kind":"Pod", ...}`,
					},
					"creationTimestamp": "2021-01-09T12:37:26Z",
					"labels":            map[string]interface{}{generate.LabelClonePolicyName: "generate-policy"},
					"managedFields": map[string]interface{}{
						"apiVersion": "v1",
						"fieldsType": "FieldsV1",
					},
				},
				"spec": map[string]interface{}{
					"containers": map[string]interface{}{
						"image":           "redis:5.0.4",
						"imagePullPolicy": "IfNotPresent",
						"name":            "redis",
					},
				},
				"status": map[string]interface{}{
					"conditions": map[string]interface{}{
						"lastProbeTime":      "null",
						"lastTransitionTime": "2021-01-19T13:09:14Z",
						"status":             "True",
						"type":               "Initialized",
					},
					"containerStatuses": map[string]interface{}{
						"containerID": `docker://55ad0787835e874b6762ad650af3d36c1`,
						"image":       "redis:5.0.4",
					},
				},
			},
			newRes: map[string]interface{}{
				"apiVersion": "v1",
				"kind":       "Pod",
				"metadata": map[string]interface{}{
					"annotations":       map[string]interface{}{},
					"creationTimestamp": "2021-01-09T12:37:26Z",
					"labels":            map[string]interface{}{},
					"managedFields": map[string]interface{}{
						"apiVersion": "v1",
						"fieldsType": "FieldsV1",
					},
				},
				"spec": map[string]interface{}{
					"containers": map[string]interface{}{
						"image":           "redis:5.0.4",
						"imagePullPolicy": "IfNotPresent",
						"name":            "redis",
					},
				},
				"status": map[string]interface{}{
					"conditions": map[string]interface{}{
						"lastProbeTime":      "null",
						"lastTransitionTime": "2021-01-19T13:09:14Z",
						"status":             "True",
						"type":               "Initialized",
					},
					"containerStatuses": map[string]interface{}{
						"containerID": `docker://55ad0787835e874b6762ad650af3d36c1`,
						"image":       "redis:5.0.4",
					},
				},
			},
			expectedObj: map[string]interface{}{
				"apiVersion": "v1",
				"kind":       "Pod",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{},
					"labels":      map[string]interface{}{},
				},
				"spec": map[string]interface{}{
					"containers": map[string]interface{}{
						"image":           "redis:5.0.4",
						"imagePullPolicy": "IfNotPresent",
						"name":            "redis",
					},
				},
			},
			expectedNewRes: map[string]interface{}{
				"apiVersion": "v1",
				"kind":       "Pod",
				"metadata": map[string]interface{}{
					"annotations": map[string]interface{}{},
					"labels":      map[string]interface{}{},
				},
				"spec": map[string]interface{}{
					"containers": map[string]interface{}{
						"image":           "redis:5.0.4",
						"imagePullPolicy": "IfNotPresent",
						"name":            "redis",
					},
				},
				"status": map[string]interface{}{
					"conditions": map[string]interface{}{
						"lastProbeTime":      "null",
						"lastTransitionTime": "2021-01-19T13:09:14Z",
						"status":             "True",
						"type":               "Initialized",
					},
					"containerStatuses": map[string]interface{}{
						"containerID": `docker://55ad0787835e874b6762ad650af3d36c1`,
						"image":       "redis:5.0.4",
					},
				},
			},
		},
	}
	for _, tc := range testcases {
		// strip both sides, then compare against the expected stripped maps
		o, n := stripNonPolicyFields(tc.obj, tc.newRes, logr.Discard())
		assert.Assert(t, reflect.DeepEqual(tc.expectedObj, o))
		assert.Assert(t, reflect.DeepEqual(tc.expectedNewRes, n))
	}
}

View file

@ -7,8 +7,6 @@ import (
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/background/generate"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
@ -24,7 +22,6 @@ import (
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
engineutils "github.com/kyverno/kyverno/pkg/utils/engine"
jsonutils "github.com/kyverno/kyverno/pkg/utils/json"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"github.com/kyverno/kyverno/pkg/webhooks"
"github.com/kyverno/kyverno/pkg/webhooks/resource/imageverification"
"github.com/kyverno/kyverno/pkg/webhooks/resource/mutation"
@ -141,9 +138,7 @@ func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *ad
return admissionutils.Response(request.UID, errors.New(msg), warnings...)
}
defer h.handleDelete(logger, request)
go h.handleBackgroundApplies(ctx, logger, request, policyContext, generatePolicies, mutatePolicies, startTime)
return admissionutils.ResponseSuccess(request.UID, warnings...)
}
@ -189,30 +184,6 @@ func (h *handlers) Mutate(ctx context.Context, logger logr.Logger, request *admi
return admissionutils.MutationResponse(request.UID, patch, warnings...)
}
func (h *handlers) handleDelete(logger logr.Logger, request *admissionv1.AdmissionRequest) {
if request.Operation == admissionv1.Delete {
resource, err := kubeutils.BytesToUnstructured(request.OldObject.Raw)
if err != nil {
logger.Error(err, "failed to convert object resource to unstructured format")
}
resLabels := resource.GetLabels()
if resLabels[kyvernov1.LabelAppManagedBy] == kyvernov1.ValueKyvernoApp {
urName := resLabels[generate.LabelURName]
ur, err := h.urLister.Get(urName)
if err != nil {
logger.Error(err, "failed to get update request", "name", urName)
return
}
if ur.Spec.GetRequestType() == kyvernov1beta1.Mutate {
return
}
h.urUpdater.UpdateAnnotation(logger, ur.GetName())
}
}
}
func filterPolicies(failurePolicy string, policies ...kyvernov1.PolicyInterface) []kyvernov1.PolicyInterface {
var results []kyvernov1.PolicyInterface
for _, policy := range policies {

View file

@ -73,5 +73,5 @@ func (h *handlers) handleMutateExisting(ctx context.Context, logger logr.Logger,
func (h *handlers) handleGenerate(ctx context.Context, logger logr.Logger, request *admissionv1.AdmissionRequest, generatePolicies []kyvernov1.PolicyInterface, policyContext *engine.PolicyContext, ts time.Time) {
gh := generation.NewGenerationHandler(logger, h.engine, h.client, h.kyvernoClient, h.nsLister, h.urLister, h.cpolLister, h.polLister, h.urGenerator, h.urUpdater, h.eventGen, h.metricsConfig)
go gh.HandleNew(ctx, request, generatePolicies, policyContext)
go gh.Handle(ctx, request, generatePolicies, policyContext)
}

View file

@ -61,7 +61,7 @@ func applyUpdateRequest(
for _, er := range engineResponses {
ur := transform(admissionRequestInfo, userRequestInfo, er, ruleType)
if err := urGenerator.Apply(ctx, ur, action); err != nil {
if err := urGenerator.Apply(ctx, ur); err != nil {
failedUpdateRequest = append(failedUpdateRequest, updateRequestResponse{ur: ur, err: err})
}
}

View file

@ -4,7 +4,6 @@ import (
"context"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
admissionv1 "k8s.io/api/admission/v1"
)
func NewFake() Generator {
@ -13,6 +12,6 @@ func NewFake() Generator {
type fakeGenerator struct{}
func (f *fakeGenerator) Apply(ctx context.Context, gr kyvernov1beta1.UpdateRequestSpec, action admissionv1.Operation) error {
func (f *fakeGenerator) Apply(ctx context.Context, gr kyvernov1beta1.UpdateRequestSpec) error {
return nil
}

View file

@ -11,14 +11,13 @@ import (
kyvernov1beta1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1beta1"
kyvernov1beta1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/config"
admissionv1 "k8s.io/api/admission/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
// Generator provides interface to manage update requests
type Generator interface {
Apply(context.Context, kyvernov1beta1.UpdateRequestSpec, admissionv1.Operation) error
Apply(context.Context, kyvernov1beta1.UpdateRequestSpec) error
}
// generator defines the implementation to manage update request resource
@ -39,11 +38,8 @@ func NewGenerator(client versioned.Interface, urInformer kyvernov1beta1informers
}
// Apply creates update request resource
func (g *generator) Apply(ctx context.Context, ur kyvernov1beta1.UpdateRequestSpec, action admissionv1.Operation) error {
func (g *generator) Apply(ctx context.Context, ur kyvernov1beta1.UpdateRequestSpec) error {
logger.V(4).Info("apply Update Request", "request", ur)
if action == admissionv1.Delete && ur.GetRequestType() == kyvernov1beta1.Generate {
return nil
}
go g.applyResource(context.TODO(), ur)
return nil
}

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-create-on-trigger-deletion
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,47 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
downstream: "cpol-clone-create-on-trigger-deletion-manifest-ns"
name: cpol-clone-create-on-trigger-deletion-trigger-ns
---
apiVersion: v1
kind: Namespace
metadata:
name: cpol-clone-create-on-trigger-deletion-manifest-ns
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: cpol-clone-create-on-trigger-deletion-secret
namespace: cpol-clone-create-on-trigger-deletion-manifest-ns
type: Opaque
---
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-create-on-trigger-deletion
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- Namespace
preconditions:
any:
- key: "{{ request.operation }}"
operator: Equals
value: DELETE
generate:
apiVersion: v1
kind: Secret
name: regcred
namespace: "{{request.object.metadata.labels.downstream}}"
synchronize: true
clone:
namespace: cpol-clone-create-on-trigger-deletion-manifest-ns
name: regcred

View file

@ -0,0 +1,7 @@
# Specifying the kind as `TestStep` enables additional step behaviors, such as this delete operation.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: Namespace
name: cpol-clone-create-on-trigger-deletion-trigger-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: Secret
metadata:
name: cpol-clone-create-on-trigger-deletion-secret
namespace: cpol-clone-create-on-trigger-deletion-manifest-ns

View file

@ -0,0 +1,11 @@
## Description
This is a corner case test to ensure a generate clone rule can be triggered on the deletion of the trigger resource.
## Expected Behavior
If the downstream resource is created, the test passes. If it is not created, the test fails.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/6398

View file

@ -0,0 +1,15 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-create-on-trigger-deletion
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready
---
apiVersion: v1
kind: ConfigMap
metadata:
name: test-org
namespace: cpol-create-on-trigger-deletion-ns

View file

@ -0,0 +1,40 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-create-on-trigger-deletion-ns
---
apiVersion: v1
kind: ConfigMap
metadata:
name: test-org
namespace: cpol-create-on-trigger-deletion-ns
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: cpol-create-on-trigger-deletion
spec:
rules:
- name: default-deny
match:
any:
- resources:
kinds:
- ConfigMap
preconditions:
any:
- key: "{{ request.operation }}"
operator: Equals
value: DELETE
generate:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
name: default-deny
namespace: "{{request.object.metadata.namespace}}"
synchronize: false
data:
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,8 @@
# Specifying the kind as `TestStep` enables additional step behaviors, such as this delete operation.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: ConfigMap
name: test-org
namespace: cpol-create-on-trigger-deletion-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,9 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny
namespace: cpol-create-on-trigger-deletion-ns
spec:
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,11 @@
## Description
This is a corner case test to ensure a generate data rule can be triggered on the deletion of the trigger resource.
## Expected Behavior
If the downstream resource is created, the test passes. If it is not created, the test fails.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/6398

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-nosync-delete-trigger-policy
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,35 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-clone-nosync-delete-trigger-ns
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: source-secret
namespace: cpol-clone-nosync-delete-trigger-ns
type: Opaque
---
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-nosync-delete-trigger-policy
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- ConfigMap
generate:
apiVersion: v1
kind: Secret
name: downstream-secret
namespace: "{{request.object.metadata.namespace}}"
synchronize: false
clone:
namespace: cpol-clone-nosync-delete-trigger-ns
name: source-secret

View file

@ -0,0 +1,6 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- trigger.yaml
assert:
- downstream.yaml

View file

@ -0,0 +1,8 @@
# Specifying the kind as `TestStep` enables additional step behaviors, such as this delete operation.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: ConfigMap
name: test-org
namespace: cpol-clone-nosync-delete-trigger-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that deletion of a trigger resource, with a generate clone declaration and sync disabled, does not result in the downstream resource's deletion.
## Expected Behavior
If the downstream resource is deleted, the test fails. If it remains, the test passes.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/2229

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: downstream-secret
namespace: cpol-clone-nosync-delete-trigger-ns
type: Opaque

View file

@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: test-org
namespace: cpol-clone-nosync-delete-trigger-ns

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-nosync-update-trigger-no-match-policy
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,38 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-clone-nosync-update-trigger-no-match-ns
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: source-secret
namespace: cpol-clone-nosync-update-trigger-no-match-ns
type: Opaque
---
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-nosync-update-trigger-no-match-policy
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- ConfigMap
selector:
matchLabels:
create-secret: "true"
generate:
apiVersion: v1
kind: Secret
name: downstream-secret
namespace: "{{request.object.metadata.namespace}}"
synchronize: false
clone:
namespace: cpol-clone-nosync-update-trigger-no-match-ns
name: source-secret

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-secret: "true"
name: test-org
namespace: cpol-clone-nosync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-secret: "false"
name: test-org
namespace: cpol-clone-nosync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that updates to a trigger which cause it to no longer match the rule, with a generate clone declaration and sync disabled, do not result in the downstream resource's deletion.
## Expected Behavior
If the downstream resource is deleted, the test fails. If it remains, the test passes.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/6507

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: downstream-secret
namespace: cpol-clone-nosync-update-trigger-no-match-ns
type: Opaque

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-sync-delete-trigger-policy
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,35 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-clone-sync-delete-trigger-ns
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: source-secret
namespace: cpol-clone-sync-delete-trigger-ns
type: Opaque
---
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-sync-delete-trigger-policy
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- ConfigMap
generate:
apiVersion: v1
kind: Secret
name: downstream-secret
namespace: "{{request.object.metadata.namespace}}"
synchronize: true
clone:
namespace: cpol-clone-sync-delete-trigger-ns
name: source-secret

View file

@ -0,0 +1,6 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- trigger.yaml
assert:
- downstream.yaml

View file

@ -0,0 +1,8 @@
# Specifying the kind as `TestStep` enables additional step behaviors, such as this delete operation.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: ConfigMap
name: test-org
namespace: cpol-clone-sync-delete-trigger-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
error:
- downstream.yaml

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that deletion of a trigger resource, with a generate clone declaration and sync enabled, results in the downstream resource's deletion.
## Expected Behavior
If the downstream resource is deleted, the test passes. If it remains, the test fails.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/2229

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: downstream-secret
namespace: cpol-clone-sync-delete-trigger-ns
type: Opaque

View file

@ -0,0 +1,6 @@
---
apiVersion: v1
kind: ConfigMap
metadata:
name: test-org
namespace: cpol-clone-sync-delete-trigger-ns

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-sync-update-trigger-no-match-policy
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,38 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-clone-sync-update-trigger-no-match-ns
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: source-secret
namespace: cpol-clone-sync-update-trigger-no-match-ns
type: Opaque
---
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-clone-sync-update-trigger-no-match-policy
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- ConfigMap
selector:
matchLabels:
create-secret: "true"
generate:
apiVersion: v1
kind: Secret
name: downstream-secret
namespace: "{{request.object.metadata.namespace}}"
synchronize: true
clone:
namespace: cpol-clone-sync-update-trigger-no-match-ns
name: source-secret

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-secret: "true"
name: test-org
namespace: cpol-clone-sync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-secret: "false"
name: test-org
namespace: cpol-clone-sync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
error:
- downstream.yaml

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that updates to a trigger which cause it to no longer match the rule, with a generate clone declaration and sync enabled, result in the downstream resource's deletion.
## Expected Behavior
If the downstream resource is deleted, the test passes. If it remains, the test fails.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/6507

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: downstream-secret
namespace: cpol-clone-sync-update-trigger-no-match-ns
type: Opaque

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-data-nosync-delete-trigger
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,24 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: cpol-data-nosync-delete-trigger
spec:
rules:
- name: default-deny
match:
any:
- resources:
kinds:
- ConfigMap
generate:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
name: default-deny
namespace: "{{request.object.metadata.namespace}}"
synchronize: false
data:
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,10 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-data-nosync-delete-trigger-ns
---
apiVersion: v1
kind: ConfigMap
metadata:
name: test-org
namespace: cpol-data-nosync-delete-trigger-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,8 @@
# Specifying the kind as `TestStep` enables additional step behaviors, such as this delete operation.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: ConfigMap
name: test-org
namespace: cpol-data-nosync-delete-trigger-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that deletion of a trigger resource, with a generate data declaration and sync disabled, doesn't result in the downstream resource's deletion.
## Expected Behavior
If the downstream resource is deleted, the test fails. If it remains, the test passes.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/2229

View file

@ -0,0 +1,9 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny
namespace: cpol-data-nosync-delete-trigger-ns
spec:
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-data-nosync-update-trigger-no-match
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,27 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: cpol-data-nosync-update-trigger-no-match
spec:
rules:
- name: default-deny
match:
any:
- resources:
kinds:
- ConfigMap
selector:
matchLabels:
create-netpol: "true"
generate:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
name: default-deny
namespace: "{{request.object.metadata.namespace}}"
synchronize: false
data:
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-data-nosync-update-trigger-no-match-ns
---
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-netpol: "true"
name: test-org
namespace: cpol-data-nosync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-netpol: "false"
name: test-org
namespace: cpol-data-nosync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that updates to a trigger which cause it to no longer match the rule, with a generate data declaration and sync disabled, do not result in the downstream resource's deletion.
## Expected Behavior
If the downstream resource remains, the test passes. If it is deleted, the test fails.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/6507

View file

@ -0,0 +1,9 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny
namespace: cpol-data-nosync-update-trigger-no-match-ns
spec:
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,15 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-data-sync-delete-trigger
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready
---
apiVersion: v1
kind: ConfigMap
metadata:
name: test-org
namespace: cpol-data-sync-delete-trigger-ns

View file

@ -0,0 +1,35 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-data-sync-delete-trigger-ns
---
apiVersion: v1
kind: ConfigMap
metadata:
name: test-org
namespace: cpol-data-sync-delete-trigger-ns
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: cpol-data-sync-delete-trigger
spec:
rules:
- name: default-deny
match:
any:
- resources:
kinds:
- ConfigMap
generate:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
name: default-deny
namespace: "{{request.object.metadata.namespace}}"
synchronize: true
data:
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,8 @@
# Specifying the kind as `TestStep` enables additional step behaviors, such as this delete operation.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: ConfigMap
name: test-org
namespace: cpol-data-sync-delete-trigger-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,9 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny
namespace: cpol-data-sync-delete-trigger-ns
spec:
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that deletion of a trigger resource, with a generate data declaration and sync enabled, results in the downstream resource's deletion.
## Expected Behavior
If the downstream resource is deleted, the test passes. If it remains, the test fails.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/2229

View file

@ -0,0 +1,9 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-data-sync-update-trigger-no-match
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,27 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: cpol-data-sync-update-trigger-no-match
spec:
rules:
- name: default-deny
match:
any:
- resources:
kinds:
- ConfigMap
selector:
matchLabels:
create-netpol: "true"
generate:
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
name: default-deny
namespace: "{{request.object.metadata.namespace}}"
synchronize: true
data:
spec:
podSelector: {}
policyTypes:
- Ingress
- Egress

View file

@ -0,0 +1,12 @@
apiVersion: v1
kind: Namespace
metadata:
name: cpol-data-sync-update-trigger-no-match-ns
---
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-netpol: "true"
name: test-org
namespace: cpol-data-sync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- downstream.yaml

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
labels:
create-netpol: "false"
name: test-org
namespace: cpol-data-sync-update-trigger-no-match-ns

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
error:
- downstream.yaml

View file

@ -0,0 +1,11 @@
## Description
This test checks to ensure that updates to a trigger which cause it to no longer match the rule, with a generate data declaration and sync enabled, result in the downstream resource's deletion.
## Expected Behavior
If the downstream resource is deleted, the test passes. If it remains, the test fails.
## Reference Issue(s)
https://github.com/kyverno/kyverno/issues/6507

View file

@ -0,0 +1,9 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
name: default-deny
namespace: cpol-data-sync-update-trigger-no-match-ns
spec:
policyTypes:
- Ingress
- Egress

View file

@ -1,7 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: poltest
name: pol-clone-sync-modify-downstream-ns
---
apiVersion: v1
data:
@ -9,5 +9,5 @@ data:
kind: Secret
metadata:
name: regcred
namespace: poltest
namespace: pol-clone-sync-modify-downstream-ns
type: Opaque

View file

@ -1,7 +1,7 @@
apiVersion: v1
kind: Namespace
metadata:
name: poltest
name: pol-clone-sync-modify-downstream-ns
---
apiVersion: v1
data:
@ -9,5 +9,5 @@ data:
kind: Secret
metadata:
name: regcred
namespace: poltest
namespace: pol-clone-sync-modify-downstream-ns
type: Opaque

View file

@ -1,8 +1,8 @@
apiVersion: kyverno.io/v2beta1
kind: Policy
metadata:
name: pol-sync-clone
namespace: poltest
name: pol-clone-sync-modify-downstream-policy
namespace: pol-clone-sync-modify-downstream-ns
status:
conditions:
- reason: Succeeded

View file

@ -1,8 +1,8 @@
apiVersion: kyverno.io/v2beta1
kind: Policy
metadata:
name: pol-sync-clone
namespace: poltest
name: pol-clone-sync-modify-downstream-policy
namespace: pol-clone-sync-modify-downstream-ns
spec:
rules:
- name: gen-zk
@ -15,8 +15,8 @@ spec:
apiVersion: v1
kind: Secret
name: myclonedsecret
namespace: poltest
namespace: pol-clone-sync-modify-downstream-ns
synchronize: true
clone:
namespace: poltest
namespace: pol-clone-sync-modify-downstream-ns
name: regcred

View file

@ -4,4 +4,4 @@ data:
kind: ConfigMap
metadata:
name: foo
namespace: poltest
namespace: pol-clone-sync-modify-downstream-ns

View file

@ -4,5 +4,5 @@ data:
kind: Secret
metadata:
name: myclonedsecret
namespace: poltest
namespace: pol-clone-sync-modify-downstream-ns
type: Opaque

View file

@ -4,5 +4,5 @@ data:
kind: Secret
metadata:
name: myclonedsecret
namespace: poltest
namespace: pol-clone-sync-modify-downstream-ns
type: Opaque

Some files were not shown because too many files have changed in this diff Show more