
refactor: event package (#6124)

* refactor: event package

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* more

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* kuttl tests

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* Update pkg/event/source.go

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Charles-Edouard Brétéché 2023-01-26 22:19:02 +01:00 committed by GitHub
parent 80750dc4d0
commit 39d5ceb00c
24 changed files with 239 additions and 109 deletions

View file

@@ -2,7 +2,6 @@ package cleanup
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
@@ -44,7 +43,7 @@ func New(
cpolLister: cpolLister,
polLister: polLister,
nsLister: nsLister,
recorder: newRecorder(client),
recorder: event.NewRecorder(event.CleanupController, client.GetEventsInterface()),
}
}
@@ -189,13 +188,26 @@ func (h *handlers) createEvent(policy kyvernov2alpha1.CleanupPolicyInterface, re
} else if policy.GetNamespace() != "" {
cleanuppol = policy.(*kyvernov2alpha1.CleanupPolicy)
}
switch err == nil {
case true:
msg := fmt.Sprintf("successfully cleaned up the target resource %v/%v/%v", resource.GetKind(), resource.GetNamespace(), resource.GetName())
h.recorder.Event(cleanuppol, corev1.EventTypeNormal, event.PolicyApplied.String(), msg)
case false:
msg := fmt.Sprintf("failed to clean up the target resource %v/%v/%v: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err.Error())
h.recorder.Event(cleanuppol, corev1.EventTypeWarning, event.PolicyError.String(), msg)
if err == nil {
h.recorder.Eventf(
cleanuppol,
corev1.EventTypeNormal,
string(event.PolicyApplied),
"successfully cleaned up the target resource %v/%v/%v",
resource.GetKind(),
resource.GetNamespace(),
resource.GetName(),
)
} else {
h.recorder.Eventf(
cleanuppol,
corev1.EventTypeWarning,
string(event.PolicyError),
"failed to clean up the target resource %v/%v/%v: %v",
resource.GetKind(),
resource.GetNamespace(),
resource.GetName(),
err.Error(),
)
}
}
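The switch from `Event` plus `fmt.Sprintf` to `Eventf` is what lets the `"fmt"` import disappear at the top of this file: message formatting now happens inside the recorder. For reference, both methods come from client-go's `record.EventRecorder`; a lightly abridged excerpt of that interface, as defined in recent client-go versions:

```go
package record // excerpt from k8s.io/client-go/tools/record

import "k8s.io/apimachinery/pkg/runtime"

type EventRecorder interface {
    // Event records an event with a pre-formatted message.
    Event(object runtime.Object, eventtype, reason, message string)
    // Eventf is like Event, but formats the message fmt.Sprintf-style.
    Eventf(object runtime.Object, eventtype, reason, messageFmt string, args ...interface{})
    // AnnotatedEventf is like Eventf, and also attaches the given annotations.
    AnnotatedEventf(object runtime.Object, annotations map[string]string, eventtype, reason, messageFmt string, args ...interface{})
}
```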

View file

@@ -5,22 +5,26 @@ import (
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned/scheme"
kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/controllers"
corev1 "k8s.io/api/core/v1"
errors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
// Generator generate events
type Generator struct {
const (
eventWorkQueueName = "kyverno-events"
workQueueRetryLimit = 3
)
// generator generates events
type generator struct {
client dclient.Interface
// list/get cluster policy
cpLister kyvernov1listers.ClusterPolicyLister
@@ -42,57 +46,43 @@ type Generator struct {
log logr.Logger
}
// Controller interface to generate event
type Controller interface {
controllers.Controller
Interface
}
// Interface to generate event
type Interface interface {
Add(infoList ...Info)
}
// NewEventGenerator to generate a new event controller
func NewEventGenerator(client dclient.Interface, cpInformer kyvernov1informers.ClusterPolicyInformer, pInformer kyvernov1informers.PolicyInformer, maxQueuedEvents int, log logr.Logger) *Generator {
gen := Generator{
func NewEventGenerator(
// source Source,
client dclient.Interface,
cpInformer kyvernov1informers.ClusterPolicyInformer,
pInformer kyvernov1informers.PolicyInformer,
maxQueuedEvents int,
log logr.Logger,
) Controller {
gen := generator{
client: client,
cpLister: cpInformer.Lister(),
pLister: pInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(rateLimiter(), eventWorkQueueName),
policyCtrRecorder: initRecorder(client, PolicyController, log),
admissionCtrRecorder: initRecorder(client, AdmissionController, log),
genPolicyRecorder: initRecorder(client, GeneratePolicyController, log),
mutateExistingRecorder: initRecorder(client, MutateExistingController, log),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), eventWorkQueueName),
policyCtrRecorder: NewRecorder(PolicyController, client.GetEventsInterface()),
admissionCtrRecorder: NewRecorder(AdmissionController, client.GetEventsInterface()),
genPolicyRecorder: NewRecorder(GeneratePolicyController, client.GetEventsInterface()),
mutateExistingRecorder: NewRecorder(MutateExistingController, client.GetEventsInterface()),
maxQueuedEvents: maxQueuedEvents,
log: log,
}
return &gen
}
func rateLimiter() workqueue.RateLimiter {
return workqueue.DefaultItemBasedRateLimiter()
}
func initRecorder(client dclient.Interface, eventSource Source, log logr.Logger) record.EventRecorder {
// Initialize Event Broadcaster
err := scheme.AddToScheme(scheme.Scheme)
if err != nil {
log.Error(err, "failed to add to scheme")
return nil
}
eventBroadcaster := record.NewBroadcaster()
eventInterface := client.GetEventsInterface()
eventBroadcaster.StartRecordingToSink(
&typedcorev1.EventSinkImpl{
Interface: eventInterface,
},
)
recorder := eventBroadcaster.NewRecorder(
scheme.Scheme,
corev1.EventSource{
Component: eventSource.String(),
},
)
return recorder
}
// Add queues an event for generation
func (gen *Generator) Add(infos ...Info) {
func (gen *generator) Add(infos ...Info) {
logger := gen.log
logger.V(3).Info("generating events", "count", len(infos))
@@ -113,7 +103,7 @@ func (gen *Generator) Add(infos ...Info) {
}
// Run begins generator
func (gen *Generator) Run(ctx context.Context, workers int) {
func (gen *generator) Run(ctx context.Context, workers int) {
logger := gen.log
defer utilruntime.HandleCrash()
@@ -126,12 +116,12 @@ func (gen *Generator) Run(ctx context.Context, workers int) {
<-ctx.Done()
}
func (gen *Generator) runWorker(ctx context.Context) {
func (gen *generator) runWorker(ctx context.Context) {
for gen.processNextWorkItem() {
}
}
func (gen *Generator) handleErr(err error, key interface{}) {
func (gen *generator) handleErr(err error, key interface{}) {
logger := gen.log
if err == nil {
gen.queue.Forget(key)
@@ -152,7 +142,7 @@ func (gen *Generator) handleErr(err error, key interface{}) {
}
}
func (gen *Generator) processNextWorkItem() bool {
func (gen *generator) processNextWorkItem() bool {
obj, shutdown := gen.queue.Get()
if shutdown {
return false
@@ -172,7 +162,7 @@ func (gen *Generator) processNextWorkItem() bool {
return true
}
func (gen *Generator) syncHandler(key Info) error {
func (gen *generator) syncHandler(key Info) error {
logger := gen.log
var robj runtime.Object
var err error
@@ -204,20 +194,20 @@ func (gen *Generator) syncHandler(key Info) error {
// if skip/pass, reason will be: NORMAL
// else reason will be: WARNING
eventType := corev1.EventTypeWarning
if key.Reason == PolicyApplied.String() || key.Reason == PolicySkipped.String() {
if key.Reason == PolicyApplied || key.Reason == PolicySkipped {
eventType = corev1.EventTypeNormal
}
// based on the source of event generation, use different event recorders
switch key.Source {
case AdmissionController:
gen.admissionCtrRecorder.Event(robj, eventType, key.Reason, key.Message)
gen.admissionCtrRecorder.Event(robj, eventType, string(key.Reason), key.Message)
case PolicyController:
gen.policyCtrRecorder.Event(robj, eventType, key.Reason, key.Message)
gen.policyCtrRecorder.Event(robj, eventType, string(key.Reason), key.Message)
case GeneratePolicyController:
gen.genPolicyRecorder.Event(robj, eventType, key.Reason, key.Message)
gen.genPolicyRecorder.Event(robj, eventType, string(key.Reason), key.Message)
case MutateExistingController:
gen.mutateExistingRecorder.Event(robj, eventType, key.Reason, key.Message)
gen.mutateExistingRecorder.Event(robj, eventType, string(key.Reason), key.Message)
default:
logger.Info("info.source not defined for the request")
}
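With `NewEventGenerator` now returning the `Controller` interface instead of `*Generator`, callers only ever see `Add` and `Run`. A minimal wiring sketch, assuming the surrounding kyverno packages and that the embedded `controllers.Controller` interface exposes the `Run` method shown above; the helper name, worker count, and queue size are illustrative, not values from this commit:

```go
package wiring

import (
    "context"

    "github.com/go-logr/logr"
    kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
    "github.com/kyverno/kyverno/pkg/clients/dclient"
    "github.com/kyverno/kyverno/pkg/event"
)

// startEventGenerator is a hypothetical helper: build the generator,
// start its workers, and queue an event through the exported interface.
func startEventGenerator(
    ctx context.Context,
    client dclient.Interface,
    cpInformer kyvernov1informers.ClusterPolicyInformer,
    pInformer kyvernov1informers.PolicyInformer,
    logger logr.Logger,
) {
    gen := event.NewEventGenerator(client, cpInformer, pInformer, 1000, logger)
    go gen.Run(ctx, 3)

    gen.Add(event.Info{
        Kind:    "ClusterPolicy",
        Name:    "require-labels",
        Reason:  event.PolicyApplied,
        Source:  event.AdmissionController,
        Message: "policy applied successfully",
    })
}
```

Returning the interface keeps the concrete generator unexported, so future changes to its internals stay invisible to callers.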

View file

@@ -10,15 +10,13 @@ import (
)
func NewPolicyFailEvent(source Source, reason Reason, engineResponse *response.EngineResponse, ruleResp *response.RuleResponse, blocked bool) Info {
msg := buildPolicyEventMessage(ruleResp, engineResponse.GetResourceSpec(), blocked)
return Info{
Kind: getPolicyKind(engineResponse.Policy),
Name: engineResponse.PolicyResponse.Policy.Name,
Namespace: engineResponse.PolicyResponse.Policy.Namespace,
Reason: reason.String(),
Reason: reason,
Source: source,
Message: msg,
Message: buildPolicyEventMessage(ruleResp, engineResponse.GetResourceSpec(), blocked),
}
}
@@ -64,7 +62,7 @@ func NewPolicyAppliedEvent(source Source, engineResponse *response.EngineRespons
Kind: getPolicyKind(engineResponse.Policy),
Name: engineResponse.PolicyResponse.Policy.Name,
Namespace: engineResponse.PolicyResponse.Policy.Namespace,
Reason: PolicyApplied.String(),
Reason: PolicyApplied,
Source: source,
Message: bldr.String(),
}
@@ -82,7 +80,7 @@ func NewResourceViolationEvent(source Source, reason Reason, engineResponse *res
Kind: resource.Kind,
Name: resource.Name,
Namespace: resource.Namespace,
Reason: reason.String(),
Reason: reason,
Source: source,
Message: bldr.String(),
}
@@ -99,7 +97,7 @@ func NewBackgroundFailedEvent(err error, policy, rule string, source Source, r *
Namespace: r.GetNamespace(),
Name: r.GetName(),
Source: source,
Reason: PolicyError.String(),
Reason: PolicyError,
Message: fmt.Sprintf("policy %s/%s error: %v", policy, rule, err),
})
@@ -118,7 +116,7 @@ func NewBackgroundSuccessEvent(policy, rule string, source Source, r *unstructur
Namespace: r.GetNamespace(),
Name: r.GetName(),
Source: source,
Reason: PolicyApplied.String(),
Reason: PolicyApplied,
Message: msg,
})
@@ -138,14 +136,14 @@ func NewPolicyExceptionEvents(engineResponse *response.EngineResponse, ruleResp
Kind: getPolicyKind(engineResponse.Policy),
Name: engineResponse.PolicyResponse.Policy.Name,
Namespace: engineResponse.PolicyResponse.Policy.Namespace,
Reason: PolicySkipped.String(),
Reason: PolicySkipped,
Message: policyMessage,
}
exceptionEvent := Info{
Kind: "PolicyException",
Name: exceptionName,
Namespace: exceptionNamespace,
Reason: PolicySkipped.String(),
Reason: PolicySkipped,
Message: exceptionMessage,
}
return []Info{policyEvent, exceptionEvent}

View file

@@ -6,5 +6,4 @@ func NewFake() Interface {
type fakeEventGenerator struct{}
func (f *fakeEventGenerator) Add(infoList ...Info) {
}
func (f *fakeEventGenerator) Add(infoList ...Info) {}

View file

@@ -1,15 +1,11 @@
package event
const eventWorkQueueName = "kyverno-events"
const workQueueRetryLimit = 3
// Info defines the event details
type Info struct {
Kind string
Name string
Namespace string
Reason string
Reason Reason
Message string
Source Source
}

View file

@@ -1,20 +1,11 @@
package event
// Reason types of Event Reasons
type Reason int
type Reason string
const (
PolicyViolation Reason = iota
PolicyApplied
PolicyError
PolicySkipped
PolicyViolation Reason = "PolicyViolation"
PolicyApplied Reason = "PolicyApplied"
PolicyError Reason = "PolicyError"
PolicySkipped Reason = "PolicySkipped"
)
func (r Reason) String() string {
return [...]string{
"PolicyViolation",
"PolicyApplied",
"PolicyError",
"PolicySkipped",
}[r]
}

View file

@@ -1,18 +1,26 @@
package cleanup
package event
import (
"github.com/kyverno/kyverno/pkg/client/clientset/versioned/scheme"
"github.com/kyverno/kyverno/pkg/clients/dclient"
corev1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
)
func newRecorder(client dclient.Interface) record.EventRecorder {
func NewRecorder(source Source, sink typedcorev1.EventInterface) record.EventRecorder {
utilruntime.Must(scheme.AddToScheme(scheme.Scheme))
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartStructuredLogging(0)
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: client.GetEventsInterface()})
return eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "cleanup-controller"})
eventBroadcaster.StartRecordingToSink(
&typedcorev1.EventSinkImpl{
Interface: sink,
},
)
return eventBroadcaster.NewRecorder(
scheme.Scheme,
corev1.EventSource{
Component: string(source),
},
)
}
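The constructor is now exported and takes its sink explicitly, so every controller builds its recorder the same way (the cleanup controller call at the top of this commit becomes `event.NewRecorder(event.CleanupController, client.GetEventsInterface())`). A small isolated sketch, using a fake clientset as the sink; the helper name and sample policy are illustrative:

```go
package example

import (
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"

    kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
    "github.com/kyverno/kyverno/pkg/event"
)

// recordSample wires NewRecorder to a fake events sink and emits one event.
func recordSample() {
    sink := fake.NewSimpleClientset().CoreV1().Events(metav1.NamespaceAll)
    recorder := event.NewRecorder(event.CleanupController, sink)

    pol := &kyvernov1.ClusterPolicy{
        ObjectMeta: metav1.ObjectMeta{Name: "require-labels"},
    }
    // Delivery is asynchronous: the broadcaster forwards events
    // to the sink in the background.
    recorder.Event(pol, corev1.EventTypeNormal, string(event.PolicyApplied), "policy applied")
}
```

Taking the sink as a parameter rather than a full `dclient.Interface` also keeps the function easy to exercise in tests.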

View file

@@ -1,24 +1,17 @@
package event
// Source of event generation
type Source int
type Source string
const (
// AdmissionController : event generated in admission-control webhook
AdmissionController Source = iota
AdmissionController Source = "kyverno-admission"
// PolicyController : event generated in policy-controller
PolicyController
PolicyController Source = "kyverno-scan"
// GeneratePolicyController : event generated in generate policyController
GeneratePolicyController
GeneratePolicyController Source = "kyverno-generate"
// MutateExistingController : event generated for mutateExisting policies
MutateExistingController
MutateExistingController Source = "kyverno-mutate"
// CleanupController : event generated for cleanup policies
CleanupController Source = "kyverno-cleanup"
)
func (s Source) String() string {
return [...]string{
"kyverno-admission",
"kyverno-scan",
"kyverno-generate",
"kyverno-mutate",
}[s]
}
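Beyond removing boilerplate, the string-typed `Source` fixes a latent bug: the old `String()` lookup table had only four entries, so the newly added fifth constant (`CleanupController`) could not have been represented and would have panicked at runtime. A self-contained illustration of that failure mode, with hypothetical names rather than the Kyverno code:

```go
package main

type source int

const (
    admission source = iota
    scan
    generate
    mutate
    cleanup // added later; the table below was never extended to match
)

func (s source) String() string {
    return [...]string{"kyverno-admission", "kyverno-scan", "kyverno-generate", "kyverno-mutate"}[s]
}

func main() {
    _ = cleanup.String() // panics: index out of range [4] with length 4
}
```

With string-typed constants the table disappears entirely: each constant carries its own representation, and adding `CleanupController` is a one-line change.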

View file

@@ -0,0 +1,6 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- policy.yaml
assert:
- policy-assert.yaml

View file

@@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- file: resource.yaml

View file

@@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- event-assert.yaml

View file

@@ -0,0 +1,11 @@
## Description
This test creates a policy and a resource.
A `PolicyApplied` event should be created.
## Steps
1. - Create a policy
- Assert the policy becomes ready
1. - Create a resource
1. - Assert a `PolicyApplied` event is created
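These steps correspond to the kuttl TestStep and assert files in this test's directory; assuming the kuttl kubectl plugin is installed, the suite can be run with `kubectl kuttl test` pointed at the tests' root directory.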

View file

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Event
metadata: {}
involvedObject:
apiVersion: kyverno.io/v1
kind: Policy
name: require-labels
type: Normal
reason: PolicyApplied
source:
component: kyverno-admission

View file

@@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: Policy
metadata:
name: require-labels
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: Policy
metadata:
name: require-labels
spec:
validationFailureAction: Enforce
background: false
rules:
- name: require-team
match:
any:
- resources:
kinds:
- ConfigMap
validate:
message: 'The label `team` is required.'
pattern:
metadata:
labels:
team: '?*'

View file

@@ -0,0 +1,7 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: foo
labels:
team: kyverno

View file

@@ -0,0 +1,6 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- policy.yaml
assert:
- policy-assert.yaml

View file

@@ -0,0 +1,5 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- file: resource.yaml
shouldFail: true

View file

@@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- event-assert.yaml

View file

@@ -0,0 +1,12 @@
## Description
This test creates a policy and a resource.
The resource is expected to be rejected.
A `PolicyViolation` event should be created.
## Steps
1. - Create a policy
- Assert the policy becomes ready
1. - Try to create a resource, expecting the creation to fail
1. - Assert a `PolicyViolation` event is created

View file

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Event
metadata: {}
involvedObject:
apiVersion: kyverno.io/v1
kind: Policy
name: require-labels
type: Warning
reason: PolicyViolation
source:
component: kyverno-admission

View file

@@ -0,0 +1,9 @@
apiVersion: kyverno.io/v1
kind: Policy
metadata:
name: require-labels
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1
kind: Policy
metadata:
name: require-labels
spec:
validationFailureAction: Enforce
background: false
rules:
- name: require-team
match:
any:
- resources:
kinds:
- ConfigMap
validate:
message: 'The label `team` is required.'
pattern:
metadata:
labels:
team: '?*'

View file

@@ -0,0 +1,4 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: foo