mirror of
https://github.com/kyverno/kyverno.git
refactor: events controller (#9236)
* refactor: make events controller shutdown graceful
* nit
* drain
* refactor: events controller
* exception
* remove queue

---------

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Co-authored-by: shuting <shuting@nirmata.com>
parent b61a1f3d18
commit b54e6230c5
10 changed files with 319 additions and 424 deletions
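The net effect of the refactor, visible across the main.go hunks below: event.NewEventGenerator no longer takes policy informers or a maxQueuedEvents limit, only a client, a logger, and a variadic list of event reasons to omit; the cleanup controller reuses the same constructor instead of NewEventCleanupGenerator; and the hard-coded worker count 3 passed to Run is replaced by the exported event.Workers / event.CleanupWorkers constants. A minimal sketch of the caller side after the change (abridged from the hunks below; setup, signalCtx, and emitEventsValues are surrounding variables in those files, not defined here):

	eventGenerator := event.NewEventGenerator(
		setup.KyvernoDynamicClient,
		logging.WithName("EventGenerator"),
		emitEventsValues..., // reasons to omit, now passed as a variadic argument
	)
	// start event generator
	var wg sync.WaitGroup
	go eventGenerator.Run(signalCtx, event.Workers, &wg)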
@@ -138,11 +138,8 @@ func main() {
 	}
 	eventGenerator := event.NewEventGenerator(
 		setup.KyvernoDynamicClient,
-		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
-		kyvernoInformer.Kyverno().V1().Policies(),
-		maxQueuedEvents,
-		emitEventsValues,
 		logging.WithName("EventGenerator"),
+		emitEventsValues...,
 	)
 	// this controller only subscribe to events, nothing is returned...
 	var wg sync.WaitGroup
@@ -172,7 +169,7 @@ func main() {
 		os.Exit(1)
 	}
 	// start event generator
-	go eventGenerator.Run(signalCtx, 3, &wg)
+	go eventGenerator.Run(signalCtx, event.Workers, &wg)
 	// setup leader election
 	le, err := leaderelection.New(
 		setup.Logger.WithName("leader-election"),
@@ -132,11 +132,8 @@ func main() {
 		kyvernoInformer.Kyverno().V2beta1().ClusterCleanupPolicies(),
 		genericloggingcontroller.CheckGeneration,
 	)
-	eventGenerator := event.NewEventCleanupGenerator(
+	eventGenerator := event.NewEventGenerator(
 		setup.KyvernoDynamicClient,
-		kyvernoInformer.Kyverno().V2beta1().ClusterCleanupPolicies(),
-		kyvernoInformer.Kyverno().V2beta1().CleanupPolicies(),
-		maxQueuedEvents,
 		logging.WithName("EventGenerator"),
 	)
 	// start informers and wait for cache sync
@@ -145,7 +142,7 @@ func main() {
 	}
 	// start event generator
 	var wg sync.WaitGroup
-	go eventGenerator.Run(ctx, 3, &wg)
+	go eventGenerator.Run(ctx, event.CleanupWorkers, &wg)
 	// setup leader election
 	le, err := leaderelection.New(
 		setup.Logger.WithName("leader-election"),
@@ -322,11 +322,8 @@ func main() {
 	}
 	eventGenerator := event.NewEventGenerator(
 		setup.KyvernoDynamicClient,
-		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
-		kyvernoInformer.Kyverno().V1().Policies(),
-		maxQueuedEvents,
-		omitEventsValues,
 		logging.WithName("EventGenerator"),
+		omitEventsValues...,
 	)
 	// this controller only subscribe to events, nothing is returned...
 	policymetricscontroller.NewController(
@@ -393,7 +390,7 @@ func main() {
 		}
 	}
 	// start event generator
-	go eventGenerator.Run(signalCtx, 3, &wg)
+	go eventGenerator.Run(signalCtx, event.Workers, &wg)
 	// setup leader election
 	le, err := leaderelection.New(
 		setup.Logger.WithName("leader-election"),
@@ -255,11 +255,8 @@ func main() {
 	}
 	eventGenerator := event.NewEventGenerator(
 		setup.KyvernoDynamicClient,
-		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
-		kyvernoInformer.Kyverno().V1().Policies(),
-		maxQueuedEvents,
-		omitEventsValues,
 		logging.WithName("EventGenerator"),
+		omitEventsValues...,
 	)
 	// engine
 	engine := internal.NewEngine(
@@ -283,7 +280,7 @@ func main() {
 	}
 	// start event generator
 	var wg sync.WaitGroup
-	go eventGenerator.Run(ctx, 3, &wg)
+	go eventGenerator.Run(ctx, event.Workers, &wg)
 	// setup leader election
 	le, err := leaderelection.New(
 		setup.Logger.WithName("leader-election"),
@@ -5,8 +5,8 @@ import (
 	"testing"

 	"github.com/kyverno/kyverno/pkg/config"
-	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -27,6 +27,25 @@ type fixture struct {
 	client Interface
 }

+func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured {
+	return &unstructured.Unstructured{
+		Object: map[string]interface{}{
+			"apiVersion": apiVersion,
+			"kind":       kind,
+			"metadata": map[string]interface{}{
+				"namespace": namespace,
+				"name":      name,
+			},
+		},
+	}
+}
+
+func newUnstructuredWithSpec(apiVersion, kind, namespace, name string, spec map[string]interface{}) *unstructured.Unstructured {
+	u := newUnstructured(apiVersion, kind, namespace, name)
+	u.Object["spec"] = spec
+	return u
+}
+
 func newFixture(t *testing.T) *fixture {
 	// init groupversion
 	regResource := []schema.GroupVersionResource{
@@ -44,12 +63,12 @@ func newFixture(t *testing.T) *fixture {
 	}

 	objects := []runtime.Object{
-		kubeutils.NewUnstructured("group/version", "TheKind", "ns-foo", "name-foo"),
-		kubeutils.NewUnstructured("group2/version", "TheKind", "ns-foo", "name2-foo"),
-		kubeutils.NewUnstructured("group/version", "TheKind", "ns-foo", "name-bar"),
-		kubeutils.NewUnstructured("group/version", "TheKind", "ns-foo", "name-baz"),
-		kubeutils.NewUnstructured("group2/version", "TheKind", "ns-foo", "name2-baz"),
-		kubeutils.NewUnstructured("apps/v1", "Deployment", config.KyvernoNamespace(), config.KyvernoDeploymentName()),
+		newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"),
+		newUnstructured("group2/version", "TheKind", "ns-foo", "name2-foo"),
+		newUnstructured("group/version", "TheKind", "ns-foo", "name-bar"),
+		newUnstructured("group/version", "TheKind", "ns-foo", "name-baz"),
+		newUnstructured("group2/version", "TheKind", "ns-foo", "name2-baz"),
+		newUnstructured("apps/v1", "Deployment", config.KyvernoNamespace(), config.KyvernoDeploymentName()),
 	}

 	scheme := runtime.NewScheme()
@@ -89,17 +108,17 @@ func TestCRUDResource(t *testing.T) {
 		t.Errorf("DeleteResouce not working: %s", err)
 	}
 	// CreateResource
-	_, err = f.client.CreateResource(context.TODO(), "", "thekind", "ns-foo", kubeutils.NewUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), false)
+	_, err = f.client.CreateResource(context.TODO(), "", "thekind", "ns-foo", newUnstructured("group/version", "TheKind", "ns-foo", "name-foo1"), false)
 	if err != nil {
 		t.Errorf("CreateResource not working: %s", err)
 	}
 	// UpdateResource
-	_, err = f.client.UpdateResource(context.TODO(), "", "thekind", "ns-foo", kubeutils.NewUnstructuredWithSpec("group/version", "TheKind", "ns-foo", "name-foo1", map[string]interface{}{"foo": "bar"}), false)
+	_, err = f.client.UpdateResource(context.TODO(), "", "thekind", "ns-foo", newUnstructuredWithSpec("group/version", "TheKind", "ns-foo", "name-foo1", map[string]interface{}{"foo": "bar"}), false)
 	if err != nil {
 		t.Errorf("UpdateResource not working: %s", err)
 	}
 	// UpdateStatusResource
-	_, err = f.client.UpdateStatusResource(context.TODO(), "", "thekind", "ns-foo", kubeutils.NewUnstructuredWithSpec("group/version", "TheKind", "ns-foo", "name-foo1", map[string]interface{}{"foo": "status"}), false)
+	_, err = f.client.UpdateStatusResource(context.TODO(), "", "thekind", "ns-foo", newUnstructuredWithSpec("group/version", "TheKind", "ns-foo", "name-foo1", map[string]interface{}{"foo": "status"}), false)
 	if err != nil {
 		t.Errorf("UpdateStatusResource not working: %s", err)
 	}
@@ -3,58 +3,40 @@ package event
 import (
 	"context"
 	"sync"
-	"time"

 	"github.com/go-logr/logr"
-	kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
-	kyvernov2beta1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v2beta1"
-	kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
-	kyvernov2beta1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v2beta1"
+	"github.com/kyverno/kyverno/pkg/client/clientset/versioned/scheme"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"
-	kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
 	corev1 "k8s.io/api/core/v1"
-	errors "k8s.io/apimachinery/pkg/api/errors"
-	"k8s.io/apimachinery/pkg/runtime"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/tools/events"
-	"k8s.io/client-go/util/workqueue"
+	"k8s.io/klog/v2"
 )

 const (
+	Workers             = 3
+	CleanupWorkers      = 3
 	eventWorkQueueName  = "kyverno-events"
 	workQueueRetryLimit = 3
 )

 // generator generate events
 type generator struct {
-	client dclient.Interface
-	// list/get cluster policy
-	cpLister kyvernov1listers.ClusterPolicyLister
-	// list/get policy
-	pLister kyvernov1listers.PolicyLister
-	// list/get cluster cleanup policy
-	clustercleanuppolLister kyvernov2beta1listers.ClusterCleanupPolicyLister
-	// list/get cleanup policy
-	cleanuppolLister kyvernov2beta1listers.CleanupPolicyLister
-	// queue to store event generation requests
-	queue workqueue.RateLimitingInterface
-	// events generated at policy controller
-	policyCtrRecorder events.EventRecorder
-	// events generated at admission control
-	admissionCtrRecorder events.EventRecorder
-	// events generated at namespaced policy controller to process 'generate' rule
-	genPolicyRecorder events.EventRecorder
-	// events generated at mutateExisting controller
-	mutateExistingRecorder events.EventRecorder
-	// events generated at cleanup controller
-	cleanupPolicyRecorder events.EventRecorder
-
-	maxQueuedEvents int
-
-	omitEvents []string
-
-	log logr.Logger
+	// broadcaster
+	broadcaster events.EventBroadcaster
+
+	// recorders
+	recorders map[Source]events.EventRecorder
+
+	// config
+	omitEvents sets.Set[string]
+	logger     logr.Logger
+}
+
+// Interface to generate event
+type Interface interface {
+	Add(infoList ...Info)
 }

 // Controller interface to generate event
@@ -63,214 +45,84 @@ type Controller interface {
 	Run(context.Context, int, *sync.WaitGroup)
 }

-// Interface to generate event
-type Interface interface {
-	Add(infoList ...Info)
-}
-
 // NewEventGenerator to generate a new event controller
-func NewEventGenerator(
-	// source Source,
-	client dclient.Interface,
-	cpInformer kyvernov1informers.ClusterPolicyInformer,
-	pInformer kyvernov1informers.PolicyInformer,
-	maxQueuedEvents int,
-	omitEvents []string,
-	log logr.Logger,
-) Controller {
-	gen := generator{
-		client:                 client,
-		cpLister:               cpInformer.Lister(),
-		pLister:                pInformer.Lister(),
-		queue:                  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), eventWorkQueueName),
-		policyCtrRecorder:      NewRecorder(PolicyController, client.GetEventsInterface()),
-		admissionCtrRecorder:   NewRecorder(AdmissionController, client.GetEventsInterface()),
-		genPolicyRecorder:      NewRecorder(GeneratePolicyController, client.GetEventsInterface()),
-		mutateExistingRecorder: NewRecorder(MutateExistingController, client.GetEventsInterface()),
-		maxQueuedEvents:        maxQueuedEvents,
-		omitEvents:             omitEvents,
-		log:                    log,
+func NewEventGenerator(client dclient.Interface, logger logr.Logger, omitEvents ...string) Controller {
+	return &generator{
+		broadcaster: events.NewBroadcaster(&events.EventSinkImpl{
+			Interface: client.GetEventsInterface(),
+		}),
+		omitEvents: sets.New(omitEvents...),
+		logger:     logger,
 	}
-	return &gen
-}
-
-// NewEventGenerator to generate a new event cleanup controller
-func NewEventCleanupGenerator(
-	// source Source,
-	client dclient.Interface,
-	clustercleanuppolInformer kyvernov2beta1informers.ClusterCleanupPolicyInformer,
-	cleanuppolInformer kyvernov2beta1informers.CleanupPolicyInformer,
-	maxQueuedEvents int,
-	log logr.Logger,
-) Controller {
-	gen := generator{
-		client:                  client,
-		clustercleanuppolLister: clustercleanuppolInformer.Lister(),
-		cleanuppolLister:        cleanuppolInformer.Lister(),
-		queue:                   workqueue.NewNamedRateLimitingQueue(workqueue.DefaultItemBasedRateLimiter(), eventWorkQueueName),
-		cleanupPolicyRecorder:   NewRecorder(CleanupController, client.GetEventsInterface()),
-		maxQueuedEvents:         maxQueuedEvents,
-		log:                     log,
-	}
-	return &gen
 }

 // Add queues an event for generation
 func (gen *generator) Add(infos ...Info) {
-	logger := gen.log
+	logger := gen.logger
 	logger.V(3).Info("generating events", "count", len(infos))
-	if gen.maxQueuedEvents == 0 || gen.queue.Len() > gen.maxQueuedEvents {
-		logger.V(2).Info("exceeds the event queue limit, dropping the event", "maxQueuedEvents", gen.maxQueuedEvents, "current size", gen.queue.Len())
-		return
-	}
 	for _, info := range infos {
-		if info.Name == "" {
-			// dont create event for resources with generateName
-			// as the name is not generated yet
-			logger.V(3).Info("skipping event creation for resource without a name", "kind", info.Kind, "name", info.Name, "namespace", info.Namespace)
+		// don't create event for resources with generateName as the name is not generated yet
+		if info.Regarding.Name == "" {
+			logger.V(3).Info("skipping event creation for resource without a name", "kind", info.Regarding.Kind, "name", info.Regarding.Name, "namespace", info.Regarding.Namespace)
 			continue
 		}
-
-		shouldEmitEvent := true
-		for _, eventReason := range gen.omitEvents {
-			if info.Reason == Reason(eventReason) {
-				shouldEmitEvent = false
-				logger.V(6).Info("omitting event", "kind", info.Kind, "name", info.Name, "namespace", info.Namespace, "reason", info.Reason)
-			}
-		}
-
-		if shouldEmitEvent {
-			gen.queue.Add(info)
-			logger.V(6).Info("creating event", "kind", info.Kind, "name", info.Name, "namespace", info.Namespace, "reason", info.Reason)
+		if gen.omitEvents.Has(string(info.Reason)) {
+			logger.V(6).Info("omitting event", "kind", info.Regarding.Kind, "name", info.Regarding.Name, "namespace", info.Regarding.Namespace, "reason", info.Reason)
+			continue
 		}
+		gen.emitEvent(info)
+		logger.V(6).Info("creating event", "kind", info.Regarding.Kind, "name", info.Regarding.Name, "namespace", info.Regarding.Namespace, "reason", info.Reason)
 	}
 }

 // Run begins generator
 func (gen *generator) Run(ctx context.Context, workers int, waitGroup *sync.WaitGroup) {
-	logger := gen.log
+	logger := gen.logger
 	logger.Info("start")
-	defer logger.Info("shutting down")
+	defer logger.Info("terminated")
 	defer utilruntime.HandleCrash()
-	defer gen.queue.ShutDown()
-	for i := 0; i < workers; i++ {
-		waitGroup.Add(1)
-		go func() {
-			defer waitGroup.Done()
-			wait.UntilWithContext(ctx, gen.runWorker, time.Second)
-		}()
+	defer gen.stopRecorders()
+	defer logger.Info("shutting down...")
+	if err := gen.startRecorders(ctx); err != nil {
+		logger.Error(err, "failed to start recorders")
+		return
 	}
 	<-ctx.Done()
 }

-func (gen *generator) runWorker(ctx context.Context) {
-	for gen.processNextWorkItem() {
+func (gen *generator) startRecorders(ctx context.Context) error {
+	if err := gen.broadcaster.StartRecordingToSinkWithContext(ctx); err != nil {
+		return err
 	}
+	logger := klog.Background().V(int(0))
+	// TODO: logger watcher should be stopped
+	if _, err := gen.broadcaster.StartLogging(logger); err != nil {
+		return err
+	}
+	gen.recorders = map[Source]events.EventRecorder{
+		PolicyController:         gen.broadcaster.NewRecorder(scheme.Scheme, string(PolicyController)),
+		AdmissionController:      gen.broadcaster.NewRecorder(scheme.Scheme, string(AdmissionController)),
+		GeneratePolicyController: gen.broadcaster.NewRecorder(scheme.Scheme, string(GeneratePolicyController)),
+		MutateExistingController: gen.broadcaster.NewRecorder(scheme.Scheme, string(MutateExistingController)),
+		CleanupController:        gen.broadcaster.NewRecorder(scheme.Scheme, string(CleanupController)),
+	}
+	return nil
 }

-func (gen *generator) handleErr(err error, key interface{}) {
-	logger := gen.log
-	if err == nil {
-		gen.queue.Forget(key)
-		return
-	}
-	// This controller retries if something goes wrong. After that, it stops trying.
-	if gen.queue.NumRequeues(key) < workQueueRetryLimit {
-		logger.V(4).Info("retrying event generation", "key", key, "reason", err.Error())
-		// Re-enqueue the key rate limited. Based on the rate limiter on the
-		// queue and the re-enqueue history, the key will be processed later again.
-		gen.queue.AddRateLimited(key)
-		return
-	}
-	gen.queue.Forget(key)
-	if !errors.IsNotFound(err) {
-		logger.Error(err, "failed to generate event", "key", key)
-	}
+func (gen *generator) stopRecorders() {
+	gen.broadcaster.Shutdown()
 }

-func (gen *generator) processNextWorkItem() bool {
-	obj, shutdown := gen.queue.Get()
-	if shutdown {
-		return false
-	}
-	defer gen.queue.Done(obj)
-	var key Info
-	var ok bool
-	if key, ok = obj.(Info); !ok {
-		gen.queue.Forget(obj)
-		gen.log.V(2).Info("Incorrect type; expected type 'info'", "obj", obj)
-		return true
-	}
-	err := gen.syncHandler(key)
-	gen.handleErr(err, obj)
-	return true
-}
-
-func (gen *generator) syncHandler(key Info) error {
-	logger := gen.log
-	var regardingObj, relatedObj runtime.Object
-	var err error
-	switch key.Kind {
-	case "ClusterPolicy":
-		regardingObj, err = gen.cpLister.Get(key.Name)
-		if err != nil {
-			logger.Error(err, "failed to get cluster policy", "name", key.Name)
-			return err
-		}
-	case "Policy":
-		regardingObj, err = gen.pLister.Policies(key.Namespace).Get(key.Name)
-		if err != nil {
-			logger.Error(err, "failed to get policy", "name", key.Name)
-			return err
-		}
-	case "ClusterCleanupPolicy":
-		regardingObj, err = gen.clustercleanuppolLister.Get(key.Name)
-		if err != nil {
-			logger.Error(err, "failed to get cluster clean up policy", "name", key.Name)
-			return err
-		}
-	case "CleanupPolicy":
-		regardingObj, err = gen.cleanuppolLister.CleanupPolicies(key.Namespace).Get(key.Name)
-		if err != nil {
-			logger.Error(err, "failed to get cleanup policy", "name", key.Name)
-			return err
-		}
-	default:
-		regardingObj, err = gen.client.GetResource(context.TODO(), "", key.Kind, key.Namespace, key.Name)
-		if err != nil {
-			if !errors.IsNotFound(err) {
-				logger.Error(err, "failed to get resource", "kind", key.Kind, "name", key.Name, "namespace", key.Namespace)
-				return nil
-			}
-			return err
-		}
-	}
-
-	relatedObj = kubeutils.NewUnstructured(key.RelatedAPIVersion, key.RelatedKind, key.RelatedNamespace, key.RelatedName)
-
-	// set the event type based on reason
-	// if skip/pass, reason will be: NORMAL
-	// else reason will be: WARNING
+func (gen *generator) emitEvent(key Info) {
+	logger := gen.logger
 	eventType := corev1.EventTypeWarning
 	if key.Reason == PolicyApplied || key.Reason == PolicySkipped {
 		eventType = corev1.EventTypeNormal
 	}
-	logger.V(3).Info("creating the event", "source", key.Source, "type", eventType, "resource", key.Resource())
-	// based on the source of event generation, use different event recorders
-	switch key.Source {
-	case AdmissionController:
-		gen.admissionCtrRecorder.Eventf(regardingObj, relatedObj, eventType, string(key.Reason), string(key.Action), key.Message)
-	case PolicyController:
-		gen.policyCtrRecorder.Eventf(regardingObj, relatedObj, eventType, string(key.Reason), string(key.Action), key.Message)
-	case GeneratePolicyController:
-		gen.genPolicyRecorder.Eventf(regardingObj, relatedObj, eventType, string(key.Reason), string(key.Action), key.Message)
-	case MutateExistingController:
-		gen.mutateExistingRecorder.Eventf(regardingObj, relatedObj, eventType, string(key.Reason), string(key.Action), key.Message)
-	case CleanupController:
-		gen.cleanupPolicyRecorder.Eventf(regardingObj, relatedObj, eventType, string(key.Reason), string(key.Action), key.Message)
-	default:
+	if recorder := gen.recorders[key.Source]; recorder != nil {
+		logger.V(3).Info("creating the event", "source", key.Source, "type", eventType, "resource", key.Resource())
+		recorder.Eventf(&key.Regarding, key.Related, eventType, string(key.Reason), string(key.Action), key.Message)
+	} else {
 		logger.Info("info.source not defined for the request")
 	}
-	return nil
 }
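With the workqueue removed, Run no longer spawns worker goroutines: it starts the broadcaster-backed recorders, blocks on <-ctx.Done(), and leaves cleanup to its deferred calls. Because Go executes defers in LIFO order, cancellation logs "shutting down...", then stops the recorders via gen.stopRecorders (which calls broadcaster.Shutdown; the "drain" step mentioned in the commit message is delegated to the broadcaster), runs utilruntime.HandleCrash, and finally logs "terminated". A tiny self-contained sketch of that ordering, with print statements standing in for the real deferred calls:

	package main

	import "fmt"

	// Minimal demonstration of the defer ordering Run relies on:
	// defers run last-in, first-out, so the most recently registered
	// cleanup ("shutting down...") executes first once ctx is done.
	func main() {
		defer fmt.Println("terminated")       // registered first, runs last
		defer fmt.Println("stop recorders")   // stands in for gen.stopRecorders()
		defer fmt.Println("shutting down...") // registered last, runs first
		fmt.Println("<-ctx.Done() returned")
	}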
@@ -7,7 +7,9 @@ import (
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	kyvernov2alpha1 "github.com/kyverno/kyverno/api/kyverno/v2alpha1"
 	engineapi "github.com/kyverno/kyverno/pkg/engine/api"
+	corev1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/types"
 )

 func NewPolicyFailEvent(source Source, reason Reason, engineResponse engineapi.EngineResponse, ruleResp engineapi.RuleResponse, blocked bool) Info {
@@ -15,21 +17,29 @@ func NewPolicyFailEvent(source Source, reason Reason, engineResponse engineapi.E
 	if blocked {
 		action = ResourceBlocked
 	}

 	pol := engineResponse.Policy()
+	regarding := corev1.ObjectReference{
+		// TODO: iirc it's not safe to assume api version is set
+		APIVersion: "kyverno.io/v1",
+		Kind:       pol.GetKind(),
+		Name:       pol.GetName(),
+		Namespace:  pol.GetNamespace(),
+		UID:        pol.MetaObject().GetUID(),
+	}
+	related := engineResponse.GetResourceSpec()
 	return Info{
-		Kind:              pol.GetKind(),
-		Name:              pol.GetName(),
-		Namespace:         pol.GetNamespace(),
-		RelatedAPIVersion: engineResponse.GetResourceSpec().APIVersion,
-		RelatedKind:       engineResponse.GetResourceSpec().Kind,
-		RelatedName:       engineResponse.GetResourceSpec().Name,
-		RelatedNamespace:  engineResponse.GetResourceSpec().Namespace,
-		Reason:            reason,
-		Source:            source,
-		Message:           buildPolicyEventMessage(ruleResp, engineResponse.GetResourceSpec(), blocked),
-		Action:            action,
+		Regarding: regarding,
+		Related: &corev1.ObjectReference{
+			APIVersion: related.APIVersion,
+			Kind:       related.Kind,
+			Name:       related.Name,
+			Namespace:  related.Namespace,
+			UID:        types.UID(related.UID),
+		},
+		Reason:  reason,
+		Source:  source,
+		Message: buildPolicyEventMessage(ruleResp, engineResponse.GetResourceSpec(), blocked),
+		Action:  action,
 	}
 }
@@ -83,19 +93,28 @@ func NewPolicyAppliedEvent(source Source, engineResponse engineapi.EngineRespons
 		fmt.Fprintf(&bldr, "%s: pass", res)
 		action = ResourcePassed
 	}
+	regarding := corev1.ObjectReference{
+		// TODO: iirc it's not safe to assume api version is set
+		APIVersion: "kyverno.io/v1",
+		Kind:       policy.GetKind(),
+		Name:       policy.GetName(),
+		Namespace:  policy.GetNamespace(),
+		UID:        policy.MetaObject().GetUID(),
+	}
+	related := engineResponse.GetResourceSpec()
 	return Info{
-		Kind:              policy.GetKind(),
-		Name:              policy.GetName(),
-		Namespace:         policy.GetNamespace(),
-		RelatedAPIVersion: resource.GetAPIVersion(),
-		RelatedKind:       resource.GetKind(),
-		RelatedName:       resource.GetName(),
-		RelatedNamespace:  resource.GetNamespace(),
-		Reason:            PolicyApplied,
-		Source:            source,
-		Message:           bldr.String(),
-		Action:            action,
+		Regarding: regarding,
+		Related: &corev1.ObjectReference{
+			APIVersion: related.APIVersion,
+			Kind:       related.Kind,
+			Name:       related.Name,
+			Namespace:  related.Namespace,
+			UID:        types.UID(related.UID),
+		},
+		Reason:  PolicyApplied,
+		Source:  source,
+		Message: bldr.String(),
+		Action:  action,
 	}
 }
@@ -107,11 +126,15 @@ func NewResourceViolationEvent(source Source, reason Reason, engineResponse engi
 	fmt.Fprintf(&bldr, "policy %s/%s %s: %s", pol.GetName(),
 		ruleResp.Name(), ruleResp.Status(), ruleResp.Message())
 	resource := engineResponse.GetResourceSpec()
+	regarding := corev1.ObjectReference{
+		APIVersion: resource.APIVersion,
+		Kind:       resource.Kind,
+		Name:       resource.Name,
+		Namespace:  resource.Namespace,
+		UID:        types.UID(resource.UID),
+	}
 	return Info{
-		Kind:      resource.Kind,
-		Name:      resource.Name,
-		Namespace: resource.Namespace,
+		Regarding: regarding,
 		Reason:    reason,
 		Source:    source,
 		Message:   bldr.String(),
@@ -121,11 +144,15 @@ func NewResourceViolationEvent(source Source, reason Reason, engineResponse engi

 func NewResourceGenerationEvent(policy, rule string, source Source, resource kyvernov1.ResourceSpec) Info {
 	msg := fmt.Sprintf("Created %s %s as a result of applying policy %s/%s", resource.GetKind(), resource.GetName(), policy, rule)
+	regarding := corev1.ObjectReference{
+		APIVersion: resource.APIVersion,
+		Kind:       resource.Kind,
+		Name:       resource.Name,
+		Namespace:  resource.Namespace,
+		UID:        resource.UID,
+	}
 	return Info{
-		Kind:      resource.GetKind(),
-		Namespace: resource.GetNamespace(),
-		Name:      resource.GetName(),
+		Regarding: regarding,
 		Source:    source,
 		Reason:    PolicyApplied,
 		Message:   msg,
@@ -135,18 +162,27 @@ func NewResourceGenerationEvent(policy, rule string, source Source, resource kyv

 func NewBackgroundFailedEvent(err error, policy kyvernov1.PolicyInterface, rule string, source Source, resource kyvernov1.ResourceSpec) []Info {
 	var events []Info
+	regarding := corev1.ObjectReference{
+		// TODO: iirc it's not safe to assume api version is set
+		APIVersion: "kyverno.io/v1",
+		Kind:       policy.GetKind(),
+		Name:       policy.GetName(),
+		Namespace:  policy.GetNamespace(),
+		UID:        policy.GetUID(),
+	}
 	events = append(events, Info{
-		Kind:              policy.GetKind(),
-		Namespace:         policy.GetNamespace(),
-		Name:              policy.GetName(),
-		RelatedAPIVersion: resource.GetAPIVersion(),
-		RelatedKind:       resource.GetKind(),
-		RelatedNamespace:  resource.GetNamespace(),
-		RelatedName:       resource.GetName(),
-		Source:            source,
-		Reason:            PolicyError,
-		Message:           fmt.Sprintf("policy %s/%s error: %v", policy.GetName(), rule, err),
-		Action:            None,
+		Regarding: regarding,
+		Related: &corev1.ObjectReference{
+			APIVersion: resource.APIVersion,
+			Kind:       resource.Kind,
+			Name:       resource.Name,
+			Namespace:  resource.Namespace,
+			UID:        resource.UID,
+		},
+		Source:  source,
+		Reason:  PolicyError,
+		Message: fmt.Sprintf("policy %s/%s error: %v", policy.GetName(), rule, err),
+		Action:  None,
 	})

 	return events
@@ -156,25 +192,32 @@ func NewBackgroundSuccessEvent(source Source, policy kyvernov1.PolicyInterface,
 	var events []Info
 	msg := "resource generated"
 	action := ResourceGenerated

 	if source == MutateExistingController {
 		msg = "resource mutated"
 		action = ResourceMutated
 	}
+	regarding := corev1.ObjectReference{
+		// TODO: iirc it's not safe to assume api version is set
+		APIVersion: "kyverno.io/v1",
+		Kind:       policy.GetKind(),
+		Name:       policy.GetName(),
+		Namespace:  policy.GetNamespace(),
+		UID:        policy.GetUID(),
+	}
 	for _, res := range resources {
 		events = append(events, Info{
-			Kind:              policy.GetKind(),
-			Namespace:         policy.GetNamespace(),
-			Name:              policy.GetName(),
-			RelatedAPIVersion: res.GetAPIVersion(),
-			RelatedKind:       res.GetKind(),
-			RelatedNamespace:  res.GetNamespace(),
-			RelatedName:       res.GetName(),
-			Source:            source,
-			Reason:            PolicyApplied,
-			Message:           msg,
-			Action:            action,
+			Regarding: regarding,
+			Related: &corev1.ObjectReference{
+				APIVersion: res.APIVersion,
+				Kind:       res.Kind,
+				Name:       res.Name,
+				Namespace:  res.Namespace,
+				UID:        res.UID,
+			},
+			Source:  source,
+			Reason:  PolicyApplied,
+			Message: msg,
+			Action:  action,
 		})
 	}
@@ -192,104 +235,138 @@ func NewPolicyExceptionEvents(engineResponse engineapi.EngineResponse, ruleResp
 	} else {
 		exceptionMessage = fmt.Sprintf("resource %s was skipped from policy rule %s/%s/%s", resourceKey(engineResponse.PatchedResource), pol.GetNamespace(), pol.GetName(), ruleResp.Name())
 	}
+	regarding := corev1.ObjectReference{
+		// TODO: iirc it's not safe to assume api version is set
+		APIVersion: "kyverno.io/v1",
+		Kind:       pol.GetKind(),
+		Name:       pol.GetName(),
+		Namespace:  pol.GetNamespace(),
+		UID:        pol.GetUID(),
+	}
+	related := engineResponse.GetResourceSpec()
 	policyEvent := Info{
-		Kind:              pol.GetKind(),
-		Name:              pol.GetName(),
-		Namespace:         pol.GetNamespace(),
-		RelatedAPIVersion: engineResponse.PatchedResource.GetAPIVersion(),
-		RelatedKind:       engineResponse.PatchedResource.GetKind(),
-		RelatedName:       engineResponse.PatchedResource.GetName(),
-		RelatedNamespace:  engineResponse.PatchedResource.GetNamespace(),
-		Reason:            PolicySkipped,
-		Message:           policyMessage,
-		Source:            source,
-		Action:            ResourcePassed,
+		Regarding: regarding,
+		Related: &corev1.ObjectReference{
+			APIVersion: related.APIVersion,
+			Kind:       related.Kind,
+			Name:       related.Name,
+			Namespace:  related.Namespace,
+			UID:        types.UID(related.UID),
+		},
+		Reason:  PolicySkipped,
+		Message: policyMessage,
+		Source:  source,
+		Action:  ResourcePassed,
 	}
 	exceptionEvent := Info{
-		Kind:              "PolicyException",
-		Name:              exceptionName,
-		Namespace:         exceptionNamespace,
-		RelatedAPIVersion: engineResponse.PatchedResource.GetAPIVersion(),
-		RelatedKind:       engineResponse.PatchedResource.GetKind(),
-		RelatedName:       engineResponse.PatchedResource.GetName(),
-		RelatedNamespace:  engineResponse.PatchedResource.GetNamespace(),
-		Reason:            PolicySkipped,
-		Message:           exceptionMessage,
-		Source:            source,
-		Action:            ResourcePassed,
+		Regarding: corev1.ObjectReference{
+			// TODO: iirc it's not safe to assume api version is set
+			APIVersion: "kyverno.io/v2",
+			Kind:       "PolicyException",
+			Name:       exceptionName,
+			Namespace:  exceptionNamespace,
+			UID:        exception.GetUID(),
+		},
+		Related: &corev1.ObjectReference{
+			APIVersion: related.APIVersion,
+			Kind:       related.Kind,
+			Name:       related.Name,
+			Namespace:  related.Namespace,
+			UID:        types.UID(related.UID),
+		},
+		Reason:  PolicySkipped,
+		Message: exceptionMessage,
+		Source:  source,
+		Action:  ResourcePassed,
 	}
 	return []Info{policyEvent, exceptionEvent}
 }

 func NewCleanupPolicyEvent(policy kyvernov2alpha1.CleanupPolicyInterface, resource unstructured.Unstructured, err error) Info {
+	regarding := corev1.ObjectReference{
+		// TODO: iirc it's not safe to assume api version is set
+		APIVersion: "kyverno.io/v2beta1",
+		Kind:       policy.GetKind(),
+		Name:       policy.GetName(),
+		Namespace:  policy.GetNamespace(),
+		UID:        policy.GetUID(),
+	}
+	related := &corev1.ObjectReference{
+		APIVersion: resource.GetAPIVersion(),
+		Kind:       resource.GetKind(),
+		Namespace:  resource.GetNamespace(),
+		Name:       resource.GetName(),
+	}
 	if err == nil {
 		return Info{
-			Kind:              policy.GetKind(),
-			Namespace:         policy.GetNamespace(),
-			Name:              policy.GetName(),
-			RelatedAPIVersion: resource.GetAPIVersion(),
-			RelatedKind:       resource.GetKind(),
-			RelatedNamespace:  resource.GetNamespace(),
-			RelatedName:       resource.GetName(),
-			Source:            CleanupController,
-			Action:            ResourceCleanedUp,
-			Reason:            PolicyApplied,
-			Message:           fmt.Sprintf("successfully cleaned up the target resource %v/%v/%v", resource.GetKind(), resource.GetNamespace(), resource.GetName()),
+			Regarding: regarding,
+			Related:   related,
+			Source:    CleanupController,
+			Action:    ResourceCleanedUp,
+			Reason:    PolicyApplied,
+			Message:   fmt.Sprintf("successfully cleaned up the target resource %v/%v/%v", resource.GetKind(), resource.GetNamespace(), resource.GetName()),
 		}
 	} else {
 		return Info{
-			Kind:              policy.GetKind(),
-			Namespace:         policy.GetNamespace(),
-			Name:              policy.GetName(),
-			RelatedAPIVersion: resource.GetAPIVersion(),
-			RelatedKind:       resource.GetKind(),
-			RelatedNamespace:  resource.GetNamespace(),
-			RelatedName:       resource.GetName(),
-			Source:            CleanupController,
-			Action:            None,
-			Reason:            PolicyError,
-			Message:           fmt.Sprintf("failed to clean up the target resource %v/%v/%v: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err.Error()),
+			Regarding: regarding,
+			Related:   related,
+			Source:    CleanupController,
+			Action:    None,
+			Reason:    PolicyError,
+			Message:   fmt.Sprintf("failed to clean up the target resource %v/%v/%v: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err.Error()),
 		}
 	}
 }

 func NewValidatingAdmissionPolicyEvent(policy kyvernov1.PolicyInterface, vapName, vapBindingName string) []Info {
+	regarding := corev1.ObjectReference{
+		// TODO: iirc it's not safe to assume api version is set
+		APIVersion: "kyverno.io/v1",
+		Kind:       policy.GetKind(),
+		Name:       policy.GetName(),
+		Namespace:  policy.GetNamespace(),
+		UID:        policy.GetUID(),
+	}
 	vapEvent := Info{
-		Kind:              policy.GetKind(),
-		Namespace:         policy.GetNamespace(),
-		Name:              policy.GetName(),
-		RelatedAPIVersion: "admissionregistration.k8s.io/v1alpha1",
-		RelatedKind:       "ValidatingAdmissionPolicy",
-		RelatedName:       vapName,
+		Regarding: regarding,
+		Related: &corev1.ObjectReference{
+			APIVersion: "admissionregistration.k8s.io/v1alpha1",
+			Kind:       "ValidatingAdmissionPolicy",
+			Name:       vapName,
+		},
 		Source:  GeneratePolicyController,
 		Action:  ResourceGenerated,
 		Reason:  PolicyApplied,
 		Message: fmt.Sprintf("successfully generated validating admission policy %s from policy %s", vapName, policy.GetName()),
 	}
 	vapBindingEvent := Info{
-		Kind:              policy.GetKind(),
-		Namespace:         policy.GetNamespace(),
-		Name:              policy.GetName(),
-		RelatedAPIVersion: "admissionregistration.k8s.io/v1alpha1",
-		RelatedKind:       "ValidatingAdmissionPolicyBinding",
-		RelatedName:       vapBindingName,
+		Regarding: regarding,
+		Related: &corev1.ObjectReference{
+			APIVersion: "admissionregistration.k8s.io/v1alpha1",
+			Kind:       "ValidatingAdmissionPolicyBinding",
+			Name:       vapBindingName,
+		},
 		Source:  GeneratePolicyController,
 		Action:  ResourceGenerated,
 		Reason:  PolicyApplied,
 		Message: fmt.Sprintf("successfully generated validating admission policy binding %s from policy %s", vapBindingName, policy.GetName()),
 	}
 	return []Info{vapEvent, vapBindingEvent}
 }

 func NewFailedEvent(err error, policy, rule string, source Source, resource kyvernov1.ResourceSpec) Info {
 	return Info{
-		Kind:      resource.GetKind(),
-		Namespace: resource.GetNamespace(),
-		Name:      resource.GetName(),
-		Source:    source,
-		Reason:    PolicyError,
-		Message:   fmt.Sprintf("policy %s/%s error: %v", policy, rule, err),
-		Action:    None,
+		Regarding: corev1.ObjectReference{
+			APIVersion: resource.APIVersion,
+			Kind:       resource.Kind,
+			Name:       resource.Name,
+			Namespace:  resource.Namespace,
+			UID:        resource.UID,
+		},
+		Source:  source,
+		Reason:  PolicyError,
+		Message: fmt.Sprintf("policy %s/%s error: %v", policy, rule, err),
+		Action:  None,
 	}
 }
@@ -1,25 +1,24 @@
 package event

-import "strings"
+import (
+	"strings"
+
+	corev1 "k8s.io/api/core/v1"
+)

 // Info defines the event details
 type Info struct {
-	Kind              string
-	Name              string
-	Namespace         string
-	RelatedAPIVersion string
-	RelatedKind       string
-	RelatedName       string
-	RelatedNamespace  string
-	Reason            Reason
-	Message           string
-	Action            Action
-	Source            Source
+	Regarding corev1.ObjectReference
+	Related   *corev1.ObjectReference
+	Reason    Reason
+	Message   string
+	Action    Action
+	Source    Source
 }

 func (i *Info) Resource() string {
-	if i.Namespace == "" {
-		return strings.Join([]string{i.Kind, i.Name}, "/")
+	if i.Regarding.Namespace == "" {
+		return strings.Join([]string{i.Regarding.Kind, i.Regarding.Name}, "/")
 	}
-	return strings.Join([]string{i.Kind, i.Namespace, i.Name}, "/")
+	return strings.Join([]string{i.Regarding.Kind, i.Regarding.Namespace, i.Regarding.Name}, "/")
 }
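Since Info now carries Kubernetes ObjectReferences directly, callers fill Regarding (the object the event is recorded against) and, optionally, Related; Resource() reads from Regarding and stays namespace-aware. An illustrative example (the policy and pod names are made up, and the import path for the event package is assumed from the repository layout):

	import (
		"fmt"

		"github.com/kyverno/kyverno/pkg/event"
		corev1 "k8s.io/api/core/v1"
	)

	func example() {
		info := event.Info{
			Regarding: corev1.ObjectReference{
				APIVersion: "kyverno.io/v1",
				Kind:       "ClusterPolicy",
				Name:       "require-labels", // hypothetical policy name
			},
			Related: &corev1.ObjectReference{
				APIVersion: "v1",
				Kind:       "Pod",
				Namespace:  "default",
				Name:       "nginx", // hypothetical resource name
			},
			Reason:  event.PolicyApplied,
			Source:  event.AdmissionController,
			Action:  event.ResourcePassed,
			Message: "policy applied",
		}
		// Cluster-scoped Regarding: prints "ClusterPolicy/require-labels".
		// A namespaced Regarding would print "Kind/namespace/name".
		fmt.Println(info.Resource())
	}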
@@ -1,21 +0,0 @@
-package event
-
-import (
-	"github.com/kyverno/kyverno/pkg/client/clientset/versioned/scheme"
-	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-	typedeventsv1 "k8s.io/client-go/kubernetes/typed/events/v1"
-	"k8s.io/client-go/tools/events"
-)
-
-func NewRecorder(source Source, sink typedeventsv1.EventsV1Interface) events.EventRecorder {
-	utilruntime.Must(scheme.AddToScheme(scheme.Scheme))
-	eventBroadcaster := events.NewBroadcaster(
-		&events.EventSinkImpl{
-			Interface: sink,
-		},
-	)
-	eventBroadcaster.StartStructuredLogging(0)
-	stopCh := make(chan struct{})
-	eventBroadcaster.StartRecordingToSink(stopCh)
-	return eventBroadcaster.NewRecorder(scheme.Scheme, string(source))
-}
@@ -28,22 +28,3 @@ func ObjToUnstructured(obj interface{}) (*unstructured.Unstructured, error) {
 	}
 	return &unstructured.Unstructured{Object: unstrObj}, nil
 }
-
-func NewUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured {
-	return &unstructured.Unstructured{
-		Object: map[string]interface{}{
-			"apiVersion": apiVersion,
-			"kind":       kind,
-			"metadata": map[string]interface{}{
-				"namespace": namespace,
-				"name":      name,
-			},
-		},
-	}
-}
-
-func NewUnstructuredWithSpec(apiVersion, kind, namespace, name string, spec map[string]interface{}) *unstructured.Unstructured {
-	u := NewUnstructured(apiVersion, kind, namespace, name)
-	u.Object["spec"] = spec
-	return u
-}