mirror of
https://github.com/kyverno/kyverno.git
synced 2025-03-06 16:06:56 +00:00
* Fix Dev setup * webhook monitor - start webhook monitor in main process Signed-off-by: Shuting Zhao <shutting06@gmail.com> * add leaderelection Signed-off-by: Jim Bugwadia <jim@nirmata.com> * - add isLeader; - update to use configmap lock Signed-off-by: Shuting Zhao <shutting06@gmail.com> * - add initialization method - add methods to get attributes Signed-off-by: Shuting Zhao <shutting06@gmail.com> * address comments Signed-off-by: Shuting Zhao <shutting06@gmail.com> * remove newContext in runLeaderElection Signed-off-by: Shuting Zhao <shutting06@gmail.com> * add leader election to GenerateController Signed-off-by: Jim Bugwadia <jim@nirmata.com> * skip processing for non-leaders Signed-off-by: Jim Bugwadia <jim@nirmata.com> * skip processing for non-leaders Signed-off-by: Jim Bugwadia <jim@nirmata.com> * add leader election to generate cleanup controller Signed-off-by: Jim Bugwadia <jim@nirmata.com> * Gracefully drain request * HA - Webhook Register / Webhook Monitor / Certificate Renewer (#1920) * enable leader election for webhook register Signed-off-by: Shuting Zhao <shutting06@gmail.com> * extract certManager to its own process Signed-off-by: Shuting Zhao <shutting06@gmail.com> * leader election for cert manager Signed-off-by: Shuting Zhao <shutting06@gmail.com> * certManager - init certs by the leader Signed-off-by: Shuting Zhao <shutting06@gmail.com> * add leader election to webhook monitor Signed-off-by: Shuting Zhao <shutting06@gmail.com> * update log message Signed-off-by: Shuting Zhao <shutting06@gmail.com> * add leader election to policy controller Signed-off-by: Shuting Zhao <shutting06@gmail.com> * add leader election to policy report controller Signed-off-by: Shuting Zhao <shutting06@gmail.com> * rebuild leader election config Signed-off-by: Shuting Zhao <shutting06@gmail.com> * start informers in leaderelection Signed-off-by: Shuting Zhao <shutting06@gmail.com> * start policy informers in main Signed-off-by: Shuting Zhao <shutting06@gmail.com> * 
enable leader election in main Signed-off-by: Shuting Zhao <shutting06@gmail.com> * move eventHandler to the leader election start method Signed-off-by: Shuting Zhao <shutting06@gmail.com> * address reviewdog comments Signed-off-by: Shuting Zhao <shutting06@gmail.com> * add clusterrole leaderelection Signed-off-by: Shuting Zhao <shutting06@gmail.com> * fixed generate flow (#1936) Signed-off-by: NoSkillGirl <singhpooja240393@gmail.com> * - init separate kubeclient for leaderelection - fix webhook monitor Signed-off-by: Shuting Zhao <shutting06@gmail.com> * address reviewdog comments Signed-off-by: Shuting Zhao <shutting06@gmail.com> * cleanup Kyverno managed resources on stopLeading Signed-off-by: Shuting Zhao <shutting06@gmail.com> * tag v1.4.0-beta1 Signed-off-by: Shuting Zhao <shutting06@gmail.com> * fix cleanup process on Kyverno stops Signed-off-by: Shuting Zhao <shutting06@gmail.com> * bump kind to 0.11.0, k8s v1.21 (#1980) Co-authored-by: vyankatesh <vyankatesh@neualto.com> Co-authored-by: vyankatesh <vyankateshkd@gmail.com> Co-authored-by: Jim Bugwadia <jim@nirmata.com> Co-authored-by: Pooja Singh <36136335+NoSkillGirl@users.noreply.github.com>
193 lines
5.8 KiB
Go
193 lines
5.8 KiB
Go
package generate
|
|
|
|
import (
|
|
"context"
|
|
"time"
|
|
|
|
backoff "github.com/cenkalti/backoff"
|
|
"github.com/gardener/controller-manager-library/pkg/logger"
|
|
"github.com/go-logr/logr"
|
|
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
|
|
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
|
|
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
|
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
|
|
"github.com/kyverno/kyverno/pkg/config"
|
|
"k8s.io/api/admission/v1beta1"
|
|
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
|
"k8s.io/apimachinery/pkg/labels"
|
|
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
|
"k8s.io/client-go/tools/cache"
|
|
)
|
|
|
|
// GenerateRequests provides an interface to create generate requests
// for a given resource spec and admission operation.
type GenerateRequests interface {
	// Apply creates a generate request for the given spec and operation.
	Apply(gr kyverno.GenerateRequestSpec, action v1beta1.Operation) error
}
|
|
|
|
// GeneratorChannel pairs a generate request spec with the admission
// operation (create/update/...) that triggered it; it is the message
// passed from Apply to processApply.
type GeneratorChannel struct {
	// spec is the generate request to be created or updated.
	spec kyverno.GenerateRequestSpec
	// action is the admission operation that produced this request.
	action v1beta1.Operation
}
|
|
|
|
// Generator defines the implementation to manage generate request resources.
type Generator struct {
	// client is used to create/update GenerateRequest CRs in the cluster.
	client *kyvernoclient.Clientset
	// stopCh signals shutdown; Run blocks until it is closed.
	stopCh <-chan struct{}
	log    logr.Logger
	// grLister can list/get generate requests from the shared informer's store
	// (scoped to the Kyverno namespace).
	grLister kyvernolister.GenerateRequestNamespaceLister
	// grSynced reports whether the generate-request informer cache has synced.
	grSynced cache.InformerSynced
}
|
|
|
|
// NewGenerator returns a new instance of Generate-Request resource generator
|
|
func NewGenerator(client *kyvernoclient.Clientset, grInformer kyvernoinformer.GenerateRequestInformer, stopCh <-chan struct{}, log logr.Logger) *Generator {
|
|
gen := &Generator{
|
|
client: client,
|
|
stopCh: stopCh,
|
|
log: log,
|
|
grLister: grInformer.Lister().GenerateRequests(config.KyvernoNamespace),
|
|
grSynced: grInformer.Informer().HasSynced,
|
|
}
|
|
return gen
|
|
}
|
|
|
|
// Apply creates generate request resource (blocking call if channel is full)
|
|
func (g *Generator) Apply(gr kyverno.GenerateRequestSpec, action v1beta1.Operation) error {
|
|
logger := g.log
|
|
logger.V(4).Info("creating Generate Request", "request", gr)
|
|
|
|
// Update to channel
|
|
message := GeneratorChannel{
|
|
action: action,
|
|
spec: gr,
|
|
}
|
|
go g.processApply(message)
|
|
return nil
|
|
}
|
|
|
|
// Run starts the generate request spec
|
|
func (g *Generator) Run(workers int, stopCh <-chan struct{}) {
|
|
logger := g.log
|
|
defer utilruntime.HandleCrash()
|
|
|
|
logger.V(4).Info("starting")
|
|
defer func() {
|
|
logger.V(4).Info("shutting down")
|
|
}()
|
|
|
|
if !cache.WaitForCacheSync(stopCh, g.grSynced) {
|
|
logger.Info("failed to sync informer cache")
|
|
return
|
|
}
|
|
|
|
<-g.stopCh
|
|
}
|
|
|
|
// processApply creates/updates the generate request CR for one queued
// message, logging (but not propagating) any failure.
func (g *Generator) processApply(m GeneratorChannel) {
	// NOTE(review): this logs through the package-level gardener logger
	// rather than g.log, unlike the rest of this type — confirm intentional.
	if err := g.generate(m.spec, m.action); err != nil {
		logger.Error(err, "failed to generate request CR")
	}
}
|
|
|
|
func (g *Generator) generate(grSpec kyverno.GenerateRequestSpec, action v1beta1.Operation) error {
|
|
// create/update a generate request
|
|
|
|
if err := retryApplyResource(g.client, grSpec, g.log, action, g.grLister); err != nil {
|
|
return err
|
|
}
|
|
return nil
|
|
}
|
|
|
|
// -> receiving channel to take requests to create request
|
|
// use worker pattern to read and create the CR resource
|
|
|
|
func retryApplyResource(client *kyvernoclient.Clientset, grSpec kyverno.GenerateRequestSpec,
|
|
log logr.Logger, action v1beta1.Operation, grLister kyvernolister.GenerateRequestNamespaceLister) error {
|
|
|
|
var i int
|
|
var err error
|
|
|
|
applyResource := func() error {
|
|
gr := kyverno.GenerateRequest{
|
|
Spec: grSpec,
|
|
}
|
|
|
|
gr.SetNamespace(config.KyvernoNamespace)
|
|
// Initial state "Pending"
|
|
// TODO: status is not updated
|
|
// gr.Status.State = kyverno.Pending
|
|
// generate requests created in kyverno namespace
|
|
isExist := false
|
|
if action == v1beta1.Create || action == v1beta1.Update {
|
|
log.V(4).Info("querying all generate requests")
|
|
selector := labels.SelectorFromSet(labels.Set(map[string]string{
|
|
"generate.kyverno.io/policy-name": grSpec.Policy,
|
|
"generate.kyverno.io/resource-name": grSpec.Resource.Name,
|
|
"generate.kyverno.io/resource-kind": grSpec.Resource.Kind,
|
|
"generate.kyverno.io/resource-namespace": grSpec.Resource.Namespace,
|
|
}))
|
|
grList, err := grLister.List(selector)
|
|
if err != nil {
|
|
logger.Error(err, "failed to get generate request for the resource", "kind", grSpec.Resource.Kind, "name", grSpec.Resource.Name, "namespace", grSpec.Resource.Namespace)
|
|
return err
|
|
}
|
|
|
|
for _, v := range grList {
|
|
|
|
grLabels := gr.Labels
|
|
if len(grLabels) == 0 {
|
|
grLabels = make(map[string]string)
|
|
}
|
|
grLabels["resources-update"] = "true"
|
|
gr.SetLabels(grLabels)
|
|
v.Spec.Context = gr.Spec.Context
|
|
v.Spec.Policy = gr.Spec.Policy
|
|
v.Spec.Resource = gr.Spec.Resource
|
|
|
|
_, err = client.KyvernoV1().GenerateRequests(config.KyvernoNamespace).Update(context.TODO(), v, metav1.UpdateOptions{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
isExist = true
|
|
}
|
|
|
|
if !isExist {
|
|
gr.SetGenerateName("gr-")
|
|
gr.SetLabels(map[string]string{
|
|
"generate.kyverno.io/policy-name": grSpec.Policy,
|
|
"generate.kyverno.io/resource-name": grSpec.Resource.Name,
|
|
"generate.kyverno.io/resource-kind": grSpec.Resource.Kind,
|
|
"generate.kyverno.io/resource-namespace": grSpec.Resource.Namespace,
|
|
})
|
|
_, err = client.KyvernoV1().GenerateRequests(config.KyvernoNamespace).Create(context.TODO(), &gr, metav1.CreateOptions{})
|
|
if err != nil {
|
|
return err
|
|
}
|
|
}
|
|
}
|
|
|
|
log.V(4).Info("retrying update generate request CR", "retryCount", i, "name", gr.GetGenerateName(), "namespace", gr.GetNamespace())
|
|
i++
|
|
return err
|
|
}
|
|
|
|
exbackoff := &backoff.ExponentialBackOff{
|
|
InitialInterval: 500 * time.Millisecond,
|
|
RandomizationFactor: 0.5,
|
|
Multiplier: 1.5,
|
|
MaxInterval: time.Second,
|
|
MaxElapsedTime: 3 * time.Second,
|
|
Clock: backoff.SystemClock,
|
|
}
|
|
|
|
exbackoff.Reset()
|
|
err = backoff.Retry(applyResource, exbackoff)
|
|
|
|
if err != nil {
|
|
return err
|
|
}
|
|
|
|
return nil
|
|
}
|