
merge main

Signed-off-by: Jim Bugwadia <jim@nirmata.com>
Jim Bugwadia 2021-03-25 18:00:02 -07:00
commit 8d03f8c59e
9 changed files with 244 additions and 72 deletions

View file

@@ -41,35 +41,36 @@ const resyncPeriod = 15 * time.Minute
var (
//TODO: this has been added to support backward compatibility with command line arguments
// it will be removed in the future and the configuration will be set only via configmaps
filterK8sResources string
kubeconfig string
serverIP string
runValidationInMutatingWebhook string
excludeGroupRole string
excludeUsername string
profilePort string
filterK8sResources string
kubeconfig string
serverIP string
excludeGroupRole string
excludeUsername string
profilePort string
webhookTimeout int
genWorkers int
profile bool
policyReport bool
setupLog = log.Log.WithName("setup")
policyControllerResyncPeriod time.Duration
setupLog = log.Log.WithName("setup")
)
func main() {
klog.InitFlags(nil)
log.SetLogger(klogr.New())
flag.StringVar(&filterK8sResources, "filterK8sResources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
flag.StringVar(&filterK8sResources, "filterK8sResources", "", "Resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. For example, --filterK8sResources \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
flag.StringVar(&excludeGroupRole, "excludeGroupRole", "", "")
flag.StringVar(&excludeUsername, "excludeUsername", "", "")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.IntVar(&genWorkers, "gen-workers", 20, "workers for generate controller")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "Timeout for webhook configurations")
flag.IntVar(&genWorkers, "gen-workers", 10, "Workers for generate controller")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.StringVar(&runValidationInMutatingWebhook, "runValidationInMutatingWebhook", "", "Validation will also be done using the mutation webhook, set to 'true' to enable. Older kubernetes versions do not work properly when a validation webhook is registered.")
flag.BoolVar(&profile, "profile", false, "Set this flag to 'true', to enable profiling.")
flag.StringVar(&profilePort, "profile-port", "6060", "Enable profiling at given port, default to 6060.")
flag.DurationVar(&policyControllerResyncPeriod, "background-scan", time.Hour, "Perform background scan every given interval, e.g., 30s, 15m, 1h.")
if err := flag.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level")
os.Exit(1)
@@ -155,20 +156,7 @@ func main() {
// - ClusterPolicyReport, PolicyReport
// - GenerateRequest
// - ClusterReportChangeRequest, ReportChangeRequest
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, resyncPeriod)
// Configuration Data
// dynamically load the configuration from configMap
// - resource filters
// if the configMap is updated, the configuration will be reloaded
configData := config.NewConfigData(
kubeClient,
kubeInformer.Core().V1().ConfigMaps(),
filterK8sResources,
excludeGroupRole,
excludeUsername,
log.Log.WithName("ConfigData"),
)
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, policyControllerResyncPeriod)
// EVENT GENERATOR
// - generate event with retry mechanism
@@ -185,10 +173,7 @@ func main() {
pInformer.Kyverno().V1().Policies().Lister())
// POLICY Report GENERATOR
// -- generate policy report
var reportReqGen *policyreport.Generator
var prgen *policyreport.ReportGenerator
reportReqGen = policyreport.NewReportChangeRequestGenerator(pclient,
reportReqGen := policyreport.NewReportChangeRequestGenerator(pclient,
client,
pInformer.Kyverno().V1alpha1().ReportChangeRequests(),
pInformer.Kyverno().V1alpha1().ClusterReportChangeRequests(),
@@ -198,7 +183,8 @@ func main() {
log.Log.WithName("ReportChangeRequestGenerator"),
)
prgen = policyreport.NewReportGenerator(client,
prgen := policyreport.NewReportGenerator(pclient,
client,
pInformer.Wgpolicyk8s().V1alpha1().ClusterPolicyReports(),
pInformer.Wgpolicyk8s().V1alpha1().PolicyReports(),
pInformer.Kyverno().V1alpha1().ReportChangeRequests(),
@@ -207,6 +193,20 @@ func main() {
log.Log.WithName("PolicyReportGenerator"),
)
// Configuration Data
// dynamically load the configuration from configMap
// - resource filters
// if the configMap is updated, the configuration will be reloaded
configData := config.NewConfigData(
kubeClient,
kubeInformer.Core().V1().ConfigMaps(),
filterK8sResources,
excludeGroupRole,
excludeUsername,
prgen.ReconcileCh,
log.Log.WithName("ConfigData"),
)
// POLICY CONTROLLER
// - reconciliation policy and policy violation
// - process policy on existing resources
@@ -219,9 +219,11 @@ func main() {
configData,
eventGenerator,
reportReqGen,
prgen,
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("PolicyController"),
rCache,
policyControllerResyncPeriod,
)
if err != nil {
@@ -358,7 +360,7 @@ func main() {
go reportReqGen.Run(2, stopCh)
go prgen.Run(1, stopCh)
go configData.Run(stopCh)
go policyCtrl.Run(2, stopCh)
go policyCtrl.Run(2, prgen.ReconcileCh, stopCh)
go eventGenerator.Run(3, stopCh)
go grgen.Run(10, stopCh)
go grc.Run(genWorkers, stopCh)
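
Taken together, the changes in this file wire the report generator's ReconcileCh into both the dynamic configuration watcher and the policy controller: NewReportGenerator creates the channel, NewConfigData receives the send side, and policyCtrl.Run receives the receive side. A minimal, self-contained sketch of that pattern, with placeholder logic rather than the real Kyverno constructors:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // The generator owns a buffered signal channel (ReconcileCh in the diff).
        reconcileCh := make(chan bool, 10)
        stopCh := make(chan struct{})

        // Consumer side (PolicyController.Run in the diff).
        go func() {
            for {
                select {
                case erase := <-reconcileCh:
                    fmt.Println("reconcile requested, erase results first:", erase)
                case <-stopCh:
                    return
                }
            }
        }()

        // Producer side (ConfigData and the report delete handlers in the diff).
        reconcileCh <- true  // resource filters changed: erase and rebuild
        reconcileCh <- false // a report was deleted: rebuild only

        time.Sleep(50 * time.Millisecond) // let the consumer drain; demo only
        close(stopCh)
    }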

View file

@@ -31,6 +31,7 @@ type ConfigData struct {
excludeUsername []string
restrictDevelopmentUsername []string
cmSycned cache.InformerSynced
reconcilePolicyReport chan<- bool
log logr.Logger
}
@@ -96,17 +97,18 @@ type Interface interface {
}
// NewConfigData ...
func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, filterK8sResources, excludeGroupRole, excludeUsername string, log logr.Logger) *ConfigData {
func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, filterK8sResources, excludeGroupRole, excludeUsername string, reconcilePolicyReport chan<- bool, log logr.Logger) *ConfigData {
// environment var is read at start only
if cmNameEnv == "" {
log.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
}
cd := ConfigData{
client: rclient,
cmName: os.Getenv(cmNameEnv),
cmSycned: cmInformer.Informer().HasSynced,
log: log,
client: rclient,
cmName: os.Getenv(cmNameEnv),
cmSycned: cmInformer.Informer().HasSynced,
reconcilePolicyReport: reconcilePolicyReport,
log: log,
}
cd.restrictDevelopmentUsername = []string{"minikube-user", "kubernetes-admin"}
@@ -163,7 +165,11 @@ func (cd *ConfigData) updateCM(old, cur interface{}) {
return
}
// if the data has not changed then don't reload the configmap
cd.load(*cm)
changed := cd.load(*cm)
if changed {
cd.log.Info("resource filters changed, sending reconcile signal to the policy controller")
cd.reconcilePolicyReport <- true
}
}
func (cd *ConfigData) deleteCM(obj interface{}) {
@@ -189,16 +195,16 @@ func (cd *ConfigData) deleteCM(obj interface{}) {
cd.unload(*cm)
}
func (cd *ConfigData) load(cm v1.ConfigMap) {
func (cd *ConfigData) load(cm v1.ConfigMap) (changed bool) {
logger := cd.log.WithValues("name", cm.Name, "namespace", cm.Namespace)
if cm.Data == nil {
logger.V(4).Info("configuration: No data defined in ConfigMap")
return
}
// parse and load the configuration
cd.mux.Lock()
defer cd.mux.Unlock()
// get resource filters
filters, ok := cm.Data["resourceFilters"]
if !ok {
logger.V(4).Info("configuration: No resourceFilters defined in ConfigMap")
@@ -208,12 +214,11 @@ func (cd *ConfigData) load(cm v1.ConfigMap) {
logger.V(4).Info("resourceFilters did not change")
} else {
logger.V(2).Info("Updated resource filters", "oldFilters", cd.filters, "newFilters", newFilters)
// update filters
cd.filters = newFilters
changed = true
}
}
// get excludeGroupRole
excludeGroupRole, ok := cm.Data["excludeGroupRole"]
if !ok {
logger.V(4).Info("configuration: No excludeGroupRole defined in ConfigMap")
@@ -224,11 +229,10 @@ func (cd *ConfigData) load(cm v1.ConfigMap) {
logger.V(4).Info("excludeGroupRole did not change")
} else {
logger.V(2).Info("Updated resource excludeGroupRoles", "oldExcludeGroupRole", cd.excludeGroupRole, "newExcludeGroupRole", newExcludeGroupRoles)
// update filters
cd.excludeGroupRole = newExcludeGroupRoles
changed = true
}
// get excludeUsername
excludeUsername, ok := cm.Data["excludeUsername"]
if !ok {
logger.V(4).Info("configuration: No excludeUsername defined in ConfigMap")
@@ -238,11 +242,12 @@ func (cd *ConfigData) load(cm v1.ConfigMap) {
logger.V(4).Info("excludeGroupRole did not change")
} else {
logger.V(2).Info("Updated resource excludeUsernames", "oldExcludeUsername", cd.excludeUsername, "newExcludeUsername", excludeUsernames)
// update filters
cd.excludeUsername = excludeUsernames
changed = true
}
}
return changed
}
//TODO: this has been added to support backward compatibility with command line arguments
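
The load rewrite threads one changed flag through all three settings so that updateCM signals the policy controller only when something actually differed. A reduced sketch of that compare-and-flag pattern; loadSetting is illustrative and not part of the Kyverno codebase:

    // loadSetting updates dst from the ConfigMap data and reports whether
    // the value changed; callers OR the results together.
    func loadSetting(dst *string, data map[string]string, key string) bool {
        val, ok := data[key]
        if !ok || val == *dst {
            return false
        }
        *dst = val
        return true
    }

    // Usage, mirroring updateCM -> load -> reconcile signal:
    //   changed := loadSetting(&filters, cm.Data, "resourceFilters")
    //   changed = loadSetting(&groupRole, cm.Data, "excludeGroupRole") || changed
    //   if changed {
    //       reconcilePolicyReport <- true
    //   }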

View file

@@ -31,7 +31,8 @@ func IsReference(value string) bool {
return len(groups) != 0
}
//ReplaceAllVars replaces all variables with the value defined in the replacement function
// ReplaceAllVars replaces all variables with the value defined in the replacement function
// This is used to avoid validation errors
func ReplaceAllVars(src string, repl func(string) string) string {
return regexVariables.ReplaceAllStringFunc(src, repl)
}
@@ -293,8 +294,6 @@ func getValueFromReference(fullDocument interface{}, path string) (interface{},
return element, nil
}
func SubstituteAllInRule(log logr.Logger, ctx context.EvalInterface, typedRule kyverno.Rule) (_ kyverno.Rule, err error) {
var rule interface{}
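
For context, ReplaceAllVars applies the replacement function to every variable match in the string. A small hedged example, assuming Kyverno's {{...}} variable syntax:

    // Each {{...}} match is passed to the callback and replaced wholesale,
    // braces included.
    out := variables.ReplaceAllVars(
        "{{request.namespace}}/{{request.object.metadata.name}}",
        func(string) string { return "X" },
    )
    // out == "X/X"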

View file

@@ -1,12 +1,20 @@
package policy
import (
"context"
"fmt"
"strings"
"time"
"github.com/go-logr/logr"
v1alpha1 "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
policyreportlister "github.com/kyverno/kyverno/pkg/client/listers/policyreport/v1alpha1"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/policyreport"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
func (pc *PolicyController) report(policy string, engineResponses []*response.EngineResponse, logger logr.Logger) {
@@ -22,6 +30,107 @@ func (pc *PolicyController) report(policy string, engineResponses []*response.En
logger.V(4).Info("added a request to RCR generator", "key", info.ToKey())
}
// forceReconciliation forces a background scan by adding all policies to the workqueue
func (pc *PolicyController) forceReconciliation(reconcileCh <-chan bool, stopCh <-chan struct{}) {
logger := pc.log.WithName("forceReconciliation")
ticker := time.NewTicker(pc.reconcilePeriod)
for {
select {
case <-ticker.C:
logger.Info("performing the background scan", "scan interval", pc.reconcilePeriod.String())
if err := pc.policyReportEraser.EraseResultsEntries(eraseResultsEntries); err != nil {
logger.Error(err, "continue reconciling policy reports")
}
pc.requeuePolicies()
case erase := <-reconcileCh:
logger.Info("received the reconcile signal, reconciling policy report")
if erase {
if err := pc.policyReportEraser.EraseResultsEntries(eraseResultsEntries); err != nil {
logger.Error(err, "continue reconciling policy reports")
}
}
pc.requeuePolicies()
case <-stopCh:
return
}
}
}
func eraseResultsEntries(pclient *kyvernoclient.Clientset, reportLister policyreportlister.PolicyReportLister, clusterReportLister policyreportlister.ClusterPolicyReportLister) error {
var errors []string
if polrs, err := reportLister.List(labels.Everything()); err != nil {
errors = append(errors, err.Error())
} else {
for _, polr := range polrs {
polr.Results = []*v1alpha1.PolicyReportResult{}
polr.Summary = v1alpha1.PolicyReportSummary{}
if _, err = pclient.Wgpolicyk8sV1alpha1().PolicyReports(polr.GetNamespace()).Update(context.TODO(), polr, metav1.UpdateOptions{}); err != nil {
errors = append(errors, fmt.Sprintf("%s/%s/%s: %v", polr.Kind, polr.Namespace, polr.Name, err))
}
}
}
if cpolrs, err := clusterReportLister.List(labels.Everything()); err != nil {
errors = append(errors, err.Error())
} else {
for _, cpolr := range cpolrs {
cpolr.Results = []*v1alpha1.PolicyReportResult{}
cpolr.Summary = v1alpha1.PolicyReportSummary{}
if _, err = pclient.Wgpolicyk8sV1alpha1().ClusterPolicyReports().Update(context.TODO(), cpolr, metav1.UpdateOptions{}); err != nil {
errors = append(errors, fmt.Sprintf("%s/%s: %v", cpolr.Kind, cpolr.Name, err))
}
}
}
if len(errors) == 0 {
return nil
}
return fmt.Errorf("failed to erase results entries %v", strings.Join(errors, ";"))
}
func (pc *PolicyController) requeuePolicies() {
logger := pc.log.WithName("requeuePolicies")
if cpols, err := pc.pLister.List(labels.Everything()); err == nil {
for _, cpol := range cpols {
if !pc.canBackgroundProcess(cpol) {
continue
}
pc.enqueuePolicy(cpol)
}
} else {
logger.Error(err, "unable to list ClusterPolicies")
}
namespaces, err := pc.nsLister.List(labels.Everything())
if err != nil {
logger.Error(err, "unable to list namespaces")
return
}
for _, ns := range namespaces {
pols, err := pc.npLister.Policies(ns.GetName()).List(labels.Everything())
if err != nil {
logger.Error(err, "unable to list Policies", "namespace", ns.GetName())
continue
}
for _, p := range pols {
pol := ConvertPolicyToClusterPolicy(p)
if !pc.canBackgroundProcess(pol) {
continue
}
pc.enqueuePolicy(pol)
}
}
}
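
eraseResultsEntries deliberately keeps going past per-report update failures and surfaces them all at once. The shape of that error aggregation, reduced to a sketch; updateAll and its parameters are illustrative, not part of this commit:

    import (
        "fmt"
        "strings"
    )

    // updateAll mirrors the error handling in eraseResultsEntries: collect
    // per-item failures and report them together instead of aborting early.
    func updateAll(items []string, update func(string) error) error {
        var errs []string
        for _, item := range items {
            if err := update(item); err != nil {
                errs = append(errs, fmt.Sprintf("%s: %v", item, err))
            }
        }
        if len(errs) == 0 {
            return nil
        }
        return fmt.Errorf("failed to erase results entries: %s", strings.Join(errs, "; "))
    }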
func generateEvents(log logr.Logger, ers []*response.EngineResponse) []event.Info {
var eventInfos []event.Info
for _, er := range ers {

View file

@@ -9,6 +9,7 @@ import (
"github.com/jmespath/go-jmespath"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/engine/variables"
"github.com/kyverno/kyverno/pkg/kyverno/common"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
@@ -681,7 +682,10 @@ func validateAPICall(entry kyverno.ContextEntry) error {
return fmt.Errorf("both configMap and apiCall are not allowed in a context entry")
}
if _, err := engine.NewAPIPath(entry.APICall.URLPath); err != nil {
// Replace all variables to prevent validation failing on variable keys.
urlPath := variables.ReplaceAllVars(entry.APICall.URLPath, func(s string) string { return "kyvernoapicallvariable" })
if _, err := engine.NewAPIPath(urlPath); err != nil {
return err
}
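
The masking matters because NewAPIPath validates the path syntactically, and a raw variable segment is not a valid URL path. A hedged illustration; the urlPath value here is hypothetical:

    urlPath := "/api/v1/namespaces/{{request.namespace}}/pods"
    masked := variables.ReplaceAllVars(urlPath, func(string) string {
        return "kyvernoapicallvariable"
    })
    // masked == "/api/v1/namespaces/kyvernoapicallvariable/pods"
    // NewAPIPath now sees an ordinary path, so only genuine syntax errors fail.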

View file

@@ -92,9 +92,13 @@ type PolicyController struct {
// policy report generator
prGenerator policyreport.GeneratorInterface
policyReportEraser policyreport.PolicyReportEraser
// resCache - controls creation and fetching of resource informer cache
resCache resourcecache.ResourceCache
reconcilePeriod time.Duration
log logr.Logger
}
@@ -107,9 +111,11 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
configHandler config.Interface,
eventGen event.Interface,
prGenerator policyreport.GeneratorInterface,
policyReportEraser policyreport.PolicyReportEraser,
namespaces informers.NamespaceInformer,
log logr.Logger,
resCache resourcecache.ResourceCache) (*PolicyController, error) {
resCache resourcecache.ResourceCache,
reconcilePeriod time.Duration) (*PolicyController, error) {
// Event broadcaster
eventBroadcaster := record.NewBroadcaster()
@@ -121,15 +127,17 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
pc := PolicyController{
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
prGenerator: prGenerator,
log: log,
resCache: resCache,
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
prGenerator: prGenerator,
policyReportEraser: policyReportEraser,
log: log,
resCache: resCache,
reconcilePeriod: reconcilePeriod,
}
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -182,7 +190,7 @@ func (pc *PolicyController) addPolicy(obj interface{}) {
logger := pc.log
p := obj.(*kyverno.ClusterPolicy)
logger.Info("policy created event", "uid", p.UID, "kind", "ClusterPolicy", "policy_name", p.Name)
logger.Info("policy created", "uid", p.UID, "kind", "ClusterPolicy", "name", p.Name)
if !pc.canBackgroundProcess(p) {
return
@@ -205,7 +213,7 @@ func (pc *PolicyController) updatePolicy(old, cur interface{}) {
return
}
logger.V(4).Info("updating policy", "name", oldP.Name)
logger.V(2).Info("updating policy", "name", oldP.Name)
pc.enqueueRCRDeletedRule(oldP, curP)
pc.enqueuePolicy(curP)
@@ -228,7 +236,7 @@ func (pc *PolicyController) deletePolicy(obj interface{}) {
}
}
logger.Info("policy deleted event", "uid", p.UID, "kind", "ClusterPolicy", "policy_name", p.Name)
logger.Info("policy deleted", "uid", p.UID, "kind", "ClusterPolicy", "name", p.Name)
// we process policies that are not set for background processing
// as we need to clean up GRs when a policy is deleted
@@ -240,7 +248,7 @@ func (pc *PolicyController) addNsPolicy(obj interface{}) {
logger := pc.log
p := obj.(*kyverno.Policy)
logger.Info("policy created event", "uid", p.UID, "kind", "Policy", "policy_name", p.Name, "namespaces", p.Namespace)
logger.Info("policy created", "uid", p.UID, "kind", "Policy", "name", p.Name, "namespaces", p.Namespace)
pol := ConvertPolicyToClusterPolicy(p)
if !pc.canBackgroundProcess(pol) {
@@ -335,7 +343,7 @@ func (pc *PolicyController) enqueuePolicy(policy *kyverno.ClusterPolicy) {
}
// Run begins watching and syncing.
func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
func (pc *PolicyController) Run(workers int, reconcileCh <-chan bool, stopCh <-chan struct{}) {
logger := pc.log
defer utilruntime.HandleCrash()
@@ -352,6 +360,9 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
for i := 0; i < workers; i++ {
go wait.Until(pc.worker, time.Second, stopCh)
}
go pc.forceReconciliation(reconcileCh, stopCh)
<-stopCh
}
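
Run keeps the usual client-go controller shape: a fixed pool of workers restarted by wait.Until, plus the new reconciliation goroutine. A minimal sketch of that shape; the worker and reconcile bodies are stand-ins:

    import (
        "time"

        "k8s.io/apimachinery/pkg/util/wait"
    )

    func run(workers int, worker func(), reconcile func(), stopCh <-chan struct{}) {
        for i := 0; i < workers; i++ {
            // wait.Until re-invokes worker every second until stopCh closes,
            // so a worker that returns early is simply restarted.
            go wait.Until(worker, time.Second, stopCh)
        }
        go reconcile() // the forceReconciliation loop added earlier in this commit
        <-stopCh       // block until shutdown, keeping the goroutines alive
    }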

View file

@@ -8,9 +8,21 @@ import (
"github.com/cornelk/hashmap"
changerequest "github.com/kyverno/kyverno/pkg/api/kyverno/v1alpha1"
report "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
policyreportlister "github.com/kyverno/kyverno/pkg/client/listers/policyreport/v1alpha1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
type PolicyReportEraser interface {
EraseResultsEntries(erase EraseResultsEntries) error
}
type EraseResultsEntries = func(pclient *kyvernoclient.Clientset, reportLister policyreportlister.PolicyReportLister, clusterReportLister policyreportlister.ClusterPolicyReportLister) error
func (g *ReportGenerator) EraseResultsEntries(erase EraseResultsEntries) error {
return erase(g.pclient, g.reportLister, g.clusterReportLister)
}
type deletedResource struct {
kind, ns, name string
}
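
The PolicyReportEraser interface plus the EraseResultsEntries function type above is a small dependency-inversion trick: the policy controller supplies the erase logic, while the report generator supplies the clientset and listers it already owns, so neither package needs the other's internals. Reduced to a sketch with placeholder types:

    // Placeholder standing in for the Kyverno clientset and listers.
    type clients struct{}

    // The caller provides the logic...
    type eraseFn = func(c *clients) error

    // ...the generator provides the dependencies.
    type eraser interface {
        EraseResultsEntries(erase eraseFn) error
    }

    type reportGen struct{ c *clients }

    func (g *reportGen) EraseResultsEntries(erase eraseFn) error {
        return erase(g.c) // inject owned deps into caller-supplied logic
    }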

View file

@ -9,6 +9,7 @@ import (
"github.com/go-logr/logr"
changerequest "github.com/kyverno/kyverno/pkg/api/kyverno/v1alpha1"
report "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
requestinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
policyreportinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/policyreport/v1alpha1"
requestlister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1alpha1"
@@ -37,6 +38,7 @@ const (
// ReportGenerator creates policy report
type ReportGenerator struct {
pclient *kyvernoclient.Clientset
dclient *dclient.Client
reportLister policyreport.PolicyReportLister
@@ -56,11 +58,16 @@ type ReportGenerator struct {
queue workqueue.RateLimitingInterface
// ReconcileCh sends a signal to the policy controller to force reconciliation of policy reports
// if true is sent, the reports' results will be erased; this is used to recover from invalid records
ReconcileCh chan bool
log logr.Logger
}
// NewReportGenerator returns a new instance of policy report generator
func NewReportGenerator(
pclient *kyvernoclient.Clientset,
dclient *dclient.Client,
clusterReportInformer policyreportinformer.ClusterPolicyReportInformer,
reportInformer policyreportinformer.PolicyReportInformer,
@@ -70,9 +77,11 @@ func NewReportGenerator(
log logr.Logger) *ReportGenerator {
gen := &ReportGenerator{
dclient: dclient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), prWorkQueueName),
log: log,
pclient: pclient,
dclient: dclient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), prWorkQueueName),
ReconcileCh: make(chan bool, 10),
log: log,
}
reportReqInformer.Informer().AddEventHandler(
@@ -87,6 +96,16 @@ func NewReportGenerator(
UpdateFunc: gen.updateClusterReportChangeRequest,
})
reportInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
DeleteFunc: gen.deletePolicyReport,
})
clusterReportInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
DeleteFunc: gen.deleteClusterPolicyReport,
})
gen.clusterReportLister = clusterReportInformer.Lister()
gen.clusterReportSynced = clusterReportInformer.Informer().HasSynced
gen.reportLister = reportInformer.Lister()
@@ -166,6 +185,17 @@ func (g *ReportGenerator) updateClusterReportChangeRequest(old interface{}, cur
g.queue.Add("")
}
func (g *ReportGenerator) deletePolicyReport(obj interface{}) {
report := obj.(*report.PolicyReport)
g.log.V(2).Info("PolicyReport deleted", "name", report.GetName())
g.ReconcileCh <- false
}
func (g *ReportGenerator) deleteClusterPolicyReport(obj interface{}) {
g.log.V(2).Info("ClusterPolicyReport deleted")
g.ReconcileCh <- false
}
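
Note that both delete handlers send false rather than true: losing a report means it must be rebuilt, but there are no stale result entries to erase first. A sketch of registering such a handler with client-go; the informer and channel arguments are assumed, not taken verbatim from this commit:

    import "k8s.io/client-go/tools/cache"

    // register wires a delete handler that nudges the reconcile channel.
    // reconcileCh is buffered (size 10 in this commit), so short bursts of
    // deletions do not stall the informer's event handler for long.
    func register(informer cache.SharedIndexInformer, reconcileCh chan<- bool) {
        informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
            DeleteFunc: func(obj interface{}) {
                reconcileCh <- false // rebuild reports; nothing to erase
            },
        })
    }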
// Run starts the workers
func (g *ReportGenerator) Run(workers int, stopCh <-chan struct{}) {
logger := g.log

View file

@@ -143,12 +143,12 @@ func (wrc *Register) cleanupKyvernoResource() bool {
logger := wrc.log.WithName("cleanupKyvernoResource")
deploy, err := wrc.client.GetResource("", "Deployment", deployNamespace, deployName)
if err != nil {
logger.Error(err, "failed to get deployment")
return false
logger.Error(err, "failed to get deployment, cleanup kyverno resources anyway")
return true
}
if deploy.GetDeletionTimestamp() != nil {
logger.Info("Kyverno is terminating, clean up Kyverno resources")
logger.Info("Kyverno is terminating, cleanup Kyverno resources")
return true
}
@@ -158,7 +158,7 @@ func (wrc *Register) cleanupKyvernoResource() bool {
}
if replicas == 0 {
logger.Info("Kyverno is scaled to zero, clean up Kyverno resources")
logger.Info("Kyverno is scaled to zero, cleanup Kyverno resources")
return true
}
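
The behavioral change in cleanupKyvernoResource is a fail-open default: if the deployment cannot be read, assume Kyverno is going away and clean up webhook configurations anyway, since a dangling webhook with no backing service can reject cluster requests. The decision logic, reduced to a sketch with a placeholder deployment type:

    type deployment struct {
        terminating bool
        replicas    int32
    }

    func shouldCleanup(get func() (*deployment, error)) bool {
        d, err := get()
        if err != nil {
            return true // fail open: clean up even when the lookup fails
        }
        if d.terminating {
            return true // Kyverno is being deleted
        }
        return d.replicas == 0 // scaled to zero
    }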