mirror of https://github.com/kyverno/kyverno.git synced 2025-03-06 07:57:07 +00:00

structuring log (#12111)

* structuring log

Signed-off-by: Kamaal <kamaal@macs-MacBook-Air.local>

* Update controller.go

Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>

* Update main.go

Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>

* Update run.go

Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>

* Update config.go

Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>

* Update pkg/webhooks/resource/mutation/mutation.go

Co-authored-by: shuting <shuting@nirmata.com>
Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>

* Update pkg/webhooks/resource/mutation/mutation.go

Co-authored-by: shuting <shuting@nirmata.com>
Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
Signed-off-by: Kamaal <kamaal@macs-MacBook-Air.local>

---------

Signed-off-by: Kamaal <kamaal@macs-MacBook-Air.local>
Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
Co-authored-by: Kamaal <kamaal@macs-MacBook-Air.local>
Co-authored-by: shuting <shuting@nirmata.com>
Author: Mohd Kamaal
Date: 2025-02-13 20:32:02 +05:30 (committed by GitHub)
Parent: 1fa1c8674e
Commit: de75c64a02
GPG key ID: B5690EEEBB952194 (no known key found for this signature in database)
55 changed files with 163 additions and 163 deletions
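
The diff below follows a common logr pattern: routine progress and setup messages move from unconditional Info calls to leveled V(n).Info calls, so they are only emitted when the component runs with a high enough verbosity. The following minimal sketch illustrates that gating behaviour; it is not Kyverno source, and the funcr backend and the Verbosity value of 2 (standing in for a -v=2 style flag) are assumptions made for the example.

// Illustrative sketch only (not Kyverno code): how logr verbosity gating works.
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// A simple logr backend that prints to stdout. Verbosity: 2 plays the role
	// of a -v=2 style flag: only messages logged at V(0), V(1) or V(2) are emitted.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 2})

	logger = logger.WithName("kube-client")

	logger.Info("important, always shown")                // V(0): printed
	logger.V(2).Info("create kube client...", "qps", 300) // printed, 2 <= 2
	logger.V(4).Info("starting controller", "workers", 2) // suppressed, 4 > 2
}

Raising Verbosity to 4 would also emit the suppressed line, mirroring how operators opt in to the more detailed V(3)/V(4) messages introduced by this commit.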


@@ -437,7 +437,7 @@ func (c *ApplyCommandConfig) applyPolicies(
 ers, err := processor.ApplyPoliciesOnResource()
 if err != nil {
 if c.ContinueOnFail {
-log.Log.Info(fmt.Sprintf("failed to apply policies on resource %s (%s)\n", resource.GetName(), err.Error()))
+log.Log.V(2).Info(fmt.Sprintf("failed to apply policies on resource %s (%s)\n", resource.GetName(), err.Error()))
 continue
 }
 return &rc, resources, responses, fmt.Errorf("failed to apply policies on resource %s (%w)", resource.GetName(), err)
@@ -446,7 +446,7 @@ func (c *ApplyCommandConfig) applyPolicies(
 }
 for _, policy := range validPolicies {
 if policy.GetNamespace() == "" && policy.GetKind() == "Policy" {
-log.Log.Info(fmt.Sprintf("Policy %s has no namespace detected. Ensure that namespaced policies are correctly loaded.", policy.GetNamespace()))
+log.Log.V(3).Info(fmt.Sprintf("Policy %s has no namespace detected. Ensure that namespaced policies are correctly loaded.", policy.GetNamespace()))
 }
 }
 return &rc, resources, responses, nil


@@ -266,7 +266,7 @@ func addGVKToResourceTypesMap(kind string, resourceTypesMap map[schema.GroupVers
 group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
 gvrss, err := client.Discovery().FindResources(group, version, kind, subresource)
 if err != nil {
-log.Log.Info("failed to find resource", "kind", kind, "error", err)
+log.Log.V(2).Info("failed to find resource", "kind", kind, "error", err)
 return
 }
 for parent, child := range gvrss {


@@ -12,21 +12,21 @@ func parse(vars ...string) map[string]string {
 variable = strings.TrimSpace(variable)
 kvs := strings.Split(variable, "=")
 if len(kvs) != 2 {
-log.Log.Info("ignored variable", "variable", variable)
+log.Log.V(3).Info("ignored variable", "variable", variable)
 continue
 }
 key := strings.TrimSpace(kvs[0])
 value := strings.TrimSpace(kvs[1])
 if len(value) == 0 || len(key) == 0 {
-log.Log.Info("ignored variable", "variable", variable)
+log.Log.V(3).Info("ignored variable", "variable", variable)
 continue
 }
 if strings.Contains(key, "request.object.") {
-log.Log.Info("ignored variable (contains `request.object.`)", "variable", variable)
+log.Log.V(3).Info("ignored variable (contains `request.object.`)", "variable", variable)
 continue
 }
 if result[key] != "" {
-log.Log.Info("ignored variable (duplicated)", "variable", variable)
+log.Log.V(3).Info("ignored variable (duplicated)", "variable", variable)
 continue
 }
 result[key] = value


@@ -40,7 +40,7 @@ func createClientConfig(logger logr.Logger, rateLimitQPS float64, rateLimitBurst
 func createKubernetesClient(logger logr.Logger, rateLimitQPS float64, rateLimitBurst int, opts ...kubeclient.NewOption) kubernetes.Interface {
 logger = logger.WithName("kube-client")
-logger.Info("create kube client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create kube client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := kubeclient.NewForConfig(createClientConfig(logger, rateLimitQPS, rateLimitBurst), opts...)
 checkError(logger, err, "failed to create kubernetes client")
 return client
@@ -48,7 +48,7 @@ func createKubernetesClient(logger logr.Logger, rateLimitQPS float64, rateLimitB
 func createKyvernoClient(logger logr.Logger, opts ...kyverno.NewOption) versioned.Interface {
 logger = logger.WithName("kyverno-client")
-logger.Info("create kyverno client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create kyverno client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := kyverno.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create kyverno client")
 return client
@@ -56,7 +56,7 @@ func createKyvernoClient(logger logr.Logger, opts ...kyverno.NewOption) versione
 func createDynamicClient(logger logr.Logger, opts ...dyn.NewOption) dynamic.Interface {
 logger = logger.WithName("dynamic-client")
-logger.Info("create dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := dyn.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create dynamic client")
 return client
@@ -64,7 +64,7 @@ func createDynamicClient(logger logr.Logger, opts ...dyn.NewOption) dynamic.Inte
 func createMetadataClient(logger logr.Logger, opts ...meta.NewOption) metadata.Interface {
 logger = logger.WithName("metadata-client")
-logger.Info("create metadata client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create metadata client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := meta.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create metadata client")
 return client
@@ -72,7 +72,7 @@ func createMetadataClient(logger logr.Logger, opts ...meta.NewOption) metadata.I
 func createApiServerClient(logger logr.Logger, opts ...apisrv.NewOption) apiserver.Interface {
 logger = logger.WithName("apiserver-client")
-logger.Info("create apiserver client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create apiserver client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := apisrv.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create apiserver client")
 return client
@@ -80,7 +80,7 @@ func createApiServerClient(logger logr.Logger, opts ...apisrv.NewOption) apiserv
 func createKyvernoDynamicClient(logger logr.Logger, ctx context.Context, dyn dynamic.Interface, kube kubernetes.Interface, resync time.Duration) dclient.Interface {
 logger = logger.WithName("d-client")
-logger.Info("create the kyverno dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create the kyverno dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := dclient.NewClient(ctx, dyn, kube, resync)
 checkError(logger, err, "failed to create d client")
 return client
@@ -88,7 +88,7 @@ func createKyvernoDynamicClient(logger logr.Logger, ctx context.Context, dyn dyn
 func createEventsClient(logger logr.Logger, metricsManager metrics.MetricsConfigManager) eventsv1.EventsV1Interface {
 logger = logger.WithName("events-client")
-logger.Info("create the events client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create the events client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client := kubeclient.From(createKubernetesClient(logger, eventsRateLimitQPS, eventsRateLimitBurst), kubeclient.WithTracing())
 client = client.WithMetrics(metricsManager, metrics.KubeClient)
 return client.EventsV1()
@@ -96,7 +96,7 @@ func createEventsClient(logger logr.Logger, metricsManager metrics.MetricsConfig
 func CreateAggregatorClient(logger logr.Logger, opts ...agg.NewOption) aggregator.Interface {
 logger = logger.WithName("aggregator-client")
-logger.Info("create aggregator client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create aggregator client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := agg.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create aggregator client")
 return client


@@ -29,8 +29,8 @@ func NewController(name string, c controllers.Controller, w int) Controller {
 func (c controller) Run(ctx context.Context, logger logr.Logger, wg *wait.Group) {
 logger = logger.WithValues("name", c.name)
 wg.Start(func() {
-logger.Info("starting controller", "workers", c.workers)
-defer logger.Info("controller stopped")
+logger.V(2).Info("starting controller", "workers", c.workers)
+defer logger.V(2).Info("controller stopped")
 c.controller.Run(ctx, c.workers)
 })
 }


@@ -43,7 +43,7 @@ func NewEngine(
 ) engineapi.Engine {
 configMapResolver := NewConfigMapResolver(ctx, logger, kubeClient, resyncPeriod)
 logger = logger.WithName("engine")
-logger.Info("setup engine...")
+logger.V(2).Info("setup engine...")
 return engine.NewEngine(
 configuration,
 metricsConfiguration,
@@ -62,7 +62,7 @@ func NewExceptionSelector(
 kyvernoInformer kyvernoinformer.SharedInformerFactory,
 ) (engineapi.PolicyExceptionSelector, Controller) {
 logger = logger.WithName("exception-selector").WithValues("enablePolicyException", enablePolicyException, "exceptionNamespace", exceptionNamespace)
-logger.Info("setup exception selector...")
+logger.V(2).Info("setup exception selector...")
 if !enablePolicyException {
 return nil, nil
 }
@@ -91,7 +91,7 @@ func NewConfigMapResolver(
 resyncPeriod time.Duration,
 ) engineapi.ConfigmapResolver {
 logger = logger.WithName("configmap-resolver").WithValues("enableConfigMapCaching", enableConfigMapCaching)
-logger.Info("setup config map resolver...")
+logger.V(2).Info("setup config map resolver...")
 clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
 checkError(logger, err, "failed to create client based resolver")
 if !enableConfigMapCaching {


@@ -7,7 +7,7 @@ import (
 func setupImageVerifyCache(logger logr.Logger) imageverifycache.Client {
 logger = logger.WithName("image-verify-cache").WithValues("enabled", imageVerifyCacheEnabled, "maxsize", imageVerifyCacheMaxSize, "ttl", imageVerifyCacheTTLDuration)
-logger.Info("setup image verify cache...")
+logger.V(2).Info("setup image verify cache...")
 opts := []imageverifycache.Option{
 imageverifycache.WithLogger(logger),
 imageverifycache.WithCacheEnableFlag(imageVerifyCacheEnabled),


@@ -9,11 +9,11 @@ import (
 func setupMaxProcs(logger logr.Logger) func() {
 logger = logger.WithName("maxprocs")
-logger.Info("setup maxprocs...")
+logger.V(2).Info("setup maxprocs...")
 undo, err := maxprocs.Set(
 maxprocs.Logger(
 func(format string, args ...interface{}) {
-logger.Info(fmt.Sprintf(format, args...))
+logger.V(4).Info(fmt.Sprintf(format, args...))
 },
 ),
 )


@@ -15,7 +15,7 @@ import (
 func SetupMetrics(ctx context.Context, logger logr.Logger, metricsConfiguration config.MetricsConfiguration, kubeClient kubernetes.Interface) (metrics.MetricsConfigManager, context.CancelFunc) {
 logger = logger.WithName("metrics")
-logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
+logger.V(2).Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
 metricsAddr := ":" + metricsPort
 metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
 ctx,


@@ -10,7 +10,7 @@ import (
 func setupProfiling(logger logr.Logger) {
 logger = logger.WithName("profiling").WithValues("enabled", profilingEnabled, "address", profilingAddress, "port", profilingPort)
 if profilingEnabled {
-logger.Info("setup profiling...")
+logger.V(2).Info("setup profiling...")
 profiling.Start(logger, net.JoinHostPort(profilingAddress, profilingPort))
 }
 }


@@ -15,7 +15,7 @@ import (
 func setupRegistryClient(ctx context.Context, logger logr.Logger, client kubernetes.Interface) (registryclient.Client, corev1listers.SecretNamespaceLister) {
 logger = logger.WithName("registry-client").WithValues("secrets", imagePullSecrets, "insecure", allowInsecureRegistry)
-logger.Info("setup registry client...")
+logger.V(2).Info("setup registry client...")
 factory := kubeinformers.NewSharedInformerFactoryWithOptions(client, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 secretLister := factory.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 // start informers and wait for cache sync


@@ -10,6 +10,6 @@ import (
 func setupReporting(logger logr.Logger) reportutils.ReportingConfiguration {
 logger = logger.WithName("setup-reporting").WithValues("enableReporting", enableReporting)
 cfg := reportutils.NewReportingConfig(strings.Split(enableReporting, ",")...)
-logger.Info("setting up reporting...", "validate", cfg.ValidateReportsEnabled(), "mutate", cfg.MutateReportsEnabled(), "mutateExisiting", cfg.MutateExistingReportsEnabled(), "imageVerify", cfg.ImageVerificationReportsEnabled(), "generate", cfg.GenerateReportsEnabled())
+logger.V(2).Info("setting up reporting...", "validate", cfg.ValidateReportsEnabled(), "mutate", cfg.MutateReportsEnabled(), "mutateExisiting", cfg.MutateExistingReportsEnabled(), "imageVerify", cfg.ImageVerificationReportsEnabled(), "generate", cfg.GenerateReportsEnabled())
 return cfg
 }


@@ -26,7 +26,7 @@ func shutdown(logger logr.Logger, sdowns ...context.CancelFunc) context.CancelFu
 return func() {
 for i := range sdowns {
 if sdowns[i] != nil {
-logger.Info("shutting down...")
+logger.V(2).Info("shutting down...")
 defer sdowns[i]()
 }
 }


@@ -13,6 +13,6 @@ var Context = context.Background()
 func setupSignals(logger logr.Logger) (context.Context, context.CancelFunc) {
 logger = logger.WithName("signals")
-logger.Info("setup signals...")
+logger.V(2).Info("setup signals...")
 return signal.NotifyContext(Context, os.Interrupt, syscall.SIGTERM)
 }


@@ -12,7 +12,7 @@ import (
 func SetupTracing(logger logr.Logger, name string, kubeClient kubernetes.Interface) context.CancelFunc {
 logger = logger.WithName("tracing").WithValues("enabled", tracingEnabled, "name", name, "address", tracingAddress, "port", tracingPort, "creds", tracingCreds)
 if tracingEnabled {
-logger.Info("setup tracing...")
+logger.V(2).Info("setup tracing...")
 shutdown, err := tracing.NewTraceConfig(
 logger,
 name,


@@ -16,7 +16,7 @@ func setupSigstoreTUF(ctx context.Context, logger logr.Logger) {
 }
 logger = logger.WithName("sigstore-tuf").WithValues("tufRoot", tufRoot, "tufRootRaw", tufRootRaw, "tufMirror", tufMirror)
-logger.Info("setup tuf client for sigstore...")
+logger.V(2).Info("setup tuf client for sigstore...")
 var tufRootBytes []byte
 var err error
 if tufRoot != "" {
@@ -32,7 +32,7 @@ func setupSigstoreTUF(ctx context.Context, logger logr.Logger) {
 tufRootBytes = root
 }
-logger.Info("Initializing TUF root")
+logger.V(2).Info("Initializing TUF root")
 if err := tuf.Initialize(ctx, tufMirror, tufRootBytes); err != nil {
 checkError(logger, err, fmt.Sprintf("Failed to initialize TUF client from %s : %v", tufRoot, err))
 }


@@ -171,10 +171,10 @@ func process(client dclient.Interface, kyvernoclient kyvernoclient.Interface, do
 select {
 case out <- executeRequest(client, kyvernoclient, req):
 case <-done:
-logger.Info("done")
+logger.V(4).Info("done")
 return
 case <-stopCh:
-logger.Info("shutting down")
+logger.V(4).Info("shutting down")
 return
 }
 }
@@ -194,10 +194,10 @@ func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan err
 select {
 case out <- err:
 case <-done:
-logger.Info("done")
+logger.V(4).Info("done")
 return
 case <-stopCh:
-logger.Info("shutting down")
+logger.V(4).Info("shutting down")
 return
 }
 }


@@ -84,7 +84,7 @@ func showWarnings(ctx context.Context, logger logr.Logger) {
 logger = logger.WithName("warnings")
 // log if `forceFailurePolicyIgnore` flag has been set or not
 if toggle.FromContext(ctx).ForceFailurePolicyIgnore() {
-logger.Info("'ForceFailurePolicyIgnore' is enabled, all policies with policy failures will be set to Ignore")
+logger.V(2).Info("'ForceFailurePolicyIgnore' is enabled, all policies with policy failures will be set to Ignore")
 }
 }


@@ -266,7 +266,7 @@ func main() {
 os.Exit(1)
 }
 }
-setup.Logger.Info("background scan interval", "duration", backgroundScanInterval.String())
+setup.Logger.V(2).Info("background scan interval", "duration", backgroundScanInterval.String())
 // informer factories
 kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
 polexCache, polexController := internal.NewExceptionSelector(setup.Logger, kyvernoInformer)


@@ -80,7 +80,7 @@ func (o *canIOptions) RunAccessCheck(ctx context.Context) (bool, string, error)
 return false, "", err
 }
 if !result.Allowed {
-logger.Info("disallowed operation", "reason", result.Reason, "evaluationError", result.EvaluationError)
+logger.V(3).Info("disallowed operation", "reason", result.Reason, "evaluationError", result.EvaluationError)
 }
 return result.Allowed, result.Reason, nil
 }


@@ -48,7 +48,7 @@ func (c *GenerateController) deleteDownstream(policy kyvernov1.PolicyInterface,
 func (c *GenerateController) handleNonPolicyChanges(policy kyvernov1.PolicyInterface, ruleContext kyvernov2.RuleContext, ur *kyvernov2.UpdateRequest) error {
 logger := c.log.V(4).WithValues("ur", ur.Name, "policy", ur.Spec.Policy, "rule", ruleContext.Rule)
-logger.Info("synchronize for none-policy changes")
+logger.Info("synchronize for non-policy changes")
 for _, rule := range policy.GetSpec().Rules {
 if ruleContext.Rule != rule.Name {
 continue


@@ -101,7 +101,7 @@ func NewGenerateController(
 func (c *GenerateController) ProcessUR(ur *kyvernov2.UpdateRequest) error {
 logger := c.log.WithValues("name", ur.GetName(), "policy", ur.Spec.GetPolicyKey())
 var genResources []kyvernov1.ResourceSpec
-logger.Info("start processing UR", "ur", ur.Name, "resourceVersion", ur.GetResourceVersion())
+logger.V(2).Info("start processing UR", "ur", ur.Name, "resourceVersion", ur.GetResourceVersion())
 var failures []error
 policy, err := c.getPolicyObject(*ur)
@@ -121,7 +121,7 @@ func (c *GenerateController) ProcessUR(ur *kyvernov2.UpdateRequest) error {
 genResources, err = c.applyGenerate(*trigger, *ur, policy, i)
 if err != nil {
 if strings.Contains(err.Error(), doesNotApply) {
-logger.V(4).Info(fmt.Sprintf("skipping rule %s: %v", rule.Rule, err.Error()))
+logger.V(3).Info(fmt.Sprintf("skipping rule %s: %v", rule.Rule, err.Error()))
 }
 events := event.NewBackgroundFailedEvent(err, policy, ur.Spec.RuleContext[i].Rule, event.GeneratePolicyController,
@@ -332,7 +332,7 @@ func (c *GenerateController) ApplyGeneratePolicy(log logr.Logger, policyContext
 for _, s := range vars {
 for _, banned := range validationpolicy.ForbiddenUserVariables {
 if banned.Match([]byte(s[2])) {
-log.Info("warning: resources with admission request variables may not be regenerated", "policy", policy.GetName(), "rule", rule.Name, "variable", s[2])
+log.V(2).Info("warning: resources with admission request variables may not be regenerated", "policy", policy.GetName(), "rule", rule.Name, "variable", s[2])
 }
 }
 }


@@ -89,7 +89,7 @@ func (c *mutateExistingController) ProcessUR(ur *kyvernov2.UpdateRequest) error
 logger := c.log.WithValues("name", ur.GetName(), "policy", ur.Spec.GetPolicyKey(), "resource", ur.Spec.GetResource().String())
 var errs []error
-logger.Info("processing mutate existing")
+logger.V(3).Info("processing mutate existing")
 policy, err := c.getPolicy(ur)
 if err != nil {
 logger.Error(err, "failed to get policy")


@@ -114,8 +114,8 @@ func (c *controller) Run(ctx context.Context, workers int) {
 defer runtime.HandleCrash()
 defer c.queue.ShutDown()
-logger.Info("starting")
-defer logger.Info("shutting down")
+logger.V(4).Info("starting")
+defer logger.V(4).Info("shutting down")
 if !cache.WaitForNamedCacheSync("background", ctx.Done(), c.informersSynced...) {
 return
@@ -243,7 +243,7 @@ func (c *controller) processUR(ur *kyvernov2.UpdateRequest) error {
 func (c *controller) reconcileURStatus(ur *kyvernov2.UpdateRequest) (kyvernov2.UpdateRequestState, error) {
 new, err := c.kyvernoClient.KyvernoV2().UpdateRequests(config.KyvernoNamespace()).Get(context.TODO(), ur.GetName(), metav1.GetOptions{})
 if err != nil {
-logger.V(2).Info("cannot fetch latest UR, fallback to the existing one", "reason", err.Error())
+logger.V(3).Info("cannot fetch latest UR, fallback to the existing one", "reason", err.Error())
 new = ur
 }


@@ -379,16 +379,16 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 // load filters
 cd.filters = parseKinds(data[resourceFilters])
 cd.updateRequestThreshold = UpdateRequestThreshold
-logger.Info("filters configured", "filters", cd.filters)
+logger.V(4).Info("filters configured", "filters", cd.filters)
 // load defaultRegistry
 defaultRegistry, ok := data[defaultRegistry]
 if !ok {
-logger.Info("defaultRegistry not set")
+logger.V(2).Info("defaultRegistry not set")
 } else {
 logger := logger.WithValues("defaultRegistry", defaultRegistry)
 if valid.IsDNSName(defaultRegistry) {
 cd.defaultRegistry = defaultRegistry
-logger.Info("defaultRegistry configured")
+logger.V(2).Info("defaultRegistry configured")
 } else {
 logger.Error(errors.New("defaultRegistry is not a valid DNS hostname"), "failed to configure defaultRegistry")
 }
@@ -396,7 +396,7 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 // load enableDefaultRegistryMutation
 enableDefaultRegistryMutation, ok := data[enableDefaultRegistryMutation]
 if !ok {
-logger.Info("enableDefaultRegistryMutation not set")
+logger.V(2).Info("enableDefaultRegistryMutation not set")
 } else {
 logger := logger.WithValues("enableDefaultRegistryMutation", enableDefaultRegistryMutation)
 enableDefaultRegistryMutation, err := strconv.ParseBool(enableDefaultRegistryMutation)
@@ -404,45 +404,45 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "enableDefaultRegistryMutation is not a boolean")
 } else {
 cd.enableDefaultRegistryMutation = enableDefaultRegistryMutation
-logger.Info("enableDefaultRegistryMutation configured")
+logger.V(2).Info("enableDefaultRegistryMutation configured")
 }
 }
 // load excludeGroupRole
 excludedGroups, ok := data[excludeGroups]
 if !ok {
-logger.Info("excludeGroups not set")
+logger.V(2).Info("excludeGroups not set")
 } else {
 cd.exclusions.groups, cd.inclusions.groups = parseExclusions(excludedGroups)
-logger.Info("excludedGroups configured", "excludeGroups", cd.exclusions.groups, "includeGroups", cd.inclusions.groups)
+logger.V(2).Info("excludedGroups configured", "excludeGroups", cd.exclusions.groups, "includeGroups", cd.inclusions.groups)
 }
 // load excludeUsername
 excludedUsernames, ok := data[excludeUsernames]
 if !ok {
-logger.Info("excludeUsernames not set")
+logger.V(2).Info("excludeUsernames not set")
 } else {
 cd.exclusions.usernames, cd.inclusions.usernames = parseExclusions(excludedUsernames)
-logger.Info("excludedUsernames configured", "excludeUsernames", cd.exclusions.usernames, "includeUsernames", cd.inclusions.usernames)
+logger.V(2).Info("excludedUsernames configured", "excludeUsernames", cd.exclusions.usernames, "includeUsernames", cd.inclusions.usernames)
 }
 // load excludeRoles
 excludedRoles, ok := data[excludeRoles]
 if !ok {
-logger.Info("excludeRoles not set")
+logger.V(2).Info("excludeRoles not set")
 } else {
 cd.exclusions.roles, cd.inclusions.roles = parseExclusions(excludedRoles)
-logger.Info("excludedRoles configured", "excludeRoles", cd.exclusions.roles, "includeRoles", cd.inclusions.roles)
+logger.V(2).Info("excludedRoles configured", "excludeRoles", cd.exclusions.roles, "includeRoles", cd.inclusions.roles)
 }
 // load excludeClusterRoles
 excludedClusterRoles, ok := data[excludeClusterRoles]
 if !ok {
-logger.Info("excludeClusterRoles not set")
+logger.V(2).Info("excludeClusterRoles not set")
 } else {
 cd.exclusions.clusterroles, cd.inclusions.clusterroles = parseExclusions(excludedClusterRoles)
-logger.Info("excludedClusterRoles configured", "excludeClusterRoles", cd.exclusions.clusterroles, "includeClusterRoles", cd.inclusions.clusterroles)
+logger.V(2).Info("excludedClusterRoles configured", "excludeClusterRoles", cd.exclusions.clusterroles, "includeClusterRoles", cd.inclusions.clusterroles)
 }
 // load generateSuccessEvents
 generateSuccessEvents, ok := data[generateSuccessEvents]
 if !ok {
-logger.Info("generateSuccessEvents not set")
+logger.V(2).Info("generateSuccessEvents not set")
 } else {
 logger := logger.WithValues("generateSuccessEvents", generateSuccessEvents)
 generateSuccessEvents, err := strconv.ParseBool(generateSuccessEvents)
@@ -450,13 +450,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "generateSuccessEvents is not a boolean")
 } else {
 cd.generateSuccessEvents = generateSuccessEvents
-logger.Info("generateSuccessEvents configured")
+logger.V(2).Info("generateSuccessEvents configured")
 }
 }
 // load webhooks
 webhooks, ok := data[webhooks]
 if !ok {
-logger.Info("webhooks not set")
+logger.V(2).Info("webhooks not set")
 } else {
 logger := logger.WithValues("webhooks", webhooks)
 webhook, err := parseWebhooks(webhooks)
@@ -464,13 +464,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse webhooks")
 } else {
 cd.webhook = *webhook
-logger.Info("webhooks configured")
+logger.V(2).Info("webhooks configured")
 }
 }
 // load webhook annotations
 webhookAnnotations, ok := data[webhookAnnotations]
 if !ok {
-logger.Info("webhookAnnotations not set")
+logger.V(2).Info("webhookAnnotations not set")
 } else {
 logger := logger.WithValues("webhookAnnotations", webhookAnnotations)
 webhookAnnotations, err := parseWebhookAnnotations(webhookAnnotations)
@@ -478,13 +478,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse webhook annotations")
 } else {
 cd.webhookAnnotations = webhookAnnotations
-logger.Info("webhookAnnotations configured")
+logger.V(2).Info("webhookAnnotations configured")
 }
 }
 // load webhook annotations
 webhookLabels, ok := data[webhookLabels]
 if !ok {
-logger.Info("webhookLabels not set")
+logger.V(2).Info("webhookLabels not set")
 } else {
 logger := logger.WithValues("webhookLabels", webhookLabels)
 webhookLabels, err := parseWebhookLabels(webhookLabels)
@@ -492,13 +492,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse webhook labels")
 } else {
 cd.webhookLabels = webhookLabels
-logger.Info("webhookLabels configured")
+logger.V(2).Info("webhookLabels configured")
 }
 }
 // load match conditions
 matchConditions, ok := data[matchConditions]
 if !ok {
-logger.Info("matchConditions not set")
+logger.V(2).Info("matchConditions not set")
 } else {
 logger := logger.WithValues("matchConditions", matchConditions)
 matchConditions, err := parseMatchConditions(matchConditions)
@@ -506,12 +506,12 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse match conditions")
 } else {
 cd.matchConditions = matchConditions
-logger.Info("matchConditions configured")
+logger.V(2).Info("matchConditions configured")
 }
 }
 threshold, ok := data[updateRequestThreshold]
 if !ok {
-logger.Info("enableDefaultRegistryMutation not set")
+logger.V(2).Info("enableDefaultRegistryMutation not set")
 } else {
 logger := logger.WithValues("enableDefaultRegistryMutation", enableDefaultRegistryMutation)
 urThreshold, err := strconv.ParseInt(threshold, 10, 64)
@@ -519,7 +519,7 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "enableDefaultRegistryMutation is not a boolean")
 } else {
 cd.updateRequestThreshold = urThreshold
-logger.Info("enableDefaultRegistryMutation configured")
+logger.V(2).Info("enableDefaultRegistryMutation configured")
 }
 }
 }
@@ -537,7 +537,7 @@ func (cd *configuration) unload() {
 cd.webhook = WebhookConfig{}
 cd.webhookAnnotations = nil
 cd.webhookLabels = nil
-logger.Info("configuration unloaded")
+logger.V(2).Info("configuration unloaded")
 }
 func (cd *configuration) notify() {


@@ -165,7 +165,7 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 // load metricsRefreshInterval
 metricsRefreshInterval, ok := data["metricsRefreshInterval"]
 if !ok {
-logger.Info("metricsRefreshInterval not set")
+logger.V(4).Info("metricsRefreshInterval not set")
 } else {
 logger := logger.WithValues("metricsRefreshInterval", metricsRefreshInterval)
 metricsRefreshInterval, err := time.ParseDuration(metricsRefreshInterval)
@@ -173,13 +173,13 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse metricsRefreshInterval")
 } else {
 cd.metricsRefreshInterval = metricsRefreshInterval
-logger.Info("metricsRefreshInterval configured")
+logger.V(4).Info("metricsRefreshInterval configured")
 }
 }
 // load namespaces
 namespaces, ok := data["namespaces"]
 if !ok {
-logger.Info("namespaces not set")
+logger.V(4).Info("namespaces not set")
 } else {
 logger := logger.WithValues("namespaces", namespaces)
 namespaces, err := parseIncludeExcludeNamespacesFromNamespacesConfig(namespaces)
@@ -187,13 +187,13 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse namespaces")
 } else {
 cd.namespaces = namespaces
-logger.Info("namespaces configured")
+logger.V(4).Info("namespaces configured")
 }
 }
 // load bucket boundaries
 bucketBoundariesString, ok := data["bucketBoundaries"]
 if !ok {
-logger.Info("bucketBoundaries not set")
+logger.V(4).Info("bucketBoundaries not set")
 } else {
 logger := logger.WithValues("bucketBoundaries", bucketBoundariesString)
 bucketBoundaries, err := parseBucketBoundariesConfig(bucketBoundariesString)
@@ -201,13 +201,13 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse bucketBoundariesString")
 } else {
 cd.bucketBoundaries = bucketBoundaries
-logger.Info("bucketBoundaries configured")
+logger.V(4).Info("bucketBoundaries configured")
 }
 }
 // load include resource details
 metricsExposureString, ok := data["metricsExposure"]
 if !ok {
-logger.Info("metricsExposure not set")
+logger.V(4).Info("metricsExposure not set")
 } else {
 logger := logger.WithValues("metricsExposure", metricsExposureString)
 metricsExposure, err := parseMetricExposureConfig(metricsExposureString, cd.bucketBoundaries)
@@ -215,7 +215,7 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse metricsExposure")
 } else {
 cd.metricsExposure = metricsExposure
-logger.Info("metricsExposure configured")
+logger.V(4).Info("metricsExposure configured")
 }
 }
 }


@@ -95,7 +95,7 @@ func NewController(
 if obj.GetNamespace() != "" {
 logger = logger.WithValues("namespace", obj.GetNamespace())
 }
-logger.Info(operation)
+logger.V(2).Info(operation)
 if err := baseEnqueueFunc(obj); err != nil {
 logger.Error(err, "failed to enqueue object", "obj", obj)
 return err


@@ -42,7 +42,7 @@ func (c *controller) add(obj metav1.Object) {
 c.logger.Error(err, "failed to extract name", "object", obj)
 name = "unknown"
 }
-c.logger.Info("resource added", "name", name)
+c.logger.V(2).Info("resource added", "name", name)
 }
 func (c *controller) update(old, obj metav1.Object) {
@@ -56,7 +56,7 @@ func (c *controller) update(old, obj metav1.Object) {
 c.logger.Error(err, "failed to extract name", "object", obj)
 name = "unknown"
 }
-c.logger.Info("resource updated", "name", name)
+c.logger.V(2).Info("resource updated", "name", name)
 }
 func (c *controller) delete(obj metav1.Object) {
@@ -65,5 +65,5 @@ func (c *controller) delete(obj metav1.Object) {
 c.logger.Error(err, "failed to extract name", "object", obj)
 name = "unknown"
 }
-c.logger.Info("resource deleted", "name", name)
+c.logger.V(2).Info("resource deleted", "name", name)
 }


@@ -239,21 +239,21 @@ func (c *controller) reconcileWebhookDeletion(ctx context.Context) error {
 c.logger.Error(err, "failed to clean up validating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 return err
 } else if err == nil {
-c.logger.Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+c.logger.V(4).Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 }
 if err := c.postWebhookCleanup(ctx, c.logger); err != nil {
 c.logger.Error(err, "failed to clean up temporary rbac")
 return err
 } else {
-c.logger.Info("successfully deleted temporary rbac")
+c.logger.V(4).Info("successfully deleted temporary rbac")
 }
 } else {
 if err := c.webhookCleanupSetup(ctx, c.logger); err != nil {
 c.logger.Error(err, "failed to reconcile webhook cleanup setup")
 return err
 }
-c.logger.Info("reconciled webhook cleanup setup")
+c.logger.V(4).Info("reconciled webhook cleanup setup")
 }
 }
 return nil


@@ -79,7 +79,7 @@ func NewController(
 }
 func (c *controller) addGTXEntry(obj *kyvernov2alpha1.GlobalContextEntry) {
-logger.Info("globalcontextentry created", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
+logger.V(4).Info("globalcontextentry created", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
 c.enqueueGCTXEntry(obj)
 }
@@ -87,12 +87,12 @@ func (c *controller) updateGTXEntry(old, obj *kyvernov2alpha1.GlobalContextEntry
 if datautils.DeepEqual(old.Spec, obj.Spec) {
 return
 }
-logger.Info("globalcontextentry updated", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
+logger.V(4).Info("globalcontextentry updated", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
 c.enqueueGCTXEntry(obj)
 }
 func (c *controller) deleteGTXEntry(obj *kyvernov2alpha1.GlobalContextEntry) {
-logger.Info("globalcontextentry deleted", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
+logger.V(4).Info("globalcontextentry deleted", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
 c.enqueueGCTXEntry(obj)
 }


@@ -66,8 +66,8 @@ func NewController(client dclient.Interface, pcache pcache.Cache, cpolInformer k
 }
 func (c *controller) WarmUp() error {
-logger.Info("warming up ...")
-defer logger.Info("warm up done")
+logger.V(4).Info("warming up ...")
+defer logger.V(4).Info("warm up done")
 pols, err := c.polLister.Policies(metav1.NamespaceAll).List(labels.Everything())
 if err != nil {


@@ -401,7 +401,7 @@ func (c *controller) frontReconcile(ctx context.Context, logger logr.Logger, _,
 if adopted, forbidden := c.adopt(ctx, reportMeta); adopted {
 return nil
 } else if forbidden {
-logger.Info("deleting because insufficient permission to fetch resource")
+logger.V(3).Info("deleting because insufficient permission to fetch resource")
 return c.deleteEphemeralReport(ctx, reportMeta.GetNamespace(), reportMeta.GetName())
 }
 // if not found and too old, forget about it


@@ -176,7 +176,7 @@ func NewController(
 }
 func (c *controller) Run(ctx context.Context, workers int) {
-logger.Info("background scan", "interval", c.forceDelay.Abs().String())
+logger.V(2).Info("background scan", "interval", c.forceDelay.Abs().String())
 controllerutils.Run(ctx, logger, ControllerName, time.Second, c.queue, workers, maxRetries, c.reconcile)
 }


@@ -196,9 +196,9 @@ func (c *controller) startWatcher(ctx context.Context, logger logr.Logger, gvr s
 c.notify(Added, uid, gvk, hashes[uid])
 }
 logger := logger.WithValues("resourceVersion", resourceVersion)
-logger.Info("start watcher ...")
+logger.V(2).Info("start watcher ...")
 watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
-logger.Info("creating watcher...")
+logger.V(3).Info("creating watcher...")
 watch, err := c.client.GetDynamicInterface().Resource(gvr).Watch(context.Background(), options)
 if err != nil {
 logger.Error(err, "failed to watch")
@@ -216,7 +216,7 @@ func (c *controller) startWatcher(ctx context.Context, logger logr.Logger, gvr s
 hashes: hashes,
 }
 go func(gvr schema.GroupVersionResource) {
-defer logger.Info("watcher stopped")
+defer logger.V(2).Info("watcher stopped")
 for event := range watchInterface.ResultChan() {
 switch event.Type {
 case watch.Added:
@@ -317,12 +317,12 @@ func (c *controller) addGVKToGVRMapping(group, version, kind, subresource string
 if gvrs.SubResource == "" {
 gvk := schema.GroupVersionKind{Group: gvrs.Group, Version: gvrs.Version, Kind: kind}
 if !reportutils.IsGvkSupported(gvk) {
-logger.Info("kind is not supported", "gvk", gvk)
+logger.V(2).Info("kind is not supported", "gvk", gvk)
 } else {
 if slices.Contains(api.Verbs, "list") && slices.Contains(api.Verbs, "watch") {
 gvrMap[gvk] = gvrs.GroupVersionResource()
 } else {
-logger.Info("list/watch not supported for kind", "kind", kind)
+logger.V(2).Info("list/watch not supported for kind", "kind", kind)
 }
 }
 }


@@ -135,7 +135,7 @@ func determinePropagationPolicy(metaObj metav1.Object, logger logr.Logger) *meta
 case "":
 return nil
 default:
-logger.Info("Unknown propagationPolicy annotation, no global policy found", "policy", annotations[kyverno.AnnotationCleanupPropagationPolicy])
+logger.V(2).Info("Unknown propagationPolicy annotation, no global policy found", "policy", annotations[kyverno.AnnotationCleanupPropagationPolicy])
 return nil
 }
 }
@@ -160,7 +160,7 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, itemKey
 }
 metaObj, err := meta.Accessor(obj)
 if err != nil {
-logger.Info("object is not of type metav1.Object")
+logger.V(2).Info("object is not of type metav1.Object")
 return err
 }
 commonLabels := []attribute.KeyValue{
@@ -197,7 +197,7 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, itemKey
 }
 return err
 }
-logger.Info("resource has been deleted")
+logger.V(2).Info("resource has been deleted")
 } else {
 if c.metrics.deletedObjectsTotal != nil {
 c.metrics.deletedObjectsTotal.Add(context.Background(), 1, metric.WithAttributes(commonLabels...))


@@ -127,8 +127,8 @@ func (m *manager) stop(ctx context.Context, gvr schema.GroupVersionResource) err
 if stopFunc, ok := m.resController[gvr]; ok {
 delete(m.resController, gvr)
 func() {
-defer logger.Info("controller stopped")
-logger.Info("stopping controller...")
+defer logger.V(2).Info("controller stopped")
+logger.V(2).Info("stopping controller...")
 stopFunc()
 }()
 }

View file

@@ -115,7 +115,7 @@ func (c *controller) Run(ctx context.Context, workers int) {
 }
 func (c *controller) addPolicy(obj kyvernov1.PolicyInterface) {
-	logger.Info("policy created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+	logger.V(2).Info("policy created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 	c.enqueuePolicy(obj)
 }
@@ -123,7 +123,7 @@ func (c *controller) updatePolicy(old, obj kyvernov1.PolicyInterface) {
 	if datautils.DeepEqual(old.GetSpec(), obj.GetSpec()) {
 		return
 	}
-	logger.Info("policy updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+	logger.V(2).Info("policy updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 	c.enqueuePolicy(obj)
 }
@@ -138,7 +138,7 @@ func (c *controller) deletePolicy(obj kyvernov1.PolicyInterface) {
 		return
 	}
-	logger.Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "name", p.GetName())
+	logger.V(2).Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "name", p.GetName())
 	c.enqueuePolicy(obj)
 }
@@ -152,7 +152,7 @@ func (c *controller) enqueuePolicy(obj kyvernov1.PolicyInterface) {
 }
 func (c *controller) addException(obj *kyvernov2.PolicyException) {
-	logger.Info("policy exception created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+	logger.V(2).Info("policy exception created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 	c.enqueueException(obj)
 }
@@ -160,14 +160,14 @@ func (c *controller) updateException(old, obj *kyvernov2.PolicyException) {
 	if datautils.DeepEqual(old.Spec, obj.Spec) {
 		return
 	}
-	logger.Info("policy exception updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+	logger.V(2).Info("policy exception updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 	c.enqueueException(obj)
 }
 func (c *controller) deleteException(obj *kyvernov2.PolicyException) {
 	polex := kubeutils.GetObjectWithTombstone(obj).(*kyvernov2.PolicyException)
-	logger.Info("policy exception deleted", "uid", polex.GetUID(), "kind", polex.GetKind(), "name", polex.GetName())
+	logger.V(2).Info("policy exception deleted", "uid", polex.GetUID(), "kind", polex.GetKind(), "name", polex.GetName())
 	c.enqueueException(obj)
 }
@@ -305,14 +305,14 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, nam
 	// check if the controller has the required permissions to generate validating admission policies.
 	if !admissionpolicy.HasValidatingAdmissionPolicyPermission(c.checker) {
-		logger.Info("insufficient permissions to generate ValidatingAdmissionPolicies")
+		logger.V(2).Info("insufficient permissions to generate ValidatingAdmissionPolicies")
 		c.updateClusterPolicyStatus(ctx, *policy, false, "insufficient permissions to generate ValidatingAdmissionPolicies")
 		return nil
 	}
 	// check if the controller has the required permissions to generate validating admission policy bindings.
 	if !admissionpolicy.HasValidatingAdmissionPolicyBindingPermission(c.checker) {
-		logger.Info("insufficient permissions to generate ValidatingAdmissionPolicyBindings")
+		logger.V(2).Info("insufficient permissions to generate ValidatingAdmissionPolicyBindings")
 		c.updateClusterPolicyStatus(ctx, *policy, false, "insufficient permissions to generate ValidatingAdmissionPolicyBindings")
 		return nil
 	}

View file

@@ -433,7 +433,7 @@ func (c *controller) reconcileWebhookDeletion(ctx context.Context) error {
 			logger.Error(err, "failed to clean up validating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 			return err
 		} else if err == nil {
-			logger.Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(3).Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 		}
 		if err := c.mwcClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
 			LabelSelector: kyverno.LabelWebhookManagedBy,
@@ -441,21 +441,21 @@ func (c *controller) reconcileWebhookDeletion(ctx context.Context) error {
 			logger.Error(err, "failed to clean up mutating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 			return err
 		} else if err == nil {
-			logger.Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(3).Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 		}
 		if err := c.postWebhookCleanup(ctx, logger); err != nil {
 			logger.Error(err, "failed to clean up temporary rbac")
 			return err
 		} else {
-			logger.Info("successfully deleted temporary rbac")
+			logger.V(3).Info("successfully deleted temporary rbac")
 		}
 	} else {
 		if err := c.webhookCleanupSetup(ctx, logger); err != nil {
 			logger.Error(err, "failed to reconcile webhook cleanup setup")
 			return err
 		}
-		logger.Info("reconciled webhook cleanup setup")
+		logger.V(3).Info("reconciled webhook cleanup setup")
 	}
 }
 return nil

View file

@@ -106,7 +106,7 @@ func (a *executor) executeServiceCall(ctx context.Context, apiCall *kyvernov1.AP
 		}
 	}
-	a.logger.Info("executed service APICall", "name", a.name, "len", len(body))
+	a.logger.V(4).Info("executed service APICall", "name", a.name, "len", len(body))
 	return body, nil
 }

View file

@@ -103,7 +103,7 @@ func (l *contextLoader) newLoader(
 			ldr := loaders.NewConfigMapLoader(ctx, l.logger, entry, l.cmResolver, jsonContext)
 			return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 		} else {
-			l.logger.Info("disabled loading of ConfigMap context entry", "name", entry.Name)
+			l.logger.V(3).Info("disabled loading of ConfigMap context entry", "name", entry.Name)
 			return nil, nil
 		}
 	} else if entry.APICall != nil {
@@ -111,7 +111,7 @@ func (l *contextLoader) newLoader(
 			ldr := loaders.NewAPILoader(ctx, l.logger, entry, jsonContext, jp, client, l.apiCallConfig)
 			return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 		} else {
-			l.logger.Info("disabled loading of APICall context entry", "name", entry.Name)
+			l.logger.V(3).Info("disabled loading of APICall context entry", "name", entry.Name)
 			return nil, nil
 		}
 	} else if entry.GlobalReference != nil {
@@ -119,7 +119,7 @@ func (l *contextLoader) newLoader(
 			ldr := loaders.NewGCTXLoader(ctx, l.logger, entry, jsonContext, jp, gctx)
 			return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 		} else {
-			l.logger.Info("disabled loading of GlobalContext context entry", "name", entry.Name)
+			l.logger.V(3).Info("disabled loading of GlobalContext context entry", "name", entry.Name)
 			return nil, nil
 		}
 	} else if entry.ImageRegistry != nil {
@@ -127,7 +127,7 @@ func (l *contextLoader) newLoader(
 			ldr := loaders.NewImageDataLoader(ctx, l.logger, entry, jsonContext, jp, rclientFactory)
 			return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 		} else {
-			l.logger.Info("disabled loading of ImageRegistry context entry", "name", entry.Name)
+			l.logger.V(3).Info("disabled loading of ImageRegistry context entry", "name", entry.Name)
 			return nil, nil
 		}
 	} else if entry.Variable != nil {

View file

@@ -45,7 +45,7 @@ func (f *forEachMutator) mutateForEach(ctx context.Context) *mutate.Response {
 			if mutateResp.Status == engineapi.RuleStatusPass {
 				f.resource.unstructured = mutateResp.PatchedResource
 			}
-			f.logger.Info("mutateResp.PatchedResource", "resource", mutateResp.PatchedResource)
+			f.logger.V(3).Info("mutateResp.PatchedResource", "resource", mutateResp.PatchedResource)
 			if err := f.policyContext.JSONContext().AddResource(mutateResp.PatchedResource.Object); err != nil {
 				f.logger.Error(err, "failed to update resource in context")
 			}
@@ -104,7 +104,7 @@ func (f *forEachMutator) mutateElements(ctx context.Context, foreach kyvernov1.F
 		}
 		if !preconditionsPassed {
-			f.logger.Info("mutate.foreach.preconditions not met", "elementIndex", index, "message", msg)
+			f.logger.V(3).Info("mutate.foreach.preconditions not met", "elementIndex", index, "message", msg)
 			continue
 		}

View file

@@ -148,17 +148,17 @@ func (h validateManifestHandler) verifyManifest(
 		// check if kyverno can 'create' dryrun resource
 		ok, err := h.checkDryRunPermission(ctx, adreq.Kind.Kind, vo.DryRunNamespace)
 		if err != nil {
-			logger.V(1).Info("failed to check permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind, "error", err.Error())
+			logger.V(2).Info("failed to check permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind, "error", err.Error())
 			vo.DisableDryRun = true
 		}
 		if !ok {
-			logger.V(1).Info("kyverno does not have permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind)
+			logger.V(2).Info("kyverno does not have permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind)
 			vo.DisableDryRun = true
 		}
 		// check if kyverno namespace is not used for dryrun
 		ok = checkDryRunNamespace(vo.DryRunNamespace)
 		if !ok {
-			logger.V(1).Info("an inappropriate dryrun namespace is set; set a namespace other than kyverno.", "dryrun namespace", vo.DryRunNamespace)
+			logger.V(2).Info("an inappropriate dryrun namespace is set; set a namespace other than kyverno.", "dryrun namespace", vo.DryRunNamespace)
 			vo.DisableDryRun = true
 		}
 	}

View file

@@ -282,7 +282,7 @@ func (iv *ImageVerifier) verifyImage(
 	}
 	if matchReferences(imageVerify.SkipImageReferences, image) {
-		iv.logger.Info("skipping image reference", "image", image, "policy", iv.policyContext.Policy().GetName(), "ruleName", iv.rule.Name)
+		iv.logger.V(3).Info("skipping image reference", "image", image, "policy", iv.policyContext.Policy().GetName(), "ruleName", iv.rule.Name)
 		iv.ivm.Add(image, engineapi.ImageVerificationSkip)
 		return engineapi.RuleSkip(iv.rule.Name, engineapi.ImageVerify, fmt.Sprintf("skipping image reference image %s, policy %s ruleName %s", image, iv.policyContext.Policy().GetName(), iv.rule.Name), iv.rule.ReportProperties).WithEmitWarning(true), ""
 	}
@@ -538,7 +538,7 @@ func (iv *ImageVerifier) buildCosignVerifier(
 		opts.Type = attestation.Type
 		opts.IgnoreSCT = true // TODO: Add option to allow SCT when attestors are not provided
 		if attestation.PredicateType != "" && attestation.Type == "" {
-			iv.logger.Info("predicate type has been deprecated, please use type instead", "image", image)
+			iv.logger.V(4).Info("predicate type has been deprecated, please use type instead", "image", image)
 			opts.Type = attestation.PredicateType
 		}
 		opts.FetchAttestations = true
@@ -647,7 +647,7 @@ func (iv *ImageVerifier) buildNotaryVerifier(
 		opts.Type = attestation.Type
 		opts.PredicateType = attestation.PredicateType
 		if attestation.PredicateType != "" && attestation.Type == "" {
-			iv.logger.Info("predicate type has been deprecated, please use type instead", "image", image)
+			iv.logger.V(2).Info("predicate type has been deprecated, please use type instead", "image", image)
 			opts.Type = attestation.PredicateType
 		}
 		opts.FetchAttestations = true
@@ -673,11 +673,11 @@ func (iv *ImageVerifier) verifyAttestation(statements []map[string]interface{},
 		iv.logger.V(4).Info("checking attestations", "predicates", types, "image", image)
 		statements = statementsByPredicate[attestation.Type]
 		if statements == nil {
-			iv.logger.Info("no attestations found for predicate", "type", attestation.Type, "predicates", types, "image", imageInfo.String())
+			iv.logger.V(2).Info("no attestations found for predicate", "type", attestation.Type, "predicates", types, "image", imageInfo.String())
 			return fmt.Errorf("attestions not found for predicate type %s", attestation.Type)
 		}
 		for _, s := range statements {
-			iv.logger.Info("checking attestation", "predicates", types, "image", imageInfo.String())
+			iv.logger.V(3).Info("checking attestation", "predicates", types, "image", imageInfo.String())
 			val, msg, err := iv.checkAttestations(attestation, s)
 			if err != nil {
 				return fmt.Errorf("failed to check attestations: %w", err)

View file

@@ -101,8 +101,8 @@ func (gen *controller) Add(infos ...Info) {
 // Run begins generator
 func (gen *controller) Run(ctx context.Context, workers int) {
 	logger := gen.logger
-	logger.Info("start")
-	defer logger.Info("terminated")
+	logger.V(2).Info("start")
+	defer logger.V(2).Info("terminated")
 	defer utilruntime.HandleCrash()
 	var waitGroup wait.Group
 	for i := 0; i < workers; i++ {

View file

@@ -80,23 +80,23 @@ func New(log logr.Logger, name, namespace string, kubeClient kubernetes.Interfac
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: func(ctx context.Context) {
 				atomic.StoreInt64(&e.isLeader, 1)
-				e.log.Info("started leading")
+				e.log.V(2).Info("started leading")
 				if e.startWork != nil {
 					e.startWork(ctx)
 				}
 			},
 			OnStoppedLeading: func() {
 				atomic.StoreInt64(&e.isLeader, 0)
-				e.log.Info("leadership lost, stopped leading")
+				e.log.V(2).Info("leadership lost, stopped leading")
 				if e.stopWork != nil {
 					e.stopWork()
 				}
 			},
 			OnNewLeader: func(identity string) {
 				if identity == e.lock.Identity() {
-					e.log.Info("still leading")
+					e.log.V(4).Info("still leading")
 				} else {
-					e.log.Info("another instance has been elected as leader", "leader", identity)
+					e.log.V(2).Info("another instance has been elected as leader", "leader", identity)
 				}
 			},
 		},

View file

@@ -181,7 +181,7 @@ func (pc *policyController) canBackgroundProcess(p kyvernov1.PolicyInterface) bo
 func (pc *policyController) addPolicy(obj interface{}) {
 	logger := pc.log
 	p := castPolicy(obj)
-	logger.Info("policy created", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
+	logger.V(2).Info("policy created", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
 	if !pc.canBackgroundProcess(p) {
 		return
@@ -230,7 +230,7 @@ func (pc *policyController) deletePolicy(obj interface{}) {
 		return
 	}
-	logger.Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
+	logger.V(2).Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
 	err := pc.createURForDownstreamDeletion(p)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("failed to create UR on policy deletion, clean up downstream resource may be failed: %v", err))
@@ -254,8 +254,8 @@ func (pc *policyController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer pc.queue.ShutDown()
-	logger.Info("starting")
-	defer logger.Info("shutting down")
+	logger.V(2).Info("starting")
+	defer logger.V(2).Info("shutting down")
 	if !cache.WaitForNamedCacheSync("PolicyController", ctx.Done(), pc.informersSynced...) {
 		return
@@ -368,7 +368,7 @@ func (pc *policyController) forceReconciliation(ctx context.Context) {
 	for {
 		select {
 		case <-ticker.C:
-			logger.Info("reconciling generate and mutateExisting policies", "scan interval", pc.reconcilePeriod.String())
+			logger.V(3).Info("reconciling generate and mutateExisting policies", "scan interval", pc.reconcilePeriod.String())
 			pc.requeuePolicies()
 		case <-ctx.Done():

View file

@@ -210,7 +210,7 @@ func (m *policyMap) get(key PolicyType, gvr schema.GroupVersionResource, subreso
 		isNamespacedPolicy := ns != ""
 		policy := m.policies[policyName]
 		if policy == nil {
-			logger.Info("nil policy in the cache, this should not happen")
+			logger.V(4).Info("nil policy in the cache, this should not happen")
 		}
 		if !isNamespacedPolicy && namespace == "" {
 			result = append(result, policy)

View file

@@ -11,7 +11,7 @@ import (
 )
 func Start(logger logr.Logger, address string) {
-	logger.Info("Enable profiling, see details at https://github.com/kyverno/kyverno/wiki/Profiling-Kyverno-on-Kubernetes")
+	logger.V(2).Info("Enable profiling, see details at https://github.com/kyverno/kyverno/wiki/Profiling-Kyverno-on-Kubernetes")
 	go func() {
 		s := http.Server{
 			Addr: address,

View file

@@ -55,8 +55,8 @@ func newControllerMetrics(logger logr.Logger, controllerName string) *controller
 }
 func Run[T comparable](ctx context.Context, logger logr.Logger, controllerName string, period time.Duration, queue workqueue.TypedRateLimitingInterface[T], n, maxRetries int, r reconcileFunc, routines ...func(context.Context, logr.Logger)) {
-	logger.Info("starting ...")
-	defer logger.Info("stopped")
+	logger.V(2).Info("starting ...")
+	defer logger.V(2).Info("stopped")
 	var wg sync.WaitGroup
 	defer wg.Wait()
 	defer runtime.HandleCrash()
@@ -68,8 +68,8 @@ func Run[T comparable](ctx context.Context, logger logr.Logger, controllerName s
 		for i := 0; i < n; i++ {
 			wg.Add(1)
 			go func(logger logr.Logger) {
-				logger.Info("starting worker")
-				defer logger.Info("worker stopped")
+				logger.V(4).Info("starting worker")
+				defer logger.V(4).Info("worker stopped")
 				defer wg.Done()
 				wait.UntilWithContext(ctx, func(ctx context.Context) { worker(ctx, logger, metric, queue, maxRetries, r) }, period)
 			}(logger.WithName("worker").WithValues("id", i))
@@ -77,15 +77,15 @@ func Run[T comparable](ctx context.Context, logger logr.Logger, controllerName s
 		for i, routine := range routines {
 			wg.Add(1)
 			go func(logger logr.Logger, routine func(context.Context, logr.Logger)) {
-				logger.Info("starting routine")
-				defer logger.Info("routine stopped")
+				logger.V(4).Info("starting routine")
+				defer logger.V(4).Info("routine stopped")
 				defer wg.Done()
 				routine(ctx, logger)
 			}(logger.WithName("routine").WithValues("id", i), routine)
 		}
 		<-ctx.Done()
 	}()
-	logger.Info("waiting for workers to terminate ...")
+	logger.V(4).Info("waiting for workers to terminate ...")
 }
 func worker[T comparable](ctx context.Context, logger logr.Logger, metric *controllerMetrics, queue workqueue.TypedRateLimitingInterface[T], maxRetries int, r reconcileFunc) {
@@ -112,7 +112,7 @@ func handleErr[T comparable](ctx context.Context, logger logr.Logger, metric *co
 		logger.V(4).Info("Dropping request from the queue", "obj", obj, "error", err.Error())
 		queue.Forget(obj)
 	} else if queue.NumRequeues(obj) < maxRetries {
-		logger.Info("Retrying request", "obj", obj, "error", err.Error())
+		logger.V(3).Info("Retrying request", "obj", obj, "error", err.Error())
 		queue.AddRateLimited(obj)
 		if metric.requeueTotal != nil {
 			metric.requeueTotal.Add(

View file

@@ -68,7 +68,7 @@ func (c *runtime) IsRollingUpdate() bool {
 	}
 	nonTerminatedReplicas := deployment.Status.Replicas
 	if nonTerminatedReplicas > replicas {
-		c.logger.Info("detect Kyverno is in rolling update, won't trigger the update again")
+		c.logger.V(2).Info("detect Kyverno is in rolling update, won't trigger the update again")
 		return true
 	}
 	return false

View file

@@ -46,5 +46,5 @@ func Hash() string {
 // PrintVersionInfo displays the kyverno version - git version
 func PrintVersionInfo(log logr.Logger) {
-	log.Info("version", "version", Version(), "hash", Hash(), "time", Time())
+	log.V(2).Info("version", "version", Version(), "hash", Hash(), "time", Time())
 }

View file

@@ -41,7 +41,7 @@ func dumpPayload(
 		logger.Error(err, "Failed to extract resources")
 	} else {
 		logger = logger.WithValues("admission.response", response, "admission.request", reqPayload)
-		logger.Info("admission request dump")
+		logger.V(4).Info("admission request dump")
 	}
 }

View file

@@ -158,7 +158,7 @@ func (h *resourceHandlers) Validate(ctx context.Context, logger logr.Logger, req
 	}
 	wg.Wait()
 	if !ok {
-		logger.Info("admission request denied")
+		logger.V(4).Info("admission request denied")
 		events := webhookutils.GenerateEvents(enforceResponses, true, h.configuration)
 		h.eventGen.Add(events...)
 		return admissionutils.Response(request.UID, errors.New(msg), warnings...)

View file

@@ -133,7 +133,7 @@ func (v *mutationHandler) applyMutations(
 		patches = append(patches, policyPatches...)
 		rules := engineResponse.GetSuccessRules()
 		if len(rules) != 0 {
-			v.log.Info("mutation rules from policy applied successfully", "policy", policy.GetName(), "rules", rules)
+			v.log.V(2).Info("mutation rules from policy applied successfully", "policy", policy.GetName(), "rules", rules)
 		}
 	}
@@ -182,10 +182,10 @@ func (h *mutationHandler) applyMutation(ctx context.Context, request admissionv1
 	if !engineResponse.IsSuccessful() {
 		if webhookutils.BlockRequest([]engineapi.EngineResponse{engineResponse}, failurePolicy, h.log) {
-			h.log.Info("failed to apply policy, blocking request", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
+			h.log.V(2).Info("failed to apply policy, blocking request", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
 			return nil, nil, fmt.Errorf("failed to apply policy %s rules %v", policyContext.Policy().GetName(), engineResponse.GetFailedRulesWithErrors())
 		} else {
-			h.log.Info("ignoring unsuccessful engine responses", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
+			h.log.V(4).Info("ignoring unsuccessful engine responses", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
 			return &engineResponse, nil, nil
 		}
 	}

View file

@@ -245,7 +245,7 @@ func (s *server) cleanup(ctx context.Context) {
 		if err := s.leaseClient.Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "failed to clean up lease", "name", name)
 		} else if err == nil {
-			logger.Info("successfully deleted leases", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(2).Info("successfully deleted leases", "label", kyverno.LabelWebhookManagedBy)
 		}
 	}
 	deleteVwc := func() {
@@ -254,7 +254,7 @@ func (s *server) cleanup(ctx context.Context) {
 		}); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "failed to clean up validating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 		} else if err == nil {
-			logger.Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(2).Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 		}
 	}
 	deleteMwc := func() {
@@ -263,7 +263,7 @@ func (s *server) cleanup(ctx context.Context) {
 		}); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "failed to clean up mutating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 		} else if err == nil {
-			logger.Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(2).Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 		}
 	}
 	deleteLease("kyvernopre-lock")