Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-05 15:37:19 +00:00)
structuring log (#12111)
* structuring log
  Signed-off-by: Kamaal <kamaal@macs-MacBook-Air.local>
* Update controller.go
  Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
* Update main.go
  Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
* Update run.go
  Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
* Update config.go
  Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
* Update pkg/webhooks/resource/mutation/mutation.go
  Co-authored-by: shuting <shuting@nirmata.com>
  Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
* Update pkg/webhooks/resource/mutation/mutation.go
  Co-authored-by: shuting <shuting@nirmata.com>
  Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
  Signed-off-by: Kamaal <kamaal@macs-MacBook-Air.local>
---------
Signed-off-by: Kamaal <kamaal@macs-MacBook-Air.local>
Signed-off-by: Mohd Kamaal <102820439+Mohdcode@users.noreply.github.com>
Co-authored-by: Kamaal <kamaal@macs-MacBook-Air.local>
Co-authored-by: shuting <shuting@nirmata.com>
Parent: 1fa1c8674e
Commit: de75c64a02
55 changed files with 163 additions and 163 deletions
@@ -437,7 +437,7 @@ func (c *ApplyCommandConfig) applyPolicies(
 ers, err := processor.ApplyPoliciesOnResource()
 if err != nil {
 if c.ContinueOnFail {
-log.Log.Info(fmt.Sprintf("failed to apply policies on resource %s (%s)\n", resource.GetName(), err.Error()))
+log.Log.V(2).Info(fmt.Sprintf("failed to apply policies on resource %s (%s)\n", resource.GetName(), err.Error()))
 continue
 }
 return &rc, resources, responses, fmt.Errorf("failed to apply policies on resource %s (%w)", resource.GetName(), err)
@@ -446,7 +446,7 @@ func (c *ApplyCommandConfig) applyPolicies(
 }
 for _, policy := range validPolicies {
 if policy.GetNamespace() == "" && policy.GetKind() == "Policy" {
-log.Log.Info(fmt.Sprintf("Policy %s has no namespace detected. Ensure that namespaced policies are correctly loaded.", policy.GetNamespace()))
+log.Log.V(3).Info(fmt.Sprintf("Policy %s has no namespace detected. Ensure that namespaced policies are correctly loaded.", policy.GetNamespace()))
 }
 }
 return &rc, resources, responses, nil
@@ -266,7 +266,7 @@ func addGVKToResourceTypesMap(kind string, resourceTypesMap map[schema.GroupVers
 group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
 gvrss, err := client.Discovery().FindResources(group, version, kind, subresource)
 if err != nil {
-log.Log.Info("failed to find resource", "kind", kind, "error", err)
+log.Log.V(2).Info("failed to find resource", "kind", kind, "error", err)
 return
 }
 for parent, child := range gvrss {
@@ -12,21 +12,21 @@ func parse(vars ...string) map[string]string {
 variable = strings.TrimSpace(variable)
 kvs := strings.Split(variable, "=")
 if len(kvs) != 2 {
-log.Log.Info("ignored variable", "variable", variable)
+log.Log.V(3).Info("ignored variable", "variable", variable)
 continue
 }
 key := strings.TrimSpace(kvs[0])
 value := strings.TrimSpace(kvs[1])
 if len(value) == 0 || len(key) == 0 {
-log.Log.Info("ignored variable", "variable", variable)
+log.Log.V(3).Info("ignored variable", "variable", variable)
 continue
 }
 if strings.Contains(key, "request.object.") {
-log.Log.Info("ignored variable (contains `request.object.`)", "variable", variable)
+log.Log.V(3).Info("ignored variable (contains `request.object.`)", "variable", variable)
 continue
 }
 if result[key] != "" {
-log.Log.Info("ignored variable (duplicated)", "variable", variable)
+log.Log.V(3).Info("ignored variable (duplicated)", "variable", variable)
 continue
 }
 result[key] = value
@@ -40,7 +40,7 @@ func createClientConfig(logger logr.Logger, rateLimitQPS float64, rateLimitBurst
 
 func createKubernetesClient(logger logr.Logger, rateLimitQPS float64, rateLimitBurst int, opts ...kubeclient.NewOption) kubernetes.Interface {
 logger = logger.WithName("kube-client")
-logger.Info("create kube client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create kube client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := kubeclient.NewForConfig(createClientConfig(logger, rateLimitQPS, rateLimitBurst), opts...)
 checkError(logger, err, "failed to create kubernetes client")
 return client
@@ -48,7 +48,7 @@ func createKubernetesClient(logger logr.Logger, rateLimitQPS float64, rateLimitB
 
 func createKyvernoClient(logger logr.Logger, opts ...kyverno.NewOption) versioned.Interface {
 logger = logger.WithName("kyverno-client")
-logger.Info("create kyverno client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create kyverno client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := kyverno.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create kyverno client")
 return client
@@ -56,7 +56,7 @@ func createKyvernoClient(logger logr.Logger, opts ...kyverno.NewOption) versione
 
 func createDynamicClient(logger logr.Logger, opts ...dyn.NewOption) dynamic.Interface {
 logger = logger.WithName("dynamic-client")
-logger.Info("create dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := dyn.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create dynamic client")
 return client
@@ -64,7 +64,7 @@ func createDynamicClient(logger logr.Logger, opts ...dyn.NewOption) dynamic.Inte
 
 func createMetadataClient(logger logr.Logger, opts ...meta.NewOption) metadata.Interface {
 logger = logger.WithName("metadata-client")
-logger.Info("create metadata client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create metadata client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := meta.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create metadata client")
 return client
@@ -72,7 +72,7 @@ func createMetadataClient(logger logr.Logger, opts ...meta.NewOption) metadata.I
 
 func createApiServerClient(logger logr.Logger, opts ...apisrv.NewOption) apiserver.Interface {
 logger = logger.WithName("apiserver-client")
-logger.Info("create apiserver client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create apiserver client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := apisrv.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create apiserver client")
 return client
@@ -80,7 +80,7 @@ func createApiServerClient(logger logr.Logger, opts ...apisrv.NewOption) apiserv
 
 func createKyvernoDynamicClient(logger logr.Logger, ctx context.Context, dyn dynamic.Interface, kube kubernetes.Interface, resync time.Duration) dclient.Interface {
 logger = logger.WithName("d-client")
-logger.Info("create the kyverno dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create the kyverno dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := dclient.NewClient(ctx, dyn, kube, resync)
 checkError(logger, err, "failed to create d client")
 return client
@@ -88,7 +88,7 @@ func createKyvernoDynamicClient(logger logr.Logger, ctx context.Context, dyn dyn
 
 func createEventsClient(logger logr.Logger, metricsManager metrics.MetricsConfigManager) eventsv1.EventsV1Interface {
 logger = logger.WithName("events-client")
-logger.Info("create the events client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create the events client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client := kubeclient.From(createKubernetesClient(logger, eventsRateLimitQPS, eventsRateLimitBurst), kubeclient.WithTracing())
 client = client.WithMetrics(metricsManager, metrics.KubeClient)
 return client.EventsV1()
@@ -96,7 +96,7 @@ func createEventsClient(logger logr.Logger, metricsManager metrics.MetricsConfig
 
 func CreateAggregatorClient(logger logr.Logger, opts ...agg.NewOption) aggregator.Interface {
 logger = logger.WithName("aggregator-client")
-logger.Info("create aggregator client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+logger.V(2).Info("create aggregator client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 client, err := agg.NewForConfig(createClientConfig(logger, clientRateLimitQPS, clientRateLimitBurst), opts...)
 checkError(logger, err, "failed to create aggregator client")
 return client
@@ -29,8 +29,8 @@ func NewController(name string, c controllers.Controller, w int) Controller {
 func (c controller) Run(ctx context.Context, logger logr.Logger, wg *wait.Group) {
 logger = logger.WithValues("name", c.name)
 wg.Start(func() {
-logger.Info("starting controller", "workers", c.workers)
-defer logger.Info("controller stopped")
+logger.V(2).Info("starting controller", "workers", c.workers)
+defer logger.V(2).Info("controller stopped")
 c.controller.Run(ctx, c.workers)
 })
 }
@@ -43,7 +43,7 @@ func NewEngine(
 ) engineapi.Engine {
 configMapResolver := NewConfigMapResolver(ctx, logger, kubeClient, resyncPeriod)
 logger = logger.WithName("engine")
-logger.Info("setup engine...")
+logger.V(2).Info("setup engine...")
 return engine.NewEngine(
 configuration,
 metricsConfiguration,
@@ -62,7 +62,7 @@ func NewExceptionSelector(
 kyvernoInformer kyvernoinformer.SharedInformerFactory,
 ) (engineapi.PolicyExceptionSelector, Controller) {
 logger = logger.WithName("exception-selector").WithValues("enablePolicyException", enablePolicyException, "exceptionNamespace", exceptionNamespace)
-logger.Info("setup exception selector...")
+logger.V(2).Info("setup exception selector...")
 if !enablePolicyException {
 return nil, nil
 }
@@ -91,7 +91,7 @@ func NewConfigMapResolver(
 resyncPeriod time.Duration,
 ) engineapi.ConfigmapResolver {
 logger = logger.WithName("configmap-resolver").WithValues("enableConfigMapCaching", enableConfigMapCaching)
-logger.Info("setup config map resolver...")
+logger.V(2).Info("setup config map resolver...")
 clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
 checkError(logger, err, "failed to create client based resolver")
 if !enableConfigMapCaching {
@@ -7,7 +7,7 @@ import (
 
 func setupImageVerifyCache(logger logr.Logger) imageverifycache.Client {
 logger = logger.WithName("image-verify-cache").WithValues("enabled", imageVerifyCacheEnabled, "maxsize", imageVerifyCacheMaxSize, "ttl", imageVerifyCacheTTLDuration)
-logger.Info("setup image verify cache...")
+logger.V(2).Info("setup image verify cache...")
 opts := []imageverifycache.Option{
 imageverifycache.WithLogger(logger),
 imageverifycache.WithCacheEnableFlag(imageVerifyCacheEnabled),
@@ -9,11 +9,11 @@ import (
 
 func setupMaxProcs(logger logr.Logger) func() {
 logger = logger.WithName("maxprocs")
-logger.Info("setup maxprocs...")
+logger.V(2).Info("setup maxprocs...")
 undo, err := maxprocs.Set(
 maxprocs.Logger(
 func(format string, args ...interface{}) {
-logger.Info(fmt.Sprintf(format, args...))
+logger.V(4).Info(fmt.Sprintf(format, args...))
 },
 ),
 )
@@ -15,7 +15,7 @@ import (
 
 func SetupMetrics(ctx context.Context, logger logr.Logger, metricsConfiguration config.MetricsConfiguration, kubeClient kubernetes.Interface) (metrics.MetricsConfigManager, context.CancelFunc) {
 logger = logger.WithName("metrics")
-logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
+logger.V(2).Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
 metricsAddr := ":" + metricsPort
 metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
 ctx,
@@ -10,7 +10,7 @@ import (
 func setupProfiling(logger logr.Logger) {
 logger = logger.WithName("profiling").WithValues("enabled", profilingEnabled, "address", profilingAddress, "port", profilingPort)
 if profilingEnabled {
-logger.Info("setup profiling...")
+logger.V(2).Info("setup profiling...")
 profiling.Start(logger, net.JoinHostPort(profilingAddress, profilingPort))
 }
 }
@@ -15,7 +15,7 @@ import (
 
 func setupRegistryClient(ctx context.Context, logger logr.Logger, client kubernetes.Interface) (registryclient.Client, corev1listers.SecretNamespaceLister) {
 logger = logger.WithName("registry-client").WithValues("secrets", imagePullSecrets, "insecure", allowInsecureRegistry)
-logger.Info("setup registry client...")
+logger.V(2).Info("setup registry client...")
 factory := kubeinformers.NewSharedInformerFactoryWithOptions(client, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 secretLister := factory.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 // start informers and wait for cache sync
@@ -10,6 +10,6 @@ import (
 func setupReporting(logger logr.Logger) reportutils.ReportingConfiguration {
 logger = logger.WithName("setup-reporting").WithValues("enableReporting", enableReporting)
 cfg := reportutils.NewReportingConfig(strings.Split(enableReporting, ",")...)
-logger.Info("setting up reporting...", "validate", cfg.ValidateReportsEnabled(), "mutate", cfg.MutateReportsEnabled(), "mutateExisiting", cfg.MutateExistingReportsEnabled(), "imageVerify", cfg.ImageVerificationReportsEnabled(), "generate", cfg.GenerateReportsEnabled())
+logger.V(2).Info("setting up reporting...", "validate", cfg.ValidateReportsEnabled(), "mutate", cfg.MutateReportsEnabled(), "mutateExisiting", cfg.MutateExistingReportsEnabled(), "imageVerify", cfg.ImageVerificationReportsEnabled(), "generate", cfg.GenerateReportsEnabled())
 return cfg
 }
@@ -26,7 +26,7 @@ func shutdown(logger logr.Logger, sdowns ...context.CancelFunc) context.CancelFu
 return func() {
 for i := range sdowns {
 if sdowns[i] != nil {
-logger.Info("shutting down...")
+logger.V(2).Info("shutting down...")
 defer sdowns[i]()
 }
 }
@@ -13,6 +13,6 @@ var Context = context.Background()
 
 func setupSignals(logger logr.Logger) (context.Context, context.CancelFunc) {
 logger = logger.WithName("signals")
-logger.Info("setup signals...")
+logger.V(2).Info("setup signals...")
 return signal.NotifyContext(Context, os.Interrupt, syscall.SIGTERM)
 }
@@ -12,7 +12,7 @@ import (
 func SetupTracing(logger logr.Logger, name string, kubeClient kubernetes.Interface) context.CancelFunc {
 logger = logger.WithName("tracing").WithValues("enabled", tracingEnabled, "name", name, "address", tracingAddress, "port", tracingPort, "creds", tracingCreds)
 if tracingEnabled {
-logger.Info("setup tracing...")
+logger.V(2).Info("setup tracing...")
 shutdown, err := tracing.NewTraceConfig(
 logger,
 name,
@@ -16,7 +16,7 @@ func setupSigstoreTUF(ctx context.Context, logger logr.Logger) {
 }
 
 logger = logger.WithName("sigstore-tuf").WithValues("tufRoot", tufRoot, "tufRootRaw", tufRootRaw, "tufMirror", tufMirror)
-logger.Info("setup tuf client for sigstore...")
+logger.V(2).Info("setup tuf client for sigstore...")
 var tufRootBytes []byte
 var err error
 if tufRoot != "" {
@@ -32,7 +32,7 @@ func setupSigstoreTUF(ctx context.Context, logger logr.Logger) {
 tufRootBytes = root
 }
 
-logger.Info("Initializing TUF root")
+logger.V(2).Info("Initializing TUF root")
 if err := tuf.Initialize(ctx, tufMirror, tufRootBytes); err != nil {
 checkError(logger, err, fmt.Sprintf("Failed to initialize TUF client from %s : %v", tufRoot, err))
 }
@@ -171,10 +171,10 @@ func process(client dclient.Interface, kyvernoclient kyvernoclient.Interface, do
 select {
 case out <- executeRequest(client, kyvernoclient, req):
 case <-done:
-logger.Info("done")
+logger.V(4).Info("done")
 return
 case <-stopCh:
-logger.Info("shutting down")
+logger.V(4).Info("shutting down")
 return
 }
 }
@@ -194,10 +194,10 @@ func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan err
 select {
 case out <- err:
 case <-done:
-logger.Info("done")
+logger.V(4).Info("done")
 return
 case <-stopCh:
-logger.Info("shutting down")
+logger.V(4).Info("shutting down")
 return
 }
 }
@@ -84,7 +84,7 @@ func showWarnings(ctx context.Context, logger logr.Logger) {
 logger = logger.WithName("warnings")
 // log if `forceFailurePolicyIgnore` flag has been set or not
 if toggle.FromContext(ctx).ForceFailurePolicyIgnore() {
-logger.Info("'ForceFailurePolicyIgnore' is enabled, all policies with policy failures will be set to Ignore")
+logger.V(2).Info("'ForceFailurePolicyIgnore' is enabled, all policies with policy failures will be set to Ignore")
 }
 }
 
@@ -266,7 +266,7 @@ func main() {
 os.Exit(1)
 }
 }
-setup.Logger.Info("background scan interval", "duration", backgroundScanInterval.String())
+setup.Logger.V(2).Info("background scan interval", "duration", backgroundScanInterval.String())
 // informer factories
 kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
 polexCache, polexController := internal.NewExceptionSelector(setup.Logger, kyvernoInformer)
@@ -80,7 +80,7 @@ func (o *canIOptions) RunAccessCheck(ctx context.Context) (bool, string, error)
 return false, "", err
 }
 if !result.Allowed {
-logger.Info("disallowed operation", "reason", result.Reason, "evaluationError", result.EvaluationError)
+logger.V(3).Info("disallowed operation", "reason", result.Reason, "evaluationError", result.EvaluationError)
 }
 return result.Allowed, result.Reason, nil
 }
@@ -48,7 +48,7 @@ func (c *GenerateController) deleteDownstream(policy kyvernov1.PolicyInterface,
 
 func (c *GenerateController) handleNonPolicyChanges(policy kyvernov1.PolicyInterface, ruleContext kyvernov2.RuleContext, ur *kyvernov2.UpdateRequest) error {
 logger := c.log.V(4).WithValues("ur", ur.Name, "policy", ur.Spec.Policy, "rule", ruleContext.Rule)
-logger.Info("synchronize for none-policy changes")
+logger.Info("synchronize for non-policy changes")
 for _, rule := range policy.GetSpec().Rules {
 if ruleContext.Rule != rule.Name {
 continue
@@ -101,7 +101,7 @@ func NewGenerateController(
 func (c *GenerateController) ProcessUR(ur *kyvernov2.UpdateRequest) error {
 logger := c.log.WithValues("name", ur.GetName(), "policy", ur.Spec.GetPolicyKey())
 var genResources []kyvernov1.ResourceSpec
-logger.Info("start processing UR", "ur", ur.Name, "resourceVersion", ur.GetResourceVersion())
+logger.V(2).Info("start processing UR", "ur", ur.Name, "resourceVersion", ur.GetResourceVersion())
 
 var failures []error
 policy, err := c.getPolicyObject(*ur)
@@ -121,7 +121,7 @@ func (c *GenerateController) ProcessUR(ur *kyvernov2.UpdateRequest) error {
 genResources, err = c.applyGenerate(*trigger, *ur, policy, i)
 if err != nil {
 if strings.Contains(err.Error(), doesNotApply) {
-logger.V(4).Info(fmt.Sprintf("skipping rule %s: %v", rule.Rule, err.Error()))
+logger.V(3).Info(fmt.Sprintf("skipping rule %s: %v", rule.Rule, err.Error()))
 }
 
 events := event.NewBackgroundFailedEvent(err, policy, ur.Spec.RuleContext[i].Rule, event.GeneratePolicyController,
@@ -332,7 +332,7 @@ func (c *GenerateController) ApplyGeneratePolicy(log logr.Logger, policyContext
 for _, s := range vars {
 for _, banned := range validationpolicy.ForbiddenUserVariables {
 if banned.Match([]byte(s[2])) {
-log.Info("warning: resources with admission request variables may not be regenerated", "policy", policy.GetName(), "rule", rule.Name, "variable", s[2])
+log.V(2).Info("warning: resources with admission request variables may not be regenerated", "policy", policy.GetName(), "rule", rule.Name, "variable", s[2])
 }
 }
 }
@@ -89,7 +89,7 @@ func (c *mutateExistingController) ProcessUR(ur *kyvernov2.UpdateRequest) error
 logger := c.log.WithValues("name", ur.GetName(), "policy", ur.Spec.GetPolicyKey(), "resource", ur.Spec.GetResource().String())
 var errs []error
 
-logger.Info("processing mutate existing")
+logger.V(3).Info("processing mutate existing")
 policy, err := c.getPolicy(ur)
 if err != nil {
 logger.Error(err, "failed to get policy")
@@ -114,8 +114,8 @@ func (c *controller) Run(ctx context.Context, workers int) {
 defer runtime.HandleCrash()
 defer c.queue.ShutDown()
 
-logger.Info("starting")
-defer logger.Info("shutting down")
+logger.V(4).Info("starting")
+defer logger.V(4).Info("shutting down")
 
 if !cache.WaitForNamedCacheSync("background", ctx.Done(), c.informersSynced...) {
 return
@@ -243,7 +243,7 @@ func (c *controller) processUR(ur *kyvernov2.UpdateRequest) error {
 func (c *controller) reconcileURStatus(ur *kyvernov2.UpdateRequest) (kyvernov2.UpdateRequestState, error) {
 new, err := c.kyvernoClient.KyvernoV2().UpdateRequests(config.KyvernoNamespace()).Get(context.TODO(), ur.GetName(), metav1.GetOptions{})
 if err != nil {
-logger.V(2).Info("cannot fetch latest UR, fallback to the existing one", "reason", err.Error())
+logger.V(3).Info("cannot fetch latest UR, fallback to the existing one", "reason", err.Error())
 new = ur
 }
 
@@ -379,16 +379,16 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 // load filters
 cd.filters = parseKinds(data[resourceFilters])
 cd.updateRequestThreshold = UpdateRequestThreshold
-logger.Info("filters configured", "filters", cd.filters)
+logger.V(4).Info("filters configured", "filters", cd.filters)
 // load defaultRegistry
 defaultRegistry, ok := data[defaultRegistry]
 if !ok {
-logger.Info("defaultRegistry not set")
+logger.V(2).Info("defaultRegistry not set")
 } else {
 logger := logger.WithValues("defaultRegistry", defaultRegistry)
 if valid.IsDNSName(defaultRegistry) {
 cd.defaultRegistry = defaultRegistry
-logger.Info("defaultRegistry configured")
+logger.V(2).Info("defaultRegistry configured")
 } else {
 logger.Error(errors.New("defaultRegistry is not a valid DNS hostname"), "failed to configure defaultRegistry")
 }
@@ -396,7 +396,7 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 // load enableDefaultRegistryMutation
 enableDefaultRegistryMutation, ok := data[enableDefaultRegistryMutation]
 if !ok {
-logger.Info("enableDefaultRegistryMutation not set")
+logger.V(2).Info("enableDefaultRegistryMutation not set")
 } else {
 logger := logger.WithValues("enableDefaultRegistryMutation", enableDefaultRegistryMutation)
 enableDefaultRegistryMutation, err := strconv.ParseBool(enableDefaultRegistryMutation)
@@ -404,45 +404,45 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "enableDefaultRegistryMutation is not a boolean")
 } else {
 cd.enableDefaultRegistryMutation = enableDefaultRegistryMutation
-logger.Info("enableDefaultRegistryMutation configured")
+logger.V(2).Info("enableDefaultRegistryMutation configured")
 }
 }
 // load excludeGroupRole
 excludedGroups, ok := data[excludeGroups]
 if !ok {
-logger.Info("excludeGroups not set")
+logger.V(2).Info("excludeGroups not set")
 } else {
 cd.exclusions.groups, cd.inclusions.groups = parseExclusions(excludedGroups)
-logger.Info("excludedGroups configured", "excludeGroups", cd.exclusions.groups, "includeGroups", cd.inclusions.groups)
+logger.V(2).Info("excludedGroups configured", "excludeGroups", cd.exclusions.groups, "includeGroups", cd.inclusions.groups)
 }
 // load excludeUsername
 excludedUsernames, ok := data[excludeUsernames]
 if !ok {
-logger.Info("excludeUsernames not set")
+logger.V(2).Info("excludeUsernames not set")
 } else {
 cd.exclusions.usernames, cd.inclusions.usernames = parseExclusions(excludedUsernames)
-logger.Info("excludedUsernames configured", "excludeUsernames", cd.exclusions.usernames, "includeUsernames", cd.inclusions.usernames)
+logger.V(2).Info("excludedUsernames configured", "excludeUsernames", cd.exclusions.usernames, "includeUsernames", cd.inclusions.usernames)
 }
 // load excludeRoles
 excludedRoles, ok := data[excludeRoles]
 if !ok {
-logger.Info("excludeRoles not set")
+logger.V(2).Info("excludeRoles not set")
 } else {
 cd.exclusions.roles, cd.inclusions.roles = parseExclusions(excludedRoles)
-logger.Info("excludedRoles configured", "excludeRoles", cd.exclusions.roles, "includeRoles", cd.inclusions.roles)
+logger.V(2).Info("excludedRoles configured", "excludeRoles", cd.exclusions.roles, "includeRoles", cd.inclusions.roles)
 }
 // load excludeClusterRoles
 excludedClusterRoles, ok := data[excludeClusterRoles]
 if !ok {
-logger.Info("excludeClusterRoles not set")
+logger.V(2).Info("excludeClusterRoles not set")
 } else {
 cd.exclusions.clusterroles, cd.inclusions.clusterroles = parseExclusions(excludedClusterRoles)
-logger.Info("excludedClusterRoles configured", "excludeClusterRoles", cd.exclusions.clusterroles, "includeClusterRoles", cd.inclusions.clusterroles)
+logger.V(2).Info("excludedClusterRoles configured", "excludeClusterRoles", cd.exclusions.clusterroles, "includeClusterRoles", cd.inclusions.clusterroles)
 }
 // load generateSuccessEvents
 generateSuccessEvents, ok := data[generateSuccessEvents]
 if !ok {
-logger.Info("generateSuccessEvents not set")
+logger.V(2).Info("generateSuccessEvents not set")
 } else {
 logger := logger.WithValues("generateSuccessEvents", generateSuccessEvents)
 generateSuccessEvents, err := strconv.ParseBool(generateSuccessEvents)
@@ -450,13 +450,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "generateSuccessEvents is not a boolean")
 } else {
 cd.generateSuccessEvents = generateSuccessEvents
-logger.Info("generateSuccessEvents configured")
+logger.V(2).Info("generateSuccessEvents configured")
 }
 }
 // load webhooks
 webhooks, ok := data[webhooks]
 if !ok {
-logger.Info("webhooks not set")
+logger.V(2).Info("webhooks not set")
 } else {
 logger := logger.WithValues("webhooks", webhooks)
 webhook, err := parseWebhooks(webhooks)
@@ -464,13 +464,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse webhooks")
 } else {
 cd.webhook = *webhook
-logger.Info("webhooks configured")
+logger.V(2).Info("webhooks configured")
 }
 }
 // load webhook annotations
 webhookAnnotations, ok := data[webhookAnnotations]
 if !ok {
-logger.Info("webhookAnnotations not set")
+logger.V(2).Info("webhookAnnotations not set")
 } else {
 logger := logger.WithValues("webhookAnnotations", webhookAnnotations)
 webhookAnnotations, err := parseWebhookAnnotations(webhookAnnotations)
@@ -478,13 +478,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse webhook annotations")
 } else {
 cd.webhookAnnotations = webhookAnnotations
-logger.Info("webhookAnnotations configured")
+logger.V(2).Info("webhookAnnotations configured")
 }
 }
 // load webhook annotations
 webhookLabels, ok := data[webhookLabels]
 if !ok {
-logger.Info("webhookLabels not set")
+logger.V(2).Info("webhookLabels not set")
 } else {
 logger := logger.WithValues("webhookLabels", webhookLabels)
 webhookLabels, err := parseWebhookLabels(webhookLabels)
@@ -492,13 +492,13 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse webhook labels")
 } else {
 cd.webhookLabels = webhookLabels
-logger.Info("webhookLabels configured")
+logger.V(2).Info("webhookLabels configured")
 }
 }
 // load match conditions
 matchConditions, ok := data[matchConditions]
 if !ok {
-logger.Info("matchConditions not set")
+logger.V(2).Info("matchConditions not set")
 } else {
 logger := logger.WithValues("matchConditions", matchConditions)
 matchConditions, err := parseMatchConditions(matchConditions)
@@ -506,12 +506,12 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse match conditions")
 } else {
 cd.matchConditions = matchConditions
-logger.Info("matchConditions configured")
+logger.V(2).Info("matchConditions configured")
 }
 }
 threshold, ok := data[updateRequestThreshold]
 if !ok {
-logger.Info("enableDefaultRegistryMutation not set")
+logger.V(2).Info("enableDefaultRegistryMutation not set")
 } else {
 logger := logger.WithValues("enableDefaultRegistryMutation", enableDefaultRegistryMutation)
 urThreshold, err := strconv.ParseInt(threshold, 10, 64)
@@ -519,7 +519,7 @@ func (cd *configuration) load(cm *corev1.ConfigMap) {
 logger.Error(err, "enableDefaultRegistryMutation is not a boolean")
 } else {
 cd.updateRequestThreshold = urThreshold
-logger.Info("enableDefaultRegistryMutation configured")
+logger.V(2).Info("enableDefaultRegistryMutation configured")
 }
 }
 }
@@ -537,7 +537,7 @@ func (cd *configuration) unload() {
 cd.webhook = WebhookConfig{}
 cd.webhookAnnotations = nil
 cd.webhookLabels = nil
-logger.Info("configuration unloaded")
+logger.V(2).Info("configuration unloaded")
 }
 
 func (cd *configuration) notify() {
@@ -165,7 +165,7 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 // load metricsRefreshInterval
 metricsRefreshInterval, ok := data["metricsRefreshInterval"]
 if !ok {
-logger.Info("metricsRefreshInterval not set")
+logger.V(4).Info("metricsRefreshInterval not set")
 } else {
 logger := logger.WithValues("metricsRefreshInterval", metricsRefreshInterval)
 metricsRefreshInterval, err := time.ParseDuration(metricsRefreshInterval)
@@ -173,13 +173,13 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse metricsRefreshInterval")
 } else {
 cd.metricsRefreshInterval = metricsRefreshInterval
-logger.Info("metricsRefreshInterval configured")
+logger.V(4).Info("metricsRefreshInterval configured")
 }
 }
 // load namespaces
 namespaces, ok := data["namespaces"]
 if !ok {
-logger.Info("namespaces not set")
+logger.V(4).Info("namespaces not set")
 } else {
 logger := logger.WithValues("namespaces", namespaces)
 namespaces, err := parseIncludeExcludeNamespacesFromNamespacesConfig(namespaces)
@@ -187,13 +187,13 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse namespaces")
 } else {
 cd.namespaces = namespaces
-logger.Info("namespaces configured")
+logger.V(4).Info("namespaces configured")
 }
 }
 // load bucket boundaries
 bucketBoundariesString, ok := data["bucketBoundaries"]
 if !ok {
-logger.Info("bucketBoundaries not set")
+logger.V(4).Info("bucketBoundaries not set")
 } else {
 logger := logger.WithValues("bucketBoundaries", bucketBoundariesString)
 bucketBoundaries, err := parseBucketBoundariesConfig(bucketBoundariesString)
@@ -201,13 +201,13 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse bucketBoundariesString")
 } else {
 cd.bucketBoundaries = bucketBoundaries
-logger.Info("bucketBoundaries configured")
+logger.V(4).Info("bucketBoundaries configured")
 }
 }
 // load include resource details
 metricsExposureString, ok := data["metricsExposure"]
 if !ok {
-logger.Info("metricsExposure not set")
+logger.V(4).Info("metricsExposure not set")
 } else {
 logger := logger.WithValues("metricsExposure", metricsExposureString)
 metricsExposure, err := parseMetricExposureConfig(metricsExposureString, cd.bucketBoundaries)
@@ -215,7 +215,7 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
 logger.Error(err, "failed to parse metricsExposure")
 } else {
 cd.metricsExposure = metricsExposure
-logger.Info("metricsExposure configured")
+logger.V(4).Info("metricsExposure configured")
 }
 }
 }
@@ -95,7 +95,7 @@ func NewController(
 if obj.GetNamespace() != "" {
 logger = logger.WithValues("namespace", obj.GetNamespace())
 }
-logger.Info(operation)
+logger.V(2).Info(operation)
 if err := baseEnqueueFunc(obj); err != nil {
 logger.Error(err, "failed to enqueue object", "obj", obj)
 return err
@@ -42,7 +42,7 @@ func (c *controller) add(obj metav1.Object) {
 c.logger.Error(err, "failed to extract name", "object", obj)
 name = "unknown"
 }
-c.logger.Info("resource added", "name", name)
+c.logger.V(2).Info("resource added", "name", name)
 }
 
 func (c *controller) update(old, obj metav1.Object) {
@@ -56,7 +56,7 @@ func (c *controller) update(old, obj metav1.Object) {
 c.logger.Error(err, "failed to extract name", "object", obj)
 name = "unknown"
 }
-c.logger.Info("resource updated", "name", name)
+c.logger.V(2).Info("resource updated", "name", name)
 }
 
 func (c *controller) delete(obj metav1.Object) {
@@ -65,5 +65,5 @@ func (c *controller) delete(obj metav1.Object) {
 c.logger.Error(err, "failed to extract name", "object", obj)
 name = "unknown"
 }
-c.logger.Info("resource deleted", "name", name)
+c.logger.V(2).Info("resource deleted", "name", name)
 }
@@ -239,21 +239,21 @@ func (c *controller) reconcileWebhookDeletion(ctx context.Context) error {
 c.logger.Error(err, "failed to clean up validating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 return err
 } else if err == nil {
-c.logger.Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+c.logger.V(4).Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 }
 
 if err := c.postWebhookCleanup(ctx, c.logger); err != nil {
 c.logger.Error(err, "failed to clean up temporary rbac")
 return err
 } else {
-c.logger.Info("successfully deleted temporary rbac")
+c.logger.V(4).Info("successfully deleted temporary rbac")
 }
 } else {
 if err := c.webhookCleanupSetup(ctx, c.logger); err != nil {
 c.logger.Error(err, "failed to reconcile webhook cleanup setup")
 return err
 }
-c.logger.Info("reconciled webhook cleanup setup")
+c.logger.V(4).Info("reconciled webhook cleanup setup")
 }
 }
 return nil
@@ -79,7 +79,7 @@ func NewController(
 }
 
 func (c *controller) addGTXEntry(obj *kyvernov2alpha1.GlobalContextEntry) {
-logger.Info("globalcontextentry created", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
+logger.V(4).Info("globalcontextentry created", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
 c.enqueueGCTXEntry(obj)
 }
 
@@ -87,12 +87,12 @@ func (c *controller) updateGTXEntry(old, obj *kyvernov2alpha1.GlobalContextEntry
 if datautils.DeepEqual(old.Spec, obj.Spec) {
 return
 }
-logger.Info("globalcontextentry updated", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
+logger.V(4).Info("globalcontextentry updated", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
 c.enqueueGCTXEntry(obj)
 }
 
 func (c *controller) deleteGTXEntry(obj *kyvernov2alpha1.GlobalContextEntry) {
-logger.Info("globalcontextentry deleted", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
+logger.V(4).Info("globalcontextentry deleted", "uid", obj.GetUID(), "kind", obj.Kind, "name", obj.GetName())
 c.enqueueGCTXEntry(obj)
 }
 
@@ -66,8 +66,8 @@ func NewController(client dclient.Interface, pcache pcache.Cache, cpolInformer k
 }
 
 func (c *controller) WarmUp() error {
-logger.Info("warming up ...")
-defer logger.Info("warm up done")
+logger.V(4).Info("warming up ...")
+defer logger.V(4).Info("warm up done")
 
 pols, err := c.polLister.Policies(metav1.NamespaceAll).List(labels.Everything())
 if err != nil {
@@ -401,7 +401,7 @@ func (c *controller) frontReconcile(ctx context.Context, logger logr.Logger, _,
 if adopted, forbidden := c.adopt(ctx, reportMeta); adopted {
 return nil
 } else if forbidden {
-logger.Info("deleting because insufficient permission to fetch resource")
+logger.V(3).Info("deleting because insufficient permission to fetch resource")
 return c.deleteEphemeralReport(ctx, reportMeta.GetNamespace(), reportMeta.GetName())
 }
 // if not found and too old, forget about it
@@ -176,7 +176,7 @@ func NewController(
 }
 
 func (c *controller) Run(ctx context.Context, workers int) {
-logger.Info("background scan", "interval", c.forceDelay.Abs().String())
+logger.V(2).Info("background scan", "interval", c.forceDelay.Abs().String())
 controllerutils.Run(ctx, logger, ControllerName, time.Second, c.queue, workers, maxRetries, c.reconcile)
 }
 
@@ -196,9 +196,9 @@ func (c *controller) startWatcher(ctx context.Context, logger logr.Logger, gvr s
 c.notify(Added, uid, gvk, hashes[uid])
 }
 logger := logger.WithValues("resourceVersion", resourceVersion)
-logger.Info("start watcher ...")
+logger.V(2).Info("start watcher ...")
 watchFunc := func(options metav1.ListOptions) (watch.Interface, error) {
-logger.Info("creating watcher...")
+logger.V(3).Info("creating watcher...")
 watch, err := c.client.GetDynamicInterface().Resource(gvr).Watch(context.Background(), options)
 if err != nil {
 logger.Error(err, "failed to watch")
@@ -216,7 +216,7 @@ func (c *controller) startWatcher(ctx context.Context, logger logr.Logger, gvr s
 hashes: hashes,
 }
 go func(gvr schema.GroupVersionResource) {
-defer logger.Info("watcher stopped")
+defer logger.V(2).Info("watcher stopped")
 for event := range watchInterface.ResultChan() {
 switch event.Type {
 case watch.Added:
@@ -317,12 +317,12 @@ func (c *controller) addGVKToGVRMapping(group, version, kind, subresource string
 if gvrs.SubResource == "" {
 gvk := schema.GroupVersionKind{Group: gvrs.Group, Version: gvrs.Version, Kind: kind}
 if !reportutils.IsGvkSupported(gvk) {
-logger.Info("kind is not supported", "gvk", gvk)
+logger.V(2).Info("kind is not supported", "gvk", gvk)
 } else {
 if slices.Contains(api.Verbs, "list") && slices.Contains(api.Verbs, "watch") {
 gvrMap[gvk] = gvrs.GroupVersionResource()
 } else {
-logger.Info("list/watch not supported for kind", "kind", kind)
+logger.V(2).Info("list/watch not supported for kind", "kind", kind)
 }
 }
 }
@@ -135,7 +135,7 @@ func determinePropagationPolicy(metaObj metav1.Object, logger logr.Logger) *meta
 case "":
 return nil
 default:
-logger.Info("Unknown propagationPolicy annotation, no global policy found", "policy", annotations[kyverno.AnnotationCleanupPropagationPolicy])
+logger.V(2).Info("Unknown propagationPolicy annotation, no global policy found", "policy", annotations[kyverno.AnnotationCleanupPropagationPolicy])
 return nil
 }
 }
@@ -160,7 +160,7 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, itemKey
 }
 metaObj, err := meta.Accessor(obj)
 if err != nil {
-logger.Info("object is not of type metav1.Object")
+logger.V(2).Info("object is not of type metav1.Object")
 return err
 }
 commonLabels := []attribute.KeyValue{
@@ -197,7 +197,7 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, itemKey
 }
 return err
 }
-logger.Info("resource has been deleted")
+logger.V(2).Info("resource has been deleted")
 } else {
 if c.metrics.deletedObjectsTotal != nil {
 c.metrics.deletedObjectsTotal.Add(context.Background(), 1, metric.WithAttributes(commonLabels...))
@@ -127,8 +127,8 @@ func (m *manager) stop(ctx context.Context, gvr schema.GroupVersionResource) err
 if stopFunc, ok := m.resController[gvr]; ok {
 delete(m.resController, gvr)
 func() {
-defer logger.Info("controller stopped")
-logger.Info("stopping controller...")
+defer logger.V(2).Info("controller stopped")
+logger.V(2).Info("stopping controller...")
 stopFunc()
 }()
 }
@@ -115,7 +115,7 @@ func (c *controller) Run(ctx context.Context, workers int) {
 }
 
 func (c *controller) addPolicy(obj kyvernov1.PolicyInterface) {
-logger.Info("policy created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+logger.V(2).Info("policy created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 c.enqueuePolicy(obj)
 }
 
@@ -123,7 +123,7 @@ func (c *controller) updatePolicy(old, obj kyvernov1.PolicyInterface) {
 if datautils.DeepEqual(old.GetSpec(), obj.GetSpec()) {
 return
 }
-logger.Info("policy updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+logger.V(2).Info("policy updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 c.enqueuePolicy(obj)
 }
 
@@ -138,7 +138,7 @@ func (c *controller) deletePolicy(obj kyvernov1.PolicyInterface) {
 return
 }
 
-logger.Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "name", p.GetName())
+logger.V(2).Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "name", p.GetName())
 c.enqueuePolicy(obj)
 }
 
@@ -152,7 +152,7 @@ func (c *controller) enqueuePolicy(obj kyvernov1.PolicyInterface) {
 }
 
 func (c *controller) addException(obj *kyvernov2.PolicyException) {
-logger.Info("policy exception created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+logger.V(2).Info("policy exception created", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 c.enqueueException(obj)
 }
 
@@ -160,14 +160,14 @@ func (c *controller) updateException(old, obj *kyvernov2.PolicyException) {
 if datautils.DeepEqual(old.Spec, obj.Spec) {
 return
 }
-logger.Info("policy exception updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
+logger.V(2).Info("policy exception updated", "uid", obj.GetUID(), "kind", obj.GetKind(), "name", obj.GetName())
 c.enqueueException(obj)
 }
 
 func (c *controller) deleteException(obj *kyvernov2.PolicyException) {
 polex := kubeutils.GetObjectWithTombstone(obj).(*kyvernov2.PolicyException)
 
-logger.Info("policy exception deleted", "uid", polex.GetUID(), "kind", polex.GetKind(), "name", polex.GetName())
+logger.V(2).Info("policy exception deleted", "uid", polex.GetUID(), "kind", polex.GetKind(), "name", polex.GetName())
 c.enqueueException(obj)
 }
 
@@ -305,14 +305,14 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, nam
 
 // check if the controller has the required permissions to generate validating admission policies.
 if !admissionpolicy.HasValidatingAdmissionPolicyPermission(c.checker) {
-logger.Info("insufficient permissions to generate ValidatingAdmissionPolicies")
+logger.V(2).Info("insufficient permissions to generate ValidatingAdmissionPolicies")
 c.updateClusterPolicyStatus(ctx, *policy, false, "insufficient permissions to generate ValidatingAdmissionPolicies")
 return nil
 }
 
 // check if the controller has the required permissions to generate validating admission policy bindings.
 if !admissionpolicy.HasValidatingAdmissionPolicyBindingPermission(c.checker) {
-logger.Info("insufficient permissions to generate ValidatingAdmissionPolicyBindings")
+logger.V(2).Info("insufficient permissions to generate ValidatingAdmissionPolicyBindings")
 c.updateClusterPolicyStatus(ctx, *policy, false, "insufficient permissions to generate ValidatingAdmissionPolicyBindings")
 return nil
 }
@@ -433,7 +433,7 @@ func (c *controller) reconcileWebhookDeletion(ctx context.Context) error {
 logger.Error(err, "failed to clean up validating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 return err
 } else if err == nil {
-logger.Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+logger.V(3).Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 }
 if err := c.mwcClient.DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
 LabelSelector: kyverno.LabelWebhookManagedBy,
@@ -441,21 +441,21 @@ func (c *controller) reconcileWebhookDeletion(ctx context.Context) error {
 logger.Error(err, "failed to clean up mutating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 return err
 } else if err == nil {
-logger.Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+logger.V(3).Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 }
 
 if err := c.postWebhookCleanup(ctx, logger); err != nil {
 logger.Error(err, "failed to clean up temporary rbac")
 return err
 } else {
-logger.Info("successfully deleted temporary rbac")
+logger.V(3).Info("successfully deleted temporary rbac")
 }
 } else {
 if err := c.webhookCleanupSetup(ctx, logger); err != nil {
 logger.Error(err, "failed to reconcile webhook cleanup setup")
 return err
 }
-logger.Info("reconciled webhook cleanup setup")
+logger.V(3).Info("reconciled webhook cleanup setup")
 }
 }
 return nil
@@ -106,7 +106,7 @@ func (a *executor) executeServiceCall(ctx context.Context, apiCall *kyvernov1.AP
 }
 }
 
-a.logger.Info("executed service APICall", "name", a.name, "len", len(body))
+a.logger.V(4).Info("executed service APICall", "name", a.name, "len", len(body))
 return body, nil
 }
 
@@ -103,7 +103,7 @@ func (l *contextLoader) newLoader(
 ldr := loaders.NewConfigMapLoader(ctx, l.logger, entry, l.cmResolver, jsonContext)
 return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 } else {
-l.logger.Info("disabled loading of ConfigMap context entry", "name", entry.Name)
+l.logger.V(3).Info("disabled loading of ConfigMap context entry", "name", entry.Name)
 return nil, nil
 }
 } else if entry.APICall != nil {
@@ -111,7 +111,7 @@ func (l *contextLoader) newLoader(
 ldr := loaders.NewAPILoader(ctx, l.logger, entry, jsonContext, jp, client, l.apiCallConfig)
 return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 } else {
-l.logger.Info("disabled loading of APICall context entry", "name", entry.Name)
+l.logger.V(3).Info("disabled loading of APICall context entry", "name", entry.Name)
 return nil, nil
 }
 } else if entry.GlobalReference != nil {
@@ -119,7 +119,7 @@ func (l *contextLoader) newLoader(
 ldr := loaders.NewGCTXLoader(ctx, l.logger, entry, jsonContext, jp, gctx)
 return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 } else {
-l.logger.Info("disabled loading of GlobalContext context entry", "name", entry.Name)
+l.logger.V(3).Info("disabled loading of GlobalContext context entry", "name", entry.Name)
 return nil, nil
 }
 } else if entry.ImageRegistry != nil {
@@ -127,7 +127,7 @@ func (l *contextLoader) newLoader(
 ldr := loaders.NewImageDataLoader(ctx, l.logger, entry, jsonContext, jp, rclientFactory)
 return enginecontext.NewDeferredLoader(entry.Name, ldr, l.logger)
 } else {
-l.logger.Info("disabled loading of ImageRegistry context entry", "name", entry.Name)
+l.logger.V(3).Info("disabled loading of ImageRegistry context entry", "name", entry.Name)
 return nil, nil
 }
 } else if entry.Variable != nil {
@@ -45,7 +45,7 @@ func (f *forEachMutator) mutateForEach(ctx context.Context) *mutate.Response {
 if mutateResp.Status == engineapi.RuleStatusPass {
 f.resource.unstructured = mutateResp.PatchedResource
 }
-f.logger.Info("mutateResp.PatchedResource", "resource", mutateResp.PatchedResource)
+f.logger.V(3).Info("mutateResp.PatchedResource", "resource", mutateResp.PatchedResource)
 if err := f.policyContext.JSONContext().AddResource(mutateResp.PatchedResource.Object); err != nil {
 f.logger.Error(err, "failed to update resource in context")
 }
@@ -104,7 +104,7 @@ func (f *forEachMutator) mutateElements(ctx context.Context, foreach kyvernov1.F
 }
 
 if !preconditionsPassed {
-f.logger.Info("mutate.foreach.preconditions not met", "elementIndex", index, "message", msg)
+f.logger.V(3).Info("mutate.foreach.preconditions not met", "elementIndex", index, "message", msg)
 continue
 }
 
@@ -148,17 +148,17 @@ func (h validateManifestHandler) verifyManifest(
 // check if kyverno can 'create' dryrun resource
 ok, err := h.checkDryRunPermission(ctx, adreq.Kind.Kind, vo.DryRunNamespace)
 if err != nil {
-logger.V(1).Info("failed to check permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind, "error", err.Error())
+logger.V(2).Info("failed to check permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind, "error", err.Error())
 vo.DisableDryRun = true
 }
 if !ok {
-logger.V(1).Info("kyverno does not have permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind)
+logger.V(2).Info("kyverno does not have permissions to 'create' resource. disabled DryRun option.", "dryrun namespace", vo.DryRunNamespace, "kind", adreq.Kind.Kind)
 vo.DisableDryRun = true
 }
 // check if kyverno namespace is not used for dryrun
 ok = checkDryRunNamespace(vo.DryRunNamespace)
 if !ok {
-logger.V(1).Info("an inappropriate dryrun namespace is set; set a namespace other than kyverno.", "dryrun namespace", vo.DryRunNamespace)
+logger.V(2).Info("an inappropriate dryrun namespace is set; set a namespace other than kyverno.", "dryrun namespace", vo.DryRunNamespace)
 vo.DisableDryRun = true
 }
 }
@ -282,7 +282,7 @@ func (iv *ImageVerifier) verifyImage(
|
|||
}
|
||||
|
||||
if matchReferences(imageVerify.SkipImageReferences, image) {
|
||||
iv.logger.Info("skipping image reference", "image", image, "policy", iv.policyContext.Policy().GetName(), "ruleName", iv.rule.Name)
|
||||
iv.logger.V(3).Info("skipping image reference", "image", image, "policy", iv.policyContext.Policy().GetName(), "ruleName", iv.rule.Name)
|
||||
iv.ivm.Add(image, engineapi.ImageVerificationSkip)
|
||||
return engineapi.RuleSkip(iv.rule.Name, engineapi.ImageVerify, fmt.Sprintf("skipping image reference image %s, policy %s ruleName %s", image, iv.policyContext.Policy().GetName(), iv.rule.Name), iv.rule.ReportProperties).WithEmitWarning(true), ""
|
||||
}
|
||||
|
@ -538,7 +538,7 @@ func (iv *ImageVerifier) buildCosignVerifier(
|
|||
opts.Type = attestation.Type
|
||||
opts.IgnoreSCT = true // TODO: Add option to allow SCT when attestors are not provided
|
||||
if attestation.PredicateType != "" && attestation.Type == "" {
|
||||
iv.logger.Info("predicate type has been deprecated, please use type instead", "image", image)
|
||||
iv.logger.V(4).Info("predicate type has been deprecated, please use type instead", "image", image)
|
||||
opts.Type = attestation.PredicateType
|
||||
}
|
||||
opts.FetchAttestations = true
|
||||
|
@@ -647,7 +647,7 @@ func (iv *ImageVerifier) buildNotaryVerifier(
 		opts.Type = attestation.Type
 		opts.PredicateType = attestation.PredicateType
 		if attestation.PredicateType != "" && attestation.Type == "" {
-			iv.logger.Info("predicate type has been deprecated, please use type instead", "image", image)
+			iv.logger.V(2).Info("predicate type has been deprecated, please use type instead", "image", image)
 			opts.Type = attestation.PredicateType
 		}
 		opts.FetchAttestations = true
@@ -673,11 +673,11 @@ func (iv *ImageVerifier) verifyAttestation(statements []map[string]interface{},
 		iv.logger.V(4).Info("checking attestations", "predicates", types, "image", image)
 		statements = statementsByPredicate[attestation.Type]
 		if statements == nil {
-			iv.logger.Info("no attestations found for predicate", "type", attestation.Type, "predicates", types, "image", imageInfo.String())
+			iv.logger.V(2).Info("no attestations found for predicate", "type", attestation.Type, "predicates", types, "image", imageInfo.String())
 			return fmt.Errorf("attestions not found for predicate type %s", attestation.Type)
 		}
 		for _, s := range statements {
-			iv.logger.Info("checking attestation", "predicates", types, "image", imageInfo.String())
+			iv.logger.V(3).Info("checking attestation", "predicates", types, "image", imageInfo.String())
 			val, msg, err := iv.checkAttestations(attestation, s)
 			if err != nil {
 				return fmt.Errorf("failed to check attestations: %w", err)
@@ -101,8 +101,8 @@ func (gen *controller) Add(infos ...Info) {
 // Run begins generator
 func (gen *controller) Run(ctx context.Context, workers int) {
 	logger := gen.logger
-	logger.Info("start")
-	defer logger.Info("terminated")
+	logger.V(2).Info("start")
+	defer logger.V(2).Info("terminated")
 	defer utilruntime.HandleCrash()
 	var waitGroup wait.Group
 	for i := 0; i < workers; i++ {
@@ -80,23 +80,23 @@ func New(log logr.Logger, name, namespace string, kubeClient kubernetes.Interfac
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: func(ctx context.Context) {
 				atomic.StoreInt64(&e.isLeader, 1)
-				e.log.Info("started leading")
+				e.log.V(2).Info("started leading")
 				if e.startWork != nil {
 					e.startWork(ctx)
 				}
 			},
 			OnStoppedLeading: func() {
 				atomic.StoreInt64(&e.isLeader, 0)
-				e.log.Info("leadership lost, stopped leading")
+				e.log.V(2).Info("leadership lost, stopped leading")
 				if e.stopWork != nil {
 					e.stopWork()
 				}
 			},
 			OnNewLeader: func(identity string) {
 				if identity == e.lock.Identity() {
-					e.log.Info("still leading")
+					e.log.V(4).Info("still leading")
 				} else {
-					e.log.Info("another instance has been elected as leader", "leader", identity)
+					e.log.V(2).Info("another instance has been elected as leader", "leader", identity)
 				}
 			},
 		},
@@ -181,7 +181,7 @@ func (pc *policyController) canBackgroundProcess(p kyvernov1.PolicyInterface) bo
 func (pc *policyController) addPolicy(obj interface{}) {
 	logger := pc.log
 	p := castPolicy(obj)
-	logger.Info("policy created", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
+	logger.V(2).Info("policy created", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
 
 	if !pc.canBackgroundProcess(p) {
 		return
@@ -230,7 +230,7 @@ func (pc *policyController) deletePolicy(obj interface{}) {
 		return
 	}
 
-	logger.Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
+	logger.V(2).Info("policy deleted", "uid", p.GetUID(), "kind", p.GetKind(), "namespace", p.GetNamespace(), "name", p.GetName())
 	err := pc.createURForDownstreamDeletion(p)
 	if err != nil {
 		utilruntime.HandleError(fmt.Errorf("failed to create UR on policy deletion, clean up downstream resource may be failed: %v", err))
@@ -254,8 +254,8 @@ func (pc *policyController) Run(ctx context.Context, workers int) {
 	defer utilruntime.HandleCrash()
 	defer pc.queue.ShutDown()
 
-	logger.Info("starting")
-	defer logger.Info("shutting down")
+	logger.V(2).Info("starting")
+	defer logger.V(2).Info("shutting down")
 
 	if !cache.WaitForNamedCacheSync("PolicyController", ctx.Done(), pc.informersSynced...) {
 		return
@@ -368,7 +368,7 @@ func (pc *policyController) forceReconciliation(ctx context.Context) {
 	for {
 		select {
 		case <-ticker.C:
-			logger.Info("reconciling generate and mutateExisting policies", "scan interval", pc.reconcilePeriod.String())
+			logger.V(3).Info("reconciling generate and mutateExisting policies", "scan interval", pc.reconcilePeriod.String())
 			pc.requeuePolicies()
 
 		case <-ctx.Done():
@@ -210,7 +210,7 @@ func (m *policyMap) get(key PolicyType, gvr schema.GroupVersionResource, subreso
 		isNamespacedPolicy := ns != ""
 		policy := m.policies[policyName]
 		if policy == nil {
-			logger.Info("nil policy in the cache, this should not happen")
+			logger.V(4).Info("nil policy in the cache, this should not happen")
 		}
 		if !isNamespacedPolicy && namespace == "" {
 			result = append(result, policy)
@@ -11,7 +11,7 @@ import (
 )
 
 func Start(logger logr.Logger, address string) {
-	logger.Info("Enable profiling, see details at https://github.com/kyverno/kyverno/wiki/Profiling-Kyverno-on-Kubernetes")
+	logger.V(2).Info("Enable profiling, see details at https://github.com/kyverno/kyverno/wiki/Profiling-Kyverno-on-Kubernetes")
 	go func() {
 		s := http.Server{
 			Addr: address,
@@ -55,8 +55,8 @@ func newControllerMetrics(logger logr.Logger, controllerName string) *controller
 }
 
 func Run[T comparable](ctx context.Context, logger logr.Logger, controllerName string, period time.Duration, queue workqueue.TypedRateLimitingInterface[T], n, maxRetries int, r reconcileFunc, routines ...func(context.Context, logr.Logger)) {
-	logger.Info("starting ...")
-	defer logger.Info("stopped")
+	logger.V(2).Info("starting ...")
+	defer logger.V(2).Info("stopped")
 	var wg sync.WaitGroup
 	defer wg.Wait()
 	defer runtime.HandleCrash()
@@ -68,8 +68,8 @@ func Run[T comparable](ctx context.Context, logger logr.Logger, controllerName s
 		for i := 0; i < n; i++ {
 			wg.Add(1)
 			go func(logger logr.Logger) {
-				logger.Info("starting worker")
-				defer logger.Info("worker stopped")
+				logger.V(4).Info("starting worker")
+				defer logger.V(4).Info("worker stopped")
 				defer wg.Done()
 				wait.UntilWithContext(ctx, func(ctx context.Context) { worker(ctx, logger, metric, queue, maxRetries, r) }, period)
 			}(logger.WithName("worker").WithValues("id", i))
@@ -77,15 +77,15 @@ func Run[T comparable](ctx context.Context, logger logr.Logger, controllerName s
 		for i, routine := range routines {
 			wg.Add(1)
 			go func(logger logr.Logger, routine func(context.Context, logr.Logger)) {
-				logger.Info("starting routine")
-				defer logger.Info("routine stopped")
+				logger.V(4).Info("starting routine")
+				defer logger.V(4).Info("routine stopped")
 				defer wg.Done()
 				routine(ctx, logger)
 			}(logger.WithName("routine").WithValues("id", i), routine)
 		}
 		<-ctx.Done()
 	}()
-	logger.Info("waiting for workers to terminate ...")
+	logger.V(4).Info("waiting for workers to terminate ...")
 }
 
 func worker[T comparable](ctx context.Context, logger logr.Logger, metric *controllerMetrics, queue workqueue.TypedRateLimitingInterface[T], maxRetries int, r reconcileFunc) {
@@ -112,7 +112,7 @@ func handleErr[T comparable](ctx context.Context, logger logr.Logger, metric *co
 		logger.V(4).Info("Dropping request from the queue", "obj", obj, "error", err.Error())
 		queue.Forget(obj)
 	} else if queue.NumRequeues(obj) < maxRetries {
-		logger.Info("Retrying request", "obj", obj, "error", err.Error())
+		logger.V(3).Info("Retrying request", "obj", obj, "error", err.Error())
 		queue.AddRateLimited(obj)
 		if metric.requeueTotal != nil {
 			metric.requeueTotal.Add(
@@ -68,7 +68,7 @@ func (c *runtime) IsRollingUpdate() bool {
 	}
 	nonTerminatedReplicas := deployment.Status.Replicas
 	if nonTerminatedReplicas > replicas {
-		c.logger.Info("detect Kyverno is in rolling update, won't trigger the update again")
+		c.logger.V(2).Info("detect Kyverno is in rolling update, won't trigger the update again")
 		return true
 	}
 	return false
@@ -46,5 +46,5 @@ func Hash() string {
 
 // PrintVersionInfo displays the kyverno version - git version
 func PrintVersionInfo(log logr.Logger) {
-	log.Info("version", "version", Version(), "hash", Hash(), "time", Time())
+	log.V(2).Info("version", "version", Version(), "hash", Hash(), "time", Time())
 }
@@ -41,7 +41,7 @@ func dumpPayload(
 		logger.Error(err, "Failed to extract resources")
 	} else {
 		logger = logger.WithValues("admission.response", response, "admission.request", reqPayload)
-		logger.Info("admission request dump")
+		logger.V(4).Info("admission request dump")
 	}
 }
 
@@ -158,7 +158,7 @@ func (h *resourceHandlers) Validate(ctx context.Context, logger logr.Logger, req
 	}
 	wg.Wait()
 	if !ok {
-		logger.Info("admission request denied")
+		logger.V(4).Info("admission request denied")
 		events := webhookutils.GenerateEvents(enforceResponses, true, h.configuration)
 		h.eventGen.Add(events...)
 		return admissionutils.Response(request.UID, errors.New(msg), warnings...)
@@ -133,7 +133,7 @@ func (v *mutationHandler) applyMutations(
 			patches = append(patches, policyPatches...)
 			rules := engineResponse.GetSuccessRules()
 			if len(rules) != 0 {
-				v.log.Info("mutation rules from policy applied successfully", "policy", policy.GetName(), "rules", rules)
+				v.log.V(2).Info("mutation rules from policy applied successfully", "policy", policy.GetName(), "rules", rules)
 			}
 		}
 
@@ -182,10 +182,10 @@ func (h *mutationHandler) applyMutation(ctx context.Context, request admissionv1
 
 	if !engineResponse.IsSuccessful() {
 		if webhookutils.BlockRequest([]engineapi.EngineResponse{engineResponse}, failurePolicy, h.log) {
-			h.log.Info("failed to apply policy, blocking request", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
+			h.log.V(2).Info("failed to apply policy, blocking request", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
 			return nil, nil, fmt.Errorf("failed to apply policy %s rules %v", policyContext.Policy().GetName(), engineResponse.GetFailedRulesWithErrors())
 		} else {
-			h.log.Info("ignoring unsuccessful engine responses", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
+			h.log.V(4).Info("ignoring unsuccessful engine responses", "policy", policyContext.Policy().GetName(), "rules", engineResponse.GetFailedRulesWithErrors())
 			return &engineResponse, nil, nil
 		}
 	}
@@ -245,7 +245,7 @@ func (s *server) cleanup(ctx context.Context) {
 		if err := s.leaseClient.Delete(ctx, name, metav1.DeleteOptions{}); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "failed to clean up lease", "name", name)
 		} else if err == nil {
-			logger.Info("successfully deleted leases", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(2).Info("successfully deleted leases", "label", kyverno.LabelWebhookManagedBy)
 		}
 	}
 	deleteVwc := func() {
@@ -254,7 +254,7 @@ func (s *server) cleanup(ctx context.Context) {
 		}); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "failed to clean up validating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 		} else if err == nil {
-			logger.Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(2).Info("successfully deleted validating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 		}
 	}
 	deleteMwc := func() {
@@ -263,7 +263,7 @@ func (s *server) cleanup(ctx context.Context) {
 		}); err != nil && !apierrors.IsNotFound(err) {
 			logger.Error(err, "failed to clean up mutating webhook configuration", "label", kyverno.LabelWebhookManagedBy)
 		} else if err == nil {
-			logger.Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
+			logger.V(2).Info("successfully deleted mutating webhook configurations", "label", kyverno.LabelWebhookManagedBy)
 		}
 	}
 	deleteLease("kyvernopre-lock")