1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2024-12-14 11:57:48 +00:00

feat: add options to configure resync period for informers in helm chart (#11420)

* feat: add options to configure resync period for informers in helm chart

Signed-off-by: Vishal Choudhary <vishal.choudhary@nirmata.com>

* fix: codegen

Signed-off-by: Vishal Choudhary <vishal.choudhary@nirmata.com>

---------

Signed-off-by: Vishal Choudhary <vishal.choudhary@nirmata.com>
This commit is contained in:
Vishal Choudhary 2024-10-17 12:53:17 +05:30 committed by GitHub
parent a5e082303d
commit e3b74f1384
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 64 additions and 42 deletions

View file

@@ -374,6 +374,7 @@ The chart values are organised per component.
| admissionController.createSelfSignedCert | bool | `false` | Create self-signed certificates at deployment time. The certificates won't be automatically renewed if this is set to `true`. |
| admissionController.replicas | int | `nil` | Desired number of pods |
| admissionController.revisionHistoryLimit | int | `10` | The number of revisions to keep |
| admissionController.resyncPeriod | string | `"15m"` | Resync period for informers |
| admissionController.podLabels | object | `{}` | Additional labels to add to each pod |
| admissionController.podAnnotations | object | `{}` | Additional annotations to add to each pod |
| admissionController.annotations | object | `{}` | Deployment annotations. |
@@ -477,6 +478,7 @@ The chart values are organised per component.
| backgroundController.imagePullSecrets | list | `[]` | Image pull secrets |
| backgroundController.replicas | int | `nil` | Desired number of pods |
| backgroundController.revisionHistoryLimit | int | `10` | The number of revisions to keep |
| backgroundController.resyncPeriod | string | `"15m"` | Resync period for informers |
| backgroundController.podLabels | object | `{}` | Additional labels to add to each pod |
| backgroundController.podAnnotations | object | `{}` | Additional annotations to add to each pod |
| backgroundController.annotations | object | `{}` | Deployment annotations. |
@@ -551,6 +553,7 @@ The chart values are organised per component.
| cleanupController.imagePullSecrets | list | `[]` | Image pull secrets |
| cleanupController.replicas | int | `nil` | Desired number of pods |
| cleanupController.revisionHistoryLimit | int | `10` | The number of revisions to keep |
| cleanupController.resyncPeriod | string | `"15m"` | Resync period for informers |
| cleanupController.podLabels | object | `{}` | Additional labels to add to each pod |
| cleanupController.podAnnotations | object | `{}` | Additional annotations to add to each pod |
| cleanupController.annotations | object | `{}` | Deployment annotations. |
@@ -633,6 +636,7 @@ The chart values are organised per component.
| reportsController.imagePullSecrets | list | `[]` | Image pull secrets |
| reportsController.replicas | int | `nil` | Desired number of pods |
| reportsController.revisionHistoryLimit | int | `10` | The number of revisions to keep |
| reportsController.resyncPeriod | string | `"15m"` | Resync period for informers |
| reportsController.podLabels | object | `{}` | Additional labels to add to each pod |
| reportsController.podAnnotations | object | `{}` | Additional annotations to add to each pod |
| reportsController.annotations | object | `{}` | Deployment annotations. |
@@ -751,6 +755,7 @@ The chart values are organised per component.
|-----|------|---------|-------------|
| global.image.registry | string | `nil` | Global value that allows to set a single image registry across all deployments. When set, it will override any values set under `.image.registry` across the chart. |
| global.imagePullSecrets | list | `[]` | Global list of Image pull secrets When set, it will override any values set under `imagePullSecrets` under different components across the chart. |
| global.resyncPeriod | string | `"15m"` | Resync period for informers |
| global.caCertificates.data | string | `nil` | Global CA certificates to use with Kyverno deployments This value is expected to be one large string of CA certificates Individual controller values will override this global value |
| global.caCertificates.volume | object | `{}` | Global value to set single volume to be mounted for CA certificates for all deployments. Not used when `.Values.global.caCertificates.data` is defined Individual controller values will override this global value |
| global.extraEnvVars | list | `[]` | Additional container environment variables to apply to all containers and init containers |

View file

@@ -148,6 +148,7 @@ spec:
- --reportsServiceAccountName=system:serviceaccount:{{ include "kyverno.namespace" . }}:{{ include "kyverno.reports-controller.serviceAccountName" . }}
- --servicePort={{ .Values.admissionController.service.port }}
- --webhookServerPort={{ .Values.admissionController.webhookServer.port }}
- --resyncPeriod={{ .Values.admissionController.resyncPeriod | default .Values.global.resyncPeriod }}
{{- if .Values.webhooksCleanup.autoDeleteWebhooks.enabled }}
- --autoDeleteWebhooks
{{- end }}

View file

@@ -118,6 +118,7 @@ spec:
{{- if or .Values.imagePullSecrets .Values.existingImagePullSecrets }}
- --imagePullSecrets={{- join "," (concat (keys .Values.imagePullSecrets) .Values.existingImagePullSecrets) }}
{{- end }}
- --resyncPeriod={{ .Values.backgroundController.resyncPeriod | default .Values.global.resyncPeriod }}
{{- include "kyverno.features.flags" (pick (mergeOverwrite .Values.features .Values.backgroundController.featuresOverride)
"reporting"
"configMapCaching"

View file

@@ -108,6 +108,7 @@ spec:
- --servicePort={{ .Values.cleanupController.service.port }}
- --cleanupServerPort={{ .Values.cleanupController.server.port }}
- --webhookServerPort={{ .Values.cleanupController.webhookServer.port }}
- --resyncPeriod={{ .Values.cleanupController.resyncPeriod | default .Values.global.resyncPeriod }}
{{- if .Values.webhooksCleanup.autoDeleteWebhooks.enabled }}
- --autoDeleteWebhooks
{{- end }}

View file

@@ -118,6 +118,7 @@ spec:
{{- if or .Values.imagePullSecrets .Values.existingImagePullSecrets }}
- --imagePullSecrets={{- join "," (concat (keys .Values.imagePullSecrets) .Values.existingImagePullSecrets) }}
{{- end }}
- --resyncPeriod={{ .Values.reportsController.resyncPeriod | default .Values.global.resyncPeriod }}
{{- include "kyverno.features.flags" (pick (mergeOverwrite .Values.features .Values.reportsController.featuresOverride)
"reporting"
"admissionReports"

View file

@@ -15,6 +15,9 @@ global:
# When set, it will override any values set under `imagePullSecrets` under different components across the chart.
imagePullSecrets: []
# -- Resync period for informers
resyncPeriod: 15m
caCertificates:
# -- Global CA certificates to use with Kyverno deployments
# This value is expected to be one large string of CA certificates
@@ -778,6 +781,9 @@ admissionController:
# -- The number of revisions to keep
revisionHistoryLimit: 10
# -- Resync period for informers
resyncPeriod: 15m
# -- Additional labels to add to each pod
podLabels: {}
# example.com/label: foo
@@ -1234,6 +1240,9 @@ backgroundController:
# -- The number of revisions to keep
revisionHistoryLimit: 10
# -- Resync period for informers
resyncPeriod: 15m
# -- Additional labels to add to each pod
podLabels: {}
# example.com/label: foo
@@ -1497,6 +1506,9 @@ cleanupController:
# -- The number of revisions to keep
revisionHistoryLimit: 10
# -- Resync period for informers
resyncPeriod: 15m
# -- Additional labels to add to each pod
podLabels: {}
# example.com/label: foo
@@ -1810,6 +1822,9 @@ reportsController:
# -- The number of revisions to keep
revisionHistoryLimit: 10
# -- Resync period for informers
resyncPeriod: 15m
# -- Additional labels to add to each pod
podLabels: {}
# example.com/label: foo

View file

@@ -35,10 +35,6 @@ import (
kyamlopenapi "sigs.k8s.io/kustomize/kyaml/openapi"
)
const (
resyncPeriod = 15 * time.Minute
)
func sanityChecks(apiserverClient apiserver.Interface) error {
return kubeutils.CRDsInstalled(apiserverClient, "updaterequests.kyverno.io")
}
@@ -158,7 +154,7 @@ func main() {
os.Exit(1)
}
// informer factories
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
polexCache, polexController := internal.NewExceptionSelector(setup.Logger, kyvernoInformer)
eventGenerator := event.NewEventGenerator(
setup.EventsClient,
@@ -236,8 +232,8 @@ func main() {
func(ctx context.Context) {
logger := setup.Logger.WithName("leader")
// create leader factories
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, setup.ResyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
// create leader controllers
leaderControllers, err := createrLeaderControllers(
engine,

View file

@@ -40,7 +40,6 @@ import (
)
const (
resyncPeriod = 15 * time.Minute
webhookWorkers = 2
policyWebhookControllerName = "policy-webhook-controller"
ttlWebhookControllerName = "ttl-webhook-controller"
@@ -133,17 +132,17 @@ func main() {
os.Exit(1)
}
// certificates informers
caSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), caSecretName, resyncPeriod)
tlsSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tlsSecretName, resyncPeriod)
kyvernoDeployment := informers.NewDeploymentInformer(setup.KubeClient, config.KyvernoNamespace(), config.KyvernoDeploymentName(), resyncPeriod)
caSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), caSecretName, setup.ResyncPeriod)
tlsSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tlsSecretName, setup.ResyncPeriod)
kyvernoDeployment := informers.NewDeploymentInformer(setup.KubeClient, config.KyvernoNamespace(), config.KyvernoDeploymentName(), setup.ResyncPeriod)
if !informers.StartInformersAndWaitForCacheSync(ctx, setup.Logger, caSecret, tlsSecret, kyvernoDeployment) {
setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}
checker := checker.NewSelfChecker(setup.KubeClient.AuthorizationV1().SelfSubjectAccessReviews())
// informer factories
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, setup.ResyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
// listers
nsLister := kubeInformer.Core().V1().Namespaces().Lister()
// log policy changes
@@ -204,10 +203,10 @@ func main() {
func(ctx context.Context) {
logger := setup.Logger.WithName("leader")
// informer factories
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, setup.ResyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
cmResolver := internal.NewConfigMapResolver(ctx, setup.Logger, setup.KubeClient, resyncPeriod)
cmResolver := internal.NewConfigMapResolver(ctx, setup.Logger, setup.KubeClient, setup.ResyncPeriod)
// controllers
renewer := tls.NewCertRenewer(
@@ -344,6 +343,7 @@ func main() {
setup.KubeClient.Discovery(),
checker,
interval,
setup.ResyncPeriod,
),
ttlcontroller.Workers,
)

View file

@@ -2,7 +2,6 @@ package internal
import (
"context"
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/config"
@@ -11,10 +10,6 @@ import (
"k8s.io/client-go/kubernetes"
)
const (
resyncPeriod = 15 * time.Minute
)
func startConfigController(ctx context.Context, logger logr.Logger, client kubernetes.Interface, skipResourceFilters bool) config.Configuration {
configuration := config.NewDefaultConfiguration(skipResourceFilters)
configurationController := genericconfigmapcontroller.NewController(

View file

@@ -41,7 +41,7 @@ func NewEngine(
exceptionsSelector engineapi.PolicyExceptionSelector,
gctxStore loaders.Store,
) engineapi.Engine {
configMapResolver := NewConfigMapResolver(ctx, logger, kubeClient, 15*time.Minute)
configMapResolver := NewConfigMapResolver(ctx, logger, kubeClient, resyncPeriod)
logger = logger.WithName("engine")
logger.Info("setup engine...")
return engine.NewEngine(

View file

@@ -61,6 +61,8 @@ var (
enableGlobalContext bool
// reporting
enableReporting string
// resync
resyncPeriod time.Duration
)
func initLoggingFlags() {
@@ -97,6 +99,7 @@ func initKubeconfigFlags(qps float64, burst int, eventsQPS float64, eventsBurst
flag.IntVar(&clientRateLimitBurst, "clientRateLimitBurst", burst, "Configure the maximum burst for throttle. Uses the client default if zero.")
flag.Float64Var(&eventsRateLimitQPS, "eventsRateLimitQPS", eventsQPS, "Configure the maximum QPS to the Kubernetes API server from Kyverno for events. Uses the client default if zero.")
flag.IntVar(&eventsRateLimitBurst, "eventsRateLimitBurst", eventsBurst, "Configure the maximum burst for throttle for events. Uses the client default if zero.")
flag.DurationVar(&resyncPeriod, "resyncPeriod", 15*time.Minute, "Configure the resync period for informer factory")
}
func initPolicyExceptionsFlags() {

View file

@@ -50,6 +50,7 @@ type SetupResult struct {
KyvernoDynamicClient dclient.Interface
EventsClient eventsv1.EventsV1Interface
ReportingConfiguration reportutils.ReportingConfiguration
ResyncPeriod time.Duration
}
func Setup(config Configuration, name string, skipResourceFilters bool) (context.Context, SetupResult, context.CancelFunc) {
@@ -97,7 +98,7 @@ func Setup(config Configuration, name string, skipResourceFilters bool) (context
}
var dClient dclient.Interface
if config.UsesKyvernoDynamicClient() {
dClient = createKyvernoDynamicClient(logger, ctx, dynamicClient, client, 15*time.Minute)
dClient = createKyvernoDynamicClient(logger, ctx, dynamicClient, client, resyncPeriod)
}
var eventsClient eventsv1.EventsV1Interface
if config.UsesEventsClient() {
@@ -130,6 +131,7 @@ func Setup(config Configuration, name string, skipResourceFilters bool) (context
KyvernoDynamicClient: dClient,
EventsClient: eventsClient,
ReportingConfiguration: reportingConfig,
ResyncPeriod: resyncPeriod,
},
shutdown(logger.WithName("shutdown"), sdownMaxProcs, sdownMetrics, sdownTracing, sdownSignals)
}

View file

@@ -58,7 +58,6 @@ import (
)
const (
resyncPeriod = 15 * time.Minute
exceptionWebhookControllerName = "exception-webhook-controller"
gctxWebhookControllerName = "global-context-webhook-controller"
webhookControllerFinalizerName = "kyverno.io/webhooks"
@@ -346,9 +345,9 @@ func main() {
os.Exit(1)
}
}
caSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), caSecretName, resyncPeriod)
tlsSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tlsSecretName, resyncPeriod)
kyvernoDeployment := informers.NewDeploymentInformer(setup.KubeClient, config.KyvernoNamespace(), config.KyvernoDeploymentName(), resyncPeriod)
caSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), caSecretName, setup.ResyncPeriod)
tlsSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tlsSecretName, setup.ResyncPeriod)
kyvernoDeployment := informers.NewDeploymentInformer(setup.KubeClient, config.KyvernoNamespace(), config.KyvernoDeploymentName(), setup.ResyncPeriod)
if !informers.StartInformersAndWaitForCacheSync(signalCtx, setup.Logger, caSecret, tlsSecret, kyvernoDeployment) {
setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
@@ -364,9 +363,9 @@ func main() {
os.Exit(1)
}
// informer factories
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, setup.ResyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, setup.ResyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
certRenewer := tls.NewCertRenewer(
setup.KubeClient.CoreV1().Secrets(config.KyvernoNamespace()),
@@ -480,8 +479,8 @@ func main() {
func(ctx context.Context) {
logger := setup.Logger.WithName("leader")
// create leader factories
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, setup.ResyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
// create leader controllers
leaderControllers, warmup, err := createrLeaderControllers(
generateValidatingAdmissionPolicy,

View file

@@ -36,10 +36,6 @@ import (
kyamlopenapi "sigs.k8s.io/kustomize/kyaml/openapi"
)
const (
resyncPeriod = 15 * time.Minute
)
func sanityChecks(apiserverClient apiserver.Interface) error {
return kubeutils.CRDsInstalled(apiserverClient,
"clusterpolicyreports.wgpolicyk8s.io",
@@ -273,7 +269,7 @@ func main() {
}
}
// informer factories
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
polexCache, polexController := internal.NewExceptionSelector(setup.Logger, kyvernoInformer)
eventGenerator := event.NewEventGenerator(
setup.EventsClient,
@@ -347,10 +343,10 @@ func main() {
func(ctx context.Context) {
logger := setup.Logger.WithName("leader")
// create leader factories
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
metadataInformer := metadatainformers.NewSharedInformerFactory(setup.MetadataClient, 15*time.Minute)
kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, setup.ResyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, setup.ResyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, setup.ResyncPeriod)
metadataInformer := metadatainformers.NewSharedInformerFactory(setup.MetadataClient, setup.ResyncPeriod)
// create leader controllers
leaderControllers, warmup, err := createrLeaderControllers(
engine,

View file

@@ -51101,6 +51101,7 @@ spec:
- --reportsServiceAccountName=system:serviceaccount:kyverno:kyverno-reports-controller
- --servicePort=443
- --webhookServerPort=9443
- --resyncPeriod=15m
- --disableMetrics=false
- --otelConfig=prometheus
- --metricsPort=8000
@@ -51266,6 +51267,7 @@ spec:
- --disableMetrics=false
- --otelConfig=prometheus
- --metricsPort=8000
- --resyncPeriod=15m
- --enableConfigMapCaching=true
- --enableDeferredLoading=true
- --maxAPICallResponseLength=2000000
@@ -51372,6 +51374,7 @@ spec:
- --servicePort=443
- --cleanupServerPort=9443
- --webhookServerPort=9443
- --resyncPeriod=15m
- --disableMetrics=false
- --otelConfig=prometheus
- --metricsPort=8000
@@ -51510,6 +51513,7 @@ spec:
- --disableMetrics=false
- --otelConfig=prometheus
- --metricsPort=8000
- --resyncPeriod=15m
- --admissionReports=true
- --aggregateReports=true
- --policyReports=true

View file

@@ -41,6 +41,7 @@ type manager struct {
interval time.Duration
lock sync.Mutex
infoMetric metric.Int64ObservableGauge
resyncPeriod time.Duration
}
func NewManager(
@@ -48,6 +49,7 @@ func NewManager(
discoveryInterface discovery.DiscoveryInterface,
checker checker.AuthChecker,
timeInterval time.Duration,
resyncPeriod time.Duration,
) controllers.Controller {
logger := logging.WithName(ControllerName)
meterProvider := otel.GetMeterProvider()
@@ -67,6 +69,7 @@ func NewManager(
logger: logger,
interval: timeInterval,
infoMetric: infoMetric,
resyncPeriod: resyncPeriod,
}
if infoMetric != nil {
if _, err := meter.RegisterCallback(mgr.report, infoMetric); err != nil {
@@ -143,7 +146,7 @@ func (m *manager) start(ctx context.Context, gvr schema.GroupVersionResource, wo
informer := metadatainformer.NewFilteredMetadataInformer(m.metadataClient,
gvr,
metav1.NamespaceAll,
10*time.Minute,
m.resyncPeriod,
indexers,
options,
)