1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-31 03:45:17 +00:00

fix: cert manager duplicate event handler (#3772)

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

Co-authored-by: Sambhav Kothari <sambhavs.email@gmail.com>
This commit is contained in:
Charles-Edouard Brétéché 2022-05-03 09:37:18 +02:00 committed by GitHub
parent 810369d876
commit 873e394e5f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
3 changed files with 74 additions and 94 deletions

View file

@@ -91,23 +91,46 @@ func main() {
flag.Float64Var(&clientRateLimitQPS, "clientRateLimitQPS", 0, "Configure the maximum QPS to the Kubernetes API server from Kyverno. Uses the client default if zero.")
flag.IntVar(&clientRateLimitBurst, "clientRateLimitBurst", 0, "Configure the maximum burst for throttle. Uses the client default if zero.")
flag.Func(toggle.AutogenInternalsFlagName, toggle.AutogenInternalsDescription, toggle.AutogenInternalsFlag)
flag.DurationVar(&webhookRegistrationTimeout, "webhookRegistrationTimeout", 120*time.Second, "Timeout for webhook registration, e.g., 30s, 1m, 5m.")
if err := flag.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level")
os.Exit(1)
}
flag.Parse()
version.PrintVersionInfo(log.Log)
cleanUp := make(chan struct{})
stopCh := signal.SetupSignalHandler()
debug := serverIP != ""
// clients
clientConfig, err := config.CreateClientConfig(kubeconfig, clientRateLimitQPS, clientRateLimitBurst)
if err != nil {
setupLog.Error(err, "Failed to build kubeconfig")
os.Exit(1)
}
kyvernoClient, err := kyvernoclient.NewForConfig(clientConfig)
if err != nil {
setupLog.Error(err, "Failed to create client")
os.Exit(1)
}
dynamicClient, err := dclient.NewClient(clientConfig, 15*time.Minute, stopCh, log.Log)
if err != nil {
setupLog.Error(err, "Failed to create dynamic client")
os.Exit(1)
}
kubeClient, err := utils.NewKubeClient(clientConfig)
if err != nil {
setupLog.Error(err, "Failed to create kubernetes client")
os.Exit(1)
}
// sanity checks
if !utils.CRDsInstalled(dynamicClient.Discovery()) {
setupLog.Error(fmt.Errorf("CRDs not installed"), "Failed to access Kyverno CRDs")
os.Exit(1)
}
var metricsServerMux *http.ServeMux
var promConfig *metrics.PromConfig
@@ -123,36 +146,10 @@ func main() {
}()
}
// KYVERNO CRD CLIENT
pclient, err := kyvernoclient.NewForConfig(clientConfig)
if err != nil {
setupLog.Error(err, "Failed to create client")
os.Exit(1)
}
// DYNAMIC CLIENT
// - client for all registered resources
client, err := dclient.NewClient(clientConfig, 15*time.Minute, stopCh, log.Log)
if err != nil {
setupLog.Error(err, "Failed to create client")
os.Exit(1)
}
// CRD CHECK
// - verify if Kyverno CRDs are available
if !utils.CRDsInstalled(client.Discovery()) {
setupLog.Error(fmt.Errorf("CRDs not installed"), "Failed to access Kyverno CRDs")
os.Exit(1)
}
kubeClient, err := utils.NewKubeClient(clientConfig)
if err != nil {
setupLog.Error(err, "Failed to create kubernetes client")
os.Exit(1)
}
// informer factories
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(kyvernoClient, policyControllerResyncPeriod)
// load image registry secrets
secrets := strings.Split(imagePullSecrets, ",")
@@ -168,54 +165,49 @@ func main() {
cosign.ImageSignatureRepository = imageSignatureRepository
}
// KYVERNO CRD INFORMERS
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, policyControllerResyncPeriod)
// EVENT GENERATOR
// - generate event with retry mechanism
eventGenerator := event.NewEventGenerator(
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
dynamicClient,
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
log.Log.WithName("EventGenerator"))
// POLICY Report GENERATOR
reportReqGen := policyreport.NewReportChangeRequestGenerator(pclient,
client,
pInformer.Kyverno().V1alpha2().ReportChangeRequests(),
pInformer.Kyverno().V1alpha2().ClusterReportChangeRequests(),
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
reportReqGen := policyreport.NewReportChangeRequestGenerator(kyvernoClient,
dynamicClient,
kyvernoInformer.Kyverno().V1alpha2().ReportChangeRequests(),
kyvernoInformer.Kyverno().V1alpha2().ClusterReportChangeRequests(),
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
log.Log.WithName("ReportChangeRequestGenerator"),
)
prgen, err := policyreport.NewReportGenerator(
pclient,
client,
pInformer.Wgpolicyk8s().V1alpha2().ClusterPolicyReports(),
pInformer.Wgpolicyk8s().V1alpha2().PolicyReports(),
pInformer.Kyverno().V1alpha2().ReportChangeRequests(),
pInformer.Kyverno().V1alpha2().ClusterReportChangeRequests(),
kyvernoClient,
dynamicClient,
kyvernoInformer.Wgpolicyk8s().V1alpha2().ClusterPolicyReports(),
kyvernoInformer.Wgpolicyk8s().V1alpha2().PolicyReports(),
kyvernoInformer.Kyverno().V1alpha2().ReportChangeRequests(),
kyvernoInformer.Kyverno().V1alpha2().ClusterReportChangeRequests(),
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("PolicyReportGenerator"),
)
if err != nil {
setupLog.Error(err, "Failed to create policy report controller")
os.Exit(1)
}
debug := serverIP != ""
webhookCfg := webhookconfig.NewRegister(
clientConfig,
client,
dynamicClient,
kubeClient,
pclient,
kyvernoClient,
kubeInformer.Admissionregistration().V1().MutatingWebhookConfigurations(),
kubeInformer.Admissionregistration().V1().ValidatingWebhookConfigurations(),
kubeKyvernoInformer.Apps().V1().Deployments(),
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
serverIP,
int32(webhookTimeout),
debug,
@@ -274,11 +266,11 @@ func main() {
// - status aggregator: receives stats when a policy is applied & updates the policy status
policyCtrl, err := policy.NewPolicyController(
kubeClient,
pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
pInformer.Kyverno().V1beta1().UpdateRequests(),
kyvernoClient,
dynamicClient,
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
configData,
eventGenerator,
reportReqGen,
@@ -294,18 +286,18 @@ func main() {
os.Exit(1)
}
urgen := webhookgenerate.NewGenerator(pclient,
pInformer.Kyverno().V1beta1().UpdateRequests(),
urgen := webhookgenerate.NewGenerator(kyvernoClient,
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
stopCh,
log.Log.WithName("UpdateRequestGenerator"))
urc, err := background.NewController(
kubeClient,
pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
pInformer.Kyverno().V1beta1().UpdateRequests(),
kyvernoClient,
dynamicClient,
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
eventGenerator,
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("BackgroundController"),
@@ -318,11 +310,11 @@ func main() {
grcc, err := generatecleanup.NewController(
kubeClient,
pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
pInformer.Kyverno().V1beta1().UpdateRequests(),
kyvernoClient,
dynamicClient,
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("GenerateCleanUpController"),
)
@@ -332,8 +324,8 @@ func main() {
}
pCacheController := policycache.NewPolicyCacheController(
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
log.Log.WithName("PolicyCacheController"),
)
@@ -346,7 +338,7 @@ func main() {
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("ValidateAuditHandler"),
configData,
client,
dynamicClient,
promConfig,
)
@@ -358,7 +350,6 @@ func main() {
log.Log.WithName("CertManager"),
stopCh,
)
if err != nil {
setupLog.Error(err, "failed to initialize CertManager")
os.Exit(1)
@@ -367,7 +358,7 @@ func main() {
registerWrapperRetry := common.RetryFunc(time.Second, webhookRegistrationTimeout, webhookCfg.Register, "failed to register webhook", setupLog)
registerWebhookConfigurations := func() {
certManager.InitTLSPemPair()
pInformer.WaitForCacheSync(stopCh)
kyvernoInformer.WaitForCacheSync(stopCh)
kubeInformer.WaitForCacheSync(stopCh)
kubeKyvernoInformer.WaitForCacheSync(stopCh)
@@ -376,7 +367,6 @@ func main() {
setupLog.Error(err, "invalid format of the Kyverno init ConfigMap, please correct the format of 'data.webhooks'")
os.Exit(1)
}
if autoUpdateWebhooks {
go webhookCfg.UpdateWebhookConfigurations(configData)
}
@@ -407,7 +397,7 @@ func main() {
go webhookRegisterLeader.Run(ctx)
// the webhook server runs across all instances
openAPIController := startOpenAPIController(client, stopCh)
openAPIController := startOpenAPIController(dynamicClient, stopCh)
var tlsPair *ktls.PemPair
tlsPair, err = certManager.GetTLSPemPair()
@@ -423,11 +413,11 @@ func main() {
// -- generate policy violation resource
// -- generate events on policy and resource
server, err := webhooks.NewWebhookServer(
pclient,
client,
kyvernoClient,
dynamicClient,
tlsPair,
pInformer.Kyverno().V1beta1().UpdateRequests(),
pInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kubeInformer.Rbac().V1().RoleBindings(),
kubeInformer.Rbac().V1().ClusterRoleBindings(),
kubeInformer.Rbac().V1().Roles(),
@@ -484,10 +474,10 @@ func main() {
os.Exit(1)
}
pInformer.Start(stopCh)
kyvernoInformer.Start(stopCh)
kubeInformer.Start(stopCh)
kubeKyvernoInformer.Start(stopCh)
pInformer.WaitForCacheSync(stopCh)
kyvernoInformer.WaitForCacheSync(stopCh)
kubeInformer.WaitForCacheSync(stopCh)
kubeKyvernoInformer.WaitForCacheSync(stopCh)
@@ -521,13 +511,10 @@ func startOpenAPIController(client dclient.Interface, stopCh <-chan struct{}) *o
setupLog.Error(err, "Failed to create openAPIController")
os.Exit(1)
}
// Sync openAPI definitions of resources
openAPISync := openapi.NewCRDSync(client, openAPIController)
// start openAPI controller, this is used in admission review
// thus is required in each instance
openAPISync.Run(1, stopCh)
return openAPIController
}

View file

@@ -90,13 +90,11 @@ func CreateClientConfig(kubeconfig string, qps float64, burst int) (*rest.Config
if err != nil {
return nil, err
}
if qps > math.MaxFloat32 {
return nil, fmt.Errorf("client rate limit QPS must not be higher than %e", math.MaxFloat32)
}
clientConfig.Burst = burst
clientConfig.QPS = float32(qps)
return clientConfig, nil
}

View file

@@ -116,11 +116,6 @@ func (m *certManager) GetTLSPemPair() (*tls.PemPair, error) {
}
func (m *certManager) Run(stopCh <-chan struct{}) {
m.secretInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: m.addSecretFunc,
UpdateFunc: m.updateSecretFunc,
})
m.log.Info("start managing certificate")
certsRenewalTicker := time.NewTicker(tls.CertRenewalInterval)
defer certsRenewalTicker.Stop()