
fix: harden certs secrets management (#7634)

* fix: harden certs secrets management

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* rbac

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* informers

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

---------

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Charles-Edouard Brétéché <charles.edouard@nirmata.com>, 2023-06-22 14:14:53 +02:00 (committed by GitHub)
parent ffc6ca7408, commit e267a1dacf
7 changed files with 167 additions and 33 deletions

@@ -8,6 +8,12 @@ metadata:
     {{- include "kyverno.cleanup-controller.labels" . | nindent 4 }}
   namespace: {{ template "kyverno.namespace" . }}
 rules:
+- apiGroups:
+  - ''
+  resources:
+  - secrets
+  verbs:
+  - create
 - apiGroups:
   - ''
   resources:
@@ -16,8 +22,10 @@ rules:
   - get
   - list
   - watch
-  - create
   - update
+  resourceNames:
+  - {{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-ca
+  - {{ template "kyverno.cleanup-controller.name" . }}.{{ template "kyverno.namespace" . }}.svc.kyverno-tls-pair
 - apiGroups:
   - ''
   resources:
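The hardening splits the role's secret permissions in two: create stays unscoped, because RBAC cannot match resourceNames against an object that does not exist yet, while the verbs that touch existing secrets are pinned to the two certificate secrets. A minimal sketch (not part of the commit) of the equivalent rules as rbac/v1 API objects, with the names copied from the rendered install.yaml further down:

    package sketch

    import rbacv1 "k8s.io/api/rbac/v1"

    // certSecretRules sketches the hardened role in Go.
    var certSecretRules = []rbacv1.PolicyRule{
        {
            // create cannot be restricted by resourceNames: the object's
            // name is not known to the authorizer until it exists.
            APIGroups: []string{""},
            Resources: []string{"secrets"},
            Verbs:     []string{"create"},
        },
        {
            APIGroups: []string{""},
            Resources: []string{"secrets"},
            Verbs:     []string{"get", "list", "watch", "update"},
            ResourceNames: []string{
                "kyverno-cleanup-controller.kyverno.svc.kyverno-tls-ca",
                "kyverno-cleanup-controller.kyverno.svc.kyverno-tls-pair",
            },
        },
    }

Note that a resourceNames-scoped list/watch rule generally only matches requests that also pin metadata.name through a field selector, which is presumably why this commit introduces the single-name secret informers in pkg/informers below.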

@@ -17,6 +17,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/controllers/cleanup"
 	genericloggingcontroller "github.com/kyverno/kyverno/pkg/controllers/generic/logging"
 	genericwebhookcontroller "github.com/kyverno/kyverno/pkg/controllers/generic/webhook"
+	"github.com/kyverno/kyverno/pkg/informers"
 	"github.com/kyverno/kyverno/pkg/leaderelection"
 	"github.com/kyverno/kyverno/pkg/tls"
 	"github.com/kyverno/kyverno/pkg/webhooks"
@@ -72,6 +73,13 @@ func main() {
 	// setup
 	ctx, setup, sdown := internal.Setup(appConfig, "kyverno-cleanup-controller", false)
 	defer sdown()
+	// certificates informers
+	caSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tls.GenerateRootCASecretName(), resyncPeriod)
+	tlsSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tls.GenerateTLSPairSecretName(), resyncPeriod)
+	if !informers.StartInformersAndWaitForCacheSync(ctx, setup.Logger, caSecret, tlsSecret) {
+		setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
+		os.Exit(1)
+	}
 	// setup leader election
 	le, err := leaderelection.New(
 		setup.Logger.WithName("leader-election"),
@@ -85,7 +93,6 @@ func main() {
 	// informer factories
 	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod)
 	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
-	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 	// controllers
 	renewer := tls.NewCertRenewer(
 		setup.KubeClient.CoreV1().Secrets(config.KyvernoNamespace()),
@@ -97,7 +104,8 @@ func main() {
 	certController := internal.NewController(
 		certmanager.ControllerName,
 		certmanager.NewController(
-			kubeKyvernoInformer.Core().V1().Secrets(),
+			caSecret,
+			tlsSecret,
 			renewer,
 		),
 		certmanager.Workers,
@@ -108,7 +116,7 @@ func main() {
 		webhookControllerName,
 		setup.KubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations(),
 		kubeInformer.Admissionregistration().V1().ValidatingWebhookConfigurations(),
-		kubeKyvernoInformer.Core().V1().Secrets(),
+		caSecret,
 		config.CleanupValidatingWebhookConfigurationName,
 		config.CleanupValidatingWebhookServicePath,
 		serverIP,
@@ -145,7 +153,7 @@ func main() {
 		cleanup.Workers,
 	)
 	// start informers and wait for cache sync
-	if !internal.StartInformersAndWaitForCacheSync(ctx, logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
+	if !internal.StartInformersAndWaitForCacheSync(ctx, logger, kyvernoInformer, kubeInformer) {
 		logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
 		os.Exit(1)
 	}
@@ -165,10 +173,8 @@ func main() {
 	}
 	// informer factories
 	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod)
-	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
 	// listers
-	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 	cpolLister := kyvernoInformer.Kyverno().V2alpha1().ClusterCleanupPolicies().Lister()
 	polLister := kyvernoInformer.Kyverno().V2alpha1().CleanupPolicies().Lister()
 	nsLister := kubeInformer.Core().V1().Namespaces().Lister()
@@ -186,7 +192,7 @@ func main() {
 		genericloggingcontroller.CheckGeneration,
 	)
 	// start informers and wait for cache sync
-	if !internal.StartInformersAndWaitForCacheSync(ctx, setup.Logger, kubeKyvernoInformer, kubeInformer, kyvernoInformer) {
+	if !internal.StartInformersAndWaitForCacheSync(ctx, setup.Logger, kubeInformer, kyvernoInformer) {
 		os.Exit(1)
 	}
 	// create handlers
@@ -195,7 +201,7 @@ func main() {
 	// create server
 	server := NewServer(
 		func() ([]byte, []byte, error) {
-			secret, err := secretLister.Get(tls.GenerateTLSPairSecretName())
+			secret, err := tlsSecret.Lister().Secrets(config.KyvernoNamespace()).Get(tls.GenerateTLSPairSecretName())
 			if err != nil {
 				return nil, nil, err
 			}
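Both binaries now hand their webhook server a cert/key getter like the closure above, resolving the pair from the single-name informer's cache instead of a namespace-wide secret lister. As a hedged sketch of how such a getter is typically consumed (kyverno's actual server wiring may differ), it plugs into crypto/tls via GetCertificate, so a renewed secret takes effect without restarting the server:

    package sketch

    import "crypto/tls"

    // tlsConfigFromGetter is illustrative only: it adapts a pair getter like
    // the closure above into a *tls.Config that re-reads the pair on every
    // handshake.
    func tlsConfigFromGetter(getPair func() ([]byte, []byte, error)) *tls.Config {
        return &tls.Config{
            GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
                certPEM, keyPEM, err := getPair()
                if err != nil {
                    return nil, err
                }
                cert, err := tls.X509KeyPair(certPEM, keyPEM)
                if err != nil {
                    return nil, err
                }
                return &cert, nil
            },
        }
    }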

@@ -25,6 +25,7 @@ import (
 	webhookcontroller "github.com/kyverno/kyverno/pkg/controllers/webhook"
 	engineapi "github.com/kyverno/kyverno/pkg/engine/api"
 	"github.com/kyverno/kyverno/pkg/event"
+	"github.com/kyverno/kyverno/pkg/informers"
 	"github.com/kyverno/kyverno/pkg/leaderelection"
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/openapi"
@@ -43,6 +44,7 @@ import (
 	corev1 "k8s.io/api/core/v1"
 	apiserver "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
 	kubeinformers "k8s.io/client-go/informers"
+	corev1informers "k8s.io/client-go/informers/core/v1"
 	"k8s.io/client-go/kubernetes"
 	kyamlopenapi "sigs.k8s.io/kustomize/kyaml/openapi"
 )
@@ -105,6 +107,8 @@ func createrLeaderControllers(
 	kubeInformer kubeinformers.SharedInformerFactory,
 	kubeKyvernoInformer kubeinformers.SharedInformerFactory,
 	kyvernoInformer kyvernoinformer.SharedInformerFactory,
+	caInformer corev1informers.SecretInformer,
+	tlsInformer corev1informers.SecretInformer,
 	kubeClient kubernetes.Interface,
 	kyvernoClient versioned.Interface,
 	dynamicClient dclient.Interface,
@@ -114,7 +118,8 @@ func createrLeaderControllers(
 	configuration config.Configuration,
 ) ([]internal.Controller, func(context.Context) error, error) {
 	certManager := certmanager.NewController(
-		kubeKyvernoInformer.Core().V1().Secrets(),
+		caInformer,
+		tlsInformer,
 		certRenewer,
 	)
 	webhookController := webhookcontroller.NewController(
@@ -127,7 +132,7 @@ func createrLeaderControllers(
 		kubeInformer.Admissionregistration().V1().ValidatingWebhookConfigurations(),
 		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
 		kyvernoInformer.Kyverno().V1().Policies(),
-		kubeKyvernoInformer.Core().V1().Secrets(),
+		caInformer,
 		kubeKyvernoInformer.Coordination().V1().Leases(),
 		kubeInformer.Rbac().V1().ClusterRoles(),
 		serverIP,
@@ -142,7 +147,7 @@ func createrLeaderControllers(
 		exceptionWebhookControllerName,
 		kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations(),
 		kubeInformer.Admissionregistration().V1().ValidatingWebhookConfigurations(),
-		kubeKyvernoInformer.Core().V1().Secrets(),
+		caInformer,
 		config.ExceptionValidatingWebhookConfigurationName,
 		config.ExceptionValidatingWebhookServicePath,
 		serverIP,
@@ -221,6 +226,12 @@ func main() {
 	// setup
 	signalCtx, setup, sdown := internal.Setup(appConfig, "kyverno-admission-controller", false)
 	defer sdown()
+	caSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tls.GenerateRootCASecretName(), resyncPeriod)
+	tlsSecret := informers.NewSecretInformer(setup.KubeClient, config.KyvernoNamespace(), tls.GenerateTLSPairSecretName(), resyncPeriod)
+	if !informers.StartInformersAndWaitForCacheSync(signalCtx, setup.Logger, caSecret, tlsSecret) {
+		setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
+		os.Exit(1)
+	}
 	// show version
 	showWarnings(signalCtx, setup.Logger)
 	// THIS IS AN UGLY FIX
@@ -235,7 +246,6 @@ func main() {
 	kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
 	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
-	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 	openApiManager, err := openapi.NewManager(setup.Logger.WithName("openapi"))
 	if err != nil {
 		setup.Logger.Error(err, "Failed to create openapi manager")
@@ -339,7 +349,6 @@ func main() {
 		logger := setup.Logger.WithName("leader")
 		// create leader factories
 		kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
-		kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 		kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(setup.KyvernoClient, resyncPeriod)
 		// create leader controllers
 		leaderControllers, warmup, err := createrLeaderControllers(
@@ -350,6 +359,8 @@ func main() {
 			kubeInformer,
 			kubeKyvernoInformer,
 			kyvernoInformer,
+			caSecret,
+			tlsSecret,
 			setup.KubeClient,
 			setup.KyvernoClient,
 			setup.KyvernoDynamicClient,
@@ -443,7 +454,7 @@ func main() {
 			DumpPayload: dumpPayload,
 		},
 		func() ([]byte, []byte, error) {
-			secret, err := secretLister.Get(tls.GenerateTLSPairSecretName())
+			secret, err := tlsSecret.Lister().Secrets(config.KyvernoNamespace()).Get(tls.GenerateTLSPairSecretName())
 			if err != nil {
 				return nil, nil, err
 			}

@@ -38388,6 +38388,12 @@ metadata:
     app.kubernetes.io/version: latest
   namespace: kyverno
 rules:
+- apiGroups:
+  - ''
+  resources:
+  - secrets
+  verbs:
+  - create
 - apiGroups:
   - ''
   resources:
@@ -38396,8 +38402,10 @@ rules:
   - get
   - list
   - watch
-  - create
   - update
+  resourceNames:
+  - kyverno-cleanup-controller.kyverno.svc.kyverno-tls-ca
+  - kyverno-cleanup-controller.kyverno.svc.kyverno-tls-pair
 - apiGroups:
   - ''
   resources:

@@ -29,20 +29,28 @@ type controller struct {
 	renewer tls.CertRenewer

 	// listers
-	secretLister corev1listers.SecretLister
+	caLister  corev1listers.SecretLister
+	tlsLister corev1listers.SecretLister

 	// queue
-	queue         workqueue.RateLimitingInterface
-	secretEnqueue controllerutils.EnqueueFunc
+	queue      workqueue.RateLimitingInterface
+	caEnqueue  controllerutils.EnqueueFunc
+	tlsEnqueue controllerutils.EnqueueFunc
 }

-func NewController(secretInformer corev1informers.SecretInformer, certRenewer tls.CertRenewer) controllers.Controller {
+func NewController(
+	caInformer corev1informers.SecretInformer,
+	tlsInformer corev1informers.SecretInformer,
+	certRenewer tls.CertRenewer,
+) controllers.Controller {
 	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName)
 	c := controller{
-		renewer:       certRenewer,
-		secretLister:  secretInformer.Lister(),
-		queue:         queue,
-		secretEnqueue: controllerutils.AddDefaultEventHandlers(logger, secretInformer.Informer(), queue),
+		renewer:    certRenewer,
+		caLister:   caInformer.Lister(),
+		tlsLister:  tlsInformer.Lister(),
+		queue:      queue,
+		caEnqueue:  controllerutils.AddDefaultEventHandlers(logger, caInformer.Informer(), queue),
+		tlsEnqueue: controllerutils.AddDefaultEventHandlers(logger, tlsInformer.Informer(), queue),
 	}
 	return &c
 }
@@ -50,7 +58,7 @@ func NewController(secretInformer corev1informers.SecretInformer, certRenewer tl
 func (c *controller) Run(ctx context.Context, workers int) {
 	// we need to enqueue our secrets in case they don't exist yet in the cluster
 	// this way we ensure the reconcile happens (hence renewal/creation)
-	if err := c.secretEnqueue(&corev1.Secret{
+	if err := c.tlsEnqueue(&corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: config.KyvernoNamespace(),
 			Name:      tls.GenerateTLSPairSecretName(),
@@ -58,7 +66,7 @@ func (c *controller) Run(ctx context.Context, workers int) {
 	}); err != nil {
 		logger.Error(err, "failed to enqueue secret", "name", tls.GenerateTLSPairSecretName())
 	}
-	if err := c.secretEnqueue(&corev1.Secret{
+	if err := c.caEnqueue(&corev1.Secret{
 		ObjectMeta: metav1.ObjectMeta{
 			Namespace: config.KyvernoNamespace(),
 			Name:      tls.GenerateRootCASecretName(),
@@ -85,15 +93,29 @@ func (c *controller) ticker(ctx context.Context, logger logr.Logger) {
 	for {
 		select {
 		case <-certsRenewalTicker.C:
-			list, err := c.secretLister.List(labels.Everything())
-			if err == nil {
-				for _, secret := range list {
-					if err := c.secretEnqueue(secret); err != nil {
-						logger.Error(err, "failed to enqueue secret", "name", secret.Name)
-					}
-				}
-			} else {
-				logger.Error(err, "failed to list secrets")
-			}
+			{
+				list, err := c.caLister.List(labels.Everything())
+				if err == nil {
+					for _, secret := range list {
+						if err := c.caEnqueue(secret); err != nil {
+							logger.Error(err, "failed to enqueue secret", "name", secret.Name)
+						}
+					}
+				} else {
+					logger.Error(err, "failed to list secrets")
+				}
+			}
+			{
+				list, err := c.tlsLister.List(labels.Everything())
+				if err == nil {
+					for _, secret := range list {
+						if err := c.tlsEnqueue(secret); err != nil {
+							logger.Error(err, "failed to enqueue secret", "name", secret.Name)
+						}
+					}
+				} else {
+					logger.Error(err, "failed to list secrets")
+				}
+			}
 		case <-ctx.Done():
 			return
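Both enqueue functions feed the same rate-limited workqueue, so a single reconcile loop serves the CA and the TLS pair; the ticker above simply re-enqueues both secrets periodically to force renewal checks. The draining side lives in kyverno's internal controllerutils runner, but the generic client-go shape it follows is roughly this (a sketch; reconcile is a hypothetical callback):

    package sketch

    import "k8s.io/client-go/util/workqueue"

    // runWorker sketches the standard workqueue drain loop; it is not
    // kyverno's actual runner.
    func runWorker(queue workqueue.RateLimitingInterface, reconcile func(key string) error) {
        for {
            item, shutdown := queue.Get()
            if shutdown {
                return
            }
            func() {
                defer queue.Done(item)
                if err := reconcile(item.(string)); err != nil {
                    queue.AddRateLimited(item) // retry with backoff
                    return
                }
                queue.Forget(item) // reset the rate limiter on success
            }()
        }
    }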

pkg/informers/helpers.go (new file)

@@ -0,0 +1,33 @@
package informers

import (
	"context"

	"github.com/go-logr/logr"
	"k8s.io/client-go/tools/cache"
)

// informer is the minimal surface the helpers need: anything that can
// hand back a cache.SharedIndexInformer.
type informer interface {
	Informer() cache.SharedIndexInformer
}

// StartInformers runs each informer on its own goroutine, stopping them
// all when ctx is cancelled.
func StartInformers(ctx context.Context, informers ...informer) {
	for i := range informers {
		go func(informer cache.SharedIndexInformer) {
			informer.Run(ctx.Done())
		}(informers[i].Informer())
	}
}

// WaitForCacheSync blocks until every informer has completed its initial
// list or ctx is cancelled. (The logger is accepted but not used yet.)
func WaitForCacheSync(ctx context.Context, logger logr.Logger, informers ...informer) bool {
	var cacheSyncs []cache.InformerSynced
	for i := range informers {
		cacheSyncs = append(cacheSyncs, informers[i].Informer().HasSynced)
	}
	return cache.WaitForCacheSync(ctx.Done(), cacheSyncs...)
}

// StartInformersAndWaitForCacheSync combines the two steps above.
func StartInformersAndWaitForCacheSync(ctx context.Context, logger logr.Logger, informers ...informer) bool {
	StartInformers(ctx, informers...)
	return WaitForCacheSync(ctx, logger, informers...)
}
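The unexported informer interface is structural, so anything exposing Informer() cache.SharedIndexInformer qualifies: standard factory-built informers as well as the single-name secret informers in the next file. A hypothetical mixed call (names assumed, not from the commit):

    package sketch

    import (
        "context"

        "github.com/go-logr/logr"
        kubeinformers "k8s.io/client-go/informers"
        corev1informers "k8s.io/client-go/informers/core/v1"
        "k8s.io/client-go/kubernetes"

        "github.com/kyverno/kyverno/pkg/informers"
    )

    // startAll is illustrative: factory informers and single-name secret
    // informers can be started and synced through the same helper.
    func startAll(ctx context.Context, logger logr.Logger, client kubernetes.Interface, caSecret corev1informers.SecretInformer) bool {
        factory := kubeinformers.NewSharedInformerFactory(client, 0)
        return informers.StartInformersAndWaitForCacheSync(ctx, logger,
            factory.Core().V1().ConfigMaps(), // factory-built informer
            caSecret,                         // single-name secret informer
        )
    }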

pkg/informers/secrets.go (new file)

@@ -0,0 +1,46 @@
package informers

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	corev1informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	corev1listers "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
)

type secretInformer struct {
	informer cache.SharedIndexInformer
	lister   corev1listers.SecretLister
}

// NewSecretInformer returns an informer scoped to a single secret: a
// metadata.name field selector restricts the underlying list/watch to the
// given name, so no other secret ever enters the cache.
func NewSecretInformer(
	client kubernetes.Interface,
	namespace string,
	name string,
	resyncPeriod time.Duration,
) corev1informers.SecretInformer {
	indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
	options := func(lo *metav1.ListOptions) {
		lo.FieldSelector = fields.OneTermEqualSelector(metav1.ObjectNameField, name).String()
	}
	informer := corev1informers.NewFilteredSecretInformer(
		client,
		namespace,
		resyncPeriod,
		indexers,
		options,
	)
	lister := corev1listers.NewSecretLister(informer.GetIndexer())
	return &secretInformer{informer, lister}
}

func (i *secretInformer) Informer() cache.SharedIndexInformer {
	return i.informer
}

func (i *secretInformer) Lister() corev1listers.SecretLister {
	return i.lister
}
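An end-to-end sketch of the new package against a fake clientset (illustrative names, not from the commit). Against a real API server the field selector keeps every other secret out of the watch and the local cache; the fake client does not enforce field selectors and is only here to make the sketch runnable:

    package main

    import (
        "context"
        "fmt"
        "time"

        "github.com/go-logr/logr"
        corev1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
        "k8s.io/client-go/kubernetes/fake"

        "github.com/kyverno/kyverno/pkg/informers"
    )

    func main() {
        // Fake client pre-loaded with the one secret we care about.
        client := fake.NewSimpleClientset(&corev1.Secret{
            ObjectMeta: metav1.ObjectMeta{Namespace: "kyverno", Name: "demo-tls-pair"},
        })
        ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
        defer cancel()
        // Watch a single secret by name and wait for the initial list.
        tlsSecret := informers.NewSecretInformer(client, "kyverno", "demo-tls-pair", 0)
        if !informers.StartInformersAndWaitForCacheSync(ctx, logr.Discard(), tlsSecret) {
            panic("cache sync failed")
        }
        // Reads come from the informer's local cache, not the API server.
        secret, err := tlsSecret.Lister().Secrets("kyverno").Get("demo-tls-pair")
        if err != nil {
            panic(err)
        }
        fmt.Println("cached:", secret.Name)
    }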