refactor: configuration config map controller (#6829)
* refactor: configuration config map controller
* rbac
* fix
* clean
* clean
* fix
* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
This commit is contained in:
parent ba59d6391a
commit 3f51e99109

22 changed files with 430 additions and 360 deletions
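The shape of the refactor is easiest to see at the command entry points: internal.Setup now bootstraps configuration, metrics, and the instrumented Kubernetes client itself, and hands everything back in a single SetupResult. A condensed before/after sketch of one call site, assembled from the hunks below (flag handling and the remaining controllers are elided):

// Before: each command assembled its own pieces.
//   signalCtx, logger, metricsConfig, sdown := internal.Setup("kyverno-background-controller")
//   kubeClient := internal.CreateKubernetesClient(logger, ...)
//   configuration, err := config.NewConfiguration(kubeClient, false)

// After: Setup owns the bootstrap and returns one struct.
signalCtx, setup, sdown := internal.Setup("kyverno-background-controller", false) // false: keep resource filters
defer sdown()
dClient, err := dclient.NewClient(signalCtx, dynamicClient, setup.KubeClient, 15*time.Minute)
if err != nil {
	setup.Logger.Error(err, "failed to create dynamic client")
	os.Exit(1)
}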
@@ -25,6 +25,9 @@ rules:
       - get
+      - list
+      - watch
     resourceNames:
       - {{ include "kyverno.config.configMapName" . }}
+      - {{ include "kyverno.config.metricsConfigMapName" . }}
   - apiGroups:
       - coordination.k8s.io
     resources:
@@ -16,6 +16,9 @@ rules:
       - get
+      - list
+      - watch
     resourceNames:
       - {{ include "kyverno.config.configMapName" . }}
+      - {{ include "kyverno.config.metricsConfigMapName" . }}
   - apiGroups:
       - coordination.k8s.io
     resources:
@@ -26,6 +26,9 @@ rules:
       - get
+      - list
+      - watch
     resourceNames:
      - {{ include "kyverno.config.configMapName" . }}
+      - {{ include "kyverno.config.metricsConfigMapName" . }}
   - apiGroups:
       - coordination.k8s.io
     resources:
@@ -16,6 +16,9 @@ rules:
       - get
+      - list
+      - watch
     resourceNames:
       - {{ include "kyverno.config.configMapName" . }}
+      - {{ include "kyverno.config.metricsConfigMapName" . }}
   - apiGroups:
       - coordination.k8s.io
     resources:
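The four Role changes above add list and watch because the new configuration controller is informer-based: instead of a one-off Get, it performs an initial list and then keeps a watch open on the named ConfigMaps. A minimal client-go sketch of that access pattern (the function and field selector below are illustrative, not taken from the Kyverno sources):

package example

import (
	"fmt"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// watchConfigMap lists, then watches, a single named ConfigMap; this is the
// access pattern that requires the get/list/watch verbs granted above.
func watchConfigMap(client kubernetes.Interface, namespace, name string, stopCh <-chan struct{}) {
	factory := kubeinformers.NewSharedInformerFactoryWithOptions(
		client,
		15*time.Minute, // resync period, matching the value used elsewhere in this commit
		kubeinformers.WithNamespace(namespace),
		kubeinformers.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.FieldSelector = "metadata.name=" + name
		}),
	)
	informer := factory.Core().V1().ConfigMaps().Informer()
	informer.AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    func(obj interface{}) { fmt.Println("loaded", obj.(*corev1.ConfigMap).Name) },
		UpdateFunc: func(_, obj interface{}) { fmt.Println("reloaded", obj.(*corev1.ConfigMap).Name) },
	})
	factory.Start(stopCh)
	factory.WaitForCacheSync(stopCh)
}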
@@ -19,7 +19,6 @@ import (
 	kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
 	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
 	"github.com/kyverno/kyverno/pkg/config"
-	configcontroller "github.com/kyverno/kyverno/pkg/controllers/config"
 	policymetricscontroller "github.com/kyverno/kyverno/pkg/controllers/metrics/policy"
 	"github.com/kyverno/kyverno/pkg/cosign"
 	engineapi "github.com/kyverno/kyverno/pkg/engine/api"
@@ -108,20 +107,6 @@ func createrLeaderControllers(
 	}, err
 }
 
-func createNonLeaderControllers(
-	configuration config.Configuration,
-	kubeKyvernoInformer kubeinformers.SharedInformerFactory,
-) ([]internal.Controller, func() error) {
-	configurationController := configcontroller.NewController(
-		configuration,
-		kubeKyvernoInformer.Core().V1().ConfigMaps(),
-	)
-	return []internal.Controller{
-			internal.NewController(configcontroller.ControllerName, configurationController, configcontroller.Workers),
-		},
-		nil
-}
-
 func main() {
 	var (
 		genWorkers int
@@ -156,38 +141,33 @@ func main() {
 	// setup signals
 	// setup maxprocs
 	// setup metrics
-	signalCtx, logger, metricsConfig, sdown := internal.Setup("kyverno-background-controller")
+	signalCtx, setup, sdown := internal.Setup("kyverno-background-controller", false)
 	defer sdown()
+	// logger := setup.Logger
 	// create instrumented clients
-	kubeClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	leaderElectionClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	kyvernoClient := internal.CreateKyvernoClient(logger, kyvernoclient.WithMetrics(metricsConfig, metrics.KyvernoClient), kyvernoclient.WithTracing())
-	dynamicClient := internal.CreateDynamicClient(logger, dynamicclient.WithMetrics(metricsConfig, metrics.KyvernoClient), dynamicclient.WithTracing())
-	dClient, err := dclient.NewClient(signalCtx, dynamicClient, kubeClient, 15*time.Minute)
+	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
+	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), kyvernoclient.WithTracing())
+	dynamicClient := internal.CreateDynamicClient(setup.Logger, dynamicclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), dynamicclient.WithTracing())
+	dClient, err := dclient.NewClient(signalCtx, dynamicClient, setup.KubeClient, 15*time.Minute)
 	if err != nil {
-		logger.Error(err, "failed to create dynamic client")
+		setup.Logger.Error(err, "failed to create dynamic client")
 		os.Exit(1)
 	}
 	// THIS IS AN UGLY FIX
 	// ELSE KYAML IS NOT THREAD SAFE
 	kyamlopenapi.Schema()
 	// informer factories
-	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
+	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 	// setup registry client
-	rclient, err := setupRegistryClient(signalCtx, logger, secretLister, imagePullSecrets, allowInsecureRegistry)
+	rclient, err := setupRegistryClient(signalCtx, setup.Logger, secretLister, imagePullSecrets, allowInsecureRegistry)
 	if err != nil {
-		logger.Error(err, "failed to setup registry client")
+		setup.Logger.Error(err, "failed to setup registry client")
 		os.Exit(1)
 	}
 	// setup cosign
-	setupCosign(logger, imageSignatureRepository)
-	configuration, err := config.NewConfiguration(kubeClient, false)
-	if err != nil {
-		logger.Error(err, "failed to initialize configuration")
-		os.Exit(1)
-	}
+	setupCosign(setup.Logger, imageSignatureRepository)
 	eventGenerator := event.NewEventGenerator(
 		dClient,
 		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
@@ -198,52 +178,40 @@ func main() {
 	// this controller only subscribe to events, nothing is returned...
 	var wg sync.WaitGroup
 	policymetricscontroller.NewController(
-		metricsConfig,
+		setup.MetricsManager,
 		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
 		kyvernoInformer.Kyverno().V1().Policies(),
 		&wg,
 	)
 	engine := internal.NewEngine(
 		signalCtx,
-		logger,
-		configuration,
-		metricsConfig.Config(),
+		setup.Logger,
+		setup.Configuration,
+		setup.MetricsConfiguration,
 		dClient,
 		rclient,
-		kubeClient,
+		setup.KubeClient,
 		kyvernoClient,
 	)
-	// create non leader controllers
-	nonLeaderControllers, nonLeaderBootstrap := createNonLeaderControllers(
-		configuration,
-		kubeKyvernoInformer,
-	)
 	// start informers and wait for cache sync
-	if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeKyvernoInformer) {
-		logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
+	if !internal.StartInformersAndWaitForCacheSync(signalCtx, setup.Logger, kyvernoInformer, kubeKyvernoInformer) {
+		setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
 		os.Exit(1)
 	}
-	// bootstrap non leader controllers
-	if nonLeaderBootstrap != nil {
-		if err := nonLeaderBootstrap(); err != nil {
-			logger.Error(err, "failed to bootstrap non leader controllers")
-			os.Exit(1)
-		}
-	}
 	// start event generator
 	go eventGenerator.Run(signalCtx, 3, &wg)
 	// setup leader election
 	le, err := leaderelection.New(
-		logger.WithName("leader-election"),
+		setup.Logger.WithName("leader-election"),
 		"kyverno-background-controller",
 		config.KyvernoNamespace(),
 		leaderElectionClient,
 		config.KyvernoPodName(),
 		leaderElectionRetryPeriod,
 		func(ctx context.Context) {
-			logger := logger.WithName("leader")
+			logger := setup.Logger.WithName("leader")
 			// create leader factories
-			kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
+			kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
 			kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
 			// create leader controllers
 			leaderControllers, err := createrLeaderControllers(
@@ -254,8 +222,8 @@ func main() {
 				kyvernoClient,
 				dClient,
 				rclient,
-				configuration,
-				metricsConfig,
+				setup.Configuration,
+				setup.MetricsManager,
 				eventGenerator,
 			)
 			if err != nil {
@@ -278,13 +246,9 @@ func main() {
 		nil,
 	)
 	if err != nil {
-		logger.Error(err, "failed to initialize leader election")
+		setup.Logger.Error(err, "failed to initialize leader election")
 		os.Exit(1)
 	}
-	// start non leader controllers
-	for _, controller := range nonLeaderControllers {
-		controller.Run(signalCtx, logger.WithName("controllers"), &wg)
-	}
 	// start leader election
 	le.Run(signalCtx)
 	wg.Wait()
@@ -78,33 +78,30 @@ func main() {
 	// setup signals
 	// setup maxprocs
 	// setup metrics
-	ctx, logger, metricsConfig, sdown := internal.Setup("kyverno-cleanup-controller")
+	ctx, setup, sdown := internal.Setup("kyverno-cleanup-controller", false)
 	defer sdown()
-	// configuration
-	configuration := config.NewDefaultConfiguration(false)
 	// create instrumented clients
-	kubeClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	leaderElectionClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	kyvernoClient := internal.CreateKyvernoClient(logger, kyvernoclient.WithMetrics(metricsConfig, metrics.KubeClient), kyvernoclient.WithTracing())
+	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
+	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kyvernoclient.WithTracing())
 	// setup leader election
 	le, err := leaderelection.New(
-		logger.WithName("leader-election"),
+		setup.Logger.WithName("leader-election"),
 		"kyverno-cleanup-controller",
 		config.KyvernoNamespace(),
 		leaderElectionClient,
 		config.KyvernoPodName(),
 		leaderElectionRetryPeriod,
 		func(ctx context.Context) {
-			logger := logger.WithName("leader")
+			logger := setup.Logger.WithName("leader")
 			// informer factories
-			kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)
+			kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod)
 			kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
-			kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
+			kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 			// listers
 			secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 			// controllers
 			renewer := tls.NewCertRenewer(
-				kubeClient.CoreV1().Secrets(config.KyvernoNamespace()),
+				setup.KubeClient.CoreV1().Secrets(config.KyvernoNamespace()),
 				secretLister,
 				tls.CertRenewalInterval,
 				tls.CAValidityDuration,
@@ -123,7 +120,7 @@ func main() {
 				webhookControllerName,
 				genericwebhookcontroller.NewController(
 					webhookControllerName,
-					kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations(),
+					setup.KubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations(),
 					kubeInformer.Admissionregistration().V1().ValidatingWebhookConfigurations(),
 					kubeKyvernoInformer.Core().V1().Secrets(),
 					config.CleanupValidatingWebhookConfigurationName,
@@ -146,14 +143,14 @@ func main() {
 				}},
 				genericwebhookcontroller.Fail,
 				genericwebhookcontroller.None,
-				configuration,
+				setup.Configuration,
 			),
 			webhookWorkers,
 		)
 		cleanupController := internal.NewController(
 			cleanup.ControllerName,
 			cleanup.NewController(
-				kubeClient,
+				setup.KubeClient,
 				kyvernoInformer.Kyverno().V2alpha1().ClusterCleanupPolicies(),
 				kyvernoInformer.Kyverno().V2alpha1().CleanupPolicies(),
 				kubeInformer.Batch().V1().CronJobs(),
@@ -177,14 +174,14 @@ func main() {
 		nil,
 	)
 	if err != nil {
-		logger.Error(err, "failed to initialize leader election")
+		setup.Logger.Error(err, "failed to initialize leader election")
 		os.Exit(1)
 	}
-	dynamicClient := internal.CreateDynamicClient(logger, dynamicclient.WithMetrics(metricsConfig, metrics.KyvernoClient), dynamicclient.WithTracing())
-	dClient := internal.CreateDClient(logger, ctx, dynamicClient, kubeClient, 15*time.Minute)
+	dynamicClient := internal.CreateDynamicClient(setup.Logger, dynamicclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), dynamicclient.WithTracing())
+	dClient := internal.CreateDClient(setup.Logger, ctx, dynamicClient, setup.KubeClient, 15*time.Minute)
 	// informer factories
-	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)
-	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
+	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod)
+	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
 	// listers
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
@@ -193,24 +190,24 @@ func main() {
 	nsLister := kubeInformer.Core().V1().Namespaces().Lister()
 	// log policy changes
 	genericloggingcontroller.NewController(
-		logger.WithName("cleanup-policy"),
+		setup.Logger.WithName("cleanup-policy"),
 		"CleanupPolicy",
 		kyvernoInformer.Kyverno().V2alpha1().CleanupPolicies(),
 		genericloggingcontroller.CheckGeneration,
 	)
 	genericloggingcontroller.NewController(
-		logger.WithName("cluster-cleanup-policy"),
+		setup.Logger.WithName("cluster-cleanup-policy"),
 		"ClusterCleanupPolicy",
 		kyvernoInformer.Kyverno().V2alpha1().ClusterCleanupPolicies(),
 		genericloggingcontroller.CheckGeneration,
 	)
 	// start informers and wait for cache sync
-	if !internal.StartInformersAndWaitForCacheSync(ctx, logger, kubeKyvernoInformer, kubeInformer, kyvernoInformer) {
+	if !internal.StartInformersAndWaitForCacheSync(ctx, setup.Logger, kubeKyvernoInformer, kubeInformer, kyvernoInformer) {
 		os.Exit(1)
 	}
 	// create handlers
 	admissionHandlers := admissionhandlers.New(dClient)
-	cleanupHandlers := cleanuphandlers.New(dClient, cpolLister, polLister, nsLister, logger.WithName("cleanup-handler"))
+	cleanupHandlers := cleanuphandlers.New(dClient, cpolLister, polLister, nsLister, setup.Logger.WithName("cleanup-handler"))
 	// create server
 	server := NewServer(
 		func() ([]byte, []byte, error) {
@@ -222,12 +219,12 @@ func main() {
 		},
 		admissionHandlers.Validate,
 		cleanupHandlers.Cleanup,
-		metricsConfig,
+		setup.MetricsManager,
 		webhooks.DebugModeOptions{
 			DumpPayload: dumpPayload,
 		},
 		probes{},
-		configuration,
+		setup.Configuration,
 	)
 	// start server
 	server.Run(ctx.Done())
@@ -25,7 +25,7 @@ import (
 	aggregator "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset"
 )
 
-func CreateClientConfig(logger logr.Logger) *rest.Config {
+func createClientConfig(logger logr.Logger) *rest.Config {
 	clientConfig, err := config.CreateClientConfig(kubeconfig, clientRateLimitQPS, clientRateLimitBurst)
 	checkError(logger, err, "failed to create rest client configuration")
 	clientConfig.Wrap(
@@ -39,7 +39,7 @@ func CreateClientConfig(logger logr.Logger) *rest.Config {
 func CreateKubernetesClient(logger logr.Logger, opts ...kube.NewOption) kubernetes.Interface {
 	logger = logger.WithName("kube-client")
 	logger.Info("create kube client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
-	client, err := kube.NewForConfig(CreateClientConfig(logger), opts...)
+	client, err := kube.NewForConfig(createClientConfig(logger), opts...)
 	checkError(logger, err, "failed to create kubernetes client")
 	return client
 }
@@ -47,7 +47,7 @@ func CreateKubernetesClient(logger logr.Logger, opts ...kube.NewOption) kubernet
 func CreateKyvernoClient(logger logr.Logger, opts ...kyverno.NewOption) versioned.Interface {
 	logger = logger.WithName("kyverno-client")
 	logger.Info("create kyverno client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
-	client, err := kyverno.NewForConfig(CreateClientConfig(logger), opts...)
+	client, err := kyverno.NewForConfig(createClientConfig(logger), opts...)
 	checkError(logger, err, "failed to create kyverno client")
 	return client
 }
@@ -55,7 +55,7 @@ func CreateKyvernoClient(logger logr.Logger, opts ...kyverno.NewOption) versione
 func CreateDynamicClient(logger logr.Logger, opts ...dyn.NewOption) dynamic.Interface {
 	logger = logger.WithName("dynamic-client")
 	logger.Info("create dynamic client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
-	client, err := dyn.NewForConfig(CreateClientConfig(logger), opts...)
+	client, err := dyn.NewForConfig(createClientConfig(logger), opts...)
 	checkError(logger, err, "failed to create dynamic client")
 	return client
 }
@@ -63,7 +63,7 @@ func CreateDynamicClient(logger logr.Logger, opts ...dyn.NewOption) dynamic.Inte
 func CreateMetadataClient(logger logr.Logger, opts ...meta.NewOption) metadata.Interface {
 	logger = logger.WithName("metadata-client")
 	logger.Info("create metadata client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
-	client, err := meta.NewForConfig(CreateClientConfig(logger), opts...)
+	client, err := meta.NewForConfig(createClientConfig(logger), opts...)
 	checkError(logger, err, "failed to create metadata client")
 	return client
 }
@@ -71,7 +71,7 @@ func CreateMetadataClient(logger logr.Logger, opts ...meta.NewOption) metadata.I
 func CreateApiServerClient(logger logr.Logger, opts ...apisrv.NewOption) apiserver.Interface {
 	logger = logger.WithName("apiserver-client")
 	logger.Info("create apiserver client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
-	client, err := apisrv.NewForConfig(CreateClientConfig(logger), opts...)
+	client, err := apisrv.NewForConfig(createClientConfig(logger), opts...)
 	checkError(logger, err, "failed to create apiserver client")
 	return client
 }
@@ -79,7 +79,7 @@ func CreateApiServerClient(logger logr.Logger, opts ...apisrv.NewOption) apiserv
 func CreateAggregatorClient(logger logr.Logger, opts ...agg.NewOption) aggregator.Interface {
 	logger = logger.WithName("aggregator-client")
 	logger.Info("create aggregator client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
-	client, err := agg.NewForConfig(CreateClientConfig(logger), opts...)
+	client, err := agg.NewForConfig(createClientConfig(logger), opts...)
 	checkError(logger, err, "failed to create aggregator client")
 	return client
 }
@@ -1,6 +1,8 @@
 package internal
 
-import "flag"
+import (
+	"flag"
+)
 
 type Configuration interface {
 	UsesMetrics() bool
cmd/internal/configmap.go (new file, 52 lines)
@@ -0,0 +1,52 @@
+package internal
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/kyverno/kyverno/pkg/config"
+	genericconfigmapcontroller "github.com/kyverno/kyverno/pkg/controllers/generic/configmap"
+	corev1 "k8s.io/api/core/v1"
+	"k8s.io/client-go/kubernetes"
+)
+
+const (
+	resyncPeriod = 15 * time.Minute
+)
+
+func startConfigController(ctx context.Context, logger logr.Logger, client kubernetes.Interface, skipResourceFilters bool) config.Configuration {
+	configuration := config.NewDefaultConfiguration(skipResourceFilters)
+	configurationController := genericconfigmapcontroller.NewController(
+		"config-controller",
+		client,
+		resyncPeriod,
+		config.KyvernoNamespace(),
+		config.KyvernoConfigMapName(),
+		func(ctx context.Context, cm *corev1.ConfigMap) error {
+			configuration.Load(cm)
+			return nil
+		},
+	)
+	checkError(logger, configurationController.WarmUp(ctx), "failed to init config controller")
+	go configurationController.Run(ctx, 1)
+	return configuration
+}
+
+func startMetricsConfigController(ctx context.Context, logger logr.Logger, client kubernetes.Interface) config.MetricsConfiguration {
+	configuration := config.NewDefaultMetricsConfiguration()
+	configurationController := genericconfigmapcontroller.NewController(
+		"metrics-config-controller",
+		client,
+		resyncPeriod,
+		config.KyvernoNamespace(),
+		config.KyvernoMetricsConfigMapName(),
+		func(ctx context.Context, cm *corev1.ConfigMap) error {
+			configuration.Load(cm)
+			return nil
+		},
+	)
+	checkError(logger, configurationController.WarmUp(ctx), "failed to init metrics config controller")
+	go configurationController.Run(ctx, 1)
+	return configuration
+}
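The genericconfigmapcontroller package itself is not part of this diff, so only its contract is visible here: a controller name, a client, a resync period, the ConfigMap's namespace and name, and a callback invoked with the ConfigMap. A hypothetical skeleton consistent with that call (signatures inferred from the usage above, not copied from the package):

package configmap

import (
	"context"
	"time"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// Callback receives the watched ConfigMap on warm-up and on every change.
type Callback func(context.Context, *corev1.ConfigMap) error

type Controller struct {
	name      string
	client    kubernetes.Interface
	resync    time.Duration
	namespace string
	cmName    string
	callback  Callback
}

func NewController(name string, client kubernetes.Interface, resync time.Duration, namespace, cmName string, callback Callback) *Controller {
	return &Controller{name: name, client: client, resync: resync, namespace: namespace, cmName: cmName, callback: callback}
}

// WarmUp performs the initial load so callers start with a populated
// configuration; the real controller may tolerate a missing ConfigMap.
func (c *Controller) WarmUp(ctx context.Context) error {
	cm, err := c.client.CoreV1().ConfigMaps(c.namespace).Get(ctx, c.cmName, metav1.GetOptions{})
	if err != nil {
		return err
	}
	return c.callback(ctx, cm)
}

// Run would start an informer-backed loop with the given number of workers,
// feeding the same callback on every update (implementation omitted here).
func (c *Controller) Run(ctx context.Context, workers int) {}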
@@ -29,6 +29,8 @@ func NewEngine(
 ) engineapi.Engine {
 	configMapResolver := NewConfigMapResolver(ctx, logger, kubeClient, 15*time.Minute)
 	exceptionsSelector := NewExceptionSelector(ctx, logger, kyvernoClient, 15*time.Minute)
+	logger = logger.WithName("engine")
+	logger.Info("setup engine...")
 	return engine.NewEngine(
 		configuration,
 		metricsConfiguration,
@@ -46,6 +48,7 @@ func NewExceptionSelector(
 	resyncPeriod time.Duration,
 ) engineapi.PolicyExceptionSelector {
+	logger = logger.WithName("exception-selector").WithValues("enablePolicyException", enablePolicyException, "exceptionNamespace", exceptionNamespace)
+	logger.Info("setup exception selector...")
 	var exceptionsLister engineapi.PolicyExceptionSelector
 	if enablePolicyException {
 		factory := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
@@ -70,6 +73,7 @@ func NewConfigMapResolver(
 	resyncPeriod time.Duration,
 ) engineapi.ConfigmapResolver {
+	logger = logger.WithName("configmap-resolver").WithValues("enableConfigMapCaching", enableConfigMapCaching)
+	logger.Info("setup config map resolver...")
 	clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
 	checkError(logger, err, "failed to create client based resolver")
 	if !enableConfigMapCaching {
@@ -13,17 +13,9 @@ import (
 	"k8s.io/client-go/kubernetes"
 )
 
-func GetMetricsConfiguration(logger logr.Logger, client kubernetes.Interface) config.MetricsConfiguration {
-	logger.Info("load metrics configuration...")
-	metricsConfiguration, err := config.NewMetricsConfiguration(client)
-	checkError(logger, err, "failed to load metrics configuration")
-	return metricsConfiguration
-}
-
-func SetupMetrics(ctx context.Context, logger logr.Logger, kubeClient kubernetes.Interface) (metrics.MetricsConfigManager, context.CancelFunc) {
+func SetupMetrics(ctx context.Context, logger logr.Logger, metricsConfiguration config.MetricsConfiguration, kubeClient kubernetes.Interface) (metrics.MetricsConfigManager, context.CancelFunc) {
 	logger = logger.WithName("metrics")
 	logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
-	metricsConfiguration := GetMetricsConfiguration(logger, kubeClient)
 	metricsAddr := ":" + metricsPort
 	metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
 		ctx,
@@ -4,7 +4,10 @@ import (
 	"context"
 
 	"github.com/go-logr/logr"
+	kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
+	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/metrics"
+	"k8s.io/client-go/kubernetes"
 )
 
 func shutdown(logger logr.Logger, sdowns ...context.CancelFunc) context.CancelFunc {
@@ -18,14 +21,33 @@ func shutdown(logger logr.Logger, sdowns ...context.CancelFunc) context.CancelFu
 	}
 }
 
-func Setup(name string) (context.Context, logr.Logger, metrics.MetricsConfigManager, context.CancelFunc) {
+type SetupResult struct {
+	Logger               logr.Logger
+	Configuration        config.Configuration
+	MetricsConfiguration config.MetricsConfiguration
+	MetricsManager       metrics.MetricsConfigManager
+	KubeClient           kubernetes.Interface
+}
+
+func Setup(name string, skipResourceFilters bool) (context.Context, SetupResult, context.CancelFunc) {
 	logger := SetupLogger()
 	ShowVersion(logger)
 	sdownMaxProcs := SetupMaxProcs(logger)
 	SetupProfiling(logger)
-	client := CreateKubernetesClient(logger)
 	ctx, sdownSignals := SetupSignals(logger)
-	metricsManager, sdownMetrics := SetupMetrics(ctx, logger, client)
+	client := kubeclient.From(CreateKubernetesClient(logger), kubeclient.WithTracing())
+	metricsConfiguration := startMetricsConfigController(ctx, logger, client)
+	metricsManager, sdownMetrics := SetupMetrics(ctx, logger, metricsConfiguration, client)
+	client = client.WithMetrics(metricsManager, metrics.KubeClient)
+	configuration := startConfigController(ctx, logger, client, skipResourceFilters)
 	sdownTracing := SetupTracing(logger, name, client)
-	return ctx, logger, metricsManager, shutdown(logger.WithName("shutdown"), sdownMaxProcs, sdownMetrics, sdownTracing, sdownSignals)
+	return ctx,
+		SetupResult{
+			Logger:               logger,
+			Configuration:        configuration,
+			MetricsConfiguration: metricsConfiguration,
+			MetricsManager:       metricsManager,
+			KubeClient:           client,
+		},
+		shutdown(logger.WithName("shutdown"), sdownMaxProcs, sdownMetrics, sdownTracing, sdownSignals)
 }
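Note the bootstrap ordering the new Setup resolves: the metrics manager cannot be built before the metrics configuration is loaded, but loading it already needs a client. The diff answers this by wrapping the client in two stages, roughly:

// 1. Client that can trace but not yet record metrics.
client := kubeclient.From(CreateKubernetesClient(logger), kubeclient.WithTracing())
// 2. Load the metrics configuration with that client.
metricsConfiguration := startMetricsConfigController(ctx, logger, client)
// 3. Build the metrics manager from the loaded configuration...
metricsManager, sdownMetrics := SetupMetrics(ctx, logger, metricsConfiguration, client)
// 4. ...then re-wrap the client so later calls are measured.
client = client.WithMetrics(metricsManager, metrics.KubeClient)
// 5. The main configuration controller runs with the fully instrumented client.
configuration := startConfigController(ctx, logger, client, skipResourceFilters)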
@@ -40,15 +40,14 @@ func main() {
 	// start profiling
 	// setup signals
 	// setup maxprocs
-	ctx, logger, _, sdown := internal.Setup("kyverno-init-controller")
+	ctx, setup, sdown := internal.Setup("kyverno-init-controller", false)
 	defer sdown()
 	// create clients
-	kubeClient := internal.CreateKubernetesClient(logger)
-	dynamicClient := internal.CreateDynamicClient(logger)
-	kyvernoClient := internal.CreateKyvernoClient(logger)
-	client := internal.CreateDClient(logger, ctx, dynamicClient, kubeClient, 15*time.Minute)
+	dynamicClient := internal.CreateDynamicClient(setup.Logger)
+	kyvernoClient := internal.CreateKyvernoClient(setup.Logger)
+	client := internal.CreateDClient(setup.Logger, ctx, dynamicClient, setup.KubeClient, 15*time.Minute)
 	// Exit for unsupported version of kubernetes cluster
-	if !kubeutils.HigherThanKubernetesVersion(kubeClient.Discovery(), logging.GlobalLogger(), 1, 16, 0) {
+	if !kubeutils.HigherThanKubernetesVersion(setup.KubeClient.Discovery(), logging.GlobalLogger(), 1, 16, 0) {
 		os.Exit(1)
 	}
 	requests := []request{
@@ -67,7 +66,7 @@ func main() {
 
 	run := func(context.Context) {
 		name := tls.GenerateRootCASecretName()
-		_, err := kubeClient.CoreV1().Secrets(config.KyvernoNamespace()).Get(context.TODO(), name, metav1.GetOptions{})
+		_, err := setup.KubeClient.CoreV1().Secrets(config.KyvernoNamespace()).Get(context.TODO(), name, metav1.GetOptions{})
 		if err != nil {
 			logging.V(2).Info("failed to fetch root CA secret", "name", name, "error", err.Error())
 			if !errors.IsNotFound(err) {
@@ -76,7 +75,7 @@ func main() {
 		}
 
 		name = tls.GenerateTLSPairSecretName()
-		_, err = kubeClient.CoreV1().Secrets(config.KyvernoNamespace()).Get(context.TODO(), name, metav1.GetOptions{})
+		_, err = setup.KubeClient.CoreV1().Secrets(config.KyvernoNamespace()).Get(context.TODO(), name, metav1.GetOptions{})
 		if err != nil {
 			logging.V(2).Info("failed to fetch TLS Pair secret", "name", name, "error", err.Error())
 			if !errors.IsNotFound(err) {
@@ -84,7 +83,7 @@
 		}
 	}
 
-	if err = acquireLeader(ctx, kubeClient); err != nil {
+	if err = acquireLeader(ctx, setup.KubeClient); err != nil {
 		logging.V(2).Info("Failed to create lease 'kyvernopre-lock'")
 		os.Exit(1)
 	}
@@ -115,14 +114,14 @@
 		logging.WithName("kyvernopre/LeaderElection"),
 		"kyvernopre",
 		config.KyvernoNamespace(),
-		kubeClient,
+		setup.KubeClient,
 		config.KyvernoPodName(),
 		leaderelection.DefaultRetryPeriod,
 		run,
 		nil,
 	)
 	if err != nil {
-		logger.Error(err, "failed to elect a leader")
+		setup.Logger.Error(err, "failed to elect a leader")
 		os.Exit(1)
 	}
 
@@ -22,7 +22,6 @@ import (
 	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/controllers/certmanager"
-	configcontroller "github.com/kyverno/kyverno/pkg/controllers/config"
 	genericloggingcontroller "github.com/kyverno/kyverno/pkg/controllers/generic/logging"
 	genericwebhookcontroller "github.com/kyverno/kyverno/pkg/controllers/generic/webhook"
 	policymetricscontroller "github.com/kyverno/kyverno/pkg/controllers/metrics/policy"
@@ -105,14 +104,14 @@ func createNonLeaderControllers(
 	eng engineapi.Engine,
 	genWorkers int,
 	kubeInformer kubeinformers.SharedInformerFactory,
 	kubeKyvernoInformer kubeinformers.SharedInformerFactory,
 	kyvernoInformer kyvernoinformer.SharedInformerFactory,
 	kubeClient kubernetes.Interface,
 	kyvernoClient versioned.Interface,
 	dynamicClient dclient.Interface,
 	configuration config.Configuration,
 	policyCache policycache.Cache,
 	manager openapi.Manager,
-) ([]internal.Controller, func() error) {
+) ([]internal.Controller, func(context.Context) error) {
 	policyCacheController := policycachecontroller.NewController(
 		dynamicClient,
 		policyCache,
@@ -123,17 +122,15 @@ func createNonLeaderControllers(
 		dynamicClient,
 		manager,
 	)
-	configurationController := configcontroller.NewController(
-		configuration,
-		kubeKyvernoInformer.Core().V1().ConfigMaps(),
-	)
 	return []internal.Controller{
 			internal.NewController(policycachecontroller.ControllerName, policyCacheController, policycachecontroller.Workers),
 			internal.NewController(openapicontroller.ControllerName, openApiController, openapicontroller.Workers),
-			internal.NewController(configcontroller.ControllerName, configurationController, configcontroller.Workers),
 		},
-		func() error {
-			return policyCacheController.WarmUp()
+		func(ctx context.Context) error {
+			if err := policyCacheController.WarmUp(); err != nil {
+				return err
+			}
+			return nil
 		}
 }
@@ -265,19 +262,18 @@ func main() {
 	// setup signals
 	// setup maxprocs
 	// setup metrics
-	signalCtx, logger, metricsConfig, sdown := internal.Setup("kyverno-admission-controller")
+	signalCtx, setup, sdown := internal.Setup("kyverno-admission-controller", false)
 	defer sdown()
 	// show version
-	showWarnings(logger)
+	showWarnings(setup.Logger)
 	// create instrumented clients
-	kubeClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	leaderElectionClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	kyvernoClient := internal.CreateKyvernoClient(logger, kyvernoclient.WithMetrics(metricsConfig, metrics.KyvernoClient), kyvernoclient.WithTracing())
-	dynamicClient := internal.CreateDynamicClient(logger, dynamicclient.WithMetrics(metricsConfig, metrics.KyvernoClient), dynamicclient.WithTracing())
-	apiserverClient := internal.CreateApiServerClient(logger, apiserverclient.WithMetrics(metricsConfig, metrics.KubeClient), apiserverclient.WithTracing())
-	dClient, err := dclient.NewClient(signalCtx, dynamicClient, kubeClient, 15*time.Minute)
+	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
+	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), kyvernoclient.WithTracing())
+	dynamicClient := internal.CreateDynamicClient(setup.Logger, dynamicclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), dynamicclient.WithTracing())
+	apiserverClient := internal.CreateApiServerClient(setup.Logger, apiserverclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), apiserverclient.WithTracing())
+	dClient, err := dclient.NewClient(signalCtx, dynamicClient, setup.KubeClient, 15*time.Minute)
 	if err != nil {
-		logger.Error(err, "failed to create dynamic client")
+		setup.Logger.Error(err, "failed to create dynamic client")
 		os.Exit(1)
 	}
 	// THIS IS AN UGLY FIX
@@ -285,35 +281,30 @@ func main() {
 	kyamlopenapi.Schema()
 	// check we can run
 	if err := sanityChecks(apiserverClient); err != nil {
-		logger.Error(err, "sanity checks failed")
+		setup.Logger.Error(err, "sanity checks failed")
 		os.Exit(1)
 	}
 	// informer factories
-	kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
-	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
+	kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
+	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 	// setup registry client
-	rclient, err := setupRegistryClient(signalCtx, logger, secretLister, imagePullSecrets, allowInsecureRegistry)
+	rclient, err := setupRegistryClient(signalCtx, setup.Logger, secretLister, imagePullSecrets, allowInsecureRegistry)
 	if err != nil {
-		logger.Error(err, "failed to setup registry client")
+		setup.Logger.Error(err, "failed to setup registry client")
 		os.Exit(1)
 	}
 	// setup cosign
-	setupCosign(logger, imageSignatureRepository)
-	configuration, err := config.NewConfiguration(kubeClient, false)
+	setupCosign(setup.Logger, imageSignatureRepository)
+	openApiManager, err := openapi.NewManager(setup.Logger.WithName("openapi"))
 	if err != nil {
-		logger.Error(err, "failed to initialize configuration")
-		os.Exit(1)
-	}
-	openApiManager, err := openapi.NewManager(logger.WithName("openapi"))
-	if err != nil {
-		logger.Error(err, "Failed to create openapi manager")
+		setup.Logger.Error(err, "Failed to create openapi manager")
 		os.Exit(1)
 	}
 	var wg sync.WaitGroup
 	certRenewer := tls.NewCertRenewer(
-		kubeClient.CoreV1().Secrets(config.KyvernoNamespace()),
+		setup.KubeClient.CoreV1().Secrets(config.KyvernoNamespace()),
 		secretLister,
 		tls.CertRenewalInterval,
 		tls.CAValidityDuration,
@@ -330,26 +321,26 @@ func main() {
 	)
 	// this controller only subscribe to events, nothing is returned...
 	policymetricscontroller.NewController(
-		metricsConfig,
+		setup.MetricsManager,
 		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
 		kyvernoInformer.Kyverno().V1().Policies(),
 		&wg,
 	)
 	// log policy changes
 	genericloggingcontroller.NewController(
-		logger.WithName("policy"),
+		setup.Logger.WithName("policy"),
 		"Policy",
 		kyvernoInformer.Kyverno().V1().Policies(),
 		genericloggingcontroller.CheckGeneration,
 	)
 	genericloggingcontroller.NewController(
-		logger.WithName("cluster-policy"),
+		setup.Logger.WithName("cluster-policy"),
 		"ClusterPolicy",
 		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
 		genericloggingcontroller.CheckGeneration,
 	)
 	runtime := runtimeutils.NewRuntime(
-		logger.WithName("runtime-checks"),
+		setup.Logger.WithName("runtime-checks"),
 		serverIP,
 		kubeKyvernoInformer.Apps().V1().Deployments(),
 		certRenewer,
@@ -357,12 +348,12 @@ func main() {
 	// engine
 	engine := internal.NewEngine(
 		signalCtx,
-		logger,
-		configuration,
-		metricsConfig.Config(),
+		setup.Logger,
+		setup.Configuration,
+		setup.MetricsConfiguration,
 		dClient,
 		rclient,
-		kubeClient,
+		setup.KubeClient,
 		kyvernoClient,
 	)
 	// create non leader controllers
@@ -370,23 +361,23 @@ func main() {
 		engine,
 		genWorkers,
 		kubeInformer,
 		kubeKyvernoInformer,
 		kyvernoInformer,
-		kubeClient,
+		setup.KubeClient,
 		kyvernoClient,
 		dClient,
-		configuration,
+		setup.Configuration,
 		policyCache,
 		openApiManager,
 	)
 	// start informers and wait for cache sync
-	if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
-		logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
+	if !internal.StartInformersAndWaitForCacheSync(signalCtx, setup.Logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
+		setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
 		os.Exit(1)
 	}
 	// bootstrap non leader controllers
 	if nonLeaderBootstrap != nil {
-		if err := nonLeaderBootstrap(); err != nil {
-			logger.Error(err, "failed to bootstrap non leader controllers")
+		if err := nonLeaderBootstrap(signalCtx); err != nil {
+			setup.Logger.Error(err, "failed to bootstrap non leader controllers")
 			os.Exit(1)
 		}
 	}
@@ -394,17 +385,17 @@ func main() {
 	go eventGenerator.Run(signalCtx, 3, &wg)
 	// setup leader election
 	le, err := leaderelection.New(
-		logger.WithName("leader-election"),
+		setup.Logger.WithName("leader-election"),
 		"kyverno",
 		config.KyvernoNamespace(),
 		leaderElectionClient,
 		config.KyvernoPodName(),
 		leaderElectionRetryPeriod,
 		func(ctx context.Context) {
-			logger := logger.WithName("leader")
+			logger := setup.Logger.WithName("leader")
 			// create leader factories
-			kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
-			kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
+			kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
+			kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 			kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
 			// create leader controllers
 			leaderControllers, warmup, err := createrLeaderControllers(
@@ -415,13 +406,13 @@ func main() {
 				kubeInformer,
 				kubeKyvernoInformer,
 				kyvernoInformer,
-				kubeClient,
+				setup.KubeClient,
 				kyvernoClient,
 				dClient,
 				certRenewer,
 				runtime,
 				int32(servicePort),
-				configuration,
+				setup.Configuration,
 			)
 			if err != nil {
 				logger.Error(err, "failed to create leader controllers")
@@ -449,12 +440,12 @@ func main() {
 		nil,
 	)
 	if err != nil {
-		logger.Error(err, "failed to initialize leader election")
+		setup.Logger.Error(err, "failed to initialize leader election")
 		os.Exit(1)
 	}
 	// start non leader controllers
 	for _, controller := range nonLeaderControllers {
-		controller.Run(signalCtx, logger.WithName("controllers"), &wg)
+		controller.Run(signalCtx, setup.Logger.WithName("controllers"), &wg)
 	}
 	// start leader election
 	go func() {
@@ -479,8 +470,8 @@ func main() {
 		dClient,
 		kyvernoClient,
 		rclient,
-		configuration,
-		metricsConfig,
+		setup.Configuration,
+		setup.MetricsManager,
 		policyCache,
 		kubeInformer.Core().V1().Namespaces().Lister(),
 		kubeInformer.Rbac().V1().RoleBindings().Lister(),
@@ -502,8 +493,8 @@ func main() {
 		policyHandlers,
 		resourceHandlers,
 		exceptionHandlers,
-		configuration,
-		metricsConfig,
+		setup.Configuration,
+		setup.MetricsManager,
 		webhooks.DebugModeOptions{
 			DumpPayload: dumpPayload,
 		},
@@ -514,9 +505,9 @@ func main() {
 			}
 			return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
 		},
-		kubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations(),
-		kubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations(),
-		kubeClient.CoordinationV1().Leases(config.KyvernoNamespace()),
+		setup.KubeClient.AdmissionregistrationV1().MutatingWebhookConfigurations(),
+		setup.KubeClient.AdmissionregistrationV1().ValidatingWebhookConfigurations(),
+		setup.KubeClient.CoordinationV1().Leases(config.KyvernoNamespace()),
 		runtime,
 		kubeInformer.Rbac().V1().RoleBindings().Lister(),
 		kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),
@@ -524,13 +515,13 @@ func main() {
 	)
 	// start informers and wait for cache sync
 	// we need to call start again because we potentially registered new informers
-	if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
-		logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
+	if !internal.StartInformersAndWaitForCacheSync(signalCtx, setup.Logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
+		setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
 		os.Exit(1)
 	}
 	// start webhooks server
 	server.Run(signalCtx.Done())
 	wg.Wait()
 	// say goodbye...
-	logger.V(2).Info("Kyverno shutdown successful")
+	setup.Logger.V(2).Info("Kyverno shutdown successful")
 }
@@ -19,7 +19,6 @@ import (
 	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
 	metadataclient "github.com/kyverno/kyverno/pkg/clients/metadata"
 	"github.com/kyverno/kyverno/pkg/config"
-	configcontroller "github.com/kyverno/kyverno/pkg/controllers/config"
 	admissionreportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/admission"
 	aggregatereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/aggregate"
 	backgroundscancontroller "github.com/kyverno/kyverno/pkg/controllers/report/background"
@@ -186,20 +185,6 @@ func createrLeaderControllers(
 	return reportControllers, warmup, nil
 }
 
-func createNonLeaderControllers(
-	configuration config.Configuration,
-	kubeKyvernoInformer kubeinformers.SharedInformerFactory,
-) ([]internal.Controller, func() error) {
-	configurationController := configcontroller.NewController(
-		configuration,
-		kubeKyvernoInformer.Core().V1().ConfigMaps(),
-	)
-	return []internal.Controller{
-			internal.NewController(configcontroller.ControllerName, configurationController, configcontroller.Workers),
-		},
-		nil
-}
-
 func main() {
 	var (
 		leaderElectionRetryPeriod time.Duration
@@ -244,39 +229,33 @@ func main() {
 	// setup signals
 	// setup maxprocs
 	// setup metrics
-	ctx, logger, metricsConfig, sdown := internal.Setup("kyverno-reports-controller")
+	ctx, setup, sdown := internal.Setup("kyverno-reports-controller", skipResourceFilters)
 	defer sdown()
 	// create instrumented clients
-	kubeClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	leaderElectionClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
-	kyvernoClient := internal.CreateKyvernoClient(logger, kyvernoclient.WithMetrics(metricsConfig, metrics.KyvernoClient), kyvernoclient.WithTracing())
-	metadataClient := internal.CreateMetadataClient(logger, metadataclient.WithMetrics(metricsConfig, metrics.KyvernoClient), metadataclient.WithTracing())
-	dynamicClient := internal.CreateDynamicClient(logger, dynamicclient.WithMetrics(metricsConfig, metrics.KyvernoClient), dynamicclient.WithTracing())
-	dClient, err := dclient.NewClient(ctx, dynamicClient, kubeClient, 15*time.Minute)
+	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
+	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), kyvernoclient.WithTracing())
+	metadataClient := internal.CreateMetadataClient(setup.Logger, metadataclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), metadataclient.WithTracing())
+	dynamicClient := internal.CreateDynamicClient(setup.Logger, dynamicclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), dynamicclient.WithTracing())
+	dClient, err := dclient.NewClient(ctx, dynamicClient, setup.KubeClient, 15*time.Minute)
 	if err != nil {
-		logger.Error(err, "failed to create dynamic client")
+		setup.Logger.Error(err, "failed to create dynamic client")
 		os.Exit(1)
 	}
 	// THIS IS AN UGLY FIX
 	// ELSE KYAML IS NOT THREAD SAFE
 	kyamlopenapi.Schema()
 	// informer factories
-	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
+	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
 	// setup registry client
-	rclient, err := setupRegistryClient(ctx, logger, secretLister, imagePullSecrets, allowInsecureRegistry)
+	rclient, err := setupRegistryClient(ctx, setup.Logger, secretLister, imagePullSecrets, allowInsecureRegistry)
 	if err != nil {
-		logger.Error(err, "failed to setup registry client")
+		setup.Logger.Error(err, "failed to setup registry client")
 		os.Exit(1)
 	}
 	// setup cosign
-	setupCosign(logger, imageSignatureRepository)
-	configuration, err := config.NewConfiguration(kubeClient, skipResourceFilters)
-	if err != nil {
-		logger.Error(err, "failed to initialize configuration")
-		os.Exit(1)
-	}
+	setupCosign(setup.Logger, imageSignatureRepository)
 	eventGenerator := event.NewEventGenerator(
 		dClient,
 		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
@@ -287,47 +266,35 @@ func main() {
 	// engine
 	engine := internal.NewEngine(
 		ctx,
-		logger,
-		configuration,
-		metricsConfig.Config(),
+		setup.Logger,
+		setup.Configuration,
+		setup.MetricsConfiguration,
 		dClient,
 		rclient,
-		kubeClient,
+		setup.KubeClient,
 		kyvernoClient,
 	)
-	// create non leader controllers
-	nonLeaderControllers, nonLeaderBootstrap := createNonLeaderControllers(
-		configuration,
-		kubeKyvernoInformer,
-	)
 	// start informers and wait for cache sync
-	if !internal.StartInformersAndWaitForCacheSync(ctx, logger, kyvernoInformer, kubeKyvernoInformer) {
-		logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
+	if !internal.StartInformersAndWaitForCacheSync(ctx, setup.Logger, kyvernoInformer, kubeKyvernoInformer) {
+		setup.Logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
 		os.Exit(1)
 	}
-	// bootstrap non leader controllers
-	if nonLeaderBootstrap != nil {
-		if err := nonLeaderBootstrap(); err != nil {
-			logger.Error(err, "failed to bootstrap non leader controllers")
-			os.Exit(1)
-		}
-	}
 	// start event generator
 	var wg sync.WaitGroup
 	go eventGenerator.Run(ctx, 3, &wg)
 	// setup leader election
 	le, err := leaderelection.New(
-		logger.WithName("leader-election"),
+		setup.Logger.WithName("leader-election"),
 		"kyverno-reports-controller",
 		config.KyvernoNamespace(),
 		leaderElectionClient,
 		config.KyvernoPodName(),
 		leaderElectionRetryPeriod,
 		func(ctx context.Context) {
-			logger := logger.WithName("leader")
+			logger := setup.Logger.WithName("leader")
 			// create leader factories
-			kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
-			kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
+			kubeInformer := kubeinformers.NewSharedInformerFactory(setup.KubeClient, resyncPeriod)
+			kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(setup.KubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 			kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
 			metadataInformer := metadatainformers.NewSharedInformerFactory(metadataClient, 15*time.Minute)
 			// create leader controllers
@@ -343,7 +310,7 @@ func main() {
 				kyvernoClient,
 				dClient,
 				rclient,
-				configuration,
+				setup.Configuration,
 				eventGenerator,
 				backgroundScanInterval,
 			)
@@ -376,13 +343,9 @@ func main() {
 		nil,
 	)
 	if err != nil {
-		logger.Error(err, "failed to initialize leader election")
+		setup.Logger.Error(err, "failed to initialize leader election")
 		os.Exit(1)
 	}
-	// start non leader controllers
-	for _, controller := range nonLeaderControllers {
-		controller.Run(ctx, logger.WithName("controllers"), &wg)
-	}
 	le.Run(ctx)
 	wg.Wait()
 }
@@ -34309,6 +34309,9 @@ rules:
       - get
+      - list
+      - watch
     resourceNames:
       - kyverno
+      - kyverno-metrics
   - apiGroups:
       - coordination.k8s.io
     resources:
@@ -34358,6 +34361,9 @@ rules:
      - get
+      - list
+      - watch
     resourceNames:
       - kyverno
+      - kyverno-metrics
   - apiGroups:
       - coordination.k8s.io
     resources:
@@ -34398,6 +34404,9 @@ rules:
       - get
+      - list
+      - watch
     resourceNames:
       - kyverno
+      - kyverno-metrics
   - apiGroups:
       - coordination.k8s.io
     resources:
@@ -34428,6 +34437,9 @@ rules:
       - get
+      - list
+      - watch
     resourceNames:
       - kyverno
+      - kyverno-metrics
   - apiGroups:
       - coordination.k8s.io
     resources:
@@ -1,7 +1,6 @@
 package config
 
 import (
-	"context"
 	"errors"
 	"strconv"
 	"sync"
@@ -10,10 +9,7 @@ import (
 	osutils "github.com/kyverno/kyverno/pkg/utils/os"
 	"github.com/kyverno/kyverno/pkg/utils/wildcard"
 	corev1 "k8s.io/api/core/v1"
-	apierrors "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/client-go/kubernetes"
 )
 
 // These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
@@ -101,6 +97,8 @@ var (
 	kyvernoPodName = osutils.GetEnvWithFallback("KYVERNO_POD_NAME", "kyverno")
 	// kyvernoConfigMapName is the Kyverno configmap name
 	kyvernoConfigMapName = osutils.GetEnvWithFallback("INIT_CONFIG", "kyverno")
+	// kyvernoMetricsConfigMapName is the Kyverno metrics configmap name
+	kyvernoMetricsConfigMapName = osutils.GetEnvWithFallback("METRICS_CONFIG", "kyverno-metrics")
 	// kyvernoDryRunNamespace is the namespace for DryRun option of YAML verification
 	kyvernoDryrunNamespace = osutils.GetEnvWithFallback("KYVERNO_DRYRUN_NAMESPACE", "kyverno-dryrun")
 )
@@ -133,6 +131,10 @@ func KyvernoConfigMapName() string {
 	return kyvernoConfigMapName
 }
 
+func KyvernoMetricsConfigMapName() string {
+	return kyvernoMetricsConfigMapName
+}
+
 // Configuration to be used by consumer to check filters
 type Configuration interface {
 	// GetDefaultRegistry return default image registry
@ -187,19 +189,6 @@ func NewDefaultConfiguration(skipResourceFilters bool) *configuration {
|
|||
}
|
||||
}
|
||||
|
||||
// NewConfiguration ...
|
||||
func NewConfiguration(client kubernetes.Interface, skipResourceFilters bool) (Configuration, error) {
|
||||
cd := NewDefaultConfiguration(skipResourceFilters)
|
||||
if cm, err := client.CoreV1().ConfigMaps(kyvernoNamespace).Get(context.TODO(), kyvernoConfigMapName, metav1.GetOptions{}); err != nil {
|
||||
if !apierrors.IsNotFound(err) {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
cd.load(cm)
|
||||
}
|
||||
return cd, nil
|
||||
}
|
||||
|
||||
func (cd *configuration) OnChanged(callback func()) {
|
||||
cd.mux.Lock()
|
||||
defer cd.mux.Unlock()
|
||||
|
|
|
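The removed NewConfiguration helper did a one-shot read of the configmap at construction time; under the new scheme, consumers build defaults and let a watcher feed Load, registering a hook for reloads. A minimal sketch of the consumer side; the callback body is illustrative:

package main

import (
    "github.com/kyverno/kyverno/pkg/config"
)

func newConfig() config.Configuration {
    cfg := config.NewDefaultConfiguration(false)
    cfg.OnChanged(func() {
        // invoked after every reload; e.g. recompute resource filters
    })
    // Load is driven by the generic configmap controller shown later in
    // this diff, so no API call is needed here.
    return cfg
}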
@@ -1,21 +1,13 @@
package config

import (
    "context"
    "os"
    "sync"
    "time"

    "golang.org/x/exp/slices"
    corev1 "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

// metricsConfigEnvVar is the name of an environment variable containing the name of the configmap
// that stores the information associated with Kyverno's metrics exposure
const metricsConfigEnvVar string = "METRICS_CONFIG"

// MetricsConfiguration stores the config for metrics
type MetricsConfiguration interface {
    // GetExcludeNamespaces returns the namespaces to ignore for metrics exposure
@@ -26,12 +18,18 @@ type MetricsConfiguration interface {
    GetMetricsRefreshInterval() time.Duration
    // CheckNamespace returns `true` if the namespace has to be considered
    CheckNamespace(string) bool
    // Load loads configuration from a configmap
    Load(*corev1.ConfigMap)
    // OnChanged adds a callback to be invoked when the configuration is reloaded
    OnChanged(func())
}

// metricsConfig stores the config for metrics
type metricsConfig struct {
    namespaces             namespacesConfig
    metricsRefreshInterval time.Duration
    mux                    sync.RWMutex
    callbacks              []func()
}

// NewDefaultMetricsConfiguration ...
@@ -45,39 +43,37 @@ func NewDefaultMetricsConfiguration() *metricsConfig {
    }
}

// NewMetricsConfiguration ...
func NewMetricsConfiguration(client kubernetes.Interface) (MetricsConfiguration, error) {
    configuration := NewDefaultMetricsConfiguration()
    cmName := os.Getenv(metricsConfigEnvVar)
    if cmName != "" {
        if cm, err := client.CoreV1().ConfigMaps(kyvernoNamespace).Get(context.TODO(), cmName, metav1.GetOptions{}); err != nil {
            if !errors.IsNotFound(err) {
                return nil, err
            }
        } else {
            configuration.load(cm)
        }
    }
    return configuration, nil
func (cd *metricsConfig) OnChanged(callback func()) {
    cd.mux.Lock()
    defer cd.mux.Unlock()
    cd.callbacks = append(cd.callbacks, callback)
}

// GetExcludeNamespaces returns the namespaces to ignore for metrics exposure
func (mcd *metricsConfig) GetExcludeNamespaces() []string {
    mcd.mux.RLock()
    defer mcd.mux.RUnlock()
    return mcd.namespaces.ExcludeNamespaces
}

// GetIncludeNamespaces returns the namespaces to specifically consider for metrics exposure
func (mcd *metricsConfig) GetIncludeNamespaces() []string {
    mcd.mux.RLock()
    defer mcd.mux.RUnlock()
    return mcd.namespaces.IncludeNamespaces
}

// GetMetricsRefreshInterval returns the refresh interval for the metrics
func (mcd *metricsConfig) GetMetricsRefreshInterval() time.Duration {
    mcd.mux.RLock()
    defer mcd.mux.RUnlock()
    return mcd.metricsRefreshInterval
}

// CheckNamespace returns `true` if the namespace has to be considered
func (mcd *metricsConfig) CheckNamespace(namespace string) bool {
    mcd.mux.RLock()
    defer mcd.mux.RUnlock()
    // TODO(eddycharly): check we actually need `"-"`
    if namespace == "" || namespace == "-" {
        return true
@@ -91,10 +87,22 @@ func (mcd *metricsConfig) CheckNamespace(namespace string) bool {
    return slices.Contains(mcd.namespaces.IncludeNamespaces, namespace)
}

func (mcd *metricsConfig) Load(cm *corev1.ConfigMap) {
    if cm != nil {
        mcd.load(cm)
    } else {
        mcd.unload()
    }
}

func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
    logger := logger.WithValues("name", cm.Name, "namespace", cm.Namespace)
    if cm.Data == nil {
        return
    cd.mux.Lock()
    defer cd.mux.Unlock()
    defer cd.notify()
    data := cm.Data
    if data == nil {
        data = map[string]string{}
    }
    // reset
    cd.metricsRefreshInterval = 0
@@ -103,23 +111,48 @@ func (cd *metricsConfig) load(cm *corev1.ConfigMap) {
        ExcludeNamespaces: []string{},
    }
    // load metricsRefreshInterval
    metricsRefreshInterval, found := cm.Data["metricsRefreshInterval"]
    if found {
    metricsRefreshInterval, ok := data["metricsRefreshInterval"]
    if !ok {
        logger.Info("metricsRefreshInterval not set")
    } else {
        logger := logger.WithValues("metricsRefreshInterval", metricsRefreshInterval)
        metricsRefreshInterval, err := time.ParseDuration(metricsRefreshInterval)
        if err != nil {
            logger.Error(err, "failed to parse metricsRefreshInterval")
        } else {
            cd.metricsRefreshInterval = metricsRefreshInterval
            logger.Info("metricsRefreshInterval configured")
        }
    }
    // load namespaces
    namespaces, ok := cm.Data["namespaces"]
    if ok {
    namespaces, ok := data["namespaces"]
    if !ok {
        logger.Info("namespaces not set")
    } else {
        logger := logger.WithValues("namespaces", namespaces)
        namespaces, err := parseIncludeExcludeNamespacesFromNamespacesConfig(namespaces)
        if err != nil {
            logger.Error(err, "failed to parse namespaces")
        } else {
            cd.namespaces = namespaces
            logger.Info("namespaces configured")
        }
    }
}

func (mcd *metricsConfig) unload() {
    mcd.mux.Lock()
    defer mcd.mux.Unlock()
    defer mcd.notify()
    mcd.metricsRefreshInterval = 0
    mcd.namespaces = namespacesConfig{
        IncludeNamespaces: []string{},
        ExcludeNamespaces: []string{},
    }
}

func (mcd *metricsConfig) notify() {
    for _, callback := range mcd.callbacks {
        callback()
    }
}
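To see what load and unload do end to end, here is a hedged example of exercising the new Load entry point directly. The metricsRefreshInterval value must parse with time.ParseDuration; the JSON shape of the namespaces key is assumed from parseIncludeExcludeNamespacesFromNamespacesConfig and from the chart defaults, so treat it as illustrative:

package main

import (
    "github.com/kyverno/kyverno/pkg/config"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func example() {
    mc := config.NewDefaultMetricsConfiguration()
    mc.OnChanged(func() {
        // e.g. re-register periodic exporters with the new interval
    })
    mc.Load(&corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Namespace: "kyverno", Name: "kyverno-metrics"},
        Data: map[string]string{
            "metricsRefreshInterval": "24h", // parsed with time.ParseDuration
            "namespaces":             `{"include": [], "exclude": ["kube-system"]}`, // assumed schema
        },
    })
    _ = mc.CheckNamespace("kube-system") // false: excluded above
    mc.Load(nil)                         // unload: back to defaults, callbacks fire again
}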
@@ -1,61 +0,0 @@
package config

import (
    "context"
    "time"

    "github.com/go-logr/logr"
    "github.com/kyverno/kyverno/pkg/config"
    "github.com/kyverno/kyverno/pkg/controllers"
    controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
    "k8s.io/apimachinery/pkg/api/errors"
    corev1informers "k8s.io/client-go/informers/core/v1"
    corev1listers "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/util/workqueue"
)

const (
    // Workers is the number of workers for this controller
    Workers        = 3
    ControllerName = "config-controller"
    maxRetries     = 10
)

type controller struct {
    configuration config.Configuration

    // listers
    configmapLister corev1listers.ConfigMapLister

    // queue
    queue workqueue.RateLimitingInterface
}

func NewController(configuration config.Configuration, configmapInformer corev1informers.ConfigMapInformer) controllers.Controller {
    c := controller{
        configuration:   configuration,
        configmapLister: configmapInformer.Lister(),
        queue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName),
    }
    controllerutils.AddDefaultEventHandlers(logger, configmapInformer.Informer(), c.queue)
    return &c
}

func (c *controller) Run(ctx context.Context, workers int) {
    controllerutils.Run(ctx, logger, ControllerName, time.Second, c.queue, workers, maxRetries, c.reconcile)
}

func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, namespace, name string) error {
    if namespace != config.KyvernoNamespace() || name != config.KyvernoConfigMapName() {
        return nil
    }
    configMap, err := c.configmapLister.ConfigMaps(namespace).Get(name)
    if err != nil {
        if errors.IsNotFound(err) {
            c.configuration.Load(nil)
        }
        return err
    }
    c.configuration.Load(configMap.DeepCopy())
    return nil
}
@@ -1,5 +0,0 @@
package config

import "github.com/kyverno/kyverno/pkg/logging"

var logger = logging.ControllerLogger(ControllerName)
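The deleted controller informed on every ConfigMap in the cluster and only filtered by namespace and name inside reconcile, which is what required the broad list/watch RBAC; its replacement below scopes the watch on the server side. A sketch of the two informer setups side by side, with the client and resync period assumed:

package main

import (
    "time"

    "github.com/kyverno/kyverno/pkg/config"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    kubeinformers "k8s.io/client-go/informers"
    corev1informers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/tools/cache"
)

func informers(client kubernetes.Interface, resyncPeriod time.Duration) {
    // old: cluster-wide configmap informer, name-filtered only in reconcile
    factory := kubeinformers.NewSharedInformerFactory(client, resyncPeriod)
    _ = factory.Core().V1().ConfigMaps()

    // new: the watch itself is restricted to one namespace and one name,
    // matching the resourceNames now listed in the RBAC rules
    _ = corev1informers.NewFilteredConfigMapInformer(
        client,
        config.KyvernoNamespace(),
        resyncPeriod,
        cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc},
        func(lo *metav1.ListOptions) {
            lo.FieldSelector = fields.OneTermEqualSelector(metav1.ObjectNameField, config.KyvernoConfigMapName()).String()
        },
    )
}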
pkg/controllers/generic/configmap/controller.go (new file, 108 lines)
@@ -0,0 +1,108 @@
package configmap

import (
    "context"
    "errors"
    "time"

    "github.com/go-logr/logr"
    "github.com/kyverno/kyverno/pkg/controllers"
    "github.com/kyverno/kyverno/pkg/logging"
    controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
    corev1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/fields"
    corev1informers "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"
    corev1listers "k8s.io/client-go/listers/core/v1"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/util/workqueue"
)

const (
    // Workers is the number of workers for this controller
    Workers    = 1
    maxRetries = 10
)

type Controller interface {
    controllers.Controller
    WarmUp(context.Context) error
}

type controller struct {
    // listers
    informer cache.SharedIndexInformer
    lister   corev1listers.ConfigMapNamespaceLister

    // queue
    queue workqueue.RateLimitingInterface

    // config
    controllerName string
    logger         logr.Logger
    name           string
    callback       callback
}

type callback func(context.Context, *corev1.ConfigMap) error

func NewController(
    controllerName string,
    client kubernetes.Interface,
    resyncPeriod time.Duration,
    namespace string,
    name string,
    callback callback,
) Controller {
    indexers := cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}
    options := func(lo *metav1.ListOptions) {
        lo.FieldSelector = fields.OneTermEqualSelector(metav1.ObjectNameField, name).String()
    }
    informer := corev1informers.NewFilteredConfigMapInformer(
        client,
        namespace,
        resyncPeriod,
        indexers,
        options,
    )
    c := controller{
        informer:       informer,
        lister:         corev1listers.NewConfigMapLister(informer.GetIndexer()).ConfigMaps(namespace),
        controllerName: controllerName,
        queue:          workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName),
        logger:         logging.ControllerLogger(controllerName),
        name:           name,
        callback:       callback,
    }
    controllerutils.AddDefaultEventHandlers(c.logger, informer, c.queue)
    return &c
}

func (c *controller) WarmUp(ctx context.Context) error {
    go c.informer.Run(ctx.Done())
    if synced := cache.WaitForCacheSync(ctx.Done(), c.informer.HasSynced); !synced {
        return errors.New("configmap informer cache failed to sync")
    }
    return c.doReconcile(ctx, c.logger)
}

func (c *controller) Run(ctx context.Context, workers int) {
    controllerutils.Run(ctx, c.logger, c.controllerName, time.Second, c.queue, workers, maxRetries, c.reconcile)
}

func (c *controller) reconcile(ctx context.Context, logger logr.Logger, _, _, _ string) error {
    return c.doReconcile(ctx, c.logger)
}

func (c *controller) doReconcile(ctx context.Context, logger logr.Logger) error {
    observed, err := c.lister.Get(c.name)
    if err != nil {
        if !apierrors.IsNotFound(err) {
            return err
        }
        return c.callback(ctx, nil)
    }
    return c.callback(ctx, observed)
}
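Because the informer is filtered to a single object name and WarmUp blocks until the cache syncs and the first reconcile runs, the callback contract is easy to exercise with a fake client: the callback receives the configmap when present and nil when absent. A hedged sketch of such a test; the controller name, configmap data, and timeout are illustrative only:

package configmap_test

import (
    "context"
    "testing"
    "time"

    configmapcontroller "github.com/kyverno/kyverno/pkg/controllers/generic/configmap"
    corev1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

func TestWarmUpInvokesCallback(t *testing.T) {
    cm := &corev1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Namespace: "kyverno", Name: "kyverno"},
        Data:       map[string]string{"defaultRegistry": "ghcr.io"},
    }
    client := fake.NewSimpleClientset(cm)
    var got *corev1.ConfigMap
    ctrl := configmapcontroller.NewController(
        "test-controller",
        client,
        0, // no resync
        "kyverno",
        "kyverno",
        func(ctx context.Context, observed *corev1.ConfigMap) error {
            got = observed // nil would mean the configmap is absent
            return nil
        },
    )
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()
    if err := ctrl.WarmUp(ctx); err != nil { // syncs the cache, then reconciles once
        t.Fatal(err)
    }
    if got == nil || got.Data["defaultRegistry"] != "ghcr.io" {
        t.Fatalf("unexpected configmap: %v", got)
    }
}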
@@ -8,7 +8,6 @@ import (
    "github.com/stretchr/testify/assert"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes/fake"
)

// initializeMockConfig initializes a basic configuration with a fake dynamic client
@@ -20,11 +19,8 @@ func initializeMockConfig(defaultRegistry string, enableDefaultRegistryMutation
        ObjectMeta: metav1.ObjectMeta{Namespace: "kyverno", Name: "kyverno"},
        Data:       configMapData,
    }
    cs := fake.NewSimpleClientset(&cm)
    dynamicConfig, err := config.NewConfiguration(cs, false)
    if err != nil {
        return nil, err
    }
    dynamicConfig := config.NewDefaultConfiguration(false)
    dynamicConfig.Load(&cm)
    return dynamicConfig, nil
}
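The test helper no longer needs a fake clientset at all: it builds defaults and loads the configmap directly. The same pattern in a standalone test, sketched with an assertion; the defaultRegistry key and value are illustrative assumptions:

package config_test

import (
    "testing"

    "github.com/kyverno/kyverno/pkg/config"
    "github.com/stretchr/testify/assert"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestDefaultRegistryFromConfigMap(t *testing.T) {
    cm := v1.ConfigMap{
        ObjectMeta: metav1.ObjectMeta{Namespace: "kyverno", Name: "kyverno"},
        Data:       map[string]string{"defaultRegistry": "ghcr.io"}, // assumed key
    }
    cfg := config.NewDefaultConfiguration(false)
    cfg.Load(&cm)
    assert.Equal(t, "ghcr.io", cfg.GetDefaultRegistry())
}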