
refactor: factorise engine creation (#6837)

* refactor: factorise engine creation

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* more

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

---------

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Authored by Charles-Edouard Brétéché on 2023-04-11 09:12:05 +02:00, committed by GitHub
parent 9fe330d7cf
commit 289ff3e1ce
9 changed files with 218 additions and 175 deletions
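
Before this change, each controller binary built its own ConfigMap resolver and PolicyException lister inline in main() and passed them to engine.NewEngine. This commit moves that wiring into a shared internal.NewEngine helper so all binaries construct the engine identically. A condensed sketch of the call-site change, excerpted from the diffs below:

	// before (repeated in each main.go):
	engine := engine.NewEngine(
		configuration,
		metricsConfig.Config(),
		dClient,
		rclient,
		engineapi.DefaultContextLoaderFactory(configMapResolver),
		exceptionsLister,
	)

	// after (shared factory in cmd/internal):
	engine := internal.NewEngine(
		signalCtx,
		logger,
		configuration,
		metricsConfig.Config(),
		dClient,
		rclient,
		kubeClient,
		kyvernoClient,
	)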


@@ -22,9 +22,7 @@ import (
configcontroller "github.com/kyverno/kyverno/pkg/controllers/config"
policymetricscontroller "github.com/kyverno/kyverno/pkg/controllers/metrics/policy"
"github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/leaderelection"
"github.com/kyverno/kyverno/pkg/logging"
@@ -75,7 +73,6 @@ func createrLeaderControllers(
configuration config.Configuration,
metricsConfig metrics.MetricsConfigManager,
eventGenerator event.Interface,
configMapResolver engineapi.ConfigmapResolver,
) ([]internal.Controller, error) {
policyCtrl, err := policy.NewPolicyController(
kyvernoClient,
@@ -87,7 +84,6 @@ func createrLeaderControllers(
configuration,
eventGenerator,
kubeInformer.Core().V1().Namespaces(),
configMapResolver,
logging.WithName("PolicyController"),
time.Hour,
metricsConfig,
@@ -105,7 +101,6 @@ func createrLeaderControllers(
kubeInformer.Core().V1().Namespaces(),
eventGenerator,
configuration,
configMapResolver,
)
return []internal.Controller{
internal.NewController("policy-controller", policyCtrl, 2),
@@ -149,6 +144,8 @@ func main() {
internal.WithMetrics(),
internal.WithTracing(),
internal.WithKubeconfig(),
internal.WithPolicyExceptions(),
internal.WithConfigMapCaching(),
internal.WithFlagSets(flagset),
)
// parse flags
@@ -171,18 +168,12 @@ func main() {
logger.Error(err, "failed to create dynamic client")
os.Exit(1)
}
// THIS IS AN UGLY FIX
// ELSE KYAML IS NOT THREAD SAFE
kyamlopenapi.Schema()
// informer factories
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
cacheInformer, err := resolvers.GetCacheInformerFactory(kubeClient, resyncPeriod)
if err != nil {
logger.Error(err, "failed to create cache informer factory")
os.Exit(1)
}
secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
// setup registry client
rclient, err := setupRegistryClient(signalCtx, logger, secretLister, imagePullSecrets, allowInsecureRegistry)
@@ -192,21 +183,6 @@ func main() {
}
// setup cosign
setupCosign(logger, imageSignatureRepository)
informerBasedResolver, err := resolvers.NewInformerBasedResolver(cacheInformer.Core().V1().ConfigMaps().Lister())
if err != nil {
logger.Error(err, "failed to create informer based resolver")
os.Exit(1)
}
clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
if err != nil {
logger.Error(err, "failed to create client based resolver")
os.Exit(1)
}
configMapResolver, err := engineapi.NewNamespacedResourceResolver(informerBasedResolver, clientBasedResolver)
if err != nil {
logger.Error(err, "failed to create config map resolver")
os.Exit(1)
}
configuration, err := config.NewConfiguration(kubeClient, false)
if err != nil {
logger.Error(err, "failed to initialize configuration")
@@ -227,14 +203,15 @@ func main() {
kyvernoInformer.Kyverno().V1().Policies(),
&wg,
)
engine := engine.NewEngine(
engine := internal.NewEngine(
signalCtx,
logger,
configuration,
metricsConfig.Config(),
dClient,
rclient,
engineapi.DefaultContextLoaderFactory(configMapResolver),
// TODO: do we need exceptions here ?
nil,
kubeClient,
kyvernoClient,
)
// create non leader controllers
nonLeaderControllers, nonLeaderBootstrap := createNonLeaderControllers(
@ -242,7 +219,7 @@ func main() {
kubeKyvernoInformer,
)
// start informers and wait for cache sync
if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeKyvernoInformer, cacheInformer) {
if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeKyvernoInformer) {
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}
@@ -280,7 +257,6 @@ func main() {
configuration,
metricsConfig,
eventGenerator,
configMapResolver,
)
if err != nil {
logger.Error(err, "failed to create leader controllers")


@@ -7,6 +7,8 @@ type Configuration interface {
UsesTracing() bool
UsesProfiling() bool
UsesKubeconfig() bool
UsesPolicyExceptions() bool
UsesConfigMapCaching() bool
FlagSets() []*flag.FlagSet
}
@@ -44,6 +46,18 @@ func WithKubeconfig() ConfigurationOption {
}
}
func WithPolicyExceptions() ConfigurationOption {
return func(c *configuration) {
c.usesPolicyExceptions = true
}
}
func WithConfigMapCaching() ConfigurationOption {
return func(c *configuration) {
c.usesConfigMapCaching = true
}
}
func WithFlagSets(flagsets ...*flag.FlagSet) ConfigurationOption {
return func(c *configuration) {
c.flagSets = append(c.flagSets, flagsets...)
@@ -51,11 +65,13 @@ func WithFlagSets(flagsets ...*flag.FlagSet) ConfigurationOption {
}
type configuration struct {
usesMetrics bool
usesTracing bool
usesProfiling bool
usesKubeconfig bool
flagSets []*flag.FlagSet
usesMetrics bool
usesTracing bool
usesProfiling bool
usesKubeconfig bool
usesPolicyExceptions bool
usesConfigMapCaching bool
flagSets []*flag.FlagSet
}
func (c *configuration) UsesMetrics() bool {
@@ -74,6 +90,14 @@ func (c *configuration) UsesKubeconfig() bool {
return c.usesKubeconfig
}
func (c *configuration) UsesPolicyExceptions() bool {
return c.usesPolicyExceptions
}
func (c *configuration) UsesConfigMapCaching() bool {
return c.usesConfigMapCaching
}
func (c *configuration) FlagSets() []*flag.FlagSet {
return c.flagSets
}
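
The two new options follow the functional-options pattern this package already uses: each With* helper returns a closure that flips a private field on the configuration struct. A minimal, self-contained sketch of the pattern (generic names, not Kyverno's actual types):

	package main

	import "fmt"

	type configuration struct {
		usesPolicyExceptions bool
	}

	// ConfigurationOption mutates a configuration under construction.
	type ConfigurationOption func(*configuration)

	func WithPolicyExceptions() ConfigurationOption {
		return func(c *configuration) { c.usesPolicyExceptions = true }
	}

	func newConfiguration(options ...ConfigurationOption) *configuration {
		c := &configuration{}
		for _, opt := range options {
			opt(c) // apply each option in order
		}
		return c
	}

	func main() {
		c := newConfiguration(WithPolicyExceptions())
		fmt.Println(c.usesPolicyExceptions) // true
	}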

cmd/internal/engine.go (new file)

@@ -0,0 +1,89 @@
package internal
import (
"context"
"errors"
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/registryclient"
"k8s.io/client-go/kubernetes"
)
func NewEngine(
ctx context.Context,
logger logr.Logger,
configuration config.Configuration,
metricsConfiguration config.MetricsConfiguration,
client dclient.Interface,
rclient registryclient.Client,
kubeClient kubernetes.Interface,
kyvernoClient versioned.Interface,
) engineapi.Engine {
configMapResolver := NewConfigMapResolver(ctx, logger, kubeClient, 15*time.Minute)
exceptionsSelector := NewExceptionSelector(ctx, logger, kyvernoClient, 15*time.Minute)
return engine.NewEngine(
configuration,
metricsConfiguration,
client,
rclient,
engineapi.DefaultContextLoaderFactory(configMapResolver),
exceptionsSelector,
)
}
func NewExceptionSelector(
ctx context.Context,
logger logr.Logger,
kyvernoClient versioned.Interface,
resyncPeriod time.Duration,
) engineapi.PolicyExceptionSelector {
logger = logger.WithName("exception-selector").WithValues("enablePolicyException", enablePolicyException, "exceptionNamespace", exceptionNamespace)
var exceptionsLister engineapi.PolicyExceptionSelector
if enablePolicyException {
factory := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
lister := factory.Kyverno().V2alpha1().PolicyExceptions().Lister()
if exceptionNamespace != "" {
exceptionsLister = lister.PolicyExceptions(exceptionNamespace)
} else {
exceptionsLister = lister
}
// start informers and wait for cache sync
if !StartInformersAndWaitForCacheSync(ctx, logger, factory) {
checkError(logger, errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
}
}
return exceptionsLister
}
func NewConfigMapResolver(
ctx context.Context,
logger logr.Logger,
kubeClient kubernetes.Interface,
resyncPeriod time.Duration,
) engineapi.ConfigmapResolver {
logger = logger.WithName("configmap-resolver").WithValues("enableConfigMapCaching", enableConfigMapCaching)
clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
checkError(logger, err, "failed to create client based resolver")
if !enableConfigMapCaching {
return clientBasedResolver
}
factory, err := resolvers.GetCacheInformerFactory(kubeClient, resyncPeriod)
checkError(logger, err, "failed to create cache informer factory")
informerBasedResolver, err := resolvers.NewInformerBasedResolver(factory.Core().V1().ConfigMaps().Lister())
checkError(logger, err, "failed to create informer based resolver")
configMapResolver, err := engineapi.NewNamespacedResourceResolver(informerBasedResolver, clientBasedResolver)
checkError(logger, err, "failed to create config map resolver")
// start informers and wait for cache sync
if !StartInformersAndWaitForCacheSync(ctx, logger, factory) {
checkError(logger, errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
}
return configMapResolver
}
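
Two things are worth noting in this new file. First, NewConfigMapResolver degrades gracefully: with caching disabled it returns the client-based resolver alone, otherwise it layers the informer-based resolver in front of it via NewNamespacedResourceResolver. Second, the inline error-and-exit blocks deleted from the three main.go files are replaced by calls to a checkError helper. Its implementation is not part of this diff; a plausible shape, inferred from the call sites above (the real helper lives elsewhere in cmd/internal and may differ):

	package internal

	import (
		"os"

		"github.com/go-logr/logr"
	)

	// checkError logs err with msg and aborts the process, letting callers
	// treat every construction error as fatal without repeating boilerplate.
	func checkError(logger logr.Logger, err error, msg string) {
		if err != nil {
			logger.Error(err, msg)
			os.Exit(1)
		}
	}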


@@ -28,6 +28,10 @@ var (
kubeconfig string
clientRateLimitQPS float64
clientRateLimitBurst int
// engine
enablePolicyException bool
exceptionNamespace string
enableConfigMapCaching bool
)
func initLoggingFlags() {
@@ -63,6 +67,15 @@ func initKubeconfigFlags() {
flag.IntVar(&clientRateLimitBurst, "clientRateLimitBurst", 50, "Configure the maximum burst for throttle. Uses the client default if zero.")
}
func initPolicyExceptionsFlags() {
flag.StringVar(&exceptionNamespace, "exceptionNamespace", "", "Configure the namespace to accept PolicyExceptions.")
flag.BoolVar(&enablePolicyException, "enablePolicyException", false, "Enable PolicyException feature.")
}
func initConfigMapCachingFlags() {
flag.BoolVar(&enableConfigMapCaching, "enableConfigMapCaching", true, "Enable config maps caching.")
}
func InitFlags(config Configuration) {
// logging
initLoggingFlags()
@@ -82,6 +95,14 @@ func InitFlags(config Configuration) {
if config.UsesKubeconfig() {
initKubeconfigFlags()
}
// policy exceptions
if config.UsesPolicyExceptions() {
initPolicyExceptionsFlags()
}
// config map caching
if config.UsesConfigMapCaching() {
initConfigMapCachingFlags()
}
for _, flagset := range config.FlagSets() {
flagset.VisitAll(func(f *flag.Flag) {
flag.CommandLine.Var(f.Value, f.Name, f.Usage)
@@ -93,3 +114,11 @@ func ParseFlags(config Configuration) {
InitFlags(config)
flag.Parse()
}
func ExceptionNamespace() string {
return exceptionNamespace
}
func PolicyExceptionEnabled() bool {
return enablePolicyException
}
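
With WithPolicyExceptions() and WithConfigMapCaching() enabled, every opted-in binary now accepts the same three flags. Only ExceptionNamespace() and PolicyExceptionEnabled() are exported, because the webhook exception handlers below need to read them; no accessor for enableConfigMapCaching appears in this diff. A hypothetical invocation (binary name and values are illustrative only):

	reports-controller --enablePolicyException=true --exceptionNamespace=kyverno --enableConfigMapCaching=false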


@@ -30,9 +30,7 @@ import (
policycachecontroller "github.com/kyverno/kyverno/pkg/controllers/policycache"
webhookcontroller "github.com/kyverno/kyverno/pkg/controllers/webhook"
"github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/leaderelection"
"github.com/kyverno/kyverno/pkg/logging"
@@ -229,8 +227,6 @@ func main() {
admissionReports bool
dumpPayload bool
leaderElectionRetryPeriod time.Duration
enablePolicyException bool
exceptionNamespace string
servicePort int
backgroundServiceAccountName string
)
@@ -249,8 +245,6 @@ func main() {
flagset.Func(toggle.ForceFailurePolicyIgnoreFlagName, toggle.ForceFailurePolicyIgnoreDescription, toggle.ForceFailurePolicyIgnore.Parse)
flagset.BoolVar(&admissionReports, "admissionReports", true, "Enable or disable admission reports.")
flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
flagset.StringVar(&exceptionNamespace, "exceptionNamespace", "", "Configure the namespace to accept PolicyExceptions.")
flagset.BoolVar(&enablePolicyException, "enablePolicyException", false, "Enable PolicyException feature.")
flagset.IntVar(&servicePort, "servicePort", 443, "Port used by the Kyverno Service resource and for webhook configurations.")
flagset.StringVar(&backgroundServiceAccountName, "backgroundServiceAccountName", "", "Background service account name.")
// config
@@ -259,6 +253,8 @@ func main() {
internal.WithTracing(),
internal.WithMetrics(),
internal.WithKubeconfig(),
internal.WithPolicyExceptions(),
internal.WithConfigMapCaching(),
internal.WithFlagSets(flagset),
)
// parse flags
@@ -296,11 +292,6 @@ func main() {
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
cacheInformer, err := resolvers.GetCacheInformerFactory(kubeClient, resyncPeriod)
if err != nil {
logger.Error(err, "failed to create cache informer factory")
os.Exit(1)
}
secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
// setup registry client
rclient, err := setupRegistryClient(signalCtx, logger, secretLister, imagePullSecrets, allowInsecureRegistry)
@@ -310,21 +301,6 @@ func main() {
}
// setup cosign
setupCosign(logger, imageSignatureRepository)
informerBasedResolver, err := resolvers.NewInformerBasedResolver(cacheInformer.Core().V1().ConfigMaps().Lister())
if err != nil {
logger.Error(err, "failed to create informer based resolver")
os.Exit(1)
}
clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
if err != nil {
logger.Error(err, "failed to create client based resolver")
os.Exit(1)
}
configMapResolver, err := engineapi.NewNamespacedResourceResolver(informerBasedResolver, clientBasedResolver)
if err != nil {
logger.Error(err, "failed to create config map resolver")
os.Exit(1)
}
configuration, err := config.NewConfiguration(kubeClient, false)
if err != nil {
logger.Error(err, "failed to initialize configuration")
@@ -378,26 +354,20 @@ func main() {
kubeKyvernoInformer.Apps().V1().Deployments(),
certRenewer,
)
var exceptionsLister engineapi.PolicyExceptionSelector
if enablePolicyException {
lister := kyvernoInformer.Kyverno().V2alpha1().PolicyExceptions().Lister()
if exceptionNamespace != "" {
exceptionsLister = lister.PolicyExceptions(exceptionNamespace)
} else {
exceptionsLister = lister
}
}
eng := engine.NewEngine(
// engine
engine := internal.NewEngine(
signalCtx,
logger,
configuration,
metricsConfig.Config(),
dClient,
rclient,
engineapi.DefaultContextLoaderFactory(configMapResolver),
exceptionsLister,
kubeClient,
kyvernoClient,
)
// create non leader controllers
nonLeaderControllers, nonLeaderBootstrap := createNonLeaderControllers(
eng,
engine,
genWorkers,
kubeInformer,
kubeKyvernoInformer,
@@ -409,7 +379,7 @@ func main() {
openApiManager,
)
// start informers and wait for cache sync
if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer, cacheInformer) {
if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}
@@ -505,7 +475,7 @@ func main() {
openApiManager,
)
resourceHandlers := webhooksresource.NewHandlers(
eng,
engine,
dClient,
kyvernoClient,
rclient,
@@ -525,8 +495,8 @@ func main() {
backgroundServiceAccountName,
)
exceptionHandlers := webhooksexception.NewHandlers(exception.ValidationOptions{
Enabled: enablePolicyException,
Namespace: exceptionNamespace,
Enabled: internal.PolicyExceptionEnabled(),
Namespace: internal.ExceptionNamespace(),
})
server := webhooks.NewServer(
policyHandlers,
@@ -554,7 +524,7 @@ func main() {
)
// start informers and wait for cache sync
// we need to call start again because we potentially registered new informers
if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer, cacheInformer) {
if !internal.StartInformersAndWaitForCacheSync(signalCtx, logger, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}


@@ -25,9 +25,7 @@ import (
backgroundscancontroller "github.com/kyverno/kyverno/pkg/controllers/report/background"
resourcereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/resource"
"github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/leaderelection"
"github.com/kyverno/kyverno/pkg/logging"
@@ -79,7 +77,6 @@ func createReportControllers(
metadataFactory metadatainformers.SharedInformerFactory,
kubeInformer kubeinformers.SharedInformerFactory,
kyvernoInformer kyvernoinformer.SharedInformerFactory,
configMapResolver engineapi.ConfigmapResolver,
backgroundScanInterval time.Duration,
configuration config.Configuration,
eventGenerator event.Interface,
@@ -136,7 +133,6 @@ func createReportControllers(
kyvernoV1.ClusterPolicies(),
kubeInformer.Core().V1().Namespaces(),
resourceReportController,
configMapResolver,
backgroundScanInterval,
configuration,
eventGenerator,
@@ -169,7 +165,6 @@ func createrLeaderControllers(
rclient registryclient.Client,
configuration config.Configuration,
eventGenerator event.Interface,
configMapResolver engineapi.ConfigmapResolver,
backgroundScanInterval time.Duration,
) ([]internal.Controller, func(context.Context) error, error) {
reportControllers, warmup := createReportControllers(
@@ -184,7 +179,6 @@ func createrLeaderControllers(
metadataInformer,
kubeInformer,
kyvernoInformer,
configMapResolver,
backgroundScanInterval,
configuration,
eventGenerator,
@@ -218,8 +212,6 @@ func main() {
backgroundScanWorkers int
backgroundScanInterval time.Duration
maxQueuedEvents int
enablePolicyException bool
exceptionNamespace string
skipResourceFilters bool
)
flagset := flag.NewFlagSet("reports-controller", flag.ExitOnError)
@@ -233,8 +225,6 @@ func main() {
flagset.IntVar(&backgroundScanWorkers, "backgroundScanWorkers", backgroundscancontroller.Workers, "Configure the number of background scan workers.")
flagset.DurationVar(&backgroundScanInterval, "backgroundScanInterval", time.Hour, "Configure background scan interval.")
flagset.IntVar(&maxQueuedEvents, "maxQueuedEvents", 1000, "Maximum events to be queued.")
flagset.StringVar(&exceptionNamespace, "exceptionNamespace", "", "Configure the namespace to accept PolicyExceptions.")
flagset.BoolVar(&enablePolicyException, "enablePolicyException", false, "Enable PolicyException feature.")
flagset.BoolVar(&skipResourceFilters, "skipResourceFilters", true, "If true, resource filters wont be considered.")
// config
appConfig := internal.NewConfiguration(
@@ -242,6 +232,8 @@ func main() {
internal.WithMetrics(),
internal.WithTracing(),
internal.WithKubeconfig(),
internal.WithPolicyExceptions(),
internal.WithConfigMapCaching(),
internal.WithFlagSets(flagset),
)
// parse flags
@@ -271,11 +263,6 @@ func main() {
// informer factories
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
cacheInformer, err := resolvers.GetCacheInformerFactory(kubeClient, resyncPeriod)
if err != nil {
logger.Error(err, "failed to create cache informer factory")
os.Exit(1)
}
secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
// setup registry client
rclient, err := setupRegistryClient(ctx, logger, secretLister, imagePullSecrets, allowInsecureRegistry)
@@ -285,21 +272,6 @@ func main() {
}
// setup cosign
setupCosign(logger, imageSignatureRepository)
informerBasedResolver, err := resolvers.NewInformerBasedResolver(cacheInformer.Core().V1().ConfigMaps().Lister())
if err != nil {
logger.Error(err, "failed to create informer based resolver")
os.Exit(1)
}
clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
if err != nil {
logger.Error(err, "failed to create client based resolver")
os.Exit(1)
}
configMapResolver, err := engineapi.NewNamespacedResourceResolver(informerBasedResolver, clientBasedResolver)
if err != nil {
logger.Error(err, "failed to create config map resolver")
os.Exit(1)
}
configuration, err := config.NewConfiguration(kubeClient, skipResourceFilters)
if err != nil {
logger.Error(err, "failed to initialize configuration")
@@ -312,22 +284,16 @@ func main() {
maxQueuedEvents,
logging.WithName("EventGenerator"),
)
var exceptionsLister engineapi.PolicyExceptionSelector
if enablePolicyException {
lister := kyvernoInformer.Kyverno().V2alpha1().PolicyExceptions().Lister()
if exceptionNamespace != "" {
exceptionsLister = lister.PolicyExceptions(exceptionNamespace)
} else {
exceptionsLister = lister
}
}
eng := engine.NewEngine(
// engine
engine := internal.NewEngine(
ctx,
logger,
configuration,
metricsConfig.Config(),
dClient,
rclient,
engineapi.DefaultContextLoaderFactory(configMapResolver),
exceptionsLister,
kubeClient,
kyvernoClient,
)
// create non leader controllers
nonLeaderControllers, nonLeaderBootstrap := createNonLeaderControllers(
@@ -335,7 +301,7 @@ func main() {
kubeKyvernoInformer,
)
// start informers and wait for cache sync
if !internal.StartInformersAndWaitForCacheSync(ctx, logger, kyvernoInformer, kubeKyvernoInformer, cacheInformer) {
if !internal.StartInformersAndWaitForCacheSync(ctx, logger, kyvernoInformer, kubeKyvernoInformer) {
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}
@@ -366,7 +332,7 @@ func main() {
metadataInformer := metadatainformers.NewSharedInformerFactory(metadataClient, 15*time.Minute)
// create leader controllers
leaderControllers, warmup, err := createrLeaderControllers(
eng,
engine,
backgroundScan,
admissionReports,
reportsChunkSize,
@@ -379,7 +345,6 @@ func main() {
rclient,
configuration,
eventGenerator,
configMapResolver,
backgroundScanInterval,
)
if err != nil {


@@ -56,9 +56,8 @@ type controller struct {
// queue
queue workqueue.RateLimitingInterface
eventGen event.Interface
configuration config.Configuration
informerCacheResolvers engineapi.ConfigmapResolver
eventGen event.Interface
configuration config.Configuration
}
// NewController returns an instance of the Generate-Request Controller
@@ -72,21 +71,19 @@ func NewController(
namespaceInformer corev1informers.NamespaceInformer,
eventGen event.Interface,
dynamicConfig config.Configuration,
informerCacheResolvers engineapi.ConfigmapResolver,
) Controller {
urLister := urInformer.Lister().UpdateRequests(config.KyvernoNamespace())
c := controller{
client: client,
kyvernoClient: kyvernoClient,
engine: engine,
cpolLister: cpolInformer.Lister(),
polLister: polInformer.Lister(),
urLister: urLister,
nsLister: namespaceInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "background"),
eventGen: eventGen,
configuration: dynamicConfig,
informerCacheResolvers: informerCacheResolvers,
client: client,
kyvernoClient: kyvernoClient,
engine: engine,
cpolLister: cpolInformer.Lister(),
polLister: polInformer.Lister(),
urLister: urLister,
nsLister: namespaceInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "background"),
eventGen: eventGen,
configuration: dynamicConfig,
}
_, _ = urInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addUR,


@@ -59,9 +59,8 @@ type controller struct {
queue workqueue.RateLimitingInterface
// cache
metadataCache resource.MetadataCache
informerCacheResolvers engineapi.ConfigmapResolver
forceDelay time.Duration
metadataCache resource.MetadataCache
forceDelay time.Duration
// config
config config.Configuration
@@ -77,7 +76,6 @@ func NewController(
cpolInformer kyvernov1informers.ClusterPolicyInformer,
nsInformer corev1informers.NamespaceInformer,
metadataCache resource.MetadataCache,
informerCacheResolvers engineapi.ConfigmapResolver,
forceDelay time.Duration,
config config.Configuration,
eventGen event.Interface,
@@ -86,20 +84,19 @@ func NewController(
cbgscanr := metadataFactory.ForResource(kyvernov1alpha2.SchemeGroupVersion.WithResource("clusterbackgroundscanreports"))
queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName)
c := controller{
client: client,
kyvernoClient: kyvernoClient,
engine: engine,
polLister: polInformer.Lister(),
cpolLister: cpolInformer.Lister(),
bgscanrLister: bgscanr.Lister(),
cbgscanrLister: cbgscanr.Lister(),
nsLister: nsInformer.Lister(),
queue: queue,
metadataCache: metadataCache,
informerCacheResolvers: informerCacheResolvers,
forceDelay: forceDelay,
config: config,
eventGen: eventGen,
client: client,
kyvernoClient: kyvernoClient,
engine: engine,
polLister: polInformer.Lister(),
cpolLister: cpolInformer.Lister(),
bgscanrLister: bgscanr.Lister(),
cbgscanrLister: cbgscanr.Lister(),
nsLister: nsInformer.Lister(),
queue: queue,
metadataCache: metadataCache,
forceDelay: forceDelay,
config: config,
eventGen: eventGen,
}
controllerutils.AddDefaultEventHandlers(logger, bgscanr.Informer(), queue)
controllerutils.AddDefaultEventHandlers(logger, cbgscanr.Informer(), queue)


@@ -75,8 +75,6 @@ type PolicyController struct {
// nsLister can list/get namespaces from the shared informer's store
nsLister corev1listers.NamespaceLister
informerCacheResolvers engineapi.ConfigmapResolver
informersSynced []cache.InformerSynced
// helpers to validate against current loaded configuration
@@ -100,7 +98,6 @@ func NewPolicyController(
configHandler config.Configuration,
eventGen event.Interface,
namespaces corev1informers.NamespaceInformer,
informerCacheResolvers engineapi.ConfigmapResolver,
log logr.Logger,
reconcilePeriod time.Duration,
metricsConfig metrics.MetricsConfigManager,
@@ -112,19 +109,18 @@ func NewPolicyController(
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
pc := PolicyController{
client: client,
kyvernoClient: kyvernoClient,
engine: engine,
pInformer: pInformer,
npInformer: npInformer,
eventGen: eventGen,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
informerCacheResolvers: informerCacheResolvers,
reconcilePeriod: reconcilePeriod,
metricsConfig: metricsConfig,
log: log,
client: client,
kyvernoClient: kyvernoClient,
engine: engine,
pInformer: pInformer,
npInformer: npInformer,
eventGen: eventGen,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, corev1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
reconcilePeriod: reconcilePeriod,
metricsConfig: metricsConfig,
log: log,
}
pc.pLister = pInformer.Lister()
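
The remaining hunks are mechanical: the background controller, the background-scan report controller, and the policy controller all drop their informerCacheResolvers field and constructor parameter, since ConfigMap resolution now happens exclusively inside the engine's context loader. For reference, the interface these controllers stopped carrying, with its shape inferred from usage in this diff (the authoritative definition lives in pkg/engine/api and may differ):

	package api

	import (
		"context"

		corev1 "k8s.io/api/core/v1"
	)

	// ConfigmapResolver resolves a ConfigMap by namespace and name; after
	// this commit, the engine's context loader is its only consumer.
	type ConfigmapResolver interface {
		Get(ctx context.Context, namespace, name string) (*corev1.ConfigMap, error)
	}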