1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2024-12-14 11:57:48 +00:00
Signed-off-by: ShutingZhao <shuting@nirmata.com>

Signed-off-by: ShutingZhao <shuting@nirmata.com>
Co-authored-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
This commit is contained in:
shuting 2023-01-19 16:03:55 +08:00 committed by GitHub
parent 73a4c94f90
commit c0f32d9807
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
12 changed files with 433 additions and 31 deletions

View file

@ -136,7 +136,7 @@ The command removes all the Kubernetes components associated with the chart and
| initImage.repository | string | `"ghcr.io/kyverno/kyvernopre"` | Image repository | | initImage.repository | string | `"ghcr.io/kyverno/kyvernopre"` | Image repository |
| initImage.tag | string | `nil` | Image tag If initImage.tag is missing, defaults to image.tag | | initImage.tag | string | `nil` | Image tag If initImage.tag is missing, defaults to image.tag |
| initImage.pullPolicy | string | `nil` | Image pull policy If initImage.pullPolicy is missing, defaults to image.pullPolicy | | initImage.pullPolicy | string | `nil` | Image pull policy If initImage.pullPolicy is missing, defaults to image.pullPolicy |
| initContainer.extraArgs | list | `["--loggingFormat=text"]` | Extra arguments to give to the kyvernopre binary. | | initContainer.extraArgs | list | `["--loggingFormat=text","--exceptionNamespace={{ include \"kyverno.namespace\" . }}"]` | Extra arguments to give to the kyvernopre binary. |
| testImage.registry | string | `nil` | Image registry | | testImage.registry | string | `nil` | Image registry |
| testImage.repository | string | `"busybox"` | Image repository | | testImage.repository | string | `"busybox"` | Image repository |
| testImage.tag | string | `nil` | Image tag Defaults to `latest` if omitted | | testImage.tag | string | `nil` | Image tag Defaults to `latest` if omitted |

View file

@ -65,6 +65,7 @@ initContainer:
# -- Extra arguments to give to the kyvernopre binary. # -- Extra arguments to give to the kyvernopre binary.
extraArgs: extraArgs:
- --loggingFormat=text - --loggingFormat=text
- --exceptionNamespace={{ include "kyverno.namespace" . }}
testImage: testImage:

View file

@ -34,6 +34,7 @@ import (
resourcereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/resource" resourcereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/resource"
webhookcontroller "github.com/kyverno/kyverno/pkg/controllers/webhook" webhookcontroller "github.com/kyverno/kyverno/pkg/controllers/webhook"
"github.com/kyverno/kyverno/pkg/cosign" "github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers" "github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event" "github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/leaderelection" "github.com/kyverno/kyverno/pkg/leaderelection"
@ -172,6 +173,8 @@ func createReportControllers(
backgroundScanInterval time.Duration, backgroundScanInterval time.Duration,
configuration config.Configuration, configuration config.Configuration,
eventGenerator event.Interface, eventGenerator event.Interface,
enablePolicyException bool,
exceptionNamespace string,
) ([]internal.Controller, func(context.Context) error) { ) ([]internal.Controller, func(context.Context) error) {
var ctrls []internal.Controller var ctrls []internal.Controller
var warmups []func(context.Context) error var warmups []func(context.Context) error
@ -215,6 +218,15 @@ func createReportControllers(
)) ))
} }
if backgroundScan { if backgroundScan {
var exceptionsLister engine.PolicyExceptionLister
if enablePolicyException {
lister := kyvernoV2Alpha1.PolicyExceptions().Lister()
if exceptionNamespace != "" {
exceptionsLister = lister.PolicyExceptions(exceptionNamespace)
} else {
exceptionsLister = lister
}
}
ctrls = append(ctrls, internal.NewController( ctrls = append(ctrls, internal.NewController(
backgroundscancontroller.ControllerName, backgroundscancontroller.ControllerName,
backgroundscancontroller.NewController( backgroundscancontroller.NewController(
@ -225,7 +237,7 @@ func createReportControllers(
kyvernoV1.Policies(), kyvernoV1.Policies(),
kyvernoV1.ClusterPolicies(), kyvernoV1.ClusterPolicies(),
kubeInformer.Core().V1().Namespaces(), kubeInformer.Core().V1().Namespaces(),
kyvernoV2Alpha1.PolicyExceptions(), exceptionsLister,
resourceReportController, resourceReportController,
configMapResolver, configMapResolver,
backgroundScanInterval, backgroundScanInterval,
@ -269,6 +281,8 @@ func createrLeaderControllers(
runtime runtimeutils.Runtime, runtime runtimeutils.Runtime,
configMapResolver resolvers.ConfigmapResolver, configMapResolver resolvers.ConfigmapResolver,
backgroundScanInterval time.Duration, backgroundScanInterval time.Duration,
enablePolicyException bool,
exceptionNamespace string,
) ([]internal.Controller, func(context.Context) error, error) { ) ([]internal.Controller, func(context.Context) error, error) {
policyCtrl, err := policy.NewPolicyController( policyCtrl, err := policy.NewPolicyController(
kyvernoClient, kyvernoClient,
@ -349,6 +363,8 @@ func createrLeaderControllers(
backgroundScanInterval, backgroundScanInterval,
configuration, configuration,
eventGenerator, eventGenerator,
enablePolicyException,
exceptionNamespace,
) )
return append( return append(
[]internal.Controller{ []internal.Controller{
@ -383,6 +399,8 @@ func main() {
dumpPayload bool dumpPayload bool
leaderElectionRetryPeriod time.Duration leaderElectionRetryPeriod time.Duration
backgroundScanInterval time.Duration backgroundScanInterval time.Duration
enablePolicyException bool
exceptionNamespace string
) )
flagset := flag.NewFlagSet("kyverno", flag.ExitOnError) flagset := flag.NewFlagSet("kyverno", flag.ExitOnError)
flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.") flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.")
@ -403,6 +421,8 @@ func main() {
flagset.IntVar(&backgroundScanWorkers, "backgroundScanWorkers", backgroundscancontroller.Workers, "Configure the number of background scan workers.") flagset.IntVar(&backgroundScanWorkers, "backgroundScanWorkers", backgroundscancontroller.Workers, "Configure the number of background scan workers.")
flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.") flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
flagset.DurationVar(&backgroundScanInterval, "backgroundScanInterval", time.Hour, "Configure background scan interval.") flagset.DurationVar(&backgroundScanInterval, "backgroundScanInterval", time.Hour, "Configure background scan interval.")
flagset.StringVar(&exceptionNamespace, "exceptionNamespace", "", "Configure the namespace to accept PolicyExceptions.")
flagset.BoolVar(&enablePolicyException, "enablePolicyException", false, "Enable PolicyException feature.")
// config // config
appConfig := internal.NewConfiguration( appConfig := internal.NewConfiguration(
internal.WithProfiling(), internal.WithProfiling(),
@ -586,6 +606,8 @@ func main() {
runtime, runtime,
configMapResolver, configMapResolver,
backgroundScanInterval, backgroundScanInterval,
enablePolicyException,
exceptionNamespace,
) )
if err != nil { if err != nil {
logger.Error(err, "failed to create leader controllers") logger.Error(err, "failed to create leader controllers")
@ -642,6 +664,15 @@ func main() {
dClient, dClient,
openApiManager, openApiManager,
) )
var exceptionsLister engine.PolicyExceptionLister
if enablePolicyException {
lister := kyvernoInformer.Kyverno().V2alpha1().PolicyExceptions().Lister()
if exceptionNamespace != "" {
exceptionsLister = lister.PolicyExceptions(exceptionNamespace)
} else {
exceptionsLister = lister
}
}
resourceHandlers := webhooksresource.NewHandlers( resourceHandlers := webhooksresource.NewHandlers(
dClient, dClient,
kyvernoClient, kyvernoClient,
@ -654,7 +685,7 @@ func main() {
kubeInformer.Rbac().V1().RoleBindings().Lister(), kubeInformer.Rbac().V1().RoleBindings().Lister(),
kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(), kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests().Lister().UpdateRequests(config.KyvernoNamespace()), kyvernoInformer.Kyverno().V1beta1().UpdateRequests().Lister().UpdateRequests(config.KyvernoNamespace()),
kyvernoInformer.Kyverno().V2alpha1().PolicyExceptions().Lister(), exceptionsLister,
urgen, urgen,
eventGenerator, eventGenerator,
openApiManager, openApiManager,

View file

@ -0,0 +1,363 @@
package main
import (
"context"
"errors"
"flag"
"os"
"strings"
"sync"
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/cmd/internal"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
"github.com/kyverno/kyverno/pkg/clients/dclient"
dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
metadataclient "github.com/kyverno/kyverno/pkg/clients/metadata"
"github.com/kyverno/kyverno/pkg/config"
admissionreportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/admission"
aggregatereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/aggregate"
backgroundscancontroller "github.com/kyverno/kyverno/pkg/controllers/report/background"
resourcereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/resource"
"github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/leaderelection"
"github.com/kyverno/kyverno/pkg/logging"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/registryclient"
kubeinformers "k8s.io/client-go/informers"
corev1listers "k8s.io/client-go/listers/core/v1"
metadatainformers "k8s.io/client-go/metadata/metadatainformer"
kyamlopenapi "sigs.k8s.io/kustomize/kyaml/openapi"
)
// resyncPeriod is the resync interval shared by every informer factory
// created in this binary.
const (
	resyncPeriod = 15 * time.Minute
)
// setupRegistryClient builds the registry client used to pull image
// metadata and verify signatures.
//
// Tracing is always enabled. When imagePullSecrets is a non-empty
// comma-separated list of secret names, a keychain backed by those
// secrets (read through lister) is added. allowInsecureRegistry
// additionally permits insecure registry connections — testing only.
//
// Fix: strings.Split never returns an empty slice, so the original
// `len(secrets) > 0` check was always true; the split is now performed
// only when imagePullSecrets is non-empty.
func setupRegistryClient(ctx context.Context, logger logr.Logger, lister corev1listers.SecretNamespaceLister, imagePullSecrets string, allowInsecureRegistry bool) (registryclient.Client, error) {
	logger = logger.WithName("registry-client")
	logger.Info("setup registry client...", "secrets", imagePullSecrets, "insecure", allowInsecureRegistry)
	registryOptions := []registryclient.Option{
		registryclient.WithTracing(),
	}
	if imagePullSecrets != "" {
		secrets := strings.Split(imagePullSecrets, ",")
		registryOptions = append(registryOptions, registryclient.WithKeychainPullSecrets(ctx, lister, secrets...))
	}
	if allowInsecureRegistry {
		registryOptions = append(registryOptions, registryclient.WithAllowInsecureRegistry())
	}
	return registryclient.New(registryOptions...)
}
// setupCosign configures the cosign integration. A non-empty
// imageSignatureRepository overrides the package-level default
// repository used to look up image signatures.
func setupCosign(logger logr.Logger, imageSignatureRepository string) {
	logger.WithName("cosign").Info("setup cosign...", "repository", imageSignatureRepository)
	if imageSignatureRepository == "" {
		return
	}
	cosign.ImageSignatureRepository = imageSignatureRepository
}
// createReportControllers builds the report-related controllers and a
// warmup function that must complete before the controllers start.
//
// When backgroundScan or admissionReports is enabled it always creates
// the resource report controller (with a cache warmup) and the
// aggregate report controller (results capped by reportsChunkSize).
// The admission report controller is added only when admissionReports
// is set, and the background scan controller only when backgroundScan
// is set.
func createReportControllers(
	backgroundScan bool,
	admissionReports bool,
	reportsChunkSize int,
	backgroundScanWorkers int,
	client dclient.Interface,
	kyvernoClient versioned.Interface,
	rclient registryclient.Client,
	metadataFactory metadatainformers.SharedInformerFactory,
	kubeInformer kubeinformers.SharedInformerFactory,
	kyvernoInformer kyvernoinformer.SharedInformerFactory,
	configMapResolver resolvers.ConfigmapResolver,
	backgroundScanInterval time.Duration,
	configuration config.Configuration,
	eventGenerator event.Interface,
) ([]internal.Controller, func(context.Context) error) {
	var ctrls []internal.Controller
	var warmups []func(context.Context) error
	kyvernoV1 := kyvernoInformer.Kyverno().V1()
	kyvernoV2Alpha1 := kyvernoInformer.Kyverno().V2alpha1()
	if backgroundScan || admissionReports {
		// The resource report controller is shared by every other
		// report controller created below.
		resourceReportController := resourcereportcontroller.NewController(
			client,
			kyvernoV1.Policies(),
			kyvernoV1.ClusterPolicies(),
		)
		warmups = append(warmups, func(ctx context.Context) error {
			return resourceReportController.Warmup(ctx)
		})
		ctrls = append(ctrls, internal.NewController(
			resourcereportcontroller.ControllerName,
			resourceReportController,
			resourcereportcontroller.Workers,
		))
		ctrls = append(ctrls, internal.NewController(
			aggregatereportcontroller.ControllerName,
			aggregatereportcontroller.NewController(
				kyvernoClient,
				metadataFactory,
				kyvernoV1.Policies(),
				kyvernoV1.ClusterPolicies(),
				resourceReportController,
				reportsChunkSize,
			),
			aggregatereportcontroller.Workers,
		))
		if admissionReports {
			ctrls = append(ctrls, internal.NewController(
				admissionreportcontroller.ControllerName,
				admissionreportcontroller.NewController(
					kyvernoClient,
					metadataFactory,
					resourceReportController,
				),
				admissionreportcontroller.Workers,
			))
		}
		if backgroundScan {
			ctrls = append(ctrls, internal.NewController(
				backgroundscancontroller.ControllerName,
				backgroundscancontroller.NewController(
					client,
					kyvernoClient,
					rclient,
					metadataFactory,
					kyvernoV1.Policies(),
					kyvernoV1.ClusterPolicies(),
					kubeInformer.Core().V1().Namespaces(),
					kyvernoV2Alpha1.PolicyExceptions().Lister(),
					resourceReportController,
					configMapResolver,
					backgroundScanInterval,
					configuration,
					eventGenerator,
				),
				backgroundScanWorkers,
			))
		}
	}
	// The returned warmup runs every registered warmup in registration
	// order and stops at the first error.
	return ctrls, func(ctx context.Context) error {
		for _, warmup := range warmups {
			if err := warmup(ctx); err != nil {
				return err
			}
		}
		return nil
	}
}
// createrLeaderControllers builds the controllers that must run only on
// the elected leader — for this binary, the report controllers together
// with their warmup function.
//
// NOTE(review): the name reads like a typo of "createLeaderControllers";
// it is kept unchanged so existing call sites still compile.
func createrLeaderControllers(
	backgroundScan bool,
	admissionReports bool,
	reportsChunkSize int,
	backgroundScanWorkers int,
	kubeInformer kubeinformers.SharedInformerFactory,
	kyvernoInformer kyvernoinformer.SharedInformerFactory,
	metadataInformer metadatainformers.SharedInformerFactory,
	kyvernoClient versioned.Interface,
	dynamicClient dclient.Interface,
	rclient registryclient.Client,
	configuration config.Configuration,
	eventGenerator event.Interface,
	configMapResolver resolvers.ConfigmapResolver,
	backgroundScanInterval time.Duration,
) ([]internal.Controller, func(context.Context) error, error) {
	ctrls, warmup := createReportControllers(
		backgroundScan,
		admissionReports,
		reportsChunkSize,
		backgroundScanWorkers,
		dynamicClient,
		kyvernoClient,
		rclient,
		metadataInformer,
		kubeInformer,
		kyvernoInformer,
		configMapResolver,
		backgroundScanInterval,
		configuration,
		eventGenerator,
	)
	return ctrls, warmup, nil
}
// main is the entry point of the reports controller. It parses flags,
// builds instrumented clients and informer factories, and runs the
// report controllers under leader election until the context is done.
func main() {
	// Command-line configuration.
	var (
		leaderElectionRetryPeriod time.Duration
		imagePullSecrets          string
		imageSignatureRepository  string
		allowInsecureRegistry     bool
		backgroundScan            bool
		admissionReports          bool
		reportsChunkSize          int
		backgroundScanWorkers     int
		backgroundScanInterval    time.Duration
		maxQueuedEvents           int
	)
	flagset := flag.NewFlagSet("reports-controller", flag.ExitOnError)
	flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
	flagset.StringVar(&imagePullSecrets, "imagePullSecrets", "", "Secret resource names for image registry access credentials.")
	flagset.StringVar(&imageSignatureRepository, "imageSignatureRepository", "", "Alternate repository for image signatures. Can be overridden per rule via `verifyImages.Repository`.")
	flagset.BoolVar(&allowInsecureRegistry, "allowInsecureRegistry", false, "Whether to allow insecure connections to registries. Don't use this for anything but testing.")
	// NOTE(review): "backgound" in the description below is a typo of
	// "background" in a user-visible string; left unchanged here.
	flagset.BoolVar(&backgroundScan, "backgroundScan", true, "Enable or disable backgound scan.")
	flagset.BoolVar(&admissionReports, "admissionReports", true, "Enable or disable admission reports.")
	flagset.IntVar(&reportsChunkSize, "reportsChunkSize", 1000, "Max number of results in generated reports, reports will be split accordingly if there are more results to be stored.")
	flagset.IntVar(&backgroundScanWorkers, "backgroundScanWorkers", backgroundscancontroller.Workers, "Configure the number of background scan workers.")
	flagset.DurationVar(&backgroundScanInterval, "backgroundScanInterval", time.Hour, "Configure background scan interval.")
	flagset.IntVar(&maxQueuedEvents, "maxQueuedEvents", 1000, "Maximum events to be queued.")
	// config
	appConfig := internal.NewConfiguration(
		internal.WithProfiling(),
		internal.WithMetrics(),
		internal.WithTracing(),
		internal.WithKubeconfig(),
		internal.WithFlagSets(flagset),
	)
	// parse flags
	internal.ParseFlags(appConfig)
	// setup logger
	// show version
	// start profiling
	// setup signals
	// setup maxprocs
	// setup metrics
	ctx, logger, metricsConfig, sdown := internal.Setup()
	defer sdown()
	// create instrumented clients
	kubeClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
	leaderElectionClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
	kyvernoClient := internal.CreateKyvernoClient(logger, kyvernoclient.WithMetrics(metricsConfig, metrics.KyvernoClient), kyvernoclient.WithTracing())
	metadataClient := internal.CreateMetadataClient(logger, metadataclient.WithMetrics(metricsConfig, metrics.KyvernoClient), metadataclient.WithTracing())
	dynamicClient := internal.CreateDynamicClient(logger, dynamicclient.WithMetrics(metricsConfig, metrics.KyvernoClient), dynamicclient.WithTracing())
	dClient, err := dclient.NewClient(ctx, dynamicClient, kubeClient, 15*time.Minute)
	if err != nil {
		logger.Error(err, "failed to create dynamic client")
		os.Exit(1)
	}
	// THIS IS AN UGLY FIX
	// ELSE KYAML IS NOT THREAD SAFE
	kyamlopenapi.Schema()
	// informer factories
	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
	kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
	cacheInformer, err := resolvers.GetCacheInformerFactory(kubeClient, resyncPeriod)
	if err != nil {
		logger.Error(err, "failed to create cache informer factory")
		os.Exit(1)
	}
	// Secrets are read only from the kyverno namespace.
	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister().Secrets(config.KyvernoNamespace())
	// setup registry client
	rclient, err := setupRegistryClient(ctx, logger, secretLister, imagePullSecrets, allowInsecureRegistry)
	if err != nil {
		logger.Error(err, "failed to setup registry client")
		os.Exit(1)
	}
	// setup cosign
	setupCosign(logger, imageSignatureRepository)
	// ConfigMap resolution tries the informer cache first, then falls
	// back to a direct client lookup.
	informerBasedResolver, err := resolvers.NewInformerBasedResolver(cacheInformer.Core().V1().ConfigMaps().Lister())
	if err != nil {
		logger.Error(err, "failed to create informer based resolver")
		os.Exit(1)
	}
	clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
	if err != nil {
		logger.Error(err, "failed to create client based resolver")
		os.Exit(1)
	}
	configMapResolver, err := resolvers.NewResolverChain(informerBasedResolver, clientBasedResolver)
	if err != nil {
		logger.Error(err, "failed to create config map resolver")
		os.Exit(1)
	}
	configuration, err := config.NewConfiguration(kubeClient)
	if err != nil {
		logger.Error(err, "failed to initialize configuration")
		os.Exit(1)
	}
	eventGenerator := event.NewEventGenerator(
		dClient,
		kyvernoInformer.Kyverno().V1().ClusterPolicies(),
		kyvernoInformer.Kyverno().V1().Policies(),
		maxQueuedEvents,
		logging.WithName("EventGenerator"),
	)
	// setup leader election
	le, err := leaderelection.New(
		logger.WithName("leader-election"),
		"kyverno-reports-controller",
		config.KyvernoNamespace(),
		leaderElectionClient,
		config.KyvernoPodName(),
		leaderElectionRetryPeriod,
		// Callback invoked when this instance becomes leader: build
		// leader-scoped informers and controllers, sync caches, warm
		// up, then run the controllers until ctx is cancelled.
		func(ctx context.Context) {
			logger := logger.WithName("leader")
			// create leader factories
			kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
			kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
			kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
			metadataInformer := metadatainformers.NewSharedInformerFactory(metadataClient, 15*time.Minute)
			// create leader controllers
			leaderControllers, warmup, err := createrLeaderControllers(
				backgroundScan,
				admissionReports,
				reportsChunkSize,
				backgroundScanWorkers,
				kubeInformer,
				kyvernoInformer,
				metadataInformer,
				kyvernoClient,
				dClient,
				rclient,
				configuration,
				eventGenerator,
				configMapResolver,
				backgroundScanInterval,
			)
			if err != nil {
				logger.Error(err, "failed to create leader controllers")
				os.Exit(1)
			}
			// start informers and wait for cache sync
			if !internal.StartInformersAndWaitForCacheSync(ctx, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
				logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
				os.Exit(1)
			}
			// The metadata informer is started and synced separately.
			internal.StartInformers(ctx, metadataInformer)
			if !internal.CheckCacheSync(metadataInformer.WaitForCacheSync(ctx.Done())) {
				logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
				os.Exit(1)
			}
			if err := warmup(ctx); err != nil {
				logger.Error(err, "failed to run warmup")
				os.Exit(1)
			}
			// start leader controllers
			var wg sync.WaitGroup
			for _, controller := range leaderControllers {
				controller.Run(ctx, logger.WithName("controllers"), &wg)
			}
			// wait all controllers shut down
			wg.Wait()
		},
		nil,
	)
	if err != nil {
		logger.Error(err, "failed to initialize leader election")
		os.Exit(1)
	}
	// Re-enter the leader election loop whenever leadership is lost,
	// until the context is cancelled.
	for {
		select {
		case <-ctx.Done():
			return
		default:
			le.Run(ctx)
		}
	}
}

View file

@ -31063,6 +31063,7 @@ spec:
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
args: args:
- --loggingFormat=text - --loggingFormat=text
- --exceptionNamespace=kyverno
resources: resources:
limits: limits:
cpu: 100m cpu: 100m

View file

@ -11,14 +11,13 @@ import (
policyreportv1alpha2 "github.com/kyverno/kyverno/api/policyreport/v1alpha2" policyreportv1alpha2 "github.com/kyverno/kyverno/api/policyreport/v1alpha2"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned" "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1" kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernov2alpha1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v2alpha1"
kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1" kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
kyvernov2alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v2alpha1"
"github.com/kyverno/kyverno/pkg/clients/dclient" "github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config" "github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/controllers" "github.com/kyverno/kyverno/pkg/controllers"
"github.com/kyverno/kyverno/pkg/controllers/report/resource" "github.com/kyverno/kyverno/pkg/controllers/report/resource"
"github.com/kyverno/kyverno/pkg/controllers/report/utils" "github.com/kyverno/kyverno/pkg/controllers/report/utils"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers" "github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event" "github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/registryclient" "github.com/kyverno/kyverno/pkg/registryclient"
@ -57,7 +56,7 @@ type controller struct {
bgscanrLister cache.GenericLister bgscanrLister cache.GenericLister
cbgscanrLister cache.GenericLister cbgscanrLister cache.GenericLister
nsLister corev1listers.NamespaceLister nsLister corev1listers.NamespaceLister
polexLister kyvernov2alpha1listers.PolicyExceptionLister polexLister engine.PolicyExceptionLister
// queue // queue
queue workqueue.RateLimitingInterface queue workqueue.RateLimitingInterface
@ -80,7 +79,7 @@ func NewController(
polInformer kyvernov1informers.PolicyInformer, polInformer kyvernov1informers.PolicyInformer,
cpolInformer kyvernov1informers.ClusterPolicyInformer, cpolInformer kyvernov1informers.ClusterPolicyInformer,
nsInformer corev1informers.NamespaceInformer, nsInformer corev1informers.NamespaceInformer,
polexInformer kyvernov2alpha1informers.PolicyExceptionInformer, polexLister engine.PolicyExceptionLister,
metadataCache resource.MetadataCache, metadataCache resource.MetadataCache,
informerCacheResolvers resolvers.ConfigmapResolver, informerCacheResolvers resolvers.ConfigmapResolver,
forceDelay time.Duration, forceDelay time.Duration,
@ -99,7 +98,7 @@ func NewController(
bgscanrLister: bgscanr.Lister(), bgscanrLister: bgscanr.Lister(),
cbgscanrLister: cbgscanr.Lister(), cbgscanrLister: cbgscanr.Lister(),
nsLister: nsInformer.Lister(), nsLister: nsInformer.Lister(),
polexLister: polexInformer.Lister(), polexLister: polexLister,
queue: queue, queue: queue,
metadataCache: metadataCache, metadataCache: metadataCache,
informerCacheResolvers: informerCacheResolvers, informerCacheResolvers: informerCacheResolvers,
@ -310,7 +309,7 @@ func (c *controller) reconcileReport(
// calculate necessary results // calculate necessary results
for _, policy := range backgroundPolicies { for _, policy := range backgroundPolicies {
if full || actual[reportutils.PolicyLabel(policy)] != policy.GetResourceVersion() { if full || actual[reportutils.PolicyLabel(policy)] != policy.GetResourceVersion() {
scanner := utils.NewScanner(logger, c.client, c.rclient, c.informerCacheResolvers, c.polexLister) scanner := utils.NewScanner(logger, c.client, c.rclient, c.informerCacheResolvers, c.polexLister, c.config)
for _, result := range scanner.ScanResource(ctx, *target, nsLabels, policy) { for _, result := range scanner.ScanResource(ctx, *target, nsLabels, policy) {
if result.Error != nil { if result.Error != nil {
return result.Error return result.Error

View file

@ -5,8 +5,8 @@ import (
"github.com/go-logr/logr" "github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1" kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov2alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v2alpha1"
"github.com/kyverno/kyverno/pkg/clients/dclient" "github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine" "github.com/kyverno/kyverno/pkg/engine"
enginecontext "github.com/kyverno/kyverno/pkg/engine/context" enginecontext "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers" "github.com/kyverno/kyverno/pkg/engine/context/resolvers"
@ -21,7 +21,7 @@ type scanner struct {
client dclient.Interface client dclient.Interface
rclient registryclient.Client rclient registryclient.Client
informerCacheResolvers resolvers.ConfigmapResolver informerCacheResolvers resolvers.ConfigmapResolver
polexLister kyvernov2alpha1listers.PolicyExceptionLister polexLister engine.PolicyExceptionLister
excludeGroupRole []string excludeGroupRole []string
} }
@ -39,7 +39,8 @@ func NewScanner(
client dclient.Interface, client dclient.Interface,
rclient registryclient.Client, rclient registryclient.Client,
informerCacheResolvers resolvers.ConfigmapResolver, informerCacheResolvers resolvers.ConfigmapResolver,
polexLister kyvernov2alpha1listers.PolicyExceptionLister, polexLister engine.PolicyExceptionLister,
config config.Configuration,
excludeGroupRole ...string, excludeGroupRole ...string,
) Scanner { ) Scanner {
return &scanner{ return &scanner{

View file

@ -4,7 +4,6 @@ import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1" kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1" kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
kyvernov2alpha1 "github.com/kyverno/kyverno/api/kyverno/v2alpha1" kyvernov2alpha1 "github.com/kyverno/kyverno/api/kyverno/v2alpha1"
kyvernov2alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v2alpha1"
"github.com/kyverno/kyverno/pkg/clients/dclient" "github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config" "github.com/kyverno/kyverno/pkg/config"
enginectx "github.com/kyverno/kyverno/pkg/engine/context" enginectx "github.com/kyverno/kyverno/pkg/engine/context"
@ -21,6 +20,12 @@ import (
// ExcludeFunc is a function used to determine if a resource is excluded // ExcludeFunc is a function used to determine if a resource is excluded
type ExcludeFunc = func(kind, namespace, name string) bool type ExcludeFunc = func(kind, namespace, name string) bool
type PolicyExceptionLister interface {
// List lists all PolicyExceptions in the indexer.
// Objects returned here must be treated as read-only.
List(selector labels.Selector) (ret []*kyvernov2alpha1.PolicyException, err error)
}
// PolicyContext contains the contexts for engine to process // PolicyContext contains the contexts for engine to process
type PolicyContext struct { type PolicyContext struct {
// policy is the policy to be processed // policy is the policy to be processed
@ -80,7 +85,7 @@ type PolicyContext struct {
} }
// peLister list all policy exceptions // peLister list all policy exceptions
peLister kyvernov2alpha1listers.PolicyExceptionLister peLister PolicyExceptionLister
} }
// Getters // Getters
@ -218,7 +223,7 @@ func (c *PolicyContext) WithSubresourcesInPolicy(subresourcesInPolicy []struct {
return copy return copy
} }
func (c *PolicyContext) WithExceptions(peLister kyvernov2alpha1listers.PolicyExceptionLister) *PolicyContext { func (c *PolicyContext) WithExceptions(peLister PolicyExceptionLister) *PolicyContext {
copy := c.Copy() copy := c.Copy()
copy.peLister = peLister copy.peLister = peLister
return copy return copy
@ -245,7 +250,7 @@ func NewPolicyContextFromAdmissionRequest(
configuration config.Configuration, configuration config.Configuration,
client dclient.Interface, client dclient.Interface,
informerCacheResolver resolvers.ConfigmapResolver, informerCacheResolver resolvers.ConfigmapResolver,
peLister kyvernov2alpha1listers.PolicyExceptionLister, polexLister PolicyExceptionLister,
) (*PolicyContext, error) { ) (*PolicyContext, error) {
ctx, err := newVariablesContext(request, &admissionInfo) ctx, err := newVariablesContext(request, &admissionInfo)
if err != nil { if err != nil {
@ -269,7 +274,7 @@ func NewPolicyContextFromAdmissionRequest(
WithInformerCacheResolver(informerCacheResolver). WithInformerCacheResolver(informerCacheResolver).
WithRequestResource(*requestResource). WithRequestResource(*requestResource).
WithSubresource(request.SubResource). WithSubresource(request.SubResource).
WithExceptions(peLister) WithExceptions(polexLister)
return policyContext, nil return policyContext, nil
} }

View file

@ -10,9 +10,9 @@ import (
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1" kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned" "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernov1beta1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1" kyvernov1beta1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1"
kyvernov2alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v2alpha1"
"github.com/kyverno/kyverno/pkg/clients/dclient" "github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config" "github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
enginectx "github.com/kyverno/kyverno/pkg/engine/context" enginectx "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers" "github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event" "github.com/kyverno/kyverno/pkg/event"
@ -50,11 +50,11 @@ type handlers struct {
pCache policycache.Cache pCache policycache.Cache
// listers // listers
nsLister corev1listers.NamespaceLister nsLister corev1listers.NamespaceLister
rbLister rbacv1listers.RoleBindingLister rbLister rbacv1listers.RoleBindingLister
crbLister rbacv1listers.ClusterRoleBindingLister crbLister rbacv1listers.ClusterRoleBindingLister
urLister kyvernov1beta1listers.UpdateRequestNamespaceLister urLister kyvernov1beta1listers.UpdateRequestNamespaceLister
peLister kyvernov2alpha1listers.PolicyExceptionLister polexLister engine.PolicyExceptionLister
urGenerator webhookgenerate.Generator urGenerator webhookgenerate.Generator
eventGen event.Interface eventGen event.Interface
@ -77,7 +77,7 @@ func NewHandlers(
rbLister rbacv1listers.RoleBindingLister, rbLister rbacv1listers.RoleBindingLister,
crbLister rbacv1listers.ClusterRoleBindingLister, crbLister rbacv1listers.ClusterRoleBindingLister,
urLister kyvernov1beta1listers.UpdateRequestNamespaceLister, urLister kyvernov1beta1listers.UpdateRequestNamespaceLister,
peLister kyvernov2alpha1listers.PolicyExceptionLister, polexLister engine.PolicyExceptionLister,
urGenerator webhookgenerate.Generator, urGenerator webhookgenerate.Generator,
eventGen event.Interface, eventGen event.Interface,
openApiManager openapi.ValidateInterface, openApiManager openapi.ValidateInterface,
@ -94,11 +94,11 @@ func NewHandlers(
rbLister: rbLister, rbLister: rbLister,
crbLister: crbLister, crbLister: crbLister,
urLister: urLister, urLister: urLister,
peLister: peLister, polexLister: polexLister,
urGenerator: urGenerator, urGenerator: urGenerator,
eventGen: eventGen, eventGen: eventGen,
openApiManager: openApiManager, openApiManager: openApiManager,
pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, client, rbLister, crbLister, informerCacheResolvers, peLister), pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, client, rbLister, crbLister, informerCacheResolvers, polexLister),
urUpdater: webhookutils.NewUpdateRequestUpdater(kyvernoClient, urLister), urUpdater: webhookutils.NewUpdateRequestUpdater(kyvernoClient, urLister),
admissionReports: admissionReports, admissionReports: admissionReports,
} }

View file

@ -2,7 +2,6 @@ package utils
import ( import (
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1" kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
kyvernov2alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v2alpha1"
"github.com/kyverno/kyverno/pkg/clients/dclient" "github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config" "github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine" "github.com/kyverno/kyverno/pkg/engine"
@ -23,7 +22,7 @@ type policyContextBuilder struct {
rbLister rbacv1listers.RoleBindingLister rbLister rbacv1listers.RoleBindingLister
crbLister rbacv1listers.ClusterRoleBindingLister crbLister rbacv1listers.ClusterRoleBindingLister
informerCacheResolvers resolvers.ConfigmapResolver informerCacheResolvers resolvers.ConfigmapResolver
peLister kyvernov2alpha1listers.PolicyExceptionLister polexLister engine.PolicyExceptionLister
} }
func NewPolicyContextBuilder( func NewPolicyContextBuilder(
@ -32,7 +31,7 @@ func NewPolicyContextBuilder(
rbLister rbacv1listers.RoleBindingLister, rbLister rbacv1listers.RoleBindingLister,
crbLister rbacv1listers.ClusterRoleBindingLister, crbLister rbacv1listers.ClusterRoleBindingLister,
informerCacheResolvers resolvers.ConfigmapResolver, informerCacheResolvers resolvers.ConfigmapResolver,
peLister kyvernov2alpha1listers.PolicyExceptionLister, polexLister engine.PolicyExceptionLister,
) PolicyContextBuilder { ) PolicyContextBuilder {
return &policyContextBuilder{ return &policyContextBuilder{
configuration: configuration, configuration: configuration,
@ -40,7 +39,7 @@ func NewPolicyContextBuilder(
rbLister: rbLister, rbLister: rbLister,
crbLister: crbLister, crbLister: crbLister,
informerCacheResolvers: informerCacheResolvers, informerCacheResolvers: informerCacheResolvers,
peLister: peLister, polexLister: polexLister,
} }
} }
@ -54,5 +53,5 @@ func (b *policyContextBuilder) Build(request *admissionv1.AdmissionRequest) (*en
userRequestInfo.Roles = roles userRequestInfo.Roles = roles
userRequestInfo.ClusterRoles = clusterRoles userRequestInfo.ClusterRoles = clusterRoles
} }
return engine.NewPolicyContextFromAdmissionRequest(request, userRequestInfo, b.configuration, b.client, b.informerCacheResolvers, b.peLister) return engine.NewPolicyContextFromAdmissionRequest(request, userRequestInfo, b.configuration, b.client, b.informerCacheResolvers, b.polexLister)
} }

View file

@ -7,6 +7,7 @@ extraArgs:
- --enableTracing - --enableTracing
- --tracingAddress=tempo.monitoring - --tracingAddress=tempo.monitoring
- --tracingPort=4317 - --tracingPort=4317
- --enablePolicyException
serviceMonitor: serviceMonitor:
enabled: true enabled: true

View file

@ -4,6 +4,7 @@ initContainer:
extraArgs: extraArgs:
- --loggingFormat=json - --loggingFormat=json
- --enablePolicyException
cleanupController: cleanupController:
rbac: rbac: