1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2024-12-14 11:57:48 +00:00

refactor: more engine interface (#6199)

* refactor: more engine interface

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fixes

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

---------

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
This commit is contained in:
Charles-Edouard Brétéché 2023-02-03 06:01:11 +01:00 committed by GitHub
parent 0020a7c749
commit 6c0549febe
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
32 changed files with 216 additions and 240 deletions

View file

@@ -80,7 +80,6 @@ func createNonLeaderControllers(
kyvernoClient,
dynamicClient,
eng,
engine.LegacyContextLoaderFactory(rclient),
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
@@ -94,6 +93,7 @@ func createNonLeaderControllers(
}
func createrLeaderControllers(
eng engineapi.Engine,
kubeInformer kubeinformers.SharedInformerFactory,
kyvernoInformer kyvernoinformer.SharedInformerFactory,
kyvernoClient versioned.Interface,
@@ -107,7 +107,7 @@ func createrLeaderControllers(
policyCtrl, err := policy.NewPolicyController(
kyvernoClient,
dynamicClient,
engine.LegacyContextLoaderFactory(rclient),
eng,
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
@@ -226,7 +226,10 @@ func main() {
kyvernoInformer.Kyverno().V1().ClusterPolicies(),
kyvernoInformer.Kyverno().V1().Policies(),
)
engine := engine.NewEgine()
engine := engine.NewEngine(
configuration,
engine.LegacyContextLoaderFactory(rclient, configMapResolver),
)
// create non leader controllers
nonLeaderControllers := createNonLeaderControllers(
engine,
@@ -263,6 +266,7 @@ func main() {
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
// create leader controllers
leaderControllers, err := createrLeaderControllers(
engine,
kubeInformer,
kyvernoInformer,
kyvernoClient,

View file

@@ -474,7 +474,10 @@ OuterLoop:
})
}
}
eng := engine.NewEgine()
eng := engine.NewEngine(
cfg,
engine.LegacyContextLoaderFactory(registryclient.NewOrDie(), nil),
)
policyContext := engine.NewPolicyContextWithJsonContext(ctx).
WithPolicy(c.Policy).
WithNewResource(*updatedResource).
@@ -485,7 +488,6 @@ OuterLoop:
mutateResponse := eng.Mutate(
context.Background(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
policyContext,
)
if mutateResponse != nil {
@@ -513,9 +515,7 @@ OuterLoop:
if policyHasValidate {
validateResponse = eng.Validate(
context.Background(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
policyContext,
cfg,
)
info = ProcessValidateEngineResponse(c.Policy, validateResponse, resPath, c.Rc, c.PolicyReport, c.AuditWarn)
}
@@ -526,10 +526,8 @@ OuterLoop:
verifyImageResponse, _ := eng.VerifyAndPatchImages(
context.Background(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
registryclient.NewOrDie(),
policyContext,
cfg,
)
if verifyImageResponse != nil && !verifyImageResponse.IsEmpty() {
engineResponses = append(engineResponses, verifyImageResponse)
@@ -544,8 +542,7 @@ OuterLoop:
}
if policyHasGenerate {
generateResponse := engine.ApplyBackgroundChecks(
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
generateResponse := eng.ApplyBackgroundChecks(
policyContext,
)
if generateResponse != nil && !generateResponse.IsEmpty() {
@@ -1080,7 +1077,10 @@ func initializeMockController(objects []runtime.Object) (*generate.GenerateContr
}
client.SetDiscovery(dclient.NewFakeDiscoveryClient(nil))
c := generate.NewGenerateControllerWithOnlyClient(client, engine.LegacyContextLoaderFactory(nil))
c := generate.NewGenerateControllerWithOnlyClient(client, engine.NewEngine(
config.NewDefaultConfiguration(),
engine.LegacyContextLoaderFactory(nil, nil),
))
return c, nil
}

View file

@@ -355,7 +355,10 @@ func main() {
kubeKyvernoInformer.Apps().V1().Deployments(),
certRenewer,
)
eng := engine.NewEgine()
eng := engine.NewEngine(
configuration,
engine.LegacyContextLoaderFactory(rclient, configMapResolver),
)
// create non leader controllers
nonLeaderControllers, nonLeaderBootstrap := createNonLeaderControllers(
eng,
@@ -476,14 +479,12 @@ func main() {
}
resourceHandlers := webhooksresource.NewHandlers(
eng,
engine.LegacyContextLoaderFactory(rclient),
dClient,
kyvernoClient,
rclient,
configuration,
metricsConfig,
policyCache,
configMapResolver,
kubeInformer.Core().V1().Namespaces().Lister(),
kubeInformer.Rbac().V1().RoleBindings().Lister(),
kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),

View file

@@ -132,7 +132,6 @@ func createReportControllers(
kyvernoClient,
rclient,
eng,
engine.LegacyContextLoaderFactory(rclient),
metadataFactory,
kyvernoV1.Policies(),
kyvernoV1.ClusterPolicies(),
@@ -302,7 +301,10 @@ func main() {
}
// start event generator
go eventGenerator.Run(ctx, 3)
eng := engine.NewEgine()
eng := engine.NewEngine(
configuration,
engine.LegacyContextLoaderFactory(rclient, configMapResolver),
)
// setup leader election
le, err := leaderelection.New(
logger.WithName("leader-election"),

View file

@@ -10,7 +10,6 @@ import (
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/engine/context"
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -20,7 +19,6 @@ func NewBackgroundContext(dclient dclient.Interface, ur *kyvernov1beta1.UpdateRe
policy kyvernov1.PolicyInterface,
trigger *unstructured.Unstructured,
cfg config.Configuration,
informerCacheResolvers engineapi.ConfigmapResolver,
namespaceLabels map[string]string,
logger logr.Logger,
) (*engine.PolicyContext, bool, error) {
@@ -85,8 +83,7 @@ func NewBackgroundContext(dclient dclient.Interface, ur *kyvernov1beta1.UpdateRe
WithAdmissionInfo(ur.Spec.Context.UserRequestInfo).
WithConfiguration(cfg).
WithNamespaceLabels(namespaceLabels).
WithClient(dclient).
WithInformerCacheResolver(informerCacheResolvers)
WithClient(dclient)
return policyContext, false, nil
}

View file

@@ -42,7 +42,7 @@ type GenerateController struct {
client dclient.Interface
kyvernoClient versioned.Interface
statusControl common.StatusControlInterface
contextLoader engineapi.ContextLoaderFactory
engine engineapi.Engine
// listers
urLister kyvernov1beta1listers.UpdateRequestNamespaceLister
@@ -50,9 +50,8 @@ type GenerateController struct {
policyLister kyvernov1listers.ClusterPolicyLister
npolicyLister kyvernov1listers.PolicyLister
configuration config.Configuration
informerCacheResolvers engineapi.ConfigmapResolver
eventGen event.Interface
configuration config.Configuration
eventGen event.Interface
log logr.Logger
}
@@ -62,29 +61,27 @@ func NewGenerateController(
client dclient.Interface,
kyvernoClient versioned.Interface,
statusControl common.StatusControlInterface,
contextLoader engineapi.ContextLoaderFactory,
engine engineapi.Engine,
policyLister kyvernov1listers.ClusterPolicyLister,
npolicyLister kyvernov1listers.PolicyLister,
urLister kyvernov1beta1listers.UpdateRequestNamespaceLister,
nsLister corev1listers.NamespaceLister,
dynamicConfig config.Configuration,
informerCacheResolvers engineapi.ConfigmapResolver,
eventGen event.Interface,
log logr.Logger,
) *GenerateController {
c := GenerateController{
client: client,
contextLoader: contextLoader,
kyvernoClient: kyvernoClient,
statusControl: statusControl,
policyLister: policyLister,
npolicyLister: npolicyLister,
urLister: urLister,
nsLister: nsLister,
configuration: dynamicConfig,
informerCacheResolvers: informerCacheResolvers,
eventGen: eventGen,
log: log,
client: client,
kyvernoClient: kyvernoClient,
statusControl: statusControl,
engine: engine,
policyLister: policyLister,
npolicyLister: npolicyLister,
urLister: urLister,
nsLister: nsLister,
configuration: dynamicConfig,
eventGen: eventGen,
log: log,
}
return &c
}
@@ -194,13 +191,13 @@ func (c *GenerateController) applyGenerate(resource unstructured.Unstructured, u
return nil, false, err
}
policyContext, precreatedResource, err := common.NewBackgroundContext(c.client, &ur, &policy, &resource, c.configuration, c.informerCacheResolvers, namespaceLabels, logger)
policyContext, precreatedResource, err := common.NewBackgroundContext(c.client, &ur, &policy, &resource, c.configuration, namespaceLabels, logger)
if err != nil {
return nil, precreatedResource, err
}
// check if the policy still applies to the resource
engineResponse := engine.GenerateResponse(c.contextLoader, policyContext, ur)
engineResponse := c.engine.GenerateResponse(policyContext, ur)
if len(engineResponse.PolicyResponse.Rules) == 0 {
logger.V(4).Info(doesNotApply)
return nil, false, errors.New(doesNotApply)
@@ -346,7 +343,7 @@ func (c *GenerateController) ApplyGeneratePolicy(log logr.Logger, policyContext
}
// add configmap json data to context
if err := engine.LoadContext(context.TODO(), c.contextLoader, rule.Context, policyContext, rule.Name); err != nil {
if err := c.engine.ContextLoader(policyContext, rule.Name).Load(context.TODO(), rule.Context, policyContext.JSONContext()); err != nil {
log.Error(err, "cannot add configmaps to context")
return nil, processExisting, err
}
@@ -828,10 +825,10 @@ func (c *GenerateController) ApplyResource(resource *unstructured.Unstructured)
}
// NewGenerateControllerWithOnlyClient returns an instance of Controller with only the client.
func NewGenerateControllerWithOnlyClient(client dclient.Interface, contextLoader engineapi.ContextLoaderFactory) *GenerateController {
func NewGenerateControllerWithOnlyClient(client dclient.Interface, engine engineapi.Engine) *GenerateController {
c := GenerateController{
client: client,
contextLoader: contextLoader,
client: client,
engine: engine,
}
return &c
}

View file

@@ -31,16 +31,14 @@ type MutateExistingController struct {
client dclient.Interface
statusControl common.StatusControlInterface
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
// listers
policyLister kyvernov1listers.ClusterPolicyLister
npolicyLister kyvernov1listers.PolicyLister
nsLister corev1listers.NamespaceLister
configuration config.Configuration
informerCacheResolvers engineapi.ConfigmapResolver
eventGen event.Interface
configuration config.Configuration
eventGen event.Interface
log logr.Logger
}
@@ -50,27 +48,23 @@ func NewMutateExistingController(
client dclient.Interface,
statusControl common.StatusControlInterface,
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
policyLister kyvernov1listers.ClusterPolicyLister,
npolicyLister kyvernov1listers.PolicyLister,
nsLister corev1listers.NamespaceLister,
dynamicConfig config.Configuration,
informerCacheResolvers engineapi.ConfigmapResolver,
eventGen event.Interface,
log logr.Logger,
) *MutateExistingController {
c := MutateExistingController{
client: client,
statusControl: statusControl,
engine: engine,
contextLoader: contextLoader,
policyLister: policyLister,
npolicyLister: npolicyLister,
nsLister: nsLister,
configuration: dynamicConfig,
informerCacheResolvers: informerCacheResolvers,
eventGen: eventGen,
log: log,
client: client,
statusControl: statusControl,
engine: engine,
policyLister: policyLister,
npolicyLister: npolicyLister,
nsLister: nsLister,
configuration: dynamicConfig,
eventGen: eventGen,
log: log,
}
return &c
}
@@ -98,14 +92,14 @@ func (c *MutateExistingController) ProcessUR(ur *kyvernov1beta1.UpdateRequest) e
}
namespaceLabels := engineutils.GetNamespaceSelectorsFromNamespaceLister(trigger.GetKind(), trigger.GetNamespace(), c.nsLister, logger)
policyContext, _, err := common.NewBackgroundContext(c.client, ur, policy, trigger, c.configuration, c.informerCacheResolvers, namespaceLabels, logger)
policyContext, _, err := common.NewBackgroundContext(c.client, ur, policy, trigger, c.configuration, namespaceLabels, logger)
if err != nil {
logger.WithName(rule.Name).Error(err, "failed to build policy context")
errs = append(errs, err)
continue
}
er := c.engine.Mutate(context.TODO(), c.contextLoader, policyContext)
er := c.engine.Mutate(context.TODO(), policyContext)
for _, r := range er.PolicyResponse.Rules {
patched := r.PatchedTarget
patchedTargetSubresourceName := r.PatchedTargetSubresourceName

View file

@@ -48,7 +48,6 @@ type controller struct {
client dclient.Interface
kyvernoClient versioned.Interface
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
// listers
cpolLister kyvernov1listers.ClusterPolicyLister
@@ -72,7 +71,6 @@ func NewController(
kyvernoClient versioned.Interface,
client dclient.Interface,
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
cpolInformer kyvernov1informers.ClusterPolicyInformer,
polInformer kyvernov1informers.PolicyInformer,
urInformer kyvernov1beta1informers.UpdateRequestInformer,
@@ -87,7 +85,6 @@ func NewController(
client: client,
kyvernoClient: kyvernoClient,
engine: engine,
contextLoader: contextLoader,
cpolLister: cpolInformer.Lister(),
polLister: polInformer.Lister(),
urLister: urLister,
@@ -421,10 +418,10 @@ func (c *controller) processUR(ur *kyvernov1beta1.UpdateRequest) error {
statusControl := common.NewStatusControl(c.kyvernoClient, c.urLister)
switch ur.Spec.Type {
case kyvernov1beta1.Mutate:
ctrl := mutate.NewMutateExistingController(c.client, statusControl, c.engine, c.contextLoader, c.cpolLister, c.polLister, c.nsLister, c.configuration, c.informerCacheResolvers, c.eventGen, logger)
ctrl := mutate.NewMutateExistingController(c.client, statusControl, c.engine, c.cpolLister, c.polLister, c.nsLister, c.configuration, c.eventGen, logger)
return ctrl.ProcessUR(ur)
case kyvernov1beta1.Generate:
ctrl := generate.NewGenerateController(c.client, c.kyvernoClient, statusControl, c.contextLoader, c.cpolLister, c.polLister, c.urLister, c.nsLister, c.configuration, c.informerCacheResolvers, c.eventGen, logger)
ctrl := generate.NewGenerateController(c.client, c.kyvernoClient, statusControl, c.engine, c.cpolLister, c.polLister, c.urLister, c.nsLister, c.configuration, c.eventGen, logger)
return ctrl.ProcessUR(ur)
}
return nil

View file

@@ -50,7 +50,6 @@ type controller struct {
kyvernoClient versioned.Interface
rclient registryclient.Client
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
// listers
polLister kyvernov1listers.PolicyLister
@@ -78,7 +77,6 @@ func NewController(
kyvernoClient versioned.Interface,
rclient registryclient.Client,
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
metadataFactory metadatainformers.SharedInformerFactory,
polInformer kyvernov1informers.PolicyInformer,
cpolInformer kyvernov1informers.ClusterPolicyInformer,
@@ -98,7 +96,6 @@ func NewController(
kyvernoClient: kyvernoClient,
rclient: rclient,
engine: engine,
contextLoader: contextLoader,
polLister: polInformer.Lister(),
cpolLister: cpolInformer.Lister(),
bgscanrLister: bgscanr.Lister(),
@@ -315,7 +312,7 @@ func (c *controller) reconcileReport(
// calculate necessary results
for _, policy := range backgroundPolicies {
if full || actual[reportutils.PolicyLabel(policy)] != policy.GetResourceVersion() {
scanner := utils.NewScanner(logger, c.engine, c.contextLoader, c.client, c.rclient, c.informerCacheResolvers, c.polexLister, c.config)
scanner := utils.NewScanner(logger, c.engine, c.client, c.rclient, c.polexLister, c.config)
for _, result := range scanner.ScanResource(ctx, *target, nsLabels, policy) {
if result.Error != nil {
return result.Error

View file

@@ -16,15 +16,13 @@ import (
)
type scanner struct {
logger logr.Logger
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
client dclient.Interface
rclient registryclient.Client
informerCacheResolvers engineapi.ConfigmapResolver
polexLister engine.PolicyExceptionLister
excludeGroupRole []string
config config.Configuration
logger logr.Logger
engine engineapi.Engine
client dclient.Interface
rclient registryclient.Client
polexLister engine.PolicyExceptionLister
excludeGroupRole []string
config config.Configuration
}
type ScanResult struct {
@@ -39,24 +37,20 @@ type Scanner interface {
func NewScanner(
logger logr.Logger,
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
client dclient.Interface,
rclient registryclient.Client,
informerCacheResolvers engineapi.ConfigmapResolver,
polexLister engine.PolicyExceptionLister,
config config.Configuration,
excludeGroupRole ...string,
) Scanner {
return &scanner{
logger: logger,
engine: engine,
contextLoader: contextLoader,
client: client,
rclient: rclient,
informerCacheResolvers: informerCacheResolvers,
polexLister: polexLister,
config: config,
excludeGroupRole: excludeGroupRole,
logger: logger,
engine: engine,
client: client,
rclient: rclient,
polexLister: polexLister,
config: config,
excludeGroupRole: excludeGroupRole,
}
}
@@ -107,9 +101,8 @@ func (s *scanner) validateResource(ctx context.Context, resource unstructured.Un
WithClient(s.client).
WithNamespaceLabels(nsLabels).
WithExcludeGroupRole(s.excludeGroupRole...).
WithInformerCacheResolver(s.informerCacheResolvers).
WithExceptions(s.polexLister)
return s.engine.Validate(ctx, s.contextLoader, policyCtx, s.config), nil
return s.engine.Validate(ctx, policyCtx), nil
}
func (s *scanner) validateImages(ctx context.Context, resource unstructured.Unstructured, nsLabels map[string]string, policy kyvernov1.PolicyInterface) (*engineapi.EngineResponse, error) {
@@ -132,9 +125,8 @@ func (s *scanner) validateImages(ctx context.Context, resource unstructured.Unst
WithClient(s.client).
WithNamespaceLabels(nsLabels).
WithExcludeGroupRole(s.excludeGroupRole...).
WithInformerCacheResolver(s.informerCacheResolvers).
WithExceptions(s.polexLister)
response, _ := s.engine.VerifyAndPatchImages(ctx, s.contextLoader, s.rclient, policyCtx, s.config)
response, _ := s.engine.VerifyAndPatchImages(ctx, s.rclient, policyCtx)
if len(response.PolicyResponse.Rules) > 0 {
s.logger.Info("validateImages", "policy", policy, "response", response)
}

View file

@@ -3,7 +3,7 @@ package api
import (
"context"
"github.com/kyverno/kyverno/pkg/config"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/registryclient"
)
@@ -11,24 +11,39 @@ type Engine interface {
// Validate applies validation rules from policy on the resource
Validate(
ctx context.Context,
contextLoader ContextLoaderFactory,
policyContext PolicyContext,
cfg config.Configuration,
) *EngineResponse
// Mutate performs mutation. Overlay first and then mutation patches
Mutate(
ctx context.Context,
contextLoader ContextLoaderFactory,
policyContext PolicyContext,
) *EngineResponse
// VerifyAndPatchImages ...
VerifyAndPatchImages(
ctx context.Context,
contextLoader ContextLoaderFactory,
rclient registryclient.Client,
policyContext PolicyContext,
cfg config.Configuration,
) (*EngineResponse, *ImageVerificationMetadata)
// ApplyBackgroundChecks checks for validity of generate and mutateExisting rules on the resource
// 1. validate variables to be substitute in the general ruleInfo (match,exclude,condition)
// - the caller has to check the ruleResponse to determine whether the path exist
//
// 2. returns the list of rules that are applicable on this policy and resource, if 1 succeed
ApplyBackgroundChecks(
policyContext PolicyContext,
) *EngineResponse
// GenerateResponse checks for validity of generate rule on the resource
GenerateResponse(
policyContext PolicyContext,
gr kyvernov1beta1.UpdateRequest,
) *EngineResponse
ContextLoader(
policyContext PolicyContext,
ruleName string,
) ContextLoader
}

View file

@@ -1,14 +1,11 @@
package api
import (
"context"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
kyvernov2alpha1 "github.com/kyverno/kyverno/api/kyverno/v2alpha1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
enginecontext "github.com/kyverno/kyverno/pkg/engine/context"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
@@ -41,5 +38,4 @@ type PolicyContext interface {
FindExceptions(rule string) ([]*kyvernov2alpha1.PolicyException, error)
ExcludeResourceFunc() ExcludeFunc
ResolveConfigMap(ctx context.Context, namespace string, name string) (*corev1.ConfigMap, error)
}

View file

@@ -17,7 +17,7 @@ import (
// - the caller has to check the ruleResponse to determine whether the path exist
//
// 2. returns the list of rules that are applicable on this policy and resource, if 1 succeed
func ApplyBackgroundChecks(
func doApplyBackgroundChecks(
contextLoader engineapi.ContextLoaderFactory,
policyContext engineapi.PolicyContext,
) (resp *engineapi.EngineResponse) {

View file

@@ -4,7 +4,7 @@ import (
"context"
"errors"
"github.com/kyverno/kyverno/pkg/engine/api"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@@ -15,7 +15,7 @@ type informerBasedResolver struct {
lister corev1listers.ConfigMapLister
}
func NewInformerBasedResolver(lister corev1listers.ConfigMapLister) (api.ConfigmapResolver, error) {
func NewInformerBasedResolver(lister corev1listers.ConfigMapLister) (engineapi.ConfigmapResolver, error) {
if lister == nil {
return nil, errors.New("lister must not be nil")
}
@@ -30,7 +30,7 @@ type clientBasedResolver struct {
kubeClient kubernetes.Interface
}
func NewClientBasedResolver(client kubernetes.Interface) (api.ConfigmapResolver, error) {
func NewClientBasedResolver(client kubernetes.Interface) (engineapi.ConfigmapResolver, error) {
if client == nil {
return nil, errors.New("client must not be nil")
}

View file

@@ -3,40 +3,65 @@ package engine
import (
"context"
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/config"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/registryclient"
)
type engine struct{}
type engine struct {
configuration config.Configuration
contextLoader engineapi.ContextLoaderFactory
}
func NewEgine() engineapi.Engine {
return &engine{}
func NewEngine(
configuration config.Configuration,
contextLoader engineapi.ContextLoaderFactory,
) engineapi.Engine {
return &engine{
configuration: configuration,
contextLoader: contextLoader,
}
}
func (e *engine) Validate(
ctx context.Context,
contextLoader engineapi.ContextLoaderFactory,
policyContext engineapi.PolicyContext,
cfg config.Configuration,
) *engineapi.EngineResponse {
return doValidate(ctx, contextLoader, policyContext, cfg)
return doValidate(ctx, e.contextLoader, policyContext, e.configuration)
}
func (e *engine) Mutate(
ctx context.Context,
contextLoader engineapi.ContextLoaderFactory,
policyContext engineapi.PolicyContext,
) *engineapi.EngineResponse {
return doMutate(ctx, contextLoader, policyContext)
return doMutate(ctx, e.contextLoader, policyContext)
}
func (e *engine) VerifyAndPatchImages(
ctx context.Context,
contextLoader engineapi.ContextLoaderFactory,
rclient registryclient.Client,
policyContext engineapi.PolicyContext,
cfg config.Configuration,
) (*engineapi.EngineResponse, *engineapi.ImageVerificationMetadata) {
return doVerifyAndPatchImages(ctx, contextLoader, rclient, policyContext, cfg)
return doVerifyAndPatchImages(ctx, e.contextLoader, rclient, policyContext, e.configuration)
}
func (e *engine) ApplyBackgroundChecks(
policyContext engineapi.PolicyContext,
) *engineapi.EngineResponse {
return doApplyBackgroundChecks(e.contextLoader, policyContext)
}
func (e *engine) GenerateResponse(
policyContext engineapi.PolicyContext,
gr kyvernov1beta1.UpdateRequest,
) *engineapi.EngineResponse {
return doGenerateResponse(e.contextLoader, policyContext, gr)
}
func (e *engine) ContextLoader(
policyContext engineapi.PolicyContext,
ruleName string,
) engineapi.ContextLoader {
return e.contextLoader(policyContext, ruleName)
}

View file

@@ -11,7 +11,7 @@ import (
)
// GenerateResponse checks for validity of generate rule on the resource
func GenerateResponse(
func doGenerateResponse(
contextLoader engineapi.ContextLoaderFactory,
policyContext engineapi.PolicyContext,
gr kyvernov1beta1.UpdateRequest,

View file

@@ -163,12 +163,13 @@ var cfg = config.NewDefaultConfiguration()
func testVerifyAndPatchImages(
ctx context.Context,
rclient registryclient.Client,
cmResolver engineapi.ConfigmapResolver,
pContext engineapi.PolicyContext,
cfg config.Configuration,
) (*engineapi.EngineResponse, *engineapi.ImageVerificationMetadata) {
return doVerifyAndPatchImages(
ctx,
LegacyContextLoaderFactory(rclient),
LegacyContextLoaderFactory(rclient, cmResolver),
rclient,
pContext,
cfg,
@@ -181,7 +182,7 @@ func Test_CosignMockAttest(t *testing.T) {
err := cosign.SetMock("ghcr.io/jimbugwadia/pause2:latest", attestationPayloads)
assert.NilError(t, err)
er, ivm := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
er, ivm := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(er.PolicyResponse.Rules), 1)
assert.Equal(t, er.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass,
fmt.Sprintf("expected: %v, got: %v, failure: %v",
@@ -195,7 +196,7 @@ func Test_CosignMockAttest_fail(t *testing.T) {
err := cosign.SetMock("ghcr.io/jimbugwadia/pause2:latest", attestationPayloads)
assert.NilError(t, err)
er, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
er, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(er.PolicyResponse.Rules), 1)
assert.Equal(t, er.PolicyResponse.Rules[0].Status, engineapi.RuleStatusFail)
}
@@ -444,7 +445,7 @@ var (
func Test_ConfigMapMissingSuccess(t *testing.T) {
policyContext := buildContext(t, testConfigMapMissing, testConfigMapMissingResource, "")
cosign.ClearMock()
err, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
err, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(err.PolicyResponse.Rules), 1)
assert.Equal(t, err.PolicyResponse.Rules[0].Status, engineapi.RuleStatusSkip, err.PolicyResponse.Rules[0].Message)
}
@@ -454,9 +455,8 @@ func Test_ConfigMapMissingFailure(t *testing.T) {
policyContext := buildContext(t, testConfigMapMissing, ghcrImage, "")
resolver, err := resolvers.NewClientBasedResolver(kubefake.NewSimpleClientset())
assert.NilError(t, err)
policyContext.informerCacheResolvers = resolver
cosign.ClearMock()
resp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
resp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), resolver, policyContext, cfg)
assert.Equal(t, len(resp.PolicyResponse.Rules), 1)
assert.Equal(t, resp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusError, resp.PolicyResponse.Rules[0].Message)
}
@@ -465,7 +465,7 @@ func Test_SignatureGoodSigned(t *testing.T) {
policyContext := buildContext(t, testSampleSingleKeyPolicy, testSampleResource, "")
policyContext.policy.GetSpec().Rules[0].VerifyImages[0].MutateDigest = true
cosign.ClearMock()
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(engineResp.PolicyResponse.Rules), 1)
assert.Equal(t, engineResp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass, engineResp.PolicyResponse.Rules[0].Message)
assert.Equal(t, len(engineResp.PolicyResponse.Rules[0].Patches), 1)
@@ -477,7 +477,7 @@ func Test_SignatureUnsigned(t *testing.T) {
cosign.ClearMock()
unsigned := strings.Replace(testSampleResource, ":signed", ":unsigned", -1)
policyContext := buildContext(t, testSampleSingleKeyPolicy, unsigned, "")
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(engineResp.PolicyResponse.Rules), 1)
assert.Equal(t, engineResp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusFail, engineResp.PolicyResponse.Rules[0].Message)
}
@@ -486,7 +486,7 @@ func Test_SignatureWrongKey(t *testing.T) {
cosign.ClearMock()
otherKey := strings.Replace(testSampleResource, ":signed", ":signed-by-someone-else", -1)
policyContext := buildContext(t, testSampleSingleKeyPolicy, otherKey, "")
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(engineResp.PolicyResponse.Rules), 1)
assert.Equal(t, engineResp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusFail, engineResp.PolicyResponse.Rules[0].Message)
}
@@ -497,7 +497,7 @@ func Test_SignaturesMultiKey(t *testing.T) {
policy = strings.Replace(policy, "KEY2", testVerifyImageKey, -1)
policy = strings.Replace(policy, "COUNT", "0", -1)
policyContext := buildContext(t, policy, testSampleResource, "")
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(engineResp.PolicyResponse.Rules), 1)
assert.Equal(t, engineResp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass, engineResp.PolicyResponse.Rules[0].Message)
}
@@ -507,7 +507,7 @@ func Test_SignaturesMultiKeyFail(t *testing.T) {
policy := strings.Replace(testSampleMultipleKeyPolicy, "KEY1", testVerifyImageKey, -1)
policy = strings.Replace(policy, "COUNT", "0", -1)
policyContext := buildContext(t, policy, testSampleResource, "")
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(engineResp.PolicyResponse.Rules), 1)
assert.Equal(t, engineResp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusFail, engineResp.PolicyResponse.Rules[0].Message)
}
@@ -518,7 +518,7 @@ func Test_SignaturesMultiKeyOneGoodKey(t *testing.T) {
policy = strings.Replace(policy, "KEY2", testOtherKey, -1)
policy = strings.Replace(policy, "COUNT", "1", -1)
policyContext := buildContext(t, policy, testSampleResource, "")
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(engineResp.PolicyResponse.Rules), 1)
assert.Equal(t, engineResp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass, engineResp.PolicyResponse.Rules[0].Message)
}
@@ -529,7 +529,7 @@ func Test_SignaturesMultiKeyZeroGoodKey(t *testing.T) {
policy = strings.Replace(policy, "KEY2", testOtherKey, -1)
policy = strings.Replace(policy, "COUNT", "1", -1)
policyContext := buildContext(t, policy, testSampleResource, "")
resp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
resp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(resp.PolicyResponse.Rules), 1)
assert.Equal(t, resp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusFail, resp.PolicyResponse.Rules[0].Message)
}
@ -545,14 +545,14 @@ func Test_RuleSelectorImageVerify(t *testing.T) {
applyAll := kyverno.ApplyAll
spec.ApplyRules = &applyAll
resp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
resp, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(resp.PolicyResponse.Rules), 2)
assert.Equal(t, resp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass, resp.PolicyResponse.Rules[0].Message)
assert.Equal(t, resp.PolicyResponse.Rules[1].Status, engineapi.RuleStatusFail, resp.PolicyResponse.Rules[1].Message)
applyOne := kyverno.ApplyOne
spec.ApplyRules = &applyOne
resp, _ = testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
resp, _ = testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(resp.PolicyResponse.Rules), 1)
assert.Equal(t, resp.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass, resp.PolicyResponse.Rules[0].Message)
}
@ -656,7 +656,7 @@ func Test_NestedAttestors(t *testing.T) {
policy = strings.Replace(policy, "KEY2", testVerifyImageKey, -1)
policy = strings.Replace(policy, "COUNT", "0", -1)
policyContext := buildContext(t, policy, testSampleResource, "")
err, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
err, _ := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(err.PolicyResponse.Rules), 1)
assert.Equal(t, err.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass)
@ -664,7 +664,7 @@ func Test_NestedAttestors(t *testing.T) {
policy = strings.Replace(policy, "KEY2", testOtherKey, -1)
policy = strings.Replace(policy, "COUNT", "0", -1)
policyContext = buildContext(t, policy, testSampleResource, "")
err, _ = testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
err, _ = testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(err.PolicyResponse.Rules), 1)
assert.Equal(t, err.PolicyResponse.Rules[0].Status, engineapi.RuleStatusFail)
@ -672,7 +672,7 @@ func Test_NestedAttestors(t *testing.T) {
policy = strings.Replace(policy, "KEY2", testOtherKey, -1)
policy = strings.Replace(policy, "COUNT", "1", -1)
policyContext = buildContext(t, policy, testSampleResource, "")
err, _ = testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
err, _ = testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Equal(t, len(err.PolicyResponse.Rules), 1)
assert.Equal(t, err.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass)
}
@ -765,7 +765,7 @@ func Test_MarkImageVerified(t *testing.T) {
err := cosign.SetMock(image, attestationPayloads)
assert.NilError(t, err)
engineResponse, verifiedImages := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResponse, verifiedImages := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Assert(t, engineResponse != nil)
assert.Equal(t, len(engineResponse.PolicyResponse.Rules), 1)
assert.Equal(t, engineResponse.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass)
@ -858,7 +858,7 @@ func Test_ParsePEMDelimited(t *testing.T) {
err := cosign.SetMock(image, signaturePayloads)
assert.NilError(t, err)
engineResponse, verifiedImages := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), policyContext, cfg)
engineResponse, verifiedImages := testVerifyAndPatchImages(context.TODO(), registryclient.NewOrDie(), nil, policyContext, cfg)
assert.Assert(t, engineResponse != nil)
assert.Equal(t, len(engineResponse.PolicyResponse.Rules), 1)
assert.Equal(t, engineResponse.PolicyResponse.Rules[0].Status, engineapi.RuleStatusPass)

View file

@ -16,10 +16,12 @@ import (
"github.com/kyverno/kyverno/pkg/engine/variables"
"github.com/kyverno/kyverno/pkg/logging"
"github.com/kyverno/kyverno/pkg/registryclient"
corev1 "k8s.io/api/core/v1"
)
func LegacyContextLoaderFactory(rclient registryclient.Client) engineapi.ContextLoaderFactory {
func LegacyContextLoaderFactory(
rclient registryclient.Client,
cmResolver engineapi.ConfigmapResolver,
) engineapi.ContextLoaderFactory {
if store.IsMock() {
return func(pContext engineapi.PolicyContext, ruleName string) engineapi.ContextLoader {
policy := pContext.Policy()
@ -29,7 +31,7 @@ func LegacyContextLoaderFactory(rclient registryclient.Client) engineapi.Context
ruleName: ruleName,
client: pContext.Client(),
rclient: rclient,
cmResolver: pContext.ResolveConfigMap,
cmResolver: cmResolver,
}
}
}
@ -38,7 +40,7 @@ func LegacyContextLoaderFactory(rclient registryclient.Client) engineapi.Context
logger: logging.WithName("LegacyContextLoaderFactory"),
client: pContext.Client(),
rclient: rclient,
cmResolver: pContext.ResolveConfigMap,
cmResolver: cmResolver,
}
}
}
@ -47,7 +49,7 @@ type contextLoader struct {
logger logr.Logger
rclient registryclient.Client
client dclient.Interface
cmResolver func(context.Context, string, string) (*corev1.ConfigMap, error)
cmResolver engineapi.ConfigmapResolver
}
func (l *contextLoader) Load(ctx context.Context, contextEntries []kyvernov1.ContextEntry, enginectx enginecontext.Interface) error {
@ -79,7 +81,7 @@ type mockContextLoader struct {
ruleName string
rclient registryclient.Client
client dclient.Interface
cmResolver func(context.Context, string, string) (*corev1.ConfigMap, error)
cmResolver engineapi.ConfigmapResolver
}
func (l *mockContextLoader) Load(ctx context.Context, contextEntries []kyvernov1.ContextEntry, enginectx enginecontext.Interface) error {
@ -298,7 +300,7 @@ func applyJMESPath(jmesPath string, data interface{}) (interface{}, error) {
return jp.Search(data)
}
func loadConfigMap(ctx context.Context, logger logr.Logger, entry kyvernov1.ContextEntry, enginectx enginecontext.Interface, resolver func(context.Context, string, string) (*corev1.ConfigMap, error)) error {
func loadConfigMap(ctx context.Context, logger logr.Logger, entry kyvernov1.ContextEntry, enginectx enginecontext.Interface, resolver engineapi.ConfigmapResolver) error {
data, err := fetchConfigMap(ctx, logger, entry, enginectx, resolver)
if err != nil {
return fmt.Errorf("failed to retrieve config map for context entry %s: %v", entry.Name, err)
@ -310,7 +312,7 @@ func loadConfigMap(ctx context.Context, logger logr.Logger, entry kyvernov1.Cont
return nil
}
func fetchConfigMap(ctx context.Context, logger logr.Logger, entry kyvernov1.ContextEntry, enginectx enginecontext.Interface, resolver func(context.Context, string, string) (*corev1.ConfigMap, error)) ([]byte, error) {
func fetchConfigMap(ctx context.Context, logger logr.Logger, entry kyvernov1.ContextEntry, enginectx enginecontext.Interface, resolver engineapi.ConfigmapResolver) ([]byte, error) {
contextData := make(map[string]interface{})
name, err := variables.SubstituteAll(logger, enginectx, entry.ConfigMap.Name)
@ -327,7 +329,7 @@ func fetchConfigMap(ctx context.Context, logger logr.Logger, entry kyvernov1.Con
namespace = "default"
}
obj, err := resolver(ctx, namespace.(string), name.(string))
obj, err := resolver.Get(ctx, namespace.(string), name.(string))
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s/%s : %v", namespace, name, err)
}

View file

@ -27,7 +27,7 @@ func testMutate(
) *engineapi.EngineResponse {
return doMutate(
ctx,
LegacyContextLoaderFactory(rclient),
LegacyContextLoaderFactory(rclient, nil),
pContext,
)
}

View file

@ -1,7 +1,6 @@
package engine
import (
"context"
"fmt"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
@ -13,7 +12,6 @@ import (
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
admissionv1 "k8s.io/api/admission/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
@ -70,9 +68,6 @@ type PolicyContext struct {
// admissionOperation represents if the caller is from the webhook server
admissionOperation bool
// informerCacheResolvers - used to get resources from informer cache
informerCacheResolvers engineapi.ConfigmapResolver
// subresource is the subresource being requested, if any (for example, "status" or "scale")
subresource string
@ -172,10 +167,6 @@ func (c *PolicyContext) ExcludeResourceFunc() engineapi.ExcludeFunc {
return c.excludeResourceFunc
}
func (c *PolicyContext) ResolveConfigMap(ctx context.Context, namespace string, name string) (*corev1.ConfigMap, error) {
return c.informerCacheResolvers.Get(ctx, namespace, name)
}
// Mutators
func (c *PolicyContext) WithPolicy(policy kyvernov1.PolicyInterface) *PolicyContext {
@ -246,12 +237,6 @@ func (c *PolicyContext) WithAdmissionOperation(admissionOperation bool) *PolicyC
return copy
}
func (c *PolicyContext) WithInformerCacheResolver(informerCacheResolver engineapi.ConfigmapResolver) *PolicyContext {
copy := c.copy()
copy.informerCacheResolvers = informerCacheResolver
return copy
}
func (c *PolicyContext) WithSubresource(subresource string) *PolicyContext {
copy := c.copy()
copy.subresource = subresource
@ -294,7 +279,6 @@ func NewPolicyContextFromAdmissionRequest(
admissionInfo kyvernov1beta1.RequestInfo,
configuration config.Configuration,
client dclient.Interface,
informerCacheResolver engineapi.ConfigmapResolver,
polexLister PolicyExceptionLister,
) (*PolicyContext, error) {
ctx, err := newVariablesContext(request, &admissionInfo)
@ -316,7 +300,6 @@ func NewPolicyContextFromAdmissionRequest(
WithConfiguration(configuration).
WithClient(client).
WithAdmissionOperation(true).
WithInformerCacheResolver(informerCacheResolver).
WithRequestResource(*requestResource).
WithSubresource(request.SubResource).
WithExceptions(polexLister)

View file

@ -22,7 +22,7 @@ import (
func testValidate(ctx context.Context, rclient registryclient.Client, pContext *PolicyContext, cfg config.Configuration) *engineapi.EngineResponse {
return doValidate(
ctx,
LegacyContextLoaderFactory(rclient),
LegacyContextLoaderFactory(rclient, nil),
pContext,
cfg,
)

View file

@ -21,7 +21,6 @@ import (
kyvernov1beta1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/metrics"
@ -57,7 +56,7 @@ const (
type PolicyController struct {
client dclient.Interface
kyvernoClient versioned.Interface
contextLoader engineapi.ContextLoaderFactory
engine engineapi.Engine
pInformer kyvernov1informers.ClusterPolicyInformer
npInformer kyvernov1informers.PolicyInformer
@ -98,7 +97,7 @@ type PolicyController struct {
func NewPolicyController(
kyvernoClient versioned.Interface,
client dclient.Interface,
contextLoader engineapi.ContextLoaderFactory,
engine engineapi.Engine,
pInformer kyvernov1informers.ClusterPolicyInformer,
npInformer kyvernov1informers.PolicyInformer,
urInformer kyvernov1beta1informers.UpdateRequestInformer,
@ -119,7 +118,7 @@ func NewPolicyController(
pc := PolicyController{
client: client,
kyvernoClient: kyvernoClient,
contextLoader: contextLoader,
engine: engine,
pInformer: pInformer,
npInformer: npInformer,
eventGen: eventGen,
@ -509,12 +508,12 @@ func (pc *PolicyController) updateUR(policyKey string, policy kyvernov1.PolicyIn
func (pc *PolicyController) handleUpdateRequest(ur *kyvernov1beta1.UpdateRequest, triggerResource *unstructured.Unstructured, rule kyvernov1.Rule, policy kyvernov1.PolicyInterface) (skip bool, err error) {
namespaceLabels := engineutils.GetNamespaceSelectorsFromNamespaceLister(triggerResource.GetKind(), triggerResource.GetNamespace(), pc.nsLister, pc.log)
policyContext, _, err := backgroundcommon.NewBackgroundContext(pc.client, ur, policy, triggerResource, pc.configHandler, pc.informerCacheResolvers, namespaceLabels, pc.log)
policyContext, _, err := backgroundcommon.NewBackgroundContext(pc.client, ur, policy, triggerResource, pc.configHandler, namespaceLabels, pc.log)
if err != nil {
return false, fmt.Errorf("failed to build policy context for rule %s: %w", rule.Name, err)
}
engineResponse := engine.ApplyBackgroundChecks(pc.contextLoader, policyContext)
engineResponse := pc.engine.ApplyBackgroundChecks(policyContext)
if len(engineResponse.PolicyResponse.Rules) == 0 {
return true, nil
}

View file

@ -146,10 +146,9 @@ func runTestCase(t *testing.T, tc TestCase) bool {
}
policyContext := engine.NewPolicyContext().WithPolicy(policy).WithNewResource(*resource)
eng := engine.NewEgine()
eng := engine.NewEngine(config.NewDefaultConfiguration(), engine.LegacyContextLoaderFactory(registryclient.NewOrDie(), nil))
er := eng.Mutate(
context.TODO(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
policyContext,
)
t.Log("---Mutation---")
@ -163,12 +162,9 @@ func runTestCase(t *testing.T, tc TestCase) bool {
policyContext = policyContext.WithNewResource(*resource)
cfg := config.NewDefaultConfiguration()
er = eng.Validate(
context.TODO(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
policyContext,
cfg,
)
t.Log("---Validation---")
validateResponse(t, er.PolicyResponse, tc.Expected.Validation.PolicyResponse)
@ -185,8 +181,7 @@ func runTestCase(t *testing.T, tc TestCase) bool {
} else {
policyContext := policyContext.WithClient(client)
er = engine.ApplyBackgroundChecks(
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
er = eng.ApplyBackgroundChecks(
policyContext,
)
t.Log(("---Generation---"))

View file

@ -54,9 +54,8 @@ func NewFakeHandlers(ctx context.Context, policyCache policycache.Cache) webhook
urGenerator: updaterequest.NewFake(),
eventGen: event.NewFake(),
openApiManager: openapi.NewFake(),
pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, dclient, rbLister, crbLister, configMapResolver, peLister),
pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, dclient, rbLister, crbLister, peLister),
urUpdater: webhookutils.NewUpdateRequestUpdater(kyvernoclient, urLister),
engine: engine.NewEgine(),
contextLoader: engine.LegacyContextLoaderFactory(rclient),
engine: engine.NewEngine(configuration, engine.LegacyContextLoaderFactory(rclient, configMapResolver)),
}
}

View file

@ -37,9 +37,9 @@ type GenerationHandler interface {
func NewGenerationHandler(
log logr.Logger,
engine engineapi.Engine,
client dclient.Interface,
kyvernoClient versioned.Interface,
contextLoader engineapi.ContextLoaderFactory,
nsLister corev1listers.NamespaceLister,
urLister kyvernov1beta1listers.UpdateRequestNamespaceLister,
urGenerator webhookgenerate.Generator,
@ -49,9 +49,9 @@ func NewGenerationHandler(
) GenerationHandler {
return &generationHandler{
log: log,
engine: engine,
client: client,
kyvernoClient: kyvernoClient,
contextLoader: contextLoader,
nsLister: nsLister,
urLister: urLister,
urGenerator: urGenerator,
@ -63,9 +63,9 @@ func NewGenerationHandler(
type generationHandler struct {
log logr.Logger
engine engineapi.Engine
client dclient.Interface
kyvernoClient versioned.Interface
contextLoader engineapi.ContextLoaderFactory
nsLister corev1listers.NamespaceLister
urLister kyvernov1beta1listers.UpdateRequestNamespaceLister
urGenerator webhookgenerate.Generator
@ -92,7 +92,7 @@ func (h *generationHandler) Handle(
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
policyContext = policyContext.WithNamespaceLabels(engineutils.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, h.log))
}
engineResponse := engine.ApplyBackgroundChecks(h.contextLoader, policyContext)
engineResponse := h.engine.ApplyBackgroundChecks(policyContext)
for _, rule := range engineResponse.PolicyResponse.Rules {
if rule.Status != engineapi.RuleStatusPass {
h.deleteGR(ctx, engineResponse)

View file

@ -42,7 +42,6 @@ type handlers struct {
kyvernoClient versioned.Interface
rclient registryclient.Client
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
// config
configuration config.Configuration
@ -69,14 +68,12 @@ type handlers struct {
func NewHandlers(
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
client dclient.Interface,
kyvernoClient versioned.Interface,
rclient registryclient.Client,
configuration config.Configuration,
metricsConfig metrics.MetricsConfigManager,
pCache policycache.Cache,
informerCacheResolvers engineapi.ConfigmapResolver,
nsLister corev1listers.NamespaceLister,
rbLister rbacv1listers.RoleBindingLister,
crbLister rbacv1listers.ClusterRoleBindingLister,
@ -89,7 +86,6 @@ func NewHandlers(
) webhooks.ResourceHandlers {
return &handlers{
engine: engine,
contextLoader: contextLoader,
client: client,
kyvernoClient: kyvernoClient,
rclient: rclient,
@ -104,7 +100,7 @@ func NewHandlers(
urGenerator: urGenerator,
eventGen: eventGen,
openApiManager: openApiManager,
pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, client, rbLister, crbLister, informerCacheResolvers, polexLister),
pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, client, rbLister, crbLister, polexLister),
urUpdater: webhookutils.NewUpdateRequestUpdater(kyvernoClient, urLister),
admissionReports: admissionReports,
}
@ -127,7 +123,7 @@ func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *ad
}
if len(generatePolicies) == 0 && request.Operation == admissionv1.Update {
// handle generate source resource updates
gh := generation.NewGenerationHandler(logger, h.client, h.kyvernoClient, h.contextLoader, h.nsLister, h.urLister, h.urGenerator, h.urUpdater, h.eventGen, h.metricsConfig)
gh := generation.NewGenerationHandler(logger, h.engine, h.client, h.kyvernoClient, h.nsLister, h.urLister, h.urGenerator, h.urUpdater, h.eventGen, h.metricsConfig)
go gh.HandleUpdatesForGenerateRules(context.TODO(), request, []kyvernov1.PolicyInterface{})
}
@ -143,7 +139,7 @@ func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *ad
namespaceLabels = engineutils.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, logger)
}
policyContext = policyContext.WithNamespaceLabels(namespaceLabels)
vh := validation.NewValidationHandler(logger, h.kyvernoClient, h.engine, h.contextLoader, h.pCache, h.pcBuilder, h.eventGen, h.admissionReports, h.metricsConfig, h.configuration)
vh := validation.NewValidationHandler(logger, h.kyvernoClient, h.engine, h.pCache, h.pcBuilder, h.eventGen, h.admissionReports, h.metricsConfig, h.configuration)
ok, msg, warnings := vh.HandleValidation(ctx, request, policies, policyContext, startTime)
if !ok {
@ -177,7 +173,7 @@ func (h *handlers) Mutate(ctx context.Context, logger logr.Logger, request *admi
if err := enginectx.MutateResourceWithImageInfo(request.Object.Raw, policyContext.JSONContext()); err != nil {
logger.Error(err, "failed to patch images info to resource, policies that mutate images may be impacted")
}
mh := mutation.NewMutationHandler(logger, h.engine, h.contextLoader, h.eventGen, h.openApiManager, h.nsLister, h.metricsConfig)
mh := mutation.NewMutationHandler(logger, h.engine, h.eventGen, h.openApiManager, h.nsLister, h.metricsConfig)
mutatePatches, mutateWarnings, err := mh.HandleMutation(ctx, request, mutatePolicies, policyContext, startTime)
if err != nil {
logger.Error(err, "mutation failed")
@ -190,7 +186,7 @@ func (h *handlers) Mutate(ctx context.Context, logger logr.Logger, request *admi
logger.Error(err, "failed to build policy context")
return admissionutils.Response(request.UID, err)
}
ivh := imageverification.NewImageVerificationHandler(logger, h.kyvernoClient, h.engine, h.contextLoader, h.rclient, h.eventGen, h.admissionReports, h.configuration)
ivh := imageverification.NewImageVerificationHandler(logger, h.kyvernoClient, h.engine, h.rclient, h.eventGen, h.admissionReports, h.configuration)
imagePatches, imageVerifyWarnings, err := ivh.Handle(ctx, newRequest, verifyImagesPolicies, policyContext)
if err != nil {
logger.Error(err, "image verification failed")

View file

@ -33,7 +33,6 @@ type ImageVerificationHandler interface {
type imageVerificationHandler struct {
kyvernoClient versioned.Interface
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
rclient registryclient.Client
log logr.Logger
eventGen event.Interface
@ -45,7 +44,6 @@ func NewImageVerificationHandler(
log logr.Logger,
kyvernoClient versioned.Interface,
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
rclient registryclient.Client,
eventGen event.Interface,
admissionReports bool,
@ -54,7 +52,6 @@ func NewImageVerificationHandler(
return &imageVerificationHandler{
kyvernoClient: kyvernoClient,
engine: engine,
contextLoader: contextLoader,
rclient: rclient,
log: log,
eventGen: eventGen,
@ -97,7 +94,7 @@ func (h *imageVerificationHandler) handleVerifyImages(
fmt.Sprintf("POLICY %s/%s", policy.GetNamespace(), policy.GetName()),
func(ctx context.Context, span trace.Span) {
policyContext := policyContext.WithPolicy(policy)
resp, ivm := h.engine.VerifyAndPatchImages(ctx, h.contextLoader, h.rclient, policyContext, h.cfg)
resp, ivm := h.engine.VerifyAndPatchImages(ctx, h.rclient, policyContext)
engineResponses = append(engineResponses, resp)
patches = append(patches, resp.GetPatches()...)

View file

@ -35,7 +35,6 @@ type MutationHandler interface {
func NewMutationHandler(
log logr.Logger,
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
eventGen event.Interface,
openApiManager openapi.ValidateInterface,
nsLister corev1listers.NamespaceLister,
@ -44,7 +43,6 @@ func NewMutationHandler(
return &mutationHandler{
log: log,
engine: engine,
contextLoader: contextLoader,
eventGen: eventGen,
openApiManager: openApiManager,
nsLister: nsLister,
@ -55,7 +53,6 @@ func NewMutationHandler(
type mutationHandler struct {
log logr.Logger
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
eventGen event.Interface
openApiManager openapi.ValidateInterface
nsLister corev1listers.NamespaceLister
@ -159,7 +156,7 @@ func (h *mutationHandler) applyMutation(ctx context.Context, request *admissionv
policyContext = policyContext.WithNamespaceLabels(engineutils.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, h.log))
}
engineResponse := h.engine.Mutate(ctx, h.contextLoader, policyContext)
engineResponse := h.engine.Mutate(ctx, policyContext)
policyPatches := engineResponse.GetPatches()
if !engineResponse.IsSuccessful() {

View file

@ -18,7 +18,7 @@ import (
// createUpdateRequests applies generate and mutateExisting policies, and creates update requests for background reconcile
func (h *handlers) createUpdateRequests(logger logr.Logger, request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, generatePolicies, mutatePolicies []kyvernov1.PolicyInterface, ts time.Time) {
gh := generation.NewGenerationHandler(logger, h.client, h.kyvernoClient, h.contextLoader, h.nsLister, h.urLister, h.urGenerator, h.urUpdater, h.eventGen, h.metricsConfig)
gh := generation.NewGenerationHandler(logger, h.engine, h.client, h.kyvernoClient, h.nsLister, h.urLister, h.urGenerator, h.urUpdater, h.eventGen, h.metricsConfig)
go h.handleMutateExisting(context.TODO(), logger, request, mutatePolicies, policyContext, ts)
go gh.Handle(context.TODO(), request, generatePolicies, policyContext, ts)
}
@ -43,7 +43,7 @@ func (h *handlers) handleMutateExisting(ctx context.Context, logger logr.Logger,
var rules []engineapi.RuleResponse
policyContext := policyContext.WithPolicy(policy)
engineResponse := engine.ApplyBackgroundChecks(h.contextLoader, policyContext)
engineResponse := h.engine.ApplyBackgroundChecks(policyContext)
for _, rule := range engineResponse.PolicyResponse.Rules {
if rule.Status == engineapi.RuleStatusPass {

View file

@ -38,7 +38,6 @@ func NewValidationHandler(
log logr.Logger,
kyvernoClient versioned.Interface,
engine engineapi.Engine,
contextLoader engineapi.ContextLoaderFactory,
pCache policycache.Cache,
pcBuilder webhookutils.PolicyContextBuilder,
eventGen event.Interface,
@ -50,7 +49,6 @@ func NewValidationHandler(
log: log,
kyvernoClient: kyvernoClient,
engine: engine,
contextLoader: contextLoader,
pCache: pCache,
pcBuilder: pcBuilder,
eventGen: eventGen,
@ -64,7 +62,6 @@ type validationHandler struct {
log logr.Logger
kyvernoClient versioned.Interface
engine engineapi.Engine
contextLoader engineapi.ContextLoaderFactory
pCache policycache.Cache
pcBuilder webhookutils.PolicyContextBuilder
eventGen event.Interface
@ -109,7 +106,7 @@ func (v *validationHandler) HandleValidation(
failurePolicy = kyvernov1.Fail
}
engineResponse := v.engine.Validate(ctx, v.contextLoader, policyContext, v.cfg)
engineResponse := v.engine.Validate(ctx, policyContext)
if engineResponse.IsNil() {
// we get an empty response if old and new resources created the same response
// allow updates if resource update doesnt change the policy evaluation
@ -167,7 +164,7 @@ func (v *validationHandler) buildAuditResponses(
fmt.Sprintf("POLICY %s/%s", policy.GetNamespace(), policy.GetName()),
func(ctx context.Context, span trace.Span) {
policyContext := policyContext.WithPolicy(policy)
responses = append(responses, v.engine.Validate(ctx, v.contextLoader, policyContext, v.cfg))
responses = append(responses, v.engine.Validate(ctx, policyContext))
},
)
}

View file

@ -1049,8 +1049,10 @@ func TestValidate_failure_action_overrides(t *testing.T) {
},
}
cfg := config.NewDefaultConfiguration()
eng := engine.NewEgine()
eng := engine.NewEngine(
config.NewDefaultConfiguration(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie(), nil),
)
for i, tc := range testcases {
t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) {
var policy kyvernov1.ClusterPolicy
@ -1062,9 +1064,7 @@ func TestValidate_failure_action_overrides(t *testing.T) {
ctx := engine.NewPolicyContext().WithPolicy(&policy).WithNewResource(*resourceUnstructured).WithNamespaceLabels(tc.rawResourceNamespaceLabels)
er := eng.Validate(
context.TODO(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
ctx,
cfg,
)
if tc.blocked && tc.messages != nil {
for _, r := range er.PolicyResponse.Rules {
@ -1125,13 +1125,13 @@ func Test_RuleSelector(t *testing.T) {
ctx := engine.NewPolicyContext().WithPolicy(&policy).WithNewResource(*resourceUnstructured)
cfg := config.NewDefaultConfiguration()
eng := engine.NewEgine()
eng := engine.NewEngine(
config.NewDefaultConfiguration(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie(), nil),
)
resp := eng.Validate(
context.TODO(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
ctx,
cfg,
)
assert.Assert(t, resp.PolicyResponse.RulesAppliedCount == 2)
assert.Assert(t, resp.PolicyResponse.RulesErrorCount == 0)
@ -1144,9 +1144,7 @@ func Test_RuleSelector(t *testing.T) {
policy.Spec.ApplyRules = &applyOne
resp = eng.Validate(
context.TODO(),
engine.LegacyContextLoaderFactory(registryclient.NewOrDie()),
ctx,
cfg,
)
assert.Assert(t, resp.PolicyResponse.RulesAppliedCount == 1)
assert.Assert(t, resp.PolicyResponse.RulesErrorCount == 0)

View file

@ -7,7 +7,6 @@ import (
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
"github.com/kyverno/kyverno/pkg/userinfo"
admissionv1 "k8s.io/api/admission/v1"
rbacv1listers "k8s.io/client-go/listers/rbac/v1"
@ -18,12 +17,11 @@ type PolicyContextBuilder interface {
}
type policyContextBuilder struct {
configuration config.Configuration
client dclient.Interface
rbLister rbacv1listers.RoleBindingLister
crbLister rbacv1listers.ClusterRoleBindingLister
informerCacheResolvers engineapi.ConfigmapResolver
polexLister engine.PolicyExceptionLister
configuration config.Configuration
client dclient.Interface
rbLister rbacv1listers.RoleBindingLister
crbLister rbacv1listers.ClusterRoleBindingLister
polexLister engine.PolicyExceptionLister
}
func NewPolicyContextBuilder(
@ -31,16 +29,14 @@ func NewPolicyContextBuilder(
client dclient.Interface,
rbLister rbacv1listers.RoleBindingLister,
crbLister rbacv1listers.ClusterRoleBindingLister,
informerCacheResolvers engineapi.ConfigmapResolver,
polexLister engine.PolicyExceptionLister,
) PolicyContextBuilder {
return &policyContextBuilder{
configuration: configuration,
client: client,
rbLister: rbLister,
crbLister: crbLister,
informerCacheResolvers: informerCacheResolvers,
polexLister: polexLister,
configuration: configuration,
client: client,
rbLister: rbLister,
crbLister: crbLister,
polexLister: polexLister,
}
}
@ -54,5 +50,5 @@ func (b *policyContextBuilder) Build(request *admissionv1.AdmissionRequest) (*en
userRequestInfo.Roles = roles
userRequestInfo.ClusterRoles = clusterRoles
}
return engine.NewPolicyContextFromAdmissionRequest(request, userRequestInfo, b.configuration, b.client, b.informerCacheResolvers, b.polexLister)
return engine.NewPolicyContextFromAdmissionRequest(request, userRequestInfo, b.configuration, b.client, b.polexLister)
}