
issue-4613: Add support for cache enhancements with informers (#5484)

Signed-off-by: Pratik Shah <pratik@infracloud.io>
Pratik Shah 2022-12-02 19:29:51 +05:30 committed by GitHub
parent bd0b9389c8
commit f8ed1a9301
13 changed files with 485 additions and 26 deletions


@@ -33,6 +33,7 @@ import (
resourcereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/resource"
webhookcontroller "github.com/kyverno/kyverno/pkg/controllers/webhook"
"github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
event "github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/leaderelection"
"github.com/kyverno/kyverno/pkg/logging"
@@ -407,6 +408,26 @@ func main() {
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, resyncPeriod)
cacheInformer, err := resolvers.GetCacheInformerFactory(kubeClient, resyncPeriod)
if err != nil {
logger.Error(err, "failed to create cache informer factory")
os.Exit(1)
}
informerBasedResolver, err := resolvers.NewInformerBasedResolver(cacheInformer.Core().V1().ConfigMaps().Lister())
if err != nil {
logger.Error(err, "failed to create informer based resolver")
os.Exit(1)
}
clientBasedResolver, err := resolvers.NewClientBasedResolver(kubeClient)
if err != nil {
logger.Error(err, "failed to create client based resolver")
os.Exit(1)
}
configMapResolver, err := resolvers.NewResolverChain(informerBasedResolver, clientBasedResolver)
if err != nil {
logger.Error(err, "failed to create config map resolver")
os.Exit(1)
}
configuration, err := config.NewConfiguration(kubeClient)
if err != nil {
logger.Error(err, "failed to initialize configuration")
@@ -459,7 +480,7 @@ func main() {
openApiManager,
)
// start informers and wait for cache sync
-if !internal.StartInformersAndWaitForCacheSync(signalCtx, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
+if !internal.StartInformersAndWaitForCacheSync(signalCtx, kyvernoInformer, kubeInformer, kubeKyvernoInformer, cacheInformer) {
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}
@@ -575,6 +596,7 @@ func main() {
configuration,
metricsConfig,
policyCache,
configMapResolver,
kubeInformer.Core().V1().Namespaces().Lister(),
kubeInformer.Rbac().V1().RoleBindings().Lister(),
kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),
@@ -607,7 +629,7 @@
)
// start informers and wait for cache sync
// we need to call start again because we potentially registered new informers
-if !internal.StartInformersAndWaitForCacheSync(signalCtx, kyvernoInformer, kubeInformer, kubeKyvernoInformer) {
+if !internal.StartInformersAndWaitForCacheSync(signalCtx, kyvernoInformer, kubeInformer, kubeKyvernoInformer, cacheInformer) {
logger.Error(errors.New("failed to wait for cache sync"), "failed to wait for cache sync")
os.Exit(1)
}
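The new cacheInformer factory must be started and synced along with the existing factories; otherwise the informer-based resolver would read from an empty cache and every lookup would fall through to the API server. A minimal sketch of what such a start-and-sync helper does with plain client-go (illustrative only, not the actual body of internal.StartInformersAndWaitForCacheSync):

import (
	"context"

	kubeinformers "k8s.io/client-go/informers"
)

// startAndSync starts each factory, then blocks until every registered
// informer reports a synced cache; it returns false if any cache fails.
func startAndSync(ctx context.Context, factories ...kubeinformers.SharedInformerFactory) bool {
	for _, f := range factories {
		f.Start(ctx.Done())
	}
	for _, f := range factories {
		for _, synced := range f.WaitForCacheSync(ctx.Done()) {
			if !synced {
				return false
			}
		}
	}
	return true
}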


@@ -0,0 +1,7 @@
package resolvers
const (
TEST_NAMESPACE = "default"
TEST_CONFIGMAP = "myconfigmap"
LabelCacheKey = "cache.kyverno.io/enabled"
)


@@ -0,0 +1,70 @@
package resolvers
import (
"context"
"errors"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
corev1listers "k8s.io/client-go/listers/core/v1"
)
type informerBasedResolver struct {
lister corev1listers.ConfigMapLister
}
func NewInformerBasedResolver(lister corev1listers.ConfigMapLister) (ConfigmapResolver, error) {
if lister == nil {
return nil, errors.New("lister must not be nil")
}
return &informerBasedResolver{lister}, nil
}
func (i *informerBasedResolver) Get(ctx context.Context, namespace, name string) (*corev1.ConfigMap, error) {
return i.lister.ConfigMaps(namespace).Get(name)
}
type clientBasedResolver struct {
kubeClient kubernetes.Interface
}
func NewClientBasedResolver(client kubernetes.Interface) (ConfigmapResolver, error) {
if client == nil {
return nil, errors.New("client must not be nil")
}
return &clientBasedResolver{client}, nil
}
func (c *clientBasedResolver) Get(ctx context.Context, namespace, name string) (*corev1.ConfigMap, error) {
return c.kubeClient.CoreV1().ConfigMaps(namespace).Get(ctx, name, metav1.GetOptions{})
}
type resolverChain []ConfigmapResolver
func NewResolverChain(resolvers ...ConfigmapResolver) (ConfigmapResolver, error) {
if len(resolvers) == 0 {
return nil, errors.New("no resolvers")
}
for _, resolver := range resolvers {
if resolver == nil {
return nil, errors.New("at least one resolver is nil")
}
}
return resolverChain(resolvers), nil
}
func (chain resolverChain) Get(ctx context.Context, namespace, name string) (*corev1.ConfigMap, error) {
// try each resolver in order: if the ConfigMap is not found in the
// informer cache, keep the error in lastErr and fall through to the
// next resolver in the chain (ultimately the Kubernetes client)
var lastErr error
for _, resolver := range chain {
cm, err := resolver.Get(ctx, namespace, name)
if err == nil {
return cm, nil
}
lastErr = err
}
return nil, lastErr
}
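Putting this file together, a condensed usage sketch built only from the constructors above plus GetCacheInformerFactory from this PR (the fake clientset stands in for a real cluster):

import (
	"context"
	"time"

	kubefake "k8s.io/client-go/kubernetes/fake"
)

func exampleChain(ctx context.Context) error {
	client := kubefake.NewSimpleClientset()
	// Informer-backed resolver, served from the label-filtered cache.
	factory, err := GetCacheInformerFactory(client, 15*time.Minute)
	if err != nil {
		return err
	}
	informerResolver, err := NewInformerBasedResolver(factory.Core().V1().ConfigMaps().Lister())
	if err != nil {
		return err
	}
	// Client-backed resolver, a direct API-server fallback.
	clientResolver, err := NewClientBasedResolver(client)
	if err != nil {
		return err
	}
	chain, err := NewResolverChain(informerResolver, clientResolver)
	if err != nil {
		return err
	}
	factory.Start(ctx.Done())
	factory.WaitForCacheSync(ctx.Done())
	// A cache miss (unlabeled or not-yet-synced ConfigMap) falls through
	// to the client-based resolver.
	_, err = chain.Get(ctx, "default", "myconfigmap")
	return err
}

The ordering matters: the informer-based resolver comes first so steady-state lookups avoid API-server round trips, and the client-based resolver only absorbs cache misses.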


@@ -0,0 +1,230 @@
package resolvers
import (
"context"
"reflect"
"testing"
"time"
"gotest.tools/assert"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
kubefake "k8s.io/client-go/kubernetes/fake"
corev1listers "k8s.io/client-go/listers/core/v1"
)
func newEmptyFakeClient() *kubefake.Clientset {
return kubefake.NewSimpleClientset()
}
func createConfigMaps(ctx context.Context, client *kubefake.Clientset, addLabel bool) error {
cm := &corev1.ConfigMap{
ObjectMeta: metav1.ObjectMeta{
Name: TEST_CONFIGMAP,
Namespace: TEST_NAMESPACE,
},
Data: map[string]string{"configmapkey": "key1"},
}
if addLabel {
cm.ObjectMeta.Labels = map[string]string{LabelCacheKey: "true"}
}
_, err := client.CoreV1().ConfigMaps(TEST_NAMESPACE).Create(
ctx, cm, metav1.CreateOptions{})
return err
}
func initialiseInformer(client *kubefake.Clientset) kubeinformers.SharedInformerFactory {
selector, err := GetCacheSelector()
if err != nil {
return nil
}
labelOptions := kubeinformers.WithTweakListOptions(func(opts *metav1.ListOptions) {
opts.LabelSelector = selector.String()
})
kubeResourceInformer := kubeinformers.NewSharedInformerFactoryWithOptions(client, 15*time.Minute, labelOptions)
return kubeResourceInformer
}
func Test_InformerCacheSuccess(t *testing.T) {
client := newEmptyFakeClient()
ctx := context.TODO()
err := createConfigMaps(ctx, client, true)
assert.NilError(t, err, "error while creating configmap")
informer := initialiseInformer(client)
informerResolver, err := NewInformerBasedResolver(informer.Core().V1().ConfigMaps().Lister())
assert.NilError(t, err)
informer.Start(make(<-chan struct{}))
time.Sleep(10 * time.Second)
_, err = informerResolver.Get(ctx, TEST_NAMESPACE, TEST_CONFIGMAP)
assert.NilError(t, err, "informer didn't have expected configmap")
}
func Test_InformerCacheFailure(t *testing.T) {
client := newEmptyFakeClient()
ctx := context.TODO()
err := createConfigMaps(ctx, client, false)
assert.NilError(t, err, "error while creating configmap")
informer := initialiseInformer(client)
resolver, err := NewInformerBasedResolver(informer.Core().V1().ConfigMaps().Lister())
assert.NilError(t, err)
informer.Start(make(<-chan struct{}))
time.Sleep(10 * time.Second)
_, err = resolver.Get(ctx, TEST_NAMESPACE, TEST_CONFIGMAP)
assert.Equal(t, err.Error(), "configmap \"myconfigmap\" not found")
}
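Both informer-backed tests above pay a fixed 10-second sleep after Start. A sketch of an equivalent wait that returns as soon as the cache is ready, using client-go's built-in sync reporting (a suggested alternative, not what this commit ships):

import (
	"testing"

	kubeinformers "k8s.io/client-go/informers"
)

// waitForSync starts the factory and blocks until every registered
// informer reports synced, instead of sleeping for a fixed duration.
func waitForSync(t *testing.T, factory kubeinformers.SharedInformerFactory) {
	t.Helper()
	stopCh := make(chan struct{})
	t.Cleanup(func() { close(stopCh) })
	factory.Start(stopCh)
	for informerType, synced := range factory.WaitForCacheSync(stopCh) {
		if !synced {
			t.Fatalf("cache failed to sync: %v", informerType)
		}
	}
}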
func Test_ClientBasedResolver(t *testing.T) {
client := newEmptyFakeClient()
ctx := context.TODO()
err := createConfigMaps(ctx, client, false)
assert.NilError(t, err, "error while creating configmap")
resolver, err := NewClientBasedResolver(client)
assert.NilError(t, err)
_, err = resolver.Get(ctx, TEST_NAMESPACE, TEST_CONFIGMAP)
assert.NilError(t, err, "error while getting configmap from client")
}
func Test_ResolverChainWithExistingConfigMap(t *testing.T) {
client := newEmptyFakeClient()
informer := initialiseInformer(client)
lister := informer.Core().V1().ConfigMaps().Lister()
informerBasedResolver, err := NewInformerBasedResolver(lister)
assert.NilError(t, err)
clientBasedResolver, err := NewClientBasedResolver(client)
assert.NilError(t, err)
resolvers, err := NewResolverChain(informerBasedResolver, clientBasedResolver)
assert.NilError(t, err)
ctx := context.TODO()
err = createConfigMaps(ctx, client, true)
assert.NilError(t, err, "error while creating configmap")
_, err = resolvers.Get(ctx, TEST_NAMESPACE, TEST_CONFIGMAP)
assert.NilError(t, err, "error while getting configmap")
}
func Test_ResolverChainWithNonExistingConfigMap(t *testing.T) {
client := newEmptyFakeClient()
informer := initialiseInformer(client)
lister := informer.Core().V1().ConfigMaps().Lister()
informerBasedResolver, err := NewInformerBasedResolver(lister)
assert.NilError(t, err)
clientBasedResolver, err := NewClientBasedResolver(client)
assert.NilError(t, err)
resolvers, err := NewResolverChain(informerBasedResolver, clientBasedResolver)
assert.NilError(t, err)
ctx := context.TODO()
_, err = resolvers.Get(ctx, TEST_NAMESPACE, TEST_CONFIGMAP)
assert.Error(t, err, "configmaps \"myconfigmap\" not found")
}
func TestNewInformerBasedResolver(t *testing.T) {
type args struct {
lister corev1listers.ConfigMapLister
}
client := newEmptyFakeClient()
informer := initialiseInformer(client)
lister := informer.Core().V1().ConfigMaps().Lister()
tests := []struct {
name string
args args
want ConfigmapResolver
wantErr bool
}{{
name: "nil shoud return an error",
wantErr: true,
}, {
name: "not nil",
args: args{lister},
want: &informerBasedResolver{lister},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := NewInformerBasedResolver(tt.args.lister)
if (err != nil) != tt.wantErr {
t.Errorf("NewInformerBasedResolver() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewInformerBasedResolver() = %v, want %v", got, tt.want)
}
})
}
}
func TestNewClientBasedResolver(t *testing.T) {
type args struct {
client kubernetes.Interface
}
client := newEmptyFakeClient()
tests := []struct {
name string
args args
want ConfigmapResolver
wantErr bool
}{{
name: "nil shoud return an error",
wantErr: true,
}, {
name: "not nil",
args: args{client},
want: &clientBasedResolver{client},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := NewClientBasedResolver(tt.args.client)
if (err != nil) != tt.wantErr {
t.Errorf("NewClientBasedResolver() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewClientBasedResolver() = %v, want %v", got, tt.want)
}
})
}
}
type dummyResolver struct{}
func (c dummyResolver) Get(context.Context, string, string) (*corev1.ConfigMap, error) {
return nil, nil
}
func TestNewResolverChain(t *testing.T) {
type args struct {
resolvers []ConfigmapResolver
}
tests := []struct {
name string
args args
want ConfigmapResolver
wantErr bool
}{{
name: "nil shoud return an error",
wantErr: true,
}, {
name: "empty list shoud return an error",
args: args{[]ConfigmapResolver{}},
wantErr: true,
}, {
name: "one nil in the list shoud return an error",
args: args{[]ConfigmapResolver{dummyResolver{}, nil}},
wantErr: true,
}, {
name: "no nil",
args: args{[]ConfigmapResolver{dummyResolver{}, dummyResolver{}, dummyResolver{}}},
want: resolverChain{dummyResolver{}, dummyResolver{}, dummyResolver{}},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := NewResolverChain(tt.args.resolvers...)
if (err != nil) != tt.wantErr {
t.Errorf("NewResolverChain() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("NewResolverChain() = %v, want %v", got, tt.want)
}
})
}
}


@@ -0,0 +1,13 @@
package resolvers
import (
"context"
corev1 "k8s.io/api/core/v1"
)
type NamespacedResourceResolver[T any] interface {
Get(context.Context, string, string) (T, error)
}
type ConfigmapResolver = NamespacedResourceResolver[*corev1.ConfigMap]
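NamespacedResourceResolver is generic over the resolved type, so the same chain machinery can later be instantiated for other namespaced resources. As a hypothetical illustration (not part of this change), a Secret counterpart would look like:

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	corev1listers "k8s.io/client-go/listers/core/v1"
)

// Hypothetical: the Secret analogue of ConfigmapResolver.
type SecretResolver = NamespacedResourceResolver[*corev1.Secret]

// A lister-backed implementation mirroring informerBasedResolver.
type informerBasedSecretResolver struct {
	lister corev1listers.SecretLister
}

func (r *informerBasedSecretResolver) Get(ctx context.Context, namespace, name string) (*corev1.Secret, error) {
	return r.lister.Secrets(namespace).Get(name)
}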


@@ -0,0 +1,38 @@
package resolvers
import (
"errors"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
)
func GetCacheSelector() (labels.Selector, error) {
selector := labels.Everything()
requirement, err := labels.NewRequirement(LabelCacheKey, selection.Exists, nil)
if err != nil {
return nil, err
}
return selector.Add(*requirement), err
}
func GetCacheInformerFactory(client kubernetes.Interface, resyncPeriod time.Duration) (kubeinformers.SharedInformerFactory, error) {
if client == nil {
return nil, errors.New("client cannot be nil")
}
selector, err := GetCacheSelector()
if err != nil {
return nil, err
}
return kubeinformers.NewSharedInformerFactoryWithOptions(
client,
resyncPeriod,
kubeinformers.WithTweakListOptions(func(opts *metav1.ListOptions) {
opts.LabelSelector = selector.String()
}),
), nil
}
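The requirement uses selection.Exists, so only the presence of the cache.kyverno.io/enabled key matters, not its value; the factory therefore watches exactly the opted-in ConfigMaps cluster-wide. A small sketch of the expected selector behavior:

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func demoSelector() {
	selector, _ := GetCacheSelector()
	fmt.Println(selector.String()) // cache.kyverno.io/enabled

	// Any value passes, as long as the key is present.
	fmt.Println(selector.Matches(labels.Set{"cache.kyverno.io/enabled": "true"})) // true
	fmt.Println(selector.Matches(labels.Set{"cache.kyverno.io/enabled": "no"}))   // true
	fmt.Println(selector.Matches(labels.Set{}))                                   // false
}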


@@ -0,0 +1,58 @@
package resolvers
import (
"reflect"
"testing"
"time"
"k8s.io/client-go/kubernetes"
)
func TestGetCacheSelector(t *testing.T) {
tests := []struct {
name string
want string
wantErr bool
}{{
name: "ok",
want: LabelCacheKey,
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := GetCacheSelector()
if (err != nil) != tt.wantErr {
t.Errorf("GetCacheSelector() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got.String(), tt.want) {
t.Errorf("GetCacheSelector() = %v, want %v", got, tt.want)
}
})
}
}
func TestGetCacheInformerFactory(t *testing.T) {
tests := []struct {
name string
want string
client kubernetes.Interface
wantErr bool
}{{
name: "nil client",
wantErr: true,
client: nil,
}, {
name: "ok",
wantErr: false,
client: newEmptyFakeClient(),
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
_, err := GetCacheInformerFactory(tt.client, 10*time.Minute)
if (err != nil) != tt.wantErr {
t.Errorf("GetCacheInformerFactor() error = %v, wantErr %v", err, tt.wantErr)
return
}
})
}
}


@@ -8,11 +8,12 @@ import (
"github.com/kyverno/kyverno/pkg/logging"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
kubefake "k8s.io/client-go/kubernetes/fake"
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
client "github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/cosign"
"github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/engine/utils"
"gotest.tools/assert"
@@ -418,11 +419,13 @@ func Test_ConfigMapMissingSuccess(t *testing.T) {
func Test_ConfigMapMissingFailure(t *testing.T) {
ghcrImage := strings.Replace(testConfigMapMissingResource, "nginx:latest", "ghcr.io/kyverno/test-verify-image:signed", -1)
policyContext := buildContext(t, testConfigMapMissing, ghcrImage, "")
-policyContext.client = client.NewEmptyFakeClient()
+resolver, err := resolvers.NewClientBasedResolver(kubefake.NewSimpleClientset())
+assert.NilError(t, err)
+policyContext.informerCacheResolvers = resolver
cosign.ClearMock()
-err, _ := VerifyAndPatchImages(policyContext)
-assert.Equal(t, len(err.PolicyResponse.Rules), 1)
-assert.Equal(t, err.PolicyResponse.Rules[0].Status, response.RuleStatusError, err.PolicyResponse.Rules[0].Message)
+resp, _ := VerifyAndPatchImages(policyContext)
+assert.Equal(t, len(resp.PolicyResponse.Rules), 1)
+assert.Equal(t, resp.PolicyResponse.Rules[0].Status, response.RuleStatusError, resp.PolicyResponse.Rules[0].Message)
}
func Test_SignatureGoodSigned(t *testing.T) {


@@ -351,16 +351,14 @@ func fetchConfigMap(logger logr.Logger, entry kyvernov1.ContextEntry, ctx *Polic
namespace = "default"
}
-obj, err := ctx.client.GetResource(context.TODO(), "v1", "ConfigMap", namespace.(string), name.(string))
+obj, err := ctx.informerCacheResolvers.Get(context.TODO(), namespace.(string), name.(string))
if err != nil {
return nil, fmt.Errorf("failed to get configmap %s/%s : %v", namespace, name, err)
}
-unstructuredObj := obj.DeepCopy().Object
// extract configmap data
-contextData["data"] = unstructuredObj["data"]
-contextData["metadata"] = unstructuredObj["metadata"]
+contextData["data"] = obj.Data
+contextData["metadata"] = obj.ObjectMeta
data, err := json.Marshal(contextData)
if err != nil {
return nil, fmt.Errorf("failed to unmarshal configmap %s/%s: %v", namespace, name, err)
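With the typed resolver, the context entry is built from ConfigMap.Data (a map[string]string) and the typed ObjectMeta instead of raw unstructured fields, and the marshalled JSON keeps the shape policies already consume. An illustrative sketch (output approximate and trimmed):

import (
	"encoding/json"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func demoContextData() {
	cm := corev1.ConfigMap{
		ObjectMeta: metav1.ObjectMeta{Name: "myconfigmap", Namespace: "default"},
		Data:       map[string]string{"configmapkey": "key1"},
	}
	contextData := map[string]interface{}{
		"data":     cm.Data,
		"metadata": cm.ObjectMeta,
	}
	out, _ := json.Marshal(contextData)
	fmt.Println(string(out))
	// roughly: {"data":{"configmapkey":"key1"},"metadata":{"name":"myconfigmap","namespace":"default",...}}
}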


@@ -7,6 +7,7 @@ import (
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine/context"
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/utils"
"github.com/pkg/errors"
admissionv1 "k8s.io/api/admission/v1"
@@ -49,6 +50,9 @@ type PolicyContext struct {
// admissionOperation represents if the caller is from the webhook server
admissionOperation bool
// informerCacheResolvers - used to get resources from informer cache
informerCacheResolvers resolvers.ConfigmapResolver
}
// Getters
@@ -137,6 +141,12 @@ func (c *PolicyContext) WithAdmissionOperation(admissionOperation bool) *PolicyC
return copy
}
func (c *PolicyContext) WithInformerCacheResolver(informerCacheResolver resolvers.ConfigmapResolver) *PolicyContext {
copy := c.Copy()
copy.informerCacheResolvers = informerCacheResolver
return copy
}
// Constructors
func NewPolicyContextWithJsonContext(jsonContext context.Interface) *PolicyContext {
@@ -158,6 +168,7 @@ func NewPolicyContextFromAdmissionRequest(
admissionInfo kyvernov1beta1.RequestInfo,
configuration config.Configuration,
client dclient.Interface,
informerCacheResolver resolvers.ConfigmapResolver,
) (*PolicyContext, error) {
ctx, err := newVariablesContext(request, &admissionInfo)
if err != nil {
@@ -176,7 +187,8 @@ func NewPolicyContextFromAdmissionRequest(
WithAdmissionInfo(admissionInfo).
WithConfiguration(configuration).
WithClient(client).
-WithAdmissionOperation(true)
+WithAdmissionOperation(true).
+WithInformerCacheResolver(informerCacheResolver)
return policyContext, nil
}


@@ -7,6 +7,7 @@ import (
kyvernoinformers "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/openapi"
@@ -14,7 +15,7 @@ import (
"github.com/kyverno/kyverno/pkg/webhooks"
"github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
"k8s.io/client-go/informers"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes/fake"
)
@@ -22,11 +23,12 @@ func NewFakeHandlers(ctx context.Context, policyCache policycache.Cache) webhook
client := fake.NewSimpleClientset()
metricsConfig := metrics.NewFakeMetricsConfig()
-informers := informers.NewSharedInformerFactory(client, 0)
+informers := kubeinformers.NewSharedInformerFactory(client, 0)
informers.Start(ctx.Done())
kyvernoclient := fakekyvernov1.NewSimpleClientset()
kyvernoInformers := kyvernoinformers.NewSharedInformerFactory(kyvernoclient, 0)
configMapResolver, _ := resolvers.NewClientBasedResolver(client)
kyvernoInformers.Start(ctx.Done())
dclient := dclient.NewEmptyFakeClient()
@@ -47,7 +49,7 @@ func NewFakeHandlers(ctx context.Context, policyCache policycache.Cache) webhook
urGenerator: updaterequest.NewFake(),
eventGen: event.NewFake(),
openApiManager: openapi.NewFake(),
-pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, dclient, rbLister, crbLister),
+pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, dclient, rbLister, crbLister, configMapResolver),
urUpdater: webhookutils.NewUpdateRequestUpdater(kyvernoclient, urLister),
}
}


@@ -14,6 +14,7 @@ import (
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/config"
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
engineutils2 "github.com/kyverno/kyverno/pkg/engine/utils"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/metrics"
@@ -66,6 +67,7 @@ func NewHandlers(
configuration config.Configuration,
metricsConfig metrics.MetricsConfigManager,
pCache policycache.Cache,
informerCacheResolvers resolvers.ConfigmapResolver,
nsLister corev1listers.NamespaceLister,
rbLister rbacv1listers.RoleBindingLister,
crbLister rbacv1listers.ClusterRoleBindingLister,
@@ -88,7 +90,7 @@
urGenerator: urGenerator,
eventGen: eventGen,
openApiManager: openApiManager,
-pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, client, rbLister, crbLister),
+pcBuilder: webhookutils.NewPolicyContextBuilder(configuration, client, rbLister, crbLister, informerCacheResolvers),
urUpdater: webhookutils.NewUpdateRequestUpdater(kyvernoClient, urLister),
admissionReports: admissionReports,
}


@@ -5,6 +5,7 @@ import (
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/userinfo"
"github.com/pkg/errors"
admissionv1 "k8s.io/api/admission/v1"
@@ -20,6 +21,7 @@ type policyContextBuilder struct {
client dclient.Interface
rbLister rbacv1listers.RoleBindingLister
crbLister rbacv1listers.ClusterRoleBindingLister
informerCacheResolvers resolvers.ConfigmapResolver
}
func NewPolicyContextBuilder(
@@ -27,12 +29,14 @@ func NewPolicyContextBuilder(
client dclient.Interface,
rbLister rbacv1listers.RoleBindingLister,
crbLister rbacv1listers.ClusterRoleBindingLister,
informerCacheResolvers resolvers.ConfigmapResolver,
) PolicyContextBuilder {
return &policyContextBuilder{
configuration: configuration,
client: client,
rbLister: rbLister,
crbLister: crbLister,
informerCacheResolvers: informerCacheResolvers,
}
}
@@ -46,5 +50,5 @@ func (b *policyContextBuilder) Build(request *admissionv1.AdmissionRequest) (*en
userRequestInfo.Roles = roles
userRequestInfo.ClusterRoles = clusterRoles
}
-return engine.NewPolicyContextFromAdmissionRequest(request, userRequestInfo, b.configuration, b.client)
+return engine.NewPolicyContextFromAdmissionRequest(request, userRequestInfo, b.configuration, b.client, b.informerCacheResolvers)
}