1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-28 02:18:15 +00:00

refactor: separate policy cache and controller (#3925)

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
This commit is contained in:
Charles-Edouard Brétéché 2022-05-16 18:36:19 +02:00 committed by GitHub
parent c112aaefa1
commit 53adf904d6
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
7 changed files with 293 additions and 215 deletions

View file

@ -20,6 +20,7 @@ import (
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/controllers/certmanager"
configcontroller "github.com/kyverno/kyverno/pkg/controllers/config"
policycachecontroller "github.com/kyverno/kyverno/pkg/controllers/policycache"
"github.com/kyverno/kyverno/pkg/cosign"
dclient "github.com/kyverno/kyverno/pkg/dclient"
event "github.com/kyverno/kyverno/pkg/event"
@ -228,7 +229,7 @@ func main() {
setupLog.Error(err, "failed to initialize configuration")
os.Exit(1)
}
configurationController := configcontroller.NewController(kubeKyvernoInformer.Core().V1().ConfigMaps(), configuration)
configurationController := configcontroller.NewController(configuration, kubeKyvernoInformer.Core().V1().ConfigMaps())
metricsConfigData, err := config.NewMetricsConfigData(kubeClient)
if err != nil {
@ -317,10 +318,11 @@ func main() {
os.Exit(1)
}
pCacheController := policycache.NewCache(kyvernoV1.ClusterPolicies(), kyvernoV1.Policies())
policyCache := policycache.NewCache()
policyCacheController := policycachecontroller.NewController(policyCache, kyvernoV1.ClusterPolicies(), kyvernoV1.Policies())
auditHandler := webhooksresource.NewValidateAuditHandler(
pCacheController,
policyCache,
eventGenerator,
reportReqGen,
kubeInformer.Rbac().V1().RoleBindings(),
@ -413,7 +415,7 @@ func main() {
kyvernoClient,
configuration,
promConfig,
pCacheController,
policyCache,
kubeInformer.Core().V1().Namespaces().Lister(),
kubeInformer.Rbac().V1().RoleBindings().Lister(),
kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),
@ -467,10 +469,15 @@ func main() {
startInformersAndWaitForCacheSync(stopCh, kyvernoInformer, kubeInformer, kubeKyvernoInformer)
pCacheController.CheckPolicySync(stopCh)
// warmup policy cache
if err := policyCacheController.WarmUp(); err != nil {
setupLog.Error(err, "Failed to warm up policy cache")
os.Exit(1)
}
// init events handlers
// start Kyverno controllers
go policyCacheController.Run(stopCh)
go urc.Run(genWorkers, stopCh)
go le.Run(ctx)
go reportReqGen.Run(2, stopCh)

View file

@ -30,7 +30,7 @@ type controller struct {
queue workqueue.RateLimitingInterface
}
func NewController(configmapInformer corev1informers.ConfigMapInformer, configuration config.Configuration) *controller {
func NewController(configuration config.Configuration, configmapInformer corev1informers.ConfigMapInformer) *controller {
c := controller{
configuration: configuration,
configmapLister: configmapInformer.Lister(),
@ -73,13 +73,13 @@ func (c *controller) handleErr(err error, key interface{}) {
if err == nil {
c.queue.Forget(key)
} else if errors.IsNotFound(err) {
logger.V(4).Info("Dropping update request from the queue", "key", key, "error", err.Error())
logger.V(4).Info("Dropping request from the queue", "key", key, "error", err.Error())
c.queue.Forget(key)
} else if c.queue.NumRequeues(key) < maxRetries {
logger.V(3).Info("retrying update request", "key", key, "error", err.Error())
logger.V(3).Info("Retrying request", "key", key, "error", err.Error())
c.queue.AddRateLimited(key)
} else {
logger.Error(err, "failed to process update request", "key", key)
logger.Error(err, "Failed to process request", "key", key)
c.queue.Forget(key)
}
}
@ -100,7 +100,7 @@ func (c *controller) worker() {
func (c *controller) Run(stopCh <-chan struct{}) {
defer runtime.HandleCrash()
logger.Info("start")
logger.Info("starting ...")
defer logger.Info("shutting down")
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)

View file

@ -0,0 +1,167 @@
package policycache
import (
"time"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1informer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernov1lister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
pcache "github.com/kyverno/kyverno/pkg/policycache"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
// maxRetries is how many times a reconcile for a key is retried before the key is dropped.
maxRetries = 10
// workers is the number of concurrent reconcile goroutines started by Run.
workers = 3
)
// controller keeps a policy cache in sync with ClusterPolicy and Policy resources.
type controller struct {
// cache is the policy cache this controller populates and maintains.
cache pcache.Cache
// listers
cpolLister kyvernov1lister.ClusterPolicyLister
polLister kyvernov1lister.PolicyLister
// queue
queue workqueue.RateLimitingInterface
}
// NewController builds a policy cache controller wired to the given cluster
// policy and namespaced policy informers. Add/update/delete events from
// either informer enqueue the affected policy key for reconciliation.
func NewController(pcache pcache.Cache, cpolInformer kyvernov1informer.ClusterPolicyInformer, polInformer kyvernov1informer.PolicyInformer) *controller {
	ctrl := &controller{
		cache:      pcache,
		cpolLister: cpolInformer.Lister(),
		polLister:  polInformer.Lister(),
		queue:      workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policycache-controller"),
	}
	// both informers funnel their events into the same work queue
	handlers := cache.ResourceEventHandlerFuncs{
		AddFunc:    ctrl.add,
		UpdateFunc: ctrl.update,
		DeleteFunc: ctrl.delete,
	}
	cpolInformer.Informer().AddEventHandler(handlers)
	polInformer.Informer().AddEventHandler(handlers)
	return ctrl
}
// add enqueues a newly created policy object.
func (c *controller) add(obj interface{}) {
c.enqueue(obj)
}
// update enqueues the current version of a modified policy object.
func (c *controller) update(_, cur interface{}) {
c.enqueue(cur)
}
// delete enqueues a deleted policy object, unwrapping a tombstone if the
// informer delivered one instead of the object itself.
func (c *controller) delete(obj interface{}) {
c.enqueue(kubeutils.GetObjectWithTombstone(obj))
}
// enqueue adds the namespace/name key of obj to the work queue; objects for
// which no key can be computed are logged and skipped.
func (c *controller) enqueue(obj interface{}) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		logger.Error(err, "failed to compute key name")
		return
	}
	c.queue.Add(key)
}
// handleErr decides the fate of key after a reconcile attempt: forget it on
// success or when the object is gone, retry with rate limiting up to
// maxRetries, otherwise log the error and drop the key.
func (c *controller) handleErr(err error, key interface{}) {
	switch {
	case err == nil:
		c.queue.Forget(key)
	case errors.IsNotFound(err):
		logger.V(4).Info("Dropping request from the queue", "key", key, "error", err.Error())
		c.queue.Forget(key)
	case c.queue.NumRequeues(key) < maxRetries:
		logger.V(3).Info("Retrying request", "key", key, "error", err.Error())
		c.queue.AddRateLimited(key)
	default:
		logger.Error(err, "Failed to process request", "key", key)
		c.queue.Forget(key)
	}
}
// processNextWorkItem pulls one key off the queue, reconciles it, and feeds
// the result to handleErr. It returns false once the queue is shut down.
func (c *controller) processNextWorkItem() bool {
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)
	c.handleErr(c.reconcile(key.(string)), key)
	return true
}
// worker drains the queue, processing items until the queue is shut down.
func (c *controller) worker() {
for c.processNextWorkItem() {
}
}
// WarmUp populates the policy cache from the listers before event processing
// starts, so cache lookups do not miss policies that existed before the
// controller came up. It returns the first error encountered while listing
// policies or computing their keys.
func (c *controller) WarmUp() error {
	logger.Info("warming up ...")
	// fixed: log message previously contained a stray mojibake character ("done²")
	defer logger.Info("warm up done")
	pols, err := c.polLister.Policies(metav1.NamespaceAll).List(labels.Everything())
	if err != nil {
		return err
	}
	for _, policy := range pols {
		key, err := cache.MetaNamespaceKeyFunc(policy)
		if err != nil {
			return err
		}
		c.cache.Set(key, policy)
	}
	cpols, err := c.cpolLister.List(labels.Everything())
	if err != nil {
		return err
	}
	for _, policy := range cpols {
		key, err := cache.MetaNamespaceKeyFunc(policy)
		if err != nil {
			return err
		}
		c.cache.Set(key, policy)
	}
	return nil
}
// Run starts the configured number of worker goroutines and blocks until
// stopCh is closed. Panics in workers are recovered by HandleCrash.
func (c *controller) Run(stopCh <-chan struct{}) {
defer runtime.HandleCrash()
logger.Info("starting ...")
defer logger.Info("shutting down")
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// reconcile brings the cache entry for key in line with cluster state: it
// loads the policy and stores it under key, or removes the entry when the
// policy no longer exists.
func (c *controller) reconcile(key string) error {
logger.Info("reconciling ...", "key", key)
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
policy, err := c.loadPolicy(namespace, name)
if err != nil {
// the policy was deleted: drop it from the cache, then surface the
// error so handleErr can forget the key
if errors.IsNotFound(err) {
c.cache.Unset(key)
}
return err
}
// TODO: check resource version ?
c.cache.Set(key, policy)
return nil
}
// loadPolicy fetches the policy identified by namespace/name from the
// listers: a cluster-wide ClusterPolicy when namespace is empty, otherwise
// a namespaced Policy.
func (c *controller) loadPolicy(namespace, name string) (kyvernov1.PolicyInterface, error) {
	// idiom: no else after a terminating return
	if namespace == "" {
		return c.cpolLister.Get(name)
	}
	return c.polLister.Policies(namespace).Get(name)
}

View file

@ -0,0 +1,5 @@
package policycache
import "sigs.k8s.io/controller-runtime/pkg/log"
// logger is the package-level logger for the policy cache controller.
var logger = log.Log.WithName("policycache-controller")

View file

@ -1,62 +1,40 @@
package policycache
import (
"os"
"reflect"
"sync/atomic"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1informer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernov1lister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// Cache get method use for to get policy names and mostly use to test cache testcases
type Cache interface {
// Set inserts a policy in the cache
Set(string, kyvernov1.PolicyInterface)
// Unset removes a policy from the cache
Unset(string)
// GetPolicies returns all policies that apply to a namespace, including cluster-wide policies
// If the namespace is empty, only cluster-wide policies are returned
GetPolicies(PolicyType, string, string) []kyvernov1.PolicyInterface
// CheckPolicySync wait until the internal policy cache is fully loaded
CheckPolicySync(<-chan struct{})
}
// controller is responsible for synchronizing Policy Cache,
// it embeds a policy informer to handle policy events.
// The cache is synced when a policy is add/update/delete.
// This cache is only used in the admission webhook to fast retrieve
// policies based on types (Mutate/ValidateEnforce/Generate/imageVerify).
type controller struct {
store
cpolLister kyvernov1lister.ClusterPolicyLister
polLister kyvernov1lister.PolicyLister
pCounter int64
type cache struct {
store store
}
// NewCache create a new Cache
func NewCache(pInformer kyvernov1informer.ClusterPolicyInformer, nspInformer kyvernov1informer.PolicyInformer) Cache {
pc := controller{
store: newPolicyCache(),
cpolLister: pInformer.Lister(),
polLister: nspInformer.Lister(),
pCounter: -1,
func NewCache() Cache {
return &cache{
store: newPolicyCache(),
}
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addPolicy,
UpdateFunc: pc.updatePolicy,
DeleteFunc: pc.deletePolicy,
})
nspInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addNsPolicy,
UpdateFunc: pc.updateNsPolicy,
DeleteFunc: pc.deleteNsPolicy,
})
return &pc
}
func (c *controller) GetPolicies(pkey PolicyType, kind, nspace string) []kyvernov1.PolicyInterface {
func (c *cache) Set(key string, policy kyvernov1.PolicyInterface) {
c.store.set(key, policy)
}
func (c *cache) Unset(key string) {
c.store.unset(key)
}
func (c *cache) GetPolicies(pkey PolicyType, kind, nspace string) []kyvernov1.PolicyInterface {
var result []kyvernov1.PolicyInterface
result = append(result, c.store.get(pkey, kind, "")...)
result = append(result, c.store.get(pkey, "*", "")...)
@ -66,83 +44,3 @@ func (c *controller) GetPolicies(pkey PolicyType, kind, nspace string) []kyverno
}
return result
}
func (c *controller) CheckPolicySync(stopCh <-chan struct{}) {
logger.Info("starting")
policies := []kyvernov1.PolicyInterface{}
polList, err := c.polLister.Policies(metav1.NamespaceAll).List(labels.Everything())
if err != nil {
logger.Error(err, "failed to list Policy")
os.Exit(1)
}
for _, p := range polList {
policies = append(policies, p)
}
cpolList, err := c.cpolLister.List(labels.Everything())
if err != nil {
logger.Error(err, "failed to list Cluster Policy")
os.Exit(1)
}
for _, p := range cpolList {
policies = append(policies, p)
}
atomic.StoreInt64(&c.pCounter, int64(len(policies)))
for _, policy := range policies {
c.store.set(policy)
atomic.AddInt64(&c.pCounter, ^int64(0))
}
if !c.hasPolicySynced() {
logger.Error(nil, "Failed to sync policy with cache")
os.Exit(1)
}
}
func (c *controller) addPolicy(obj interface{}) {
p := obj.(*kyvernov1.ClusterPolicy)
c.store.set(p)
}
func (c *controller) updatePolicy(old, cur interface{}) {
pOld := old.(*kyvernov1.ClusterPolicy)
pNew := cur.(*kyvernov1.ClusterPolicy)
if reflect.DeepEqual(pOld.Spec, pNew.Spec) {
return
}
c.store.set(pNew)
}
func (c *controller) deletePolicy(obj interface{}) {
p, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1.ClusterPolicy)
if ok {
c.store.unset(p)
} else {
logger.Info("Failed to get deleted object, the deleted cluster policy cannot be removed from the cache", "obj", obj)
}
}
func (c *controller) addNsPolicy(obj interface{}) {
p := obj.(*kyvernov1.Policy)
c.store.set(p)
}
func (c *controller) updateNsPolicy(old, cur interface{}) {
npOld := old.(*kyvernov1.Policy)
npNew := cur.(*kyvernov1.Policy)
if reflect.DeepEqual(npOld.Spec, npNew.Spec) {
return
}
c.store.set(npNew)
}
func (c *controller) deleteNsPolicy(obj interface{}) {
p, ok := kubeutils.GetObjectWithTombstone(obj).(*kyvernov1.Policy)
if ok {
c.store.unset(p)
} else {
logger.Info("Failed to get deleted object, the deleted policy cannot be removed from the cache", "obj", obj)
}
}
func (c *controller) hasPolicySynced() bool {
return atomic.LoadInt64(&c.pCounter) == 0
}

View file

@ -4,16 +4,27 @@ import (
"encoding/json"
"testing"
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/autogen"
"gotest.tools/assert"
kubecache "k8s.io/client-go/tools/cache"
)
func setPolicy(store store, policy kyvernov1.PolicyInterface) {
key, _ := kubecache.MetaNamespaceKeyFunc(policy)
store.set(key, policy)
}
func unsetPolicy(store store, policy kyvernov1.PolicyInterface) {
key, _ := kubecache.MetaNamespaceKeyFunc(policy)
store.unset(key)
}
func Test_All(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
//add
pCache.set(policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -35,7 +46,7 @@ func Test_All(t *testing.T) {
}
// remove
pCache.unset(policy)
unsetPolicy(pCache, policy)
kind := "pod"
validateEnforce := pCache.get(ValidateEnforce, kind, "")
assert.Assert(t, len(validateEnforce) == 0)
@ -44,9 +55,9 @@ func Test_All(t *testing.T) {
func Test_Add_Duplicate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
pCache.set(policy)
pCache.set(policy)
pCache.set(policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -70,11 +81,11 @@ func Test_Add_Duplicate_Policy(t *testing.T) {
func Test_Add_Validate_Audit(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
pCache.set(policy)
pCache.set(policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
policy.Spec.ValidationFailureAction = "audit"
pCache.set(policy)
pCache.set(policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -95,7 +106,7 @@ func Test_Add_Remove(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
kind := "Pod"
pCache.set(policy)
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, "")
if len(validateEnforce) != 1 {
@ -112,7 +123,7 @@ func Test_Add_Remove(t *testing.T) {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
pCache.unset(policy)
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, "")
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
@ -123,7 +134,7 @@ func Test_Add_Remove_Any(t *testing.T) {
pCache := newPolicyCache()
policy := newAnyPolicy(t)
kind := "Pod"
pCache.set(policy)
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, "")
if len(validateEnforce) != 1 {
@ -140,7 +151,7 @@ func Test_Add_Remove_Any(t *testing.T) {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
pCache.unset(policy)
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, "")
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
@ -151,10 +162,10 @@ func Test_Remove_From_Empty_Cache(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
pCache.unset(policy)
unsetPolicy(pCache, policy)
}
func newPolicy(t *testing.T) *kyverno.ClusterPolicy {
func newPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
"name": "test-policy"
@ -256,14 +267,14 @@ func newPolicy(t *testing.T) *kyverno.ClusterPolicy {
}
}`)
var policy *kyverno.ClusterPolicy
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newAnyPolicy(t *testing.T) *kyverno.ClusterPolicy {
func newAnyPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
"name": "test-policy"
@ -429,14 +440,14 @@ func newAnyPolicy(t *testing.T) *kyverno.ClusterPolicy {
}
}`)
var policy *kyverno.ClusterPolicy
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newNsPolicy(t *testing.T) kyverno.PolicyInterface {
func newNsPolicy(t *testing.T) kyvernov1.PolicyInterface {
rawPolicy := []byte(`{
"metadata": {
"name": "test-policy",
@ -536,14 +547,14 @@ func newNsPolicy(t *testing.T) kyverno.PolicyInterface {
}
}`)
var policy *kyverno.Policy
var policy *kyvernov1.Policy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newGVKPolicy(t *testing.T) *kyverno.ClusterPolicy {
func newGVKPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
"name": "add-networkpolicy1",
@ -593,14 +604,14 @@ func newGVKPolicy(t *testing.T) *kyverno.ClusterPolicy {
}
}`)
var policy *kyverno.ClusterPolicy
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newUserTestPolicy(t *testing.T) kyverno.PolicyInterface {
func newUserTestPolicy(t *testing.T) kyvernov1.PolicyInterface {
rawPolicy := []byte(`{
"apiVersion": "kyverno.io/v1",
"kind": "Policy",
@ -635,14 +646,14 @@ func newUserTestPolicy(t *testing.T) kyverno.PolicyInterface {
}
}`)
var policy *kyverno.Policy
var policy *kyvernov1.Policy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newGeneratePolicy(t *testing.T) *kyverno.ClusterPolicy {
func newGeneratePolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
"name": "add-networkpolicy",
@ -684,13 +695,13 @@ func newGeneratePolicy(t *testing.T) *kyverno.ClusterPolicy {
}
}`)
var policy *kyverno.ClusterPolicy
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newMutatePolicy(t *testing.T) *kyverno.ClusterPolicy {
func newMutatePolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
"name": "logger-sidecar"
@ -728,13 +739,13 @@ func newMutatePolicy(t *testing.T) *kyverno.ClusterPolicy {
}
}`)
var policy *kyverno.ClusterPolicy
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newNsMutatePolicy(t *testing.T) kyverno.PolicyInterface {
func newNsMutatePolicy(t *testing.T) kyvernov1.PolicyInterface {
rawPolicy := []byte(`{
"metadata": {
"name": "logger-sidecar",
@ -773,14 +784,14 @@ func newNsMutatePolicy(t *testing.T) kyverno.PolicyInterface {
}
}`)
var policy *kyverno.Policy
var policy *kyvernov1.Policy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newValidateAuditPolicy(t *testing.T) *kyverno.ClusterPolicy {
func newValidateAuditPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
"name": "check-label-app-audit"
@ -827,14 +838,14 @@ func newValidateAuditPolicy(t *testing.T) *kyverno.ClusterPolicy {
}
}`)
var policy *kyverno.ClusterPolicy
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newValidateEnforcePolicy(t *testing.T) *kyverno.ClusterPolicy {
func newValidateEnforcePolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
"name": "check-label-app-enforce"
@ -881,7 +892,7 @@ func newValidateEnforcePolicy(t *testing.T) *kyverno.ClusterPolicy {
}
}`)
var policy *kyverno.ClusterPolicy
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
@ -892,7 +903,7 @@ func Test_Ns_All(t *testing.T) {
pCache := newPolicyCache()
policy := newNsPolicy(t)
//add
pCache.set(policy)
setPolicy(pCache, policy)
nspace := policy.GetNamespace()
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -914,7 +925,7 @@ func Test_Ns_All(t *testing.T) {
}
}
// remove
pCache.unset(policy)
unsetPolicy(pCache, policy)
kind := "pod"
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
assert.Assert(t, len(validateEnforce) == 0)
@ -923,9 +934,9 @@ func Test_Ns_All(t *testing.T) {
func Test_Ns_Add_Duplicate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newNsPolicy(t)
pCache.set(policy)
pCache.set(policy)
pCache.set(policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
nspace := policy.GetNamespace()
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -950,12 +961,12 @@ func Test_Ns_Add_Duplicate_Policy(t *testing.T) {
func Test_Ns_Add_Validate_Audit(t *testing.T) {
pCache := newPolicyCache()
policy := newNsPolicy(t)
pCache.set(policy)
pCache.set(policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
nspace := policy.GetNamespace()
policy.GetSpec().ValidationFailureAction = "audit"
pCache.set(policy)
pCache.set(policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -977,13 +988,13 @@ func Test_Ns_Add_Remove(t *testing.T) {
policy := newNsPolicy(t)
nspace := policy.GetNamespace()
kind := "Pod"
pCache.set(policy)
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
pCache.unset(policy)
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
@ -994,7 +1005,7 @@ func Test_GVk_Cache(t *testing.T) {
pCache := newPolicyCache()
policy := newGVKPolicy(t)
//add
pCache.set(policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -1010,13 +1021,13 @@ func Test_GVK_Add_Remove(t *testing.T) {
pCache := newPolicyCache()
policy := newGVKPolicy(t)
kind := "ClusterRole"
pCache.set(policy)
setPolicy(pCache, policy)
generate := pCache.get(Generate, kind, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
pCache.unset(policy)
unsetPolicy(pCache, policy)
deletedGenerate := pCache.get(Generate, kind, "")
if len(deletedGenerate) != 0 {
t.Errorf("expected 0 generate policy, found %v", len(deletedGenerate))
@ -1028,7 +1039,7 @@ func Test_Add_Validate_Enforce(t *testing.T) {
policy := newUserTestPolicy(t)
nspace := policy.GetNamespace()
//add
pCache.set(policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
@ -1044,13 +1055,13 @@ func Test_Ns_Add_Remove_User(t *testing.T) {
policy := newUserTestPolicy(t)
nspace := policy.GetNamespace()
kind := "Deployment"
pCache.set(policy)
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
pCache.unset(policy)
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
@ -1061,9 +1072,9 @@ func Test_Mutate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newMutatePolicy(t)
//add
pCache.set(policy)
pCache.set(policy)
pCache.set(policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -1080,7 +1091,7 @@ func Test_Generate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newGeneratePolicy(t)
//add
pCache.set(policy)
setPolicy(pCache, policy)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
@ -1098,10 +1109,10 @@ func Test_NsMutate_Policy(t *testing.T) {
policy := newMutatePolicy(t)
nspolicy := newNsMutatePolicy(t)
//add
pCache.set(policy)
pCache.set(nspolicy)
pCache.set(policy)
pCache.set(nspolicy)
setPolicy(pCache, policy)
setPolicy(pCache, nspolicy)
setPolicy(pCache, policy)
setPolicy(pCache, nspolicy)
nspace := policy.GetNamespace()
// get
@ -1122,8 +1133,8 @@ func Test_Validate_Enforce_Policy(t *testing.T) {
pCache := newPolicyCache()
policy1 := newValidateAuditPolicy(t)
policy2 := newValidateEnforcePolicy(t)
pCache.set(policy1)
pCache.set(policy2)
setPolicy(pCache, policy1)
setPolicy(pCache, policy2)
validateEnforce := pCache.get(ValidateEnforce, "Pod", "")
if len(validateEnforce) != 2 {
@ -1135,8 +1146,8 @@ func Test_Validate_Enforce_Policy(t *testing.T) {
t.Errorf("adding: expected 0 validate audit policy, found %v", len(validateAudit))
}
pCache.unset(policy1)
pCache.unset(policy2)
unsetPolicy(pCache, policy1)
unsetPolicy(pCache, policy2)
validateEnforce = pCache.get(ValidateEnforce, "Pod", "")
if len(validateEnforce) != 0 {

View file

@ -12,9 +12,9 @@ import (
type store interface {
// set inserts a policy in the cache
set(kyvernov1.PolicyInterface)
set(string, kyvernov1.PolicyInterface)
// unset removes a policy from the cache
unset(kyvernov1.PolicyInterface)
unset(string)
// get finds policies that match a given type, gvk and namespace
get(PolicyType, string, string) []kyvernov1.PolicyInterface
}
@ -30,18 +30,18 @@ func newPolicyCache() store {
}
}
func (pc *policyCache) set(policy kyvernov1.PolicyInterface) {
func (pc *policyCache) set(key string, policy kyvernov1.PolicyInterface) {
pc.lock.Lock()
defer pc.lock.Unlock()
pc.store.set(policy)
logger.V(4).Info("policy is added to cache", "name", policy.GetName())
pc.store.set(key, policy)
logger.V(4).Info("policy is added to cache", "key", key)
}
func (pc *policyCache) unset(p kyvernov1.PolicyInterface) {
func (pc *policyCache) unset(key string) {
pc.lock.Lock()
defer pc.lock.Unlock()
pc.store.unset(p)
logger.V(4).Info("policy is removed from cache", "name", p.GetName())
pc.store.unset(key)
logger.V(4).Info("policy is removed from cache", "key", key)
}
func (pc *policyCache) get(pkey PolicyType, kind, nspace string) []kyvernov1.PolicyInterface {
@ -73,15 +73,6 @@ func computeKind(gvk string) string {
return kind
}
func computeKey(policy kyvernov1.PolicyInterface) string {
namespace := policy.GetNamespace()
if namespace == "" {
return policy.GetName()
} else {
return namespace + "/" + policy.GetName()
}
}
func computeEnforcePolicy(spec *kyvernov1.Spec) bool {
if spec.GetValidationFailureAction() == kyvernov1.Enforce {
return true
@ -102,8 +93,8 @@ func set(set sets.String, item string, value bool) sets.String {
}
}
func (m *policyMap) set(policy kyvernov1.PolicyInterface) {
key, enforcePolicy := computeKey(policy), computeEnforcePolicy(policy.GetSpec())
func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface) {
enforcePolicy := computeEnforcePolicy(policy.GetSpec())
m.policies[key] = policy
type state struct {
hasMutate, hasValidate, hasGenerate, hasVerifyImages, hasImagesValidationChecks bool
@ -141,8 +132,7 @@ func (m *policyMap) set(policy kyvernov1.PolicyInterface) {
}
}
func (m *policyMap) unset(policy kyvernov1.PolicyInterface) {
key := computeKey(policy)
func (m *policyMap) unset(key string) {
delete(m.policies, key)
for kind := range m.kindType {
for policyType := range m.kindType[kind] {