1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-04-15 00:36:28 +00:00

fix: policy cache use GVR instead of kind (#6543)

* fix: policy cache use GVR instead of kind

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* unit tests

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* unit tests

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* GVRS

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* ephemeralcontainers

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* kuttl

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* nit

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix kuttl

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

---------

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
This commit is contained in:
Charles-Edouard Brétéché 2023-03-13 15:44:39 +01:00 committed by GitHub
parent 8b2effa706
commit 861776d50c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
20 changed files with 611 additions and 380 deletions

View file

@ -16,9 +16,27 @@ import (
"k8s.io/client-go/discovery"
)
// GroupVersionResourceSubresource contains a group/version/resource/subresource reference
type GroupVersionResourceSubresource struct {
	// GroupVersionResource identifies the API group, version and resource.
	schema.GroupVersionResource
	// SubResource is the optional subresource name (e.g. "status" or "scale");
	// empty when the reference is to the parent resource itself.
	SubResource string
}
// ResourceSubresource returns the resource name joined with the subresource
// ("resource/subresource"), or just the resource name when no subresource is set.
func (gvrs GroupVersionResourceSubresource) ResourceSubresource() string {
	switch gvrs.SubResource {
	case "":
		return gvrs.Resource
	default:
		return gvrs.Resource + "/" + gvrs.SubResource
	}
}
// WithSubResource returns a copy of the reference with SubResource set to the
// given value. The receiver is taken by value, so the original is left unchanged.
func (gvrs GroupVersionResourceSubresource) WithSubResource(subresource string) GroupVersionResourceSubresource {
	out := gvrs
	out.SubResource = subresource
	return out
}
// IDiscovery provides an interface to manage Kind and GVR mapping
type IDiscovery interface {
FindResources(group, version, kind, subresource string) ([]schema.GroupVersionResource, error)
FindResources(group, version, kind, subresource string) ([]GroupVersionResourceSubresource, error)
FindResource(groupVersion string, kind string) (apiResource, parentAPIResource *metav1.APIResource, gvr schema.GroupVersionResource, err error)
// TODO: there's no mapping from GVK to GVR, this is very error prone
GetGVRFromGVK(schema.GroupVersionKind) (schema.GroupVersionResource, error)
@ -148,7 +166,7 @@ func (c serverResources) FindResource(groupVersion string, kind string) (apiReso
return nil, nil, schema.GroupVersionResource{}, err
}
func (c serverResources) FindResources(group, version, kind, subresource string) ([]schema.GroupVersionResource, error) {
func (c serverResources) FindResources(group, version, kind, subresource string) ([]GroupVersionResourceSubresource, error) {
resources, err := c.findResources(group, version, kind, subresource)
if err != nil {
if !c.cachedClient.Fresh() {
@ -159,7 +177,7 @@ func (c serverResources) FindResources(group, version, kind, subresource string)
return resources, err
}
func (c serverResources) findResources(group, version, kind, subresource string) ([]schema.GroupVersionResource, error) {
func (c serverResources) findResources(group, version, kind, subresource string) ([]GroupVersionResourceSubresource, error) {
_, serverGroupsAndResources, err := c.cachedClient.ServerGroupsAndResources()
if err != nil && !strings.Contains(err.Error(), "Got empty response for") {
if discovery.IsGroupDiscoveryFailedError(err) {
@ -184,7 +202,7 @@ func (c serverResources) findResources(group, version, kind, subresource string)
Kind: kind,
}
}
resources := sets.New[schema.GroupVersionResource]()
resources := sets.New[GroupVersionResourceSubresource]()
// first match resources
for _, list := range serverGroupsAndResources {
gv, err := schema.ParseGroupVersion(list.GroupVersion)
@ -195,20 +213,23 @@ func (c serverResources) findResources(group, version, kind, subresource string)
if !strings.Contains(resource.Name, "/") {
gvk := getGVK(gv, resource.Group, resource.Version, resource.Kind)
if wildcard.Match(group, gvk.Group) && wildcard.Match(version, gvk.Version) && wildcard.Match(kind, gvk.Kind) {
resources.Insert(gvk.GroupVersion().WithResource(resource.Name))
resources.Insert(GroupVersionResourceSubresource{
GroupVersionResource: gvk.GroupVersion().WithResource(resource.Name),
})
}
}
}
}
}
// second match subresources if necessary
subresources := sets.New[schema.GroupVersionResource]()
subresources := sets.New[GroupVersionResourceSubresource]()
if subresource != "" {
for _, list := range serverGroupsAndResources {
for _, resource := range list.APIResources {
for parent := range resources {
if wildcard.Match(parent.Resource+"/"+subresource, resource.Name) {
subresources.Insert(parent.GroupVersion().WithResource(resource.Name))
parts := strings.Split(resource.Name, "/")
subresources.Insert(parent.WithSubResource(parts[1]))
break
}
}
@ -225,7 +246,11 @@ func (c serverResources) findResources(group, version, kind, subresource string)
for _, resource := range list.APIResources {
gvk := getGVK(gv, resource.Group, resource.Version, resource.Kind)
if wildcard.Match(group, gvk.Group) && wildcard.Match(version, gvk.Version) && wildcard.Match(kind, gvk.Kind) {
resources.Insert(gv.WithResource(resource.Name))
parts := strings.Split(resource.Name, "/")
resources.Insert(GroupVersionResourceSubresource{
GroupVersionResource: gv.WithResource(parts[0]),
SubResource: parts[1],
})
}
}
}

View file

@ -86,7 +86,7 @@ func (c *fakeDiscoveryClient) FindResource(groupVersion string, kind string) (ap
return nil, nil, schema.GroupVersionResource{}, fmt.Errorf("not implemented")
}
func (c *fakeDiscoveryClient) FindResources(group, version, kind, subresource string) ([]schema.GroupVersionResource, error) {
func (c *fakeDiscoveryClient) FindResources(group, version, kind, subresource string) ([]GroupVersionResourceSubresource, error) {
return nil, fmt.Errorf("not implemented")
}

View file

@ -6,14 +6,12 @@ import (
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/autogen"
kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/controllers"
pcache "github.com/kyverno/kyverno/pkg/policycache"
controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
@ -72,8 +70,7 @@ func (c *controller) WarmUp() error {
if key, err := cache.MetaNamespaceKeyFunc(policy); err != nil {
return err
} else {
subresourceGVKToKind := getSubresourceGVKToKindMap(policy, c.client)
c.cache.Set(key, policy, subresourceGVKToKind)
return c.cache.Set(key, policy, c.client.Discovery())
}
}
cpols, err := c.cpolLister.List(labels.Everything())
@ -84,8 +81,7 @@ func (c *controller) WarmUp() error {
if key, err := cache.MetaNamespaceKeyFunc(policy); err != nil {
return err
} else {
subresourceGVKToKind := getSubresourceGVKToKindMap(policy, c.client)
c.cache.Set(key, policy, subresourceGVKToKind)
return c.cache.Set(key, policy, c.client.Discovery())
}
}
return nil
@ -103,10 +99,7 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, nam
}
return err
}
// TODO: check resource version ?
subresourceGVKToKind := getSubresourceGVKToKindMap(policy, c.client)
c.cache.Set(key, policy, subresourceGVKToKind)
return nil
return c.cache.Set(key, policy, c.client.Discovery())
}
func (c *controller) loadPolicy(namespace, name string) (kyvernov1.PolicyInterface, error) {
@ -116,22 +109,3 @@ func (c *controller) loadPolicy(namespace, name string) (kyvernov1.PolicyInterfa
return c.polLister.Policies(namespace).Get(name)
}
}
func getSubresourceGVKToKindMap(policy kyvernov1.PolicyInterface, client dclient.Interface) map[string]string {
subresourceGVKToKind := make(map[string]string)
for _, rule := range autogen.ComputeRules(policy) {
for _, gvk := range rule.MatchResources.GetKinds() {
gv, k := kubeutils.GetKindFromGVK(gvk)
_, subresource := kubeutils.SplitSubresource(k)
if subresource != "" {
apiResource, _, _, err := client.Discovery().FindResource(gv, k)
if err != nil {
logger.Error(err, "failed to fetch resource group versions", "gv", gv, "kind", k)
continue
}
subresourceGVKToKind[gvk] = apiResource.Kind
}
}
}
return subresourceGVKToKind
}

View file

@ -813,7 +813,7 @@ func (c *controller) getLease() (*coordinationv1.Lease, error) {
// mergeWebhook merges the matching kinds of the policy to webhook.rule
func (c *controller) mergeWebhook(dst *webhook, policy kyvernov1.PolicyInterface, updateValidate bool) {
matchedGVK := make([]string, 0)
var matchedGVK []string
for _, rule := range autogen.ComputeRules(policy) {
// matching kinds in generate policies need to be added to both webhook
if rule.HasGenerate() {
@ -829,34 +829,35 @@ func (c *controller) mergeWebhook(dst *webhook, policy kyvernov1.PolicyInterface
matchedGVK = append(matchedGVK, rule.MatchResources.GetKinds()...)
}
}
gvkMap := make(map[string]int)
gvrList := make([]schema.GroupVersionResource, 0)
var gvrsList []dclient.GroupVersionResourceSubresource
for _, gvk := range matchedGVK {
if _, ok := gvkMap[gvk]; !ok {
gvkMap[gvk] = 1
// NOTE: webhook stores GVR in its rules while policy stores GVK in its rules definition
group, version, kind, subresource := kubeutils.ParseKindSelector(gvk)
// if kind is `*` no need to lookup resources
if kind == "*" && subresource == "*" {
gvrList = append(gvrList, schema.GroupVersionResource{Group: group, Version: version, Resource: "*/*"})
} else if kind == "*" && subresource == "" {
gvrList = append(gvrList, schema.GroupVersionResource{Group: group, Version: version, Resource: "*"})
} else if kind == "*" && subresource != "" {
gvrList = append(gvrList, schema.GroupVersionResource{Group: group, Version: version, Resource: "*/" + subresource})
} else {
gvrs, err := c.discoveryClient.FindResources(group, version, kind, subresource)
if err != nil {
logger.Error(err, "unable to find resource", "group", group, "version", version, "kind", kind, "subresource", subresource)
continue
}
for _, gvr := range gvrs {
logger.V(4).Info("configuring webhook", "GVK", gvk, "GVR", gvr)
gvrList = append(gvrList, gvr)
}
// NOTE: webhook stores GVR in its rules while policy stores GVK in its rules definition
group, version, kind, subresource := kubeutils.ParseKindSelector(gvk)
// if kind is `*` no need to lookup resources
if kind == "*" && subresource == "*" {
gvrsList = append(gvrsList, dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{Group: group, Version: version, Resource: "*"},
SubResource: "*",
})
} else if kind == "*" && subresource == "" {
gvrsList = append(gvrsList, dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{Group: group, Version: version, Resource: "*"},
})
} else if kind == "*" && subresource != "" {
gvrsList = append(gvrsList, dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{Group: group, Version: version, Resource: "*"},
SubResource: subresource,
})
} else {
gvrss, err := c.discoveryClient.FindResources(group, version, kind, subresource)
if err != nil {
logger.Error(err, "unable to find resource", "group", group, "version", version, "kind", kind, "subresource", subresource)
continue
}
gvrsList = append(gvrsList, gvrss...)
}
}
for _, gvr := range gvrList {
for _, gvr := range gvrsList {
dst.set(gvr)
}
spec := policy.GetSpec()

View file

@ -4,6 +4,7 @@ import (
"strings"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/utils"
"golang.org/x/exp/slices"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
@ -70,13 +71,13 @@ func (wh *webhook) buildRulesWithOperations(ops ...admissionregistrationv1.Opera
return rules
}
func (wh *webhook) set(gvr schema.GroupVersionResource) {
gv := gvr.GroupVersion()
func (wh *webhook) set(gvrs dclient.GroupVersionResourceSubresource) {
gv := gvrs.GroupVersion()
resources := wh.rules[gv]
if resources == nil {
wh.rules[gv] = sets.New(gvr.Resource)
wh.rules[gv] = sets.New(gvrs.ResourceSubresource())
} else {
resources.Insert(gvr.Resource)
resources.Insert(gvrs.ResourceSubresource())
}
}

View file

@ -6,6 +6,7 @@ import (
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/autogen"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"gotest.tools/assert"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
@ -16,7 +17,11 @@ func Test_webhook_isEmpty(t *testing.T) {
empty := newWebhook(DefaultWebhookTimeout, admissionregistrationv1.Ignore)
assert.Equal(t, empty.isEmpty(), true)
notEmpty := newWebhook(DefaultWebhookTimeout, admissionregistrationv1.Ignore)
notEmpty.set(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"})
notEmpty.set(dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{
Group: "", Version: "v1", Resource: "pods",
},
})
assert.Equal(t, notEmpty.isEmpty(), false)
}

View file

@ -2,18 +2,23 @@ package policycache
import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/utils/wildcard"
)
// ResourceFinder looks up the API resources (GVR plus optional subresource)
// matching a group/version/kind/subresource selector.
// NOTE(review): implementations appear to support wildcard selectors — confirm
// against the discovery client's FindResources implementation.
type ResourceFinder interface {
	// FindResources returns every matching GroupVersionResourceSubresource,
	// or an error when the lookup fails.
	FindResources(group, version, kind, subresource string) ([]dclient.GroupVersionResourceSubresource, error)
}
// Cache get method use for to get policy names and mostly use to test cache testcases
type Cache interface {
// Set inserts a policy in the cache
Set(string, kyvernov1.PolicyInterface, map[string]string)
Set(string, kyvernov1.PolicyInterface, ResourceFinder) error
// Unset removes a policy from the cache
Unset(string)
// GetPolicies returns all policies that apply to a namespace, including cluster-wide policies
// If the namespace is empty, only cluster-wide policies are returned
GetPolicies(PolicyType, string, string) []kyvernov1.PolicyInterface
GetPolicies(PolicyType, dclient.GroupVersionResourceSubresource, string) []kyvernov1.PolicyInterface
}
type cache struct {
@ -27,37 +32,32 @@ func NewCache() Cache {
}
}
func (c *cache) Set(key string, policy kyvernov1.PolicyInterface, subresourceGVKToKind map[string]string) {
c.store.set(key, policy, subresourceGVKToKind)
func (c *cache) Set(key string, policy kyvernov1.PolicyInterface, client ResourceFinder) error {
return c.store.set(key, policy, client)
}
func (c *cache) Unset(key string) {
c.store.unset(key)
}
func (c *cache) GetPolicies(pkey PolicyType, kind, nspace string) []kyvernov1.PolicyInterface {
func (c *cache) GetPolicies(pkey PolicyType, gvrs dclient.GroupVersionResourceSubresource, nspace string) []kyvernov1.PolicyInterface {
var result []kyvernov1.PolicyInterface
result = append(result, c.store.get(pkey, kind, "")...)
result = append(result, c.store.get(pkey, "*", "")...)
result = append(result, c.store.get(pkey, gvrs, "")...)
if nspace != "" {
result = append(result, c.store.get(pkey, kind, nspace)...)
result = append(result, c.store.get(pkey, "*", nspace)...)
result = append(result, c.store.get(pkey, gvrs, nspace)...)
}
if pkey == ValidateAudit { // also get policies with ValidateEnforce
result = append(result, c.store.get(ValidateEnforce, kind, "")...)
result = append(result, c.store.get(ValidateEnforce, "*", "")...)
// also get policies with ValidateEnforce
if pkey == ValidateAudit {
result = append(result, c.store.get(ValidateEnforce, gvrs, "")...)
}
if pkey == ValidateAudit || pkey == ValidateEnforce {
result = filterPolicies(pkey, result, nspace, kind)
result = filterPolicies(pkey, result, nspace)
}
return result
}
// Filter cluster policies using validationFailureAction override
func filterPolicies(pkey PolicyType, result []kyvernov1.PolicyInterface, nspace, kind string) []kyvernov1.PolicyInterface {
func filterPolicies(pkey PolicyType, result []kyvernov1.PolicyInterface, nspace string) []kyvernov1.PolicyInterface {
var policies []kyvernov1.PolicyInterface
for _, policy := range result {
keepPolicy := true
@ -67,7 +67,8 @@ func filterPolicies(pkey PolicyType, result []kyvernov1.PolicyInterface, nspace,
case ValidateEnforce:
keepPolicy = checkValidationFailureActionOverrides(true, nspace, policy)
}
if keepPolicy { // add policy to result
// add policy to result
if keepPolicy {
policies = append(policies, policy)
}
}

View file

@ -6,13 +6,15 @@ import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/autogen"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"gotest.tools/assert"
kubecache "k8s.io/client-go/tools/cache"
)
func setPolicy(store store, policy kyvernov1.PolicyInterface) {
func setPolicy(t *testing.T, store store, policy kyvernov1.PolicyInterface, finder ResourceFinder) {
key, _ := kubecache.MetaNamespaceKeyFunc(policy)
store.set(key, policy, make(map[string]string))
err := store.set(key, policy, finder)
assert.NilError(t, err)
}
func unsetPolicy(store store, policy kyvernov1.PolicyInterface) {
@ -23,56 +25,64 @@ func unsetPolicy(store store, policy kyvernov1.PolicyInterface) {
func Test_All(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
finder := TestResourceFinder{}
//add
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
// get
mutate := pCache.get(Mutate, kind, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, kind, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, kind, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
// get
mutate := pCache.get(Mutate, gvr, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, gvr, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
}
}
}
// remove
unsetPolicy(pCache, policy)
kind := "pod"
validateEnforce := pCache.get(ValidateEnforce, kind, "")
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
assert.Assert(t, len(validateEnforce) == 0)
}
func Test_Add_Duplicate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
mutate := pCache.get(Mutate, gvr, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
mutate := pCache.get(Mutate, kind, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, kind, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, kind, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
validateEnforce := pCache.get(ValidateEnforce, gvr, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
}
}
}
@ -81,22 +91,27 @@ func Test_Add_Duplicate_Policy(t *testing.T) {
func Test_Add_Validate_Audit(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
policy.Spec.ValidationFailureAction = "audit"
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
validateEnforce := pCache.get(ValidateEnforce, gvr, "")
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate (enforce) policy, found %v", len(validateEnforce))
}
validateEnforce := pCache.get(ValidateEnforce, kind, "")
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate (enforce) policy, found %v", len(validateEnforce))
}
validateAudit := pCache.get(ValidateAudit, kind, "")
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate (audit) policy, found %v", len(validateAudit))
validateAudit := pCache.get(ValidateAudit, gvr, "")
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate (audit) policy, found %v", len(validateAudit))
}
}
}
}
@ -105,26 +120,22 @@ func Test_Add_Validate_Audit(t *testing.T) {
func Test_Add_Remove(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
kind := "Pod"
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, "")
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := pCache.get(Mutate, kind, "")
mutate := pCache.get(Mutate, podsGVRS, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := pCache.get(Generate, kind, "")
generate := pCache.get(Generate, podsGVRS, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, "")
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -133,26 +144,22 @@ func Test_Add_Remove(t *testing.T) {
func Test_Add_Remove_Any(t *testing.T) {
pCache := newPolicyCache()
policy := newAnyPolicy(t)
kind := "Pod"
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, "")
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := pCache.get(Mutate, kind, "")
mutate := pCache.get(Mutate, podsGVRS, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := pCache.get(Generate, kind, "")
generate := pCache.get(Generate, podsGVRS, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, "")
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -161,7 +168,6 @@ func Test_Add_Remove_Any(t *testing.T) {
func Test_Remove_From_Empty_Cache(t *testing.T) {
pCache := newPolicyCache()
policy := newPolicy(t)
unsetPolicy(pCache, policy)
}
@ -266,11 +272,9 @@ func newPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
]
}
}`)
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
@ -439,11 +443,9 @@ func newAnyPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
]
}
}`)
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
@ -475,7 +477,7 @@ func newNsPolicy(t *testing.T) kyvernov1.PolicyInterface {
"value": "a"
}
]
}
}
}
}
},
@ -546,11 +548,9 @@ func newNsPolicy(t *testing.T) kyvernov1.PolicyInterface {
]
}
}`)
var policy *kyvernov1.Policy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
@ -603,11 +603,9 @@ func newGVKPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
]
}
}`)
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
@ -645,11 +643,9 @@ func newUserTestPolicy(t *testing.T) kyvernov1.PolicyInterface {
]
}
}`)
var policy *kyvernov1.Policy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
@ -694,13 +690,12 @@ func newGeneratePolicy(t *testing.T) *kyvernov1.ClusterPolicy {
]
}
}`)
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newMutatePolicy(t *testing.T) *kyvernov1.ClusterPolicy {
rawPolicy := []byte(`{
"metadata": {
@ -738,13 +733,12 @@ func newMutatePolicy(t *testing.T) *kyvernov1.ClusterPolicy {
"validationFailureAction": "audit"
}
}`)
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func newNsMutatePolicy(t *testing.T) kyvernov1.PolicyInterface {
rawPolicy := []byte(`{
"metadata": {
@ -783,11 +777,9 @@ func newNsMutatePolicy(t *testing.T) kyvernov1.PolicyInterface {
"validationFailureAction": "audit"
}
}`)
var policy *kyvernov1.Policy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
@ -837,11 +829,9 @@ func newValidateAuditPolicy(t *testing.T) *kyvernov1.ClusterPolicy {
]
}
}`)
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
@ -891,68 +881,73 @@ func newValidateEnforcePolicy(t *testing.T) *kyvernov1.ClusterPolicy {
]
}
}`)
var policy *kyvernov1.ClusterPolicy
err := json.Unmarshal(rawPolicy, &policy)
assert.NilError(t, err)
return policy
}
func Test_Ns_All(t *testing.T) {
pCache := newPolicyCache()
policy := newNsPolicy(t)
finder := TestResourceFinder{}
//add
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
nspace := policy.GetNamespace()
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
// get
mutate := pCache.get(Mutate, kind, nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, kind, nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
// get
mutate := pCache.get(Mutate, gvr, nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
}
}
}
// remove
unsetPolicy(pCache, policy)
kind := "pod"
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, nspace)
assert.Assert(t, len(validateEnforce) == 0)
}
func Test_Ns_Add_Duplicate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newNsPolicy(t)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
nspace := policy.GetNamespace()
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
mutate := pCache.get(Mutate, kind, nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, kind, nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
mutate := pCache.get(Mutate, gvr, nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
}
}
}
@ -961,23 +956,28 @@ func Test_Ns_Add_Duplicate_Policy(t *testing.T) {
func Test_Ns_Add_Validate_Audit(t *testing.T) {
pCache := newPolicyCache()
policy := newNsPolicy(t)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
nspace := policy.GetNamespace()
policy.GetSpec().ValidationFailureAction = "audit"
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate (enforce) policy, found %v", len(validateEnforce))
}
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate (enforce) policy, found %v", len(validateEnforce))
}
validateAudit := pCache.get(ValidateAudit, kind, nspace)
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate (audit) policy, found %v", len(validateAudit))
validateAudit := pCache.get(ValidateAudit, gvr, nspace)
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate (audit) policy, found %v", len(validateAudit))
}
}
}
}
@ -986,16 +986,15 @@ func Test_Ns_Add_Validate_Audit(t *testing.T) {
func Test_Ns_Add_Remove(t *testing.T) {
pCache := newPolicyCache()
policy := newNsPolicy(t)
finder := TestResourceFinder{}
nspace := policy.GetNamespace()
kind := "Pod"
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, nspace)
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS, nspace)
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -1004,14 +1003,19 @@ func Test_Ns_Add_Remove(t *testing.T) {
func Test_GVk_Cache(t *testing.T) {
pCache := newPolicyCache()
policy := newGVKPolicy(t)
finder := TestResourceFinder{}
//add
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
generate := pCache.get(Generate, kind, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
generate := pCache.get(Generate, gvr, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
}
}
}
@ -1020,15 +1024,14 @@ func Test_GVk_Cache(t *testing.T) {
func Test_GVK_Add_Remove(t *testing.T) {
pCache := newPolicyCache()
policy := newGVKPolicy(t)
kind := "ClusterRole"
setPolicy(pCache, policy)
generate := pCache.get(Generate, kind, "")
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
generate := pCache.get(Generate, clusterrolesGVRS, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
unsetPolicy(pCache, policy)
deletedGenerate := pCache.get(Generate, kind, "")
deletedGenerate := pCache.get(Generate, clusterrolesGVRS, "")
if len(deletedGenerate) != 0 {
t.Errorf("expected 0 generate policy, found %v", len(deletedGenerate))
}
@ -1038,13 +1041,19 @@ func Test_Add_Validate_Enforce(t *testing.T) {
pCache := newPolicyCache()
policy := newUserTestPolicy(t)
nspace := policy.GetNamespace()
finder := TestResourceFinder{}
//add
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
}
}
}
@ -1054,15 +1063,15 @@ func Test_Ns_Add_Remove_User(t *testing.T) {
pCache := newPolicyCache()
policy := newUserTestPolicy(t)
nspace := policy.GetNamespace()
kind := "Deployment"
setPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, kind, nspace)
finder := TestResourceFinder{}
// kind := "Deployment"
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, deploymentsGVRS, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, kind, nspace)
deletedValidateEnforce := pCache.get(ValidateEnforce, deploymentsGVRS, nspace)
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -1071,17 +1080,22 @@ func Test_Ns_Add_Remove_User(t *testing.T) {
func Test_Mutate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newMutatePolicy(t)
finder := TestResourceFinder{}
//add
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
// get
mutate := pCache.get(Mutate, kind, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
// get
mutate := pCache.get(Mutate, gvr, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
}
}
}
@ -1090,15 +1104,20 @@ func Test_Mutate_Policy(t *testing.T) {
func Test_Generate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newGeneratePolicy(t)
finder := TestResourceFinder{}
//add
setPolicy(pCache, policy)
setPolicy(t, pCache, policy, finder)
for _, rule := range autogen.ComputeRules(policy) {
for _, kind := range rule.MatchResources.Kinds {
// get
generate := pCache.get(Generate, kind, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
group, version, kind, subresource := kubeutils.ParseKindSelector(kind)
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for _, gvr := range gvrs {
// get
generate := pCache.get(Generate, gvr, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
}
}
}
@ -1108,53 +1127,47 @@ func Test_NsMutate_Policy(t *testing.T) {
pCache := newPolicyCache()
policy := newMutatePolicy(t)
nspolicy := newNsMutatePolicy(t)
finder := TestResourceFinder{}
//add
setPolicy(pCache, policy)
setPolicy(pCache, nspolicy)
setPolicy(pCache, policy)
setPolicy(pCache, nspolicy)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, nspolicy, finder)
setPolicy(t, pCache, policy, finder)
setPolicy(t, pCache, nspolicy, finder)
nspace := policy.GetNamespace()
// get
mutate := pCache.get(Mutate, "StatefulSet", "")
mutate := pCache.get(Mutate, statefulsetsGVRS, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
// get
nsMutate := pCache.get(Mutate, "StatefulSet", nspace)
nsMutate := pCache.get(Mutate, statefulsetsGVRS, nspace)
if len(nsMutate) != 1 {
t.Errorf("expected 1 namespace mutate policy, found %v", len(nsMutate))
}
}
func Test_Validate_Enforce_Policy(t *testing.T) {
pCache := newPolicyCache()
policy1 := newValidateAuditPolicy(t)
policy2 := newValidateEnforcePolicy(t)
setPolicy(pCache, policy1)
setPolicy(pCache, policy2)
validateEnforce := pCache.get(ValidateEnforce, "Pod", "")
finder := TestResourceFinder{}
setPolicy(t, pCache, policy1, finder)
setPolicy(t, pCache, policy2, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
if len(validateEnforce) != 2 {
t.Errorf("adding: expected 2 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit := pCache.get(ValidateAudit, "Pod", "")
validateAudit := pCache.get(ValidateAudit, podsGVRS, "")
if len(validateAudit) != 0 {
t.Errorf("adding: expected 0 validate audit policy, found %v", len(validateAudit))
}
unsetPolicy(pCache, policy1)
unsetPolicy(pCache, policy2)
validateEnforce = pCache.get(ValidateEnforce, "Pod", "")
validateEnforce = pCache.get(ValidateEnforce, podsGVRS, "")
if len(validateEnforce) != 0 {
t.Errorf("removing: expected 0 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit = pCache.get(ValidateAudit, "Pod", "")
validateAudit = pCache.get(ValidateAudit, podsGVRS, "")
if len(validateAudit) != 0 {
t.Errorf("removing: expected 0 validate audit policy, found %v", len(validateAudit))
}
@ -1163,59 +1176,51 @@ func Test_Validate_Enforce_Policy(t *testing.T) {
func Test_Get_Policies(t *testing.T) {
cache := NewCache()
policy := newPolicy(t)
finder := TestResourceFinder{}
key, _ := kubecache.MetaNamespaceKeyFunc(policy)
cache.Set(key, policy, make(map[string]string))
validateAudit := cache.GetPolicies(ValidateAudit, "Namespace", "")
cache.Set(key, policy, finder)
validateAudit := cache.GetPolicies(ValidateAudit, namespacesGVRS, "")
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateAudit = cache.GetPolicies(ValidateAudit, "Pod", "test")
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS, "test")
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateEnforce := cache.GetPolicies(ValidateEnforce, "Namespace", "")
validateEnforce := cache.GetPolicies(ValidateEnforce, namespacesGVRS, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := cache.GetPolicies(Mutate, "Pod", "")
mutate := cache.GetPolicies(Mutate, podsGVRS, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := cache.GetPolicies(Generate, "Pod", "")
generate := cache.GetPolicies(Generate, podsGVRS, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
}
func Test_Get_Policies_Ns(t *testing.T) {
cache := NewCache()
policy := newNsPolicy(t)
finder := TestResourceFinder{}
key, _ := kubecache.MetaNamespaceKeyFunc(policy)
cache.Set(key, policy, make(map[string]string))
cache.Set(key, policy, finder)
nspace := policy.GetNamespace()
validateAudit := cache.GetPolicies(ValidateAudit, "Pod", nspace)
validateAudit := cache.GetPolicies(ValidateAudit, podsGVRS, nspace)
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateEnforce := cache.GetPolicies(ValidateEnforce, "Pod", nspace)
validateEnforce := cache.GetPolicies(ValidateEnforce, podsGVRS, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := cache.GetPolicies(Mutate, "Pod", nspace)
mutate := cache.GetPolicies(Mutate, podsGVRS, nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := cache.GetPolicies(Generate, "Pod", nspace)
generate := cache.GetPolicies(Generate, podsGVRS, nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -1225,39 +1230,33 @@ func Test_Get_Policies_Validate_Failure_Action_Overrides(t *testing.T) {
cache := NewCache()
policy1 := newValidateAuditPolicy(t)
policy2 := newValidateEnforcePolicy(t)
finder := TestResourceFinder{}
key1, _ := kubecache.MetaNamespaceKeyFunc(policy1)
cache.Set(key1, policy1, make(map[string]string))
cache.Set(key1, policy1, finder)
key2, _ := kubecache.MetaNamespaceKeyFunc(policy2)
cache.Set(key2, policy2, make(map[string]string))
validateAudit := cache.GetPolicies(ValidateAudit, "Pod", "")
cache.Set(key2, policy2, finder)
validateAudit := cache.GetPolicies(ValidateAudit, podsGVRS, "")
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate audit policy, found %v", len(validateAudit))
}
validateEnforce := cache.GetPolicies(ValidateEnforce, "Pod", "")
validateEnforce := cache.GetPolicies(ValidateEnforce, podsGVRS, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit = cache.GetPolicies(ValidateAudit, "Pod", "test")
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS, "test")
if len(validateAudit) != 2 {
t.Errorf("expected 2 validate audit policy, found %v", len(validateAudit))
}
validateEnforce = cache.GetPolicies(ValidateEnforce, "Pod", "test")
validateEnforce = cache.GetPolicies(ValidateEnforce, podsGVRS, "test")
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit = cache.GetPolicies(ValidateAudit, "Pod", "default")
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS, "default")
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateEnforce = cache.GetPolicies(ValidateEnforce, "Pod", "default")
validateEnforce = cache.GetPolicies(ValidateEnforce, podsGVRS, "default")
if len(validateEnforce) != 2 {
t.Errorf("expected 2 validate enforce policy, found %v", len(validateEnforce))
}
}

View file

@ -5,18 +5,20 @@ import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/autogen"
"github.com/kyverno/kyverno/pkg/clients/dclient"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"go.uber.org/multierr"
"k8s.io/apimachinery/pkg/util/sets"
kcache "k8s.io/client-go/tools/cache"
)
type store interface {
// set inserts a policy in the cache
set(string, kyvernov1.PolicyInterface, map[string]string)
set(string, kyvernov1.PolicyInterface, ResourceFinder) error
// unset removes a policy from the cache
unset(string)
// get finds policies that match a given type, gvk and namespace
get(PolicyType, string, string) []kyvernov1.PolicyInterface
// get finds policies that match a given type, gvr and namespace
get(PolicyType, dclient.GroupVersionResourceSubresource, string) []kyvernov1.PolicyInterface
}
type policyCache struct {
@ -30,11 +32,14 @@ func newPolicyCache() store {
}
}
func (pc *policyCache) set(key string, policy kyvernov1.PolicyInterface, subresourceGVKToKind map[string]string) {
func (pc *policyCache) set(key string, policy kyvernov1.PolicyInterface, client ResourceFinder) error {
pc.lock.Lock()
defer pc.lock.Unlock()
pc.store.set(key, policy, subresourceGVKToKind)
if err := pc.store.set(key, policy, client); err != nil {
return err
}
logger.V(4).Info("policy is added to cache", "key", key)
return nil
}
func (pc *policyCache) unset(key string) {
@ -44,35 +49,27 @@ func (pc *policyCache) unset(key string) {
logger.V(4).Info("policy is removed from cache", "key", key)
}
func (pc *policyCache) get(pkey PolicyType, kind, nspace string) []kyvernov1.PolicyInterface {
func (pc *policyCache) get(pkey PolicyType, gvrs dclient.GroupVersionResourceSubresource, nspace string) []kyvernov1.PolicyInterface {
pc.lock.RLock()
defer pc.lock.RUnlock()
return pc.store.get(pkey, kind, nspace)
return pc.store.get(pkey, gvrs, nspace)
}
type policyMap struct {
// policies maps names to policy interfaces
policies map[string]kyvernov1.PolicyInterface
// kindType stores names of ClusterPolicies and Namespaced Policies.
// Since both the policy name use same type (i.e. string), Both policies can be differentiated based on
// "namespace". namespace policy get stored with policy namespace with policy name"
// kindDataMap {"kind": {{"policytype" : {"policyName","nsname/policyName}}},"kind2": {{"policytype" : {"nsname/policyName" }}}}
kindType map[string]map[PolicyType]sets.Set[string]
// They are accessed first by GVRS then by PolicyType.
kindType map[dclient.GroupVersionResourceSubresource]map[PolicyType]sets.Set[string]
}
func newPolicyMap() *policyMap {
return &policyMap{
policies: map[string]kyvernov1.PolicyInterface{},
kindType: map[string]map[PolicyType]sets.Set[string]{},
kindType: map[dclient.GroupVersionResourceSubresource]map[PolicyType]sets.Set[string]{},
}
}
func computeKind(gvk string) string {
_, k := kubeutils.GetKindFromGVK(gvk)
kind, _ := kubeutils.SplitSubresource(k)
return kind
}
func computeEnforcePolicy(spec *kyvernov1.Spec) bool {
if spec.ValidationFailureAction.Enforce() {
return true
@ -93,31 +90,51 @@ func set(set sets.Set[string], item string, value bool) sets.Set[string] {
}
}
func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface, subresourceGVKToKind map[string]string) {
func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface, client ResourceFinder) error {
var errs []error
enforcePolicy := computeEnforcePolicy(policy.GetSpec())
m.policies[key] = policy
type state struct {
hasMutate, hasValidate, hasGenerate, hasVerifyImages, hasImagesValidationChecks, hasVerifyYAML bool
}
kindStates := map[string]state{}
kindStates := map[dclient.GroupVersionResourceSubresource]state{}
for _, rule := range autogen.ComputeRules(policy) {
entries := sets.New[dclient.GroupVersionResourceSubresource]()
for _, gvk := range rule.MatchResources.GetKinds() {
kind, ok := subresourceGVKToKind[gvk]
if !ok {
kind = computeKind(gvk)
group, version, kind, subresource := kubeutils.ParseKindSelector(gvk)
gvrss, err := client.FindResources(group, version, kind, subresource)
if err != nil {
logger.Error(err, "failed to fetch resource group versions", "group", group, "version", version, "kind", kind)
errs = append(errs, err)
} else {
entries.Insert(gvrss...)
}
}
if entries.Len() > 0 {
// account for pods/ephemeralcontainers special case
if entries.Has(podsGVRS) {
entries.Insert(podsGVRS.WithSubResource("ephemeralcontainers"))
}
hasMutate := rule.HasMutate()
hasValidate := rule.HasValidate()
hasGenerate := rule.HasGenerate()
hasVerifyImages := rule.HasVerifyImages()
hasImagesValidationChecks := rule.HasImagesValidationChecks()
for gvrs := range entries {
entry := kindStates[gvrs]
entry.hasMutate = entry.hasMutate || hasMutate
entry.hasValidate = entry.hasValidate || hasValidate
entry.hasGenerate = entry.hasGenerate || hasGenerate
entry.hasVerifyImages = entry.hasVerifyImages || hasVerifyImages
entry.hasImagesValidationChecks = entry.hasImagesValidationChecks || hasImagesValidationChecks
// TODO: hasVerifyYAML ?
kindStates[gvrs] = entry
}
entry := kindStates[kind]
entry.hasMutate = entry.hasMutate || rule.HasMutate()
entry.hasValidate = entry.hasValidate || rule.HasValidate()
entry.hasGenerate = entry.hasGenerate || rule.HasGenerate()
entry.hasVerifyImages = entry.hasVerifyImages || rule.HasVerifyImages()
entry.hasImagesValidationChecks = entry.hasImagesValidationChecks || rule.HasImagesValidationChecks()
kindStates[kind] = entry
}
}
for kind, state := range kindStates {
if m.kindType[kind] == nil {
m.kindType[kind] = map[PolicyType]sets.Set[string]{
for gvrs, state := range kindStates {
if m.kindType[gvrs] == nil {
m.kindType[gvrs] = map[PolicyType]sets.Set[string]{
Mutate: sets.New[string](),
ValidateEnforce: sets.New[string](),
ValidateAudit: sets.New[string](),
@ -127,29 +144,29 @@ func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface, subresourc
VerifyYAML: sets.New[string](),
}
}
m.kindType[kind][Mutate] = set(m.kindType[kind][Mutate], key, state.hasMutate)
m.kindType[kind][ValidateEnforce] = set(m.kindType[kind][ValidateEnforce], key, state.hasValidate && enforcePolicy)
m.kindType[kind][ValidateAudit] = set(m.kindType[kind][ValidateAudit], key, state.hasValidate && !enforcePolicy)
m.kindType[kind][Generate] = set(m.kindType[kind][Generate], key, state.hasGenerate)
m.kindType[kind][VerifyImagesMutate] = set(m.kindType[kind][VerifyImagesMutate], key, state.hasVerifyImages)
m.kindType[kind][VerifyImagesValidate] = set(m.kindType[kind][VerifyImagesValidate], key, state.hasVerifyImages && state.hasImagesValidationChecks)
m.kindType[kind][VerifyYAML] = set(m.kindType[kind][VerifyYAML], key, state.hasVerifyYAML)
m.kindType[gvrs][Mutate] = set(m.kindType[gvrs][Mutate], key, state.hasMutate)
m.kindType[gvrs][ValidateEnforce] = set(m.kindType[gvrs][ValidateEnforce], key, state.hasValidate && enforcePolicy)
m.kindType[gvrs][ValidateAudit] = set(m.kindType[gvrs][ValidateAudit], key, state.hasValidate && !enforcePolicy)
m.kindType[gvrs][Generate] = set(m.kindType[gvrs][Generate], key, state.hasGenerate)
m.kindType[gvrs][VerifyImagesMutate] = set(m.kindType[gvrs][VerifyImagesMutate], key, state.hasVerifyImages)
m.kindType[gvrs][VerifyImagesValidate] = set(m.kindType[gvrs][VerifyImagesValidate], key, state.hasVerifyImages && state.hasImagesValidationChecks)
m.kindType[gvrs][VerifyYAML] = set(m.kindType[gvrs][VerifyYAML], key, state.hasVerifyYAML)
}
return multierr.Combine(errs...)
}
func (m *policyMap) unset(key string) {
delete(m.policies, key)
for kind := range m.kindType {
for policyType := range m.kindType[kind] {
m.kindType[kind][policyType] = m.kindType[kind][policyType].Delete(key)
for gvrs := range m.kindType {
for policyType := range m.kindType[gvrs] {
m.kindType[gvrs][policyType] = m.kindType[gvrs][policyType].Delete(key)
}
}
}
func (m *policyMap) get(key PolicyType, gvk, namespace string) []kyvernov1.PolicyInterface {
kind := computeKind(gvk)
func (m *policyMap) get(key PolicyType, gvrs dclient.GroupVersionResourceSubresource, namespace string) []kyvernov1.PolicyInterface {
var result []kyvernov1.PolicyInterface
for policyName := range m.kindType[kind][key] {
for policyName := range m.kindType[gvrs][key] {
ns, _, err := kcache.SplitMetaNamespaceKey(policyName)
if err != nil {
logger.Error(err, "failed to parse policy name", "policyName", policyName)

60
pkg/policycache/test.go Normal file
View file

@ -0,0 +1,60 @@
package policycache
import (
"fmt"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"k8s.io/apimachinery/pkg/runtime/schema"
)
var (
podsGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"}
namespacesGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"}
clusterrolesGVR = schema.GroupVersionResource{Group: "rbac.authorization.k8s.io", Version: "v1", Resource: "clusterroles"}
deploymentsGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
statefulsetsGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}
daemonsetsGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}
jobsGVR = schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}
cronjobsGVR = schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "cronjobs"}
replicasetsGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}
replicationcontrollersGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}
podsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: podsGVR}
namespacesGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: namespacesGVR}
clusterrolesGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: clusterrolesGVR}
deploymentsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: deploymentsGVR}
statefulsetsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: statefulsetsGVR}
daemonsetsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: daemonsetsGVR}
jobsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: jobsGVR}
cronjobsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: cronjobsGVR}
replicasetsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: replicasetsGVR}
replicationcontrollersGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: replicationcontrollersGVR}
)
type TestResourceFinder struct{}
func (TestResourceFinder) FindResources(group, version, kind, subresource string) ([]dclient.GroupVersionResourceSubresource, error) {
switch kind {
case "Pod":
return []dclient.GroupVersionResourceSubresource{podsGVRS}, nil
case "Namespace":
return []dclient.GroupVersionResourceSubresource{namespacesGVRS}, nil
case "ClusterRole":
return []dclient.GroupVersionResourceSubresource{clusterrolesGVRS}, nil
case "Deployment":
return []dclient.GroupVersionResourceSubresource{deploymentsGVRS}, nil
case "StatefulSet":
return []dclient.GroupVersionResourceSubresource{statefulsetsGVRS}, nil
case "DaemonSet":
return []dclient.GroupVersionResourceSubresource{daemonsetsGVRS}, nil
case "ReplicaSet":
return []dclient.GroupVersionResourceSubresource{replicasetsGVRS}, nil
case "Job":
return []dclient.GroupVersionResourceSubresource{jobsGVRS}, nil
case "ReplicationController":
return []dclient.GroupVersionResourceSubresource{replicationcontrollersGVRS}, nil
case "CronJob":
return []dclient.GroupVersionResourceSubresource{cronjobsGVRS}, nil
}
return nil, fmt.Errorf("not found: %s", kind)
}

View file

@ -29,6 +29,7 @@ import (
webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
admissionv1 "k8s.io/api/admission/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
corev1listers "k8s.io/client-go/listers/core/v1"
rbacv1listers "k8s.io/client-go/listers/rbac/v1"
)
@ -108,10 +109,14 @@ func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *ad
logger.V(4).Info("received an admission request in validating webhook")
// timestamp at which this admission request got triggered
policies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.ValidateEnforce, kind, request.Namespace)...)
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, kind, request.Namespace)...)
generatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Generate, kind, request.Namespace)...)
imageVerifyValidatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesValidate, kind, request.Namespace)...)
gvrs := dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource(request.Resource),
SubResource: request.SubResource,
}
policies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.ValidateEnforce, gvrs, request.Namespace)...)
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, gvrs, request.Namespace)...)
generatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Generate, gvrs, request.Namespace)...)
imageVerifyValidatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesValidate, gvrs, request.Namespace)...)
policies = append(policies, imageVerifyValidatePolicies...)
if len(policies) == 0 && len(mutatePolicies) == 0 && len(generatePolicies) == 0 {
@ -146,8 +151,12 @@ func (h *handlers) Mutate(ctx context.Context, logger logr.Logger, request *admi
kind := request.Kind.Kind
logger = logger.WithValues("kind", kind)
logger.V(4).Info("received an admission request in mutating webhook")
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, kind, request.Namespace)...)
verifyImagesPolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesMutate, kind, request.Namespace)...)
gvrs := dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource(request.Resource),
SubResource: request.SubResource,
}
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, gvrs, request.Namespace)...)
verifyImagesPolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesMutate, gvrs, request.Namespace)...)
if len(mutatePolicies) == 0 && len(verifyImagesPolicies) == 0 {
logger.V(4).Info("no policies matched mutate admission request")
return admissionutils.ResponseSuccess(request.UID)

View file

@ -271,13 +271,12 @@ func Test_AdmissionResponseValid(t *testing.T) {
assert.NilError(t, err)
key := makeKey(&validPolicy)
subresourceGVKToKind := make(map[string]string)
policyCache.Set(key, &validPolicy, subresourceGVKToKind)
policyCache.Set(key, &validPolicy, policycache.TestResourceFinder{})
request := &v1.AdmissionRequest{
Operation: v1.Create,
Kind: metav1.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
Resource: metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "Pod"},
Resource: metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
Object: runtime.RawExtension{
Raw: []byte(pod),
},
@ -293,7 +292,7 @@ func Test_AdmissionResponseValid(t *testing.T) {
assert.Equal(t, len(response.Warnings), 0)
validPolicy.Spec.ValidationFailureAction = "Enforce"
policyCache.Set(key, &validPolicy, subresourceGVKToKind)
policyCache.Set(key, &validPolicy, policycache.TestResourceFinder{})
response = handlers.Validate(ctx, logger, request, "", time.Now())
assert.Equal(t, response.Allowed, false)
@ -318,7 +317,7 @@ func Test_AdmissionResponseInvalid(t *testing.T) {
request := &v1.AdmissionRequest{
Operation: v1.Create,
Kind: metav1.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
Resource: metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "Pod"},
Resource: metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
Object: runtime.RawExtension{
Raw: []byte(pod),
},
@ -327,8 +326,7 @@ func Test_AdmissionResponseInvalid(t *testing.T) {
keyInvalid := makeKey(&invalidPolicy)
invalidPolicy.Spec.ValidationFailureAction = "Enforce"
subresourceGVKToKind := make(map[string]string)
policyCache.Set(keyInvalid, &invalidPolicy, subresourceGVKToKind)
policyCache.Set(keyInvalid, &invalidPolicy, policycache.TestResourceFinder{})
response := handlers.Validate(ctx, logger, request, "", time.Now())
assert.Equal(t, response.Allowed, false)
@ -336,7 +334,7 @@ func Test_AdmissionResponseInvalid(t *testing.T) {
var ignore kyverno.FailurePolicyType = kyverno.Ignore
invalidPolicy.Spec.FailurePolicy = &ignore
policyCache.Set(keyInvalid, &invalidPolicy, subresourceGVKToKind)
policyCache.Set(keyInvalid, &invalidPolicy, policycache.TestResourceFinder{})
response = handlers.Validate(ctx, logger, request, "", time.Now())
assert.Equal(t, response.Allowed, true)
@ -357,13 +355,12 @@ func Test_ImageVerify(t *testing.T) {
assert.NilError(t, err)
key := makeKey(&policy)
subresourceGVKToKind := make(map[string]string)
policyCache.Set(key, &policy, subresourceGVKToKind)
policyCache.Set(key, &policy, policycache.TestResourceFinder{})
request := &v1.AdmissionRequest{
Operation: v1.Create,
Kind: metav1.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
Resource: metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "Pod"},
Resource: metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
Object: runtime.RawExtension{
Raw: []byte(pod),
},
@ -371,7 +368,7 @@ func Test_ImageVerify(t *testing.T) {
}
policy.Spec.ValidationFailureAction = "Enforce"
policyCache.Set(key, &policy, subresourceGVKToKind)
policyCache.Set(key, &policy, policycache.TestResourceFinder{})
response := handlers.Mutate(ctx, logger, request, "", time.Now())
assert.Equal(t, response.Allowed, false)
@ -379,7 +376,7 @@ func Test_ImageVerify(t *testing.T) {
var ignore kyverno.FailurePolicyType = kyverno.Ignore
policy.Spec.FailurePolicy = &ignore
policyCache.Set(key, &policy, subresourceGVKToKind)
policyCache.Set(key, &policy, policycache.TestResourceFinder{})
response = handlers.Mutate(ctx, logger, request, "", time.Now())
assert.Equal(t, response.Allowed, false)
@ -400,7 +397,7 @@ func Test_MutateAndVerify(t *testing.T) {
assert.NilError(t, err)
key := makeKey(&policy)
policyCache.Set(key, &policy, make(map[string]string))
policyCache.Set(key, &policy, policycache.TestResourceFinder{})
request := &v1.AdmissionRequest{
Operation: v1.Create,

View file

@ -9,6 +9,7 @@ import (
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
@ -152,7 +153,11 @@ func (v *validationHandler) buildAuditResponses(
request *admissionv1.AdmissionRequest,
namespaceLabels map[string]string,
) ([]*engineapi.EngineResponse, error) {
policies := v.pCache.GetPolicies(policycache.ValidateAudit, request.Kind.Kind, request.Namespace)
gvrs := dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource(request.Resource),
SubResource: request.SubResource,
}
policies := v.pCache.GetPolicies(policycache.ValidateAudit, gvrs, request.Namespace)
policyContext, err := v.pcBuilder.Build(request)
if err != nil {
return nil, err

View file

@ -0,0 +1,6 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- policies.yaml
assert:
- policies-assert.yaml

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- resources.yaml

View file

@ -0,0 +1,17 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- script: |
if kubectl scale deployment nginx-deployment --replicas 2
then
exit 0
else
exit 1
fi
- script: |
if kubectl scale sts nginx-sts --replicas 2
then
exit 1
else
exit 0
fi

View file

@ -0,0 +1,9 @@
## Description
This test creates two policies:
- one that denies `Deployment/scale` in Audit mode
- one that denies `StatefulSet/scale` in Enforce mode
It then creates a statefulset and a deployment.
Finally it tries to scale the statefulset and expects failure, then scales the deployment and expects success.

View file

@ -0,0 +1,19 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: deny-scale-deployment
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: deny-scale-statefulset
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -0,0 +1,37 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: deny-scale-deployment
annotations:
pod-policies.kyverno.io/autogen-controllers: none
spec:
validationFailureAction: Audit
background: false
rules:
- name: deny-scale-deployment
match:
any:
- resources:
kinds:
- Deployment/scale
validate:
deny: {}
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: deny-scale-statefulset
annotations:
pod-policies.kyverno.io/autogen-controllers: none
spec:
validationFailureAction: Enforce
background: false
rules:
- name: deny-scale-statefulset
match:
any:
- resources:
kinds:
- StatefulSet/scale
validate:
deny: {}

View file

@ -0,0 +1,45 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
namespace: default
labels:
app: nginx-deployment
spec:
replicas: 1
selector:
matchLabels:
app: nginx-deployment
template:
metadata:
labels:
app: nginx-deployment
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: nginx-sts
namespace: default
labels:
app: nginx-sts
spec:
replicas: 1
selector:
matchLabels:
app: nginx-sts
template:
metadata:
labels:
app: nginx-sts
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80