refactor: do not allow matching with subresource kind (#6625)

* refactor: do not allow matching with subresource kind

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix kuttl

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix kuttl

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fixes

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

---------

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Charles-Edouard Brétéché 2023-03-21 14:28:00 +01:00 committed by GitHub
parent 08def22d04
commit e06c20f5cc
18 changed files with 166 additions and 214 deletions

View file

@ -16,10 +16,20 @@ import (
// GroupVersionResourceSubresource contains a group/version/resource/subresource reference
type GroupVersionResourceSubresource struct {
schema.GroupVersionResource
schema.GroupVersion
Kind string
Resource string
SubResource string
}
func (gvrs GroupVersionResourceSubresource) GroupVersionResource() schema.GroupVersionResource {
return gvrs.WithResource(gvrs.Resource)
}
func (gvrs GroupVersionResourceSubresource) GroupVersionKind() schema.GroupVersionKind {
return gvrs.WithKind(gvrs.Kind)
}
func (gvrs GroupVersionResourceSubresource) ResourceSubresource() string {
if gvrs.SubResource == "" {
return gvrs.Resource
@ -206,7 +216,9 @@ func (c serverResources) findResources(group, version, kind, subresource string)
gvk := getGVK(gv, resource.Group, resource.Version, resource.Kind)
if wildcard.Match(group, gvk.Group) && wildcard.Match(version, gvk.Version) && wildcard.Match(kind, gvk.Kind) {
gvrs := GroupVersionResourceSubresource{
GroupVersionResource: gv.WithResource(resource.Name),
GroupVersion: gv,
Kind: resource.Kind,
Resource: resource.Name,
}
resources[gvrs] = resource
}
@ -229,27 +241,6 @@ func (c serverResources) findResources(group, version, kind, subresource string)
}
}
}
// third if no resource matched, try again but consider subresources this time
if len(resources) == 0 {
for _, list := range serverGroupsAndResources {
gv, err := schema.ParseGroupVersion(list.GroupVersion)
if err != nil {
return nil, err
} else {
for _, resource := range list.APIResources {
gvk := getGVK(gv, resource.Group, resource.Version, resource.Kind)
if wildcard.Match(group, gvk.Group) && wildcard.Match(version, gvk.Version) && wildcard.Match(kind, gvk.Kind) {
parts := strings.Split(resource.Name, "/")
gvrs := GroupVersionResourceSubresource{
GroupVersionResource: gv.WithResource(parts[0]),
SubResource: parts[1],
}
resources[gvrs] = resource
}
}
}
}
}
if kind == "*" && subresource == "*" {
for key, value := range subresources {
resources[key] = value

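For orientation, the first hunk above replaces the embedded `schema.GroupVersionResource` with an embedded `schema.GroupVersion` plus explicit `Kind`, `Resource` and `SubResource` fields, and adds helpers to recover plain GVR/GVK values. A minimal standalone sketch of the refactored type, with an illustrative `pods/exec` value (the example values are assumptions, not part of the commit):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// GroupVersionResourceSubresource mirrors the refactored struct in the hunk above:
// an embedded GroupVersion plus separate Kind, Resource and SubResource fields.
type GroupVersionResourceSubresource struct {
	schema.GroupVersion
	Kind        string
	Resource    string
	SubResource string
}

// GroupVersionResource and GroupVersionKind reuse the embedded GroupVersion's
// WithResource/WithKind helpers, as in the diff.
func (gvrs GroupVersionResourceSubresource) GroupVersionResource() schema.GroupVersionResource {
	return gvrs.WithResource(gvrs.Resource)
}

func (gvrs GroupVersionResourceSubresource) GroupVersionKind() schema.GroupVersionKind {
	return gvrs.WithKind(gvrs.Kind)
}

// ResourceSubresource joins resource and subresource into the "pods/exec" form
// used for webhook rules.
func (gvrs GroupVersionResourceSubresource) ResourceSubresource() string {
	if gvrs.SubResource == "" {
		return gvrs.Resource
	}
	return gvrs.Resource + "/" + gvrs.SubResource
}

func main() {
	gvrs := GroupVersionResourceSubresource{
		GroupVersion: schema.GroupVersion{Group: "", Version: "v1"},
		Kind:         "Pod",
		Resource:     "pods",
		SubResource:  "exec",
	}
	fmt.Println(gvrs.GroupVersionResource()) // /v1, Resource=pods
	fmt.Println(gvrs.GroupVersionKind())     // /v1, Kind=Pod
	fmt.Println(gvrs.ResourceSubresource())  // pods/exec
}
```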
View file

@ -235,7 +235,7 @@ func (c *controller) updateDynamicWatchers(ctx context.Context) error {
logger.Info("kind is not supported", "gvk", gvk)
} else {
if slices.Contains(api.Verbs, "list") && slices.Contains(api.Verbs, "watch") {
gvkToGvr[gvk] = gvrs.GroupVersionResource
gvkToGvr[gvk] = gvrs.GroupVersionResource()
} else {
logger.Info("list/watch not supported for kind", "kind", kind)
}

View file

@ -829,25 +829,17 @@ func (c *controller) mergeWebhook(dst *webhook, policy kyvernov1.PolicyInterface
matchedGVK = append(matchedGVK, rule.MatchResources.GetKinds()...)
}
}
var gvrsList []dclient.GroupVersionResourceSubresource
var gvrsList []schema.GroupVersionResource
for _, gvk := range matchedGVK {
// NOTE: webhook stores GVR in its rules while policy stores GVK in its rules definition
group, version, kind, subresource := kubeutils.ParseKindSelector(gvk)
// if kind is `*` no need to lookup resources
if kind == "*" && subresource == "*" {
gvrsList = append(gvrsList, dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{Group: group, Version: version, Resource: "*"},
SubResource: "*",
})
gvrsList = append(gvrsList, schema.GroupVersionResource{Group: group, Version: version, Resource: "*/*"})
} else if kind == "*" && subresource == "" {
gvrsList = append(gvrsList, dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{Group: group, Version: version, Resource: "*"},
})
gvrsList = append(gvrsList, schema.GroupVersionResource{Group: group, Version: version, Resource: "*"})
} else if kind == "*" && subresource != "" {
gvrsList = append(gvrsList, dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{Group: group, Version: version, Resource: "*"},
SubResource: subresource,
})
gvrsList = append(gvrsList, schema.GroupVersionResource{Group: group, Version: version, Resource: "*/" + subresource})
} else {
gvrss, err := c.discoveryClient.FindResources(group, version, kind, subresource)
if err != nil {
@ -855,7 +847,7 @@ func (c *controller) mergeWebhook(dst *webhook, policy kyvernov1.PolicyInterface
continue
}
for gvrs := range gvrss {
gvrsList = append(gvrsList, gvrs)
gvrsList = append(gvrsList, gvrs.GroupVersion.WithResource(gvrs.ResourceSubresource()))
}
}
}

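In `mergeWebhook` above, wildcard kind selectors now map straight to `schema.GroupVersionResource` values with the subresource folded into the resource string. A small sketch of that mapping, assuming the group/version/kind/subresource parts produced by `kubeutils.ParseKindSelector` (the helper name `wildcardGVR` is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

// wildcardGVR reproduces the three wildcard branches above; concrete kinds go
// through discovery (FindResources) instead, so they are not handled here.
func wildcardGVR(group, version, kind, subresource string) (schema.GroupVersionResource, bool) {
	switch {
	case kind == "*" && subresource == "*":
		return schema.GroupVersionResource{Group: group, Version: version, Resource: "*/*"}, true
	case kind == "*" && subresource == "":
		return schema.GroupVersionResource{Group: group, Version: version, Resource: "*"}, true
	case kind == "*":
		return schema.GroupVersionResource{Group: group, Version: version, Resource: "*/" + subresource}, true
	default:
		return schema.GroupVersionResource{}, false
	}
}

func main() {
	gvr, ok := wildcardGVR("", "v1", "*", "scale")
	fmt.Println(gvr, ok) // /v1, Resource=*/scale true
}
```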
View file

@ -4,7 +4,6 @@ import (
"strings"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/utils"
"golang.org/x/exp/slices"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
@ -71,13 +70,13 @@ func (wh *webhook) buildRulesWithOperations(ops ...admissionregistrationv1.Opera
return rules
}
func (wh *webhook) set(gvrs dclient.GroupVersionResourceSubresource) {
func (wh *webhook) set(gvrs schema.GroupVersionResource) {
gv := gvrs.GroupVersion()
resources := wh.rules[gv]
if resources == nil {
wh.rules[gv] = sets.New(gvrs.ResourceSubresource())
wh.rules[gv] = sets.New(gvrs.Resource)
} else {
resources.Insert(gvrs.ResourceSubresource())
resources.Insert(gvrs.Resource)
}
}

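The webhook now records plain `schema.GroupVersionResource` values, with any subresource already part of the `Resource` string. A self-contained sketch of the `set` logic above, using a simplified per-GroupVersion rules map (the `webhookRules` type is illustrative):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
)

// webhookRules stands in for the real webhook's rules field: resources are
// grouped per GroupVersion as a set of resource strings.
type webhookRules map[schema.GroupVersion]sets.Set[string]

func (r webhookRules) set(gvr schema.GroupVersionResource) {
	gv := gvr.GroupVersion()
	if r[gv] == nil {
		r[gv] = sets.New(gvr.Resource)
	} else {
		r[gv].Insert(gvr.Resource)
	}
}

func main() {
	rules := webhookRules{}
	rules.set(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"})
	rules.set(schema.GroupVersionResource{Group: "", Version: "v1", Resource: "pods/ephemeralcontainers"})
	rules.set(schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments/scale"})
	for gv, resources := range rules {
		fmt.Println(gv, sets.List(resources))
	}
}
```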
View file

@ -6,7 +6,6 @@ import (
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/autogen"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"gotest.tools/assert"
admissionregistrationv1 "k8s.io/api/admissionregistration/v1"
@ -17,10 +16,8 @@ func Test_webhook_isEmpty(t *testing.T) {
empty := newWebhook(DefaultWebhookTimeout, admissionregistrationv1.Ignore)
assert.Equal(t, empty.isEmpty(), true)
notEmpty := newWebhook(DefaultWebhookTimeout, admissionregistrationv1.Ignore)
notEmpty.set(dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource{
Group: "", Version: "v1", Resource: "pods",
},
notEmpty.set(schema.GroupVersionResource{
Group: "", Version: "v1", Resource: "pods",
})
assert.Equal(t, notEmpty.isEmpty(), false)
}

View file

@ -83,7 +83,7 @@ func getTargets(client dclient.Interface, target kyvernov1.ResourceSpec, ctx eng
return nil, err
}
for gvrs := range gvrss {
dyn := client.GetDynamicInterface().Resource(gvrs.GroupVersionResource)
dyn := client.GetDynamicInterface().Resource(gvrs.GroupVersionResource())
var sub []string
if gvrs.SubResource != "" {
sub = []string{gvrs.SubResource}
@ -96,7 +96,11 @@ func getTargets(client dclient.Interface, target kyvernov1.ResourceSpec, ctx eng
if err != nil {
return nil, err
}
targetObjects = append(targetObjects, resourceInfo{unstructured: *obj, subresource: gvrs.SubResource, parentResourceGVR: metav1.GroupVersionResource(gvrs.GroupVersionResource)})
targetObjects = append(targetObjects, resourceInfo{
unstructured: *obj,
subresource: gvrs.SubResource,
parentResourceGVR: metav1.GroupVersionResource(gvrs.GroupVersionResource()),
})
} else {
// we can use `LIST`
if gvrs.SubResource == "" {
@ -132,7 +136,11 @@ func getTargets(client dclient.Interface, target kyvernov1.ResourceSpec, ctx eng
if err != nil {
return nil, err
}
targetObjects = append(targetObjects, resourceInfo{unstructured: *obj, subresource: gvrs.SubResource, parentResourceGVR: metav1.GroupVersionResource(gvrs.GroupVersionResource)})
targetObjects = append(targetObjects, resourceInfo{
unstructured: *obj,
subresource: gvrs.SubResource,
parentResourceGVR: metav1.GroupVersionResource(gvrs.GroupVersionResource()),
})
}
}
}

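Subresources cannot be listed, which is why `getTargets` above falls back to `GET` on the parent resource with the subresource passed as an extra path segment. A hedged sketch of that pattern with client-go's dynamic client (the namespace and resource name are assumptions, not taken from the commit):

```go
package example

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/dynamic"
	"k8s.io/client-go/rest"
)

// getScale fetches the scale subresource of a deployment by name; listing
// "deployments/scale" directly is not possible, so the subresource is appended
// to a GET on the parent resource.
func getScale(cfg *rest.Config, namespace, name string) error {
	client, err := dynamic.NewForConfig(cfg)
	if err != nil {
		return err
	}
	gvr := schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}
	obj, err := client.Resource(gvr).Namespace(namespace).Get(context.TODO(), name, metav1.GetOptions{}, "scale")
	if err != nil {
		return err
	}
	fmt.Println(obj.GetKind()) // Scale
	return nil
}
```

This keeps the parent GVR (`apps/v1`, `deployments`) separate from the subresource segment, matching the `parentResourceGVR`/`subresource` split stored in `resourceInfo` above.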
View file

@ -5,6 +5,7 @@ import (
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/utils/wildcard"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
)
type ResourceFinder interface {
@ -19,7 +20,7 @@ type Cache interface {
Unset(string)
// GetPolicies returns all policies that apply to a namespace, including cluster-wide policies
// If the namespace is empty, only cluster-wide policies are returned
GetPolicies(PolicyType, dclient.GroupVersionResourceSubresource, string) []kyvernov1.PolicyInterface
GetPolicies(PolicyType, schema.GroupVersionResource, string, string) []kyvernov1.PolicyInterface
}
type cache struct {
@ -41,15 +42,15 @@ func (c *cache) Unset(key string) {
c.store.unset(key)
}
func (c *cache) GetPolicies(pkey PolicyType, gvrs dclient.GroupVersionResourceSubresource, nspace string) []kyvernov1.PolicyInterface {
func (c *cache) GetPolicies(pkey PolicyType, gvr schema.GroupVersionResource, subresource string, nspace string) []kyvernov1.PolicyInterface {
var result []kyvernov1.PolicyInterface
result = append(result, c.store.get(pkey, gvrs, "")...)
result = append(result, c.store.get(pkey, gvr, subresource, "")...)
if nspace != "" {
result = append(result, c.store.get(pkey, gvrs, nspace)...)
result = append(result, c.store.get(pkey, gvr, subresource, nspace)...)
}
// also get policies with ValidateEnforce
if pkey == ValidateAudit {
result = append(result, c.store.get(ValidateEnforce, gvrs, "")...)
result = append(result, c.store.get(ValidateEnforce, gvr, subresource, "")...)
}
if pkey == ValidateAudit || pkey == ValidateEnforce {
result = filterPolicies(pkey, result, nspace)

View file

@ -35,15 +35,15 @@ func Test_All(t *testing.T) {
assert.NilError(t, err)
for gvr := range gvrs {
// get
mutate := pCache.get(Mutate, gvr, "")
mutate := pCache.get(Mutate, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, gvr, "")
validateEnforce := pCache.get(ValidateEnforce, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, "")
generate := pCache.get(Generate, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -53,7 +53,7 @@ func Test_All(t *testing.T) {
// remove
unsetPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
validateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
assert.Assert(t, len(validateEnforce) == 0)
}
@ -70,16 +70,16 @@ func Test_Add_Duplicate_Policy(t *testing.T) {
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for gvr := range gvrs {
mutate := pCache.get(Mutate, gvr, "")
mutate := pCache.get(Mutate, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, gvr, "")
validateEnforce := pCache.get(ValidateEnforce, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, "")
generate := pCache.get(Generate, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -103,12 +103,12 @@ func Test_Add_Validate_Audit(t *testing.T) {
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for gvr := range gvrs {
validateEnforce := pCache.get(ValidateEnforce, gvr, "")
validateEnforce := pCache.get(ValidateEnforce, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate (enforce) policy, found %v", len(validateEnforce))
}
validateAudit := pCache.get(ValidateAudit, gvr, "")
validateAudit := pCache.get(ValidateAudit, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate (audit) policy, found %v", len(validateAudit))
}
@ -122,20 +122,20 @@ func Test_Add_Remove(t *testing.T) {
policy := newPolicy(t)
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
validateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := pCache.get(Mutate, podsGVRS, "")
mutate := pCache.get(Mutate, podsGVRS.GroupVersionResource(), "", "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := pCache.get(Generate, podsGVRS, "")
generate := pCache.get(Generate, podsGVRS.GroupVersionResource(), "", "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -146,20 +146,20 @@ func Test_Add_Remove_Any(t *testing.T) {
policy := newAnyPolicy(t)
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
validateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := pCache.get(Mutate, podsGVRS, "")
mutate := pCache.get(Mutate, podsGVRS.GroupVersionResource(), "", "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := pCache.get(Generate, podsGVRS, "")
generate := pCache.get(Generate, podsGVRS.GroupVersionResource(), "", "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -901,15 +901,15 @@ func Test_Ns_All(t *testing.T) {
assert.NilError(t, err)
for gvr := range gvrs {
// get
mutate := pCache.get(Mutate, gvr, nspace)
mutate := pCache.get(Mutate, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
validateEnforce := pCache.get(ValidateEnforce, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, nspace)
generate := pCache.get(Generate, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -918,7 +918,7 @@ func Test_Ns_All(t *testing.T) {
}
// remove
unsetPolicy(pCache, policy)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, nspace)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", nspace)
assert.Assert(t, len(validateEnforce) == 0)
}
@ -936,15 +936,15 @@ func Test_Ns_Add_Duplicate_Policy(t *testing.T) {
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for gvr := range gvrs {
mutate := pCache.get(Mutate, gvr, nspace)
mutate := pCache.get(Mutate, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
validateEnforce := pCache.get(ValidateEnforce, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
generate := pCache.get(Generate, gvr, nspace)
generate := pCache.get(Generate, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -969,12 +969,12 @@ func Test_Ns_Add_Validate_Audit(t *testing.T) {
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for gvr := range gvrs {
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
validateEnforce := pCache.get(ValidateEnforce, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate (enforce) policy, found %v", len(validateEnforce))
}
validateAudit := pCache.get(ValidateAudit, gvr, nspace)
validateAudit := pCache.get(ValidateAudit, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate (audit) policy, found %v", len(validateAudit))
}
@ -989,12 +989,12 @@ func Test_Ns_Add_Remove(t *testing.T) {
finder := TestResourceFinder{}
nspace := policy.GetNamespace()
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, nspace)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS, nspace)
deletedValidateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", nspace)
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -1012,7 +1012,7 @@ func Test_GVk_Cache(t *testing.T) {
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for gvr := range gvrs {
generate := pCache.get(Generate, gvr, "")
generate := pCache.get(Generate, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -1026,12 +1026,12 @@ func Test_GVK_Add_Remove(t *testing.T) {
policy := newGVKPolicy(t)
finder := TestResourceFinder{}
setPolicy(t, pCache, policy, finder)
generate := pCache.get(Generate, clusterrolesGVRS, "")
generate := pCache.get(Generate, clusterrolesGVRS.GroupVersionResource(), "", "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
unsetPolicy(pCache, policy)
deletedGenerate := pCache.get(Generate, clusterrolesGVRS, "")
deletedGenerate := pCache.get(Generate, clusterrolesGVRS.GroupVersionResource(), "", "")
if len(deletedGenerate) != 0 {
t.Errorf("expected 0 generate policy, found %v", len(deletedGenerate))
}
@ -1050,7 +1050,7 @@ func Test_Add_Validate_Enforce(t *testing.T) {
gvrs, err := finder.FindResources(group, version, kind, subresource)
assert.NilError(t, err)
for gvr := range gvrs {
validateEnforce := pCache.get(ValidateEnforce, gvr, nspace)
validateEnforce := pCache.get(ValidateEnforce, gvr.GroupVersionResource(), gvr.SubResource, nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate policy, found %v", len(validateEnforce))
}
@ -1066,12 +1066,12 @@ func Test_Ns_Add_Remove_User(t *testing.T) {
finder := TestResourceFinder{}
// kind := "Deployment"
setPolicy(t, pCache, policy, finder)
validateEnforce := pCache.get(ValidateEnforce, deploymentsGVRS, nspace)
validateEnforce := pCache.get(ValidateEnforce, deploymentsGVRS.GroupVersionResource(), "", nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
unsetPolicy(pCache, policy)
deletedValidateEnforce := pCache.get(ValidateEnforce, deploymentsGVRS, nspace)
deletedValidateEnforce := pCache.get(ValidateEnforce, deploymentsGVRS.GroupVersionResource(), "", nspace)
if len(deletedValidateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(deletedValidateEnforce))
}
@ -1092,7 +1092,7 @@ func Test_Mutate_Policy(t *testing.T) {
assert.NilError(t, err)
for gvr := range gvrs {
// get
mutate := pCache.get(Mutate, gvr, "")
mutate := pCache.get(Mutate, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
@ -1114,7 +1114,7 @@ func Test_Generate_Policy(t *testing.T) {
assert.NilError(t, err)
for gvr := range gvrs {
// get
generate := pCache.get(Generate, gvr, "")
generate := pCache.get(Generate, gvr.GroupVersionResource(), gvr.SubResource, "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -1135,12 +1135,12 @@ func Test_NsMutate_Policy(t *testing.T) {
setPolicy(t, pCache, nspolicy, finder)
nspace := policy.GetNamespace()
// get
mutate := pCache.get(Mutate, statefulsetsGVRS, "")
mutate := pCache.get(Mutate, statefulsetsGVRS.GroupVersionResource(), "", "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
// get
nsMutate := pCache.get(Mutate, statefulsetsGVRS, nspace)
nsMutate := pCache.get(Mutate, statefulsetsGVRS.GroupVersionResource(), "", nspace)
if len(nsMutate) != 1 {
t.Errorf("expected 1 namespace mutate policy, found %v", len(nsMutate))
}
@ -1153,21 +1153,21 @@ func Test_Validate_Enforce_Policy(t *testing.T) {
finder := TestResourceFinder{}
setPolicy(t, pCache, policy1, finder)
setPolicy(t, pCache, policy2, finder)
validateEnforce := pCache.get(ValidateEnforce, podsGVRS, "")
validateEnforce := pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
if len(validateEnforce) != 2 {
t.Errorf("adding: expected 2 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit := pCache.get(ValidateAudit, podsGVRS, "")
validateAudit := pCache.get(ValidateAudit, podsGVRS.GroupVersionResource(), "", "")
if len(validateAudit) != 0 {
t.Errorf("adding: expected 0 validate audit policy, found %v", len(validateAudit))
}
unsetPolicy(pCache, policy1)
unsetPolicy(pCache, policy2)
validateEnforce = pCache.get(ValidateEnforce, podsGVRS, "")
validateEnforce = pCache.get(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
if len(validateEnforce) != 0 {
t.Errorf("removing: expected 0 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit = pCache.get(ValidateAudit, podsGVRS, "")
validateAudit = pCache.get(ValidateAudit, podsGVRS.GroupVersionResource(), "", "")
if len(validateAudit) != 0 {
t.Errorf("removing: expected 0 validate audit policy, found %v", len(validateAudit))
}
@ -1179,23 +1179,23 @@ func Test_Get_Policies(t *testing.T) {
finder := TestResourceFinder{}
key, _ := kubecache.MetaNamespaceKeyFunc(policy)
cache.Set(key, policy, finder)
validateAudit := cache.GetPolicies(ValidateAudit, namespacesGVRS, "")
validateAudit := cache.GetPolicies(ValidateAudit, namespacesGVRS.GroupVersionResource(), "", "")
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS, "test")
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS.GroupVersionResource(), "", "test")
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateEnforce := cache.GetPolicies(ValidateEnforce, namespacesGVRS, "")
validateEnforce := cache.GetPolicies(ValidateEnforce, namespacesGVRS.GroupVersionResource(), "", "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := cache.GetPolicies(Mutate, podsGVRS, "")
mutate := cache.GetPolicies(Mutate, podsGVRS.GroupVersionResource(), "", "")
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := cache.GetPolicies(Generate, podsGVRS, "")
generate := cache.GetPolicies(Generate, podsGVRS.GroupVersionResource(), "", "")
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -1208,19 +1208,19 @@ func Test_Get_Policies_Ns(t *testing.T) {
key, _ := kubecache.MetaNamespaceKeyFunc(policy)
cache.Set(key, policy, finder)
nspace := policy.GetNamespace()
validateAudit := cache.GetPolicies(ValidateAudit, podsGVRS, nspace)
validateAudit := cache.GetPolicies(ValidateAudit, podsGVRS.GroupVersionResource(), "", nspace)
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateEnforce := cache.GetPolicies(ValidateEnforce, podsGVRS, nspace)
validateEnforce := cache.GetPolicies(ValidateEnforce, podsGVRS.GroupVersionResource(), "", nspace)
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
mutate := cache.GetPolicies(Mutate, podsGVRS, nspace)
mutate := cache.GetPolicies(Mutate, podsGVRS.GroupVersionResource(), "", nspace)
if len(mutate) != 1 {
t.Errorf("expected 1 mutate policy, found %v", len(mutate))
}
generate := cache.GetPolicies(Generate, podsGVRS, nspace)
generate := cache.GetPolicies(Generate, podsGVRS.GroupVersionResource(), "", nspace)
if len(generate) != 1 {
t.Errorf("expected 1 generate policy, found %v", len(generate))
}
@ -1235,27 +1235,27 @@ func Test_Get_Policies_Validate_Failure_Action_Overrides(t *testing.T) {
cache.Set(key1, policy1, finder)
key2, _ := kubecache.MetaNamespaceKeyFunc(policy2)
cache.Set(key2, policy2, finder)
validateAudit := cache.GetPolicies(ValidateAudit, podsGVRS, "")
validateAudit := cache.GetPolicies(ValidateAudit, podsGVRS.GroupVersionResource(), "", "")
if len(validateAudit) != 1 {
t.Errorf("expected 1 validate audit policy, found %v", len(validateAudit))
}
validateEnforce := cache.GetPolicies(ValidateEnforce, podsGVRS, "")
validateEnforce := cache.GetPolicies(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "")
if len(validateEnforce) != 1 {
t.Errorf("expected 1 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS, "test")
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS.GroupVersionResource(), "", "test")
if len(validateAudit) != 2 {
t.Errorf("expected 2 validate audit policy, found %v", len(validateAudit))
}
validateEnforce = cache.GetPolicies(ValidateEnforce, podsGVRS, "test")
validateEnforce = cache.GetPolicies(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "test")
if len(validateEnforce) != 0 {
t.Errorf("expected 0 validate enforce policy, found %v", len(validateEnforce))
}
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS, "default")
validateAudit = cache.GetPolicies(ValidateAudit, podsGVRS.GroupVersionResource(), "", "default")
if len(validateAudit) != 0 {
t.Errorf("expected 0 validate audit policy, found %v", len(validateAudit))
}
validateEnforce = cache.GetPolicies(ValidateEnforce, podsGVRS, "default")
validateEnforce = cache.GetPolicies(ValidateEnforce, podsGVRS.GroupVersionResource(), "", "default")
if len(validateEnforce) != 2 {
t.Errorf("expected 2 validate enforce policy, found %v", len(validateEnforce))
}

View file

@ -5,9 +5,9 @@ import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/autogen"
"github.com/kyverno/kyverno/pkg/clients/dclient"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"go.uber.org/multierr"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
kcache "k8s.io/client-go/tools/cache"
)
@ -17,8 +17,8 @@ type store interface {
set(string, kyvernov1.PolicyInterface, ResourceFinder) error
// unset removes a policy from the cache
unset(string)
// get finds policies that match a given type, gvr and namespace
get(PolicyType, dclient.GroupVersionResourceSubresource, string) []kyvernov1.PolicyInterface
// get finds policies that match a given type, gvr, subresource and namespace
get(PolicyType, schema.GroupVersionResource, string, string) []kyvernov1.PolicyInterface
}
type policyCache struct {
@ -49,24 +49,33 @@ func (pc *policyCache) unset(key string) {
logger.V(4).Info("policy is removed from cache", "key", key)
}
func (pc *policyCache) get(pkey PolicyType, gvrs dclient.GroupVersionResourceSubresource, nspace string) []kyvernov1.PolicyInterface {
func (pc *policyCache) get(pkey PolicyType, gvr schema.GroupVersionResource, subresource string, nspace string) []kyvernov1.PolicyInterface {
pc.lock.RLock()
defer pc.lock.RUnlock()
return pc.store.get(pkey, gvrs, nspace)
return pc.store.get(pkey, gvr, subresource, nspace)
}
type policyKey struct {
Group string
Version string
Resource string
SubResource string
}
var podsKey = policyKey{"", "v1", "pods", ""}
type policyMap struct {
// policies maps names to policy interfaces
policies map[string]kyvernov1.PolicyInterface
// kindType stores names of ClusterPolicies and Namespaced Policies.
// They are accessed first by GVRS then by PolicyType.
kindType map[dclient.GroupVersionResourceSubresource]map[PolicyType]sets.Set[string]
kindType map[policyKey]map[PolicyType]sets.Set[string]
}
func newPolicyMap() *policyMap {
return &policyMap{
policies: map[string]kyvernov1.PolicyInterface{},
kindType: map[dclient.GroupVersionResourceSubresource]map[PolicyType]sets.Set[string]{},
kindType: map[policyKey]map[PolicyType]sets.Set[string]{},
}
}
@ -97,9 +106,9 @@ func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface, client Res
type state struct {
hasMutate, hasValidate, hasGenerate, hasVerifyImages, hasImagesValidationChecks bool
}
kindStates := map[dclient.GroupVersionResourceSubresource]state{}
kindStates := map[policyKey]state{}
for _, rule := range autogen.ComputeRules(policy) {
entries := sets.New[dclient.GroupVersionResourceSubresource]()
entries := sets.New[policyKey]()
for _, gvk := range rule.MatchResources.GetKinds() {
group, version, kind, subresource := kubeutils.ParseKindSelector(gvk)
gvrss, err := client.FindResources(group, version, kind, subresource)
@ -108,14 +117,24 @@ func (m *policyMap) set(key string, policy kyvernov1.PolicyInterface, client Res
errs = append(errs, err)
} else {
for gvrs := range gvrss {
entries.Insert(gvrs)
entries.Insert(policyKey{
Group: gvrs.Group,
Version: gvrs.Version,
Resource: gvrs.Resource,
SubResource: gvrs.SubResource,
})
}
}
}
if entries.Len() > 0 {
// account for pods/ephemeralcontainers special case
if entries.Has(podsGVRS) {
entries.Insert(podsGVRS.WithSubResource("ephemeralcontainers"))
if entries.Has(podsKey) {
entries.Insert(policyKey{
Group: podsKey.Group,
Version: podsKey.Version,
Resource: podsKey.Resource,
SubResource: "ephemeralcontainers",
})
}
hasMutate := rule.HasMutate()
hasValidate := rule.HasValidate()
@ -163,9 +182,10 @@ func (m *policyMap) unset(key string) {
}
}
func (m *policyMap) get(key PolicyType, gvrs dclient.GroupVersionResourceSubresource, namespace string) []kyvernov1.PolicyInterface {
func (m *policyMap) get(key PolicyType, gvr schema.GroupVersionResource, subresource string, namespace string) []kyvernov1.PolicyInterface {
var result []kyvernov1.PolicyInterface
for policyName := range m.kindType[gvrs][key] {
pKey := policyKey{gvr.Group, gvr.Version, gvr.Resource, subresource}
for policyName := range m.kindType[pKey][key] {
ns, _, err := kcache.SplitMetaNamespaceKey(policyName)
if err != nil {
logger.Error(err, "failed to parse policy name", "policyName", policyName)

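A minimal sketch of the `policyKey`-based indexing introduced above, including the pods/ephemeralcontainers special case; the `index` type and policy names are illustrative simplifications of the real `kindType` map, which is additionally keyed by `PolicyType`:

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/apimachinery/pkg/util/sets"
)

// policyKey mirrors the cache key from the hunk above: the subresource is a
// separate field rather than part of a combined GVRS value.
type policyKey struct {
	Group       string
	Version     string
	Resource    string
	SubResource string
}

type index map[policyKey]sets.Set[string]

func (i index) add(k policyKey, policy string) {
	if i[k] == nil {
		i[k] = sets.New[string]()
	}
	i[k].Insert(policy)
}

func (i index) get(gvr schema.GroupVersionResource, subresource string) []string {
	return sets.List(i[policyKey{gvr.Group, gvr.Version, gvr.Resource, subresource}])
}

func main() {
	idx := index{}
	podsKey := policyKey{"", "v1", "pods", ""}
	idx.add(podsKey, "default/require-labels")
	// pods implies pods/ephemeralcontainers, mirroring the special case above
	idx.add(policyKey{"", "v1", "pods", "ephemeralcontainers"}, "default/require-labels")

	pods := schema.GroupVersionResource{Version: "v1", Resource: "pods"}
	fmt.Println(idx.get(pods, ""))                    // [default/require-labels]
	fmt.Println(idx.get(pods, "ephemeralcontainers")) // [default/require-labels]
}
```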
View file

@ -20,18 +20,26 @@ var (
replicasetsGVR = schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}
replicationcontrollersGVR = schema.GroupVersionResource{Group: "", Version: "v1", Resource: "replicationcontrollers"}
podsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: podsGVR}
namespacesGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: namespacesGVR}
clusterrolesGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: clusterrolesGVR}
deploymentsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: deploymentsGVR}
statefulsetsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: statefulsetsGVR}
daemonsetsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: daemonsetsGVR}
jobsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: jobsGVR}
cronjobsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: cronjobsGVR}
replicasetsGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: replicasetsGVR}
replicationcontrollersGVRS = dclient.GroupVersionResourceSubresource{GroupVersionResource: replicationcontrollersGVR}
podsGVRS = mapGVR(podsGVR, "Pod")
namespacesGVRS = mapGVR(namespacesGVR, "Namespace")
clusterrolesGVRS = mapGVR(clusterrolesGVR, "ClusterRole")
deploymentsGVRS = mapGVR(deploymentsGVR, "Deployment")
statefulsetsGVRS = mapGVR(statefulsetsGVR, "StatefulSet")
daemonsetsGVRS = mapGVR(daemonsetsGVR, "DaemonSet")
jobsGVRS = mapGVR(jobsGVR, "Job")

cronjobsGVRS = mapGVR(cronjobsGVR, "CronJob")
replicasetsGVRS = mapGVR(replicasetsGVR, "ReplicaSet")
replicationcontrollersGVRS = mapGVR(replicationcontrollersGVR, "ReplicationController")
)
func mapGVR(gvr schema.GroupVersionResource, kind string) dclient.GroupVersionResourceSubresource {
return dclient.GroupVersionResourceSubresource{
GroupVersion: gvr.GroupVersion(),
Kind: kind,
Resource: gvr.Resource,
}
}
type TestResourceFinder struct{}
func (TestResourceFinder) FindResources(group, version, kind, subresource string) (map[dclient.GroupVersionResourceSubresource]metav1.APIResource, error) {

View file

@ -107,14 +107,11 @@ func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *ad
logger.V(4).Info("received an admission request in validating webhook")
// timestamp at which this admission request got triggered
gvrs := dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource(request.Resource),
SubResource: request.SubResource,
}
policies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.ValidateEnforce, gvrs, request.Namespace)...)
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, gvrs, request.Namespace)...)
generatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Generate, gvrs, request.Namespace)...)
imageVerifyValidatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesValidate, gvrs, request.Namespace)...)
gvr := schema.GroupVersionResource(request.Resource)
policies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.ValidateEnforce, gvr, request.SubResource, request.Namespace)...)
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, gvr, request.SubResource, request.Namespace)...)
generatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Generate, gvr, request.SubResource, request.Namespace)...)
imageVerifyValidatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesValidate, gvr, request.SubResource, request.Namespace)...)
policies = append(policies, imageVerifyValidatePolicies...)
if len(policies) == 0 && len(mutatePolicies) == 0 && len(generatePolicies) == 0 {
@ -149,12 +146,9 @@ func (h *handlers) Mutate(ctx context.Context, logger logr.Logger, request *admi
kind := request.Kind.Kind
logger = logger.WithValues("kind", kind)
logger.V(4).Info("received an admission request in mutating webhook")
gvrs := dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource(request.Resource),
SubResource: request.SubResource,
}
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, gvrs, request.Namespace)...)
verifyImagesPolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesMutate, gvrs, request.Namespace)...)
gvr := schema.GroupVersionResource(request.Resource)
mutatePolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.Mutate, gvr, request.SubResource, request.Namespace)...)
verifyImagesPolicies := filterPolicies(failurePolicy, h.pCache.GetPolicies(policycache.VerifyImagesMutate, gvr, request.SubResource, request.Namespace)...)
if len(mutatePolicies) == 0 && len(verifyImagesPolicies) == 0 {
logger.V(4).Info("no policies matched mutate admission request")
return admissionutils.ResponseSuccess(request.UID)

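The admission handlers above now convert the request's resource directly into a `schema.GroupVersionResource` and pass the subresource as its own argument. A sketch of that lookup shape against the refactored `GetPolicies` signature (the `enforcePoliciesFor` helper is illustrative, not part of the commit):

```go
package webhooks

import (
	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
	"github.com/kyverno/kyverno/pkg/policycache"
	admissionv1 "k8s.io/api/admission/v1"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

// enforcePoliciesFor looks up enforce-mode validate policies for an admission
// request: metav1.GroupVersionResource converts field-for-field to
// schema.GroupVersionResource, and the subresource travels separately instead
// of being wrapped in a GroupVersionResourceSubresource value.
func enforcePoliciesFor(cache policycache.Cache, request *admissionv1.AdmissionRequest) []kyvernov1.PolicyInterface {
	gvr := schema.GroupVersionResource(request.Resource)
	return cache.GetPolicies(policycache.ValidateEnforce, gvr, request.SubResource, request.Namespace)
}
```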
View file

@ -9,7 +9,6 @@ import (
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
@ -152,11 +151,8 @@ func (v *validationHandler) buildAuditResponses(
request *admissionv1.AdmissionRequest,
namespaceLabels map[string]string,
) ([]*engineapi.EngineResponse, error) {
gvrs := dclient.GroupVersionResourceSubresource{
GroupVersionResource: schema.GroupVersionResource(request.Resource),
SubResource: request.SubResource,
}
policies := v.pCache.GetPolicies(policycache.ValidateAudit, gvrs, request.Namespace)
gvr := schema.GroupVersionResource(request.Resource)
policies := v.pCache.GetPolicies(policycache.ValidateAudit, gvr, request.SubResource, request.Namespace)
policyContext, err := v.pcBuilder.Build(request)
if err != nil {
return nil, err

View file

@ -24,7 +24,7 @@ spec:
match:
resources:
kinds:
- PodExecOptions
- Pod/exec
context:
- name: podexeclabel
apiCall:

View file

@ -1,6 +1,5 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
apply:
- policy.yaml
assert:
- policy-assert.yaml
- file: policy.yaml
shouldFail: true

View file

@ -1,4 +0,0 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- webhooks.yaml

View file

@ -1,10 +1,4 @@
## Description
This test verifies the resource validation webhook is configured correctly when a policy targets all `Scale` resources.
It should be equivalent to using `*/scale`
## Steps
1. - Create a policy targeting `Scale`
- Assert policy gets ready
1. - Assert that the resource validation webhook is configured correctly
This test tries to create a policy targeting the `Scale` kind.
The `Scale` kind doesn't map to a top-level resource, so the policy is expected to be rejected.

View file

@ -1,9 +0,0 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-labels
status:
conditions:
- reason: Succeeded
status: "True"
type: Ready

View file

@ -1,34 +0,0 @@
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
webhook.kyverno.io/managed-by: kyverno
name: kyverno-resource-validating-webhook-cfg
webhooks:
- rules:
- apiGroups:
- ""
apiVersions:
- v1
operations:
- CREATE
- UPDATE
- DELETE
- CONNECT
resources:
- replicationcontrollers/scale
scope: '*'
- apiGroups:
- apps
apiVersions:
- v1
operations:
- CREATE
- UPDATE
- DELETE
- CONNECT
resources:
- deployments/scale
- replicasets/scale
- statefulsets/scale
scope: '*'