
Feature/configmaps var 724 (#1118)

* added configmap data substitution for foreground mutate and validate

* added configmap data substitution for foreground mutate and validate fmt

* added configmap lookup for background

* added comments to resource cache

* added configmap data lookup in preConditions

* added parse strings in In operator and configmap lookup docs

* added configmap lookup docs

* modified configmap lookup docs
Mohan B E 2020-09-23 02:41:49 +05:30 committed by GitHub
parent fe20f3aa4c
commit 51ac382c6c
34 changed files with 593 additions and 42 deletions

View file

@ -108,6 +108,7 @@ See [docs](https://github.com/nirmata/kyverno/#documentation) for complete detai
- [Preconditions](documentation/writing-policies-preconditions.md)
- [Auto-Generation of Pod Controller Policies](documentation/writing-policies-autogen.md)
- [Background Processing](documentation/writing-policies-background.md)
- [Configmap Lookup](documentation/writing-policies-configmap-reference.md)
- [Testing Policies](documentation/testing-policies.md)
- [Policy Violations](documentation/policy-violations.md)
- [Kyverno CLI](documentation/kyverno-cli.md)

View file

@ -23,6 +23,7 @@ import (
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/resourcecache"
"github.com/nirmata/kyverno/pkg/signal"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/version"
@ -105,6 +106,15 @@ func main() {
os.Exit(1)
}
// ======================= resource cache ====================
rCache, err := resourcecache.NewResourceCache(log.Log, clientConfig, client, []string{"configmaps"}, []string{})
if err != nil {
setupLog.Error(err, "Failed to create resource cache")
os.Exit(1)
}
rCache.RunAllInformers(log.Log)
// ===========================================================
// CRD CHECK
// - verify if the CRD for Policy & PolicyViolation are available
if !utils.CRDInstalled(client.DiscoveryClient, log.Log) {
@ -201,6 +211,7 @@ func main() {
rWebhookWatcher,
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("PolicyController"),
rCache,
)
if err != nil {
@ -223,6 +234,7 @@ func main() {
statusSync.Listener,
log.Log.WithName("GenerateController"),
configData,
rCache,
)
// GENERATE REQUEST CLEANUP
@ -251,6 +263,7 @@ func main() {
kubeInformer.Rbac().V1().ClusterRoleBindings(),
log.Log.WithName("ValidateAuditHandler"),
configData,
rCache,
)
// CONFIGURE CERTIFICATES
@ -309,6 +322,7 @@ func main() {
cleanUp,
log.Log.WithName("WebhookServer"),
openAPIController,
rCache,
)
if err != nil {

View file

@ -17,4 +17,4 @@ spec:
The default value of `background` is `true`. When a policy is created or modified, the policy validation logic will report an error if a rule uses `userInfo` and does not set `background` to `false`.
<small>*Read Next >> [Testing Policies](/documentation/testing-policies.md)*</small>
<small>*Read Next >> [Configmap Lookup](/documentation/writing-policies-configmap-reference.md)*</small>

View file

@ -0,0 +1,51 @@
<small>*[documentation](/README.md#documentation) / [Writing Policies](/documentation/writing-policies.md) / Configmap Lookup*</small>
# Configmap Reference in Kyverno Policy
There are many cases where the values passed into Kyverno policies are dynamic. In such cases, the values must be added or modified inside the policy itself.
The Configmap Reference lets a policy rule reference configmap values through a JMESPath expression, so any required change to a policy value can be made by modifying the referenced configmap instead of editing the policy.
# Defining Rule Context
To reference a configmap inside a rule, provide a context in that rule that defines the list of configmaps the rule will reference.
```
rules:
  - name: add-sidecar-pod
    # context defines the configmap information that this rule will reference
    context:
      # unique name to identify the configmap within the rule
      - name: mycmapRef
        configMap:
          # name of the configmap that will be referenced
          name: mycmap
          # namespace of the configmap that will be referenced (defaults to "default" when omitted)
          namespace: default
```
Referenced Configmap Definition
```
apiVersion: v1
data:
  env: production, sandbox, staging
kind: ConfigMap
metadata:
  name: mycmap
```
# Referencing a Value
A configmap defined in a rule's context can be referenced by the unique name given to it in the context.
Its values are referenced with a JMESPath expression of the form
`{{<name>.data.<key>}}`
So for the above context, the `env` value can be referenced as
`{{mycmapRef.data.env}}`
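As an illustration, the reference can drive a rule's preconditions. The sketch below is hypothetical (the rule name and pod label are invented) and assumes the `In` operator, which with this commit also accepts a plain comma-separated string and splits it on commas:
```
rules:
  - name: check-env
    context:
      - name: mycmapRef
        configMap:
          name: mycmap
          namespace: default
    match:
      resources:
        kinds:
          - Pod
    preconditions:
      # {{mycmapRef.data.env}} resolves to "production, sandbox, staging"
      - key: "{{request.object.metadata.labels.env}}"
        operator: In
        value: "{{mycmapRef.data.env}}"
```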
<small>*Read Next >> [Testing Policies](/documentation/testing-policies.md)*</small>

go.mod
View file

@ -28,6 +28,7 @@ require (
github.com/onsi/gomega v1.8.1
github.com/ory/go-acc v0.2.1 // indirect
github.com/pkg/errors v0.9.1
github.com/pkg/profile v1.2.1
github.com/prometheus/common v0.4.1
github.com/rogpeppe/godef v1.1.2 // indirect
github.com/spf13/cobra v1.0.0

go.sum
View file

@ -819,6 +819,7 @@ github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/profile v1.2.1 h1:F++O52m40owAmADcojzM+9gyjmMOY/T4oYJkgFDH8RE=
github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA=
github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
@ -1324,6 +1325,8 @@ k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZ
k8s.io/apimachinery v0.17.4 h1:UzM+38cPUJnzqSQ+E1PY4YxMHIzQyCg29LOoGfo79Zw=
k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
k8s.io/apimachinery v0.18.6 h1:RtFHnfGNfd1N0LeSrKCUznz5xtUP1elRGvHJbL3Ntag=
k8s.io/apimachinery v0.19.0 h1:gjKnAda/HZp5k4xQYjL0K/Yb66IvNqjthCb03QlKpaQ=
k8s.io/apimachinery v0.19.1 h1:cwsxZazM/LA9aUsBaL4bRS5ygoM6bYp8dFk22DSYQa4=
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
k8s.io/apiserver v0.16.4/go.mod h1:kbLJOak655g6W7C+muqu1F76u9wnEycfKMqbVaXIdAc=
k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=

View file

@ -180,6 +180,18 @@ type Rule struct {
// Specifies patterns to create additional resources
// +optional
Generation Generation `json:"generate,omitempty" yaml:"generate,omitempty"`
// Context
Context []ContextEntry `json:"context,omitempty" yaml:"context,omitempty"`
}
type ContextEntry struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
ConfigMap ConfigMapReference `json:"configMap,omitempty" yaml:"configMap,omitempty"`
}
type ConfigMapReference struct {
Name string `json:"name,omitempty" yaml:"name,omitempty"`
Namespace string `json:"namespace,omitempty" yaml:"namespace,omitempty"`
}
//Condition defines the evaluation condition

View file

@ -174,6 +174,39 @@ func (in *Condition) DeepCopy() *Condition {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ConfigMapReference) DeepCopyInto(out *ConfigMapReference) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapReference.
func (in *ConfigMapReference) DeepCopy() *ConfigMapReference {
if in == nil {
return nil
}
out := new(ConfigMapReference)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ContextEntry) DeepCopyInto(out *ContextEntry) {
*out = *in
out.ConfigMap = in.ConfigMap
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContextEntry.
func (in *ContextEntry) DeepCopy() *ContextEntry {
if in == nil {
return nil
}
out := new(ContextEntry)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Deny) DeepCopyInto(out *Deny) {
*out = *in
@ -612,6 +645,13 @@ func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Annotations != nil {
in, out := &in.Annotations, &out.Annotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
@ -661,6 +701,11 @@ func (in *Rule) DeepCopyInto(out *Rule) {
in.Mutation.DeepCopyInto(&out.Mutation)
in.Validation.DeepCopyInto(&out.Validation)
in.Generation.DeepCopyInto(&out.Generation)
if in.Context != nil {
in, out := &in.Context, &out.Context
*out = make([]ContextEntry, len(*in))
copy(*out, *in)
}
return
}

View file

@ -9,6 +9,7 @@ import (
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/variables"
"github.com/nirmata/kyverno/pkg/resourcecache"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
@ -23,17 +24,20 @@ func Generate(policyContext PolicyContext) (resp response.EngineResponse) {
admissionInfo := policyContext.AdmissionInfo
ctx := policyContext.Context
resCache := policyContext.ResourceCache
jsonContext := policyContext.JSONContext
logger := log.Log.WithName("Generate").WithValues("policy", policy.Name, "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
return filterRules(policy, resource, admissionInfo, ctx, logger, policyContext.ExcludeGroupRole)
return filterRules(policy, resource, admissionInfo, ctx, logger, policyContext.ExcludeGroupRole, resCache, jsonContext)
}
func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger, excludeGroupRole []string) *response.RuleResponse {
func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger, excludeGroupRole []string, resCache resourcecache.ResourceCacheIface, jsonContext *context.Context) *response.RuleResponse {
if !rule.HasGenerate() {
return nil
}
startTime := time.Now()
if err := MatchesResourceDescription(resource, rule, admissionInfo, excludeGroupRole); err != nil {
return &response.RuleResponse{
Name: rule.Name,
@ -44,6 +48,12 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
},
}
}
// add configmap json data to context
if err := AddResourceToContext(log, rule.Context, resCache, jsonContext); err != nil {
log.Info("cannot add configmaps to context", "reason", err.Error())
return nil
}
// operate on the copy of the conditions, as we perform variable substitution
copyConditions := copyConditions(rule.Conditions)
@ -63,7 +73,7 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
}
}
func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger, excludeGroupRole []string) response.EngineResponse {
func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger, excludeGroupRole []string, resCache resourcecache.ResourceCacheIface, jsonContext *context.Context) response.EngineResponse {
resp := response.EngineResponse{
PolicyResponse: response.PolicyResponse{
Policy: policy.Name,
@ -75,7 +85,7 @@ func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
},
}
for _, rule := range policy.Spec.Rules {
if ruleResp := filterRule(rule, resource, admissionInfo, ctx, log, excludeGroupRole); ruleResp != nil {
if ruleResp := filterRule(rule, resource, admissionInfo, ctx, log, excludeGroupRole, resCache, jsonContext); ruleResp != nil {
resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, *ruleResp)
}
}

View file

@ -29,7 +29,7 @@ func generatePatches(src, dst []byte) ([][]byte, error) {
}
patchesBytes = append(patchesBytes, pbytes)
fmt.Printf("generated patch %s\n", p)
// fmt.Printf("generated patch %s\n", p)
}
return patchesBytes, err

View file

@ -27,6 +27,9 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
policy := policyContext.Policy
patchedResource := policyContext.NewResource
ctx := policyContext.Context
resCache := policyContext.ResourceCache
jsonContext := policyContext.JSONContext
logger := log.Log.WithName("EngineMutate").WithValues("policy", policy.Name, "kind", patchedResource.GetKind(),
"namespace", patchedResource.GetNamespace(), "name", patchedResource.GetName())
@ -59,6 +62,11 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
logger.V(3).Info("resource not matched", "reason", err.Error())
continue
}
// add configmap json data to context
if err := AddResourceToContext(logger, rule.Context, resCache, jsonContext); err != nil {
logger.V(4).Info("cannot add configmaps to context", "reason", err.Error())
continue
}
// operate on the copy of the conditions, as we perform variable substitution
copyConditions := copyConditions(rule.Conditions)

View file

@ -4,6 +4,7 @@ import (
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/resourcecache"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
@ -22,4 +23,10 @@ type PolicyContext struct {
Context context.EvalInterface
// Config handler
ExcludeGroupRole []string
// ResourceCache provides listers to resources
// Currently Supports Configmap
ResourceCache resourcecache.ResourceCacheIface
// JSONContext ...
JSONContext *context.Context
}

View file

@ -27,7 +27,6 @@ type PolicyResponse struct {
Rules []RuleResponse `json:"rules"`
// ValidationFailureAction: audit(default if not set),enforce
ValidationFailureAction string
}
//ResourceSpec resource action applied on

View file

@ -1,22 +1,27 @@
package engine
import (
"encoding/json"
"errors"
"fmt"
"reflect"
"strings"
"time"
"github.com/go-logr/logr"
"github.com/nirmata/kyverno/pkg/utils"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
"reflect"
"sigs.k8s.io/controller-runtime/pkg/log"
"strings"
"time"
"github.com/minio/minio/pkg/wildcard"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/resourcecache"
"k8s.io/apimachinery/pkg/runtime"
)
//EngineStats stores in the statistics for a single application of resource
@ -278,3 +283,55 @@ func SkipPolicyApplication(policy kyverno.ClusterPolicy, resource unstructured.U
return false
}
// AddResourceToContext - Add the Configmap JSON to Context.
// it will read configmaps (can be extended to get other type of resource like secrets, namespace etc) from the informer cache
// and add the configmap data to context
func AddResourceToContext(logger logr.Logger, contexts []kyverno.ContextEntry, resCache resourcecache.ResourceCacheIface, ctx *context.Context) error {
if len(contexts) == 0 {
return nil
}
// get GVR Cache for "configmaps"
// can get cache for other resources if the informers are enabled in resource cache
gvrC := resCache.GetGVRCache("configmaps")
if gvrC != nil {
lister := gvrC.GetLister()
for _, context := range contexts {
contextData := make(map[string]interface{})
name := context.ConfigMap.Name
namespace := context.ConfigMap.Namespace
if namespace == "" {
namespace = "default"
}
key := fmt.Sprintf("%s/%s", namespace, name)
obj, err := lister.Get(key)
if err != nil {
logger.Error(err, fmt.Sprintf("failed to read configmap %s/%s from cache", namespace, name))
continue
}
unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
if err != nil {
logger.Error(err, "failed to convert context runtime object to unstructured")
continue
}
// extract configmap data
contextData["data"] = unstructuredObj["data"]
contextData["metadata"] = unstructuredObj["metadata"]
contextNamedData := make(map[string]interface{})
contextNamedData[context.Name] = contextData
jdata, err := json.Marshal(contextNamedData)
if err != nil {
logger.Error(err, "failed to marshal context data")
continue
}
// add data to context
err = ctx.AddJSON(jdata)
if err != nil {
logger.Error(err, "failed to load context json")
continue
}
}
return nil
}
return errors.New("configmaps GVR Cache not found")
}
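For orientation, here is a minimal standalone sketch (assuming the `NewContext`, `AddJSON`, and `Query` methods of `pkg/engine/context` behave as they are used elsewhere in this commit) of the context document this function builds: the configmap's `data` and `metadata` are nested under the context entry's name, so a rule reference like `{{mycmapRef.data.env}}` resolves against it as a JMESPath query.
```go
package main

import (
	"fmt"

	"github.com/nirmata/kyverno/pkg/engine/context"
)

func main() {
	// Mirrors the JSON that AddResourceToContext builds for a context
	// entry named "mycmapRef" pointing at the example configmap.
	ctx := context.NewContext()
	_ = ctx.AddJSON([]byte(`{"mycmapRef": {"data": {"env": "production, sandbox, staging"}}}`))

	// A policy reference {{mycmapRef.data.env}} is evaluated as the
	// JMESPath query "mycmapRef.data.env" against this document.
	val, _ := ctx.Query("mycmapRef.data.env")
	fmt.Println(val) // production, sandbox, staging
}
```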

View file

@ -12,6 +12,7 @@ import (
"github.com/nirmata/kyverno/pkg/engine/utils"
"github.com/nirmata/kyverno/pkg/engine/validate"
"github.com/nirmata/kyverno/pkg/engine/variables"
"github.com/nirmata/kyverno/pkg/resourcecache"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
@ -24,6 +25,9 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) {
oldR := policyContext.OldResource
ctx := policyContext.Context
admissionInfo := policyContext.AdmissionInfo
resCache := policyContext.ResourceCache
jsonContext := policyContext.JSONContext
logger := log.Log.WithName("EngineValidate").WithValues("policy", policy.Name)
if reflect.DeepEqual(newR, unstructured.Unstructured{}) {
@ -33,7 +37,6 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) {
}
logger.V(4).Info("start processing", "startTime", startTime)
defer func() {
if reflect.DeepEqual(resp, response.EngineResponse{}) {
return
@ -62,19 +65,18 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) {
// If request is delete, newR will be empty
if reflect.DeepEqual(newR, unstructured.Unstructured{}) {
return *isRequestDenied(logger, ctx, policy, oldR, admissionInfo, policyContext.ExcludeGroupRole)
return *isRequestDenied(logger, ctx, policy, oldR, admissionInfo, policyContext.ExcludeGroupRole, resCache, jsonContext)
}
if denyResp := isRequestDenied(logger, ctx, policy, newR, admissionInfo, policyContext.ExcludeGroupRole); !denyResp.IsSuccessful() {
if denyResp := isRequestDenied(logger, ctx, policy, newR, admissionInfo, policyContext.ExcludeGroupRole, resCache, jsonContext); !denyResp.IsSuccessful() {
return *denyResp
}
if reflect.DeepEqual(oldR, unstructured.Unstructured{}) {
return *validateResource(logger, ctx, policy, newR, admissionInfo, policyContext.ExcludeGroupRole)
return *validateResource(logger, ctx, policy, newR, admissionInfo, policyContext.ExcludeGroupRole, resCache, jsonContext)
}
oldResponse := validateResource(logger, ctx, policy, oldR, admissionInfo, policyContext.ExcludeGroupRole)
newResponse := validateResource(logger, ctx, policy, newR, admissionInfo, policyContext.ExcludeGroupRole)
oldResponse := validateResource(logger, ctx, policy, oldR, admissionInfo, policyContext.ExcludeGroupRole, resCache, jsonContext)
newResponse := validateResource(logger, ctx, policy, newR, admissionInfo, policyContext.ExcludeGroupRole, resCache, jsonContext)
if !isSameResponse(oldResponse, newResponse) {
return *newResponse
}
@ -102,7 +104,7 @@ func incrementAppliedCount(resp *response.EngineResponse) {
resp.PolicyResponse.RulesAppliedCount++
}
func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, excludeGroupRole []string) *response.EngineResponse {
func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, excludeGroupRole []string, resCache resourcecache.ResourceCacheIface, jsonContext *context.Context) *response.EngineResponse {
resp := &response.EngineResponse{}
if SkipPolicyApplication(policy, resource) {
log.V(5).Info("Skip applying policy, Pod has ownerRef set", "policy", policy.GetName())
@ -117,6 +119,12 @@ func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.
continue
}
// add configmap json data to context
if err := AddResourceToContext(log, rule.Context, resCache, jsonContext); err != nil {
log.V(4).Info("cannot add configmaps to context", "reason", err.Error())
continue
}
if err := MatchesResourceDescription(resource, rule, admissionInfo, excludeResource); err != nil {
log.V(4).Info("resource fails the match description", "reason", err.Error())
continue
@ -147,7 +155,7 @@ func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.
return resp
}
func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, excludeGroupRole []string) *response.EngineResponse {
func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, excludeGroupRole []string, resCache resourcecache.ResourceCacheIface, jsonContext *context.Context) *response.EngineResponse {
resp := &response.EngineResponse{}
if SkipPolicyApplication(policy, resource) {
@ -172,6 +180,11 @@ func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno
log.V(4).Info("resource fails the match description", "reason", err.Error())
continue
}
// add configmap json data to context
if err := AddResourceToContext(log, rule.Context, resCache, jsonContext); err != nil {
log.V(4).Info("cannot add configmaps to context", "reason", err.Error())
continue
}
// operate on the copy of the conditions, as we perform variable substitution
preconditionsCopy := copyConditions(rule.Conditions)

View file

@ -3,6 +3,7 @@ package operator
import (
"fmt"
"reflect"
"strings"
"github.com/go-logr/logr"
"github.com/nirmata/kyverno/pkg/engine/context"
@ -68,6 +69,14 @@ func ValidateStringPattern(key string, value interface{}) (invalidType bool, key
keyExists = true
}
}
case string:
valuesAvaliable = strings.TrimSpace(valuesAvaliable)
vars := strings.Split(valuesAvaliable, ",")
for _, val := range vars {
if key == val {
keyExists = true
}
}
default:
return true, false
}
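A note on the new `string` case above: `strings.TrimSpace` trims the whole value before the split, not each element, so a comma-separated configmap value like `production, sandbox, staging` yields elements with leading spaces after the first. A self-contained sketch of that behavior:
```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// e.g. the resolved value of {{mycmapRef.data.env}}
	values := strings.TrimSpace("production, sandbox, staging")
	for _, val := range strings.Split(values, ",") {
		// prints "production", " sandbox", " staging" -- only the first
		// element compares equal to its key without per-element trimming
		fmt.Printf("%q\n", val)
	}
}
```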

View file

@ -11,6 +11,7 @@ import (
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/resourcecache"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@ -58,7 +59,8 @@ type Controller struct {
policyStatusListener policystatus.Listener
log logr.Logger
Config config.Interface
Config config.Interface
resCache resourcecache.ResourceCacheIface
}
//NewController returns an instance of the Generate-Request Controller
@ -72,6 +74,7 @@ func NewController(
policyStatus policystatus.Listener,
log logr.Logger,
dynamicConfig config.Interface,
resCache resourcecache.ResourceCacheIface,
) *Controller {
c := Controller{
client: client,
@ -84,6 +87,7 @@ func NewController(
log: log,
policyStatusListener: policyStatus,
Config: dynamicConfig,
resCache: resCache,
}
c.statusControl = StatusControl{client: kyvernoclient}

View file

@ -99,6 +99,8 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern
Context: ctx,
AdmissionInfo: gr.Spec.Context.UserRequestInfo,
ExcludeGroupRole: c.Config.GetExcludeGroupRole(),
ResourceCache: c.resCache,
JSONContext: ctx,
}
// check if the policy still applies to the resource
@ -148,6 +150,9 @@ func (c *Controller) applyGeneratePolicy(log logr.Logger, policyContext engine.P
policy := policyContext.Policy
resource := policyContext.NewResource
ctx := policyContext.Context
resCache := policyContext.ResourceCache
jsonContext := policyContext.JSONContext
// To manage existing resources, we compare the creation time for the default resource to be generated and policy creation time
ruleNameToProcessingTime := make(map[string]time.Duration)
@ -168,6 +173,13 @@ func (c *Controller) applyGeneratePolicy(log logr.Logger, policyContext engine.P
}()
}
}
// add configmap json data to context
if err := engine.AddResourceToContext(log, rule.Context, resCache, jsonContext); err != nil {
log.Info("cannot add configmaps to context", "reason", err.Error())
return nil, err
}
genResource, err := applyRule(log, c.client, rule, resource, ctx, policy.Name, gr, processExisting)
if err != nil {

View file

@ -113,6 +113,12 @@ func PolicyHasNonAllowedVariables(policy v1.ClusterPolicy) bool {
matchedVarsRegex := regexp.MustCompile(regexStr)
if len(allVarsRegex.FindAllStringSubmatch(string(policyRaw), -1)) > len(matchedVarsRegex.FindAllStringSubmatch(string(policyRaw), -1)) {
// If any rule contains a Context, skip this validation
for _, rule := range policy.Spec.Rules {
if len(rule.Context) > 0 {
return false
}
}
return true
}
return false

View file

@ -13,13 +13,14 @@ import (
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/resourcecache"
"github.com/nirmata/kyverno/pkg/utils"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// applyPolicy applies policy on a resource
//TODO: generation rules
func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, logger logr.Logger, excludeGroupRole []string) (responses []response.EngineResponse) {
func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, logger logr.Logger, excludeGroupRole []string, resCache resourcecache.ResourceCacheIface) (responses []response.EngineResponse) {
startTime := time.Now()
defer func() {
name := resource.GetKind() + "/" + resource.GetName()
@ -41,21 +42,21 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
logger.Error(err, "unable to add transform resource to ctx")
}
//MUTATION
engineResponseMutation, err = mutation(policy, resource, ctx, logger)
engineResponseMutation, err = mutation(policy, resource, ctx, logger, resCache, ctx)
if err != nil {
logger.Error(err, "failed to process mutation rule")
}
//VALIDATION
engineResponseValidation = engine.Validate(engine.PolicyContext{Policy: policy, Context: ctx, NewResource: resource, ExcludeGroupRole: excludeGroupRole})
engineResponseValidation = engine.Validate(engine.PolicyContext{Policy: policy, Context: ctx, NewResource: resource, ExcludeGroupRole: excludeGroupRole, ResourceCache: resCache, JSONContext: ctx})
engineResponses = append(engineResponses, mergeRuleRespose(engineResponseMutation, engineResponseValidation))
//TODO: GENERATION
return engineResponses
}
func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, ctx context.EvalInterface, log logr.Logger) (response.EngineResponse, error) {
func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, ctx context.EvalInterface, log logr.Logger, resCache resourcecache.ResourceCacheIface, jsonContext *context.Context) (response.EngineResponse, error) {
engineResponse := engine.Mutate(engine.PolicyContext{Policy: policy, NewResource: resource, Context: ctx})
engineResponse := engine.Mutate(engine.PolicyContext{Policy: policy, NewResource: resource, Context: ctx, ResourceCache: resCache, JSONContext: jsonContext})
if !engineResponse.IsSuccessful() {
log.V(4).Info("failed to apply mutation rules; reporting them")
return engineResponse, nil

View file

@ -20,6 +20,10 @@ func ContainsVariablesOtherThanObject(policy kyverno.ClusterPolicy) error {
if path := userInfoDefined(rule.ExcludeResources.UserInfo); path != "" {
return fmt.Errorf("invalid variable used at path: spec/rules[%d]/exclude/%s", idx, path)
}
// Skip Validation if rule contains Context
if len(rule.Context) > 0 {
return nil
}
filterVars := []string{"request.object"}
ctx := context.NewContext(filterVars...)

View file

@ -19,6 +19,7 @@ import (
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/resourcecache"
"github.com/nirmata/kyverno/pkg/webhookconfig"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -99,11 +100,13 @@ type PolicyController struct {
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
// resourceWebhookWatcher queues the webhook creation request, creates the webhook
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
log logr.Logger
// resCache - controls creation and fetching of resource informer cache
resCache resourcecache.ResourceCacheIface
}
// NewPolicyController create a new PolicyController
@ -118,7 +121,9 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
pvGenerator policyviolation.GeneratorInterface,
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
namespaces informers.NamespaceInformer,
log logr.Logger) (*PolicyController, error) {
log logr.Logger,
resCache resourcecache.ResourceCacheIface,
) (*PolicyController, error) {
// Event broad caster
eventBroadcaster := record.NewBroadcaster()
@ -139,6 +144,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
pvGenerator: pvGenerator,
resourceWebhookWatcher: resourceWebhookWatcher,
log: log,
resCache: resCache,
}
pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}
@ -319,7 +325,7 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
logger.Info("starting")
defer logger.Info("shutting down")
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.cpvListerSynced, pc.nspvListerSynced, pc.nsListerSynced,pc.grListerSynced) {
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.cpvListerSynced, pc.nspvListerSynced, pc.nsListerSynced, pc.grListerSynced) {
logger.Info("failed to sync informer cache")
return
}
@ -400,7 +406,7 @@ func (pc *PolicyController) syncPolicy(key string) error {
if errors.IsNotFound(err) {
for _, v := range grList {
if key == v.Spec.Policy {
err := pc.kyvernoClient.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).Delete(v.GetName(),&metav1.DeleteOptions{})
err := pc.kyvernoClient.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).Delete(v.GetName(), &metav1.DeleteOptions{})
if err != nil {
logger.Error(err, "failed to delete gr")
}
@ -417,9 +423,9 @@ func (pc *PolicyController) syncPolicy(key string) error {
for _, v := range grList {
if policy.Name == v.Spec.Policy {
v.SetLabels(map[string]string{
"policy-update" :fmt.Sprintf("revision-count-%d",rand.Intn(100000)),
"policy-update": fmt.Sprintf("revision-count-%d", rand.Intn(100000)),
})
_,err := pc.kyvernoClient.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).Update(v)
_, err := pc.kyvernoClient.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).Update(v)
if err != nil {
logger.Error(err, "failed to update gr")
return err
@ -427,7 +433,6 @@ func (pc *PolicyController) syncPolicy(key string) error {
}
}
pc.resourceWebhookWatcher.RegisterResourceWebhook()
engineResponses := pc.processExistingResources(policy)

View file

@ -37,7 +37,7 @@ func (pc *PolicyController) processExistingResources(policy *kyverno.ClusterPoli
}
// apply the policy on each
engineResponse := applyPolicy(*policy, resource, logger, pc.configHandler.GetExcludeGroupRole())
engineResponse := applyPolicy(*policy, resource, logger, pc.configHandler.GetExcludeGroupRole(), pc.resCache)
// get engine response for mutation & validation independently
engineResponses = append(engineResponses, engineResponse...)
// post-processing, register the resource as processed

View file

@ -0,0 +1,53 @@
package resourcecache
import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
)
// GVRCacheIface - allows operation on a single resource
type GVRCacheIface interface {
StopInformer()
IsNamespaced() bool
GetLister() cache.GenericLister
GetNamespacedLister(namespace string) cache.GenericNamespaceLister
}
// GVRCache ...
type GVRCache struct {
// GVR Group Version Resource of a resource
GVR schema.GroupVersionResource
// Namespaced - identifies if a resource is namespaced or not
Namespaced bool
// stopCh - channel to stop the informer when needed
stopCh chan struct{}
// genericInformer - contains instance of informers.GenericInformer for a specific resource
// which in turn contains Listers() which gives access to cached resources.
genericInformer informers.GenericInformer
}
// NewGVRCache ...
func NewGVRCache(gvr schema.GroupVersionResource, namespaced bool, stopCh chan struct{}, genericInformer informers.GenericInformer) GVRCacheIface {
return &GVRCache{GVR: gvr, Namespaced: namespaced, stopCh: stopCh, genericInformer: genericInformer}
}
// StopInformer ...
func (gvrc *GVRCache) StopInformer() {
close(gvrc.stopCh)
}
// IsNamespaced ...
func (gvrc *GVRCache) IsNamespaced() bool {
return gvrc.Namespaced
}
// GetLister - get access to Lister() instance of a resource in GVRCache
func (gvrc *GVRCache) GetLister() cache.GenericLister {
return gvrc.genericInformer.Lister()
}
// GetNamespacedLister - get access to namespaced Lister() instance of a resource in GVRCache
func (gvrc *GVRCache) GetNamespacedLister(namespace string) cache.GenericNamespaceLister {
return gvrc.genericInformer.Lister().ByNamespace(namespace)
}

View file

@ -0,0 +1,45 @@
package resourcecache
import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/cache"
)
// TODO :- Implementation for mocking
type TestGVRCache struct {
}
func NewTestGVRCache() GVRCacheIface {
return &TestGVRCache{}
}
func (tg *TestGVRCache) StopInformer() {
}
func (tg *TestGVRCache) IsNamespaced() bool {
return true
}
func (tg *TestGVRCache) GetLister() cache.GenericLister {
return &TestLister{}
}
func (tg *TestGVRCache) GetNamespacedLister(namespace string) cache.GenericNamespaceLister {
return &TestLister{}
}
type TestLister struct {
}
func (tl *TestLister) List(selector labels.Selector) ([]runtime.Object, error) {
return []runtime.Object{}, nil
}
func (tl *TestLister) Get(name string) (runtime.Object, error) {
return nil, nil
}
func (tl *TestLister) ByNamespace(namespace string) cache.GenericNamespaceLister {
return &TestLister{}
}

pkg/resourcecache/main.go (new file)
View file

@ -0,0 +1,86 @@
package resourcecache
import (
// "fmt"
// "time"
"github.com/go-logr/logr"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/discovery"
"k8s.io/client-go/dynamic/dynamicinformer"
"k8s.io/client-go/rest"
)
// ResourceCacheIface - allows creating, deleting, and saving resource informers as a cache
type ResourceCacheIface interface {
RunAllInformers(log logr.Logger)
CreateResourceInformer(log logr.Logger, resource string) (bool, error)
StopResourceInformer(log logr.Logger, resource string) bool
GetGVRCache(resource string) *GVRCache
}
// ResourceCache ...
type ResourceCache struct {
dinformer dynamicinformer.DynamicSharedInformerFactory
// match - the resources for which informers need to be created.
// if match contains any resource names, informers are created only for those resources;
// otherwise informers are created for all server-supported resources
match []string
// exclude - excludes the creation of informers for specific resources.
// if a resource appears in both match and exclude, exclude overrides it
exclude []string
// GVRCacheData - stores the informer and lister objects for each resource.
// it uses the resource name as the key (for example: namespaces for Namespace, pods for Pod, clusterpolicies for ClusterPolicy)
// each GVRCache stores the GVR (Group Version Resource) of the resource, its Informer() instance, and its Lister() instance.
GVRCacheData map[string]*GVRCache
}
// NewResourceCache - initializes the ResourceCache, initially storing the GVR and namespaced condition for each allowed resource in GVRCacheData
func NewResourceCache(log logr.Logger, config *rest.Config, dclient *dclient.Client, match []string, exclude []string) (ResourceCacheIface, error) {
logger := log.WithName("resourcecache")
discoveryIface := dclient.GetDiscoveryCache()
cacheData := make(map[string]*GVRCache)
dInformer := dclient.NewDynamicSharedInformerFactory(0)
resCache := &ResourceCache{GVRCacheData: cacheData, dinformer: dInformer, match: match, exclude: exclude}
err := updateGVRCache(logger, resCache, discoveryIface)
if err != nil {
logger.Error(err, "error in updateGVRCache function")
return nil, err
}
return resCache, nil
}
func updateGVRCache(log logr.Logger, resc *ResourceCache, discoveryIface discovery.CachedDiscoveryInterface) error {
serverResources, err := discoveryIface.ServerPreferredResources()
if err != nil {
return err
}
for _, serverResource := range serverResources {
groupVersion := serverResource.GroupVersion
for _, resource := range serverResource.APIResources {
gv, err := schema.ParseGroupVersion(groupVersion)
if err != nil {
return err
}
mok := resc.matchGVRKey(resource.Name)
if !mok {
continue
}
eok := resc.excludeGVRKey(resource.Name)
if eok {
continue
}
_, ok := resc.GVRCacheData[resource.Name]
if !ok {
gvrc := &GVRCache{GVR: gv.WithResource(resource.Name), Namespaced: resource.Namespaced}
resc.GVRCacheData[resource.Name] = gvrc
}
}
}
return nil
}

View file

@ -0,0 +1,48 @@
package resourcecache
import (
"github.com/go-logr/logr"
)
// RunAllInformers - run the informers for the GVR of all the resources available in GVRCacheData
func (resc *ResourceCache) RunAllInformers(log logr.Logger) {
for key := range resc.GVRCacheData {
resc.CreateResourceInformer(log, key)
log.V(4).Info("created informer for resource", "name", key)
}
}
// CreateResourceInformer - check the availability of the given resource in ResourceCache.
// if available then create an informer for that GVR and store that GenericInformer instance in the cache and start watching for that resource
func (resc *ResourceCache) CreateResourceInformer(log logr.Logger, resource string) (bool, error) {
res, ok := resc.GVRCacheData[resource]
if ok {
stopCh := make(chan struct{})
res.stopCh = stopCh
genInformer := resc.dinformer.ForResource(res.GVR)
res.genericInformer = genInformer
go startWatching(stopCh, genInformer.Informer())
}
return true, nil
}
// StopResourceInformer - delete the given resource's entry from ResourceCache and stop watching the given resource
func (resc *ResourceCache) StopResourceInformer(log logr.Logger, resource string) bool {
res, ok := resc.GVRCacheData[resource]
if ok {
delete(resc.GVRCacheData, resource)
log.V(4).Info("deleted resource from gvr cache", "name", resource)
res.StopInformer()
log.V(4).Info("closed informer for resource", "name", resource)
}
return false
}
// GetGVRCache - get the GVRCache for a given resource if available
func (resc *ResourceCache) GetGVRCache(resource string) *GVRCache {
res, ok := resc.GVRCacheData[resource]
if ok {
return res
}
return nil
}

View file

@ -0,0 +1,29 @@
package resourcecache
func (resc *ResourceCache) matchGVRKey(key string) bool {
if len(resc.match) == 0 {
return true
}
ok := false
for _, mkey := range resc.match {
if key == mkey {
ok = true
break
}
}
return ok
}
func (resc *ResourceCache) excludeGVRKey(key string) bool {
if len(resc.exclude) == 0 {
return false
}
ok := true
for _, ekey := range resc.exclude {
if key == ekey {
ok = false
break
}
}
return ok
}
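The two helpers above gate which discovered resources get informers. Below is a hypothetical in-package test sketching their behavior as written; note that for a non-empty exclude list, `excludeGVRKey` returns false when the key is present and true when it is absent, the inverse of the "exclude overrides" comment in `main.go`, although the branch is unexercised here since the cache is created with an empty exclude list.
```go
package resourcecache

import "testing"

// Hypothetical test sketching the key-filtering semantics as implemented.
func TestGVRKeyFiltering(t *testing.T) {
	rc := &ResourceCache{match: []string{"configmaps"}}
	if !rc.matchGVRKey("configmaps") || rc.matchGVRKey("secrets") {
		t.Fatal("only resources listed in match should match")
	}
	// an empty match list matches every resource
	if !(&ResourceCache{}).matchGVRKey("secrets") {
		t.Fatal("empty match list should match all resources")
	}
	// as written, a key present in a non-empty exclude list yields false
	rc2 := &ResourceCache{exclude: []string{"configmaps"}}
	if rc2.excludeGVRKey("configmaps") {
		t.Fatal("excludeGVRKey returns false for keys in the exclude list")
	}
}
```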

View file

@ -0,0 +1,9 @@
package resourcecache
import (
"k8s.io/client-go/tools/cache"
)
func startWatching(stopCh <-chan struct{}, s cache.SharedIndexInformer) {
s.Run(stopCh)
}

View file

@ -44,6 +44,8 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
AdmissionInfo: userRequestInfo,
Context: ctx,
ExcludeGroupRole: dynamicConfig.GetExcludeGroupRole(),
ResourceCache: ws.resCache,
JSONContext: ctx,
}
// engine.Generate returns a list of rules that are applicable on this resource
@ -65,12 +67,11 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
}
}
}
}else{
rules = append(rules,rule)
} else {
rules = append(rules, rule)
}
}
if len(rules) > 0 {
engineResponse.PolicyResponse.Rules = rules
// some generate rules do apply to the resource

View file

@ -43,6 +43,8 @@ func (ws *WebhookServer) HandleMutation(
AdmissionInfo: userRequestInfo,
Context: ctx,
ExcludeGroupRole: ws.configHandler.GetExcludeGroupRole(),
ResourceCache: ws.resCache,
JSONContext: ctx,
}
if request.Operation == v1beta1.Update {

View file

@ -38,6 +38,8 @@ import (
rbacinformer "k8s.io/client-go/informers/rbac/v1"
rbaclister "k8s.io/client-go/listers/rbac/v1"
"k8s.io/client-go/tools/cache"
"github.com/nirmata/kyverno/pkg/resourcecache"
)
// WebhookServer contains configured TLS server with MutationWebhook.
@ -111,6 +113,9 @@ type WebhookServer struct {
openAPIController *openapi.Controller
supportMutateValidate bool
// resCache - controls creation and fetching of resource informer cache
resCache resourcecache.ResourceCacheIface
}
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
@ -137,6 +142,7 @@ func NewWebhookServer(
cleanUp chan<- struct{},
log logr.Logger,
openAPIController *openapi.Controller,
resCache resourcecache.ResourceCacheIface,
) (*WebhookServer, error) {
if tlsPair == nil {
@ -178,6 +184,7 @@ func NewWebhookServer(
log: log,
openAPIController: openAPIController,
supportMutateValidate: supportMutateValidate,
resCache: resCache,
}
mux := httprouter.New()
@ -347,7 +354,7 @@ func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1
ws.auditHandler.Add(request.DeepCopy())
// VALIDATION
ok, msg := HandleValidation(request, validatePolicies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log, ws.configHandler)
ok, msg := HandleValidation(request, validatePolicies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log, ws.configHandler, ws.resCache)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{
@ -473,7 +480,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
logger.Error(err, "failed to load service account in context")
}
ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log, ws.configHandler)
ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log, ws.configHandler, ws.resCache)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{

View file

@ -12,6 +12,7 @@ import (
"github.com/nirmata/kyverno/pkg/policycache"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/resourcecache"
"github.com/nirmata/kyverno/pkg/userinfo"
"github.com/pkg/errors"
"k8s.io/api/admission/v1beta1"
@ -52,6 +53,7 @@ type auditHandler struct {
log logr.Logger
configHandler config.Interface
resCache resourcecache.ResourceCacheIface
}
// NewValidateAuditHandler returns a new instance of audit policy handler
@ -62,7 +64,8 @@ func NewValidateAuditHandler(pCache policycache.Interface,
rbInformer rbacinformer.RoleBindingInformer,
crbInformer rbacinformer.ClusterRoleBindingInformer,
log logr.Logger,
dynamicConfig config.Interface) AuditHandler {
dynamicConfig config.Interface,
resCache resourcecache.ResourceCacheIface) AuditHandler {
return &auditHandler{
pCache: pCache,
@ -76,6 +79,7 @@ func NewValidateAuditHandler(pCache policycache.Interface,
crbSynced: crbInformer.Informer().HasSynced,
log: log,
configHandler: dynamicConfig,
resCache: resCache,
}
}
@ -167,7 +171,7 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
return errors.Wrap(err, "failed to load service account in context")
}
HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.pvGenerator, logger, h.configHandler)
HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.pvGenerator, logger, h.configHandler, h.resCache)
return nil
}

View file

@ -20,6 +20,8 @@ import (
v1beta1 "k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"github.com/nirmata/kyverno/pkg/resourcecache"
)
// HandleValidation handles validating webhook admission request
@ -35,7 +37,8 @@ func HandleValidation(
eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
log logr.Logger,
dynamicConfig config.Interface) (bool, string) {
dynamicConfig config.Interface,
resCache resourcecache.ResourceCacheIface) (bool, string) {
if len(policies) == 0 {
return true, ""
@ -73,6 +76,8 @@ func HandleValidation(
Context: ctx,
AdmissionInfo: userRequestInfo,
ExcludeGroupRole: dynamicConfig.GetExcludeGroupRole(),
ResourceCache: resCache,
JSONContext: ctx,
}
var engineResponses []response.EngineResponse