
fix: background scan events (#5807)

* fix: background scan events

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* remove old code

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* remove old code

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix config

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* cleanup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* cleanup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* events

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Authored by Charles-Edouard Brétéché on 2023-01-04 06:15:30 +01:00, committed by GitHub
parent 744d151468
commit 3c0b7856eb
9 changed files with 53 additions and 609 deletions
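In outline, the commit moves event generation into the background scan itself: the background scan report controller receives the shared event generator and emits events for each engine response as it builds the report, while the policy controller's legacy in-process scan (and its private event path) is deleted. A stand-alone sketch of the new shape of the report-building loop, using hypothetical stand-in types rather than the kyverno ones:

package main

import "fmt"

// Hypothetical stand-ins for the scan results handled in updateReport.
type scanResult struct {
	Policy string
	Err    error
}

func generateEvents(r scanResult) {
	fmt.Println("emitting events for policy", r.Policy)
}

func main() {
	results := []scanResult{{Policy: "require-labels"}, {Policy: "disallow-latest-tag"}}
	var responses []scanResult
	for _, r := range results {
		if r.Err != nil {
			fmt.Println("failed to apply policy:", r.Err)
			continue
		}
		responses = append(responses, r)
		generateEvents(r) // new in this commit: events are emitted during the scan
	}
	fmt.Println(len(responses), "engine responses recorded in the report")
}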


@@ -171,6 +171,7 @@ func createReportControllers(
configMapResolver resolvers.ConfigmapResolver,
backgroundScanInterval time.Duration,
configuration config.Configuration,
eventGenerator event.Interface,
) ([]internal.Controller, func(context.Context) error) {
var ctrls []internal.Controller
var warmups []func(context.Context) error
@@ -227,6 +228,7 @@ func createReportControllers(
configMapResolver,
backgroundScanInterval,
configuration,
eventGenerator,
),
backgroundScanWorkers,
))
@@ -343,6 +345,7 @@ func createrLeaderControllers(
configMapResolver,
backgroundScanInterval,
configuration,
eventGenerator,
)
return append(
[]internal.Controller{

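The hunks above are pure plumbing: createReportControllers and createrLeaderControllers gain an event.Interface parameter that is handed down to the background scan controller's constructor. A minimal, runnable analog of that constructor injection, with stand-in types rather than the kyverno API:

package main

import "fmt"

// EventInfo and EventGenerator stand in for kyverno's event.Info and
// event.Interface; the real constructor is backgroundscan.NewController.
type EventInfo struct{ Message string }

type EventGenerator interface {
	Add(infos ...EventInfo)
}

type logGenerator struct{}

func (logGenerator) Add(infos ...EventInfo) {
	for _, i := range infos {
		fmt.Println("event:", i.Message)
	}
}

// The controller keeps the generator it was constructed with, so the
// bootstrap code decides which sink all controllers share.
type scanController struct{ eventGen EventGenerator }

func newScanController(eventGen EventGenerator) *scanController {
	return &scanController{eventGen: eventGen}
}

func main() {
	c := newScanController(logGenerator{})
	c.eventGen.Add(EventInfo{Message: "PolicyViolation: require-labels"})
}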

@@ -18,6 +18,7 @@ import (
"github.com/kyverno/kyverno/pkg/controllers/report/utils"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/registryclient"
controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
reportutils "github.com/kyverno/kyverno/pkg/utils/report"
@@ -65,7 +66,9 @@ type controller struct {
informerCacheResolvers resolvers.ConfigmapResolver
forceDelay time.Duration
cfg config.Configuration
// config
config config.Configuration
eventGen event.Interface
}
func NewController(
@@ -79,7 +82,8 @@ func NewController(
metadataCache resource.MetadataCache,
informerCacheResolvers resolvers.ConfigmapResolver,
forceDelay time.Duration,
cfg config.Configuration,
config config.Configuration,
eventGen event.Interface,
) controllers.Controller {
bgscanr := metadataFactory.ForResource(kyvernov1alpha2.SchemeGroupVersion.WithResource("backgroundscanreports"))
cbgscanr := metadataFactory.ForResource(kyvernov1alpha2.SchemeGroupVersion.WithResource("clusterbackgroundscanreports"))
@@ -99,7 +103,8 @@ func NewController(
metadataCache: metadataCache,
informerCacheResolvers: informerCacheResolvers,
forceDelay: forceDelay,
cfg: cfg,
config: config,
eventGen: eventGen,
}
controllerutils.AddEventHandlersT(polInformer.Informer(), c.addPolicy, c.updatePolicy, c.deletePolicy)
controllerutils.AddEventHandlersT(cpolInformer.Informer(), c.addPolicy, c.updatePolicy, c.deletePolicy)
@@ -230,15 +235,18 @@ func (c *controller) updateReport(ctx context.Context, meta metav1.Object, gvk s
} else {
annTime, err := time.Parse(time.RFC3339, metaAnnotations[annotationLastScanTime])
if err != nil {
logger.Error(err, "failed to parse last scan time", "namespace", resource.Namespace, "name", resource.Name)
logger.Error(err, "failed to parse last scan time annotation", "namespace", resource.Namespace, "name", resource.Name, "hash", resource.Hash)
force = true
} else {
force = time.Now().After(annTime.Add(c.forceDelay))
}
}
if force {
logger.Info("force bg scan report", "namespace", resource.Namespace, "name", resource.Name, "hash", resource.Hash)
}
// if the resource changed, we need to rebuild the report
if force || !reportutils.CompareHash(meta, resource.Hash) {
scanner := utils.NewScanner(logger, c.client, c.rclient, c.informerCacheResolvers, c.cfg)
scanner := utils.NewScanner(logger, c.client, c.rclient, c.informerCacheResolvers, c.config)
before, err := c.getReport(ctx, meta.GetNamespace(), meta.GetName())
if err != nil {
return nil
@@ -266,6 +274,7 @@ func (c *controller) updateReport(ctx context.Context, meta metav1.Object, gvk s
logger.Error(result.Error, "failed to apply policy")
} else {
responses = append(responses, result.EngineResponse)
utils.GenerateEvents(logger, c.eventGen, c.config, result.EngineResponse)
}
}
controllerutils.SetAnnotation(report, annotationLastScanTime, time.Now().Format(time.RFC3339))
@@ -328,7 +337,7 @@ func (c *controller) updateReport(ctx context.Context, meta metav1.Object, gvk s
}
// creations
if len(toCreate) > 0 {
scanner := utils.NewScanner(logger, c.client, c.rclient, c.informerCacheResolvers, c.cfg)
scanner := utils.NewScanner(logger, c.client, c.rclient, c.informerCacheResolvers, c.config)
resource, err := c.client.GetResource(ctx, gvk.GroupVersion().String(), gvk.Kind, resource.Namespace, resource.Name)
if err != nil {
return err
@@ -348,6 +357,7 @@ func (c *controller) updateReport(ctx context.Context, meta metav1.Object, gvk s
} else {
reportutils.SetPolicyLabel(report, result.EngineResponse.Policy)
ruleResults = append(ruleResults, reportutils.EngineResponseToReportResults(result.EngineResponse)...)
utils.GenerateEvents(logger, c.eventGen, c.config, result.EngineResponse)
}
}
}

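One detail worth noting in the hunks above: a rescan is forced when the last-scan-time annotation is missing or unparseable, or when it is older than forceDelay. A runnable distillation of that decision (the function name and shape are mine; the logic mirrors the hunk):

package main

import (
	"fmt"
	"time"
)

// shouldForceScan mirrors the check in updateReport: an unparseable
// annotation forces a rescan, otherwise the report is rebuilt once the
// last scan is older than forceDelay.
func shouldForceScan(lastScan string, forceDelay time.Duration, now time.Time) bool {
	annTime, err := time.Parse(time.RFC3339, lastScan)
	if err != nil {
		return true
	}
	return now.After(annTime.Add(forceDelay))
}

func main() {
	now := time.Now()
	recent := now.Add(-10 * time.Minute).Format(time.RFC3339)
	stale := now.Add(-2 * time.Hour).Format(time.RFC3339)
	fmt.Println(shouldForceScan(recent, time.Hour, now))       // false
	fmt.Println(shouldForceScan(stale, time.Hour, now))        // true
	fmt.Println(shouldForceScan("not-a-time", time.Hour, now)) // true
}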

@@ -1,12 +1,23 @@
package policy
package utils
import (
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
)
func generateSuccessEvents(log logr.Logger, ers []*response.EngineResponse) (eventInfos []event.Info) {
func GenerateEvents(logger logr.Logger, eventGen event.Interface, config config.Configuration, results ...*response.EngineResponse) {
for _, result := range results {
eventInfos := generateFailEvents(logger, result)
if config.GetGenerateSuccessEvents() {
eventInfos = append(eventInfos, generateSuccessEvents(logger, result)...)
}
eventGen.Add(eventInfos...)
}
}
func generateSuccessEvents(log logr.Logger, ers ...*response.EngineResponse) (eventInfos []event.Info) {
for _, er := range ers {
logger := log.WithValues("policy", er.PolicyResponse.Policy, "kind", er.PolicyResponse.Resource.Kind, "namespace", er.PolicyResponse.Resource.Namespace, "name", er.PolicyResponse.Resource.Name)
if !er.IsFailed() {
@@ -15,11 +26,10 @@ func generateSuccessEvents(log logr.Logger, ers []*response.EngineResponse) (eve
eventInfos = append(eventInfos, e)
}
}
return eventInfos
}
func generateFailEvents(log logr.Logger, ers []*response.EngineResponse) (eventInfos []event.Info) {
func generateFailEvents(log logr.Logger, ers ...*response.EngineResponse) (eventInfos []event.Info) {
for _, er := range ers {
eventInfos = append(eventInfos, generateFailEventsPerEr(log, er)...)
}
@@ -28,23 +38,22 @@ func generateFailEvents(log logr.Logger, ers []*response.EngineResponse) (eventI
func generateFailEventsPerEr(log logr.Logger, er *response.EngineResponse) []event.Info {
var eventInfos []event.Info
logger := log.WithValues("policy", er.PolicyResponse.Policy.Name,
"kind", er.PolicyResponse.Resource.Kind, "namespace", er.PolicyResponse.Resource.Namespace,
"name", er.PolicyResponse.Resource.Name)
logger := log.WithValues(
"policy", er.PolicyResponse.Policy.Name,
"kind", er.PolicyResponse.Resource.Kind,
"namespace", er.PolicyResponse.Resource.Namespace,
"name", er.PolicyResponse.Resource.Name,
)
for i, rule := range er.PolicyResponse.Rules {
if rule.Status != response.RuleStatusPass && rule.Status != response.RuleStatusSkip {
eventResource := event.NewResourceViolationEvent(event.PolicyController, event.PolicyViolation, er, &er.PolicyResponse.Rules[i])
eventInfos = append(eventInfos, eventResource)
eventPolicy := event.NewPolicyFailEvent(event.PolicyController, event.PolicyViolation, er, &er.PolicyResponse.Rules[i], false)
eventInfos = append(eventInfos, eventPolicy)
}
}
if len(eventInfos) > 0 {
logger.V(4).Info("generating events for policy", "events", eventInfos)
}
return eventInfos
}

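The refactor above moves event generation from package policy into the shared report utils, exports a single GenerateEvents entry point, and makes the lower-level helpers variadic. A stand-alone analog of the gating it implements (stand-in types; the real signatures take logr.Logger, event.Interface, config.Configuration and *response.EngineResponse):

package main

import "fmt"

// Hypothetical stand-ins for event.Info and event.Interface.
type eventInfo struct{ Reason, Policy string }

type eventGen interface{ Add(infos ...eventInfo) }

type printGen struct{}

func (printGen) Add(infos ...eventInfo) {
	for _, i := range infos {
		fmt.Printf("%s (%s)\n", i.Reason, i.Policy)
	}
}

type engineResponse struct {
	Policy string
	Failed bool
}

// generateEvents mirrors the exported helper: fail events are always
// produced, success events only when the configuration enables them,
// and everything is flushed to the generator per engine response.
func generateEvents(gen eventGen, successEventsEnabled bool, results ...engineResponse) {
	for _, r := range results {
		var infos []eventInfo
		if r.Failed {
			infos = append(infos, eventInfo{"PolicyViolation", r.Policy})
		}
		if !r.Failed && successEventsEnabled {
			infos = append(infos, eventInfo{"PolicyApplied", r.Policy})
		}
		gen.Add(infos...)
	}
}

func main() {
	generateEvents(printGen{}, true,
		engineResponse{Policy: "require-labels", Failed: true},
		engineResponse{Policy: "require-limits", Failed: false},
	)
}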

@@ -21,8 +21,8 @@ type scanner struct {
client dclient.Interface
rclient registryclient.Client
informerCacheResolvers resolvers.ConfigmapResolver
cfg config.Configuration
excludeGroupRole []string
config config.Configuration
}
type ScanResult struct {
@@ -34,13 +34,20 @@ type Scanner interface {
ScanResource(context.Context, unstructured.Unstructured, map[string]string, ...kyvernov1.PolicyInterface) map[kyvernov1.PolicyInterface]ScanResult
}
func NewScanner(logger logr.Logger, client dclient.Interface, rclient registryclient.Client, informerCacheResolvers resolvers.ConfigmapResolver, cfg config.Configuration, excludeGroupRole ...string) Scanner {
func NewScanner(
logger logr.Logger,
client dclient.Interface,
rclient registryclient.Client,
informerCacheResolvers resolvers.ConfigmapResolver,
config config.Configuration,
excludeGroupRole ...string,
) Scanner {
return &scanner{
logger: logger,
client: client,
rclient: rclient,
informerCacheResolvers: informerCacheResolvers,
cfg: cfg,
config: config,
excludeGroupRole: excludeGroupRole,
}
}
@@ -80,7 +87,7 @@ func (s *scanner) validateResource(ctx context.Context, resource unstructured.Un
if err := enginectx.AddNamespace(resource.GetNamespace()); err != nil {
return nil, err
}
if err := enginectx.AddImageInfos(&resource, s.cfg); err != nil {
if err := enginectx.AddImageInfos(&resource, s.config); err != nil {
return nil, err
}
if err := enginectx.AddOperation("CREATE"); err != nil {
@@ -93,7 +100,7 @@ func (s *scanner) validateResource(ctx context.Context, resource unstructured.Un
WithNamespaceLabels(nsLabels).
WithExcludeGroupRole(s.excludeGroupRole...).
WithInformerCacheResolver(s.informerCacheResolvers)
return engine.Validate(ctx, s.rclient, policyCtx, s.cfg), nil
return engine.Validate(ctx, s.rclient, policyCtx, s.config), nil
}
func (s *scanner) validateImages(ctx context.Context, resource unstructured.Unstructured, nsLabels map[string]string, policy kyvernov1.PolicyInterface) (*response.EngineResponse, error) {
@@ -104,7 +111,7 @@ func (s *scanner) validateImages(ctx context.Context, resource unstructured.Unst
if err := enginectx.AddNamespace(resource.GetNamespace()); err != nil {
return nil, err
}
if err := enginectx.AddImageInfos(&resource, s.cfg); err != nil {
if err := enginectx.AddImageInfos(&resource, s.config); err != nil {
return nil, err
}
if err := enginectx.AddOperation("CREATE"); err != nil {
@@ -117,7 +124,7 @@ func (s *scanner) validateImages(ctx context.Context, resource unstructured.Unst
WithNamespaceLabels(nsLabels).
WithExcludeGroupRole(s.excludeGroupRole...).
WithInformerCacheResolver(s.informerCacheResolvers)
response, _ := engine.VerifyAndPatchImages(ctx, s.rclient, policyCtx, s.cfg)
response, _ := engine.VerifyAndPatchImages(ctx, s.rclient, policyCtx, s.config)
if len(response.PolicyResponse.Rules) > 0 {
s.logger.Info("validateImages", "policy", policy, "response", response)
}


@@ -1,176 +0,0 @@
package policy
import (
"context"
"fmt"
"reflect"
"strings"
"time"
jsonpatch "github.com/evanphx/json-patch/v5"
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
enginecontext "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/context/resolvers"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/logging"
"github.com/kyverno/kyverno/pkg/registryclient"
jsonutils "github.com/kyverno/kyverno/pkg/utils/json"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// applyPolicy applies policy on a resource
func applyPolicy(
policy kyvernov1.PolicyInterface,
resource unstructured.Unstructured,
logger logr.Logger,
excludeGroupRole []string,
client dclient.Interface,
rclient registryclient.Client,
informerCacheResolvers resolvers.ConfigmapResolver,
namespaceLabels map[string]string,
cfg config.Configuration,
) (responses []*response.EngineResponse) {
startTime := time.Now()
defer func() {
name := resource.GetKind() + "/" + resource.GetName()
ns := resource.GetNamespace()
if ns != "" {
name = ns + "/" + name
}
logger.V(3).Info("applyPolicy", "resource", name, "processingTime", time.Since(startTime).String())
}()
var engineResponses []*response.EngineResponse
var engineResponseMutation, engineResponseValidation *response.EngineResponse
var err error
ctx := enginecontext.NewContext()
data, err := resource.MarshalJSON()
if err != nil {
logging.Error(err, "failed to marshal resource")
}
err = enginecontext.AddResource(ctx, data)
if err != nil {
logger.Error(err, "failed to add transform resource to ctx")
}
err = ctx.AddNamespace(resource.GetNamespace())
if err != nil {
logger.Error(err, "failed to add namespace to ctx")
}
if err := ctx.AddImageInfos(&resource, cfg); err != nil {
logger.Error(err, "unable to add image info to variables context")
}
if err := ctx.AddOperation("CREATE"); err != nil {
logger.Error(err, "unable to set operation in context")
}
engineResponseMutation, err = mutation(policy, resource, logger, ctx, rclient, informerCacheResolvers, namespaceLabels)
if err != nil {
logger.Error(err, "failed to process mutation rule")
}
policyCtx := engine.NewPolicyContextWithJsonContext(ctx).
WithPolicy(policy).
WithNewResource(resource).
WithNamespaceLabels(namespaceLabels).
WithClient(client).
WithExcludeGroupRole(excludeGroupRole...).
WithInformerCacheResolver(informerCacheResolvers)
engineResponseValidation = engine.Validate(context.TODO(), rclient, policyCtx, cfg)
engineResponses = append(engineResponses, mergeRuleRespose(engineResponseMutation, engineResponseValidation))
return engineResponses
}
func mutation(
policy kyvernov1.PolicyInterface,
resource unstructured.Unstructured,
log logr.Logger,
jsonContext enginecontext.Interface,
rclient registryclient.Client,
informerCacheResolvers resolvers.ConfigmapResolver,
namespaceLabels map[string]string,
) (*response.EngineResponse, error) {
policyContext := engine.NewPolicyContextWithJsonContext(jsonContext).
WithPolicy(policy).
WithNamespaceLabels(namespaceLabels).
WithNewResource(resource).
WithInformerCacheResolver(informerCacheResolvers)
engineResponse := engine.Mutate(context.TODO(), rclient, policyContext)
if !engineResponse.IsSuccessful() {
log.V(4).Info("failed to apply mutation rules; reporting them")
return engineResponse, nil
}
// Verify if the JSON patches returned by the Mutate are already applied to the resource
if reflect.DeepEqual(resource, engineResponse.PatchedResource) {
// resources matches
log.V(4).Info("resource already satisfies the policy")
return engineResponse, nil
}
return getFailedOverallRuleInfo(resource, engineResponse, log)
}
// getFailedOverallRuleInfo gets detailed info for over-all mutation failure
func getFailedOverallRuleInfo(resource unstructured.Unstructured, engineResponse *response.EngineResponse, log logr.Logger) (*response.EngineResponse, error) {
rawResource, err := resource.MarshalJSON()
if err != nil {
log.Error(err, "failed to marshall resource")
return &response.EngineResponse{}, err
}
// resource does not match so there was a mutation rule violated
for index, rule := range engineResponse.PolicyResponse.Rules {
log.V(4).Info("verifying if policy rule was applied before", "rule", rule.Name)
patches := rule.Patches
patch, err := jsonpatch.DecodePatch(jsonutils.JoinPatches(patches...))
if err != nil {
log.Error(err, "failed to decode JSON patch", "patches", patches)
return &response.EngineResponse{}, err
}
// apply the patches returned by mutate to the original resource
patchedResource, err := patch.Apply(rawResource)
if err != nil {
log.Error(err, "failed to apply JSON patch", "patches", patches)
return &response.EngineResponse{}, err
}
if !jsonpatch.Equal(patchedResource, rawResource) {
log.V(4).Info("policy rule conditions not satisfied by resource", "rule", rule.Name)
engineResponse.PolicyResponse.Rules[index].Status = response.RuleStatusFail
engineResponse.PolicyResponse.Rules[index].Message = fmt.Sprintf("mutation json patches not found at resource path %s", extractPatchPath(patches, log))
}
}
return engineResponse, nil
}
func extractPatchPath(patches [][]byte, log logr.Logger) string {
var resultPath []string
// extract the patch path and value
for _, patch := range patches {
if data, err := jsonutils.UnmarshalPatchOperation(patch); err != nil {
log.Error(err, "failed to decode the generate patch", "patch", string(patch))
continue
} else {
resultPath = append(resultPath, data.Path)
}
}
return strings.Join(resultPath, ";")
}
func mergeRuleRespose(mutation, validation *response.EngineResponse) *response.EngineResponse {
mutation.PolicyResponse.Rules = append(mutation.PolicyResponse.Rules, validation.PolicyResponse.Rules...)
return mutation
}

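The deleted applyPolicy path above verified mutate rules by re-applying each rule's JSON patches to the raw resource and marking the rule failed when the patch still changed anything. A minimal, runnable illustration of that check, using the same evanphx/json-patch library the deleted code imported (the resource and patch here are made up):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch/v5"
)

func main() {
	// Hypothetical resource and mutate-rule patch.
	resource := []byte(`{"metadata":{"labels":{"app":"nginx"}}}`)
	patch, err := jsonpatch.DecodePatch([]byte(
		`[{"op":"add","path":"/metadata/labels/team","value":"platform"}]`))
	if err != nil {
		panic(err)
	}
	// Apply the rule's patches to the live resource, as
	// getFailedOverallRuleInfo did.
	patched, err := patch.Apply(resource)
	if err != nil {
		panic(err)
	}
	// If the patched document differs, the resource did not yet satisfy
	// the mutation, so the rule was reported as failed.
	if !jsonpatch.Equal(patched, resource) {
		fmt.Println("rule not satisfied: patch changed the resource")
	}
}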

@@ -1,124 +0,0 @@
package policy
import (
"reflect"
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/utils/wildcard"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
)
// excludeAutoGenResources filter out the pods / jobs with ownerReference
func excludeAutoGenResources(policy kyvernov1.PolicyInterface, resourceMap map[string]unstructured.Unstructured, log logr.Logger) {
for uid, r := range resourceMap {
if engine.ManagedPodResource(policy, r) {
log.V(4).Info("exclude resource", "namespace", r.GetNamespace(), "kind", r.GetKind(), "name", r.GetName())
delete(resourceMap, uid)
}
}
}
// ExcludeResources ...
func excludeResources(included map[string]unstructured.Unstructured, exclude kyvernov1.ResourceDescription, configHandler config.Configuration, log logr.Logger) {
if reflect.DeepEqual(exclude, (kyvernov1.ResourceDescription{})) {
return
}
excludeName := func(name string) Condition {
if exclude.Name == "" {
return NotEvaluate
}
if wildcard.Match(exclude.Name, name) {
return Skip
}
return Process
}
excludeNamespace := func(namespace string) Condition {
if len(exclude.Namespaces) == 0 {
return NotEvaluate
}
if wildcard.CheckPatterns(exclude.Namespaces, namespace) {
return Skip
}
return Process
}
excludeSelector := func(labelsMap map[string]string) Condition {
if exclude.Selector == nil {
return NotEvaluate
}
selector, err := metav1.LabelSelectorAsSelector(exclude.Selector)
// if the label selector is incorrect, should be fail or
if err != nil {
log.Error(err, "failed to build label selector")
return Skip
}
if selector.Matches(labels.Set(labelsMap)) {
return Skip
}
return Process
}
findKind := func(kind string, kinds []string) bool {
for _, k := range kinds {
if k == kind {
return true
}
}
return false
}
excludeKind := func(kind string) Condition {
if len(exclude.Kinds) == 0 {
return NotEvaluate
}
if findKind(kind, exclude.Kinds) {
return Skip
}
return Process
}
// check exclude condition for each resource
for uid, resource := range included {
// 0 -> don't check
// 1 -> is not to be exclude
// 2 -> to be exclude
excludeEval := []Condition{}
if ret := excludeName(resource.GetName()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
if ret := excludeNamespace(resource.GetNamespace()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
if ret := excludeSelector(resource.GetLabels()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
if ret := excludeKind(resource.GetKind()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
// exclude the filtered resources
if configHandler.ToFilter(resource.GetKind(), resource.GetNamespace(), resource.GetName()) {
delete(included, uid)
continue
}
func() {
for _, ret := range excludeEval {
if ret == Process {
// Process the resources
continue
}
// Skip the resource from processing
delete(included, uid)
}
}()
}
}

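The deleted exclude evaluation above is three-valued: each criterion (name, namespace, selector, kind) returns NotEvaluate when it is not configured, Skip when the resource matches the exclusion, and Process otherwise, and the resource is dropped if any evaluated criterion says Skip. A compact stand-alone rendering of that scheme (plain equality stands in for kyverno's wildcard matching):

package main

import "fmt"

type condition int

const (
	notEvaluate condition = iota
	process
	skip
)

// excludeName returns notEvaluate when no exclusion is configured,
// skip when the name matches the exclusion, process otherwise.
func excludeName(pattern, name string) condition {
	if pattern == "" {
		return notEvaluate
	}
	if pattern == name {
		return skip
	}
	return process
}

// excluded drops the resource if any evaluated criterion said skip.
func excluded(conds ...condition) bool {
	for _, c := range conds {
		if c == skip {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(excluded(excludeName("", "nginx")))      // false: criterion not evaluated
	fmt.Println(excluded(excludeName("nginx", "nginx"))) // true: exclusion matched
	fmt.Println(excluded(excludeName("other", "nginx"))) // false: evaluated, no match
}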

@@ -27,12 +27,8 @@ import (
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/metrics"
policyExecutionDuration "github.com/kyverno/kyverno/pkg/metrics/policyexecutionduration"
policyResults "github.com/kyverno/kyverno/pkg/metrics/policyresults"
"github.com/kyverno/kyverno/pkg/registryclient"
engineutils "github.com/kyverno/kyverno/pkg/utils/engine"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
"github.com/kyverno/kyverno/pkg/utils/wildcard"
"github.com/pkg/errors"
"go.uber.org/multierr"
"golang.org/x/exp/slices"
@@ -92,9 +88,6 @@ type PolicyController struct {
informersSynced []cache.InformerSynced
// Resource manager, manages the mapping for already processed resource
rm ResourceManager
// helpers to validate against current loaded configuration
configHandler config.Configuration
@@ -149,9 +142,6 @@ func NewPolicyController(
pc.urLister = urInformer.Lister()
pc.informersSynced = []cache.InformerSynced{pInformer.Informer().HasSynced, npInformer.Informer().HasSynced, urInformer.Informer().HasSynced, namespaces.Informer().HasSynced}
// resource manager
// rebuild after 300 seconds/ 5 mins
pc.rm = NewResourceManager(30)
return &pc, nil
}
@@ -382,7 +372,6 @@ func (pc *PolicyController) syncPolicy(key string) error {
logger.Error(err, "failed to updateUR on Policy update")
}
}
pc.processExistingResources(policy)
return nil
}
@@ -399,86 +388,6 @@ func (pc *PolicyController) getPolicy(key string) (kyvernov1.PolicyInterface, er
}
}
func (pc *PolicyController) processExistingResources(policy kyvernov1.PolicyInterface) {
logger := pc.log.WithValues("policy", policy.GetName())
logger.V(4).Info("applying policy to existing resources")
// Parse through all the resources drops the cache after configured rebuild time
pc.rm.Drop()
for _, rule := range autogen.ComputeRules(policy) {
if !rule.HasValidate() && !rule.HasVerifyImages() {
continue
}
matchKinds := rule.MatchResources.GetKinds()
pc.processExistingKinds(matchKinds, policy, rule, logger)
}
}
func (pc *PolicyController) applyAndReportPerNamespace(policy kyvernov1.PolicyInterface, kind string, ns string, rule kyvernov1.Rule, logger logr.Logger, metricAlreadyRegistered *bool) {
rMap := pc.getResourcesPerNamespace(kind, ns, rule, logger)
excludeAutoGenResources(policy, rMap, logger)
if len(rMap) == 0 {
return
}
var engineResponses []*response.EngineResponse
for _, resource := range rMap {
responses := pc.applyPolicy(policy, resource, logger)
engineResponses = append(engineResponses, responses...)
}
if !*metricAlreadyRegistered && len(engineResponses) > 0 {
for _, engineResponse := range engineResponses {
// registering the kyverno_policy_results_total metric concurrently
go pc.registerPolicyResultsMetricValidation(logger, policy, *engineResponse)
// registering the kyverno_policy_execution_duration_seconds metric concurrently
go pc.registerPolicyExecutionDurationMetricValidate(logger, policy, *engineResponse)
}
*metricAlreadyRegistered = true
}
pc.report(engineResponses, logger)
}
func (pc *PolicyController) registerPolicyResultsMetricValidation(logger logr.Logger, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
if err := policyResults.ProcessEngineResponse(context.TODO(), pc.metricsConfig, policy, engineResponse, metrics.BackgroundScan, metrics.ResourceCreated); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_results_total metrics for the above policy", "name", policy.GetName())
}
}
func (pc *PolicyController) registerPolicyExecutionDurationMetricValidate(logger logr.Logger, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
if err := policyExecutionDuration.ProcessEngineResponse(context.TODO(), pc.metricsConfig, policy, engineResponse, metrics.BackgroundScan, metrics.ResourceCreated); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_execution_duration_seconds metrics for the above policy", "name", policy.GetName())
}
}
func (pc *PolicyController) applyPolicy(policy kyvernov1.PolicyInterface, resource unstructured.Unstructured, logger logr.Logger) (engineResponses []*response.EngineResponse) {
// pre-processing, check if the policy and resource version has been processed before
if !pc.rm.ProcessResource(policy.GetName(), policy.GetResourceVersion(), resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {
logger.V(4).Info("policy and resource already processed", "policyResourceVersion", policy.GetResourceVersion(), "resourceResourceVersion", resource.GetResourceVersion(), "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
}
namespaceLabels := engineutils.GetNamespaceSelectorsFromNamespaceLister(resource.GetKind(), resource.GetNamespace(), pc.nsLister, logger)
engineResponse := applyPolicy(policy, resource, logger, pc.configHandler.GetExcludeGroupRole(), pc.client, pc.rclient, pc.informerCacheResolvers, namespaceLabels, pc.configHandler)
engineResponses = append(engineResponses, engineResponse...)
// post-processing, register the resource as processed
pc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion())
return
}
func (pc *PolicyController) report(engineResponses []*response.EngineResponse, logger logr.Logger) {
eventInfos := generateFailEvents(logger, engineResponses)
pc.eventGen.Add(eventInfos...)
if pc.configHandler.GetGenerateSuccessEvents() {
successEventInfos := generateSuccessEvents(logger, engineResponses)
pc.eventGen.Add(successEventInfos...)
}
}
// forceReconciliation forces a background scan by adding all policies to the workqueue
func (pc *PolicyController) forceReconciliation(ctx context.Context) {
logger := pc.log.WithName("forceReconciliation")
@@ -644,95 +553,6 @@ func (pc *PolicyController) listGenerateURs(policyKey string, trigger *unstructu
return generateURs
}
func (pc *PolicyController) getResourceList(kind, namespace string, labelSelector *metav1.LabelSelector, log logr.Logger) *unstructured.UnstructuredList {
gv, k := kubeutils.GetKindFromGVK(kind)
resourceList, err := pc.client.ListResource(context.TODO(), gv, k, namespace, labelSelector)
if err != nil {
log.Error(err, "failed to list resources", "kind", k, "namespace", namespace)
return nil
}
return resourceList
}
// GetResourcesPerNamespace returns
// - Namespaced resources across all namespaces if namespace is set to empty "", for Namespaced Kind
// - Namespaced resources in the given namespace
// - Cluster-wide resources for Cluster-wide Kind
func (pc *PolicyController) getResourcesPerNamespace(kind string, namespace string, rule kyvernov1.Rule, log logr.Logger) map[string]unstructured.Unstructured {
resourceMap := map[string]unstructured.Unstructured{}
if kind == "Namespace" {
namespace = ""
}
list := pc.getResourceList(kind, namespace, rule.MatchResources.Selector, log)
if list != nil {
for _, r := range list.Items {
if pc.match(r, rule) {
resourceMap[string(r.GetUID())] = r
}
}
}
// skip resources to be filtered
excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, pc.configHandler, log)
return resourceMap
}
func (pc *PolicyController) match(r unstructured.Unstructured, rule kyvernov1.Rule) bool {
if r.GetDeletionTimestamp() != nil {
return false
}
if r.GetKind() == "Pod" {
if !isRunningPod(r) {
return false
}
}
// match name
if rule.MatchResources.Name != "" {
if !wildcard.Match(rule.MatchResources.Name, r.GetName()) {
return false
}
}
// Skip the filtered resources
if pc.configHandler.ToFilter(r.GetKind(), r.GetNamespace(), r.GetName()) {
return false
}
return true
}
func (pc *PolicyController) processExistingKinds(kinds []string, policy kyvernov1.PolicyInterface, rule kyvernov1.Rule, logger logr.Logger) {
for _, kind := range kinds {
logger = logger.WithValues("rule", rule.Name, "kind", kind)
_, err := pc.rm.GetScope(kind)
if err != nil {
gv, k := kubeutils.GetKindFromGVK(kind)
if !strings.Contains(k, "*") {
resourceSchema, _, _, err := pc.client.Discovery().FindResource(gv, k)
if err != nil {
logger.Error(err, "failed to find resource", "kind", k)
continue
}
pc.rm.RegisterScope(k, resourceSchema.Namespaced)
}
}
// this tracker would help to ensure that even for multiple namespaces, duplicate metric are not generated
metricRegisteredTracker := false
if policy.GetNamespace() != "" {
ns := policy.GetNamespace()
pc.applyAndReportPerNamespace(policy, kind, ns, rule, logger.WithValues("kind", kind).WithValues("ns", ns), &metricRegisteredTracker)
continue
}
pc.applyAndReportPerNamespace(policy, kind, "", rule, logger.WithValues("kind", kind), &metricRegisteredTracker)
}
}
func generateTriggers(client dclient.Interface, rule kyvernov1.Rule, log logr.Logger) []*unstructured.Unstructured {
list := &unstructured.UnstructuredList{}

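Also removed with the legacy scan path above: the per-policy metric registration and its metricRegisteredTracker, a boolean pointer threaded through the per-namespace loop so kyverno_policy_results_total and the execution-duration metric were registered only once per policy even when a rule matched resources in many namespaces. A tiny stand-alone rendering of that pattern (names are mine):

package main

import "fmt"

// applyAndReport mirrors applyAndReportPerNamespace's use of the tracker:
// the first namespace to produce results registers the metrics, later
// namespaces only report.
func applyAndReport(ns string, registered *bool) {
	if !*registered {
		fmt.Println("registering per-policy metrics once, triggered by namespace", ns)
		*registered = true
	}
	fmt.Println("reporting scan results for namespace", ns)
}

func main() {
	registered := false
	for _, ns := range []string{"default", "kube-system"} {
		applyAndReport(ns, &registered)
	}
}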

@@ -1,95 +0,0 @@
package policy
import (
"errors"
"sync"
"time"
)
type ResourceManager interface {
ProcessResource(policy, pv, kind, ns, name, rv string) bool
// TODO removeResource(kind, ns, name string) error
RegisterResource(policy, pv, kind, ns, name, rv string)
RegisterScope(kind string, namespaced bool)
GetScope(kind string) (bool, error)
Drop()
}
// resourceManager stores the details on already processed resources for caching
type resourceManager struct {
// we drop and re-build the cache
// based on the memory consumer of by the map
scope map[string]bool
data map[string]interface{}
mux sync.RWMutex
time time.Time
rebuildTime int64 // after how many seconds should we rebuild the cache
}
// resourceManager returns a new ResourceManager
func NewResourceManager(rebuildTime int64) ResourceManager {
return &resourceManager{
scope: make(map[string]bool),
data: make(map[string]interface{}),
time: time.Now(),
rebuildTime: rebuildTime,
}
}
// Drop drop the cache after every rebuild interval mins
func (rm *resourceManager) Drop() {
timeSince := time.Since(rm.time)
if timeSince > time.Duration(rm.rebuildTime)*time.Second {
rm.mux.Lock()
defer rm.mux.Unlock()
rm.data = map[string]interface{}{}
rm.time = time.Now()
}
}
var empty struct{}
// RegisterResource stores if the policy is processed on this resource version
func (rm *resourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
rm.mux.Lock()
defer rm.mux.Unlock()
// add the resource
key := buildKey(policy, pv, kind, ns, name, rv)
rm.data[key] = empty
}
// ProcessResource returns true if the policy was not applied on the resource
func (rm *resourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
rm.mux.RLock()
defer rm.mux.RUnlock()
key := buildKey(policy, pv, kind, ns, name, rv)
_, ok := rm.data[key]
return !ok
}
// RegisterScope stores the scope of the given kind
func (rm *resourceManager) RegisterScope(kind string, namespaced bool) {
rm.mux.Lock()
defer rm.mux.Unlock()
rm.scope[kind] = namespaced
}
// GetScope gets the scope of the given kind
// return error if kind is not registered
func (rm *resourceManager) GetScope(kind string) (bool, error) {
rm.mux.RLock()
defer rm.mux.RUnlock()
namespaced, ok := rm.scope[kind]
if !ok {
return false, errors.New("NotFound")
}
return namespaced, nil
}
func buildKey(policy, pv, kind, ns, name, rv string) string {
return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
}

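The deleted resourceManager above was a rebuildable set keyed on policy and resource coordinates: buildKey joins policy name, policy resourceVersion, kind, namespace, name and resource resourceVersion, and ProcessResource answers whether that exact combination was already handled since the last Drop. A stand-alone analog, without the mutex and the timed rebuild:

package main

import "fmt"

type seen map[string]struct{}

// buildKey reproduces the deleted key scheme.
func buildKey(policy, pv, kind, ns, name, rv string) string {
	return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
}

// process returns true the first time a key is seen and false afterwards,
// i.e. ProcessResource and RegisterResource collapsed into one step.
func (s seen) process(k string) bool {
	if _, ok := s[k]; ok {
		return false
	}
	s[k] = struct{}{}
	return true
}

func main() {
	cache := seen{}
	k := buildKey("require-labels", "42", "Pod", "default", "nginx", "1001")
	fmt.Println(cache.process(k)) // true: first encounter, apply the policy
	fmt.Println(cache.process(k)) // false: same versions already handled
}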

@@ -5,16 +5,6 @@ import (
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func isRunningPod(obj unstructured.Unstructured) bool {
objMap := obj.UnstructuredContent()
phase, ok, err := unstructured.NestedString(objMap, "status", "phase")
if !ok || err != nil {
return false
}
return phase == "Running"
}
// check if all slice elements are same
func isMatchResourcesAllValid(rule kyvernov1.Rule) bool {
var kindlist []string