1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-04-08 10:04:25 +00:00

refactor: add per resource reports aggregation ()

* refactor: add per resource reports aggregation

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* added controller implementation

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* clean

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix kuttl tests

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix kuttl tests

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* vaps

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

---------

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
This commit is contained in:
Charles-Edouard Brétéché 2023-09-20 08:51:32 +02:00 committed by GitHub
parent b4861015f0
commit 2444b7c670
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
50 changed files with 612 additions and 216 deletions

View file

@ -15,7 +15,7 @@ import (
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/config"
admissionreportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/admission"
aggregatereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/aggregate/namespace"
aggregatereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/aggregate/resource"
backgroundscancontroller "github.com/kyverno/kyverno/pkg/controllers/report/background"
resourcereportcontroller "github.com/kyverno/kyverno/pkg/controllers/report/resource"
engineapi "github.com/kyverno/kyverno/pkg/engine/api"

1
go.mod
View file

@ -196,6 +196,7 @@ require (
github.com/emirpasic/gods v1.18.1 // indirect
github.com/evanphx/json-patch v5.6.0+incompatible // indirect
github.com/felixge/httpsnoop v1.0.3 // indirect
github.com/frankban/quicktest v1.14.5 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/fxamacker/cbor/v2 v2.5.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect

6
go.sum
View file

@ -490,8 +490,8 @@ github.com/foxcpp/go-mockdns v1.0.0 h1:7jBqxd3WDWwi/6WhDvacvH1XsN3rOLXyHM1uhvIx6
github.com/foxcpp/go-mockdns v1.0.0/go.mod h1:lgRN6+KxQBawyIghpnl5CezHFGS9VLzvtVlwxvzXTQ4=
github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4=
github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20=
github.com/frankban/quicktest v1.14.4 h1:g2rn0vABPOOXmZUj+vbmUp0lPoXEMuhTpIluN0XL9UY=
github.com/frankban/quicktest v1.14.4/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/frankban/quicktest v1.14.5 h1:dfYrrRyLtiqT9GyKXgdh+k4inNeTvmGbuSgZ3lx3GhA=
github.com/frankban/quicktest v1.14.5/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
@ -1274,6 +1274,7 @@ github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU=
github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.0.0-20181023235946-059132a15dd0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
@ -1376,6 +1377,7 @@ github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.2/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=

View file

@ -223,7 +223,7 @@ func (c *controller) mergeBackgroundScanReports(ctx context.Context, namespace s
func (c *controller) reconcileReport(ctx context.Context, policyMap map[string]policyMapEntry, report kyvernov1alpha2.ReportInterface, namespace, name string, results ...policyreportv1alpha2.PolicyReportResult) (kyvernov1alpha2.ReportInterface, error) {
if report == nil {
report = reportutils.NewPolicyReport(namespace, name, results...)
report = reportutils.NewPolicyReport(namespace, name, nil, results...)
for _, result := range results {
policy := policyMap[result.Policy]
if policy.policy != nil {

View file

@ -0,0 +1,357 @@
package resource
import (
"context"
"time"
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
kyvernov1alpha2 "github.com/kyverno/kyverno/api/kyverno/v1alpha2"
policyreportv1alpha2 "github.com/kyverno/kyverno/api/policyreport/v1alpha2"
"github.com/kyverno/kyverno/pkg/autogen"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernov1informers "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernov1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
"github.com/kyverno/kyverno/pkg/controllers"
"github.com/kyverno/kyverno/pkg/controllers/report/resource"
controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
reportutils "github.com/kyverno/kyverno/pkg/utils/report"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
metadatainformers "k8s.io/client-go/metadata/metadatainformer"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
	// Workers is the number of workers for this controller
	Workers = 10
	// ControllerName uniquely identifies this controller (used for the queue name and logger).
	ControllerName = "resource-aggregate-report-controller"
	// maxRetries bounds how many times a failing key is requeued before it is dropped.
	maxRetries = 10
	// enqueueDelay debounces bursts of report/policy events before reconciling.
	enqueueDelay = 10 * time.Second
)
// controller aggregates per-resource admission and background scan reports
// into a single policy report per resource.
type controller struct {
	// clients
	client versioned.Interface

	// listers
	polLister  kyvernov1listers.PolicyLister
	cpolLister kyvernov1listers.ClusterPolicyLister

	// queue
	queue workqueue.RateLimitingInterface

	// cache
	metadataCache resource.MetadataCache
	// chunkSize is stored at construction time; it is not referenced in this
	// file view — presumably it limits report result chunking. TODO confirm.
	chunkSize int
}
// policyMapEntry pairs a policy with the set of its rule names
// (as computed by autogen.ComputeRules).
type policyMapEntry struct {
	policy kyvernov1.PolicyInterface
	rules  sets.Set[string]
}
// NewController creates the resource aggregate report controller.
// It registers event handlers so that:
//   - any policy change re-enqueues every existing (cluster) policy report,
//   - background scan report events re-enqueue with a delay,
//   - admission report events re-enqueue only for aggregated reports.
// All enqueues are delayed by enqueueDelay to debounce bursts.
func NewController(
	client versioned.Interface,
	metadataFactory metadatainformers.SharedInformerFactory,
	polInformer kyvernov1informers.PolicyInformer,
	cpolInformer kyvernov1informers.ClusterPolicyInformer,
	metadataCache resource.MetadataCache,
	chunkSize int,
) controllers.Controller {
	// metadata-only informers for every report kind this controller watches
	admrInformer := metadataFactory.ForResource(kyvernov1alpha2.SchemeGroupVersion.WithResource("admissionreports"))
	cadmrInformer := metadataFactory.ForResource(kyvernov1alpha2.SchemeGroupVersion.WithResource("clusteradmissionreports"))
	bgscanrInformer := metadataFactory.ForResource(kyvernov1alpha2.SchemeGroupVersion.WithResource("backgroundscanreports"))
	cbgscanrInformer := metadataFactory.ForResource(kyvernov1alpha2.SchemeGroupVersion.WithResource("clusterbackgroundscanreports"))
	polrInformer := metadataFactory.ForResource(policyreportv1alpha2.SchemeGroupVersion.WithResource("policyreports"))
	cpolrInformer := metadataFactory.ForResource(policyreportv1alpha2.SchemeGroupVersion.WithResource("clusterpolicyreports"))
	c := controller{
		client:        client,
		polLister:     polInformer.Lister(),
		cpolLister:    cpolInformer.Lister(),
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName),
		metadataCache: metadataCache,
		chunkSize:     chunkSize,
	}
	// enqueueAll requeues every existing policy report and cluster policy
	// report — a policy change can invalidate results in any aggregated report.
	enqueueAll := func() {
		if list, err := polrInformer.Lister().List(labels.Everything()); err == nil {
			for _, item := range list {
				c.queue.AddAfter(controllerutils.MetaObjectToName(item.(*metav1.PartialObjectMetadata)), enqueueDelay)
			}
		}
		if list, err := cpolrInformer.Lister().List(labels.Everything()); err == nil {
			for _, item := range list {
				c.queue.AddAfter(controllerutils.MetaObjectToName(item.(*metav1.PartialObjectMetadata)), enqueueDelay)
			}
		}
	}
	// any add/update/delete of a (cluster) policy triggers a full re-aggregation
	if _, err := controllerutils.AddEventHandlersT(
		polInformer.Informer(),
		func(_ metav1.Object) { enqueueAll() },
		func(_, _ metav1.Object) { enqueueAll() },
		func(_ metav1.Object) { enqueueAll() },
	); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	if _, err := controllerutils.AddEventHandlersT(
		cpolInformer.Informer(),
		func(_ metav1.Object) { enqueueAll() },
		func(_, _ metav1.Object) { enqueueAll() },
		func(_ metav1.Object) { enqueueAll() },
	); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	// background scan report events are enqueued with the same delay
	if _, _, err := controllerutils.AddDelayedDefaultEventHandlers(logger, bgscanrInformer.Informer(), c.queue, enqueueDelay); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	if _, _, err := controllerutils.AddDelayedDefaultEventHandlers(logger, cbgscanrInformer.Informer(), c.queue, enqueueDelay); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	enqueueFromAdmr := func(obj metav1.Object) {
		// no need to consider non aggregated reports
		if controllerutils.HasLabel(obj, reportutils.LabelAggregatedReport) {
			c.queue.AddAfter(controllerutils.MetaObjectToName(obj), enqueueDelay)
		}
	}
	if _, err := controllerutils.AddEventHandlersT(
		admrInformer.Informer(),
		func(obj metav1.Object) { enqueueFromAdmr(obj) },
		func(_, obj metav1.Object) { enqueueFromAdmr(obj) },
		func(obj metav1.Object) { enqueueFromAdmr(obj) },
	); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	if _, err := controllerutils.AddEventHandlersT(
		cadmrInformer.Informer(),
		func(obj metav1.Object) { enqueueFromAdmr(obj) },
		func(_, obj metav1.Object) { enqueueFromAdmr(obj) },
		func(obj metav1.Object) { enqueueFromAdmr(obj) },
	); err != nil {
		logger.Error(err, "failed to register event handlers")
	}
	return &c
}
// Run starts the controller workers and blocks until ctx is cancelled.
func (c *controller) Run(ctx context.Context, workers int) {
	controllerutils.Run(ctx, logger, ControllerName, time.Second, c.queue, workers, maxRetries, c.reconcile)
}
// mergeReports folds the results of the given reports into accumulator.
// Results are keyed by source/policy/rule/uid; when two reports carry a result
// for the same key, the one with the most recent timestamp wins. Results whose
// policy/rule is not present in policyMap are dropped, except those produced
// by validating admission policies which are always kept.
func mergeReports(policyMap map[string]policyMapEntry, accumulator map[string]policyreportv1alpha2.PolicyReportResult, uid types.UID, reports ...kyvernov1alpha2.ReportInterface) {
	for _, report := range reports {
		if report == nil {
			continue
		}
		for _, result := range report.GetResults() {
			entry := policyMap[result.Policy]
			// TODO: vap map
			keep := (entry.rules != nil && entry.rules.Has(result.Rule)) || result.Source == "ValidatingAdmissionPolicy"
			if !keep {
				continue
			}
			key := result.Source + "/" + result.Policy + "/" + result.Rule + "/" + string(uid)
			existing, found := accumulator[key]
			if !found || existing.Timestamp.Seconds < result.Timestamp.Seconds {
				accumulator[key] = result
			}
		}
	}
}
// createPolicyMap builds a lookup from policy key (namespace/name, as produced
// by cache.MetaNamespaceKeyFunc) to the policy and the set of its rule names.
// Both cluster policies and namespaced policies are included.
func (c *controller) createPolicyMap() (map[string]policyMapEntry, error) {
	results := map[string]policyMapEntry{}
	cpols, err := c.cpolLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	for _, cpol := range cpols {
		if err := addPolicyToMap(results, cpol); err != nil {
			return nil, err
		}
	}
	pols, err := c.polLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	for _, pol := range pols {
		if err := addPolicyToMap(results, pol); err != nil {
			return nil, err
		}
	}
	return results, nil
}

// addPolicyToMap records a single policy and its autogen-computed rule names
// in results, keyed by the policy's namespace/name.
func addPolicyToMap(results map[string]policyMapEntry, policy kyvernov1.PolicyInterface) error {
	key, err := cache.MetaNamespaceKeyFunc(policy)
	if err != nil {
		return err
	}
	entry := policyMapEntry{
		policy: policy,
		rules:  sets.New[string](),
	}
	for _, rule := range autogen.ComputeRules(policy) {
		entry.rules.Insert(rule.Name)
	}
	results[key] = entry
	return nil
}
// getBackgroundScanReport fetches the background scan report for the given
// resource, using the cluster scoped variant when namespace is empty.
// A missing report is not an error: (nil, nil) is returned.
func (c *controller) getBackgroundScanReport(ctx context.Context, namespace, name string) (kyvernov1alpha2.ReportInterface, error) {
	var report kyvernov1alpha2.ReportInterface
	var err error
	if namespace == "" {
		report, err = c.client.KyvernoV1alpha2().ClusterBackgroundScanReports().Get(ctx, name, metav1.GetOptions{})
	} else {
		report, err = c.client.KyvernoV1alpha2().BackgroundScanReports(namespace).Get(ctx, name, metav1.GetOptions{})
	}
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, nil
		}
		return nil, err
	}
	return report, nil
}
// getAdmissionReport fetches the admission report for the given resource,
// using the cluster scoped variant when namespace is empty.
// A missing report is not an error: (nil, nil) is returned.
func (c *controller) getAdmissionReport(ctx context.Context, namespace, name string) (kyvernov1alpha2.ReportInterface, error) {
	var report kyvernov1alpha2.ReportInterface
	var err error
	if namespace == "" {
		report, err = c.client.KyvernoV1alpha2().ClusterAdmissionReports().Get(ctx, name, metav1.GetOptions{})
	} else {
		report, err = c.client.KyvernoV1alpha2().AdmissionReports(namespace).Get(ctx, name, metav1.GetOptions{})
	}
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, nil
		}
		return nil, err
	}
	return report, nil
}
// getPolicyReport fetches the aggregated policy report for the given resource,
// using the cluster scoped variant when namespace is empty.
// A missing report is not an error: (nil, nil) is returned.
func (c *controller) getPolicyReport(ctx context.Context, namespace, name string) (kyvernov1alpha2.ReportInterface, error) {
	var report kyvernov1alpha2.ReportInterface
	var err error
	if namespace == "" {
		report, err = c.client.Wgpolicyk8sV1alpha2().ClusterPolicyReports().Get(ctx, name, metav1.GetOptions{})
	} else {
		report, err = c.client.Wgpolicyk8sV1alpha2().PolicyReports(namespace).Get(ctx, name, metav1.GetOptions{})
	}
	if err != nil {
		if apierrors.IsNotFound(err) {
			return nil, nil
		}
		return nil, err
	}
	return report, nil
}
// getReports returns the admission report and the background scan report for
// the given resource. Either may be nil when it does not exist.
func (c *controller) getReports(ctx context.Context, namespace, name string) (kyvernov1alpha2.ReportInterface, kyvernov1alpha2.ReportInterface, error) {
	admr, err := c.getAdmissionReport(ctx, namespace, name)
	if err != nil {
		return nil, nil, err
	}
	bgscanr, err := c.getBackgroundScanReport(ctx, namespace, name)
	if err != nil {
		return nil, nil, err
	}
	return admr, bgscanr, nil
}
// reconcile rebuilds the aggregated policy report for a single resource.
// The queue key carries the resource namespace (empty for cluster scoped
// resources) and its name, which is the resource UID.
func (c *controller) reconcile(ctx context.Context, logger logr.Logger, _, namespace, name string) error {
	uid := types.UID(name)
	resource, gvk, exists := c.metadataCache.GetResourceHash(uid)
	if exists {
		// load the source reports and the current aggregated report, if any
		admissionReport, backgroundReport, err := c.getReports(ctx, namespace, name)
		if err != nil {
			return err
		}
		policyReport, err := c.getPolicyReport(ctx, namespace, name)
		if err != nil {
			return err
		}
		create := false
		// the aggregated report is scoped to and owned by the target resource
		scope := &corev1.ObjectReference{
			Kind:       gvk.Kind,
			Namespace:  namespace,
			Name:       resource.Name,
			UID:        uid,
			APIVersion: gvk.GroupVersion().String(),
		}
		if policyReport == nil {
			create = true
			policyReport = reportutils.NewPolicyReport(namespace, name, scope)
			controllerutils.SetOwner(policyReport, gvk.GroupVersion().String(), gvk.Kind, resource.Name, uid)
		}
		// aggregate reports
		policyMap, err := c.createPolicyMap()
		if err != nil {
			return err
		}
		// merge results from the existing policy report and the source reports,
		// dropping results for policies/rules that no longer exist and keeping
		// the most recent result per key
		merged := map[string]policyreportv1alpha2.PolicyReportResult{}
		mergeReports(policyMap, merged, uid, policyReport, admissionReport, backgroundReport)
		// NOTE(review): map iteration makes the order of results
		// nondeterministic across reconciles — confirm this is acceptable.
		var results []policyreportv1alpha2.PolicyReportResult
		for _, result := range merged {
			results = append(results, result)
		}
		if len(results) == 0 {
			// nothing left to report: delete the aggregated report if it exists
			if !create {
				if err := reportutils.DeleteReport(ctx, policyReport, c.client); err != nil {
					return err
				}
			}
		} else {
			reportutils.SetResults(policyReport, results...)
			if create {
				if _, err := reportutils.CreateReport(ctx, policyReport, c.client); err != nil {
					return err
				}
			} else {
				if _, err := reportutils.UpdateReport(ctx, policyReport, c.client); err != nil {
					return err
				}
			}
		}
		// the source reports have been folded in; delete them
		if admissionReport != nil {
			if err := reportutils.DeleteReport(ctx, admissionReport, c.client); err != nil {
				return err
			}
		}
		if backgroundReport != nil {
			if err := reportutils.DeleteReport(ctx, backgroundReport, c.client); err != nil {
				return err
			}
		}
	} else {
		// resource no longer known to the metadata cache (presumably deleted):
		// clean up its aggregated report if present
		policyReport, err := c.getPolicyReport(ctx, namespace, name)
		if err != nil {
			return err
		}
		if policyReport != nil {
			if err := reportutils.DeleteReport(ctx, policyReport, c.client); err != nil {
				return err
			}
		}
	}
	return nil
}

View file

@ -0,0 +1,5 @@
package resource
import "github.com/kyverno/kyverno/pkg/logging"
var logger = logging.ControllerLogger(ControllerName)

View file

@ -109,20 +109,12 @@ func NewController(
eventGen: eventGen,
policyReports: policyReports,
}
if vapInformer != nil {
c.vapLister = vapInformer.Lister()
if _, err := controllerutils.AddEventHandlersT(vapInformer.Informer(), c.addVAP, c.updateVAP, c.deleteVAP); err != nil {
logger.Error(err, "failed to register event handlers")
}
}
if _, _, err := controllerutils.AddDefaultEventHandlers(logger, bgscanr.Informer(), queue); err != nil {
logger.Error(err, "failed to register event handlers")
}
if _, _, err := controllerutils.AddDefaultEventHandlers(logger, cbgscanr.Informer(), queue); err != nil {
logger.Error(err, "failed to register event handlers")
}
if _, err := controllerutils.AddEventHandlersT(polInformer.Informer(), c.addPolicy, c.updatePolicy, c.deletePolicy); err != nil {
logger.Error(err, "failed to register event handlers")
}

View file

@ -135,6 +135,10 @@ func QueueAfter(queue workqueue.RateLimitingInterface, delay time.Duration) Enqu
}
}
// MetaObjectToName returns the namespace/name key of the given object as a string.
func MetaObjectToName(obj metav1.Object) string {
	return cache.MetaObjectToName(obj).String()
}
// MetaNamespaceKey wraps cache.MetaNamespaceKeyFunc, returning the
// namespace/name key for the given object.
func MetaNamespaceKey(obj interface{}) (interface{}, error) {
	return cache.MetaNamespaceKeyFunc(obj)
}

View file

@ -6,6 +6,7 @@ import (
engineapi "github.com/kyverno/kyverno/pkg/engine/api"
controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
admissionv1 "k8s.io/api/admission/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
@ -48,12 +49,16 @@ func NewBackgroundScanReport(namespace, name string, gvk schema.GroupVersionKind
return report
}
func NewPolicyReport(namespace, name string, results ...policyreportv1alpha2.PolicyReportResult) kyvernov1alpha2.ReportInterface {
func NewPolicyReport(namespace, name string, scope *corev1.ObjectReference, results ...policyreportv1alpha2.PolicyReportResult) kyvernov1alpha2.ReportInterface {
var report kyvernov1alpha2.ReportInterface
if namespace == "" {
report = &policyreportv1alpha2.ClusterPolicyReport{}
report = &policyreportv1alpha2.ClusterPolicyReport{
Scope: scope,
}
} else {
report = &policyreportv1alpha2.PolicyReport{}
report = &policyreportv1alpha2.PolicyReport{
Scope: scope,
}
}
report.SetName(name)
report.SetNamespace(namespace)

View file

@ -1,7 +1,7 @@
## Description
This test creates a policy, a policy exception and a configmap.
It makes sure the generated admission report contains a skipped result instead of a failed one.
It makes sure the generated policy report contains a skipped result instead of a failed one.
## Steps
@ -9,4 +9,4 @@ It makes sure the generated admission report contains a skipped result instead o
- Assert the policy becomes ready
1. - Create a policy exception for the cluster policy created above, configured to apply to configmap named `emergency`
1. - Try to create a configmap named `emergency`
1. - Assert that an admission report exists with a skipped result
1. - Assert that a policy report exists with a skipped result

View file

@ -1,24 +1,23 @@
apiVersion: kyverno.io/v1alpha2
kind: AdmissionReport
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
ownerReferences:
- apiVersion: v1
kind: ConfigMap
name: emergency
spec:
results:
- policy: require-labels
resources:
- apiVersion: v1
kind: ConfigMap
name: emergency
result: skip
rule: require-team
scored: true
source: kyverno
summary:
error: 0
fail: 0
pass: 0
skip: 1
warn: 0
scope:
apiVersion: v1
kind: ConfigMap
name: emergency
results:
- policy: require-labels
result: skip
rule: require-team
scored: true
source: kyverno
summary:
error: 0
fail: 0
pass: 0
skip: 1
warn: 0

View file

@ -1,14 +1,17 @@
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
name: cpol-test-audit-reports-namespacesselector
namespace: test-audit-reports-namespacesselector
results:
- policy: test-audit-reports-namespacesselector
resources:
ownerReferences:
- apiVersion: v1
kind: Pod
name: audit-pod
namespace: test-audit-reports-namespacesselector
scope:
apiVersion: v1
kind: Pod
name: audit-pod
namespace: test-audit-reports-namespacesselector
results:
- policy: test-audit-reports-namespacesselector
result: fail
rule: test-audit-reports-namespacesselector

View file

@ -1,5 +1,8 @@
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
name: cpol-test-audit-reports-namespacesselector
namespace: test-non-nonaudit-reports-namespacesselector
namespace: test-non-audit-reports-namespacesselector
ownerReferences:
- apiVersion: v1
kind: Pod
name: non-audit-pod

View file

@ -1,14 +1,17 @@
apiVersion: wgpolicyk8s.io/v1alpha2
kind: ClusterPolicyReport
metadata:
name: cpol-require-owner
results:
- message: validation rule 'check-owner' passed.
policy: require-owner
resources:
ownerReferences:
- apiVersion: v1
kind: Namespace
name: bar
scope:
apiVersion: v1
kind: Namespace
name: bar
results:
- message: validation rule 'check-owner' passed.
policy: require-owner
result: pass
rule: check-owner
scored: true

View file

@ -1,13 +1,13 @@
## Description
This test verifies that aggregated admission report is correctly updated when a resource changes.
This test verifies that policy report is correctly updated when a resource changes.
A policy in Audit mode is created.
A deployment is created, the deployment violates the policy and we assert the admission report contains a `fail` result.
The deployment is then updated to not violate the policy anymore and we assert the admission report changes to contain `pass` result.
A deployment is created, the deployment violates the policy and we assert the policy report contains a `fail` result.
The deployment is then updated to not violate the policy anymore and we assert the policy report changes to contain `pass` result.
## Expected result
When the resource does not violate the policy anymore, the result in the admission report should change from `fail` to `pass`.
When the resource does not violate the policy anymore, the result in the policy report should change from `fail` to `pass`.
## Related issue(s)

View file

@ -1,25 +1,24 @@
apiVersion: kyverno.io/v1alpha2
kind: AdmissionReport
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
ownerReferences:
- apiVersion: apps/v1
kind: Deployment
name: test-dpl-1
spec:
results:
- message: 'validation error: Using a mutable image tag e.g. ''latest'' is not allowed.
rule autogen-validate-image-tag-pod failed at path /spec/template/spec/containers/0/image/'
policy: disallow-latest-tag
resources:
- apiVersion: apps/v1
kind: Deployment
name: test-dpl-1
result: fail
rule: autogen-validate-image-tag-pod
source: kyverno
summary:
error: 0
fail: 1
pass: 0
skip: 0
warn: 0
scope:
apiVersion: apps/v1
kind: Deployment
name: test-dpl-1
results:
- message: 'validation error: Using a mutable image tag e.g. ''latest'' is not allowed.
rule autogen-validate-image-tag-pod failed at path /spec/template/spec/containers/0/image/'
policy: disallow-latest-tag
result: fail
rule: autogen-validate-image-tag-pod
source: kyverno
summary:
error: 0
fail: 1
pass: 0
skip: 0
warn: 0

View file

@ -1,24 +1,23 @@
apiVersion: kyverno.io/v1alpha2
kind: AdmissionReport
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
ownerReferences:
- apiVersion: apps/v1
kind: Deployment
name: test-dpl-1
spec:
results:
- message: validation rule 'autogen-validate-image-tag-pod' passed.
policy: disallow-latest-tag
resources:
- apiVersion: apps/v1
kind: Deployment
name: test-dpl-1
result: pass
rule: autogen-validate-image-tag-pod
source: kyverno
summary:
error: 0
fail: 0
pass: 1
skip: 0
warn: 0
scope:
apiVersion: apps/v1
kind: Deployment
name: test-dpl-1
results:
- message: validation rule 'autogen-validate-image-tag-pod' passed.
policy: disallow-latest-tag
result: pass
rule: autogen-validate-image-tag-pod
source: kyverno
summary:
error: 0
fail: 0
pass: 1
skip: 0
warn: 0

View file

@ -1,4 +0,0 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- background-scan-report-assert.yaml

View file

@ -1,14 +0,0 @@
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
metadata:
ownerReferences:
- apiVersion: v1
kind: Pod
name: badpod01
spec:
summary:
error: 0
fail: 1
pass: 0
skip: 0
warn: 0

View file

@ -9,4 +9,4 @@ It makes sure the generated background scan report contains a skipped result ins
- Assert the policy becomes ready
1. - Create a policy exception for the cluster policy created above, configured to apply to configmap named `emergency`
1. - Try to create a configmap named `emergency`
1. - Assert that an background scan report exists with a skipped result
1. - Assert that a policy report exists with a skipped result

View file

@ -4,6 +4,7 @@ metadata:
name: require-labels
spec:
validationFailureAction: Enforce
admission: false
background: true
rules:
- name: require-team

View file

@ -1,20 +1,23 @@
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
ownerReferences:
- apiVersion: v1
kind: ConfigMap
name: emergency
spec:
results:
- policy: require-labels
result: skip
rule: require-team
scored: true
source: kyverno
summary:
error: 0
fail: 0
pass: 0
skip: 1
warn: 0
scope:
apiVersion: v1
kind: ConfigMap
name: emergency
results:
- policy: require-labels
result: skip
rule: require-team
scored: true
source: kyverno
summary:
error: 0
fail: 0
pass: 0
skip: 1
warn: 0

View file

@ -4,5 +4,3 @@ delete:
- apiVersion: kyverno.io/v1
kind: ClusterPolicy
name: podsecurity-subrule-restricted
error:
- background-scan-report-error.yaml

View file

@ -0,0 +1,4 @@
# kuttl test step: the manifests listed under `error` must NOT match any
# cluster object (i.e. the report is expected to be absent).
apiVersion: kuttl.dev/v1beta1
kind: TestStep
error:
- report-error.yaml

View file

@ -8,6 +8,6 @@ When the policy is deleted, the background scan report should also be deleted.
1. - Create a cluster policy
- Assert the policy becomes ready
1. - Create a pod
1. - Assert a background scan report is created for the pod and contains the right summary
1. - Assert a policy report is created for the pod and contains the right summary
1. - Delete the policy
- Assert the background scan report is deleted for the pod
- Assert the policy report is deleted for the pod

View file

@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
name: podsecurity-subrule-restricted
spec:
admission: false
background: true
rules:
- match:

View file

@ -0,0 +1,17 @@
# Expected aggregated policy report for pod badpod01:
# owned by and scoped to the pod, with a single failing result in the summary.
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
  ownerReferences:
  - apiVersion: v1
    kind: Pod
    name: badpod01
scope:
  apiVersion: v1
  kind: Pod
  name: badpod01
summary:
  error: 0
  fail: 1
  pass: 0
  skip: 0
  warn: 0

View file

@ -1,5 +1,5 @@
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
ownerReferences:
- apiVersion: v1

View file

@ -1,18 +1,25 @@
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
name: cpol-podsecurity-subrule-restricted
namespace: default
ownerReferences:
- apiVersion: v1
kind: Pod
name: badpod01
scope:
apiVersion: v1
kind: Pod
name: badpod01
namespace: default
results:
- category: Pod Security
message: |
Validation rule 'restricted' failed. It violates PodSecurity "restricted:latest": ({Allowed:false ForbiddenReason:unrestricted capabilities ForbiddenDetail:container "container01" must set securityContext.capabilities.drop=["ALL"]})
policy: podsecurity-subrule-restricted
resources:
- apiVersion: v1
kind: Pod
name: badpod01
namespace: default
properties:
controls: capabilities_restricted
standard: restricted
version: latest
result: fail
rule: restricted
scored: true

View file

@ -1,4 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- bgscanr-assert.yaml
- report-assert.yaml

View file

@ -7,4 +7,4 @@ Note: the pods have to be created first because we don't want the policy to appl
## Expected Behavior
The pods are created and background scan reports are generated with a fail result.
The pods are created and policy reports are generated with a fail result.

View file

@ -1,29 +0,0 @@
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
metadata:
ownerReferences:
- apiVersion: v1
kind: Pod
name: unsigned
spec:
summary:
error: 0
fail: 1
pass: 0
skip: 0
warn: 0
---
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
metadata:
ownerReferences:
- apiVersion: v1
kind: Pod
name: signed-by-someone-else
spec:
summary:
error: 0
fail: 1
pass: 0
skip: 0
warn: 0

View file

@ -0,0 +1,35 @@
# Expected aggregated policy reports: one per pod (unsigned and
# signed-by-someone-else), each owned by and scoped to its pod,
# each with a single failing result.
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
  ownerReferences:
  - apiVersion: v1
    kind: Pod
    name: unsigned
scope:
  apiVersion: v1
  kind: Pod
  name: unsigned
summary:
  error: 0
  fail: 1
  pass: 0
  skip: 0
  warn: 0
---
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
  ownerReferences:
  - apiVersion: v1
    kind: Pod
    name: signed-by-someone-else
scope:
  apiVersion: v1
  kind: Pod
  name: signed-by-someone-else
summary:
  error: 0
  fail: 1
  pass: 0
  skip: 0
  warn: 0

View file

@ -1,4 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- bgscanr-assert.yaml
- report-assert.yaml

View file

@ -7,4 +7,4 @@ Note: the pod has to be created first because we don't want the policy to apply
## Expected Behavior
The pod is created and a background scan report is generated for it with a pass result.
The pod is created and a policy report is generated for it with a pass result.

View file

@ -1,14 +0,0 @@
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
metadata:
ownerReferences:
- apiVersion: v1
kind: Pod
name: signed
spec:
summary:
error: 0
fail: 0
pass: 1
skip: 0
warn: 0

View file

@ -0,0 +1,17 @@
# Expected aggregated policy report for pod signed:
# owned by and scoped to the pod, with a single passing result.
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
  ownerReferences:
  - apiVersion: v1
    kind: Pod
    name: signed
scope:
  apiVersion: v1
  kind: Pod
  name: signed
summary:
  error: 0
  fail: 0
  pass: 1
  skip: 0
  warn: 0

View file

@ -5,4 +5,4 @@ It then creates a validating admission policy that checks the replicas of the de
## Expected Behavior
The deployment is created and a background scan report is generated for it with a fail result.
The deployment is created and a policy report is generated for it with a fail result.

View file

@ -1,14 +0,0 @@
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
metadata:
ownerReferences:
- apiVersion: apps/v1
kind: Deployment
name: deployment-fail
spec:
summary:
error: 0
fail: 1
pass: 0
skip: 0
warn: 0

View file

@ -0,0 +1,13 @@
# Expected aggregated policy report for deployment-fail:
# owned by the deployment, with a single failing result in the summary.
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
  ownerReferences:
  - apiVersion: apps/v1
    kind: Deployment
    name: deployment-fail
summary:
  error: 0
  fail: 1
  pass: 0
  skip: 0
  warn: 0

View file

@ -0,0 +1,4 @@
# kuttl test step: assert that the manifests in report-assert.yaml
# exist and match in the cluster.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
assert:
- report-assert.yaml

View file

@ -5,4 +5,4 @@ It then creates a validating admission policy that checks the replicas of the de
## Expected Behavior
The deployment is created and a background scan report is generated for it with a pass result.
The deployment is created and a policy report is generated for it with a pass result.

View file

@ -1,14 +0,0 @@
apiVersion: kyverno.io/v1alpha2
kind: BackgroundScanReport
metadata:
ownerReferences:
- apiVersion: apps/v1
kind: Deployment
name: deployment-pass
spec:
summary:
error: 0
fail: 0
pass: 1
skip: 0
warn: 0

View file

@ -0,0 +1,13 @@
# Expected aggregated policy report for deployment-pass:
# owned by the deployment, with a single passing result in the summary.
apiVersion: wgpolicyk8s.io/v1alpha2
kind: PolicyReport
metadata:
  ownerReferences:
  - apiVersion: apps/v1
    kind: Deployment
    name: deployment-pass
summary:
  error: 0
  fail: 0
  pass: 1
  skip: 0
  warn: 0