1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-07 00:17:13 +00:00
kyverno/pkg/controllers/report/utils/scanner.go
Charles-Edouard Brétéché e0ab72bb9a
feat: reports v2 implementation (#4608)
This PR refactors the reports generation code.
It removes RCR and CRCR crds and replaces them with AdmissionReport, ClusterAdmissionReport, BackgroundScanReport and ClusterBackgroundScanReport crds.

The new reports system is based on 4 controllers:

Admission reports controller is responsible for cleaning up admission reports and attaching admission reports to their corresponding resource in case of a creation
Background scan reports controller is responsible for creating background scan reports when a resource and/or policy changes
Aggregation controller takes care of aggregating per-resource reports into higher level reports (per namespace)
Resources controller is responsible for watching resources that need background scan reports
I added two new flags to disable admission reports and/or background scan reports, the whole reporting system can be disabled if something goes wrong.

I also added a flag to split reports in chunks to avoid creating too large resources.

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

Signed-off-by: prateekpandey14 <prateek.pandey@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Signed-off-by: prateekpandey14 <prateek.pandey@nirmata.com>
Co-authored-by: prateekpandey14 <prateek.pandey@nirmata.com>
2022-09-28 17:15:16 +05:30

70 lines
2 KiB
Go

package utils
import (
"github.com/go-logr/logr"
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/response"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// scanner is the default Scanner implementation; it evaluates policies
// against individual resources using the Kyverno engine.
type scanner struct {
// logger records scan failures (scanning continues on error).
logger logr.Logger
// client is handed to the engine policy context for cluster lookups.
client dclient.Interface
// excludeGroupRole is forwarded to the engine policy context.
excludeGroupRole []string
}
// ScanResult is the outcome of scanning a single resource against a single
// policy: the engine response and, if the scan failed, the error.
type ScanResult struct {
// EngineResponse holds the engine validation result; may be nil when Error is set.
EngineResponse *response.EngineResponse
// Error is non-nil when building the policy context or scanning failed.
Error error
}
// Scanner evaluates a set of policies against a resource.
type Scanner interface {
// ScanResource scans the given resource (with its namespace labels) against
// each policy and returns one ScanResult per policy.
ScanResource(unstructured.Unstructured, map[string]string, ...kyvernov1.PolicyInterface) map[kyvernov1.PolicyInterface]ScanResult
}
// NewScanner returns a Scanner backed by the Kyverno engine, configured with
// the given logger, cluster client and group roles to exclude.
func NewScanner(logger logr.Logger, client dclient.Interface, excludeGroupRole ...string) Scanner {
	s := &scanner{
		logger:           logger,
		client:           client,
		excludeGroupRole: excludeGroupRole,
	}
	return s
}
// ScanResource evaluates every given policy against resource and returns one
// ScanResult per policy. A failed scan is logged and recorded in the result
// map; it does not abort the remaining policies.
func (s *scanner) ScanResource(resource unstructured.Unstructured, nsLabels map[string]string, policies ...kyvernov1.PolicyInterface) map[kyvernov1.PolicyInterface]ScanResult {
	results := map[kyvernov1.PolicyInterface]ScanResult{}
	for _, policy := range policies {
		// named engineResponse (not "response") to avoid shadowing the
		// imported response package used by ScanResult.
		engineResponse, err := s.scan(resource, nsLabels, policy)
		if err != nil {
			s.logger.Error(err, "failed to scan resource")
		}
		results[policy] = ScanResult{engineResponse, err}
	}
	return results
}
// scan builds an engine JSON context and policy context for the given
// resource/policy pair and runs engine validation, returning the response.
func (s *scanner) scan(resource unstructured.Unstructured, nsLabels map[string]string, policy kyvernov1.PolicyInterface) (*response.EngineResponse, error) {
	ctx := context.NewContext()
	// Populate the JSON context with the resource, its namespace and any
	// image information before evaluation; bail out on the first failure.
	if err := ctx.AddResource(resource.Object); err != nil {
		return nil, err
	}
	if err := ctx.AddNamespace(resource.GetNamespace()); err != nil {
		return nil, err
	}
	if err := ctx.AddImageInfos(&resource); err != nil {
		return nil, err
	}
	policyCtx := &engine.PolicyContext{
		Policy:           policy,
		NewResource:      resource,
		JSONContext:      ctx,
		Client:           s.client,
		NamespaceLabels:  nsLabels,
		ExcludeGroupRole: s.excludeGroupRole,
	}
	return engine.Validate(policyCtx), nil
}