Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-07 00:17:13 +00:00)
* add report in cli
* policy report crd added
* policy report added
* configmap added
* added jobs
* added jobs
* bug fixed
* added logic for cli
* common function added
* sub command added for policy report
* subcommand added for report
* common package changed
* configmap added
* added logic for kyverno cli
* added logic for jobs
* added logic for jobs
* added logic for jobs
* added logic for cli
* buf fix
* cli changes
* count bug fix
* docs added for command
* go fmt
* refactor codebase
* remove policy controller for policyreport
* policy report removed
* bug fixes
* bug fixes
* added job trigger if needed
* job deletation logic added
* build failed fix
* fixed e2e test
* remove hard coded variables
* packages adde
* improvment added in jobs sheduler
* policy report yaml added
* cronjob added
* small fixes
* remove background sync
* documentation added for report command
* remove extra log
* small improvement
* tested policy report
* revert hardcoded changes
* changes for demo
* demo changes
* resource aggrigation added
* More changes
* More changes
* - resolve PR comments; - refactor jobs controller
* set rbac for jobs
* add clean up in job controller
* add short names
* remove application scope for policyreport
* move job controller to policyreport
* add report logic in command apply
* - update policy report types; - upgrade k8s library; - update code gen
* temporarily comment out code to pass CI build
* generate / update policyreport to cluster
* add unit test for CLI report
* add test for apply - generate policy report
* fix unit test
* - remove job controller; - remove in-memory configmap; - clean up kustomize manifest
* remove dependency
* add reportRequest / clusterReportRequest
* clean up policy report
* generate report request
* update crd clusterReportRequest
* - update json tag of report summary; - update definition manifests; - fix dclient creation
* aggregate reportRequest into policy report
* fix unit tests
* - update report summary to optional; - generate clusterPolicyReport; - remove reportRequests after merged to report
* remove
* generate reportRequest in kyverno namespace
* update resource filter in helm chart
* - rename reportRequest to reportChangeRequest; - rename clusterReportRequest to clusterReportChangeRequest
* generate policy report in background scan
* skip generating report change request if there's entry results
* fix results entry removal when policy / rule gets deleted
* rename apiversion from policy.kubernetes.io to policy.k8s.io
* update summary.* to lower case
* move reportChangeRequest to kyverno.io/v1alpha1
* remove policy report flag
* fix report update
* clean up policy violation CRD
* remove violation CRD from manifest
* clean up policy violation code - remove pvGenerator
* change severity fields to lower case
* update import library
* set report category

Co-authored-by: Yuvraj <yuvraj.yad001@gmail.com>
Co-authored-by: Yuvraj <10830562+evalsocket@users.noreply.github.com>
Co-authored-by: Jim Bugwadia <jim@nirmata.com>
175 lines · 4.8 KiB · Go
package generate

import (
	"context"
	"fmt"
	"time"

	backoff "github.com/cenkalti/backoff"
	"github.com/go-logr/logr"
	kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
	kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/constant"
	"k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
)

// GenerateRequests provides an interface to manage generate requests
type GenerateRequests interface {
	Apply(gr kyverno.GenerateRequestSpec, action v1beta1.Operation) error
}

// GeneratorChannel carries a generate request spec and the admission operation that produced it
type GeneratorChannel struct {
	spec   kyverno.GenerateRequestSpec
	action v1beta1.Operation
}

// Generator defines the implementation to manage generate request resources
type Generator struct {
	// channel to receive requests
	ch     chan GeneratorChannel
	client *kyvernoclient.Clientset
	stopCh <-chan struct{}
	log    logr.Logger
}

// NewGenerator returns a new instance of the generate-request resource generator
func NewGenerator(client *kyvernoclient.Clientset, stopCh <-chan struct{}, log logr.Logger) *Generator {
	gen := &Generator{
		ch:     make(chan GeneratorChannel, 1000),
		client: client,
		stopCh: stopCh,
		log:    log,
	}
	return gen
}
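
// A minimal usage sketch (hypothetical wiring, not part of this package);
// kyvernoClient, logger, and grSpec are assumed to exist in the caller:
//
//	stopCh := make(chan struct{})
//	gen := NewGenerator(kyvernoClient, stopCh, logger.WithName("gr-generator"))
//	go gen.Run(2)                         // start two workers draining the channel
//	_ = gen.Apply(grSpec, v1beta1.Create) // queue a generate request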

// Apply creates a generate request resource (blocking call if the channel is full)
func (g *Generator) Apply(gr kyverno.GenerateRequestSpec, action v1beta1.Operation) error {
	logger := g.log
	logger.V(4).Info("creating Generate Request", "request", gr)

	// send to channel
	message := GeneratorChannel{
		action: action,
		spec:   gr,
	}
	select {
	case g.ch <- message:
		return nil
	case <-g.stopCh:
		logger.Info("shutting down channel")
		return fmt.Errorf("shutting down gr create channel")
	}
}
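
// Note: because ch is buffered (1000 entries), Apply above only blocks once
// the buffer is full; on shutdown the select falls through to stopCh rather
// than deadlocking. (Inferred from the code, not documented behavior.)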

// Run starts the workers that process queued generate request specs
func (g *Generator) Run(workers int) {
	logger := g.log
	defer utilruntime.HandleCrash()
	logger.V(4).Info("starting")
	defer func() {
		logger.V(4).Info("shutting down")
	}()

	for i := 0; i < workers; i++ {
		go wait.Until(g.processApply, constant.GenerateControllerResync, g.stopCh)
	}
	<-g.stopCh
}
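
// Note: wait.Until would normally rerun processApply every
// constant.GenerateControllerResync period, but processApply ranges over a
// channel that is never closed, so each goroutine is effectively one
// long-lived worker. (Inferred from the code, not documented behavior.)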

func (g *Generator) processApply() {
	logger := g.log
	for r := range g.ch {
		logger.V(4).Info("received generate request", "request", r)
		if err := g.generate(r.spec, r.action); err != nil {
			logger.Error(err, "failed to generate request CR")
		}
	}
}

func (g *Generator) generate(grSpec kyverno.GenerateRequestSpec, action v1beta1.Operation) error {
	// create/update a generate request
	if err := retryApplyResource(g.client, grSpec, g.log, action); err != nil {
		return err
	}
	return nil
}

// retryApplyResource creates or updates the generate request CR in the
// kyverno namespace, retrying with exponential backoff; requests arrive over
// the receiving channel and are processed by the worker pool above
func retryApplyResource(client *kyvernoclient.Clientset,
	grSpec kyverno.GenerateRequestSpec,
	log logr.Logger,
	action v1beta1.Operation,
) error {
	var i int
	var err error

	applyResource := func() error {
		gr := kyverno.GenerateRequest{
			Spec: grSpec,
		}

		gr.SetNamespace(config.KubePolicyNamespace)
		// initial state "Pending"
		// TODO: status is not updated
		// gr.Status.State = kyverno.Pending
		// generate requests are created in the kyverno namespace
		isExist := false
		if action == v1beta1.Create || action == v1beta1.Update {
			grList, err := client.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).List(context.TODO(), metav1.ListOptions{})
			if err != nil {
				return err
			}

			// idx avoids shadowing the retry counter i declared above
			for idx := range grList.Items {
				v := grList.Items[idx]
				if grSpec.Policy == v.Spec.Policy && grSpec.Resource.Name == v.Spec.Resource.Name && grSpec.Resource.Kind == v.Spec.Resource.Kind && grSpec.Resource.Namespace == v.Spec.Resource.Namespace {
					gr.SetLabels(map[string]string{
						"resources-update": "true",
					})

					// mutate the stored item, not the loop copy, so the
					// update below actually carries the new spec
					grList.Items[idx].Spec.Context = gr.Spec.Context
					grList.Items[idx].Spec.Policy = gr.Spec.Policy
					grList.Items[idx].Spec.Resource = gr.Spec.Resource
					_, err = client.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).Update(context.TODO(), &grList.Items[idx], metav1.UpdateOptions{})
					if err != nil {
						return err
					}
					isExist = true
				}
			}

			if !isExist {
				gr.SetGenerateName("gr-")
				_, err = client.KyvernoV1().GenerateRequests(config.KubePolicyNamespace).Create(context.TODO(), &gr, metav1.CreateOptions{})
				if err != nil {
					return err
				}
			}
		}

		log.V(4).Info("retrying update generate request CR", "retryCount", i, "name", gr.GetGenerateName(), "namespace", gr.GetNamespace())
		i++
		return err
	}
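
	// With the settings below, retry delays start at ~0.5s, grow by 1.5x with
	// +/-50% jitter, are capped at 1s per attempt, and stop once ~3s have
	// elapsed, so a failing apply is retried only a handful of times.
	// (Inferred from the cenkalti/backoff parameters, not from project docs.)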
	exbackoff := &backoff.ExponentialBackOff{
		InitialInterval:     500 * time.Millisecond,
		RandomizationFactor: 0.5,
		Multiplier:          1.5,
		MaxInterval:         time.Second,
		MaxElapsedTime:      3 * time.Second,
		Clock:               backoff.SystemClock,
	}

	exbackoff.Reset()
	err = backoff.Retry(applyResource, exbackoff)

	if err != nil {
		return err
	}

	return nil
}