kyverno/cmd/initContainer/main.go
shuting 5e07ecc5f3
Add Policy Report (#1229)
* add report in cli

* policy report crd added

* policy report added

* configmap added

* added jobs

* added jobs

* bug fixed

* added logic for cli

* common function added

* sub command added for policy report

* subcommand added for report

* common package changed

* configmap added

* added logic for kyverno cli

* added logic for jobs

* added logic for jobs

* added logic for jobs

* added logic for cli

* bug fix

* cli changes

* count bug fix

* docs added for command

* go fmt

* refactor codebase

* remove policy controller for policyreport

* policy report removed

* bug fixes

* bug fixes

* added job trigger if needed

* job deletion logic added

* build failed fix

* fixed e2e test

* remove hard coded variables

* packages added

* improvement added in jobs scheduler

* policy report yaml added

* cronjob added

* small fixes

* remove background sync

* documentation added for report command

* remove extra log

* small improvement

* tested policy report

* revert hardcoded changes

* changes for demo

* demo changes

* resource aggregation added

* More changes

* More changes

* - resolve PR comments; - refactor jobs controller

* set rbac for jobs

* add clean up in job controller

* add short names

* remove application scope for policyreport

* move job controller to policyreport

* add report logic in command apply

* - update policy report types;  - upgrade k8s library; - update code gen

* temporarily comment out code to pass CI build

* generate / update policyreport to cluster

* add unit test for CLI report

* add test for apply - generate policy report

* fix unit test

* - remove job controller; - remove in-memory configmap; - clean up kustomize manifest

* remove dependency

* add reportRequest / clusterReportRequest

* clean up policy report

* generate report request

* update crd clusterReportRequest

* - update json tag of report summary; - update definition manifests; -  fix dclient creation

* aggregate reportRequest into policy report

* fix unit tests

* - update report summary to optional; - generate clusterPolicyReport; - remove reportRequests after merged to report

* remove

* generate reportRequest in kyverno namespace

* update resource filter in helm chart

* - rename reportRequest to reportChangeRequest; -rename clusterReportRequest to clusterReportChangeRequest

* generate policy report in background scan

* skip generating report change request if there's entry results

* fix results entry removal when policy / rule gets deleted

* rename apiversion from policy.kubernetes.io to policy.k8s.io

* update summary.* to lower case

* move reportChangeRequest to kyverno.io/v1alpha1

* remove policy report flag

* fix report update

* clean up policy violation CRD

* remove violation CRD from manifest

* clean up policy violation code - remove pvGenerator

* change severity fields to lower case

* update import library

* set report category

Co-authored-by: Yuvraj <yuvraj.yad001@gmail.com>
Co-authored-by: Yuvraj <10830562+evalsocket@users.noreply.github.com>
Co-authored-by: Jim Bugwadia <jim@nirmata.com>
2020-11-09 11:26:12 -08:00


/*
Cleans up stale webhook configurations created by Kyverno that were not cleaned up.
*/
package main

import (
	"flag"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/gardener/controller-manager-library/pkg/logger"
	"github.com/kyverno/kyverno/pkg/config"
	client "github.com/kyverno/kyverno/pkg/dclient"
	"github.com/kyverno/kyverno/pkg/signal"
	"github.com/kyverno/kyverno/pkg/utils"
	"k8s.io/apimachinery/pkg/api/errors"
	rest "k8s.io/client-go/rest"
	clientcmd "k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog"
	"k8s.io/klog/klogr"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

var (
	kubeconfig string
	setupLog   = log.Log.WithName("setup")
)

const (
	mutatingWebhookConfigKind   string = "MutatingWebhookConfiguration"
	validatingWebhookConfigKind string = "ValidatingWebhookConfiguration"
	policyReportKind            string = "PolicyReport"
	clusterPolicyReportKind     string = "ClusterPolicyReport"
	policyViolation             string = "PolicyViolation"
	clusterPolicyViolation      string = "ClusterPolicyViolation"
)
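
// Example usage (a sketch, assuming the standard repository layout shown in the
// file path above, i.e. the package lives at cmd/initContainer): the cleanup
// binary can be run out-of-cluster against an explicit kubeconfig with something like
//
//	go run ./cmd/initContainer -kubeconfig=$HOME/.kube/config
//
// When no -kubeconfig flag is given, the in-cluster configuration is used
// (see createClientConfig below).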

func main() {
	klog.InitFlags(nil)
	log.SetLogger(klogr.New())

	// arguments
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	if err := flag.Set("v", "2"); err != nil {
		klog.Fatalf("failed to set log level: %v", err)
	}
	flag.Parse()

	// os signal handler
	stopCh := signal.SetupSignalHandler()

	// create client config
	clientConfig, err := createClientConfig(kubeconfig)
	if err != nil {
		setupLog.Error(err, "Failed to build kubeconfig")
		os.Exit(1)
	}

	// DYNAMIC CLIENT
	// - client for all registered resources
	client, err := client.NewClient(clientConfig, 15*time.Minute, stopCh, log.Log)
	if err != nil {
		setupLog.Error(err, "Failed to create client")
		os.Exit(1)
	}

	// exit for unsupported Kubernetes cluster versions
	if !utils.HigherThanKubernetesVersion(client, log.Log, 1, 14, 0) {
		os.Exit(1)
	}

	requests := []request{
		// Resource
		{validatingWebhookConfigKind, config.ValidatingWebhookConfigurationName},
		{validatingWebhookConfigKind, config.ValidatingWebhookConfigurationDebugName},
		{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
		{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},

		// Policy
		{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationName},
		{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationDebugName},
		{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationName},
		{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationDebugName},

		// policy report
		{policyReportKind, ""},
		{clusterPolicyReportKind, ""},

		// clean up policy violations
		{policyViolation, ""},
		{clusterPolicyViolation, ""},
	}

	done := make(chan struct{})
	defer close(done)
	failure := false

	// use a pipeline to pass requests to the cleanup routines
	// generate requests
	in := gen(done, stopCh, requests...)

	// process requests
	// processing routine count: 2
	p1 := process(client, done, stopCh, in)
	p2 := process(client, done, stopCh, in)

	// merge results from the processing routines
	for err := range merge(done, stopCh, p1, p2) {
		if err != nil {
			failure = true
			log.Log.Error(err, "failed to cleanup resource")
		}
	}

	// if any request failed, exit with a failure code
	if failure {
		log.Log.Info("failed to cleanup webhook configurations")
		os.Exit(1)
	}
}

func executeRequest(client *client.Client, req request) error {
	switch req.kind {
	case mutatingWebhookConfigKind, validatingWebhookConfigKind:
		return removeWebhookIfExists(client, req.kind, req.name)
	case policyReportKind:
		return removePolicyReport(client, req.kind)
	case clusterPolicyReportKind:
		return removeClusterPolicyReport(client, req.kind)
	case policyViolation, clusterPolicyViolation:
		return removeViolationCRD(client)
	}

	return nil
}
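
// createClientConfig returns the in-cluster REST config when no kubeconfig path is
// provided, and otherwise builds the config from the given kubeconfig file.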
func createClientConfig(kubeconfig string) (*rest.Config, error) {
	logger := log.Log
	if kubeconfig == "" {
		logger.Info("Using in-cluster configuration")
		return rest.InClusterConfig()
	}

	logger.Info(fmt.Sprintf("Using configuration from '%s'", kubeconfig))
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}

type request struct {
	kind string
	name string
}

/* Processing Pipeline

                       -> Process Requests
   Generate Requests   -> Process Requests   -> Merge Results
                       -> Process Requests

- number of processes can be controlled
- stop processing on a SIGTERM or SIGKILL signal
- stop processing if any process fails (supported)
*/
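
// A minimal sketch of scaling the pipeline to more workers: merge is variadic,
// so additional process stages can be fanned in, e.g.
//
//	p1 := process(client, done, stopCh, in)
//	p2 := process(client, done, stopCh, in)
//	p3 := process(client, done, stopCh, in)
//	for err := range merge(done, stopCh, p1, p2, p3) {
//		// handle err
//	}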

// Generates requests to be processed
func gen(done <-chan struct{}, stopCh <-chan struct{}, requests ...request) <-chan request {
	out := make(chan request)
	go func() {
		defer close(out)
		for _, req := range requests {
			select {
			case out <- req:
			case <-done:
				println("done generate")
				return
			case <-stopCh:
				println("shutting down generate")
				return
			}
		}
	}()
	return out
}

// processes the requests
func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{}, requests <-chan request) <-chan error {
	logger := log.Log.WithName("process")
	out := make(chan error)
	go func() {
		defer close(out)
		for req := range requests {
			select {
			case out <- executeRequest(client, req):
			case <-done:
				logger.Info("done")
				return
			case <-stopCh:
				logger.Info("shutting down")
				return
			}
		}
	}()
	return out
}

// waits for all processes to complete and merges their results
func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan error) <-chan error {
	logger := log.Log.WithName("merge")
	var wg sync.WaitGroup
	out := make(chan error)

	// gets the output from each process
	output := func(ch <-chan error) {
		defer wg.Done()
		for err := range ch {
			select {
			case out <- err:
			case <-done:
				logger.Info("done")
				return
			case <-stopCh:
				logger.Info("shutting down")
				return
			}
		}
	}

	wg.Add(len(processes))
	for _, process := range processes {
		go output(process)
	}

	// close the output channel when all the process goroutines are done
	go func() {
		wg.Wait()
		close(out)
	}()

	return out
}
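
// removeWebhookIfExists deletes the named webhook configuration if it exists;
// a NotFound error is treated as success.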
func removeWebhookIfExists(client *client.Client, kind string, name string) error {
	logger := log.Log.WithName("removeExistingWebhook").WithValues("kind", kind, "name", name)
	var err error

	// get resource
	_, err = client.GetResource("", kind, "", name)
	if errors.IsNotFound(err) {
		logger.V(4).Info("resource not found")
		return nil
	}
	if err != nil {
		logger.Error(err, "failed to get resource")
		return err
	}

	// delete resource
	err = client.DeleteResource("", kind, "", name, false)
	if err != nil {
		logger.Error(err, "failed to delete resource")
		return err
	}

	logger.Info("removed the resource")
	return nil
}
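
// removeClusterPolicyReport lists all ClusterPolicyReport resources and deletes them.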
func removeClusterPolicyReport(client *client.Client, kind string) error {
	logger := log.Log.WithName("removeClusterPolicyReport")

	cpolrs, err := client.ListResource("", kind, "", nil)
	if err != nil {
		// nothing to clean up if the resource kind is not registered
		if errors.IsNotFound(err) {
			return nil
		}
		logger.Error(err, "failed to list clusterPolicyReport")
		return err
	}

	for _, cpolr := range cpolrs.Items {
		if err := client.DeleteResource(cpolr.GetAPIVersion(), cpolr.GetKind(), "", cpolr.GetName(), false); err != nil {
			logger.Error(err, "failed to delete clusterPolicyReport", "name", cpolr.GetName())
		}
	}
	return nil
}
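
// removePolicyReport deletes the namespaced policy report (policyreport-ns-<namespace>)
// in every namespace.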
func removePolicyReport(client *client.Client, kind string) error {
	logger := log.Log.WithName("removePolicyReport")

	namespaces, err := client.ListResource("", "Namespace", "", nil)
	if err != nil {
		logger.Error(err, "failed to list namespaces")
		return err
	}

	// the name of a namespaced policy report follows the convention
	// policyreport-ns-<namespace name>
	for _, ns := range namespaces.Items {
		reportName := fmt.Sprintf("policyreport-ns-%s", ns.GetName())
		err := client.DeleteResource("", kind, ns.GetName(), reportName, false)
		if err != nil && !errors.IsNotFound(err) {
			logger.Error(err, "failed to delete policyReport", "name", reportName)
		}
	}

	return nil
}
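
// removeViolationCRD deletes the deprecated PolicyViolation and ClusterPolicyViolation
// CRDs, which are replaced by policy reports.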
func removeViolationCRD(client *client.Client) error {
	// note: logger here is the package-level gardener logger imported above,
	// not a local logr instance
	if err := client.DeleteResource("", "CustomResourceDefinition", "", "policyviolations.kyverno.io", false); err != nil {
		if !errors.IsNotFound(err) {
			logger.Error(err, "failed to delete CRD policyViolation")
		}
	}

	if err := client.DeleteResource("", "CustomResourceDefinition", "", "clusterpolicyviolations.kyverno.io", false); err != nil {
		if !errors.IsNotFound(err) {
			logger.Error(err, "failed to delete CRD clusterPolicyViolation")
		}
	}
	return nil
}