/*
Cleans up stale webhook configurations created by Kyverno that were not cleaned up.
*/
package main

import (
	"flag"
	"fmt"
	"os"
	"sync"
	"time"

	"github.com/kyverno/kyverno/pkg/config"
	client "github.com/kyverno/kyverno/pkg/dclient"
	"github.com/kyverno/kyverno/pkg/signal"
	"github.com/kyverno/kyverno/pkg/utils"
	"k8s.io/apimachinery/pkg/api/errors"
	rest "k8s.io/client-go/rest"
	clientcmd "k8s.io/client-go/tools/clientcmd"
	"k8s.io/klog"
	"k8s.io/klog/klogr"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

var (
	kubeconfig string
	setupLog   = log.Log.WithName("setup")
)

const (
	mutatingWebhookConfigKind   string = "MutatingWebhookConfiguration"
	validatingWebhookConfigKind string = "ValidatingWebhookConfiguration"
)

func main() {
	klog.InitFlags(nil)
	log.SetLogger(klogr.New())

	// arguments
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	if err := flag.Set("v", "2"); err != nil {
		klog.Fatalf("failed to set log level: %v", err)
	}
	flag.Parse()

	// os signal handler
	stopCh := signal.SetupSignalHandler()

	// create client config
	clientConfig, err := createClientConfig(kubeconfig)
	if err != nil {
		setupLog.Error(err, "Failed to build kubeconfig")
		os.Exit(1)
	}

	// DYNAMIC CLIENT
	// - client for all registered resources
	client, err := client.NewClient(clientConfig, 15*time.Minute, stopCh, log.Log)
	if err != nil {
		setupLog.Error(err, "Failed to create client")
		os.Exit(1)
	}

	// Exit for unsupported versions of the Kubernetes cluster
	// https://github.com/kyverno/kyverno/issues/700
	// - supported from v1.12.7+
	if !utils.HigherThanKubernetesVersion(client, log.Log, 1, 12, 7) {
		os.Exit(1)
	}

	requests := []request{
		// Resource
		{validatingWebhookConfigKind, config.ValidatingWebhookConfigurationName},
		{validatingWebhookConfigKind, config.ValidatingWebhookConfigurationDebugName},
		{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
		{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},

		// Policy
		{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationName},
		{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationDebugName},
		{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationName},
		{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationDebugName},
	}

	done := make(chan struct{})
	defer close(done)
	failure := false

	// use a pipeline to pass the requests to the cleanup routines
	// generate requests
	in := gen(done, stopCh, requests...)
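
	// fan-out/fan-in: the single request channel `in` is consumed concurrently
	// by the two process goroutines started below; merge then combines their
	// error channels back into one stream that main drains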
	// process requests
	// processing routine count: 2
	p1 := process(client, done, stopCh, in)
	p2 := process(client, done, stopCh, in)

	// merge results from processing routines
	for err := range merge(done, stopCh, p1, p2) {
		if err != nil {
			failure = true
			log.Log.Error(err, "failed to cleanup resource")
		}
	}

	// if there was any failure, exit with a non-zero status
	if failure {
		log.Log.Info("failed to cleanup webhook configurations")
		os.Exit(1)
	}
}

// removeWebhookIfExists deletes the named webhook configuration if it is present
func removeWebhookIfExists(client *client.Client, kind string, name string) error {
	logger := log.Log.WithName("removeExistingWebhook").WithValues("kind", kind, "name", name)
	var err error

	// Get resource
	_, err = client.GetResource("", kind, "", name)
	if errors.IsNotFound(err) {
		logger.V(4).Info("resource not found")
		return nil
	}
	if err != nil {
		logger.Error(err, "failed to get resource")
		return err
	}

	// Delete resource
	err = client.DeleteResource("", kind, "", name, false)
	if err != nil {
		logger.Error(err, "failed to delete resource")
		return err
	}

	logger.Info("removed the resource")
	return nil
}

// createClientConfig builds a REST config from the given kubeconfig path,
// falling back to the in-cluster configuration when the path is empty
func createClientConfig(kubeconfig string) (*rest.Config, error) {
	logger := log.Log
	if kubeconfig == "" {
		logger.Info("Using in-cluster configuration")
		return rest.InClusterConfig()
	}
	logger.Info(fmt.Sprintf("Using configuration from '%s'", kubeconfig))
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}

// request identifies a webhook configuration resource by kind and name
type request struct {
	kind string
	name string
}

/* Processing Pipeline

                        -> Process Requests
    Generate Requests   -> Process Requests   -> Merge Results
                        -> Process Requests

   - the number of processing routines can be controlled
   - processing stops on a SIGTERM or SIGKILL signal
   - a failure in any request causes the program to exit with a non-zero status
*/

// gen generates the requests to be processed
func gen(done <-chan struct{}, stopCh <-chan struct{}, requests ...request) <-chan request {
	out := make(chan request)
	go func() {
		defer close(out)
		for _, req := range requests {
			select {
			case out <- req:
			case <-done:
				println("done generate")
				return
			case <-stopCh:
				println("shutting down generate")
				return
			}
		}
	}()
	return out
}

// process removes each requested webhook configuration and emits the result
func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{}, requests <-chan request) <-chan error {
	logger := log.Log.WithName("process")
	out := make(chan error)
	go func() {
		defer close(out)
		for req := range requests {
			select {
			case out <- removeWebhookIfExists(client, req.kind, req.name):
			case <-done:
				logger.Info("done")
				return
			case <-stopCh:
				logger.Info("shutting down")
				return
			}
		}
	}()
	return out
}

// merge waits for all processing routines to complete and merges their results
func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan error) <-chan error {
	logger := log.Log.WithName("merge")
	var wg sync.WaitGroup
	out := make(chan error)

	// gets the output from each process
	output := func(ch <-chan error) {
		defer wg.Done()
		for err := range ch {
			select {
			case out <- err:
			case <-done:
				logger.Info("done")
				return
			case <-stopCh:
				logger.Info("shutting down")
				return
			}
		}
	}

	wg.Add(len(processes))
	for _, process := range processes {
		go output(process)
	}

	// close out once all the output goroutines are done
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}
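
// Usage sketch (assumption: this file is built as its own main package and run
// from its directory; -kubeconfig is the flag registered in main above):
//
//	go run . -kubeconfig ~/.kube/config
//
// When -kubeconfig is omitted, the in-cluster service account configuration is used.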