1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-06 07:57:07 +00:00
kyverno/cmd/cleanup-controller/main.go
Charles-Edouard Brétéché 41800c63a0
feat: support flagsets in internal cmd package (#5461)
* feat: add signal in internal cmd package

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* logger

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* feat: support flagsets in internal cmd package

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* setup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* setup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* init container

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Co-authored-by: Vyankatesh Kudtarkar <vyankateshkd@gmail.com>
2022-11-24 20:57:01 +01:00

144 lines
4.9 KiB
Go

package main
import (
"context"
"flag"
"net/http"
"os"
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/cmd/internal"
"github.com/kyverno/kyverno/pkg/clients/dclient"
dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/logging"
"github.com/kyverno/kyverno/pkg/metrics"
corev1 "k8s.io/api/core/v1"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
)
// Metrics-related flags; values are bound to the "application" flag set in main().
var (
// otel selects the metrics exporter: "prometheus" (default) or "grpc" (OTLP collector).
otel string
// otelCollector is the OpenTelemetry Collector service address, used when otel == "grpc".
otelCollector string
// metricsPort is the port the prometheus metrics endpoint listens on (default "8000").
metricsPort string
// transportCreds names the CA secret for the OTLP metrics client; empty means an insecure connection.
transportCreds string
// disableMetricsExport disables metrics export entirely when true.
disableMetricsExport bool
)
const (
// resyncPeriod is the shared informer factory resync interval.
resyncPeriod = 15 * time.Minute
)
// setupMetrics initializes the metrics pipeline according to the package-level
// flags (otel, metricsPort, otelCollector, transportCreds, disableMetricsExport).
//
// It returns the metrics configuration, an optional shutdown function, and an
// error. The shutdown function is non-nil only for the "grpc" exporter, where
// the OTLP pusher must be flushed with a bounded timeout on exit; callers must
// therefore nil-check it before deferring. For the "prometheus" exporter the
// scrape endpoint is served from a background goroutine.
func setupMetrics(logger logr.Logger, kubeClient kubernetes.Interface) (*metrics.MetricsConfig, context.CancelFunc, error) {
	logger = logger.WithName("metrics")
	logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
	metricsConfigData, err := config.NewMetricsConfigData(kubeClient)
	if err != nil {
		return nil, nil, err
	}
	metricsAddr := ":" + metricsPort
	metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
		disableMetricsExport,
		otel,
		metricsAddr,
		otelCollector,
		metricsConfigData,
		transportCreds,
		kubeClient,
		logging.WithName("metrics"),
	)
	if err != nil {
		return nil, nil, err
	}
	var cancel context.CancelFunc
	if otel == "grpc" {
		// Give the OTLP pusher a bounded window to flush buffered data points
		// during shutdown.
		cancel = func() {
			ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
			defer cancel()
			metrics.ShutDownController(ctx, metricsPusher)
		}
	}
	if otel == "prometheus" {
		// Serve the scrape endpoint in the background. Use an http.Server with
		// ReadHeaderTimeout rather than bare http.ListenAndServe so slow or
		// stalled clients cannot hold connections open indefinitely
		// (Slowloris; gosec G112).
		metricsServer := &http.Server{
			Addr:              metricsAddr,
			Handler:           metricsServerMux,
			ReadHeaderTimeout: 30 * time.Second,
		}
		go func() {
			// ListenAndServe always returns a non-nil error on exit.
			if err := metricsServer.ListenAndServe(); err != nil {
				logger.Error(err, "failed to enable metrics", "address", metricsAddr)
			}
		}()
	}
	return metricsConfig, cancel, nil
}
// main wires up the cleanup controller: it parses flags, builds instrumented
// Kubernetes clients, starts metrics and informers, and runs the webhook
// server until the process context is cancelled.
func main() {
	// application flags
	flagset := flag.NewFlagSet("application", flag.ExitOnError)
	flagset.StringVar(&otel, "otelConfig", "prometheus", "Set this flag to 'grpc', to enable exporting metrics to an Opentelemetry Collector. The default collector is set to \"prometheus\"")
	flagset.StringVar(&otelCollector, "otelCollector", "opentelemetrycollector.kyverno.svc.cluster.local", "Set this flag to the OpenTelemetry Collector Service Address. Kyverno will try to connect to this on the metrics port.")
	flagset.StringVar(&transportCreds, "transportCreds", "", "Set this flag to the CA secret containing the certificate which is used by our Opentelemetry Metrics Client. If empty string is set, means an insecure connection will be used")
	flagset.StringVar(&metricsPort, "metricsPort", "8000", "Expose prometheus metrics at the given port, default to 8000.")
	flagset.BoolVar(&disableMetricsExport, "disableMetrics", false, "Set this flag to 'true' to disable metrics.")
	// config
	appConfig := internal.NewConfiguration(
		internal.WithProfiling(),
		internal.WithTracing(),
		internal.WithKubeconfig(),
		internal.WithFlagSets(flagset),
	)
	// parse flags
	internal.ParseFlags(appConfig)
	// setup logger
	// show version
	// start profiling
	// setup signals
	// setup maxprocs
	ctx, logger, sdown := internal.Setup()
	defer sdown()
	// create raw client (uninstrumented; needed before metrics exist)
	rawClient := internal.CreateKubernetesClient(logger)
	// setup signals
	signalCtx, signalCancel := internal.SetupSignals(logger)
	defer signalCancel()
	// setup metrics
	metricsConfig, metricsShutdown, err := setupMetrics(logger, rawClient)
	if err != nil {
		logger.Error(err, "failed to setup metrics")
		os.Exit(1)
	}
	// metricsShutdown is nil unless the grpc exporter is in use.
	if metricsShutdown != nil {
		defer metricsShutdown()
	}
	// create instrumented clients
	kubeClient := internal.CreateKubernetesClient(logger, kubeclient.WithMetrics(metricsConfig, metrics.KubeClient), kubeclient.WithTracing())
	dynamicClient := internal.CreateDynamicClient(logger, dynamicclient.WithMetrics(metricsConfig, metrics.KyvernoClient), dynamicclient.WithTracing())
	// use the shared resyncPeriod constant instead of a duplicated literal so
	// the dynamic client and the informer factory stay in sync
	dClient, err := dclient.NewClient(signalCtx, dynamicClient, kubeClient, resyncPeriod)
	if err != nil {
		logger.Error(err, "failed to create dynamic client")
		os.Exit(1)
	}
	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
	policyHandlers := NewHandlers(
		dClient,
	)
	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
	// start informers and wait for cache sync
	// we need to call start again because we potentially registered new informers
	if !internal.StartInformersAndWaitForCacheSync(ctx, kubeKyvernoInformer) {
		os.Exit(1)
	}
	server := NewServer(
		policyHandlers,
		// TLS material is read lazily from the controller's secret so
		// certificate rotation is picked up without a restart.
		func() ([]byte, []byte, error) {
			secret, err := secretLister.Secrets(config.KyvernoNamespace()).Get("cleanup-controller-tls")
			if err != nil {
				return nil, nil, err
			}
			return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
		},
	)
	// start webhooks server
	server.Run(ctx.Done())
	// wait for termination signal
	<-ctx.Done()
}