
refactor: split main func for metrics (#4796)

* refactor: split main func for metrics

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* clients

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* todo

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* metrics shutdown

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Charles-Edouard Brétéché 2022-10-04 11:05:21 +02:00 committed by GitHub
parent f079bfb25f
commit 4cb171c980
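
In outline, the refactor pulls metrics bootstrap out of main and into a setupMetrics helper that returns the metrics configuration together with an optional shutdown callback; main defers that callback only when one exists (the OTLP/gRPC push path sets it, the Prometheus pull path does not). Below is a minimal, self-contained sketch of that setup-returns-cleanup pattern; the types and the shutdown body are simplified stand-ins, not Kyverno's real signatures.

package main

import (
	"context"
	"fmt"
	"time"
)

// setupMetrics stands in for the real helper: it wires up an exporter and,
// for push-based modes, hands back a cancel func that flushes the pusher
// under a bounded timeout. All types here are simplified placeholders.
func setupMetrics(mode string) (string, context.CancelFunc, error) {
	metricsConfig := "metrics-config" // placeholder for *metrics.MetricsConfig
	var cancel context.CancelFunc
	if mode == "grpc" {
		cancel = func() {
			ctx, done := context.WithTimeout(context.Background(), 20*time.Second)
			defer done()
			shutDownPusher(ctx) // placeholder for metrics.ShutDownController(ctx, metricsPusher)
		}
	}
	return metricsConfig, cancel, nil
}

// shutDownPusher simulates flushing a push-based exporter before the deadline.
func shutDownPusher(ctx context.Context) {
	deadline, _ := ctx.Deadline()
	fmt.Println("flushing metrics pusher, deadline:", deadline)
}

func main() {
	metricsConfig, metricsShutdown, err := setupMetrics("grpc")
	if err != nil {
		fmt.Println("failed to setup metrics:", err)
		return
	}
	// Pull-based modes return no shutdown func, so guard the defer:
	// invoking a deferred nil func would panic at exit.
	if metricsShutdown != nil {
		defer metricsShutdown()
	}
	fmt.Println("running with", metricsConfig)
}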

@@ -131,6 +131,45 @@ func parseFlags() error {
 	return nil
 }
 
+func setupMetrics(logger logr.Logger, kubeClient kubernetes.Interface) (*metrics.MetricsConfig, context.CancelFunc, error) {
+	logger = logger.WithName("metrics")
+	metricsConfigData, err := config.NewMetricsConfigData(kubeClient)
+	if err != nil {
+		return nil, nil, err
+	}
+	metricsAddr := ":" + metricsPort
+	metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
+		disableMetricsExport,
+		otel,
+		metricsAddr,
+		otelCollector,
+		metricsConfigData,
+		transportCreds,
+		kubeClient,
+		logging.WithName("Metrics"),
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+	var cancel context.CancelFunc
+	if otel == "grpc" {
+		cancel = func() {
+			ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+			defer cancel()
+			metrics.ShutDownController(ctx, metricsPusher)
+		}
+	}
+	if otel == "prometheus" {
+		go func() {
+			logger.Info("Enabling Metrics for Kyverno", "address", metricsAddr)
+			if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
+				logger.Error(err, "failed to enable metrics", "address", metricsAddr)
+			}
+		}()
+	}
+	return metricsConfig, cancel, nil
+}
+
 func showStartup(logger logr.Logger) {
 	logger = logger.WithName("startup")
 	logger.Info("kyverno is staring...")
@@ -155,16 +194,25 @@ func startProfiling(logger logr.Logger) {
 	}
 }
 
-func createKubeClient() (*rest.Config, *kubernetes.Clientset, error) {
+func createKubeClients() (*rest.Config, *kubernetes.Clientset, metadataclient.Interface, kubernetes.Interface, error) {
 	clientConfig, err := config.CreateClientConfig(kubeconfig, clientRateLimitQPS, clientRateLimitBurst)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, nil, err
 	}
 	kubeClient, err := kubernetes.NewForConfig(clientConfig)
 	if err != nil {
-		return nil, nil, err
+		return nil, nil, nil, nil, err
 	}
-	return clientConfig, kubeClient, nil
+	metadataClient, err := metadataclient.NewForConfig(clientConfig)
+	if err != nil {
+		return nil, nil, nil, nil, err
+	}
+	// The leader queries/updates the lease object quite frequently. So we use a separate kube-client to eliminate the throttle issue
+	kubeClientLeaderElection, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return nil, nil, nil, nil, err
+	}
+	return clientConfig, kubeClient, metadataClient, kubeClientLeaderElection, nil
 }
 
 func main() {
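
An aside on the throttling comment in the hunk above: with rest.Config.RateLimiter left nil, each kubernetes.NewForConfig call builds its own client-side token bucket from the config's QPS and Burst, so a dedicated clientset gives the leader election lease traffic a rate-limit budget separate from the main client's. A rough sketch of that split, assuming an out-of-cluster kubeconfig; the path and the QPS/Burst values are illustrative only.

package main

import (
	"fmt"

	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

// buildClients mirrors the idea behind createKubeClients: two clientsets from
// one rest.Config. With RateLimiter left nil, client-go builds a separate
// token bucket per clientset from QPS/Burst, so lease get/update traffic on
// the leader election client cannot starve the main client's request budget.
func buildClients(kubeconfigPath string) (*kubernetes.Clientset, *kubernetes.Clientset, error) {
	clientConfig, err := clientcmd.BuildConfigFromFlags("", kubeconfigPath)
	if err != nil {
		return nil, nil, err
	}
	clientConfig.QPS = 20   // illustrative client-side rate limit
	clientConfig.Burst = 50 // illustrative burst

	kubeClient, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		return nil, nil, err
	}
	kubeClientLeaderElection, err := kubernetes.NewForConfig(clientConfig)
	if err != nil {
		return nil, nil, err
	}
	return kubeClient, kubeClientLeaderElection, nil
}

func main() {
	// Hypothetical path; in-cluster deployments would use rest.InClusterConfig.
	mainClient, leaderClient, err := buildClients("/path/to/kubeconfig")
	if err != nil {
		fmt.Println("failed to build clients:", err)
		return
	}
	fmt.Printf("main client: %T, leader election client: %T\n", mainClient, leaderClient)
}

That separation is what the diff's comment means by "eliminate the throttle issue": the limiter is per clientset, not per rest.Config.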
@@ -182,9 +230,9 @@ func main() {
 	// start profiling
 	startProfiling(logger)
 	// create client config and kube client
-	clientConfig, kubeClient, err := createKubeClient()
+	clientConfig, kubeClient, metadataClient, kubeClientLeaderElection, err := createKubeClients()
 	if err != nil {
-		logger.Error(err, "failed to create kube client")
+		logger.Error(err, "failed to create kubernetes clients")
 		os.Exit(1)
 	}
 	// show startup message
@@ -193,30 +241,16 @@ func main() {
 	signalCtx, signalCancel := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
 	defer signalCancel()
-	// Metrics Configuration
-	var metricsConfig *metrics.MetricsConfig
-	metricsConfigData, err := config.NewMetricsConfigData(kubeClient)
+	// setup metrics
+	metricsConfig, metricsShutdown, err := setupMetrics(logger, kubeClient)
 	if err != nil {
-		logger.Error(err, "failed to fetch metrics config")
+		logger.Error(err, "failed to setup metrics")
 		os.Exit(1)
 	}
-	metricsAddr := ":" + metricsPort
-	metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
-		disableMetricsExport,
-		otel,
-		metricsAddr,
-		otelCollector,
-		metricsConfigData,
-		transportCreds,
-		kubeClient,
-		logging.WithName("Metrics"),
-	)
-	if err != nil {
-		logger.Error(err, "failed to initialize metrics")
-		os.Exit(1)
+	if metricsShutdown != nil {
+		defer metricsShutdown()
 	}
+	// instrumented clients
 	kyvernoClient, err := kyvernoclient.NewForConfig(clientConfig, metricsConfig)
 	if err != nil {
 		logger.Error(err, "Failed to create client")
@@ -227,17 +261,6 @@ func main() {
 		logger.Error(err, "Failed to create dynamic client")
 		os.Exit(1)
 	}
-	metadataClient, err := metadataclient.NewForConfig(clientConfig)
-	if err != nil {
-		logger.Error(err, "Failed to create metadata client")
-		os.Exit(1)
-	}
-	// The leader queries/updates the lease object quite frequently. So we use a separate kube-client to eliminate the throttle issue
-	kubeClientLeaderElection, err := kubernetes.NewForConfig(clientConfig)
-	if err != nil {
-		logger.Error(err, "Failed to create kubernetes leader client")
-		os.Exit(1)
-	}
 	// sanity checks
 	if !utils.CRDsInstalled(dynamicClient.Discovery()) {
 		logger.Error(fmt.Errorf("CRDs not installed"), "Failed to access Kyverno CRDs")
@@ -256,21 +279,6 @@ func main() {
 	var registryOptions []registryclient.Option
-	if otel == "grpc" {
-		ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
-		defer metrics.ShutDownController(ctx, metricsPusher)
-		defer cancel()
-	}
-	if otel == "prometheus" {
-		go func() {
-			logger.Info("Enabling Metrics for Kyverno", "address", metricsAddr)
-			if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
-				logger.Error(err, "failed to enable metrics", "address", metricsAddr)
-			}
-		}()
-	}
 	// load image registry secrets
 	secrets := strings.Split(imagePullSecrets, ",")
 	if imagePullSecrets != "" && len(secrets) > 0 {