chore: cleanup of code, logging, and variables.

This also fixes an issue with a copy-pasted variable that caused an error when starting the metrics server.
This commit is contained in:
Tommy 2024-09-26 05:54:52 +02:00
parent 4664f08721
commit 9ab70156e7
Signed by: tommy
SSH key fingerprint: SHA256:1LWgQT3QPHIT29plS8jjXc3S1FcE/4oGvsx3Efxs6Uc
2 changed files with 17 additions and 70 deletions

View file

@ -3,6 +3,7 @@ package main
import ( import (
"context" "context"
"flag" "flag"
"log"
"net/http" "net/http"
"os" "os"
"os/signal" "os/signal"
@ -13,7 +14,6 @@ import (
"github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto" "github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp" "github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
@ -27,15 +27,15 @@ import (
) )
var ( var (
kubeconfig string kubeconfig string
namespace string namespace string
cmName string cmName string
id string id string
leaseLockName string leaseLockName string
leaderOverride bool
serverPort string serverPort string
healthPort string healthPort string
metricsPort string
requestCounter = promauto.NewCounterVec( requestCounter = promauto.NewCounterVec(
prometheus.CounterOpts{ prometheus.CounterOpts{
@ -55,8 +55,6 @@ var (
func parseFlags() { func parseFlags() {
klog.InitFlags(nil) klog.InitFlags(nil)
log.SetFormatter(&log.JSONFormatter{})
log.SetLevel(log.InfoLevel)
// Check for KUBECONFIG environment variable // Check for KUBECONFIG environment variable
if kubeconfigEnv := os.Getenv("KUBECONFIG"); kubeconfigEnv != "" { if kubeconfigEnv := os.Getenv("KUBECONFIG"); kubeconfigEnv != "" {
@ -72,7 +70,7 @@ func parseFlags() {
flag.StringVar(&leaseLockName, "lease-lock-name", "well-known", "the lease lock resource name") flag.StringVar(&leaseLockName, "lease-lock-name", "well-known", "the lease lock resource name")
flag.StringVar(&serverPort, "server-port", "8080", "server port") flag.StringVar(&serverPort, "server-port", "8080", "server port")
flag.StringVar(&healthPort, "health-port", "8081", "health port") flag.StringVar(&healthPort, "health-port", "8081", "health port")
flag.BoolVar(&leaderOverride, "leader-override", false, "manually override leader election") flag.StringVar(&metricsPort, "metrics-port", "9090", "metrics port")
flag.Parse() flag.Parse()
if id == "" { if id == "" {
@ -129,53 +127,9 @@ func main() {
clientset := getClientset() clientset := getClientset()
wks := NewWellKnownService(clientset, namespace, cmName) wks := NewWellKnownService(clientset, namespace, cmName)
if leaderOverride {
klog.Infof("Manual leader override enabled. This instance (%s) is the leader.", id)
// Run the discovery loop with context handling
go func() {
runLeaderTasks(ctx, wks)
}()
} else {
// Start the leader election code loop
leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
Lock: &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: leaseLockName,
Namespace: namespace,
},
Client: clientset.CoordinationV1(),
LockConfig: resourcelock.ResourceLockConfig{
Identity: id,
},
},
ReleaseOnCancel: true,
LeaseDuration: 60 * time.Second,
RenewDeadline: 15 * time.Second,
RetryPeriod: 5 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
klog.Infof("This instance is now the leader: %s", id)
runLeaderTasks(ctx, wks)
},
OnStoppedLeading: func() {
klog.Infof("Leader lost: %s", id)
os.Exit(0)
},
OnNewLeader: func(identity string) {
if identity == id {
return
}
klog.Infof("New leader elected: %s", identity)
},
},
})
}
// Start the server // Start the server
go func() { go func() {
klog.Infof("Running /.well-known/{id} endpoint on :%s", serverPort) klog.Infof("Starting /.well-known endpoint on :%s", serverPort)
if err := http.ListenAndServe(":"+serverPort, GetServer(wks)); err != nil { if err := http.ListenAndServe(":"+serverPort, GetServer(wks)); err != nil {
klog.Error(err) klog.Error(err)
os.Exit(1) os.Exit(1)
@ -184,19 +138,17 @@ func main() {
// Start the health server // Start the health server
go func() { go func() {
klog.Infof("Running /healthz endpoint on :%s", healthPort) klog.Infof("Starting /healthz endpoint on :%s", healthPort)
if err := http.ListenAndServe(":"+healthPort, GetHealthServer()); err != nil { if err := http.ListenAndServe(":"+healthPort, GetHealthServer()); err != nil {
klog.Error(err) klog.Error(err)
os.Exit(1) os.Exit(1)
} }
}() }()
metricsPort := "9090" // You can set this via flag or environment variable
// Start the metrics server // Start the metrics server
go func() { go func() {
klog.Infof("Starting /metrics endpoint on :%s", metricsPort)
http.Handle("/metrics", promhttp.Handler()) http.Handle("/metrics", promhttp.Handler())
log.Infof("Running metrics endpoint on :%s", metricsPort)
if err := http.ListenAndServe(":"+metricsPort, nil); err != nil { if err := http.ListenAndServe(":"+metricsPort, nil); err != nil {
log.Fatalf("Error starting metrics server: %v", err) log.Fatalf("Error starting metrics server: %v", err)
} }
@ -222,20 +174,17 @@ func main() {
OnStartedLeading: func(ctx context.Context) { OnStartedLeading: func(ctx context.Context) {
klog.Infof("This instance is now the leader: %s", id) klog.Infof("This instance is now the leader: %s", id)
for { runLeaderTasks(ctx, wks)
wks.DiscoveryLoop(ctx)
}
}, },
OnStoppedLeading: func() { OnStoppedLeading: func() {
klog.Infof("leader lost: %s", id) klog.Infof("Leader lost: %s", id)
os.Exit(0) os.Exit(0)
}, },
OnNewLeader: func(identity string) { OnNewLeader: func(identity string) {
if identity == id { if identity == id {
klog.Infof("This instance is still the leader: %s", id) return
} else {
klog.Infof("New leader elected: %s (this instance: %s)", identity, id)
} }
klog.Infof("New leader elected: %s", identity)
}, },
}, },
}) })

View file

@ -208,8 +208,6 @@ func (s *WellKnownService) collectData(ctx context.Context) (wkRegistry, error)
return reg, err return reg, err
} }
klog.Infof("Found %d services in namespace %s", len(svcs.Items), s.namespace)
for _, svc := range svcs.Items { for _, svc := range svcs.Items {
//klog.Infof("Processing service: %s", svc.GetName()) //klog.Infof("Processing service: %s", svc.GetName())