chore: clean up code, logging, and variables
This also fixes a copy-pasted variable that caused an error when starting the metrics server.
parent 4664f08721
commit 9ab70156e7
2 changed files with 17 additions and 70 deletions
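The copy-paste fix called out in the message is visible in the hunks below: the hardcoded local metricsPort inside main() is removed, and metricsPort becomes a package-level variable registered as a --metrics-port flag next to --server-port and --health-port. A minimal sketch of the resulting wiring, assuming only what the diff itself shows:

var metricsPort string

func parseFlags() {
	// metrics-port now comes from a flag instead of a value pasted into main()
	flag.StringVar(&metricsPort, "metrics-port", "9090", "metrics port")
	flag.Parse()
}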
@@ -3,6 +3,7 @@ package main
import (
	"context"
	"flag"
	"log"
	"net/http"
	"os"
	"os/signal"
@@ -13,7 +14,6 @@ import (
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	log "github.com/sirupsen/logrus"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
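The context and os/signal imports above, together with ReleaseOnCancel: true in the leader election config further down, suggest the root context is cancelled on SIGINT/SIGTERM so the lease is released on shutdown. A hedged sketch of that wiring, which is not part of this diff (the syscall import is assumed):

// requires "context", "os", "os/signal", and "syscall" in the import block
ctx, stop := signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
defer stop() // cancelling ctx lets RunOrDie release the lease via ReleaseOnCancel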
@@ -27,15 +27,15 @@ import (
)

var (
	kubeconfig string
	namespace string
	cmName string
	id string
	leaseLockName string
	leaderOverride bool
	kubeconfig string
	namespace string
	cmName string
	id string
	leaseLockName string

	serverPort string
	healthPort string
	serverPort string
	healthPort string
	metricsPort string

	requestCounter = promauto.NewCounterVec(
		prometheus.CounterOpts{
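The requestCounter declaration is cut off by the hunk boundary. For context, promauto.NewCounterVec takes the counter options plus a slice of label names and registers the collector with the default registry, which is what promhttp.Handler() later serves. A sketch of the likely shape; the metric name and labels here are illustrative, not taken from this diff:

requestCounter = promauto.NewCounterVec(
	prometheus.CounterOpts{
		Name: "wellknown_requests_total", // assumed name, for illustration only
		Help: "Total HTTP requests served by the well-known endpoint.",
	},
	[]string{"path", "code"}, // assumed labels
)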
@@ -55,8 +55,6 @@ var (

func parseFlags() {
	klog.InitFlags(nil)
	log.SetFormatter(&log.JSONFormatter{})
	log.SetLevel(log.InfoLevel)

	// Check for KUBECONFIG environment variable
	if kubeconfigEnv := os.Getenv("KUBECONFIG"); kubeconfigEnv != "" {
@@ -72,7 +70,7 @@ func parseFlags() {
	flag.StringVar(&leaseLockName, "lease-lock-name", "well-known", "the lease lock resource name")
	flag.StringVar(&serverPort, "server-port", "8080", "server port")
	flag.StringVar(&healthPort, "health-port", "8081", "health port")
	flag.BoolVar(&leaderOverride, "leader-override", false, "manually override leader election")
	flag.StringVar(&metricsPort, "metrics-port", "9090", "metrics port")
	flag.Parse()

	if id == "" {
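With leader-override dropped and metrics-port added, an invocation of the binary would look roughly like the following (the binary name and values are illustrative):

./well-known \
  --lease-lock-name=well-known \
  --server-port=8080 \
  --health-port=8081 \
  --metrics-port=9090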
@@ -129,53 +127,9 @@ func main() {
	clientset := getClientset()
	wks := NewWellKnownService(clientset, namespace, cmName)

	if leaderOverride {
		klog.Infof("Manual leader override enabled. This instance (%s) is the leader.", id)

		// Run the discovery loop with context handling
		go func() {
			runLeaderTasks(ctx, wks)
		}()

	} else {
		// Start the leader election code loop
		leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
			Lock: &resourcelock.LeaseLock{
				LeaseMeta: metav1.ObjectMeta{
					Name:      leaseLockName,
					Namespace: namespace,
				},
				Client: clientset.CoordinationV1(),
				LockConfig: resourcelock.ResourceLockConfig{
					Identity: id,
				},
			},
			ReleaseOnCancel: true,
			LeaseDuration:   60 * time.Second,
			RenewDeadline:   15 * time.Second,
			RetryPeriod:     5 * time.Second,
			Callbacks: leaderelection.LeaderCallbacks{
				OnStartedLeading: func(ctx context.Context) {
					klog.Infof("This instance is now the leader: %s", id)
					runLeaderTasks(ctx, wks)
				},
				OnStoppedLeading: func() {
					klog.Infof("Leader lost: %s", id)
					os.Exit(0)
				},
				OnNewLeader: func(identity string) {
					if identity == id {
						return
					}
					klog.Infof("New leader elected: %s", identity)
				},
			},
		})
	}

	// Start the server
	go func() {
		klog.Infof("Running /.well-known/{id} endpoint on :%s", serverPort)
		klog.Infof("Starting /.well-known endpoint on :%s", serverPort)
		if err := http.ListenAndServe(":"+serverPort, GetServer(wks)); err != nil {
			klog.Error(err)
			os.Exit(1)
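runLeaderTasks is called from both branches above but its body is not part of this diff; judging by the for { wks.DiscoveryLoop(ctx) } loop it replaces in a later hunk of this file, it presumably wraps the discovery loop with context handling. A hypothetical sketch, with the *WellKnownService type assumed from NewWellKnownService:

// hypothetical reconstruction, not the actual implementation in this repository
func runLeaderTasks(ctx context.Context, wks *WellKnownService) {
	for {
		select {
		case <-ctx.Done():
			klog.Info("Context cancelled; stopping leader tasks")
			return
		default:
			wks.DiscoveryLoop(ctx)
		}
	}
}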
@@ -184,19 +138,17 @@ func main() {

	// Start the health server
	go func() {
		klog.Infof("Running /healthz endpoint on :%s", healthPort)
		klog.Infof("Starting /healthz endpoint on :%s", healthPort)
		if err := http.ListenAndServe(":"+healthPort, GetHealthServer()); err != nil {
			klog.Error(err)
			os.Exit(1)
		}
	}()

	metricsPort := "9090" // You can set this via flag or environment variable

	// Start the metrics server
	go func() {
		klog.Infof("Starting /metrics endpoint on :%s", metricsPort)
		http.Handle("/metrics", promhttp.Handler())
		log.Infof("Running metrics endpoint on :%s", metricsPort)
		if err := http.ListenAndServe(":"+metricsPort, nil); err != nil {
			log.Fatalf("Error starting metrics server: %v", err)
		}
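With the pasted-in local metricsPort and the duplicated logrus calls removed, the metrics goroutine presumably ends up logging through klog and reading the flag-controlled port. A sketch of that end state; using a dedicated ServeMux instead of http.Handle on the default mux is an assumption, not something this diff shows:

go func() {
	// serve Prometheus metrics on the flag-controlled port
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.Handler())
	klog.Infof("Starting /metrics endpoint on :%s", metricsPort)
	if err := http.ListenAndServe(":"+metricsPort, mux); err != nil {
		klog.Error(err)
		os.Exit(1)
	}
}()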
@@ -222,20 +174,17 @@ func main() {
			OnStartedLeading: func(ctx context.Context) {
				klog.Infof("This instance is now the leader: %s", id)

				for {
					wks.DiscoveryLoop(ctx)
				}
				runLeaderTasks(ctx, wks)
			},
			OnStoppedLeading: func() {
				klog.Infof("leader lost: %s", id)
				klog.Infof("Leader lost: %s", id)
				os.Exit(0)
			},
			OnNewLeader: func(identity string) {
				if identity == id {
					klog.Infof("This instance is still the leader: %s", id)
				} else {
					klog.Infof("New leader elected: %s (this instance: %s)", identity, id)
					return
				}
				klog.Infof("New leader elected: %s", identity)
			},
		},
	})
@@ -208,8 +208,6 @@ func (s *WellKnownService) collectData(ctx context.Context) (wkRegistry, error)
		return reg, err
	}

	klog.Infof("Found %d services in namespace %s", len(svcs.Items), s.namespace)

	for _, svc := range svcs.Items {
		//klog.Infof("Processing service: %s", svc.GetName())
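For context, svcs in this hunk is presumably the result of a client-go Services list call whose error path is the return reg, err shown above. A sketch of how that call typically looks; the s.clientset field name is assumed:

svcs, err := s.clientset.CoreV1().Services(s.namespace).List(ctx, metav1.ListOptions{})
if err != nil {
	klog.Errorf("Failed to list services in namespace %s: %v", s.namespace, err)
	return reg, err
}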