1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-30 19:35:06 +00:00

feat: support flagsets in internal cmd package (#5461)

* feat: add signal in internal cmd package

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* logger

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* feat: support flagsets in internal cmd package

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* setup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* setup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* init container

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Co-authored-by: Vyankatesh Kudtarkar <vyankateshkd@gmail.com>
This commit is contained in:
Charles-Edouard Brétéché 2022-11-24 20:57:01 +01:00 committed by GitHub
parent 035ab3bb06
commit 41800c63a0
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
5 changed files with 81 additions and 54 deletions

View file

@ -32,16 +32,6 @@ const (
resyncPeriod = 15 * time.Minute resyncPeriod = 15 * time.Minute
) )
func parseFlags(config internal.Configuration) {
internal.InitFlags(config)
flag.StringVar(&otel, "otelConfig", "prometheus", "Set this flag to 'grpc', to enable exporting metrics to an Opentelemetry Collector. The default collector is set to \"prometheus\"")
flag.StringVar(&otelCollector, "otelCollector", "opentelemetrycollector.kyverno.svc.cluster.local", "Set this flag to the OpenTelemetry Collector Service Address. Kyverno will try to connect to this on the metrics port.")
flag.StringVar(&transportCreds, "transportCreds", "", "Set this flag to the CA secret containing the certificate which is used by our Opentelemetry Metrics Client. If empty string is set, means an insecure connection will be used")
flag.StringVar(&metricsPort, "metricsPort", "8000", "Expose prometheus metrics at the given port, default to 8000.")
flag.BoolVar(&disableMetricsExport, "disableMetrics", false, "Set this flag to 'true' to disable metrics.")
flag.Parse()
}
func setupMetrics(logger logr.Logger, kubeClient kubernetes.Interface) (*metrics.MetricsConfig, context.CancelFunc, error) { func setupMetrics(logger logr.Logger, kubeClient kubernetes.Interface) (*metrics.MetricsConfig, context.CancelFunc, error) {
logger = logger.WithName("metrics") logger = logger.WithName("metrics")
logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds) logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
@ -82,23 +72,29 @@ func setupMetrics(logger logr.Logger, kubeClient kubernetes.Interface) (*metrics
} }
func main() { func main() {
// application flags
flagset := flag.NewFlagSet("application", flag.ExitOnError)
flagset.StringVar(&otel, "otelConfig", "prometheus", "Set this flag to 'grpc', to enable exporting metrics to an Opentelemetry Collector. The default collector is set to \"prometheus\"")
flagset.StringVar(&otelCollector, "otelCollector", "opentelemetrycollector.kyverno.svc.cluster.local", "Set this flag to the OpenTelemetry Collector Service Address. Kyverno will try to connect to this on the metrics port.")
flagset.StringVar(&transportCreds, "transportCreds", "", "Set this flag to the CA secret containing the certificate which is used by our Opentelemetry Metrics Client. If empty string is set, means an insecure connection will be used")
flagset.StringVar(&metricsPort, "metricsPort", "8000", "Expose prometheus metrics at the given port, default to 8000.")
flagset.BoolVar(&disableMetricsExport, "disableMetrics", false, "Set this flag to 'true' to disable metrics.")
// config // config
appConfig := internal.NewConfiguration( appConfig := internal.NewConfiguration(
internal.WithProfiling(), internal.WithProfiling(),
internal.WithTracing(), internal.WithTracing(),
internal.WithKubeconfig(), internal.WithKubeconfig(),
internal.WithFlagSets(flagset),
) )
// parse flags // parse flags
parseFlags(appConfig) internal.ParseFlags(appConfig)
// setup logger // setup logger
logger := internal.SetupLogger()
// setup maxprocs
undo := internal.SetupMaxProcs(logger)
defer undo()
// show version // show version
internal.ShowVersion(logger)
// start profiling // start profiling
internal.SetupProfiling(logger) // setup signals
// setup maxprocs
ctx, logger, sdown := internal.Setup()
defer sdown()
// create raw client // create raw client
rawClient := internal.CreateKubernetesClient(logger) rawClient := internal.CreateKubernetesClient(logger)
// setup signals // setup signals
@ -128,7 +124,7 @@ func main() {
secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister() secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
// start informers and wait for cache sync // start informers and wait for cache sync
// we need to call start again because we potentially registered new informers // we need to call start again because we potentially registered new informers
if !internal.StartInformersAndWaitForCacheSync(signalCtx, kubeKyvernoInformer) { if !internal.StartInformersAndWaitForCacheSync(ctx, kubeKyvernoInformer) {
os.Exit(1) os.Exit(1)
} }
server := NewServer( server := NewServer(
@ -142,7 +138,7 @@ func main() {
}, },
) )
// start webhooks server // start webhooks server
server.Run(signalCtx.Done()) server.Run(ctx.Done())
// wait for termination signal // wait for termination signal
<-signalCtx.Done() <-ctx.Done()
} }

View file

@ -6,7 +6,6 @@ package main
import ( import (
"context" "context"
"encoding/json" "encoding/json"
"flag"
"os" "os"
"sync" "sync"
"time" "time"
@ -34,46 +33,29 @@ const (
convertGenerateRequest string = "ConvertGenerateRequest" convertGenerateRequest string = "ConvertGenerateRequest"
) )
func parseFlags(config internal.Configuration) {
internal.InitFlags(config)
flag.Parse()
}
func main() { func main() {
// config // config
appConfig := internal.NewConfiguration( appConfig := internal.NewConfiguration(
internal.WithKubeconfig(), internal.WithKubeconfig(),
) )
// parse flags // parse flags
parseFlags(appConfig) internal.ParseFlags(appConfig)
// setup logger // setup logger
logger := internal.SetupLogger()
// setup maxprocs
undo := internal.SetupMaxProcs(logger)
defer undo()
// show version // show version
internal.ShowVersion(logger) // start profiling
// os signal handler // setup signals
signalCtx, signalCancel := internal.SetupSignals(logger) // setup maxprocs
defer signalCancel() ctx, logger, sdown := internal.Setup()
defer sdown()
// create clients
kubeClient := internal.CreateKubernetesClient(logger) kubeClient := internal.CreateKubernetesClient(logger)
dynamicClient := internal.CreateDynamicClient(logger) dynamicClient := internal.CreateDynamicClient(logger)
kyvernoClient := internal.CreateKyvernoClient(logger) kyvernoClient := internal.CreateKyvernoClient(logger)
client, err := dclient.NewClient(ctx, dynamicClient, kubeClient, 15*time.Minute)
// DYNAMIC CLIENT
// - client for all registered resources
client, err := dclient.NewClient(
signalCtx,
dynamicClient,
kubeClient,
15*time.Minute,
)
if err != nil { if err != nil {
logger.Error(err, "Failed to create client") logger.Error(err, "Failed to create client")
os.Exit(1) os.Exit(1)
} }
// Exit for unsupported version of kubernetes cluster // Exit for unsupported version of kubernetes cluster
if !utils.HigherThanKubernetesVersion(kubeClient.Discovery(), logging.GlobalLogger(), 1, 16, 0) { if !utils.HigherThanKubernetesVersion(kubeClient.Discovery(), logging.GlobalLogger(), 1, 16, 0) {
os.Exit(1) os.Exit(1)
@ -86,8 +68,8 @@ func main() {
} }
go func() { go func() {
defer signalCancel() defer sdown()
<-signalCtx.Done() <-ctx.Done()
}() }()
done := make(chan struct{}) done := make(chan struct{})
@ -113,19 +95,19 @@ func main() {
} }
} }
if err = acquireLeader(signalCtx, kubeClient); err != nil { if err = acquireLeader(ctx, kubeClient); err != nil {
logging.V(2).Info("Failed to create lease 'kyvernopre-lock'") logging.V(2).Info("Failed to create lease 'kyvernopre-lock'")
os.Exit(1) os.Exit(1)
} }
// use pipeline to pass request to cleanup resources // use pipeline to pass request to cleanup resources
in := gen(done, signalCtx.Done(), requests...) in := gen(done, ctx.Done(), requests...)
// process requests // process requests
// processing routine count : 2 // processing routine count : 2
p1 := process(client, kyvernoClient, done, signalCtx.Done(), in) p1 := process(client, kyvernoClient, done, ctx.Done(), in)
p2 := process(client, kyvernoClient, done, signalCtx.Done(), in) p2 := process(client, kyvernoClient, done, ctx.Done(), in)
// merge results from processing routines // merge results from processing routines
for err := range merge(done, signalCtx.Done(), p1, p2) { for err := range merge(done, ctx.Done(), p1, p2) {
if err != nil { if err != nil {
failure = true failure = true
logging.Error(err, "failed to cleanup resource") logging.Error(err, "failed to cleanup resource")
@ -155,7 +137,7 @@ func main() {
os.Exit(1) os.Exit(1)
} }
le.Run(signalCtx) le.Run(ctx)
} }
func acquireLeader(ctx context.Context, kubeClient kubernetes.Interface) error { func acquireLeader(ctx context.Context, kubeClient kubernetes.Interface) error {

View file

@ -1,9 +1,12 @@
package internal package internal
import "flag"
type Configuration interface { type Configuration interface {
UsesTracing() bool UsesTracing() bool
UsesProfiling() bool UsesProfiling() bool
UsesKubeconfig() bool UsesKubeconfig() bool
FlagSets() []*flag.FlagSet
} }
func NewConfiguration(options ...ConfigurationOption) Configuration { func NewConfiguration(options ...ConfigurationOption) Configuration {
@ -34,10 +37,17 @@ func WithKubeconfig() ConfigurationOption {
} }
} }
func WithFlagSets(flagsets ...*flag.FlagSet) ConfigurationOption {
return func(c *configuration) {
c.flagSets = append(c.flagSets, flagsets...)
}
}
type configuration struct { type configuration struct {
usesTracing bool usesTracing bool
usesProfiling bool usesProfiling bool
usesKubeconfig bool usesKubeconfig bool
flagSets []*flag.FlagSet
} }
func (c *configuration) UsesTracing() bool { func (c *configuration) UsesTracing() bool {
@ -51,3 +61,7 @@ func (c *configuration) UsesProfiling() bool {
func (c *configuration) UsesKubeconfig() bool { func (c *configuration) UsesKubeconfig() bool {
return c.usesKubeconfig return c.usesKubeconfig
} }
func (c *configuration) FlagSets() []*flag.FlagSet {
return c.flagSets
}

View file

@ -64,4 +64,14 @@ func InitFlags(config Configuration) {
if config.UsesKubeconfig() { if config.UsesKubeconfig() {
initKubeconfigFlags() initKubeconfigFlags()
} }
for _, flagset := range config.FlagSets() {
flagset.VisitAll(func(f *flag.Flag) {
flag.CommandLine.Var(f.Value, f.Name, f.Usage)
})
}
}
func ParseFlags(config Configuration) {
InitFlags(config)
flag.Parse()
} }

25
cmd/internal/setup.go Normal file
View file

@ -0,0 +1,25 @@
package internal
import (
"context"
"github.com/go-logr/logr"
)
// shutdown combines multiple shutdown callbacks into a single
// context.CancelFunc. When the returned function is invoked, it logs once
// per registered callback and then runs the callbacks in REVERSE
// registration order (via defer), mirroring the usual teardown discipline
// of unwinding resources opposite to how they were acquired.
//
// Note: all log lines are emitted before any callback actually runs,
// because each iteration logs eagerly but defers the call itself.
func shutdown(logger logr.Logger, sdowns ...context.CancelFunc) context.CancelFunc {
	return func() {
		for i := range sdowns {
			logger.Info("shutting down...")
			defer sdowns[i]()
		}
	}
}
// Setup runs the startup sequence shared by kyverno commands: it builds
// the logger, prints the version banner, tunes GOMAXPROCS, enables
// profiling, and installs OS signal handling. It returns the signal-aware
// context, the configured logger, and a single combined shutdown function
// that callers must defer to release everything acquired here.
func Setup() (context.Context, logr.Logger, context.CancelFunc) {
	log := SetupLogger()
	ShowVersion(log)
	undoMaxProcs := SetupMaxProcs(log)
	SetupProfiling(log)
	ctx, cancelSignals := SetupSignals(log)
	return ctx, log, shutdown(log.WithName("shutdown"), undoMaxProcs, cancelSignals)
}