Mirror of https://github.com/kyverno/kyverno.git (synced 2024-12-14 11:57:48 +00:00)
refactor: move leader election code in internal package (#6854)
* refactor: move registry client init in internal package
* refactor: move leader election code in internal package

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Parent: 89b0a649e3
Commit: a0548898b7
13 changed files with 70 additions and 48 deletions
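In practice, each controller's main() no longer declares its own leaderElectionRetryPeriod flag or builds a dedicated Kubernetes client for leader election; it opts in through the internal package and reads both back from it. The sketch below condenses that pattern from the hunks that follow. It is illustrative only: the flagset name "example-controller" and the log call are placeholders, flag parsing and the leaderelection.New call are elided, and cmd/internal is only importable from within the Kyverno module.

package main

import (
    "flag"

    "github.com/kyverno/kyverno/cmd/internal"
)

func main() {
    // Controller-specific flags only; the leaderElectionRetryPeriod flag is
    // now owned by cmd/internal and registered via WithLeaderElection().
    flagset := flag.NewFlagSet("example-controller", flag.ExitOnError)
    appConfig := internal.NewConfiguration(
        internal.WithLeaderElection(),
        internal.WithFlagSets(flagset),
    )
    // flag parsing happens here exactly as before (omitted in this sketch)
    ctx, setup, sdown := internal.Setup(appConfig, "example-controller", false)
    defer sdown()
    // The leader election client and retry period now come from the setup
    // result and the internal package instead of being built in main().
    setup.Logger.Info("leader election wiring",
        "retryPeriod", internal.LeaderElectionRetryPeriod(),
        "clientConfigured", setup.LeaderElectionClient != nil,
    )
    _ = ctx
}

At the leaderelection.New call sites the commit simply swaps leaderElectionClient and leaderElectionRetryPeriod for setup.LeaderElectionClient and internal.LeaderElectionRetryPeriod(), as the hunks below show.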
@@ -14,7 +14,6 @@ import (
 	kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"
 	dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
-	kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
 	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
 	"github.com/kyverno/kyverno/pkg/config"
 	policymetricscontroller "github.com/kyverno/kyverno/pkg/controllers/metrics/policy"
@@ -81,14 +80,12 @@ func createrLeaderControllers(
 func main() {
 	var (
-		genWorkers                int
-		maxQueuedEvents           int
-		leaderElectionRetryPeriod time.Duration
+		genWorkers      int
+		maxQueuedEvents int
 	)
 	flagset := flag.NewFlagSet("updaterequest-controller", flag.ExitOnError)
 	flagset.IntVar(&genWorkers, "genWorkers", 10, "Workers for the background controller.")
 	flagset.IntVar(&maxQueuedEvents, "maxQueuedEvents", 1000, "Maximum events to be queued.")
-	flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
 	// config
 	appConfig := internal.NewConfiguration(
 		internal.WithProfiling(),
@@ -99,6 +96,7 @@ func main() {
 		internal.WithConfigMapCaching(),
 		internal.WithCosign(),
 		internal.WithRegistryClient(),
+		internal.WithLeaderElection(),
 		internal.WithFlagSets(flagset),
 	)
 	// parse flags
@@ -107,7 +105,6 @@ func main() {
 	signalCtx, setup, sdown := internal.Setup(appConfig, "kyverno-background-controller", false)
 	defer sdown()
 	// create instrumented clients
-	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
 	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), kyvernoclient.WithTracing())
 	dynamicClient := internal.CreateDynamicClient(setup.Logger, dynamicclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), dynamicclient.WithTracing())
 	dClient, err := dclient.NewClient(signalCtx, dynamicClient, setup.KubeClient, 15*time.Minute)
@@ -157,9 +154,9 @@ func main() {
 		setup.Logger.WithName("leader-election"),
 		"kyverno-background-controller",
 		config.KyvernoNamespace(),
-		leaderElectionClient,
+		setup.LeaderElectionClient,
 		config.KyvernoPodName(),
-		leaderElectionRetryPeriod,
+		internal.LeaderElectionRetryPeriod(),
 		func(ctx context.Context) {
 			logger := setup.Logger.WithName("leader")
 			// create leader factories

@@ -13,7 +13,6 @@ import (
 	"github.com/kyverno/kyverno/cmd/internal"
 	kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
 	dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
-	kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
 	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/controllers/certmanager"
@@ -52,14 +51,12 @@ func (probes) IsLive() bool {
 func main() {
 	var (
-		leaderElectionRetryPeriod time.Duration
-		dumpPayload               bool
-		serverIP                  string
-		servicePort               int
+		dumpPayload bool
+		serverIP    string
+		servicePort int
 	)
 	flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
 	flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.")
-	flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
 	flagset.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
 	flagset.IntVar(&servicePort, "servicePort", 443, "Port used by the Kyverno Service resource and for webhook configurations.")
 	// config
@@ -68,6 +65,7 @@ func main() {
 		internal.WithMetrics(),
 		internal.WithTracing(),
 		internal.WithKubeconfig(),
+		internal.WithLeaderElection(),
 		internal.WithFlagSets(flagset),
 	)
 	// parse flags
@@ -76,16 +74,15 @@ func main() {
 	ctx, setup, sdown := internal.Setup(appConfig, "kyverno-cleanup-controller", false)
 	defer sdown()
 	// create instrumented clients
-	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
 	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kyvernoclient.WithTracing())
 	// setup leader election
 	le, err := leaderelection.New(
 		setup.Logger.WithName("leader-election"),
 		"kyverno-cleanup-controller",
 		config.KyvernoNamespace(),
-		leaderElectionClient,
+		setup.LeaderElectionClient,
 		config.KyvernoPodName(),
-		leaderElectionRetryPeriod,
+		internal.LeaderElectionRetryPeriod(),
 		func(ctx context.Context) {
 			logger := setup.Logger.WithName("leader")
 			// informer factories

@@ -36,7 +36,7 @@ func createClientConfig(logger logr.Logger) *rest.Config {
 	return clientConfig
 }
 
-func CreateKubernetesClient(logger logr.Logger, opts ...kube.NewOption) kubernetes.Interface {
+func createKubernetesClient(logger logr.Logger, opts ...kube.NewOption) kubernetes.Interface {
 	logger = logger.WithName("kube-client")
 	logger.Info("create kube client...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
 	client, err := kube.NewForConfig(createClientConfig(logger), opts...)

@@ -13,6 +13,7 @@ type Configuration interface {
 	UsesConfigMapCaching() bool
 	UsesCosign() bool
 	UsesRegistryClient() bool
+	UsesLeaderElection() bool
 	FlagSets() []*flag.FlagSet
 }
@@ -74,6 +75,12 @@ func WithRegistryClient() ConfigurationOption {
 	}
 }
 
+func WithLeaderElection() ConfigurationOption {
+	return func(c *configuration) {
+		c.usesLeaderElection = true
+	}
+}
+
 func WithFlagSets(flagsets ...*flag.FlagSet) ConfigurationOption {
 	return func(c *configuration) {
 		c.flagSets = append(c.flagSets, flagsets...)
@@ -89,6 +96,7 @@ type configuration struct {
 	usesConfigMapCaching bool
 	usesCosign           bool
 	usesRegistryClient   bool
+	usesLeaderElection   bool
 	flagSets             []*flag.FlagSet
 }
@@ -124,6 +132,10 @@ func (c *configuration) UsesRegistryClient() bool {
 	return c.usesRegistryClient
 }
 
+func (c *configuration) UsesLeaderElection() bool {
+	return c.usesLeaderElection
+}
+
 func (c *configuration) FlagSets() []*flag.FlagSet {
 	return c.flagSets
 }

@@ -2,7 +2,9 @@ package internal
 
 import (
 	"flag"
+	"time"
 
+	"github.com/kyverno/kyverno/pkg/leaderelection"
 	"github.com/kyverno/kyverno/pkg/logging"
 )
 
@@ -37,6 +39,8 @@ var (
 	// registry client
 	imagePullSecrets          string
 	allowInsecureRegistry     bool
+	// leader election
+	leaderElectionRetryPeriod time.Duration
 )
 
 func initLoggingFlags() {
@@ -90,6 +94,10 @@ func initRegistryClientFlags() {
 	flag.StringVar(&imagePullSecrets, "imagePullSecrets", "", "Secret resource names for image registry access credentials.")
 }
 
+func initLeaderElectionFlags() {
+	flag.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
+}
+
 func InitFlags(config Configuration) {
 	// logging
 	initLoggingFlags()
@@ -125,6 +133,10 @@ func InitFlags(config Configuration) {
 	if config.UsesRegistryClient() {
 		initRegistryClientFlags()
 	}
+	// leader election
+	if config.UsesLeaderElection() {
+		initLeaderElectionFlags()
+	}
 	for _, flagset := range config.FlagSets() {
 		flagset.VisitAll(func(f *flag.Flag) {
 			flag.CommandLine.Var(f.Value, f.Name, f.Usage)
@@ -144,3 +156,7 @@ func ExceptionNamespace() string {
 func PolicyExceptionEnabled() bool {
 	return enablePolicyException
 }
+
+func LeaderElectionRetryPeriod() time.Duration {
+	return leaderElectionRetryPeriod
+}

@@ -8,7 +8,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/logging"
 )
 
-func SetupLogger() logr.Logger {
+func setupLogger() logr.Logger {
 	logLevel, err := strconv.Atoi(flag.Lookup("v").Value.String())
 	checkErr(err, "failed to setup logger")
 	checkErr(logging.Setup(loggingFormat, logLevel), "failed to setup logger")

@@ -7,7 +7,7 @@ import (
 	"go.uber.org/automaxprocs/maxprocs"
 )
 
-func SetupMaxProcs(logger logr.Logger) func() {
+func setupMaxProcs(logger logr.Logger) func() {
 	logger = logger.WithName("maxprocs")
 	logger.Info("setup maxprocs...")
 	undo, err := maxprocs.Set(

@@ -7,7 +7,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/profiling"
 )
 
-func SetupProfiling(logger logr.Logger) {
+func setupProfiling(logger logr.Logger) {
 	logger = logger.WithName("profiling").WithValues("enabled", profilingEnabled, "address", profilingAddress, "port", profilingPort)
 	if profilingEnabled {
 		logger.Info("setup profiling...")

@@ -28,16 +28,17 @@ type SetupResult struct {
 	MetricsConfiguration config.MetricsConfiguration
 	MetricsManager       metrics.MetricsConfigManager
 	KubeClient           kubernetes.Interface
+	LeaderElectionClient kubernetes.Interface
 	RegistryClient       registryclient.Client
 }
 
 func Setup(config Configuration, name string, skipResourceFilters bool) (context.Context, SetupResult, context.CancelFunc) {
-	logger := SetupLogger()
-	ShowVersion(logger)
-	sdownMaxProcs := SetupMaxProcs(logger)
-	SetupProfiling(logger)
-	ctx, sdownSignals := SetupSignals(logger)
-	client := kubeclient.From(CreateKubernetesClient(logger), kubeclient.WithTracing())
+	logger := setupLogger()
+	showVersion(logger)
+	sdownMaxProcs := setupMaxProcs(logger)
+	setupProfiling(logger)
+	ctx, sdownSignals := setupSignals(logger)
+	client := kubeclient.From(createKubernetesClient(logger), kubeclient.WithTracing())
 	metricsConfiguration := startMetricsConfigController(ctx, logger, client)
 	metricsManager, sdownMetrics := SetupMetrics(ctx, logger, metricsConfiguration, client)
 	client = client.WithMetrics(metricsManager, metrics.KubeClient)
@@ -48,6 +49,10 @@ func Setup(config Configuration, name string, skipResourceFilters bool) (context.Context, SetupResult, context.CancelFunc) {
 	if config.UsesRegistryClient() {
 		registryClient = setupRegistryClient(ctx, logger, client)
 	}
+	var leaderElectionClient kubernetes.Interface
+	if config.UsesLeaderElection() {
+		leaderElectionClient = createKubernetesClient(logger, kubeclient.WithMetrics(metricsManager, metrics.KubeClient), kubeclient.WithTracing())
+	}
 	return ctx,
 		SetupResult{
 			Logger: logger,
@@ -55,6 +60,7 @@ func Setup(config Configuration, name string, skipResourceFilters bool) (context.Context, SetupResult, context.CancelFunc) {
 			MetricsConfiguration: metricsConfiguration,
 			MetricsManager:       metricsManager,
 			KubeClient:           client,
+			LeaderElectionClient: leaderElectionClient,
 			RegistryClient:       registryClient,
 		},
 		shutdown(logger.WithName("shutdown"), sdownMaxProcs, sdownMetrics, sdownTracing, sdownSignals)

@@ -11,7 +11,7 @@ import (
 
 var Context = context.Background()
 
-func SetupSignals(logger logr.Logger) (context.Context, context.CancelFunc) {
+func setupSignals(logger logr.Logger) (context.Context, context.CancelFunc) {
 	logger = logger.WithName("signals")
 	logger.Info("setup signals...")
 	return signal.NotifyContext(Context, os.Interrupt, syscall.SIGTERM)

@@ -5,7 +5,7 @@ import (
 	"github.com/kyverno/kyverno/pkg/version"
 )
 
-func ShowVersion(logger logr.Logger) {
+func showVersion(logger logr.Logger) {
 	logger = logger.WithName("version")
 	version.PrintVersionInfo(logger)
 }

@@ -17,7 +17,6 @@ import (
 	apiserverclient "github.com/kyverno/kyverno/pkg/clients/apiserver"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"
 	dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
-	kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
 	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/controllers/certmanager"
@@ -192,7 +191,6 @@ func main() {
 		webhookRegistrationTimeout   time.Duration
 		admissionReports             bool
 		dumpPayload                  bool
-		leaderElectionRetryPeriod    time.Duration
 		servicePort                  int
 		backgroundServiceAccountName string
 	)
@@ -207,7 +205,6 @@ func main() {
 	flagset.Func(toggle.ProtectManagedResourcesFlagName, toggle.ProtectManagedResourcesDescription, toggle.ProtectManagedResources.Parse)
 	flagset.Func(toggle.ForceFailurePolicyIgnoreFlagName, toggle.ForceFailurePolicyIgnoreDescription, toggle.ForceFailurePolicyIgnore.Parse)
 	flagset.BoolVar(&admissionReports, "admissionReports", true, "Enable or disable admission reports.")
-	flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
 	flagset.IntVar(&servicePort, "servicePort", 443, "Port used by the Kyverno Service resource and for webhook configurations.")
 	flagset.StringVar(&backgroundServiceAccountName, "backgroundServiceAccountName", "", "Background service account name.")
 	// config
@@ -220,6 +217,7 @@ func main() {
 		internal.WithConfigMapCaching(),
 		internal.WithCosign(),
 		internal.WithRegistryClient(),
+		internal.WithLeaderElection(),
 		internal.WithFlagSets(flagset),
 	)
 	// parse flags
@@ -230,7 +228,6 @@ func main() {
 	// show version
 	showWarnings(setup.Logger)
 	// create instrumented clients
-	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
 	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), kyvernoclient.WithTracing())
 	dynamicClient := internal.CreateDynamicClient(setup.Logger, dynamicclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), dynamicclient.WithTracing())
 	apiserverClient := internal.CreateApiServerClient(setup.Logger, apiserverclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), apiserverclient.WithTracing())
@@ -343,9 +340,9 @@ func main() {
 		setup.Logger.WithName("leader-election"),
 		"kyverno",
 		config.KyvernoNamespace(),
-		leaderElectionClient,
+		setup.LeaderElectionClient,
 		config.KyvernoPodName(),
-		leaderElectionRetryPeriod,
+		internal.LeaderElectionRetryPeriod(),
 		func(ctx context.Context) {
 			logger := setup.Logger.WithName("leader")
 			// create leader factories

@@ -13,7 +13,6 @@ import (
 	kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"
 	dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
-	kubeclient "github.com/kyverno/kyverno/pkg/clients/kube"
 	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/kyverno"
 	metadataclient "github.com/kyverno/kyverno/pkg/clients/metadata"
 	"github.com/kyverno/kyverno/pkg/config"
@@ -159,17 +158,15 @@ func createrLeaderControllers(
 func main() {
 	var (
-		leaderElectionRetryPeriod time.Duration
-		backgroundScan            bool
-		admissionReports          bool
-		reportsChunkSize          int
-		backgroundScanWorkers     int
-		backgroundScanInterval    time.Duration
-		maxQueuedEvents           int
-		skipResourceFilters       bool
+		backgroundScan         bool
+		admissionReports       bool
+		reportsChunkSize       int
+		backgroundScanWorkers  int
+		backgroundScanInterval time.Duration
+		maxQueuedEvents        int
+		skipResourceFilters    bool
 	)
 	flagset := flag.NewFlagSet("reports-controller", flag.ExitOnError)
-	flagset.DurationVar(&leaderElectionRetryPeriod, "leaderElectionRetryPeriod", leaderelection.DefaultRetryPeriod, "Configure leader election retry period.")
 	flagset.BoolVar(&backgroundScan, "backgroundScan", true, "Enable or disable backgound scan.")
 	flagset.BoolVar(&admissionReports, "admissionReports", true, "Enable or disable admission reports.")
 	flagset.IntVar(&reportsChunkSize, "reportsChunkSize", 1000, "Max number of results in generated reports, reports will be split accordingly if there are more results to be stored.")
@@ -187,6 +184,7 @@ func main() {
 		internal.WithConfigMapCaching(),
 		internal.WithCosign(),
 		internal.WithRegistryClient(),
+		internal.WithLeaderElection(),
 		internal.WithFlagSets(flagset),
 	)
 	// parse flags
@@ -195,7 +193,6 @@ func main() {
 	ctx, setup, sdown := internal.Setup(appConfig, "kyverno-reports-controller", skipResourceFilters)
 	defer sdown()
 	// create instrumented clients
-	leaderElectionClient := internal.CreateKubernetesClient(setup.Logger, kubeclient.WithMetrics(setup.MetricsManager, metrics.KubeClient), kubeclient.WithTracing())
 	kyvernoClient := internal.CreateKyvernoClient(setup.Logger, kyvernoclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), kyvernoclient.WithTracing())
 	metadataClient := internal.CreateMetadataClient(setup.Logger, metadataclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), metadataclient.WithTracing())
 	dynamicClient := internal.CreateDynamicClient(setup.Logger, dynamicclient.WithMetrics(setup.MetricsManager, metrics.KyvernoClient), dynamicclient.WithTracing())
@@ -240,9 +237,9 @@ func main() {
 		setup.Logger.WithName("leader-election"),
 		"kyverno-reports-controller",
 		config.KyvernoNamespace(),
-		leaderElectionClient,
+		setup.LeaderElectionClient,
 		config.KyvernoPodName(),
-		leaderElectionRetryPeriod,
+		internal.LeaderElectionRetryPeriod(),
 		func(ctx context.Context) {
 			logger := setup.Logger.WithName("leader")
 			// create leader factories