Mirror of https://github.com/kyverno/kyverno.git

refactor: use pod name as leader id (#4680)

* refactor: use pod name as leader id

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* fix manifests

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* makefile

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* leader client

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Charles-Edouard Brétéché authored on 2022-09-26 18:25:27 +02:00, committed by GitHub
parent 665e513c5e
commit 481a09823f
7 changed files with 5846 additions and 74 deletions

@@ -84,6 +84,10 @@ spec:
             valueFrom:
               fieldRef:
                 fieldPath: metadata.namespace
+          - name: KYVERNO_POD_NAME
+            valueFrom:
+              fieldRef:
+                fieldPath: metadata.name
           - name: KYVERNO_DEPLOYMENT
             value: {{ template "kyverno.fullname" . }}
 {{- with .Values.envVarsInit }}
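The block added above uses the Kubernetes Downward API to expose the pod's own name to the container as KYVERNO_POD_NAME. The Go changes below read it through config.KyvernoPodName(); that helper's body is not part of this diff, but a minimal sketch, assuming a plain environment lookup, could look like:

package config

import "os"

// KyvernoPodName returns the pod name injected via the Downward API.
// Hypothetical sketch: the real helper may cache or validate the value.
func KyvernoPodName() string {
	return os.Getenv("KYVERNO_POD_NAME")
}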

@@ -181,7 +181,7 @@ func main() {
 		os.Exit(0)
 	}
-	le, err := leaderelection.New("kyvernopre", config.KyvernoNamespace(), kubeClient, run, nil, log.Log.WithName("kyvernopre/LeaderElection"))
+	le, err := leaderelection.New("kyvernopre", config.KyvernoNamespace(), kubeClient, config.KyvernoPodName(), run, nil, log.Log.WithName("kyvernopre/LeaderElection"))
 	if err != nil {
 		setupLog.Error(err, "failed to elect a leader")
 		os.Exit(1)

@@ -178,7 +178,12 @@ func main() {
 		setupLog.Error(err, "Failed to create dynamic client")
 		os.Exit(1)
 	}
+	// The leader queries/updates the lease object quite frequently. So we use a separate kube-client to eliminate the throttle issue
+	kubeClientLeaderElection, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		setupLog.Error(err, "Failed to create kubernetes leader client")
+		os.Exit(1)
+	}
 	// sanity checks
 	if !utils.CRDsInstalled(dynamicClient.Discovery()) {
 		setupLog.Error(fmt.Errorf("CRDs not installed"), "Failed to access Kyverno CRDs")
@@ -434,7 +439,7 @@ func main() {
 	}()
 	// webhookconfigurations are registered by the leader only
-	webhookRegisterLeader, err := leaderelection.New("webhook-register", config.KyvernoNamespace(), kubeClient, registerWebhookConfigurations, nil, log.Log.WithName("webhookRegister/LeaderElection"))
+	webhookRegisterLeader, err := leaderelection.New("webhook-register", config.KyvernoNamespace(), kubeClient, config.KyvernoPodName(), registerWebhookConfigurations, nil, log.Log.WithName("webhookRegister/LeaderElection"))
 	if err != nil {
 		setupLog.Error(err, "failed to elect a leader")
 		os.Exit(1)
@@ -487,12 +492,6 @@ func main() {
 		go prgen.Run(1, stopCh)
 	}
-	kubeClientLeaderElection, err := kubernetes.NewForConfig(clientConfig)
-	if err != nil {
-		setupLog.Error(err, "Failed to create kubernetes client")
-		os.Exit(1)
-	}
 	// cleanup Kyverno managed resources followed by webhook shutdown
 	// No need to exit here, as server.Stop(ctx) closes the cleanUp
 	// chan, thus the main process exits.
@@ -502,7 +501,7 @@ func main() {
 		server.Stop(c)
 	}
-	le, err := leaderelection.New("kyverno", config.KyvernoNamespace(), kubeClientLeaderElection, run, stop, log.Log.WithName("kyverno/LeaderElection"))
+	le, err := leaderelection.New("kyverno", config.KyvernoNamespace(), kubeClientLeaderElection, config.KyvernoPodName(), run, stop, log.Log.WithName("kyverno/LeaderElection"))
 	if err != nil {
 		setupLog.Error(err, "failed to elect a leader")
 		os.Exit(1)
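Taken together, these hunks create the dedicated kubeClientLeaderElection clientset up front and drop its old creation site further down. The rationale in the comment is client-side throttling: client-go rate-limits each clientset separately, so lease renewals that share a busy client can be delayed long enough for the leader to lose its lease. A sketch of the same pattern, with illustrative QPS/Burst tuning that this commit does not itself add:

import (
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// newLeaderElectionClient builds a clientset reserved for lease traffic.
// The QPS and Burst values are assumptions for illustration only.
func newLeaderElectionClient(base *rest.Config) (kubernetes.Interface, error) {
	cfg := rest.CopyConfig(base) // copy so tuning never affects the main client
	cfg.QPS = 20
	cfg.Burst = 50
	return kubernetes.NewForConfig(cfg)
}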

Two file diffs suppressed because they are too large.

@@ -63,6 +63,10 @@ spec:
         env:
         - name: METRICS_CONFIG
           value: kyverno-metrics
+        - name: KYVERNO_POD_NAME
+          valueFrom:
+            fieldRef:
+              fieldPath: metadata.name
         - name: KYVERNO_NAMESPACE
           valueFrom:
             fieldRef:

@@ -8,7 +8,6 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/pkg/errors"
-	"k8s.io/apimachinery/pkg/util/uuid"
 	"k8s.io/client-go/kubernetes"
 	"k8s.io/client-go/tools/leaderelection"
 	"k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -34,7 +33,7 @@ type Interface interface {
 	GetLeader() string
 }
-type Config struct {
+type config struct {
 	name      string
 	namespace string
 	startWork func()
@@ -47,14 +46,7 @@ type Config struct {
 	log logr.Logger
 }
-func New(name, namespace string, kubeClient kubernetes.Interface, startWork, stopWork func(), log logr.Logger) (Interface, error) {
-	id, err := os.Hostname()
-	if err != nil {
-		return nil, errors.Wrapf(err, "error getting host name: %s/%s", namespace, name)
-	}
-	id = id + "_" + string(uuid.NewUUID())
+func New(name, namespace string, kubeClient kubernetes.Interface, id string, startWork, stopWork func(), log logr.Logger) (Interface, error) {
 	lock, err := resourcelock.New(
 		resourcelock.LeasesResourceLock,
 		namespace,
@@ -68,17 +60,15 @@ func New(name, namespace string, kubeClient kubernetes.Interface, startWork, sto
 	if err != nil {
 		return nil, errors.Wrapf(err, "error initializing resource lock: %s/%s", namespace, name)
 	}
-	e := &Config{
+	e := &config{
 		name:       name,
 		namespace:  namespace,
 		kubeClient: kubeClient,
 		lock:       lock,
 		startWork:  startWork,
 		stopWork:   stopWork,
-		log:        log,
+		log:        log.WithValues("id", lock.Identity()),
 	}
 	e.leaderElectionCfg = leaderelection.LeaderElectionConfig{
 		Lock:            e.lock,
 		ReleaseOnCancel: true,
@@ -88,63 +78,58 @@ func New(name, namespace string, kubeClient kubernetes.Interface, startWork, sto
 		Callbacks: leaderelection.LeaderCallbacks{
 			OnStartedLeading: func(ctx context.Context) {
 				atomic.StoreInt64(&e.isLeader, 1)
-				e.log.WithValues("id", e.lock.Identity()).Info("started leading")
+				e.log.Info("started leading")
 				if e.startWork != nil {
 					e.startWork()
 				}
 			},
 			OnStoppedLeading: func() {
 				atomic.StoreInt64(&e.isLeader, 0)
-				e.log.WithValues("id", e.lock.Identity()).Info("leadership lost, stopped leading")
+				e.log.Info("leadership lost, stopped leading")
 				if e.stopWork != nil {
 					e.stopWork()
 				}
 			},
 			OnNewLeader: func(identity string) {
 				if identity == e.lock.Identity() {
-					return
+					e.log.Info("still leading")
+				} else {
+					e.log.Info("another instance has been elected as leader", "leader", identity)
 				}
-				e.log.WithValues("current id", e.lock.Identity(), "leader", identity).Info("another instance has been elected as leader")
 			},
 		},
 	}
 	e.leaderElector, err = leaderelection.NewLeaderElector(e.leaderElectionCfg)
 	if err != nil {
 		e.log.Error(err, "failed to create leaderElector")
 		os.Exit(1)
 	}
 	if e.leaderElectionCfg.WatchDog != nil {
 		e.leaderElectionCfg.WatchDog.SetLeaderElection(e.leaderElector)
 	}
 	return e, nil
 }
-func (e *Config) Name() string {
+func (e *config) Name() string {
 	return e.name
 }
-func (e *Config) Namespace() string {
+func (e *config) Namespace() string {
 	return e.namespace
 }
-func (e *Config) ID() string {
+func (e *config) ID() string {
 	return e.lock.Identity()
 }
-func (e *Config) IsLeader() bool {
+func (e *config) IsLeader() bool {
 	return atomic.LoadInt64(&e.isLeader) == 1
 }
-func (e *Config) GetLeader() string {
+func (e *config) GetLeader() string {
 	return e.leaderElector.GetLeader()
 }
-func (e *Config) Run(ctx context.Context) {
+func (e *config) Run(ctx context.Context) {
 	e.leaderElector.Run(ctx)
 }
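One practical effect of taking the identity from the caller: the pod name is stable for the pod's lifetime, so a restarted container resumes renewing its own lease immediately, whereas the old hostname + UUID identity changed on every start and left a stale lease that had to expire first. A usage sketch of the refactored package (New and Interface refer to this package; the client, logger, and callbacks are stand-ins):

// startLeaderElection wires the new signature end to end.
func startLeaderElection(ctx context.Context, kubeClient kubernetes.Interface, logger logr.Logger) (Interface, error) {
	le, err := New(
		"example",                     // lease name
		"kyverno",                     // lease namespace
		kubeClient,                    // client that gets/updates the Lease
		os.Getenv("KYVERNO_POD_NAME"), // pod name as the stable identity
		func() { logger.Info("lease acquired, starting leader-only work") },
		func() { logger.Info("lease lost, stopping leader-only work") },
		logger,
	)
	if err != nil {
		return nil, err
	}
	go le.Run(ctx) // Run blocks until ctx is cancelled
	return le, nil
}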