kyverno/pkg/leaderelection/leaderelection.go
shuting e9a972a362
feat: HA (#1931)
* Fix Dev setup

* webhook monitor - start webhook monitor in main process

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leaderelection

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* - add isLeader; - update to use configmap lock

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* - add initialization method - add methods to get attributes

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* address comments

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* remove newContext in runLeaderElection

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to GenerateController

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* skip processing for non-leaders

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* skip processing for non-leaders

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* add leader election to generate cleanup controller

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* Gracefully drain request

* HA - Webhook Register / Webhook Monitor / Certificate Renewer (#1920)

* enable leader election for webhook register

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* extract certManager to its own process

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* leader election for cert manager

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* certManager - init certs by the leader

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to webhook monitor

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* update log message

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to policy controller

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to policy report controller

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* rebuild leader election config

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* start informers in leaderelection

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* start policy informers in main

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* enable leader election in main

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* move eventHandler to the leader election start method

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* address reviewdog comments

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add clusterrole leaderelection

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* fixed generate flow (#1936)

Signed-off-by: NoSkillGirl <singhpooja240393@gmail.com>

* - init separate kubeclient for leaderelection - fix webhook monitor

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* address reviewdog comments

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* cleanup Kyverno managed resources on stopLeading

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* tag v1.4.0-beta1

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* fix cleanup process on Kyverno stops

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* bump kind to 0.11.0, k8s v1.21 (#1980)

Co-authored-by: vyankatesh <vyankatesh@neualto.com>
Co-authored-by: vyankatesh <vyankateshkd@gmail.com>
Co-authored-by: Jim Bugwadia <jim@nirmata.com>
Co-authored-by: Pooja Singh <36136335+NoSkillGirl@users.noreply.github.com>
2021-06-08 12:37:19 -07:00

package leaderelection

import (
	"context"
	"os"
	"sync/atomic"
	"time"

	"github.com/go-logr/logr"
	"github.com/pkg/errors"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)
// Interface defines the leader election behavior exposed to callers.
type Interface interface {
	// Run is a blocking call that runs a leader election
	Run(ctx context.Context)

	// ID returns this instance's unique identifier
	ID() string

	// Name returns the name of the leader election
	Name() string

	// Namespace is the Kubernetes namespace used to coordinate the leader election
	Namespace() string

	// IsLeader indicates if this instance is the leader
	IsLeader() bool

	// GetLeader returns the leader ID
	GetLeader() string
}
// Config implements Interface and carries the state of a single leader election instance.
type Config struct {
	name              string
	namespace         string
	id                string
	startWork         func()
	stopWork          func()
	kubeClient        kubernetes.Interface
	lock              resourcelock.Interface
	leaderElectionCfg leaderelection.LeaderElectionConfig
	leaderElector     *leaderelection.LeaderElector
	isLeader          int64
	log               logr.Logger
}
// New builds a leader election instance backed by a Leases resource lock in the given
// namespace. startWork is invoked when leadership is acquired and stopWork when it is lost.
func New(name, namespace string, kubeClient kubernetes.Interface, startWork, stopWork func(), log logr.Logger) (Interface, error) {
	id, err := os.Hostname()
	if err != nil {
		return nil, errors.Wrapf(err, "error getting host name: %s/%s", namespace, name)
	}
	id = id + "_" + string(uuid.NewUUID())

	lock, err := resourcelock.New(
		resourcelock.LeasesResourceLock,
		namespace,
		name,
		kubeClient.CoreV1(),
		kubeClient.CoordinationV1(),
		resourcelock.ResourceLockConfig{
			Identity: id,
		},
	)
	if err != nil {
		return nil, errors.Wrapf(err, "error initializing resource lock: %s/%s", namespace, name)
	}

	e := &Config{
		name:       name,
		namespace:  namespace,
		kubeClient: kubeClient,
		lock:       lock,
		startWork:  startWork,
		stopWork:   stopWork,
		log:        log,
	}

	e.leaderElectionCfg = leaderelection.LeaderElectionConfig{
		Lock:            e.lock,
		ReleaseOnCancel: true,
		LeaseDuration:   15 * time.Second,
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: func(ctx context.Context) {
				atomic.StoreInt64(&e.isLeader, 1)
				e.log.WithValues("id", e.lock.Identity()).Info("started leading")
				if e.startWork != nil {
					e.startWork()
				}
			},
			OnStoppedLeading: func() {
				atomic.StoreInt64(&e.isLeader, 0)
				e.log.WithValues("id", e.lock.Identity()).Info("leadership lost, stopped leading")
				if e.stopWork != nil {
					e.stopWork()
				}
			},
			OnNewLeader: func(identity string) {
				if identity == e.lock.Identity() {
					return
				}
				e.log.WithValues("current id", e.lock.Identity(), "leader", identity).Info("another instance has been elected as leader")
			},
		},
	}

	e.leaderElector, err = leaderelection.NewLeaderElector(e.leaderElectionCfg)
	if err != nil {
		e.log.Error(err, "failed to create leaderElector")
		os.Exit(1)
	}
	if e.leaderElectionCfg.WatchDog != nil {
		e.leaderElectionCfg.WatchDog.SetLeaderElection(e.leaderElector)
	}

	return e, nil
}
func (e *Config) Name() string {
	return e.name
}

func (e *Config) Namespace() string {
	return e.namespace
}

func (e *Config) ID() string {
	return e.lock.Identity()
}

func (e *Config) IsLeader() bool {
	return atomic.LoadInt64(&e.isLeader) == 1
}

func (e *Config) GetLeader() string {
	return e.leaderElector.GetLeader()
}

func (e *Config) Run(ctx context.Context) {
	e.leaderElector.Run(ctx)
}