Leader Election for initContainer (#2489)
* Local build
* Leader Election for initContainer
* Lease deletion
* Use wrc client
* log error out

Signed-off-by: Kumar Mallikarjuna <kumarmallikarjuna1@gmail.com>
Signed-off-by: ShutingZhao <shutting06@gmail.com>
Co-authored-by: ShutingZhao <shutting06@gmail.com>
This commit is contained in:
parent efe0c28f6b
commit 254be4c1d3
4 changed files with 73 additions and 14 deletions
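In short, this change gates the kyvernopre cleanup behind leader election and a Kubernetes Lease: the elected leader checks for a Lease named "kyvernopre-lock", runs the cleanup pipeline only when that Lease is absent, creates the Lease once it finishes, and the webhook Register.Remove path deletes the Lease again during teardown. The snippet below is a condensed, hypothetical sketch of that gate using the client-go coordination API; the helper name, error wrapping, and cleanup callback are illustrative, and unlike the real main() shown in the diff (which logs and calls os.Exit) it returns errors.

package main

import (
	"context"
	"fmt"

	coordv1 "k8s.io/api/coordination/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// runCleanupOnce is a hypothetical helper: cleanup() stands in for the
// request pipeline that the real kyvernopre main() runs.
func runCleanupOnce(ctx context.Context, kube kubernetes.Interface, namespace string, cleanup func() error) error {
	leases := kube.CoordinationV1().Leases(namespace)

	// If the lock already exists, a previous leader finished the clean-up.
	if _, err := leases.Get(ctx, "kyvernopre-lock", metav1.GetOptions{}); err == nil {
		return nil
	} else if !apierrors.IsNotFound(err) {
		return fmt.Errorf("checking lease: %w", err)
	}

	if err := cleanup(); err != nil {
		return fmt.Errorf("cleanup failed: %w", err)
	}

	// Record completion so later kyvernopre runs skip the work.
	lease := &coordv1.Lease{ObjectMeta: metav1.ObjectMeta{Name: "kyvernopre-lock"}}
	if _, err := leases.Create(ctx, lease, metav1.CreateOptions{}); err != nil {
		return fmt.Errorf("creating lease: %w", err)
	}
	return nil
}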
Makefile (5 changes)

@@ -62,6 +62,11 @@ docker-push-initContainer:
 	@docker buildx build --file $(PWD)/$(INITC_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(INITC_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS)
 	@docker buildx build --file $(PWD)/$(INITC_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(INITC_IMAGE):latest . --build-arg LD_FLAGS=$(LD_FLAGS)
 
+docker-build-initContainer-local:
+	CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(INITC_PATH)/kyvernopre -tags $(TAGS) -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
+	@docker build -f $(PWD)/$(INITC_PATH)/localDockerfile -t $(REPO)/$(INITC_IMAGE):$(IMAGE_TAG) $(PWD)/$(INITC_PATH)
+	@docker tag $(REPO)/$(INITC_IMAGE):$(IMAGE_TAG) $(REPO)/$(INITC_IMAGE):latest
+
 ##################################
 # KYVERNO CONTAINER
 ##################################
cmd/initContainer/localDockerfile (new file, 4 additions)

@@ -0,0 +1,4 @@
+FROM scratch
+ADD kyvernopre /kyvernopre
+USER 10001
+ENTRYPOINT ["/kyvernopre"]
cmd/initContainer/main.go

@@ -14,8 +14,10 @@ import (
 	kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
 	"github.com/kyverno/kyverno/pkg/config"
 	client "github.com/kyverno/kyverno/pkg/dclient"
+	"github.com/kyverno/kyverno/pkg/leaderelection"
 	"github.com/kyverno/kyverno/pkg/signal"
 	"github.com/kyverno/kyverno/pkg/utils"
+	coord "k8s.io/api/coordination/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	rest "k8s.io/client-go/rest"

@@ -95,29 +97,72 @@ func main() {
 		{clusterPolicyViolation, ""},
 	}
 
+	kubeClientLeaderElection, err := utils.NewKubeClient(clientConfig)
+	if err != nil {
+		setupLog.Error(err, "Failed to create kubernetes client")
+		os.Exit(1)
+	}
+
+	ctx, cancel := context.WithCancel(context.Background())
+
+	go func() {
+		<-stopCh
+		cancel()
+	}()
+
 	done := make(chan struct{})
 	defer close(done)
 	failure := false
-	// use pipline to pass request to cleanup resources
-	// generate requests
-	in := gen(done, stopCh, requests...)
-	// process requests
-	// processing routine count : 2
-	p1 := process(client, pclient, done, stopCh, in)
-	p2 := process(client, pclient, done, stopCh, in)
-	// merge results from processing routines
-	for err := range merge(done, stopCh, p1, p2) {
-		if err != nil {
-			failure = true
-			log.Log.Error(err, "failed to cleanup resource")
-		}
-	}
 
-	// if there is any failure then we fail process
-	if failure {
-		log.Log.Info("failed to cleanup prior configurations")
-		os.Exit(1)
-	}
+	run := func() {
+		_, err := kubeClientLeaderElection.CoordinationV1().Leases(getKyvernoNameSpace()).Get(ctx, "kyvernopre-lock", v1.GetOptions{})
+		if err != nil {
+			log.Log.Info("Lease 'kyvernopre-lock' not found. Starting clean-up...")
+		} else {
+			log.Log.Info("Clean-up complete. Leader exiting...")
+			os.Exit(0)
+		}
+
+		// use pipline to pass request to cleanup resources
+		// generate requests
+		in := gen(done, stopCh, requests...)
+		// process requests
+		// processing routine count : 2
+		p1 := process(client, pclient, done, stopCh, in)
+		p2 := process(client, pclient, done, stopCh, in)
+		// merge results from processing routines
+		for err := range merge(done, stopCh, p1, p2) {
+			if err != nil {
+				failure = true
+				log.Log.Error(err, "failed to cleanup resource")
+			}
+		}
+		// if there is any failure then we fail process
+		if failure {
+			log.Log.Info("failed to cleanup prior configurations")
+			os.Exit(1)
+		}
+
+		lease := coord.Lease{}
+		lease.ObjectMeta.Name = "kyvernopre-lock"
+		_, err = kubeClientLeaderElection.CoordinationV1().Leases(getKyvernoNameSpace()).Create(ctx, &lease, v1.CreateOptions{})
+		if err != nil {
+			log.Log.Info("Failed to create lease 'kyvernopre-lock'")
+		}
+
+		log.Log.Info("Clean-up complete. Leader exiting...")
+
+		os.Exit(0)
+	}
+
+	le, err := leaderelection.New("kyvernopre", getKyvernoNameSpace(), kubeClientLeaderElection, run, nil, log.Log.WithName("kyvernopre/LeaderElection"))
+	if err != nil {
+		setupLog.Error(err, "failed to elect a leader")
+		os.Exit(1)
+	}
+
+	le.Run(ctx)
 }
 
 func executeRequest(client *client.Client, pclient *kyvernoclient.Clientset, req request) error {
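The run closure above is handed to Kyverno's pkg/leaderelection wrapper and only executes on the instance that wins the election. For orientation, here is a standalone sketch of the client-go leader-election API that such a wrapper is typically built on; this is not the wrapper's implementation, and the lock name, namespace, identity, and timing values are placeholder assumptions.

package main

import (
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/leaderelection"
	"k8s.io/client-go/tools/leaderelection/resourcelock"
)

// runAsLeader blocks and runs work only while this instance holds the lock;
// it is an illustrative stand-in for what le.Run(ctx) does above.
func runAsLeader(ctx context.Context, kube kubernetes.Interface, id string, work func(context.Context)) {
	lock := &resourcelock.LeaseLock{
		LeaseMeta:  metav1.ObjectMeta{Name: "kyvernopre", Namespace: "kyverno"}, // placeholder names
		Client:     kube.CoordinationV1(),
		LockConfig: resourcelock.ResourceLockConfig{Identity: id},
	}
	leaderelection.RunOrDie(ctx, leaderelection.LeaderElectionConfig{
		Lock:            lock,
		LeaseDuration:   15 * time.Second, // placeholder timings
		RenewDeadline:   10 * time.Second,
		RetryPeriod:     2 * time.Second,
		ReleaseOnCancel: true,
		Callbacks: leaderelection.LeaderCallbacks{
			OnStartedLeading: work,      // the cleanup closure in this commit
			OnStoppedLeading: func() {}, // no-op for the sketch
		},
	})
}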
@@ -172,6 +172,11 @@ func (wrc *Register) Remove(cleanUp chan<- struct{}) {
 
 	wrc.removeWebhookConfigurations()
 	wrc.removeSecrets()
+	err := wrc.client.DeleteResource("coordination.k8s.io/v1", "Lease", config.KyvernoNamespace, "kyvernopre-lock", false)
+	if err != nil && errorsapi.IsNotFound(err) {
+		wrc.log.WithName("cleanup").Error(err, "failed to clean up Lease lock")
+	}
+
 }
 
 // +deprecated
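This hunk deletes the "kyvernopre-lock" Lease when the webhook configurations and secrets are removed, so the next kyvernopre run performs its cleanup again rather than skipping it. The real code goes through Kyverno's dynamic dclient; a typed client-go equivalent would look roughly like this hypothetical helper.

package main

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deleteKyvernopreLock is a hypothetical typed equivalent of the
// wrc.client.DeleteResource(...) call in the diff above.
func deleteKyvernopreLock(ctx context.Context, kube kubernetes.Interface, namespace string) error {
	return kube.CoordinationV1().Leases(namespace).Delete(ctx, "kyvernopre-lock", metav1.DeleteOptions{})
}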