
Reset policy status on termination ()

- reset policy status to false on termination
- retry reconciling policies when .status.ready=false


Signed-off-by: ShutingZhao <shuting@nirmata.com>
shuting 2022-07-27 16:45:06 +08:00 committed by GitHub
parent 975a2a21fa
commit 750b4b106c
3 changed files with 57 additions and 9 deletions
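
At a glance, the change replaces the old fire-and-forget webhook cleanup with a coordinated shutdown: the server derives a single "is Kyverno terminating" signal via ShouldCleanupKyvernoResource(), then runs webhook removal and policy-status reset as two goroutines joined by a sync.WaitGroup before signalling cleanUp. Below is a minimal, self-contained sketch of that coordination pattern; the stub type and print statements are illustrative only, not Kyverno's actual API.

package main

import (
	"fmt"
	"sync"
)

// registerStub stands in for the webhook Register; each cleanup task
// takes the shared WaitGroup and reports completion via Done.
type registerStub struct{}

func (r *registerStub) Remove(cleanupKyvernoResource bool, wg *sync.WaitGroup) {
	defer wg.Done()
	if cleanupKyvernoResource {
		fmt.Println("removing webhook configurations")
	}
}

func (r *registerStub) ResetPolicyStatus(kyvernoInTermination bool, wg *sync.WaitGroup) {
	defer wg.Done()
	if !kyvernoInTermination {
		return
	}
	fmt.Println("setting policy status ready=false")
}

func main() {
	reg := &registerStub{}
	terminating := true // in the diff below this comes from ShouldCleanupKyvernoResource()

	var wg sync.WaitGroup
	wg.Add(2) // one count per cleanup goroutine
	go reg.Remove(terminating, &wg)
	go reg.ResetPolicyStatus(terminating, &wg)
	wg.Wait() // block until both goroutines have called Done

	fmt.Println("cleanup finished; the HTTP server can now shut down")
}

Resetting statuses before exit pairs with the other half of the commit: reconcileWebhook now returns updateErr instead of nil when the webhook update fails, so policies left with .status.ready=false are reconciled again, as the second bullet of the commit message notes.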

View file

@@ -345,6 +345,7 @@ func (m *webhookConfigManager) reconcileWebhook(namespace, name string) error {
 	}
 	ready := true
+	var updateErr error
 	// build webhook only if auto-update is enabled, otherwise directly update status to ready
 	if m.autoUpdateWebhooks {
 		webhooks, err := m.buildWebhooks(namespace)
@@ -354,7 +355,7 @@ func (m *webhookConfigManager) reconcileWebhook(namespace, name string) error {
 		if err := m.updateWebhookConfig(webhooks); err != nil {
 			ready = false
-			logger.Error(err, "failed to update webhook configurations for policy")
+			updateErr = errors.Wrapf(err, "failed to update webhook configurations for policy")
 		}
 		// DELETION of the policy
@@ -370,7 +371,7 @@ func (m *webhookConfigManager) reconcileWebhook(namespace, name string) error {
 	if ready {
 		logger.Info("policy is ready to serve admission requests")
 	}
-	return nil
+	return updateErr
 }
 func (m *webhookConfigManager) getPolicy(namespace, name string) (kyvernov1.PolicyInterface, error) {

View file

@@ -40,8 +40,9 @@ const (
 // 5. Webhook Status Mutation
 type Register struct {
 	// clients
-	kubeClient   kubernetes.Interface
-	clientConfig *rest.Config
+	kubeClient    kubernetes.Interface
+	kyvernoClient kyvernoclient.Interface
+	clientConfig  *rest.Config
 	// listers
 	mwcLister admissionregistrationv1listers.MutatingWebhookConfigurationLister
@@ -84,6 +85,7 @@ func NewRegister(
 	register := &Register{
 		clientConfig: clientConfig,
 		kubeClient: kubeClient,
+		kyvernoClient: kyvernoClient,
 		mwcLister: mwcInformer.Lister(),
 		vwcLister: vwcInformer.Lister(),
 		kDeplLister: kDeplInformer.Lister(),
@@ -161,17 +163,50 @@ func (wrc *Register) Check() error {
 }
 // Remove removes all webhook configurations
-func (wrc *Register) Remove(cleanUp chan<- struct{}) {
-	defer close(cleanUp)
+func (wrc *Register) Remove(cleanupKyvernoResource bool, wg *sync.WaitGroup) {
+	defer wg.Done()
 	// delete Lease object to let init container do the cleanup
 	if err := wrc.kubeClient.CoordinationV1().Leases(config.KyvernoNamespace()).Delete(context.TODO(), "kyvernopre-lock", metav1.DeleteOptions{}); err != nil && errorsapi.IsNotFound(err) {
 		wrc.log.WithName("cleanup").Error(err, "failed to clean up Lease lock")
 	}
-	if wrc.shouldCleanupKyvernoResource() {
+	if cleanupKyvernoResource {
 		wrc.removeWebhookConfigurations()
 	}
 }
+func (wrc *Register) ResetPolicyStatus(kyvernoInTermination bool, wg *sync.WaitGroup) {
+	defer wg.Done()
+	if !kyvernoInTermination {
+		return
+	}
+	logger := wrc.log.WithName("ResetPolicyStatus")
+	cpols, err := wrc.kyvernoClient.KyvernoV1().ClusterPolicies().List(context.TODO(), metav1.ListOptions{})
+	if err == nil {
+		for _, cpol := range cpols.Items {
+			cpol.Status.SetReady(false)
+			if _, err := wrc.kyvernoClient.KyvernoV1().ClusterPolicies().UpdateStatus(context.TODO(), &cpol, metav1.UpdateOptions{}); err != nil {
+				logger.Error(err, "failed to set ClusterPolicy status READY=false", "name", cpol.GetName())
+			}
+		}
+	} else {
+		logger.Error(err, "failed to list clusterpolicies")
+	}
+	pols, err := wrc.kyvernoClient.KyvernoV1().Policies(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
+	if err == nil {
+		for _, pol := range pols.Items {
+			pol.Status.SetReady(false)
+			if _, err := wrc.kyvernoClient.KyvernoV1().Policies(pol.GetNamespace()).UpdateStatus(context.TODO(), &pol, metav1.UpdateOptions{}); err != nil {
+				logger.Error(err, "failed to set Policy status READY=false", "namespace", pol.GetNamespace(), "name", pol.GetName())
+			}
+		}
+	} else {
+		logger.Error(err, "failed to list namespaced policies")
+	}
+}
 // GetWebhookTimeOut returns the value of webhook timeout
 func (wrc *Register) GetWebhookTimeOut() time.Duration {
 	return time.Duration(wrc.timeoutSeconds)
@@ -520,7 +555,7 @@ func (wrc *Register) updateValidatingWebhookConfiguration(targetConfig *admissio
 	return nil
 }
-func (wrc *Register) shouldCleanupKyvernoResource() bool {
+func (wrc *Register) ShouldCleanupKyvernoResource() bool {
 	logger := wrc.log.WithName("cleanupKyvernoResource")
 	deploy, err := wrc.kubeClient.AppsV1().Deployments(config.KyvernoNamespace()).Get(context.TODO(), config.KyvernoDeploymentName(), metav1.GetOptions{})
 	if err != nil {

View file

@@ -4,6 +4,7 @@ import (
 	"context"
 	"crypto/tls"
 	"net/http"
+	"sync"
 	"time"
 	"github.com/go-logr/logr"
@@ -94,7 +95,7 @@ func (s *server) Run(stopCh <-chan struct{}) {
 }
 func (s *server) Stop(ctx context.Context) {
-	go s.webhookRegister.Remove(s.cleanUp)
+	s.cleanup(ctx)
 	err := s.server.Shutdown(ctx)
 	if err != nil {
 		logger.Error(err, "shutting down server")
@@ -105,6 +106,17 @@ func (s *server) Stop(ctx context.Context) {
 	}
 }
+func (s *server) cleanup(ctx context.Context) {
+	cleanupKyvernoResource := s.webhookRegister.ShouldCleanupKyvernoResource()
+	var wg sync.WaitGroup
+	wg.Add(2)
+	go s.webhookRegister.Remove(cleanupKyvernoResource, &wg)
+	go s.webhookRegister.ResetPolicyStatus(cleanupKyvernoResource, &wg)
+	wg.Wait()
+	close(s.cleanUp)
+}
 func filter(configuration config.Configuration, inner handlers.AdmissionHandler) handlers.AdmissionHandler {
 	return handlers.Filter(configuration, inner)
 }