mirror of
https://github.com/kyverno/kyverno.git
synced 2025-03-31 03:45:17 +00:00
Reset policy status on termination (#4269)
- Reset policy status to `false` on termination.
- Retry reconciling policies when `.status.ready=false`.

Signed-off-by: ShutingZhao <shuting@nirmata.com>
This commit is contained in:
parent
975a2a21fa
commit
750b4b106c
3 changed files with 57 additions and 9 deletions
|
@ -345,6 +345,7 @@ func (m *webhookConfigManager) reconcileWebhook(namespace, name string) error {
|
|||
}
|
||||
|
||||
ready := true
|
||||
var updateErr error
|
||||
// build webhook only if auto-update is enabled, otherwise directly update status to ready
|
||||
if m.autoUpdateWebhooks {
|
||||
webhooks, err := m.buildWebhooks(namespace)
|
||||
|
@ -354,7 +355,7 @@ func (m *webhookConfigManager) reconcileWebhook(namespace, name string) error {
|
|||
|
||||
if err := m.updateWebhookConfig(webhooks); err != nil {
|
||||
ready = false
|
||||
logger.Error(err, "failed to update webhook configurations for policy")
|
||||
updateErr = errors.Wrapf(err, "failed to update webhook configurations for policy")
|
||||
}
|
||||
|
||||
// DELETION of the policy
|
||||
|
@ -370,7 +371,7 @@ func (m *webhookConfigManager) reconcileWebhook(namespace, name string) error {
|
|||
if ready {
|
||||
logger.Info("policy is ready to serve admission requests")
|
||||
}
|
||||
return nil
|
||||
return updateErr
|
||||
}
|
||||
|
||||
func (m *webhookConfigManager) getPolicy(namespace, name string) (kyvernov1.PolicyInterface, error) {
|
||||
|
|
|
@ -41,6 +41,7 @@ const (
|
|||
type Register struct {
|
||||
// clients
|
||||
kubeClient kubernetes.Interface
|
||||
kyvernoClient kyvernoclient.Interface
|
||||
clientConfig *rest.Config
|
||||
|
||||
// listers
|
||||
|
@ -84,6 +85,7 @@ func NewRegister(
|
|||
register := &Register{
|
||||
clientConfig: clientConfig,
|
||||
kubeClient: kubeClient,
|
||||
kyvernoClient: kyvernoClient,
|
||||
mwcLister: mwcInformer.Lister(),
|
||||
vwcLister: vwcInformer.Lister(),
|
||||
kDeplLister: kDeplInformer.Lister(),
|
||||
|
@ -161,17 +163,50 @@ func (wrc *Register) Check() error {
|
|||
}
|
||||
|
||||
// Remove removes all webhook configurations
|
||||
func (wrc *Register) Remove(cleanUp chan<- struct{}) {
|
||||
defer close(cleanUp)
|
||||
func (wrc *Register) Remove(cleanupKyvernoResource bool, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// delete Lease object to let init container do the cleanup
|
||||
if err := wrc.kubeClient.CoordinationV1().Leases(config.KyvernoNamespace()).Delete(context.TODO(), "kyvernopre-lock", metav1.DeleteOptions{}); err != nil && errorsapi.IsNotFound(err) {
|
||||
wrc.log.WithName("cleanup").Error(err, "failed to clean up Lease lock")
|
||||
}
|
||||
if wrc.shouldCleanupKyvernoResource() {
|
||||
if cleanupKyvernoResource {
|
||||
wrc.removeWebhookConfigurations()
|
||||
}
|
||||
}
|
||||
|
||||
func (wrc *Register) ResetPolicyStatus(kyvernoInTermination bool, wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
|
||||
if !kyvernoInTermination {
|
||||
return
|
||||
}
|
||||
|
||||
logger := wrc.log.WithName("ResetPolicyStatus")
|
||||
cpols, err := wrc.kyvernoClient.KyvernoV1().ClusterPolicies().List(context.TODO(), metav1.ListOptions{})
|
||||
if err == nil {
|
||||
for _, cpol := range cpols.Items {
|
||||
cpol.Status.SetReady(false)
|
||||
if _, err := wrc.kyvernoClient.KyvernoV1().ClusterPolicies().UpdateStatus(context.TODO(), &cpol, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "failed to set ClusterPolicy status READY=false", "name", cpol.GetName())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.Error(err, "failed to list clusterpolicies")
|
||||
}
|
||||
|
||||
pols, err := wrc.kyvernoClient.KyvernoV1().Policies(metav1.NamespaceAll).List(context.TODO(), metav1.ListOptions{})
|
||||
if err == nil {
|
||||
for _, pol := range pols.Items {
|
||||
pol.Status.SetReady(false)
|
||||
if _, err := wrc.kyvernoClient.KyvernoV1().Policies(pol.GetNamespace()).UpdateStatus(context.TODO(), &pol, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "failed to set Policy status READY=false", "namespace", pol.GetNamespace(), "name", pol.GetName())
|
||||
}
|
||||
}
|
||||
} else {
|
||||
logger.Error(err, "failed to list namespaced policies")
|
||||
}
|
||||
}
|
||||
|
||||
// GetWebhookTimeOut returns the value of webhook timeout
|
||||
func (wrc *Register) GetWebhookTimeOut() time.Duration {
|
||||
return time.Duration(wrc.timeoutSeconds)
|
||||
|
@ -520,7 +555,7 @@ func (wrc *Register) updateValidatingWebhookConfiguration(targetConfig *admissio
|
|||
return nil
|
||||
}
|
||||
|
||||
func (wrc *Register) shouldCleanupKyvernoResource() bool {
|
||||
func (wrc *Register) ShouldCleanupKyvernoResource() bool {
|
||||
logger := wrc.log.WithName("cleanupKyvernoResource")
|
||||
deploy, err := wrc.kubeClient.AppsV1().Deployments(config.KyvernoNamespace()).Get(context.TODO(), config.KyvernoDeploymentName(), metav1.GetOptions{})
|
||||
if err != nil {
|
||||
|
|
|
@ -4,6 +4,7 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
|
@ -94,7 +95,7 @@ func (s *server) Run(stopCh <-chan struct{}) {
|
|||
}
|
||||
|
||||
func (s *server) Stop(ctx context.Context) {
|
||||
go s.webhookRegister.Remove(s.cleanUp)
|
||||
s.cleanup(ctx)
|
||||
err := s.server.Shutdown(ctx)
|
||||
if err != nil {
|
||||
logger.Error(err, "shutting down server")
|
||||
|
@ -105,6 +106,17 @@ func (s *server) Stop(ctx context.Context) {
|
|||
}
|
||||
}
|
||||
|
||||
func (s *server) cleanup(ctx context.Context) {
|
||||
cleanupKyvernoResource := s.webhookRegister.ShouldCleanupKyvernoResource()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(2)
|
||||
go s.webhookRegister.Remove(cleanupKyvernoResource, &wg)
|
||||
go s.webhookRegister.ResetPolicyStatus(cleanupKyvernoResource, &wg)
|
||||
wg.Wait()
|
||||
close(s.cleanUp)
|
||||
}
|
||||
|
||||
func filter(configuration config.Configuration, inner handlers.AdmissionHandler) handlers.AdmissionHandler {
|
||||
return handlers.Filter(configuration, inner)
|
||||
}
|
||||
|
|
Loading…
Add table
Reference in a new issue