
feat: add cleanup handler (#5576)

* feat: add cleanup handler

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* cleanup handler

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* cleanup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* cleanup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* service

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* cleanup

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* fix

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

* codegen

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Charles-Edouard Brétéché 2022-12-07 11:30:47 +01:00 committed by GitHub
parent 72745f391a
commit a6aaffded3
15 changed files with 284 additions and 89 deletions

View file

@@ -730,8 +730,7 @@ kind-deploy-kyverno: $(HELM) kind-load-all ## Build images, load them in kind cl
--set image.tag=$(IMAGE_TAG_DEV) \
--set initImage.repository=$(LOCAL_KYVERNOPRE_IMAGE) \
--set initImage.tag=$(IMAGE_TAG_DEV) \
--set initContainer.extraArgs={--loggingFormat=text} \
--set "extraArgs={--loggingFormat=text}"
--values ./scripts/kyverno.yaml
@echo Restart kyverno pods... >&2
@kubectl rollout restart deployment -n kyverno

View file

@@ -223,6 +223,7 @@ The command removes all the Kubernetes components associated with the chart and
| cleanupController.image.tag | string | `nil` | Image tag Defaults to appVersion in Chart.yaml if omitted |
| cleanupController.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
| cleanupController.image.pullSecrets | list | `[]` | Image pull secrets |
| cleanupController.args | list | `["--cleanupService=https://{{ template \"kyverno.cleanup-controller.deploymentName\" . }}.{{ template \"kyverno.namespace\" . }}.svc"]` | Arguments passed to the container on the command line |
| cleanupController.service.port | int | `443` | Service port. |
| cleanupController.service.type | string | `"ClusterIP"` | Service type. |
| cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. |

View file

@@ -35,6 +35,7 @@ rules:
- update
- watch
{{- with .Values.cleanupController.rbac.clusterRole.extraResources }}
{{- range . }}
- apiGroups:
{{- toYaml .apiGroups | nindent 6 }}
resources:
@@ -43,5 +44,6 @@ rules:
- delete
- list
{{- end }}
{{- end }}
{{- end }}
{{- end }}

View file

@@ -39,6 +39,10 @@ spec:
- containerPort: 8000
name: metrics
protocol: TCP
{{- with .Values.cleanupController.args }}
args:
{{- tpl (toYaml .) $ | nindent 12 }}
{{- end }}
env:
- name: KYVERNO_NAMESPACE
valueFrom:

View file

@@ -488,6 +488,11 @@ cleanupController:
clusterRole:
# -- Extra resource permissions to add in the cluster role
extraResources: []
# - apiGroups:
# - ''
# resources:
# - pods
# -- Enable cleanup controller.
enabled: true
@@ -509,6 +514,10 @@ cleanupController:
pullSecrets: []
# - secretName
# -- Arguments passed to the container on the command line
args:
- --cleanupService=https://{{ template "kyverno.cleanup-controller.deploymentName" . }}.{{ template "kyverno.namespace" . }}.svc
service:
# -- Service port.
port: 443

View file

@@ -1,4 +1,4 @@
package main
package admission
import (
"context"
@@ -11,17 +11,17 @@ import (
admissionv1 "k8s.io/api/admission/v1"
)
type cleanupPolicyHandlers struct {
type handlers struct {
client dclient.Interface
}
func NewHandlers(client dclient.Interface) CleanupPolicyHandlers {
return &cleanupPolicyHandlers{
func New(client dclient.Interface) *handlers {
return &handlers{
client: client,
}
}
func (h *cleanupPolicyHandlers) Validate(ctx context.Context, logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
policy, _, err := admissionutils.GetCleanupPolicies(request)
if err != nil {
logger.Error(err, "failed to unmarshal policies from admission request")

View file

@@ -0,0 +1,73 @@
package cleanup
import (
"context"
"time"
"github.com/go-logr/logr"
kyvernov1alpha1 "github.com/kyverno/kyverno/api/kyverno/v1alpha1"
kyvernov1alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1alpha1"
"github.com/kyverno/kyverno/pkg/clients/dclient"
controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/client-go/tools/cache"
)
type handlers struct {
client dclient.Interface
cpolLister kyvernov1alpha1listers.ClusterCleanupPolicyLister
polLister kyvernov1alpha1listers.CleanupPolicyLister
}
func New(
client dclient.Interface,
cpolLister kyvernov1alpha1listers.ClusterCleanupPolicyLister,
polLister kyvernov1alpha1listers.CleanupPolicyLister,
) *handlers {
return &handlers{
client: client,
cpolLister: cpolLister,
polLister: polLister,
}
}
func (h *handlers) Cleanup(ctx context.Context, logger logr.Logger, name string, _ time.Time) error {
logger.Info("cleaning up...")
namespace, name, err := cache.SplitMetaNamespaceKey(name)
if err != nil {
return err
}
policy, err := h.lookupPolicy(namespace, name)
if err != nil {
return err
}
return h.executePolicy(ctx, logger, policy)
}
func (h *handlers) lookupPolicy(namespace, name string) (kyvernov1alpha1.CleanupPolicyInterface, error) {
if namespace == "" {
return h.cpolLister.Get(name)
} else {
return h.polLister.CleanupPolicies(namespace).Get(name)
}
}
func (h *handlers) executePolicy(ctx context.Context, logger logr.Logger, policy kyvernov1alpha1.CleanupPolicyInterface) error {
spec := policy.GetSpec()
kinds := sets.NewString(spec.MatchResources.GetKinds()...)
for kind := range kinds {
logger := logger.WithValues("kind", kind)
logger.Info("processing...")
list, err := h.client.ListResource(ctx, "", kind, policy.GetNamespace(), nil)
if err != nil {
return err
}
for i := range list.Items {
if !controllerutils.IsManagedByKyverno(&list.Items[i]) {
logger := logger.WithValues("name", list.Items[i].GetName(), "namespace", list.Items[i].GetNamespace())
logger.Info("item...")
}
}
}
return nil
}
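
Editor's note (not part of the diff): the name argument passed to Cleanup is a meta-namespace key, so lookupPolicy can branch on an empty namespace to pick the cluster-scoped lister. A minimal sketch of that key handling, using the stock client-go helper and illustrative policy names:

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// Illustrative keys: a cluster-scoped policy has no namespace prefix.
	for _, key := range []string{"remove-stale-pods", "team-a/remove-stale-pods"} {
		namespace, name, err := cache.SplitMetaNamespaceKey(key)
		if err != nil {
			panic(err)
		}
		if namespace == "" {
			fmt.Printf("%q -> ClusterCleanupPolicy %q\n", key, name)
		} else {
			fmt.Printf("%q -> CleanupPolicy %q in namespace %q\n", key, name, namespace)
		}
	}
}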

View file

@@ -1,10 +1,13 @@
package main
import (
"flag"
"os"
"sync"
"time"
admissionhandlers "github.com/kyverno/kyverno/cmd/cleanup-controller/handlers/admission"
cleanuphandlers "github.com/kyverno/kyverno/cmd/cleanup-controller/handlers/cleanup"
"github.com/kyverno/kyverno/cmd/internal"
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
@@ -22,12 +25,16 @@ const (
)
func main() {
var cleanupService string
flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
flagset.StringVar(&cleanupService, "cleanupService", "https://cleanup-controller.kyverno.svc", "The url to join the cleanup service.")
// config
appConfig := internal.NewConfiguration(
internal.WithProfiling(),
internal.WithMetrics(),
internal.WithTracing(),
internal.WithKubeconfig(),
internal.WithFlagSets(flagset),
)
// parse flags
internal.ParseFlags(appConfig)
@@ -56,18 +63,24 @@ func main() {
kyvernoInformer.Kyverno().V1alpha1().ClusterCleanupPolicies(),
kyvernoInformer.Kyverno().V1alpha1().CleanupPolicies(),
kubeInformer.Batch().V1().CronJobs(),
cleanupService,
),
cleanup.Workers,
)
secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
cpolLister := kyvernoInformer.Kyverno().V1alpha1().ClusterCleanupPolicies().Lister()
polLister := kyvernoInformer.Kyverno().V1alpha1().CleanupPolicies().Lister()
// start informers and wait for cache sync
if !internal.StartInformersAndWaitForCacheSync(ctx, kubeKyvernoInformer, kubeInformer, kyvernoInformer) {
os.Exit(1)
}
var wg sync.WaitGroup
controller.Run(ctx, logger.WithName("cleanup-controller"), &wg)
// create handlers
admissionHandlers := admissionhandlers.New(dClient)
cleanupHandlers := cleanuphandlers.New(dClient, cpolLister, polLister)
// create server
server := NewServer(
NewHandlers(dClient),
func() ([]byte, []byte, error) {
secret, err := secretLister.Secrets(config.KyvernoNamespace()).Get("cleanup-controller-tls")
if err != nil {
@@ -75,8 +88,10 @@ func main() {
}
return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
},
admissionHandlers.Validate,
cleanupHandlers.Cleanup,
)
// start webhooks server
// start server
server.Run(ctx.Done())
// wait for termination signal
wg.Wait()

View file

@@ -8,13 +8,17 @@ import (
"github.com/go-logr/logr"
"github.com/julienschmidt/httprouter"
"github.com/kyverno/kyverno/pkg/controllers/cleanup"
"github.com/kyverno/kyverno/pkg/logging"
"github.com/kyverno/kyverno/pkg/webhooks/handlers"
admissionv1 "k8s.io/api/admission/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
// ValidatingWebhookServicePath is the path for validation webhook
const ValidatingWebhookServicePath = "/validate"
const (
// validatingWebhookServicePath is the path for validation webhook
validatingWebhookServicePath = "/validate"
)
type Server interface {
// Run TLS server in separate thread and returns control immediately
@@ -23,32 +27,51 @@ type Server interface {
Stop(context.Context)
}
type CleanupPolicyHandlers interface {
// Validate performs the validation check on policy resources
Validate(context.Context, logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
}
type server struct {
server *http.Server
}
type TlsProvider func() ([]byte, []byte, error)
type (
TlsProvider = func() ([]byte, []byte, error)
ValidationHandler = func(context.Context, logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
CleanupHandler = func(context.Context, logr.Logger, string, time.Time) error
)
// NewServer creates new instance of server accordingly to given configuration
func NewServer(
policyHandlers CleanupPolicyHandlers,
tlsProvider TlsProvider,
validationHandler ValidationHandler,
cleanupHandler CleanupHandler,
) Server {
policyLogger := logging.WithName("cleanup-policy")
cleanupLogger := logging.WithName("cleanup")
mux := httprouter.New()
mux.HandlerFunc(
"POST",
ValidatingWebhookServicePath,
handlers.FromAdmissionFunc("VALIDATE", policyHandlers.Validate).
validatingWebhookServicePath,
handlers.FromAdmissionFunc("VALIDATE", validationHandler).
WithSubResourceFilter().
WithAdmission(policyLogger.WithName("validate")).
ToHandlerFunc(),
)
mux.HandlerFunc(
"GET",
cleanup.CleanupServicePath,
func(w http.ResponseWriter, r *http.Request) {
policy := r.URL.Query().Get("policy")
logger := cleanupLogger.WithValues("policy", policy)
err := cleanupHandler(r.Context(), logger, policy, time.Now())
if err == nil {
w.WriteHeader(http.StatusOK)
} else {
if apierrors.IsNotFound(err) {
w.WriteHeader(http.StatusNotFound)
} else {
w.WriteHeader(http.StatusInternalServerError)
}
}
},
)
return &server{
server: &http.Server{
Addr: ":9443",
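
Editor's note (not part of the diff): the GET route registered above is the endpoint the generated CronJob's curl container calls. A minimal client-side sketch of that contract, with an assumed service URL and policy key; skipping TLS verification mirrors the `-k` flag used until the CA TODO in the controller is resolved:

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Assumed values for illustration only.
	service := "https://cleanup-controller.kyverno.svc"
	policyKey := "team-a/remove-stale-pods" // "<namespace>/<name>", no prefix for cluster policies

	// Skip certificate verification, like the CronJob's `curl -k`.
	client := &http.Client{Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}}}
	resp, err := client.Get(fmt.Sprintf("%s/cleanup?policy=%s", service, url.QueryEscape(policyKey)))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// 200 on success, 404 when the policy is unknown, 500 on other errors.
	fmt.Println(resp.Status)
}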

View file

@@ -2,6 +2,7 @@ package cleanup
import (
"context"
"fmt"
"time"
"github.com/go-logr/logr"
@@ -12,14 +13,21 @@ import (
"github.com/kyverno/kyverno/pkg/controllers"
controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
batchv1informers "k8s.io/client-go/informers/batch/v1"
"k8s.io/client-go/kubernetes"
batchv1listers "k8s.io/client-go/listers/batch/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
// CleanupServicePath is the path for triggering cleanup
CleanupServicePath = "/cleanup"
)
type controller struct {
// clients
client kubernetes.Interface
@@ -33,6 +41,9 @@ type controller struct {
queue workqueue.RateLimitingInterface
cpolEnqueue controllerutils.EnqueueFuncT[*kyvernov1alpha1.ClusterCleanupPolicy]
polEnqueue controllerutils.EnqueueFuncT[*kyvernov1alpha1.CleanupPolicy]
// config
cleanupService string
}
const (
@@ -46,16 +57,18 @@ func NewController(
cpolInformer kyvernov1alpha1informers.ClusterCleanupPolicyInformer,
polInformer kyvernov1alpha1informers.CleanupPolicyInformer,
cjInformer batchv1informers.CronJobInformer,
cleanupService string,
) controllers.Controller {
queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName)
c := &controller{
client: client,
cpolLister: cpolInformer.Lister(),
polLister: polInformer.Lister(),
cjLister: cjInformer.Lister(),
queue: queue,
cpolEnqueue: controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.ClusterCleanupPolicy](logger, cpolInformer.Informer(), queue),
polEnqueue: controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.CleanupPolicy](logger, polInformer.Informer(), queue),
client: client,
cpolLister: cpolInformer.Lister(),
polLister: polInformer.Lister(),
cjLister: cjInformer.Lister(),
queue: queue,
cpolEnqueue: controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.ClusterCleanupPolicy](logger, cpolInformer.Informer(), queue),
polEnqueue: controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.CleanupPolicy](logger, polInformer.Informer(), queue),
cleanupService: cleanupService,
}
controllerutils.AddEventHandlersT(
cjInformer.Informer(),
@@ -121,6 +134,64 @@ func (c *controller) getCronjob(namespace, name string) (*batchv1.CronJob, error
return cj, nil
}
func (c *controller) buildCronJob(cronJob *batchv1.CronJob, pol kyvernov1alpha1.CleanupPolicyInterface) error {
// TODO: find a better way to do that, it looks like resources returned by WATCH don't have the GVK
apiVersion := "kyverno.io/v1alpha1"
kind := "CleanupPolicy"
if pol.GetNamespace() == "" {
kind = "ClusterCleanupPolicy"
}
policyName, err := cache.MetaNamespaceKeyFunc(pol)
if err != nil {
return err
}
// set owner reference
cronJob.OwnerReferences = []metav1.OwnerReference{
{
APIVersion: apiVersion,
Kind: kind,
Name: pol.GetName(),
UID: pol.GetUID(),
},
}
var successfulJobsHistoryLimit int32 = 0
var failedJobsHistoryLimit int32 = 1
// set spec
cronJob.Spec = batchv1.CronJobSpec{
Schedule: pol.GetSpec().Schedule,
SuccessfulJobsHistoryLimit: &successfulJobsHistoryLimit,
FailedJobsHistoryLimit: &failedJobsHistoryLimit,
ConcurrencyPolicy: batchv1.ForbidConcurrent,
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyOnFailure,
Containers: []corev1.Container{
{
Name: "cleanup",
Image: "curlimages/curl:7.86.0",
Args: []string{
"-k",
// TODO: ca
// "--cacert",
// "/tmp/ca.crt",
fmt.Sprintf("%s%s?policy=%s", c.cleanupService, CleanupServicePath, policyName),
},
},
},
},
},
},
},
}
// set labels
controllerutils.SetManagedByKyvernoLabel(cronJob)
controllerutils.SetManagedByKyvernoLabel(&cronJob.Spec.JobTemplate)
controllerutils.SetManagedByKyvernoLabel(&cronJob.Spec.JobTemplate.Spec.Template)
return nil
}
func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, namespace, name string) error {
policy, err := c.getPolicy(namespace, name)
if err != nil {
@@ -134,17 +205,28 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, nam
if namespace == "" {
cronjobNs = config.KyvernoNamespace()
}
if cronjob, err := c.getCronjob(cronjobNs, string(policy.GetUID())); err != nil {
observed, err := c.getCronjob(cronjobNs, string(policy.GetUID()))
if err != nil {
if !apierrors.IsNotFound(err) {
return err
}
cronjob := getCronJobForTriggerResource(policy)
_, err = c.client.BatchV1().CronJobs(cronjobNs).Create(ctx, cronjob, metav1.CreateOptions{})
observed = &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: string(policy.GetUID()),
Namespace: cronjobNs,
},
}
}
if observed.ResourceVersion == "" {
err := c.buildCronJob(observed, policy)
if err != nil {
return err
}
_, err = c.client.BatchV1().CronJobs(cronjobNs).Create(ctx, observed, metav1.CreateOptions{})
return err
} else {
_, err = controllerutils.Update(ctx, cronjob, c.client.BatchV1().CronJobs(cronjobNs), func(cronjob *batchv1.CronJob) error {
cronjob.Spec.Schedule = policy.GetSpec().Schedule
return nil
_, err = controllerutils.Update(ctx, observed, c.client.BatchV1().CronJobs(cronjobNs), func(observed *batchv1.CronJob) error {
return c.buildCronJob(observed, policy)
})
return err
}

View file

@@ -1,54 +0,0 @@
package cleanup
import (
kyvernov1alpha1 "github.com/kyverno/kyverno/api/kyverno/v1alpha1"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func getCronJobForTriggerResource(pol kyvernov1alpha1.CleanupPolicyInterface) *batchv1.CronJob {
// TODO: find a better way to do that, it looks like resources returned by WATCH don't have the GVK
apiVersion := "kyverno.io/v1alpha1"
kind := "CleanupPolicy"
if pol.GetNamespace() == "" {
kind = "ClusterCleanupPolicy"
}
cronjob := &batchv1.CronJob{
ObjectMeta: metav1.ObjectMeta{
Name: string(pol.GetUID()),
OwnerReferences: []metav1.OwnerReference{
{
APIVersion: apiVersion,
Kind: kind,
Name: pol.GetName(),
UID: pol.GetUID(),
},
},
},
Spec: batchv1.CronJobSpec{
Schedule: pol.GetSpec().Schedule,
JobTemplate: batchv1.JobTemplateSpec{
Spec: batchv1.JobSpec{
Template: corev1.PodTemplateSpec{
Spec: corev1.PodSpec{
RestartPolicy: corev1.RestartPolicyOnFailure,
Containers: []corev1.Container{
{
Name: "cleanup",
Image: "bitnami/kubectl:latest",
Args: []string{
"/bin/sh",
"-c",
`echo "Hello World"`,
},
},
},
},
},
},
},
},
}
return cronjob
}

View file

@@ -344,7 +344,7 @@ func (c *controller) getPolicyReports(ctx context.Context, namespace string) ([]
return nil, err
}
for i := range list.Items {
if controllerutils.CheckLabel(&list.Items[i], kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp) {
if controllerutils.IsManagedByKyverno(&list.Items[i]) {
reports = append(reports, &list.Items[i])
}
}
@@ -354,7 +354,7 @@ func (c *controller) getPolicyReports(ctx context.Context, namespace string) ([]
return nil, err
}
for i := range list.Items {
if controllerutils.CheckLabel(&list.Items[i], kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp) {
if controllerutils.IsManagedByKyverno(&list.Items[i]) {
reports = append(reports, &list.Items[i])
}
}

View file

@@ -1,6 +1,7 @@
package controller
import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
@@ -23,6 +24,14 @@ func CheckLabel(obj metav1.Object, key, value string) bool {
return labels[key] == value
}
func SetManagedByKyvernoLabel(obj metav1.Object) {
SetLabel(obj, kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp)
}
func IsManagedByKyverno(obj metav1.Object) bool {
return CheckLabel(obj, kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp)
}
func HasLabel(obj metav1.Object, key string) bool {
labels := obj.GetLabels()
if labels == nil {

View file

@@ -0,0 +1,16 @@
package controller
import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/selection"
)
func SelectorNotManagedByKyverno() (labels.Selector, error) {
selector := labels.Everything()
requirement, err := labels.NewRequirement(kyvernov1.LabelAppManagedBy, selection.NotEquals, []string{kyvernov1.ValueKyvernoApp})
if err == nil {
selector = selector.Add(*requirement)
}
return selector, err
}
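
Editor's note (not part of the diff): this commit only adds the SelectorNotManagedByKyverno helper; nothing wires it into a list call yet. A hypothetical usage sketch, assuming an in-cluster client and the pkg/utils/controller import path shown above:

package main

import (
	"context"
	"fmt"

	controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// Exclude resources that Kyverno itself manages from a cleanup pass.
	selector, err := controllerutils.SelectorNotManagedByKyverno()
	if err != nil {
		panic(err)
	}
	pods, err := client.CoreV1().Pods("default").List(context.TODO(), metav1.ListOptions{
		LabelSelector: selector.String(),
	})
	if err != nil {
		panic(err)
	}
	for i := range pods.Items {
		fmt.Println(pods.Items[i].GetName())
	}
}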

scripts/kyverno.yaml Normal file
View file

@@ -0,0 +1,16 @@
initContainer:
extraArgs:
- --loggingFormat=text
extraArgs:
- --loggingFormat=text
cleanupController:
rbac:
clusterRole:
extraResources:
- apiGroups:
- ''
resources:
- pods