Mirror of https://github.com/kyverno/kyverno.git, synced 2025-03-31 03:45:17 +00:00
feat: add cleanup handler (#5576)
* feat: add cleanup handler
* cleanup handler
* cleanup
* cleanup
* service
* cleanup
* fix
* codegen

Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Parent commit: 72745f391a
This commit: a6aaffded3

15 changed files with 284 additions and 89 deletions
Makefile (3 changes)

@@ -730,8 +730,7 @@ kind-deploy-kyverno: $(HELM) kind-load-all ## Build images, load them in kind cl
 		--set image.tag=$(IMAGE_TAG_DEV) \
 		--set initImage.repository=$(LOCAL_KYVERNOPRE_IMAGE) \
 		--set initImage.tag=$(IMAGE_TAG_DEV) \
-		--set initContainer.extraArgs={--loggingFormat=text} \
-		--set "extraArgs={--loggingFormat=text}"
+		--values ./scripts/kyverno.yaml
 	@echo Restart kyverno pods... >&2
 	@kubectl rollout restart deployment -n kyverno
@@ -223,6 +223,7 @@ The command removes all the Kubernetes components associated with the chart and
 | cleanupController.image.tag | string | `nil` | Image tag Defaults to appVersion in Chart.yaml if omitted |
 | cleanupController.image.pullPolicy | string | `"IfNotPresent"` | Image pull policy |
 | cleanupController.image.pullSecrets | list | `[]` | Image pull secrets |
+| cleanupController.args | list | `["--cleanupService=https://{{ template \"kyverno.cleanup-controller.deploymentName\" . }}.{{ template \"kyverno.namespace\" . }}.svc"]` | Arguments passed to the container on the command line |
 | cleanupController.service.port | int | `443` | Service port. |
 | cleanupController.service.type | string | `"ClusterIP"` | Service type. |
 | cleanupController.service.nodePort | string | `nil` | Service node port. Only used if `service.type` is `NodePort`. |
@@ -35,6 +35,7 @@ rules:
       - update
       - watch
 {{- with .Values.cleanupController.rbac.clusterRole.extraResources }}
+{{- range . }}
   - apiGroups:
 {{- toYaml .apiGroups | nindent 6 }}
     resources:
@@ -43,5 +44,6 @@ rules:
       - delete
       - list
 {{- end }}
+{{- end }}
 {{- end }}
 {{- end }}
@@ -39,6 +39,10 @@ spec:
             - containerPort: 8000
               name: metrics
               protocol: TCP
+          {{- with .Values.cleanupController.args }}
+          args:
+            {{- tpl (toYaml .) $ | nindent 12 }}
+          {{- end }}
           env:
           - name: KYVERNO_NAMESPACE
             valueFrom:
@@ -488,6 +488,11 @@ cleanupController:
     clusterRole:
       # -- Extra resource permissions to add in the cluster role
       extraResources: []
+      # - apiGroups:
+      #     - ''
+      #   resources:
+      #     - pods
+
 
   # -- Enable cleanup controller.
   enabled: true
@@ -509,6 +514,10 @@ cleanupController:
     pullSecrets: []
     # - secretName
 
+  # -- Arguments passed to the container on the command line
+  args:
+    - --cleanupService=https://{{ template "kyverno.cleanup-controller.deploymentName" . }}.{{ template "kyverno.namespace" . }}.svc
+
   service:
     # -- Service port.
     port: 443
@@ -1,4 +1,4 @@
-package main
+package admission
 
 import (
 	"context"
@@ -11,17 +11,17 @@ import (
 	admissionv1 "k8s.io/api/admission/v1"
 )
 
-type cleanupPolicyHandlers struct {
+type handlers struct {
 	client dclient.Interface
 }
 
-func NewHandlers(client dclient.Interface) CleanupPolicyHandlers {
-	return &cleanupPolicyHandlers{
+func New(client dclient.Interface) *handlers {
+	return &handlers{
 		client: client,
 	}
 }
 
-func (h *cleanupPolicyHandlers) Validate(ctx context.Context, logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
+func (h *handlers) Validate(ctx context.Context, logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
 	policy, _, err := admissionutils.GetCleanupPolicies(request)
 	if err != nil {
 		logger.Error(err, "failed to unmarshal policies from admission request")
cmd/cleanup-controller/handlers/cleanup/handlers.go (new file, 73 lines)
@@ -0,0 +1,73 @@
+package cleanup
+
+import (
+	"context"
+	"time"
+
+	"github.com/go-logr/logr"
+	kyvernov1alpha1 "github.com/kyverno/kyverno/api/kyverno/v1alpha1"
+	kyvernov1alpha1listers "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1alpha1"
+	"github.com/kyverno/kyverno/pkg/clients/dclient"
+	controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
+	"k8s.io/apimachinery/pkg/util/sets"
+	"k8s.io/client-go/tools/cache"
+)
+
+type handlers struct {
+	client     dclient.Interface
+	cpolLister kyvernov1alpha1listers.ClusterCleanupPolicyLister
+	polLister  kyvernov1alpha1listers.CleanupPolicyLister
+}
+
+func New(
+	client dclient.Interface,
+	cpolLister kyvernov1alpha1listers.ClusterCleanupPolicyLister,
+	polLister kyvernov1alpha1listers.CleanupPolicyLister,
+) *handlers {
+	return &handlers{
+		client:     client,
+		cpolLister: cpolLister,
+		polLister:  polLister,
+	}
+}
+
+func (h *handlers) Cleanup(ctx context.Context, logger logr.Logger, name string, _ time.Time) error {
+	logger.Info("cleaning up...")
+	namespace, name, err := cache.SplitMetaNamespaceKey(name)
+	if err != nil {
+		return err
+	}
+	policy, err := h.lookupPolicy(namespace, name)
+	if err != nil {
+		return err
+	}
+	return h.executePolicy(ctx, logger, policy)
+}
+
+func (h *handlers) lookupPolicy(namespace, name string) (kyvernov1alpha1.CleanupPolicyInterface, error) {
+	if namespace == "" {
+		return h.cpolLister.Get(name)
+	} else {
+		return h.polLister.CleanupPolicies(namespace).Get(name)
+	}
+}
+
+func (h *handlers) executePolicy(ctx context.Context, logger logr.Logger, policy kyvernov1alpha1.CleanupPolicyInterface) error {
+	spec := policy.GetSpec()
+	kinds := sets.NewString(spec.MatchResources.GetKinds()...)
+	for kind := range kinds {
+		logger := logger.WithValues("kind", kind)
+		logger.Info("processing...")
+		list, err := h.client.ListResource(ctx, "", kind, policy.GetNamespace(), nil)
+		if err != nil {
+			return err
+		}
+		for i := range list.Items {
+			if !controllerutils.IsManagedByKyverno(&list.Items[i]) {
+				logger := logger.WithValues("name", list.Items[i].GetName(), "namespace", list.Items[i].GetNamespace())
+				logger.Info("item...")
+			}
+		}
+	}
+	return nil
+}
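The Cleanup handler above takes a single namespace/name key and routes the lookup to either the cluster-scoped or the namespaced lister. A minimal, self-contained sketch of the client-go key-splitting behavior it relies on (policy names are illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/tools/cache"
)

func main() {
	// A key without a slash denotes a cluster-scoped object, so
	// lookupPolicy falls through to the ClusterCleanupPolicy lister.
	ns, name, _ := cache.SplitMetaNamespaceKey("my-cluster-cleanup-policy")
	fmt.Printf("ns=%q name=%q\n", ns, name) // ns="" name="my-cluster-cleanup-policy"

	// A namespaced key splits into both parts, routing the lookup
	// to the namespaced CleanupPolicy lister.
	ns, name, _ = cache.SplitMetaNamespaceKey("default/my-cleanup-policy")
	fmt.Printf("ns=%q name=%q\n", ns, name) // ns="default" name="my-cleanup-policy"
}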
@@ -1,10 +1,13 @@
 package main
 
 import (
+	"flag"
 	"os"
 	"sync"
 	"time"
 
+	admissionhandlers "github.com/kyverno/kyverno/cmd/cleanup-controller/handlers/admission"
+	cleanuphandlers "github.com/kyverno/kyverno/cmd/cleanup-controller/handlers/cleanup"
 	"github.com/kyverno/kyverno/cmd/internal"
 	kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
 	dynamicclient "github.com/kyverno/kyverno/pkg/clients/dynamic"
@@ -22,12 +25,16 @@ const (
 )
 
 func main() {
+	var cleanupService string
+	flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
+	flagset.StringVar(&cleanupService, "cleanupService", "https://cleanup-controller.kyverno.svc", "The url to join the cleanup service.")
 	// config
 	appConfig := internal.NewConfiguration(
 		internal.WithProfiling(),
 		internal.WithMetrics(),
 		internal.WithTracing(),
 		internal.WithKubeconfig(),
+		internal.WithFlagSets(flagset),
 	)
 	// parse flags
 	internal.ParseFlags(appConfig)
@@ -56,18 +63,24 @@ func main() {
 			kyvernoInformer.Kyverno().V1alpha1().ClusterCleanupPolicies(),
 			kyvernoInformer.Kyverno().V1alpha1().CleanupPolicies(),
 			kubeInformer.Batch().V1().CronJobs(),
+			cleanupService,
 		),
 		cleanup.Workers,
 	)
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
+	cpolLister := kyvernoInformer.Kyverno().V1alpha1().ClusterCleanupPolicies().Lister()
+	polLister := kyvernoInformer.Kyverno().V1alpha1().CleanupPolicies().Lister()
 	// start informers and wait for cache sync
 	if !internal.StartInformersAndWaitForCacheSync(ctx, kubeKyvernoInformer, kubeInformer, kyvernoInformer) {
 		os.Exit(1)
 	}
 	var wg sync.WaitGroup
 	controller.Run(ctx, logger.WithName("cleanup-controller"), &wg)
+	// create handlers
+	admissionHandlers := admissionhandlers.New(dClient)
+	cleanupHandlers := cleanuphandlers.New(dClient, cpolLister, polLister)
+	// create server
 	server := NewServer(
-		NewHandlers(dClient),
 		func() ([]byte, []byte, error) {
 			secret, err := secretLister.Secrets(config.KyvernoNamespace()).Get("cleanup-controller-tls")
 			if err != nil {
@@ -75,8 +88,10 @@ func main() {
 			}
 			return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
 		},
+		admissionHandlers.Validate,
+		cleanupHandlers.Cleanup,
 	)
-	// start webhooks server
+	// start server
 	server.Run(ctx.Done())
 	// wait for termination signal
 	wg.Wait()
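The entrypoint above registers a dedicated flag set and hands it to the shared configuration via internal.WithFlagSets. A standalone sketch of just the standard-library flag wiring (the kyverno-specific plumbing is omitted; the parsed command line is illustrative):

package main

import (
	"flag"
	"fmt"
)

func main() {
	// Same pattern as the cleanup-controller's main: a named FlagSet
	// carrying a --cleanupService flag with an in-cluster default.
	var cleanupService string
	flagset := flag.NewFlagSet("cleanup-controller", flag.ExitOnError)
	flagset.StringVar(&cleanupService, "cleanupService", "https://cleanup-controller.kyverno.svc", "The url to join the cleanup service.")

	// Go's flag package accepts both -flag and --flag forms.
	_ = flagset.Parse([]string{"--cleanupService=https://example.svc"})
	fmt.Println(cleanupService) // https://example.svc
}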
@@ -8,13 +8,17 @@ import (
 
 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
+	"github.com/kyverno/kyverno/pkg/controllers/cleanup"
 	"github.com/kyverno/kyverno/pkg/logging"
 	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
 	admissionv1 "k8s.io/api/admission/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
 )
 
-// ValidatingWebhookServicePath is the path for validation webhook
-const ValidatingWebhookServicePath = "/validate"
+const (
+	// validatingWebhookServicePath is the path for validation webhook
+	validatingWebhookServicePath = "/validate"
+)
 
 type Server interface {
 	// Run TLS server in separate thread and returns control immediately
@@ -23,32 +27,51 @@ type Server interface {
 	Stop(context.Context)
 }
 
-type CleanupPolicyHandlers interface {
-	// Validate performs the validation check on policy resources
-	Validate(context.Context, logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
-}
-
 type server struct {
 	server *http.Server
 }
 
-type TlsProvider func() ([]byte, []byte, error)
+type (
+	TlsProvider       = func() ([]byte, []byte, error)
+	ValidationHandler = func(context.Context, logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
+	CleanupHandler    = func(context.Context, logr.Logger, string, time.Time) error
+)
 
 // NewServer creates new instance of server accordingly to given configuration
 func NewServer(
-	policyHandlers CleanupPolicyHandlers,
 	tlsProvider TlsProvider,
+	validationHandler ValidationHandler,
+	cleanupHandler CleanupHandler,
 ) Server {
 	policyLogger := logging.WithName("cleanup-policy")
+	cleanupLogger := logging.WithName("cleanup")
 	mux := httprouter.New()
 	mux.HandlerFunc(
 		"POST",
-		ValidatingWebhookServicePath,
-		handlers.FromAdmissionFunc("VALIDATE", policyHandlers.Validate).
+		validatingWebhookServicePath,
+		handlers.FromAdmissionFunc("VALIDATE", validationHandler).
 			WithSubResourceFilter().
 			WithAdmission(policyLogger.WithName("validate")).
 			ToHandlerFunc(),
 	)
+	mux.HandlerFunc(
+		"GET",
+		cleanup.CleanupServicePath,
+		func(w http.ResponseWriter, r *http.Request) {
+			policy := r.URL.Query().Get("policy")
+			logger := cleanupLogger.WithValues("policy", policy)
+			err := cleanupHandler(r.Context(), logger, policy, time.Now())
+			if err == nil {
+				w.WriteHeader(http.StatusOK)
+			} else {
+				if apierrors.IsNotFound(err) {
+					w.WriteHeader(http.StatusNotFound)
+				} else {
+					w.WriteHeader(http.StatusInternalServerError)
+				}
+			}
+		},
+	)
 	return &server{
 		server: &http.Server{
 			Addr: ":9443",
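With the route above, a cleanup run is triggered by a plain GET carrying the policy key as a query parameter; the handler answers 200 on success, 404 when the policy no longer exists, and 500 on any other error. A hypothetical manual trigger, assuming the default service URL and an illustrative policy key (in the real deployment the CronJob's curl container makes this request, passing -k because the serving certificate is self-signed):

package main

import (
	"crypto/tls"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	base := "https://cleanup-controller.kyverno.svc" // default --cleanupService value
	policyKey := "default/my-cleanup-policy"         // illustrative namespace/name key

	// Equivalent of `curl -k <service>/cleanup?policy=<key>`; verification
	// is skipped only because the serving certificate is self-signed.
	client := &http.Client{
		Transport: &http.Transport{TLSClientConfig: &tls.Config{InsecureSkipVerify: true}},
	}
	resp, err := client.Get(fmt.Sprintf("%s/cleanup?policy=%s", base, url.QueryEscape(policyKey)))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status) // 200 OK, 404 Not Found, or 500 Internal Server Error
}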
@@ -2,6 +2,7 @@ package cleanup
 
 import (
 	"context"
+	"fmt"
 	"time"
 
 	"github.com/go-logr/logr"
@@ -12,14 +13,21 @@ import (
 	"github.com/kyverno/kyverno/pkg/controllers"
 	controllerutils "github.com/kyverno/kyverno/pkg/utils/controller"
 	batchv1 "k8s.io/api/batch/v1"
+	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	batchv1informers "k8s.io/client-go/informers/batch/v1"
 	"k8s.io/client-go/kubernetes"
 	batchv1listers "k8s.io/client-go/listers/batch/v1"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 )
 
+const (
+	// CleanupServicePath is the path for triggering cleanup
+	CleanupServicePath = "/cleanup"
+)
+
 type controller struct {
 	// clients
 	client kubernetes.Interface
@@ -33,6 +41,9 @@ type controller struct {
 	queue       workqueue.RateLimitingInterface
 	cpolEnqueue controllerutils.EnqueueFuncT[*kyvernov1alpha1.ClusterCleanupPolicy]
 	polEnqueue  controllerutils.EnqueueFuncT[*kyvernov1alpha1.CleanupPolicy]
+
+	// config
+	cleanupService string
 }
 
 const (
@@ -46,6 +57,7 @@ func NewController(
 	cpolInformer kyvernov1alpha1informers.ClusterCleanupPolicyInformer,
 	polInformer kyvernov1alpha1informers.CleanupPolicyInformer,
 	cjInformer batchv1informers.CronJobInformer,
+	cleanupService string,
 ) controllers.Controller {
 	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), ControllerName)
 	c := &controller{
@@ -56,6 +68,7 @@ func NewController(
 		queue:       queue,
 		cpolEnqueue: controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.ClusterCleanupPolicy](logger, cpolInformer.Informer(), queue),
 		polEnqueue:  controllerutils.AddDefaultEventHandlersT[*kyvernov1alpha1.CleanupPolicy](logger, polInformer.Informer(), queue),
+		cleanupService: cleanupService,
 	}
 	controllerutils.AddEventHandlersT(
 		cjInformer.Informer(),
@@ -121,6 +134,64 @@ func (c *controller) getCronjob(namespace, name string) (*batchv1.CronJob, error
 	return cj, nil
 }
 
+func (c *controller) buildCronJob(cronJob *batchv1.CronJob, pol kyvernov1alpha1.CleanupPolicyInterface) error {
+	// TODO: find a better way to do that, it looks like resources returned by WATCH don't have the GVK
+	apiVersion := "kyverno.io/v1alpha1"
+	kind := "CleanupPolicy"
+	if pol.GetNamespace() == "" {
+		kind = "ClusterCleanupPolicy"
+	}
+	policyName, err := cache.MetaNamespaceKeyFunc(pol)
+	if err != nil {
+		return err
+	}
+	// set owner reference
+	cronJob.OwnerReferences = []metav1.OwnerReference{
+		{
+			APIVersion: apiVersion,
+			Kind:       kind,
+			Name:       pol.GetName(),
+			UID:        pol.GetUID(),
+		},
+	}
+	var successfulJobsHistoryLimit int32 = 0
+	var failedJobsHistoryLimit int32 = 1
+	// set spec
+	cronJob.Spec = batchv1.CronJobSpec{
+		Schedule:                   pol.GetSpec().Schedule,
+		SuccessfulJobsHistoryLimit: &successfulJobsHistoryLimit,
+		FailedJobsHistoryLimit:     &failedJobsHistoryLimit,
+		ConcurrencyPolicy:          batchv1.ForbidConcurrent,
+		JobTemplate: batchv1.JobTemplateSpec{
+			Spec: batchv1.JobSpec{
+				Template: corev1.PodTemplateSpec{
+					Spec: corev1.PodSpec{
+						RestartPolicy: corev1.RestartPolicyOnFailure,
+						Containers: []corev1.Container{
+							{
+								Name:  "cleanup",
+								Image: "curlimages/curl:7.86.0",
+								Args: []string{
+									"-k",
+									// TODO: ca
+									// "--cacert",
+									// "/tmp/ca.crt",
+									fmt.Sprintf("%s%s?policy=%s", c.cleanupService, CleanupServicePath, policyName),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+	// set labels
+	controllerutils.SetManagedByKyvernoLabel(cronJob)
+	controllerutils.SetManagedByKyvernoLabel(&cronJob.Spec.JobTemplate)
+	controllerutils.SetManagedByKyvernoLabel(&cronJob.Spec.JobTemplate.Spec.Template)
+	return nil
+}
+
 func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, namespace, name string) error {
 	policy, err := c.getPolicy(namespace, name)
 	if err != nil {
@@ -134,17 +205,28 @@ func (c *controller) reconcile(ctx context.Context, logger logr.Logger, key, nam
 	if namespace == "" {
 		cronjobNs = config.KyvernoNamespace()
 	}
-	if cronjob, err := c.getCronjob(cronjobNs, string(policy.GetUID())); err != nil {
+	observed, err := c.getCronjob(cronjobNs, string(policy.GetUID()))
+	if err != nil {
 		if !apierrors.IsNotFound(err) {
 			return err
 		}
-		cronjob := getCronJobForTriggerResource(policy)
-		_, err = c.client.BatchV1().CronJobs(cronjobNs).Create(ctx, cronjob, metav1.CreateOptions{})
+		observed = &batchv1.CronJob{
+			ObjectMeta: metav1.ObjectMeta{
+				Name:      string(policy.GetUID()),
+				Namespace: cronjobNs,
+			},
+		}
+	}
+	if observed.ResourceVersion == "" {
+		err := c.buildCronJob(observed, policy)
+		if err != nil {
+			return err
+		}
+		_, err = c.client.BatchV1().CronJobs(cronjobNs).Create(ctx, observed, metav1.CreateOptions{})
 		return err
 	} else {
-		_, err = controllerutils.Update(ctx, cronjob, c.client.BatchV1().CronJobs(cronjobNs), func(cronjob *batchv1.CronJob) error {
-			cronjob.Spec.Schedule = policy.GetSpec().Schedule
-			return nil
+		_, err = controllerutils.Update(ctx, observed, c.client.BatchV1().CronJobs(cronjobNs), func(observed *batchv1.CronJob) error {
+			return c.buildCronJob(observed, policy)
 		})
 		return err
 	}
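buildCronJob above assembles the curl target from the configured service URL, the shared CleanupServicePath constant, and the policy's namespace/name key; the owner reference it sets also means deleting a policy garbage-collects its CronJob. A standalone sketch of the URL construction (values illustrative):

package main

import "fmt"

const cleanupServicePath = "/cleanup" // mirrors cleanup.CleanupServicePath

func main() {
	cleanupService := "https://cleanup-controller.kyverno.svc" // --cleanupService flag
	policyName := "default/remove-stale-pods"                  // cache.MetaNamespaceKeyFunc output, illustrative

	// Same fmt.Sprintf shape as the CronJob container args above.
	url := fmt.Sprintf("%s%s?policy=%s", cleanupService, cleanupServicePath, policyName)
	fmt.Println(url) // https://cleanup-controller.kyverno.svc/cleanup?policy=default/remove-stale-pods
}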
@@ -1,54 +0,0 @@
-package cleanup
-
-import (
-	kyvernov1alpha1 "github.com/kyverno/kyverno/api/kyverno/v1alpha1"
-	batchv1 "k8s.io/api/batch/v1"
-	corev1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-func getCronJobForTriggerResource(pol kyvernov1alpha1.CleanupPolicyInterface) *batchv1.CronJob {
-	// TODO: find a better way to do that, it looks like resources returned by WATCH don't have the GVK
-	apiVersion := "kyverno.io/v1alpha1"
-	kind := "CleanupPolicy"
-	if pol.GetNamespace() == "" {
-		kind = "ClusterCleanupPolicy"
-	}
-	cronjob := &batchv1.CronJob{
-		ObjectMeta: metav1.ObjectMeta{
-			Name: string(pol.GetUID()),
-			OwnerReferences: []metav1.OwnerReference{
-				{
-					APIVersion: apiVersion,
-					Kind:       kind,
-					Name:       pol.GetName(),
-					UID:        pol.GetUID(),
-				},
-			},
-		},
-		Spec: batchv1.CronJobSpec{
-			Schedule: pol.GetSpec().Schedule,
-			JobTemplate: batchv1.JobTemplateSpec{
-				Spec: batchv1.JobSpec{
-					Template: corev1.PodTemplateSpec{
-						Spec: corev1.PodSpec{
-							RestartPolicy: corev1.RestartPolicyOnFailure,
-							Containers: []corev1.Container{
-								{
-									Name:  "cleanup",
-									Image: "bitnami/kubectl:latest",
-									Args: []string{
-										"/bin/sh",
-										"-c",
-										`echo "Hello World"`,
-									},
-								},
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-	return cronjob
-}
@@ -344,7 +344,7 @@ func (c *controller) getPolicyReports(ctx context.Context, namespace string) ([]
 		return nil, err
 	}
 	for i := range list.Items {
-		if controllerutils.CheckLabel(&list.Items[i], kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp) {
+		if controllerutils.IsManagedByKyverno(&list.Items[i]) {
 			reports = append(reports, &list.Items[i])
 		}
 	}
@@ -354,7 +354,7 @@ func (c *controller) getPolicyReports(ctx context.Context, namespace string) ([]
 		return nil, err
 	}
 	for i := range list.Items {
-		if controllerutils.CheckLabel(&list.Items[i], kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp) {
+		if controllerutils.IsManagedByKyverno(&list.Items[i]) {
 			reports = append(reports, &list.Items[i])
 		}
 	}
@@ -1,6 +1,7 @@
 package controller
 
 import (
+	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 )
@@ -23,6 +24,14 @@ func CheckLabel(obj metav1.Object, key, value string) bool {
 	return labels[key] == value
 }
 
+func SetManagedByKyvernoLabel(obj metav1.Object) {
+	SetLabel(obj, kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp)
+}
+
+func IsManagedByKyverno(obj metav1.Object) bool {
+	return CheckLabel(obj, kyvernov1.LabelAppManagedBy, kyvernov1.ValueKyvernoApp)
+}
+
 func HasLabel(obj metav1.Object, key string) bool {
 	labels := obj.GetLabels()
 	if labels == nil {
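A sketch of what the two new helpers amount to, assuming (as I believe holds in the kyverno codebase) that LabelAppManagedBy/ValueKyvernoApp resolve to the standard app.kubernetes.io/managed-by: kyverno pair; the constants are inlined here so the example stays self-contained:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Assumed values of kyvernov1.LabelAppManagedBy / kyvernov1.ValueKyvernoApp.
const (
	labelAppManagedBy = "app.kubernetes.io/managed-by"
	valueKyvernoApp   = "kyverno"
)

func isManagedByKyverno(obj metav1.Object) bool {
	// Equivalent of CheckLabel(obj, labelAppManagedBy, valueKyvernoApp).
	return obj.GetLabels()[labelAppManagedBy] == valueKyvernoApp
}

func main() {
	obj := &metav1.ObjectMeta{}
	fmt.Println(isManagedByKyverno(obj)) // false: no labels set yet

	// Equivalent of SetManagedByKyvernoLabel(obj).
	obj.SetLabels(map[string]string{labelAppManagedBy: valueKyvernoApp})
	fmt.Println(isManagedByKyverno(obj)) // true
}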
pkg/utils/controller/selector.go (new file, 16 lines)
@@ -0,0 +1,16 @@
+package controller
+
+import (
+	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
+	"k8s.io/apimachinery/pkg/labels"
+	"k8s.io/apimachinery/pkg/selection"
+)
+
+func SelectorNotManagedByKyverno() (labels.Selector, error) {
+	selector := labels.Everything()
+	requirement, err := labels.NewRequirement(kyvernov1.LabelAppManagedBy, selection.NotEquals, []string{kyvernov1.ValueKyvernoApp})
+	if err == nil {
+		selector = selector.Add(*requirement)
+	}
+	return selector, err
+}
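The selector returned by SelectorNotManagedByKyverno excludes Kyverno-managed objects from list calls. A hedged usage sketch that rebuilds the same requirement and checks it against label sets (the managed-by key/value pair is the assumed resolution of the kyverno constants):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	// Same construction as SelectorNotManagedByKyverno, with the
	// constants inlined (assumed values).
	selector := labels.Everything()
	req, err := labels.NewRequirement("app.kubernetes.io/managed-by", selection.NotEquals, []string{"kyverno"})
	if err != nil {
		panic(err)
	}
	selector = selector.Add(*req)

	// Kyverno-managed objects are filtered out...
	fmt.Println(selector.Matches(labels.Set{"app.kubernetes.io/managed-by": "kyverno"})) // false

	// ...while everything else matches.
	fmt.Println(selector.Matches(labels.Set{})) // true
}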
scripts/kyverno.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
+initContainer:
+  extraArgs:
+    - --loggingFormat=text
+
+extraArgs:
+  - --loggingFormat=text
+
+cleanupController:
+  rbac:
+    clusterRole:
+      extraResources:
+        - apiGroups:
+            - ''
+          resources:
+            - pods
+