* Fix Dev setup

* webhook monitor - start webhook monitor in main process

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leaderelection

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* - add isLeader; - update to use configmap lock

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* - add initialization method - add methods to get attributes

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* address comments

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* remove newContext in runLeaderElection

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to GenerateController

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* skip processing for non-leaders

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* skip processing for non-leaders

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* add leader election to generate cleanup controller

Signed-off-by: Jim Bugwadia <jim@nirmata.com>

* Gracefully drain request

* HA - Webhook Register / Webhook Monitor / Certificate Renewer (#1920)

* enable leader election for webhook register

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* extract certManager to its own process

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* leader election for cert manager

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* certManager - init certs by the leader

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to webhook monitor

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* update log message

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to policy controller

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add leader election to policy report controller

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* rebuild leader election config

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* start informers in leaderelection

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* start policy informers in main

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* enable leader election in main

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* move eventHandler to the leader election start method

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* address reviewdog comments

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* add clusterrole leaderelection

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* fixed generate flow (#1936)

Signed-off-by: NoSkillGirl <singhpooja240393@gmail.com>

* - init separate kubeclient for leaderelection - fix webhook monitor

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* address reviewdog comments

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* cleanup Kyverno managed resources on stopLeading

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* tag v1.4.0-beta1

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* fix cleanup process when Kyverno stops

Signed-off-by: Shuting Zhao <shutting06@gmail.com>

* bump kind to 0.11.0, k8s v1.21 (#1980)

Co-authored-by: vyankatesh <vyankatesh@neualto.com>
Co-authored-by: vyankatesh <vyankateshkd@gmail.com>
Co-authored-by: Jim Bugwadia <jim@nirmata.com>
Co-authored-by: Pooja Singh <36136335+NoSkillGirl@users.noreply.github.com>
Commit e9a972a362 (parent eaa96f3def), authored by shuting and committed via GitHub on 2021-06-08 12:37:19 -07:00.
28 changed files with 1348 additions and 490 deletions

View file

@ -1,7 +1,7 @@
apiVersion: v1
name: kyverno
version: v1.3.6
appVersion: v1.3.6
version: v1.4.0-beta1
appVersion: v1.4.0-beta1
icon: https://github.com/kyverno/kyverno/raw/main/img/logo.png
description: Kubernetes Native Policy Management
keywords:

View file

@ -1,6 +1,24 @@
{{- if .Values.rbac.create }}
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:leaderelection
labels: {{ include "kyverno.labels" . | nindent 4 }}
app: kyverno
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: {{ template "kyverno.fullname" . }}:webhook
labels: {{ include "kyverno.labels" . | nindent 4 }}

View file

@ -1,6 +1,21 @@
{{- if .Values.rbac.create }}
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.fullname" . }}:leaderelection
labels: {{ include "kyverno.labels" . | nindent 4 }}
app: kyverno
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: {{ template "kyverno.fullname" . }}:leaderelection
subjects:
- kind: ServiceAccount
name: {{ template "kyverno.serviceAccountName" . }}
namespace: {{ template "kyverno.namespace" . }}
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: {{ template "kyverno.fullname" . }}:webhook
labels: {{ include "kyverno.labels" . | nindent 4 }}

View file

@ -9,14 +9,23 @@ import (
"os"
"time"
"github.com/prometheus/client_golang/prometheus/promhttp"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
log "sigs.k8s.io/controller-runtime/pkg/log"
backwardcompatibility "github.com/kyverno/kyverno/pkg/backward_compatibility"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/config"
dclient "github.com/kyverno/kyverno/pkg/dclient"
event "github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/generate"
generatecleanup "github.com/kyverno/kyverno/pkg/generate/cleanup"
"github.com/kyverno/kyverno/pkg/leaderelection"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/openapi"
"github.com/kyverno/kyverno/pkg/policy"
"github.com/kyverno/kyverno/pkg/policycache"
@ -30,13 +39,6 @@ import (
"github.com/kyverno/kyverno/pkg/webhookconfig"
"github.com/kyverno/kyverno/pkg/webhooks"
webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/generate"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
log "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const resyncPeriod = 15 * time.Minute
@ -173,7 +175,11 @@ func main() {
debug,
log.Log)
webhookMonitor := webhookconfig.NewMonitor(kubeInformer.Core().V1().Secrets(), log.Log.WithName("WebhookMonitor"))
webhookMonitor, err := webhookconfig.NewMonitor(kubeClient, log.Log.WithName("WebhookMonitor"))
if err != nil {
setupLog.Error(err, "failed to initialize webhookMonitor")
os.Exit(1)
}
// KYVERNO CRD INFORMER
// watches CRD resources:
@ -208,7 +214,9 @@ func main() {
log.Log.WithName("ReportChangeRequestGenerator"),
)
prgen := policyreport.NewReportGenerator(pclient,
prgen, err := policyreport.NewReportGenerator(
kubeClient,
pclient,
client,
pInformer.Wgpolicyk8s().V1alpha1().ClusterPolicyReports(),
pInformer.Wgpolicyk8s().V1alpha1().PolicyReports(),
@ -218,6 +226,11 @@ func main() {
log.Log.WithName("PolicyReportGenerator"),
)
if err != nil {
setupLog.Error(err, "Failed to create policy report controller")
os.Exit(1)
}
// Configuration Data
// dynamically load the configuration from configMap
// - resource filters
@ -236,7 +249,9 @@ func main() {
// - reconciliation policy and policy violation
// - process policy on existing resources
// - status aggregator: receives stats when a policy is applied & updates the policy status
policyCtrl, err := policy.NewPolicyController(pclient,
policyCtrl, err := policy.NewPolicyController(
kubeClient,
pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
@ -263,6 +278,7 @@ func main() {
// GENERATE CONTROLLER
// - applies generate rules on resources based on generate requests created by webhook
grc, err := generate.NewController(
kubeClient,
pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
@ -282,6 +298,7 @@ func main() {
// GENERATE REQUEST CLEANUP
// -- cleans up the generate requests that have not been processed (i.e. state = [Pending, Failed]) for more than the defined timeout
grcc, err := generatecleanup.NewController(
kubeClient,
pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
@ -315,45 +332,58 @@ func main() {
promConfig,
)
certRenewer := ktls.NewCertRenewer(client, clientConfig, ktls.CertRenewalInterval, ktls.CertValidityDuration, log.Log.WithName("CertRenewer"))
// Configure certificates
tlsPair, err := certRenewer.InitTLSPemPair(serverIP)
certRenewer := ktls.NewCertRenewer(client, clientConfig, ktls.CertRenewalInterval, ktls.CertValidityDuration, serverIP, log.Log.WithName("CertRenewer"))
certManager, err := webhookconfig.NewCertManager(
kubeInformer.Core().V1().Secrets(),
kubeClient,
certRenewer,
log.Log.WithName("CertManager"),
stopCh,
)
if err != nil {
setupLog.Error(err, "Failed to initialize TLS key/certificate pair")
setupLog.Error(err, "failed to initialize CertManager")
os.Exit(1)
}
// Register webhookCfg
go func() {
registerTimeout := time.After(30 * time.Second)
registerTicker := time.NewTicker(time.Second)
defer registerTicker.Stop()
var err error
loop:
for {
select {
case <-registerTicker.C:
err = webhookCfg.Register()
if err != nil {
setupLog.V(3).Info("Failed to register admission control webhooks", "reason", err.Error())
} else {
break loop
}
case <-registerTimeout:
setupLog.Error(err, "Timeout registering admission control webhooks")
os.Exit(1)
}
registerWrapperRetry := common.RetryFunc(time.Second, 30*time.Second, webhookCfg.Register, setupLog)
registerWebhookConfigurations := func() {
certManager.InitTLSPemPair()
if registrationErr := registerWrapperRetry(); registrationErr != nil {
setupLog.Error(err, "Timeout registering admission control webhooks")
os.Exit(1)
}
}
// leader election context
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// cancel leader election context on shutdown signals
go func() {
<-stopCh
cancel()
}()
openAPIController, err := openapi.NewOpenAPIController()
// register webhooks by the leader; it's a one-time job
webhookRegisterLeader, err := leaderelection.New("webhook-register", config.KyvernoNamespace, kubeClient, registerWebhookConfigurations, nil, log.Log.WithName("webhookRegister/LeaderElection"))
if err != nil {
setupLog.Error(err, "Failed to create openAPIController")
setupLog.Error(err, "failed to elector leader")
os.Exit(1)
}
// Sync openAPI definitions of resources
openAPISync := openapi.NewCRDSync(client, openAPIController)
go webhookRegisterLeader.Run(ctx)
// the webhook server runs across all instances
openAPIController := startOpenAPIController(client, stopCh)
var tlsPair *ktls.PemPair
tlsPair, err = certManager.GetTLSPemPair()
if err != nil {
setupLog.Error(err, "Failed to get TLS key/certificate pair")
os.Exit(1)
}
// WEBHOOK
// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
@ -376,7 +406,6 @@ func main() {
pCacheController.Cache,
webhookCfg,
webhookMonitor,
certRenewer,
statusSync.Listener,
configData,
reportReqGen,
@ -387,7 +416,6 @@ func main() {
openAPIController,
rCache,
grc,
debug,
promConfig,
)
@ -396,44 +424,83 @@ func main() {
os.Exit(1)
}
// Start the components
// wrap all controllers that require leader election;
// they are started only by the leader
run := func() {
go certManager.Run(stopCh)
go policyCtrl.Run(2, prgen.ReconcileCh, stopCh)
go prgen.Run(1, stopCh)
go grc.Run(genWorkers, stopCh)
go grcc.Run(1, stopCh)
}
kubeClientLeaderElection, err := utils.NewKubeClient(clientConfig)
if err != nil {
setupLog.Error(err, "Failed to create kubernetes client")
os.Exit(1)
}
// cleanup Kyverno managed resources followed by webhook shutdown
// No need to exit here, as server.Stop(ctx) closes the cleanUp
// chan, thus the main process exits.
stop := func() {
c, cancel := context.WithCancel(context.Background())
defer cancel()
server.Stop(c)
}
le, err := leaderelection.New("kyverno", config.KyvernoNamespace, kubeClientLeaderElection, run, stop, log.Log.WithName("kyverno/LeaderElection"))
if err != nil {
setupLog.Error(err, "failed to elector leader")
os.Exit(1)
}
// init events handlers
// start Kyverno controllers
go le.Run(ctx)
go reportReqGen.Run(2, stopCh)
go configData.Run(stopCh)
go eventGenerator.Run(3, stopCh)
go grgen.Run(10, stopCh)
go statusSync.Run(1, stopCh)
go pCacheController.Run(1, stopCh)
go auditHandler.Run(10, stopCh)
if !debug {
go webhookMonitor.Run(webhookCfg, certRenewer, eventGenerator, stopCh)
}
go backwardcompatibility.AddLabels(pclient, pInformer.Kyverno().V1().GenerateRequests())
go backwardcompatibility.AddCloneLabel(client, pInformer.Kyverno().V1().ClusterPolicies())
pInformer.Start(stopCh)
kubeInformer.Start(stopCh)
kubedynamicInformer.Start(stopCh)
go reportReqGen.Run(2, stopCh)
go prgen.Run(1, stopCh)
go configData.Run(stopCh)
go policyCtrl.Run(2, prgen.ReconcileCh, stopCh)
go eventGenerator.Run(3, stopCh)
go grgen.Run(10, stopCh)
go grc.Run(genWorkers, stopCh)
go grcc.Run(1, stopCh)
go statusSync.Run(1, stopCh)
go pCacheController.Run(1, stopCh)
go auditHandler.Run(10, stopCh)
openAPISync.Run(1, stopCh)
// verifies if the admission control is enabled and active
server.RunAsync(stopCh)
go backwardcompatibility.AddLabels(pclient, pInformer.Kyverno().V1().GenerateRequests())
go backwardcompatibility.AddCloneLabel(client, pInformer.Kyverno().V1().ClusterPolicies())
<-stopCh
// by default http.Server waits indefinitely for connections to return to idle and then shuts down
// adding a threshold will handle zombie connections
// adjust the context deadline to 5 seconds
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer func() {
cancel()
}()
// cleanup webhookconfigurations followed by webhook shutdown
server.Stop(ctx)
// resource cleanup
// remove webhook configurations
<-cleanUp
setupLog.Info("Kyverno shutdown successful")
}
func startOpenAPIController(client *dclient.Client, stopCh <-chan struct{}) *openapi.Controller {
openAPIController, err := openapi.NewOpenAPIController()
if err != nil {
setupLog.Error(err, "Failed to create openAPIController")
os.Exit(1)
}
// Sync openAPI definitions of resources
openAPISync := openapi.NewCRDSync(client, openAPIController)
// start the openAPI controller; it is used during admission review
// and is therefore required in each instance
openAPISync.Run(1, stopCh)
return openAPIController
}

View file

@ -8,7 +8,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno
---
apiVersion: apiextensions.k8s.io/v1
@ -23,7 +23,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: clusterpolicies.kyverno.io
spec:
group: kyverno.io
@ -517,7 +517,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: clusterpolicyreports.wgpolicyk8s.io
spec:
group: wgpolicyk8s.io
@ -770,7 +770,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: clusterreportchangerequests.kyverno.io
spec:
group: kyverno.io
@ -1023,7 +1023,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: generaterequests.kyverno.io
spec:
group: kyverno.io
@ -1195,7 +1195,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: policies.kyverno.io
spec:
group: kyverno.io
@ -1689,7 +1689,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: policyreports.wgpolicyk8s.io
spec:
group: wgpolicyk8s.io
@ -1942,7 +1942,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: reportchangerequests.kyverno.io
spec:
group: kyverno.io
@ -2193,7 +2193,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno-service-account
namespace: kyverno
---
@ -2207,7 +2207,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:admin-policies
rules:
@ -2229,7 +2229,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:admin-policyreport
rules:
@ -2251,7 +2251,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:admin-reportchangerequest
rules:
@ -2273,7 +2273,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:customresources
rules:
- apiGroups:
@ -2318,7 +2318,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:generatecontroller
rules:
- apiGroups:
@ -2353,7 +2353,31 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:leaderelection
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:policycontroller
rules:
- apiGroups:
@ -2376,7 +2400,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:userinfo
rules:
- apiGroups:
@ -2402,7 +2426,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:webhook
rules:
- apiGroups:
@ -2454,7 +2478,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:customresources
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2475,7 +2499,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:generatecontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2496,7 +2520,28 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:leaderelection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:leaderelection
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:policycontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2517,7 +2562,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:userinfo
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2538,7 +2583,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2562,7 +2607,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: init-config
namespace: kyverno
---
@ -2576,7 +2621,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno-svc
namespace: kyverno
spec:
@ -2601,7 +2646,7 @@ metadata:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno
namespace: kyverno
spec:
@ -2619,7 +2664,7 @@ spec:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
spec:
containers:
- args:
@ -2634,7 +2679,7 @@ spec:
fieldPath: metadata.namespace
- name: KYVERNO_SVC
value: kyverno-svc
image: ghcr.io/kyverno/kyverno:v1.3.6
image: ghcr.io/kyverno/kyverno:v1.4.0-beta1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 2
@ -2679,7 +2724,7 @@ spec:
readOnlyRootFilesystem: true
runAsNonRoot: true
initContainers:
- image: ghcr.io/kyverno/kyvernopre:v1.3.6
- image: ghcr.io/kyverno/kyvernopre:v1.4.0-beta1
imagePullPolicy: IfNotPresent
name: kyverno-pre
resources:

View file

@ -2254,6 +2254,24 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
name: kyverno:leaderelection
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
@ -2367,6 +2385,21 @@ subjects:
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
name: kyverno:leaderelection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:leaderelection
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno

View file

@ -1,6 +1,21 @@
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:leaderelection
labels:
app: kyverno
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:leaderelection
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
app: kyverno

View file

@ -1,6 +1,24 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:leaderelection
labels:
app: kyverno
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno

View file

@ -12,7 +12,7 @@ resources:
images:
- name: ghcr.io/kyverno/kyverno
newName: ghcr.io/kyverno/kyverno
newTag: v1.3.6
newTag: v1.4.0-beta1
- name: ghcr.io/kyverno/kyvernopre
newName: ghcr.io/kyverno/kyvernopre
newTag: v1.3.6
newTag: v1.4.0-beta1

View file

@ -9,7 +9,7 @@ labels:
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.3.6
app.kubernetes.io/version: v1.4.0-beta1
fieldSpecs:
- path: metadata/labels
create: true

View file

@ -1,6 +1,14 @@
apiVersion: v1
kind: Namespace
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno
---
apiVersion: apiextensions.k8s.io/v1
@ -9,6 +17,13 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
creationTimestamp: null
labels:
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: clusterpolicies.kyverno.io
spec:
group: kyverno.io
@ -496,6 +511,13 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
creationTimestamp: null
labels:
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: clusterpolicyreports.wgpolicyk8s.io
spec:
group: wgpolicyk8s.io
@ -742,6 +764,13 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
creationTimestamp: null
labels:
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: clusterreportchangerequests.kyverno.io
spec:
group: kyverno.io
@ -988,6 +1017,13 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
creationTimestamp: null
labels:
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: generaterequests.kyverno.io
spec:
group: kyverno.io
@ -1153,6 +1189,13 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
creationTimestamp: null
labels:
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: policies.kyverno.io
spec:
group: kyverno.io
@ -1640,6 +1683,13 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
creationTimestamp: null
labels:
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: policyreports.wgpolicyk8s.io
spec:
group: wgpolicyk8s.io
@ -1886,6 +1936,13 @@ metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.4.0
creationTimestamp: null
labels:
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: reportchangerequests.kyverno.io
spec:
group: kyverno.io
@ -2129,6 +2186,14 @@ status:
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno-service-account
namespace: kyverno
---
@ -2136,6 +2201,13 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:admin-policies
rules:
@ -2151,6 +2223,13 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:admin-policyreport
rules:
@ -2166,6 +2245,13 @@ apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:admin-reportchangerequest
rules:
@ -2180,6 +2266,14 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:customresources
rules:
- apiGroups:
@ -2217,6 +2311,14 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:generatecontroller
rules:
- apiGroups:
@ -2244,6 +2346,38 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:leaderelection
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:policycontroller
rules:
- apiGroups:
@ -2259,6 +2393,14 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:userinfo
rules:
- apiGroups:
@ -2277,6 +2419,14 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:webhook
rules:
- apiGroups:
@ -2321,6 +2471,14 @@ rules:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:customresources
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2334,6 +2492,14 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:generatecontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2347,6 +2513,35 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:leaderelection
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:leaderelection
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:policycontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2360,6 +2555,14 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:userinfo
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2373,6 +2576,14 @@ subjects:
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno:webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
@ -2389,6 +2600,14 @@ data:
resourceFilters: '[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][SelfSubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*][ReportChangeRequest,*,*][ClusterReportChangeRequest,*,*][PolicyReport,*,*][ClusterPolicyReport,*,*]'
kind: ConfigMap
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: init-config
namespace: kyverno
---
@ -2397,13 +2616,22 @@ kind: Service
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno-svc
namespace: kyverno
spec:
ports:
- port: 443
- name: https
port: 443
targetPort: https
- name: metrics-port
port: 8000
targetPort: metrics-port
selector:
app: kyverno
app.kubernetes.io/name: kyverno
@ -2413,7 +2641,12 @@ kind: Deployment
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
name: kyverno
namespace: kyverno
spec:
@ -2426,7 +2659,12 @@ spec:
metadata:
labels:
app: kyverno
app.kubernetes.io/component: kyverno
app.kubernetes.io/instance: kyverno
app.kubernetes.io/managed-by: Kustomize
app.kubernetes.io/name: kyverno
app.kubernetes.io/part-of: kyverno
app.kubernetes.io/version: v1.4.0-beta1
spec:
containers:
- args:
@ -2441,7 +2679,7 @@ spec:
fieldPath: metadata.namespace
- name: KYVERNO_SVC
value: kyverno-svc
image: ghcr.io/kyverno/kyverno:v1.3.6
image: ghcr.io/kyverno/kyverno:v1.4.0-beta1
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 2
@ -2458,6 +2696,9 @@ spec:
- containerPort: 9443
name: https
protocol: TCP
- containerPort: 8000
name: metrics-port
protocol: TCP
readinessProbe:
failureThreshold: 4
httpGet:
@ -2483,7 +2724,7 @@ spec:
readOnlyRootFilesystem: true
runAsNonRoot: true
initContainers:
- image: ghcr.io/kyverno/kyvernopre:v1.3.6
- image: ghcr.io/kyverno/kyvernopre:v1.4.0-beta1
imagePullPolicy: IfNotPresent
name: kyverno-pre
resources:
@ -2503,4 +2744,4 @@ spec:
runAsNonRoot: true
securityContext:
runAsNonRoot: true
serviceAccountName: kyverno-service-account
serviceAccountName: kyverno-service-account

go.mod
View file

@ -14,7 +14,6 @@ require (
github.com/go-git/go-billy/v5 v5.0.0
github.com/go-git/go-git/v5 v5.2.0
github.com/go-logr/logr v0.4.0
github.com/google/uuid v1.1.2
github.com/googleapis/gnostic v0.5.4
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
github.com/julienschmidt/httprouter v1.3.0

go.sum
View file

@ -746,8 +746,6 @@ github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOms
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/sirupsen/logrus v1.7.0 h1:ShrD1U9pZB12TX0cVy0DtePoCH97K8EtX+mg7ZARUtM=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skyrings/skyring-common v0.0.0-20160929130248-d1c0bb1cbd5e/go.mod h1:d8hQseuYt4rJoOo21lFzYJdhMjmDqLY++ayArbgYjWI=

View file

@ -4,9 +4,11 @@ import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/go-logr/logr"
enginutils "github.com/kyverno/kyverno/pkg/engine/utils"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/informers"
@ -108,3 +110,29 @@ func VariableToJSON(key, value string) []byte {
var jsonData = []byte(finalString)
return jsonData
}
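// RetryFunc returns a function that calls run every retryInterval until it succeeds
// or timeout expires, returning the last error (wrapped) on timeout.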
func RetryFunc(retryInterval, timeout time.Duration, run func() error, logger logr.Logger) func() error {
return func() error {
registerTimeout := time.After(timeout)
registerTicker := time.NewTicker(retryInterval)
defer registerTicker.Stop()
var err error
loop:
for {
select {
case <-registerTicker.C:
err = run()
if err != nil {
logger.V(3).Info("Failed to register admission control webhooks", "reason", err.Error())
} else {
break loop
}
case <-registerTimeout:
return errors.Wrap(err, "Timeout registering admission control webhooks")
}
}
return nil
}
}
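For context, the new helper is consumed in cmd/kyverno/main.go above to retry webhook registration. A minimal usage sketch, mirroring that wiring (error handling abbreviated):

	// retry webhookCfg.Register every second, giving up after 30 seconds
	retryRegister := common.RetryFunc(time.Second, 30*time.Second, webhookCfg.Register, setupLog)
	if err := retryRegister(); err != nil {
		setupLog.Error(err, "timeout registering admission control webhooks")
		os.Exit(1)
	}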

View file

@ -3,6 +3,8 @@ package cleanup
import (
"time"
"k8s.io/client-go/kubernetes"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
@ -10,7 +12,6 @@ import (
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
"github.com/kyverno/kyverno/pkg/config"
dclient "github.com/kyverno/kyverno/pkg/dclient"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -27,37 +28,47 @@ const (
//Controller manages life-cycle of generate-requests
type Controller struct {
// dynamic client implementation
client *dclient.Client
// typed client for kyverno CRDs
kyvernoClient *kyvernoclient.Clientset
// handler for GR CR
syncHandler func(grKey string) error
// handler to enqueue GR
enqueueGR func(gr *kyverno.GenerateRequest)
pInformer kyvernoinformer.ClusterPolicyInformer
grInformer kyvernoinformer.GenerateRequestInformer
// control is used to delete the GR
control ControlInterface
// gr that need to be synced
queue workqueue.RateLimitingInterface
// pLister can list/get cluster policy from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// grLister can list/get generate request from the shared informer's store
grLister kyvernolister.GenerateRequestNamespaceLister
// pSynced returns true if the cluster policy has been synced at least once
pSynced cache.InformerSynced
// grSynced returns true if the generate request store has been synced at least once
grSynced cache.InformerSynced
// dynamic sharedinformer factory
dynamicInformer dynamicinformer.DynamicSharedInformerFactory
//TODO: list of generic informers
// only support Namespaces for deletion of resource
// namespace informer
nsInformer informers.GenericInformer
log logr.Logger
// logger
log logr.Logger
}
//NewController returns a new controller instance to manage generate-requests
func NewController(
kubeClient kubernetes.Interface,
kyvernoclient *kyvernoclient.Clientset,
client *dclient.Client,
pInformer kyvernoinformer.ClusterPolicyInformer,
@ -68,14 +79,14 @@ func NewController(
c := Controller{
kyvernoClient: kyvernoclient,
client: client,
pInformer: pInformer,
grInformer: grInformer,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "generate-request-cleanup"),
dynamicInformer: dynamicInformer,
log: log,
}
c.control = Control{client: kyvernoclient}
c.enqueueGR = c.enqueue
c.syncHandler = c.syncGenerateRequest
c.pLister = pInformer.Lister()
c.grLister = grInformer.Lister().GenerateRequests(config.KyvernoNamespace)
@ -83,18 +94,6 @@ func NewController(
c.pSynced = pInformer.Informer().HasSynced
c.grSynced = grInformer.Informer().HasSynced
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deletePolicy, // we only cleanup if the policy is delete
})
grInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addGR,
UpdateFunc: c.updateGR,
DeleteFunc: c.deleteGR,
})
//TODO: dynamic registration
// Only supported for namespaces
gvr, err := client.DiscoveryClient.GetGVRFromKind("Namespace")
if err != nil {
return nil, err
@ -102,9 +101,6 @@ func NewController(
nsInformer := dynamicInformer.ForResource(gvr)
c.nsInformer = nsInformer
c.nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deleteGenericResource,
})
return &c, nil
}
@ -119,7 +115,7 @@ func (c *Controller) deleteGenericResource(obj interface{}) {
}
// re-evaluate the GR as the resource was deleted
for _, gr := range grs {
c.enqueueGR(gr)
c.enqueue(gr)
}
}
@ -156,12 +152,12 @@ func (c *Controller) deletePolicy(obj interface{}) {
func (c *Controller) addGR(obj interface{}) {
gr := obj.(*kyverno.GenerateRequest)
c.enqueueGR(gr)
c.enqueue(gr)
}
func (c *Controller) updateGR(old, cur interface{}) {
gr := cur.(*kyverno.GenerateRequest)
c.enqueueGR(gr)
c.enqueue(gr)
}
func (c *Controller) deleteGR(obj interface{}) {
@ -198,7 +194,7 @@ func (c *Controller) deleteGR(obj interface{}) {
logger.V(4).Info("deleting Generate Request CR", "name", gr.Name)
// sync Handler will remove it from the queue
c.enqueueGR(gr)
c.enqueue(gr)
}
func (c *Controller) enqueue(gr *kyverno.GenerateRequest) {
@ -230,14 +226,30 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
logger.Info("failed to sync informer cache")
return
}
c.pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deletePolicy, // we only cleanup if the policy is delete
})
c.grInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addGR,
UpdateFunc: c.updateGR,
DeleteFunc: c.deleteGR,
})
c.nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deleteGenericResource,
})
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just de-queues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
// It enforces that the syncGenerateRequest is never invoked concurrently with the same key.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
@ -249,7 +261,7 @@ func (c *Controller) processNextWorkItem() bool {
return false
}
defer c.queue.Done(key)
err := c.syncHandler(key.(string))
err := c.syncGenerateRequest(key.(string))
c.handleErr(err, key)
return true
@ -262,7 +274,7 @@ func (c *Controller) handleErr(err error, key interface{}) {
return
}
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
logger.V(4).Info("dropping generate request", "key", key, "error", err.Error())
c.queue.Forget(key)
return
@ -287,7 +299,7 @@ func (c *Controller) syncGenerateRequest(key string) error {
logger.V(4).Info("finished syncing generate request", "processingTIme", time.Since(startTime).String())
}()
_, grName, err := cache.SplitMetaNamespaceKey(key)
if errors.IsNotFound(err) {
if apierrors.IsNotFound(err) {
logger.Info("generate request has been deleted")
return nil
}
@ -301,7 +313,7 @@ func (c *Controller) syncGenerateRequest(key string) error {
_, err = c.pLister.Get(gr.Spec.Policy)
if err != nil {
if !errors.IsNotFound(err) {
if !apierrors.IsNotFound(err) {
return err
}
c.control.Delete(gr.Name)

View file

@ -4,6 +4,8 @@ import (
"reflect"
"time"
"k8s.io/client-go/kubernetes"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
@ -14,7 +16,6 @@ import (
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/resourcecache"
"k8s.io/apimachinery/pkg/api/errors"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -37,6 +38,8 @@ type Controller struct {
// typed client for Kyverno CRDs
kyvernoClient *kyvernoclient.Clientset
policyInformer kyvernoinformer.ClusterPolicyInformer
// event generator interface
eventGen event.Interface
@ -73,6 +76,7 @@ type Controller struct {
//NewController returns an instance of the Generate-Request Controller
func NewController(
kubeClient kubernetes.Interface,
kyvernoClient *kyvernoclient.Clientset,
client *dclient.Client,
policyInformer kyvernoinformer.ClusterPolicyInformer,
@ -88,6 +92,7 @@ func NewController(
c := Controller{
client: client,
kyvernoClient: kyvernoClient,
policyInformer: policyInformer,
eventGen: eventGen,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "generate-request"),
dynamicInformer: dynamicInformer,
@ -99,11 +104,9 @@ func NewController(
c.statusControl = StatusControl{client: kyvernoClient}
policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updatePolicy, // We only handle updates to policy
// Deletion of policy will be handled by cleanup controller
})
c.policySynced = policyInformer.Informer().HasSynced
c.grSynced = grInformer.Informer().HasSynced
grInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: c.addGR,
UpdateFunc: c.updateGR,
@ -113,23 +116,110 @@ func NewController(
c.policyLister = policyInformer.Lister()
c.grLister = grInformer.Lister().GenerateRequests(config.KyvernoNamespace)
c.policySynced = policyInformer.Informer().HasSynced
c.grSynced = grInformer.Informer().HasSynced
//TODO: dynamic registration
// Only supported for namespaces
gvr, err := client.DiscoveryClient.GetGVRFromKind("Namespace")
if err != nil {
return nil, err
}
nsInformer := dynamicInformer.ForResource(gvr)
c.nsInformer = nsInformer
c.nsInformer = dynamicInformer.ForResource(gvr)
return &c, nil
}
// Run starts workers
func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
defer c.log.Info("shutting down")
if !cache.WaitForCacheSync(stopCh, c.policySynced, c.grSynced) {
c.log.Info("failed to sync informer cache")
return
}
c.policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updatePolicy, // We only handle updates to policy
// Deletion of policy will be handled by cleanup controller
})
c.nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updateGenericResource,
})
return &c, nil
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncGenerateRequest(key.(string))
c.handleErr(err, key)
return true
}
func (c *Controller) handleErr(err error, key interface{}) {
logger := c.log
if err == nil {
c.queue.Forget(key)
return
}
if apierrors.IsNotFound(err) {
c.queue.Forget(key)
logger.V(4).Info("Dropping generate request from the queue", "key", key, "error", err.Error())
return
}
if c.queue.NumRequeues(key) < maxRetries {
logger.V(3).Info("retrying generate request", "key", key, "error", err.Error())
c.queue.AddRateLimited(key)
return
}
logger.Error(err, "failed to process generate request", "key", key)
c.queue.Forget(key)
}
func (c *Controller) syncGenerateRequest(key string) error {
logger := c.log
var err error
startTime := time.Now()
logger.V(4).Info("started sync", "key", key, "startTime", startTime)
defer func() {
logger.V(4).Info("completed sync generate request", "key", key, "processingTime", time.Since(startTime).String())
}()
_, grName, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
gr, err := c.grLister.Get(grName)
if err != nil {
if apierrors.IsNotFound(err) {
return nil
}
logger.Error(err, "failed to fetch generate request", "key", key)
return err
}
return c.processGR(gr)
}
func (c *Controller) updateGenericResource(old, cur interface{}) {
@ -148,6 +238,11 @@ func (c *Controller) updateGenericResource(old, cur interface{}) {
}
}
// EnqueueGenerateRequestFromWebhook - enqueueing generate requests from webhook
func (c *Controller) EnqueueGenerateRequestFromWebhook(gr *kyverno.GenerateRequest) {
c.enqueueGenerateRequest(gr)
}
func (c *Controller) enqueueGenerateRequest(gr *kyverno.GenerateRequest) {
c.log.V(5).Info("enqueuing generate request", "gr", gr.Name)
key, err := cache.MetaNamespaceKeyFunc(gr)
@ -235,6 +330,7 @@ func (c *Controller) deleteGR(obj interface{}) {
return
}
}
for _, resource := range gr.Status.GeneratedResources {
r, err := c.client.GetResource(resource.APIVersion, resource.Kind, resource.Namespace, resource.Name)
if err != nil && !apierrors.IsNotFound(err) {
@ -254,100 +350,3 @@ func (c *Controller) deleteGR(obj interface{}) {
// sync Handler will remove it from the queue
c.enqueueGenerateRequest(gr)
}
//Run ...
func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
logger := c.log
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
logger.Info("starting", "workers", workers)
defer logger.Info("shutting down")
if !cache.WaitForCacheSync(stopCh, c.policySynced, c.grSynced) {
logger.Info("failed to sync informer cache")
return
}
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (c *Controller) worker() {
c.log.V(3).Info("starting new worker...")
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncGenerateRequest(key.(string))
c.handleErr(err, key)
return true
}
func (c *Controller) handleErr(err error, key interface{}) {
logger := c.log
if err == nil {
c.queue.Forget(key)
return
}
if errors.IsNotFound(err) {
c.queue.Forget(key)
logger.V(4).Info("Dropping generate request from the queue", "key", key, "error", err.Error())
return
}
if c.queue.NumRequeues(key) < maxRetries {
logger.V(3).Info("retrying generate request", "key", key, "error", err.Error())
c.queue.AddRateLimited(key)
return
}
logger.Error(err, "failed to process generate request", "key", key)
c.queue.Forget(key)
}
func (c *Controller) syncGenerateRequest(key string) error {
logger := c.log
var err error
startTime := time.Now()
logger.V(4).Info("started sync", "key", key, "startTime", startTime)
defer func() {
logger.V(4).Info("completed sync generate request", "key", key, "processingTime", time.Since(startTime).String())
}()
_, grName, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
gr, err := c.grLister.Get(grName)
if err != nil {
if errors.IsNotFound(err) {
return nil
}
logger.Error(err, "failed to list generate requests")
return err
}
return c.processGR(gr)
}
// EnqueueGenerateRequestFromWebhook - enqueueing generate requests from webhook
func (c *Controller) EnqueueGenerateRequestFromWebhook(gr *kyverno.GenerateRequest) {
c.enqueueGenerateRequest(gr)
}

View file

@ -0,0 +1,152 @@
package leaderelection
import (
"context"
"os"
"sync/atomic"
"time"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
)
type Interface interface {
// Run is a blocking call that runs a leader election
Run(ctx context.Context)
// ID returns this instances unique identifier
ID() string
// Name returns the name of the leader election
Name() string
// Namespace is the Kubernetes namespace used to coordinate the leader election
Namespace() string
// IsLeader indicates if this instance is the leader
IsLeader() bool
// GetLeader returns the leader ID
GetLeader() string
}
type Config struct {
name string
namespace string
id string
startWork func()
stopWork func()
kubeClient kubernetes.Interface
lock resourcelock.Interface
leaderElectionCfg leaderelection.LeaderElectionConfig
leaderElector *leaderelection.LeaderElector
isLeader int64
log logr.Logger
}
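// New builds a leader elector that coordinates via a coordination.k8s.io Lease lock
// in the given namespace; startWork runs when this instance acquires leadership and
// stopWork runs when leadership is lost.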
func New(name, namespace string, kubeClient kubernetes.Interface, startWork, stopWork func(), log logr.Logger) (Interface, error) {
id, err := os.Hostname()
if err != nil {
return nil, errors.Wrapf(err, "error getting host name: %s/%s", namespace, name)
}
id = id + "_" + string(uuid.NewUUID())
lock, err := resourcelock.New(
resourcelock.LeasesResourceLock,
namespace,
name,
kubeClient.CoreV1(),
kubeClient.CoordinationV1(),
resourcelock.ResourceLockConfig{
Identity: id,
},
)
if err != nil {
return nil, errors.Wrapf(err, "error initializing resource lock: %s/%s", namespace, name)
}
e := &Config{
name: name,
namespace: namespace,
kubeClient: kubeClient,
lock: lock,
startWork: startWork,
stopWork: stopWork,
log: log,
}
e.leaderElectionCfg = leaderelection.LeaderElectionConfig{
Lock: e.lock,
ReleaseOnCancel: true,
LeaseDuration: 15 * time.Second,
RenewDeadline: 10 * time.Second,
RetryPeriod: 2 * time.Second,
Callbacks: leaderelection.LeaderCallbacks{
OnStartedLeading: func(ctx context.Context) {
atomic.StoreInt64(&e.isLeader, 1)
e.log.WithValues("id", e.lock.Identity()).Info("started leading")
if e.startWork != nil {
e.startWork()
}
},
OnStoppedLeading: func() {
atomic.StoreInt64(&e.isLeader, 0)
e.log.WithValues("id", e.lock.Identity()).Info("leadership lost, stopped leading")
if e.stopWork != nil {
e.stopWork()
}
},
OnNewLeader: func(identity string) {
if identity == e.lock.Identity() {
return
}
e.log.WithValues("current id", e.lock.Identity(), "leader", identity).Info("another instance has been elected as leader")
},
}}
e.leaderElector, err = leaderelection.NewLeaderElector(e.leaderElectionCfg)
if err != nil {
e.log.Error(err, "failed to create leaderElector")
os.Exit(1)
}
if e.leaderElectionCfg.WatchDog != nil {
e.leaderElectionCfg.WatchDog.SetLeaderElection(e.leaderElector)
}
return e, nil
}
func (e *Config) Name() string {
return e.name
}
func (e *Config) Namespace() string {
return e.namespace
}
func (e *Config) ID() string {
return e.lock.Identity()
}
func (e *Config) IsLeader() bool {
return atomic.LoadInt64(&e.isLeader) == 1
}
func (e *Config) GetLeader() string {
return e.leaderElector.GetLeader()
}
func (e *Config) Run(ctx context.Context) {
e.leaderElector.Run(ctx)
}
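
The snippet below is a minimal usage sketch of this leaderelection package, not code from this commit: the import path github.com/kyverno/kyverno/pkg/leaderelection, the lock name/namespace, and the start/stop callbacks are assumptions, while New, ID, Namespace, and Run mirror the API added above.

package main

import (
    "context"

    "github.com/go-logr/logr"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/rest"

    "github.com/kyverno/kyverno/pkg/leaderelection"
)

// runWithLeaderElection shows the intended call pattern: build a kube client,
// hand New a pair of start/stop callbacks, and let Run block for the lifetime
// of the process.
func runWithLeaderElection(ctx context.Context, cfg *rest.Config, log logr.Logger) error {
    kubeClient, err := kubernetes.NewForConfig(cfg)
    if err != nil {
        return err
    }

    start := func() { log.Info("became the leader, starting controllers") }
    stop := func() { log.Info("lost leadership, stopping controllers") }

    le, err := leaderelection.New("kyverno", "kyverno", kubeClient, start, stop, log)
    if err != nil {
        return err
    }

    log.Info("starting leader election", "id", le.ID(), "namespace", le.Namespace())
    le.Run(ctx) // blocks until ctx is cancelled; ReleaseOnCancel releases the lease on exit
    return nil
}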

View file

@ -33,6 +33,7 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
listerv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
@ -56,6 +57,9 @@ const (
type PolicyController struct {
client *client.Client
kyvernoClient *kyvernoclient.Clientset
pInformer kyvernoinformer.ClusterPolicyInformer
npInformer kyvernoinformer.PolicyInformer
eventGen event.Interface
eventRecorder record.EventRecorder
@ -114,7 +118,9 @@ type PolicyController struct {
}
// NewPolicyController create a new PolicyController
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
func NewPolicyController(
kubeClient kubernetes.Interface,
kyvernoClient *kyvernoclient.Clientset,
client *client.Client,
pInformer kyvernoinformer.ClusterPolicyInformer,
npInformer kyvernoinformer.PolicyInformer,
@ -141,35 +147,26 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
pc := PolicyController{
client: client,
kyvernoClient: kyvernoClient,
pInformer: pInformer,
npInformer: npInformer,
eventGen: eventGen,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
prGenerator: prGenerator,
policyReportEraser: policyReportEraser,
log: log,
resCache: resCache,
reconcilePeriod: reconcilePeriod,
promConfig: promConfig,
log: log,
}
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addPolicy,
UpdateFunc: pc.updatePolicy,
DeleteFunc: pc.deletePolicy,
})
npInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addNsPolicy,
UpdateFunc: pc.updateNsPolicy,
DeleteFunc: pc.deleteNsPolicy,
})
pc.pLister = pInformer.Lister()
pc.npLister = npInformer.Lister()
pc.nsLister = namespaces.Lister()
pc.grLister = grInformer.Lister()
pc.pListerSynced = pInformer.Informer().HasSynced
pc.npListerSynced = npInformer.Informer().HasSynced
@ -553,6 +550,18 @@ func (pc *PolicyController) Run(workers int, reconcileCh <-chan bool, stopCh <-c
return
}
pc.pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addPolicy,
UpdateFunc: pc.updatePolicy,
DeleteFunc: pc.deletePolicy,
})
pc.npInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addNsPolicy,
UpdateFunc: pc.updateNsPolicy,
DeleteFunc: pc.deleteNsPolicy,
})
for i := 0; i < workers; i++ {
go wait.Until(pc.worker, time.Second, stopCh)
}
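
Because the policy informer event handlers now register inside Run rather than in the constructor, only the replica that actually calls Run reacts to policy events. The sketch below is a hypothetical wiring helper, not this commit's main.go: the import paths, election name, and worker count are assumptions, while the Run signature and the leaderelection API follow this diff. The ReportGenerator change that follows applies the same pattern.

package main

import (
    "context"
    "os"

    "github.com/go-logr/logr"
    "k8s.io/client-go/kubernetes"

    "github.com/kyverno/kyverno/pkg/leaderelection"
    "github.com/kyverno/kyverno/pkg/policy"
)

// runPolicyControllerAsLeader assumes pc was built elsewhere with the new
// kubeClient-first constructor and starts it only once this replica wins
// the election, so non-leaders keep their caches warm but never enqueue work.
func runPolicyControllerAsLeader(
    ctx context.Context,
    namespace string,
    kubeClient kubernetes.Interface,
    pc *policy.PolicyController,
    reconcileCh <-chan bool,
    stopCh <-chan struct{},
    log logr.Logger,
) {
    startWork := func() {
        // Event handlers are added inside Run, so only the leader processes events.
        go pc.Run(2, reconcileCh, stopCh)
    }

    le, err := leaderelection.New("kyverno-policy-controller", namespace, kubeClient, startWork, nil, log)
    if err != nil {
        log.Error(err, "failed to set up leader election")
        os.Exit(1)
    }
    le.Run(ctx) // blocks; the callback fires on leadership transitions
}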

View file

@ -7,15 +7,6 @@ import (
"time"
"github.com/go-logr/logr"
changerequest "github.com/kyverno/kyverno/pkg/api/kyverno/v1alpha1"
report "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
requestinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
policyreportinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/policyreport/v1alpha1"
requestlister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1alpha1"
policyreport "github.com/kyverno/kyverno/pkg/client/listers/policyreport/v1alpha1"
"github.com/kyverno/kyverno/pkg/config"
dclient "github.com/kyverno/kyverno/pkg/dclient"
v1 "k8s.io/api/core/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -26,9 +17,20 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
listerv1 "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
changerequest "github.com/kyverno/kyverno/pkg/api/kyverno/v1alpha1"
report "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
requestinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
policyreportinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/policyreport/v1alpha1"
requestlister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1alpha1"
policyreport "github.com/kyverno/kyverno/pkg/client/listers/policyreport/v1alpha1"
"github.com/kyverno/kyverno/pkg/config"
dclient "github.com/kyverno/kyverno/pkg/dclient"
)
const (
@ -41,6 +43,11 @@ type ReportGenerator struct {
pclient *kyvernoclient.Clientset
dclient *dclient.Client
clusterReportInformer policyreportinformer.ClusterPolicyReportInformer
reportInformer policyreportinformer.PolicyReportInformer
reportReqInformer requestinformer.ReportChangeRequestInformer
clusterReportReqInformer requestinformer.ClusterReportChangeRequestInformer
reportLister policyreport.PolicyReportLister
reportSynced cache.InformerSynced
@ -67,6 +74,7 @@ type ReportGenerator struct {
// NewReportGenerator returns a new instance of policy report generator
func NewReportGenerator(
kubeClient kubernetes.Interface,
pclient *kyvernoclient.Clientset,
dclient *dclient.Client,
clusterReportInformer policyreportinformer.ClusterPolicyReportInformer,
@ -74,38 +82,20 @@ func NewReportGenerator(
reportReqInformer requestinformer.ReportChangeRequestInformer,
clusterReportReqInformer requestinformer.ClusterReportChangeRequestInformer,
namespace informers.NamespaceInformer,
log logr.Logger) *ReportGenerator {
log logr.Logger) (*ReportGenerator, error) {
gen := &ReportGenerator{
pclient: pclient,
dclient: dclient,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), prWorkQueueName),
ReconcileCh: make(chan bool, 10),
log: log,
pclient: pclient,
dclient: dclient,
clusterReportInformer: clusterReportInformer,
reportInformer: reportInformer,
reportReqInformer: reportReqInformer,
clusterReportReqInformer: clusterReportReqInformer,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), prWorkQueueName),
ReconcileCh: make(chan bool, 10),
log: log,
}
reportReqInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: gen.addReportChangeRequest,
UpdateFunc: gen.updateReportChangeRequest,
})
clusterReportReqInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: gen.addClusterReportChangeRequest,
UpdateFunc: gen.updateClusterReportChangeRequest,
})
reportInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
DeleteFunc: gen.deletePolicyReport,
})
clusterReportInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
DeleteFunc: gen.deleteClusterPolicyReport,
})
gen.clusterReportLister = clusterReportInformer.Lister()
gen.clusterReportSynced = clusterReportInformer.Informer().HasSynced
gen.reportLister = reportInformer.Lister()
@ -117,7 +107,7 @@ func NewReportGenerator(
gen.nsLister = namespace.Lister()
gen.nsListerSynced = namespace.Informer().HasSynced
return gen
return gen, nil
}
const deletedPolicyKey string = "deletedpolicy"
@ -209,6 +199,28 @@ func (g *ReportGenerator) Run(workers int, stopCh <-chan struct{}) {
logger.Info("failed to sync informer cache")
}
g.reportReqInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: g.addReportChangeRequest,
UpdateFunc: g.updateReportChangeRequest,
})
g.clusterReportReqInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
AddFunc: g.addClusterReportChangeRequest,
UpdateFunc: g.updateClusterReportChangeRequest,
})
g.reportInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
DeleteFunc: g.deletePolicyReport,
})
g.clusterReportInformer.Informer().AddEventHandler(
cache.ResourceEventHandlerFuncs{
DeleteFunc: g.deleteClusterPolicyReport,
})
for i := 0; i < workers; i++ {
go wait.Until(g.runWorker, time.Second, stopCh)
}

View file

@ -36,24 +36,37 @@ type CertRenewer struct {
clientConfig *rest.Config
certRenewalInterval time.Duration
certValidityDuration time.Duration
log logr.Logger
// IP address where Kyverno controller runs. Only required if out-of-cluster.
serverIP string
log logr.Logger
}
// NewCertRenewer returns an instance of CertRenewer
func NewCertRenewer(client *client.Client, clientConfig *rest.Config, certRenewalInterval, certValidityDuration time.Duration, log logr.Logger) *CertRenewer {
func NewCertRenewer(client *client.Client, clientConfig *rest.Config, certRenewalInterval, certValidityDuration time.Duration, serverIP string, log logr.Logger) *CertRenewer {
return &CertRenewer{
client: client,
clientConfig: clientConfig,
certRenewalInterval: certRenewalInterval,
certValidityDuration: certValidityDuration,
serverIP: serverIP,
log: log,
}
}
func (c *CertRenewer) Client() *client.Client {
return c.client
}
func (c *CertRenewer) ClientConfig() *rest.Config {
return c.clientConfig
}
// InitTLSPemPair loads or creates the PEM private key and TLS certificate for the webhook server.
// The created pair is stored in a cluster secret.
// Returns a struct with the key/certificate pair.
func (c *CertRenewer) InitTLSPemPair(serverIP string) (*PemPair, error) {
func (c *CertRenewer) InitTLSPemPair() (*PemPair, error) {
logger := c.log.WithName("InitTLSPemPair")
certProps, err := GetTLSCertProps(c.clientConfig)
if err != nil {
@ -70,7 +83,7 @@ func (c *CertRenewer) InitTLSPemPair(serverIP string) (*PemPair, error) {
}
logger.Info("building key/certificate pair for TLS")
return c.buildTLSPemPairAndWriteToSecrets(certProps, serverIP)
return c.buildTLSPemPairAndWriteToSecrets(certProps, c.serverIP)
}
// buildTLSPemPairAndWriteToSecrets Issues TLS certificate for webhook server using self-signed CA cert
@ -218,7 +231,7 @@ func (c *CertRenewer) RollingUpdate() error {
return errors.Wrap(err, "failed to find Kyverno")
}
if IsKyvernoIsInRollingUpdate(deploy.UnstructuredContent(), c.log) {
if IsKyvernoInRollingUpdate(deploy.UnstructuredContent(), c.log) {
return nil
}
@ -319,8 +332,8 @@ func (c *CertRenewer) ValidCert() (bool, error) {
return true, nil
}
// IsKyvernoIsInRollingUpdate returns true if Kyverno is in rolling update
func IsKyvernoIsInRollingUpdate(deploy map[string]interface{}, logger logr.Logger) bool {
// IsKyvernoInRollingUpdate returns true if Kyverno is in rolling update
func IsKyvernoInRollingUpdate(deploy map[string]interface{}, logger logr.Logger) bool {
replicas, _, err := unstructured.NestedInt64(deploy, "spec", "replicas")
if err != nil {
logger.Error(err, "unable to fetch spec.replicas")

View file

@ -0,0 +1,180 @@
package webhookconfig
import (
"os"
"reflect"
"strings"
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/tls"
ktls "github.com/kyverno/kyverno/pkg/tls"
v1 "k8s.io/api/core/v1"
informerv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
type Interface interface {
// Run starts the certManager
Run(stopCh <-chan struct{})
// InitTLSPemPair initializes the TLSPemPair
// it should be invoked by the leader
InitTLSPemPair()
// GetTLSPemPair gets the existing TLSPemPair from the secret
GetTLSPemPair() (*ktls.PemPair, error)
}
type certManager struct {
renewer *tls.CertRenewer
secretInformer informerv1.SecretInformer
secretQueue chan bool
stopCh <-chan struct{}
log logr.Logger
}
func NewCertManager(secretInformer informerv1.SecretInformer, kubeClient kubernetes.Interface, certRenewer *tls.CertRenewer, log logr.Logger, stopCh <-chan struct{}) (Interface, error) {
manager := &certManager{
renewer: certRenewer,
secretInformer: secretInformer,
secretQueue: make(chan bool, 1),
stopCh: stopCh,
log: log,
}
secretInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: manager.addSecretFunc,
UpdateFunc: manager.updateSecretFunc,
})
return manager, nil
}
func (m *certManager) addSecretFunc(obj interface{}) {
secret := obj.(*v1.Secret)
if secret.GetNamespace() != config.KyvernoNamespace {
return
}
val, ok := secret.GetAnnotations()[tls.SelfSignedAnnotation]
if !ok || val != "true" {
return
}
m.secretQueue <- true
}
func (m *certManager) updateSecretFunc(oldObj interface{}, newObj interface{}) {
old := oldObj.(*v1.Secret)
new := newObj.(*v1.Secret)
if new.GetNamespace() != config.KyvernoNamespace {
return
}
val, ok := new.GetAnnotations()[tls.SelfSignedAnnotation]
if !ok || val != "true" {
return
}
if reflect.DeepEqual(old.DeepCopy().Data, new.DeepCopy().Data) {
return
}
m.secretQueue <- true
m.log.V(4).Info("secret updated, reconciling webhook configurations")
}
func (m *certManager) InitTLSPemPair() {
_, err := m.renewer.InitTLSPemPair()
if err != nil {
m.log.Error(err, "initialaztion error")
os.Exit(1)
}
}
func (m *certManager) GetTLSPemPair() (*ktls.PemPair, error) {
var tls *ktls.PemPair
var err error
retryReadTLS := func() error {
tls, err = ktls.ReadTLSPair(m.renewer.ClientConfig(), m.renewer.Client())
if err != nil {
return err
}
m.log.Info("read TLS pem pair from the secret")
return nil
}
f := common.RetryFunc(time.Second, time.Minute, retryReadTLS, m.log.WithName("GetTLSPemPair/Retry"))
err = f()
return tls, err
}
func (m *certManager) Run(stopCh <-chan struct{}) {
if !cache.WaitForCacheSync(stopCh, m.secretInformer.Informer().HasSynced) {
m.log.Info("failed to sync informer cache")
return
}
m.secretInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: m.addSecretFunc,
UpdateFunc: m.updateSecretFunc,
})
m.log.Info("start managing certificate")
certsRenewalTicker := time.NewTicker(tls.CertRenewalInterval)
defer certsRenewalTicker.Stop()
for {
select {
case <-certsRenewalTicker.C:
valid, err := m.renewer.ValidCert()
if err != nil {
m.log.Error(err, "failed to validate cert")
if !strings.Contains(err.Error(), tls.ErrorsNotFound) {
continue
}
}
if valid {
continue
}
m.log.Info("rootCA is about to expire, trigger a rolling update to renew the cert")
if err := m.renewer.RollingUpdate(); err != nil {
m.log.Error(err, "unable to trigger a rolling update to renew rootCA, force restarting")
os.Exit(1)
}
case <-m.secretQueue:
valid, err := m.renewer.ValidCert()
if err != nil {
m.log.Error(err, "failed to validate cert")
if !strings.Contains(err.Error(), tls.ErrorsNotFound) {
continue
}
}
if valid {
continue
}
m.log.Info("rootCA has changed, updating webhook configurations")
if err := m.renewer.RollingUpdate(); err != nil {
m.log.Error(err, "unable to trigger a rolling update to re-register webhook server, force restarting")
os.Exit(1)
}
case <-m.stopCh:
m.log.V(2).Info("stopping cert renewer")
return
}
}
}
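
A hypothetical start-up sketch for this cert manager follows; it is not part of the commit. The webhookconfig import path and the isLeader flag are assumptions, while the constructor signature, the leader-only InitTLSPemPair call, and Run match the code above.

package main

import (
    "github.com/go-logr/logr"
    informerv1 "k8s.io/client-go/informers/core/v1"
    "k8s.io/client-go/kubernetes"

    "github.com/kyverno/kyverno/pkg/tls"
    "github.com/kyverno/kyverno/pkg/webhookconfig"
)

// startCertManager sketches the intended call order: construct the manager,
// let the leader create or load the TLS pair, then run the renewal loop.
func startCertManager(
    secretInformer informerv1.SecretInformer,
    kubeClient kubernetes.Interface,
    renewer *tls.CertRenewer,
    isLeader bool,
    log logr.Logger,
    stopCh <-chan struct{},
) (webhookconfig.Interface, error) {
    cm, err := webhookconfig.NewCertManager(secretInformer, kubeClient, renewer, log, stopCh)
    if err != nil {
        return nil, err
    }
    if isLeader {
        cm.InitTLSPemPair() // exits the process on failure, as in the diff
    }
    go cm.Run(stopCh)
    return cm, nil
}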

View file

@ -70,7 +70,8 @@ func (wrc *Register) constructOwner() v1.OwnerReference {
}
}
// GetKubePolicyDeployment gets Kyverno deployment
// GetKubePolicyDeployment gets the Kyverno deployment using the resource cache;
// it does not make any API calls
func (wrc *Register) GetKubePolicyDeployment() (*apps.Deployment, *unstructured.Unstructured, error) {
lister, _ := wrc.resCache.GetGVRCache("Deployment")
kubePolicyDeployment, err := lister.NamespacedLister(config.KyvernoNamespace).Get(config.KyvernoDeploymentName)

View file

@ -2,26 +2,22 @@ package webhookconfig
import (
"fmt"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/tls"
v1 "k8s.io/api/core/v1"
informerv1 "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/kubernetes"
)
//maxRetryCount defines the max deadline count
const (
tickerInterval time.Duration = 30 * time.Second
idleCheckInterval time.Duration = 60 * time.Second
idleDeadline time.Duration = idleCheckInterval * 2
idleDeadline time.Duration = idleCheckInterval * 5
)
// Monitor stores the last webhook request time and monitors registered webhooks.
@ -30,115 +26,75 @@ const (
// change in the Kyverno deployment to force a webhook request. If no requests
// are received after idleDeadline the webhooks are deleted and re-registered.
//
// Webhook configurations are checked every tickerInterval. Currently the check
// only queries for the expected resource name, and does not compare other details
// like the webhook settings.
// Each instance keeps an in-memory timestamp, lastSeenRequestTime, that records
// the last admission request received by that instance. The latest timestamp
// across instances (latestTimestamp) is stored in an annotation on the Kyverno
// deployment, which any instance may update. If the time elapsed since
// latestTimestamp exceeds idleCheckInterval, the monitor triggers an annotation
// update; otherwise lastSeenRequestTime is advanced to latestTimestamp.
//
// Webhook configurations are checked every tickerInterval across all instances.
// Currently the check only queries for the expected resource name and does not
// compare other details such as the webhook settings.
//
type Monitor struct {
t time.Time
mu sync.RWMutex
secretQueue chan bool
log logr.Logger
// lastSeenRequestTime records the timestamp
// of the latest received admission request
lastSeenRequestTime time.Time
mu sync.RWMutex
log logr.Logger
}
//NewMonitor returns a new instance of webhook monitor
func NewMonitor(nsInformer informerv1.SecretInformer, log logr.Logger) *Monitor {
// NewMonitor returns a new instance of webhook monitor
func NewMonitor(kubeClient kubernetes.Interface, log logr.Logger) (*Monitor, error) {
monitor := &Monitor{
t: time.Now(),
secretQueue: make(chan bool, 1),
log: log,
lastSeenRequestTime: time.Now(),
log: log,
}
nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: monitor.addSecretFunc,
UpdateFunc: monitor.updateSecretFunc,
})
return monitor
return monitor, nil
}
//Time returns the last request time
// Time returns the last request time
func (t *Monitor) Time() time.Time {
t.mu.RLock()
defer t.mu.RUnlock()
return t.t
return t.lastSeenRequestTime
}
//SetTime updates the last request time
// SetTime updates the last request time
func (t *Monitor) SetTime(tm time.Time) {
t.mu.Lock()
defer t.mu.Unlock()
t.t = tm
t.lastSeenRequestTime = tm
}
func (t *Monitor) addSecretFunc(obj interface{}) {
secret := obj.(*v1.Secret)
if secret.GetNamespace() != config.KyvernoNamespace {
return
}
val, ok := secret.GetAnnotations()[tls.SelfSignedAnnotation]
if !ok || val != "true" {
return
}
t.secretQueue <- true
}
func (t *Monitor) updateSecretFunc(oldObj interface{}, newObj interface{}) {
old := oldObj.(*v1.Secret)
new := newObj.(*v1.Secret)
if new.GetNamespace() != config.KyvernoNamespace {
return
}
val, ok := new.GetAnnotations()[tls.SelfSignedAnnotation]
if !ok || val != "true" {
return
}
if reflect.DeepEqual(old.DeepCopy().Data, new.DeepCopy().Data) {
return
}
t.secretQueue <- true
t.log.V(4).Info("secret updated, reconciling webhook configurations")
}
//Run runs the checker and verify the resource update
// Run runs the checker and verifies resource updates
func (t *Monitor) Run(register *Register, certRenewer *tls.CertRenewer, eventGen event.Interface, stopCh <-chan struct{}) {
logger := t.log
logger.V(4).Info("starting webhook monitor", "interval", idleCheckInterval)
status := newStatusControl(register.client, eventGen, logger.WithName("WebhookStatusControl"))
status := newStatusControl(register, eventGen, t.log.WithName("WebhookStatusControl"))
ticker := time.NewTicker(tickerInterval)
defer ticker.Stop()
certsRenewalTicker := time.NewTicker(tls.CertRenewalInterval)
defer certsRenewalTicker.Stop()
for {
select {
case <-ticker.C:
if skipWebhookCheck(register, logger.WithName("statusCheck/skipWebhookCheck")) {
logger.Info("skip validating webhook status, Kyverno is in rolling update")
continue
}
if err := register.Check(); err != nil {
t.log.Error(err, "missing webhooks")
if err := register.Register(); err != nil {
logger.Error(err, "failed to register webhooks")
}
continue
err := registerWebhookIfNotPresent(register, t.log.WithName("registerWebhookIfNotPresent"))
if err != nil {
t.log.Error(err, "")
}
timeDiff := time.Since(t.Time())
if timeDiff > idleDeadline {
err := fmt.Errorf("admission control configuration error")
logger.Error(err, "webhook check failed", "deadline", idleDeadline)
logger.Error(err, "webhook check failed", "deadline", idleDeadline.String())
if err := status.failure(); err != nil {
logger.Error(err, "failed to annotate deployment webhook status to failure")
}
@ -151,18 +107,38 @@ func (t *Monitor) Run(register *Register, certRenewer *tls.CertRenewer, eventGen
}
if timeDiff > idleCheckInterval {
logger.V(1).Info("webhook idle time exceeded", "deadline", idleCheckInterval)
if skipWebhookCheck(register, logger.WithName("skipWebhookCheck")) {
logger.Info("skip validating webhook status, Kyverno is in rolling update")
continue
}
// send request to update the Kyverno deployment
if err := status.IncrementAnnotation(); err != nil {
logger.Error(err, "failed to annotate deployment for webhook status")
lastRequestTimeFromAnn := lastRequestTimeFromAnnotation(register, t.log.WithName("lastRequestTimeFromAnnotation"))
if lastRequestTimeFromAnn == nil {
now := time.Now()
lastRequestTimeFromAnn = &now
if err := status.UpdateLastRequestTimestmap(t.Time()); err != nil {
logger.Error(err, "failed to annotate deployment for lastRequestTime")
} else {
logger.Info("initialized lastRequestTimestamp", "time", lastRequestTimeFromAnn)
}
continue
}
continue
if t.Time().Before(*lastRequestTimeFromAnn) {
t.SetTime(*lastRequestTimeFromAnn)
logger.V(3).Info("updated in-memory timestamp", "time", lastRequestTimeFromAnn)
continue
}
idleT := time.Since(*lastRequestTimeFromAnn)
if idleT > idleCheckInterval*2 {
logger.V(3).Info("webhook idle time exceeded", "lastRequestTimeFromAnn", (*lastRequestTimeFromAnn).String(), "deadline", (idleCheckInterval * 2).String())
if err := status.UpdateLastRequestTimestmap(t.Time()); err != nil {
logger.Error(err, "failed to update lastRequestTimestamp annotation")
} else {
logger.V(3).Info("updated annotation lastRequestTimestamp", "time", t.Time())
}
}
}
// if the status was false before then we update it to true
@ -171,46 +147,6 @@ func (t *Monitor) Run(register *Register, certRenewer *tls.CertRenewer, eventGen
logger.Error(err, "failed to annotate deployment webhook status to success")
}
case <-certsRenewalTicker.C:
valid, err := certRenewer.ValidCert()
if err != nil {
logger.Error(err, "failed to validate cert")
if !strings.Contains(err.Error(), tls.ErrorsNotFound) {
continue
}
}
if valid {
continue
}
logger.Info("rootCA is about to expire, trigger a rolling update to renew the cert")
if err := certRenewer.RollingUpdate(); err != nil {
logger.Error(err, "unable to trigger a rolling update to renew rootCA, force restarting")
os.Exit(1)
}
case <-t.secretQueue:
valid, err := certRenewer.ValidCert()
if err != nil {
logger.Error(err, "failed to validate cert")
if !strings.Contains(err.Error(), tls.ErrorsNotFound) {
continue
}
}
if valid {
continue
}
logger.Info("rootCA has changed, updating webhook configurations")
if err := certRenewer.RollingUpdate(); err != nil {
logger.Error(err, "unable to trigger a rolling update to re-register webhook server, force restarting")
os.Exit(1)
}
case <-stopCh:
// handle termination signal
logger.V(2).Info("stopping webhook monitor")
@ -219,6 +155,51 @@ func (t *Monitor) Run(register *Register, certRenewer *tls.CertRenewer, eventGen
}
}
func registerWebhookIfNotPresent(register *Register, logger logr.Logger) error {
if skipWebhookCheck(register, logger.WithName("skipWebhookCheck")) {
logger.Info("skip validating webhook status, Kyverno is in rolling update")
return nil
}
if err := register.Check(); err != nil {
logger.Error(err, "missing webhooks")
if err := register.Register(); err != nil {
return errors.Wrap(err, "failed to register webhooks")
}
}
return nil
}
func lastRequestTimeFromAnnotation(register *Register, logger logr.Logger) *time.Time {
_, deploy, err := register.GetKubePolicyDeployment()
if err != nil {
logger.Info("unable to get Kyverno deployment", "reason", err.Error())
return nil
}
annotation, ok, err := unstructured.NestedStringMap(deploy.UnstructuredContent(), "metadata", "annotations")
if err != nil {
logger.Info("unable to get annotation", "reason", err.Error())
return nil
}
if !ok {
logger.Info("timestamp not set in the annotation, setting")
return nil
}
timeStamp := annotation[annLastRequestTime]
annTime, err := time.Parse(time.RFC3339, timeStamp)
if err != nil {
logger.Error(err, "failed to parse timestamp annotation")
return nil
}
return &annTime
}
// skipWebhookCheck returns true if Kyverno is in rolling update
func skipWebhookCheck(register *Register, logger logr.Logger) bool {
_, deploy, err := register.GetKubePolicyDeployment()
@ -227,5 +208,5 @@ func skipWebhookCheck(register *Register, logger logr.Logger) bool {
return false
}
return tls.IsKyvernoIsInRollingUpdate(deploy.UnstructuredContent(), logger)
return tls.IsKyvernoInRollingUpdate(deploy.UnstructuredContent(), logger)
}
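
To make the heartbeat logic in Monitor.Run easier to follow, here is a standalone, illustrative model of the per-tick decision; it is not the monitor.go code. lastSeen is the in-memory timestamp and annTime the value read from the kyverno.io/last-request-time annotation.

package main

import (
    "fmt"
    "time"
)

// decideHeartbeat models the branches taken on each tick once the idle check
// interval has elapsed: initialize the annotation, catch up the in-memory
// timestamp, or write the in-memory timestamp to force an admission request.
func decideHeartbeat(lastSeen time.Time, annTime *time.Time, idleCheckInterval time.Duration) string {
    switch {
    case annTime == nil:
        return "initialize the annotation with the in-memory timestamp"
    case lastSeen.Before(*annTime):
        return "another replica saw a newer request; update the in-memory timestamp"
    case time.Since(*annTime) > idleCheckInterval*2:
        return "cluster looks idle; write the in-memory timestamp to the annotation"
    default:
        return "healthy; nothing to do"
    }
}

func main() {
    now := time.Now()
    stale := now.Add(-5 * time.Minute)
    fmt.Println(decideHeartbeat(now, nil, time.Minute))
    fmt.Println(decideHeartbeat(stale, &now, time.Minute))
    fmt.Println(decideHeartbeat(now, &stale, time.Minute))
}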

View file

@ -297,7 +297,7 @@ func (wrc *Register) createVerifyMutatingWebhookConfiguration(caData []byte) err
func (wrc *Register) removeWebhookConfigurations() {
startTime := time.Now()
wrc.log.Info("deleting all webhook configurations")
wrc.log.V(3).Info("deleting all webhook configurations")
defer func() {
wrc.log.V(4).Info("removed webhook configurations", "processingTime", time.Since(startTime).String())
}()
@ -497,7 +497,9 @@ func (wrc *Register) removeSecrets() {
for _, secret := range secretList.Items {
if err := wrc.client.DeleteResource("", "Secret", secret.GetNamespace(), secret.GetName(), false); err != nil {
wrc.log.Error(err, "failed to delete secret", "ns", secret.GetNamespace(), "name", secret.GetName())
if !errorsapi.IsNotFound(err) {
wrc.log.Error(err, "failed to delete secret", "ns", secret.GetNamespace(), "name", secret.GetName())
}
}
}
}

View file

@ -3,22 +3,27 @@ package webhookconfig
import (
"fmt"
"strconv"
"time"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/config"
dclient "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/event"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
var deployName string = config.KyvernoDeploymentName
var deployNamespace string = config.KyvernoNamespace
const annCounter string = "kyverno.io/generationCounter"
const annWebhookStatus string = "kyverno.io/webhookActive"
const (
annCounter string = "kyverno.io/generationCounter"
annWebhookStatus string = "kyverno.io/webhookActive"
annLastRequestTime string = "kyverno.io/last-request-time"
)
//statusControl controls the webhook status
type statusControl struct {
client *dclient.Client
register *Register
eventGen event.Interface
log logr.Logger
}
@ -34,9 +39,9 @@ func (vc statusControl) failure() error {
}
// NewStatusControl creates a new webhook status control
func newStatusControl(client *dclient.Client, eventGen event.Interface, log logr.Logger) *statusControl {
func newStatusControl(register *Register, eventGen event.Interface, log logr.Logger) *statusControl {
return &statusControl{
client: client,
register: register,
eventGen: eventGen,
log: log,
}
@ -46,7 +51,7 @@ func (vc statusControl) setStatus(status string) error {
logger := vc.log.WithValues("name", deployName, "namespace", deployNamespace)
var ann map[string]string
var err error
deploy, err := vc.client.GetResource("", "Deployment", deployNamespace, deployName)
deploy, err := vc.register.client.GetResource("", "Deployment", deployNamespace, deployName)
if err != nil {
logger.Error(err, "failed to get deployment")
return err
@ -72,10 +77,9 @@ func (vc statusControl) setStatus(status string) error {
deploy.SetAnnotations(ann)
// update counter
_, err = vc.client.UpdateResource("", "Deployment", deployNamespace, deploy, false)
_, err = vc.register.client.UpdateResource("", "Deployment", deployNamespace, deploy, false)
if err != nil {
logger.Error(err, "failed to update deployment annotation", "key", annWebhookStatus, "val", status)
return err
return errors.Wrapf(err, "key %s, val %s", annWebhookStatus, status)
}
// create event on kyverno deployment
@ -98,7 +102,7 @@ func (vc statusControl) IncrementAnnotation() error {
logger := vc.log
var ann map[string]string
var err error
deploy, err := vc.client.GetResource("", "Deployment", deployNamespace, deployName)
deploy, err := vc.register.client.GetResource("", "Deployment", deployNamespace, deployName)
if err != nil {
logger.Error(err, "failed to find Kyverno", "deployment", deployName, "namespace", deployNamespace)
return err
@ -127,7 +131,7 @@ func (vc statusControl) IncrementAnnotation() error {
deploy.SetAnnotations(ann)
// update counter
_, err = vc.client.UpdateResource("", "Deployment", deployNamespace, deploy, false)
_, err = vc.register.client.UpdateResource("", "Deployment", deployNamespace, deploy, false)
if err != nil {
logger.Error(err, fmt.Sprintf("failed to update annotation %s for deployment %s in namespace %s", annCounter, deployName, deployNamespace))
return err
@ -135,3 +139,33 @@ func (vc statusControl) IncrementAnnotation() error {
return nil
}
func (vc statusControl) UpdateLastRequestTimestmap(new time.Time) error {
_, deploy, err := vc.register.GetKubePolicyDeployment()
if err != nil {
return errors.Wrap(err, "unable to get Kyverno deployment")
}
annotation, ok, err := unstructured.NestedStringMap(deploy.UnstructuredContent(), "metadata", "annotations")
if err != nil {
return errors.Wrap(err, "unable to get annotation")
}
if !ok {
annotation = make(map[string]string)
}
t, err := new.MarshalText()
if err != nil {
return errors.Wrap(err, "failed to marshal timestamp")
}
annotation[annLastRequestTime] = string(t)
deploy.SetAnnotations(annotation)
_, err = vc.register.client.UpdateResource("", "Deployment", deploy.GetNamespace(), deploy, false)
if err != nil {
return errors.Wrapf(err, "failed to update annotation %s for deployment %s in namespace %s", annLastRequestTime, deploy.GetName(), deploy.GetNamespace())
}
return nil
}

View file

@ -2,7 +2,6 @@ package generate
import (
"context"
"fmt"
"time"
backoff "github.com/cenkalti/backoff"
@ -17,7 +16,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
)
@ -35,7 +33,6 @@ type GeneratorChannel struct {
// Generator defines the implementation to manage generate request resources
type Generator struct {
// channel to receive request
ch chan GeneratorChannel
client *kyvernoclient.Clientset
stopCh <-chan struct{}
log logr.Logger
@ -47,7 +44,6 @@ type Generator struct {
// NewGenerator returns a new instance of Generate-Request resource generator
func NewGenerator(client *kyvernoclient.Clientset, grInformer kyvernoinformer.GenerateRequestInformer, stopCh <-chan struct{}, log logr.Logger) *Generator {
gen := &Generator{
ch: make(chan GeneratorChannel, 1000),
client: client,
stopCh: stopCh,
log: log,
@ -67,14 +63,8 @@ func (g *Generator) Apply(gr kyverno.GenerateRequestSpec, action v1beta1.Operati
action: action,
spec: gr,
}
select {
case g.ch <- message:
return nil
case <-g.stopCh:
logger.Info("shutting down channel")
return fmt.Errorf("shutting down gr create channel")
}
go g.processApply(message)
return nil
}
// Run starts the generate request spec
@ -92,20 +82,12 @@ func (g *Generator) Run(workers int, stopCh <-chan struct{}) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(g.processApply, time.Second, g.stopCh)
}
<-g.stopCh
}
func (g *Generator) processApply() {
logger := g.log
for r := range g.ch {
logger.V(4).Info("received generate request", "request", r)
if err := g.generate(r.spec, r.action); err != nil {
logger.Error(err, "failed to generate request CR")
}
func (g *Generator) processApply(m GeneratorChannel) {
if err := g.generate(m.spec, m.action); err != nil {
logger.Error(err, "failed to generate request CR")
}
}

View file

@ -164,11 +164,27 @@ func (ws *WebhookServer) handleUpdateCloneSourceResource(resLabels map[string]st
return
}
for _, gr := range grList {
ws.grController.EnqueueGenerateRequestFromWebhook(gr)
ws.updateAnnotationInGR(gr, logger)
}
}
}
// updateAnnotationInGR updates an annotation on the generate request (GR);
// updating the GR triggers its reprocessing and the recreation or update of the generated resource
func (ws *WebhookServer) updateAnnotationInGR(gr *v1.GenerateRequest, logger logr.Logger) {
grAnnotations := gr.Annotations
if len(grAnnotations) == 0 {
grAnnotations = make(map[string]string)
}
grAnnotations["generate.kyverno.io/updation-time"] = time.Now().String()
gr.SetAnnotations(grAnnotations)
_, err := ws.kyvernoClient.KyvernoV1().GenerateRequests(config.KyvernoNamespace).Update(contextdefault.TODO(), gr, metav1.UpdateOptions{})
if err != nil {
logger.Error(err, "failed to update generate request for the resource", "generate request", gr.Name)
return
}
}
//handleUpdateTargetResource - handles update of target resource for generate policy
func (ws *WebhookServer) handleUpdateTargetResource(request *v1beta1.AdmissionRequest, policies []*v1.ClusterPolicy, resLabels map[string]string, logger logr.Logger) {
enqueueBool := false
@ -227,7 +243,7 @@ func (ws *WebhookServer) handleUpdateTargetResource(request *v1beta1.AdmissionRe
logger.Error(err, "failed to get generate request", "name", grName)
return
}
ws.grController.EnqueueGenerateRequestFromWebhook(gr)
ws.updateAnnotationInGR(gr, logger)
}
}
@ -343,7 +359,7 @@ func (ws *WebhookServer) handleDelete(request *v1beta1.AdmissionRequest) {
logger.Error(err, "failed to get generate request", "name", grName)
return
}
ws.grController.EnqueueGenerateRequestFromWebhook(gr)
ws.updateAnnotationInGR(gr, logger)
}
}

View file

@ -29,7 +29,6 @@ import (
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/resourcecache"
ktls "github.com/kyverno/kyverno/pkg/tls"
tlsutils "github.com/kyverno/kyverno/pkg/tls"
userinfo "github.com/kyverno/kyverno/pkg/userinfo"
"github.com/kyverno/kyverno/pkg/utils"
@ -108,8 +107,6 @@ type WebhookServer struct {
// last request time
webhookMonitor *webhookconfig.Monitor
certRenewer *ktls.CertRenewer
// policy report generator
prGenerator policyreport.GeneratorInterface
@ -132,8 +129,6 @@ type WebhookServer struct {
grController *generate.Controller
debug bool
promConfig *metrics.PromConfig
}
@ -154,7 +149,6 @@ func NewWebhookServer(
pCache policycache.Interface,
webhookRegistrationClient *webhookconfig.Register,
webhookMonitor *webhookconfig.Monitor,
certRenewer *ktls.CertRenewer,
statusSync policystatus.Listener,
configHandler config.Interface,
prGenerator policyreport.GeneratorInterface,
@ -165,7 +159,6 @@ func NewWebhookServer(
openAPIController *openapi.Controller,
resCache resourcecache.ResourceCache,
grc *generate.Controller,
debug bool,
promConfig *metrics.PromConfig,
) (*WebhookServer, error) {
@ -205,7 +198,6 @@ func NewWebhookServer(
configHandler: configHandler,
cleanUp: cleanUp,
webhookMonitor: webhookMonitor,
certRenewer: certRenewer,
prGenerator: prGenerator,
grGenerator: grGenerator,
grController: grc,
@ -213,7 +205,6 @@ func NewWebhookServer(
log: log,
openAPIController: openAPIController,
resCache: resCache,
debug: debug,
promConfig: promConfig,
}
@ -534,9 +525,6 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
logger.Info("starting service")
if !ws.debug {
go ws.webhookMonitor.Run(ws.webhookRegister, ws.certRenewer, ws.eventGen, stopCh)
}
}
// Stop TLS server and returns control after the server is shut down