1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-05 07:26:55 +00:00

refactor: move config controller in controllers package (#3790)

* refactor: use typed informers and add tombstone support to webhookconfig

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* refactor: remove unstructured usage from webhookconfig

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* refactor: cert manager controller

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>

* refactor: move config controller in controllers package

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
This commit is contained in:
Charles-Edouard Brétéché 2022-05-04 18:05:03 +02:00 committed by GitHub
parent 288125ebd0
commit bb6e9a1ada
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
20 changed files with 524 additions and 475 deletions

View file

@ -19,6 +19,7 @@ import (
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/controllers/certmanager"
configcontroller "github.com/kyverno/kyverno/pkg/controllers/config"
"github.com/kyverno/kyverno/pkg/cosign"
dclient "github.com/kyverno/kyverno/pkg/dclient"
event "github.com/kyverno/kyverno/pkg/event"
@ -30,7 +31,7 @@ import (
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/registryclient"
"github.com/kyverno/kyverno/pkg/signal"
ktls "github.com/kyverno/kyverno/pkg/tls"
"github.com/kyverno/kyverno/pkg/tls"
"github.com/kyverno/kyverno/pkg/toggle"
"github.com/kyverno/kyverno/pkg/utils"
"github.com/kyverno/kyverno/pkg/version"
@ -142,12 +143,13 @@ func main() {
}
// informer factories
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)
kubeInformer := kubeinformers.NewSharedInformerFactory(kubeClient, resyncPeriod)
kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace))
kyvernoInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(kyvernoClient, policyControllerResyncPeriod)
kyvernoInformer := kyvernoinformer.NewSharedInformerFactory(kyvernoClient, policyControllerResyncPeriod)
// utils
kyvernoV1 := kyvernoInformer.Kyverno().V1()
kyvernoV1alpha2 := kyvernoInformer.Kyverno().V1alpha2()
// load image registry secrets
secrets := strings.Split(imagePullSecrets, ",")
@ -165,17 +167,13 @@ func main() {
// EVENT GENERATOR
// - generate event with retry mechanism
eventGenerator := event.NewEventGenerator(
dynamicClient,
kyvernoV1.ClusterPolicies(),
kyvernoV1.Policies(),
log.Log.WithName("EventGenerator"))
eventGenerator := event.NewEventGenerator(dynamicClient, kyvernoV1.ClusterPolicies(), kyvernoV1.Policies(), log.Log.WithName("EventGenerator"))
// POLICY Report GENERATOR
reportReqGen := policyreport.NewReportChangeRequestGenerator(kyvernoClient,
dynamicClient,
kyvernoInformer.Kyverno().V1alpha2().ReportChangeRequests(),
kyvernoInformer.Kyverno().V1alpha2().ClusterReportChangeRequests(),
kyvernoV1alpha2.ReportChangeRequests(),
kyvernoV1alpha2.ClusterReportChangeRequests(),
kyvernoV1.ClusterPolicies(),
kyvernoV1.Policies(),
log.Log.WithName("ReportChangeRequestGenerator"),
@ -186,8 +184,8 @@ func main() {
dynamicClient,
kyvernoInformer.Wgpolicyk8s().V1alpha2().ClusterPolicyReports(),
kyvernoInformer.Wgpolicyk8s().V1alpha2().PolicyReports(),
kyvernoInformer.Kyverno().V1alpha2().ReportChangeRequests(),
kyvernoInformer.Kyverno().V1alpha2().ClusterReportChangeRequests(),
kyvernoV1alpha2.ReportChangeRequests(),
kyvernoV1alpha2.ClusterReportChangeRequests(),
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("PolicyReportGenerator"),
)
@ -220,16 +218,12 @@ func main() {
os.Exit(1)
}
// Configuration Data
// dynamically load the configuration from configMap
// - resource filters
// if the configMap is update, the configuration will be updated :D
configData := config.NewConfigData(
kubeClient,
kubeKyvernoInformer.Core().V1().ConfigMaps(),
prgen.ReconcileCh,
webhookCfg.UpdateWebhookChan,
)
configuration, err := config.NewConfiguration(kubeClient, prgen.ReconcileCh, webhookCfg.UpdateWebhookChan)
if err != nil {
setupLog.Error(err, "failed to initialize configuration")
os.Exit(1)
}
configurationController := configcontroller.NewController(kubeKyvernoInformer.Core().V1().ConfigMaps(), configuration)
metricsConfigData, err := config.NewMetricsConfigData(kubeClient)
if err != nil {
@ -266,7 +260,7 @@ func main() {
kyvernoV1.ClusterPolicies(),
kyvernoV1.Policies(),
kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
configData,
configuration,
eventGenerator,
reportReqGen,
prgen,
@ -296,7 +290,7 @@ func main() {
eventGenerator,
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("BackgroundController"),
configData,
configuration,
)
if err != nil {
setupLog.Error(err, "Failed to create generate controller")
@ -328,12 +322,12 @@ func main() {
kubeInformer.Rbac().V1().ClusterRoleBindings(),
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("ValidateAuditHandler"),
configData,
configuration,
dynamicClient,
promConfig,
)
certRenewer := ktls.NewCertRenewer(kubeClient, clientConfig, ktls.CertRenewalInterval, ktls.CertValidityDuration, serverIP, log.Log.WithName("CertRenewer"))
certRenewer := tls.NewCertRenewer(kubeClient, clientConfig, tls.CertRenewalInterval, tls.CertValidityDuration, serverIP, log.Log.WithName("CertRenewer"))
certManager, err := certmanager.NewController(kubeKyvernoInformer.Core().V1().Secrets(), kubeClient, certRenewer)
if err != nil {
setupLog.Error(err, "failed to initialize CertManager")
@ -346,12 +340,12 @@ func main() {
waitForCacheSync(stopCh, kyvernoInformer, kubeInformer, kubeKyvernoInformer)
// validate the ConfigMap format
if err := webhookCfg.ValidateWebhookConfigurations(config.KyvernoNamespace, configData.GetInitConfigMapName()); err != nil {
if err := webhookCfg.ValidateWebhookConfigurations(config.KyvernoNamespace, config.KyvernoConfigMapName); err != nil {
setupLog.Error(err, "invalid format of the Kyverno init ConfigMap, please correct the format of 'data.webhooks'")
os.Exit(1)
}
if autoUpdateWebhooks {
go webhookCfg.UpdateWebhookConfigurations(configData)
go webhookCfg.UpdateWebhookConfigurations(configuration)
}
if registrationErr := registerWrapperRetry(); registrationErr != nil {
setupLog.Error(err, "Timeout registering admission control webhooks")
@ -382,7 +376,7 @@ func main() {
// the webhook server runs across all instances
openAPIController := startOpenAPIController(dynamicClient, stopCh)
var tlsPair *ktls.PemPair
var tlsPair *tls.PemPair
tlsPair, err = certManager.GetTLSPemPair()
if err != nil {
setupLog.Error(err, "Failed to get TLS key/certificate pair")
@ -410,7 +404,7 @@ func main() {
pCacheController.Cache,
webhookCfg,
webhookMonitor,
configData,
configuration,
reportReqGen,
urgen,
auditHandler,
@ -465,7 +459,7 @@ func main() {
// start Kyverno controllers
go le.Run(ctx)
go reportReqGen.Run(2, stopCh)
go configData.Run(stopCh)
go configurationController.Run(stopCh)
go eventGenerator.Run(3, stopCh)
go auditHandler.Run(10, stopCh)
if !debug {

View file

@ -19,7 +19,7 @@ import (
func NewBackgroundContext(dclient dclient.Interface, ur *urkyverno.UpdateRequest,
policy kyverno.PolicyInterface, trigger *unstructured.Unstructured,
cfg config.Interface, namespaceLabels map[string]string, logger logr.Logger) (*engine.PolicyContext, bool, error) {
cfg config.Configuration, namespaceLabels map[string]string, logger logr.Logger) (*engine.PolicyContext, bool, error) {
ctx := context.NewContext()
requestString := ur.Spec.Context.AdmissionRequestInfo.AdmissionRequest

View file

@ -63,7 +63,7 @@ type GenerateController struct {
// policyLister can list/get Namespace policy from the shared informer's store
npolicyLister kyvernolister.PolicyLister
Config config.Interface
Config config.Configuration
}
//NewGenerateController returns an instance of the Generate-Request Controller
@ -76,7 +76,7 @@ func NewGenerateController(
eventGen event.Interface,
nsLister corelister.NamespaceLister,
log logr.Logger,
dynamicConfig config.Interface,
dynamicConfig config.Configuration,
) (*GenerateController, error) {
c := GenerateController{

View file

@ -48,7 +48,7 @@ type MutateExistingController struct {
// policyLister can list/get Namespace policy from the shared informer's store
npolicyLister kyvernolister.PolicyLister
Config config.Interface
Config config.Configuration
}
// NewMutateExistingController returns an instance of the MutateExistingController
@ -60,7 +60,7 @@ func NewMutateExistingController(
urLister urlister.UpdateRequestNamespaceLister,
eventGen event.Interface,
log logr.Logger,
dynamicConfig config.Interface,
dynamicConfig config.Configuration,
) (*MutateExistingController, error) {
c := MutateExistingController{

View file

@ -65,7 +65,7 @@ type Controller struct {
log logr.Logger
Config config.Interface
Config config.Configuration
}
//NewController returns an instance of the Generate-Request Controller
@ -79,7 +79,7 @@ func NewController(
eventGen event.Interface,
namespaceInformer coreinformers.NamespaceInformer,
log logr.Logger,
dynamicConfig config.Interface,
dynamicConfig config.Configuration,
) (*Controller, error) {
c := Controller{

33
pkg/config/client.go Normal file
View file

@ -0,0 +1,33 @@
package config
import (
"fmt"
"math"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
)
// CreateClientConfig builds a *rest.Config from the given kubeconfig path
// (or in-cluster settings when empty) and applies the rate-limit QPS and
// burst values. QPS values that cannot be represented as a float32 are
// rejected with an error.
func CreateClientConfig(kubeconfig string, qps float64, burst int) (*rest.Config, error) {
	cfg, err := createClientConfig(kubeconfig)
	if err != nil {
		return nil, err
	}
	if qps > math.MaxFloat32 {
		return nil, fmt.Errorf("client rate limit QPS must not be higher than %e", math.MaxFloat32)
	}
	cfg.QPS = float32(qps)
	cfg.Burst = burst
	return cfg, nil
}
// createClientConfig resolves the REST client config: a non-empty kubeconfig
// path wins, otherwise the in-cluster service-account configuration is used.
func createClientConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig != "" {
		logger.V(4).Info("Using specified kubeconfig", "kubeconfig", kubeconfig)
		return clientcmd.BuildConfigFromFlags("", kubeconfig)
	}
	logger.Info("Using in-cluster configuration")
	return rest.InClusterConfig()
}

View file

@ -1,109 +1,309 @@
package config
import (
"fmt"
"math"
"context"
"reflect"
"strconv"
"sync"
wildcard "github.com/kyverno/go-wildcard"
osutils "github.com/kyverno/kyverno/pkg/utils/os"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
const (
//MutatingWebhookConfigurationName default resource mutating webhook configuration name
// MutatingWebhookConfigurationName default resource mutating webhook configuration name
MutatingWebhookConfigurationName = "kyverno-resource-mutating-webhook-cfg"
//MutatingWebhookConfigurationDebugName default resource mutating webhook configuration name for debug mode
// MutatingWebhookConfigurationDebugName default resource mutating webhook configuration name for debug mode
MutatingWebhookConfigurationDebugName = "kyverno-resource-mutating-webhook-cfg-debug"
//MutatingWebhookName default resource mutating webhook name
// MutatingWebhookName default resource mutating webhook name
MutatingWebhookName = "mutate.kyverno.svc"
ValidatingWebhookConfigurationName = "kyverno-resource-validating-webhook-cfg"
// ValidatingWebhookConfigurationName ...
ValidatingWebhookConfigurationName = "kyverno-resource-validating-webhook-cfg"
// ValidatingWebhookConfigurationDebugName ...
ValidatingWebhookConfigurationDebugName = "kyverno-resource-validating-webhook-cfg-debug"
ValidatingWebhookName = "validate.kyverno.svc"
// ValidatingWebhookName ...
ValidatingWebhookName = "validate.kyverno.svc"
//VerifyMutatingWebhookConfigurationName default verify mutating webhook configuration name
VerifyMutatingWebhookConfigurationName = "kyverno-verify-mutating-webhook-cfg"
//VerifyMutatingWebhookConfigurationDebugName default verify mutating webhook configuration name for debug mode
VerifyMutatingWebhookConfigurationDebugName = "kyverno-verify-mutating-webhook-cfg-debug"
//VerifyMutatingWebhookName default verify mutating webhook name
VerifyMutatingWebhookName = "monitor-webhooks.kyverno.svc"
//PolicyValidatingWebhookConfigurationName default policy validating webhook configuration name
PolicyValidatingWebhookConfigurationName = "kyverno-policy-validating-webhook-cfg"
//PolicyValidatingWebhookConfigurationDebugName default policy validating webhook configuration name for debug mode
PolicyValidatingWebhookConfigurationDebugName = "kyverno-policy-validating-webhook-cfg-debug"
//PolicyValidatingWebhookName default policy validating webhook name
PolicyValidatingWebhookName = "validate-policy.kyverno.svc"
//PolicyMutatingWebhookConfigurationName default policy mutating webhook configuration name
PolicyMutatingWebhookConfigurationName = "kyverno-policy-mutating-webhook-cfg"
//PolicyMutatingWebhookConfigurationDebugName default policy mutating webhook configuration name for debug mode
PolicyMutatingWebhookConfigurationDebugName = "kyverno-policy-mutating-webhook-cfg-debug"
//PolicyMutatingWebhookName default policy mutating webhook name
PolicyMutatingWebhookName = "mutate-policy.kyverno.svc"
// Due to kubernetes issue, we must use next literal constants instead of deployment TypeMeta fields
// Issue: https://github.com/kubernetes/kubernetes/pull/63972
// When the issue is closed, we should use TypeMeta struct instead of this constants
// ClusterRoleAPIVersion define the default clusterrole resource apiVersion
ClusterRoleAPIVersion = "rbac.authorization.k8s.io/v1"
// ClusterRoleKind define the default clusterrole resource kind
ClusterRoleKind = "ClusterRole"
//MutatingWebhookServicePath is the path for mutation webhook
MutatingWebhookServicePath = "/mutate"
//ValidatingWebhookServicePath is the path for validation webhook
ValidatingWebhookServicePath = "/validate"
//PolicyValidatingWebhookServicePath is the path for policy validation webhook(used to validate policy resource)
PolicyValidatingWebhookServicePath = "/policyvalidate"
//PolicyMutatingWebhookServicePath is the path for policy mutation webhook(used to default)
PolicyMutatingWebhookServicePath = "/policymutate"
//VerifyMutatingWebhookServicePath is the path for the verify webhook (used to verify whether admission control is enabled and active)
VerifyMutatingWebhookServicePath = "/verifymutate"
// LivenessServicePath is the path for check liveness health
LivenessServicePath = "/health/liveness"
// ReadinessServicePath is the path for check readness health
// ReadinessServicePath is the path for the readiness health check
)
var (
//KyvernoNamespace is the Kyverno namespace
// KyvernoNamespace is the Kyverno namespace
KyvernoNamespace = osutils.GetEnvWithFallback("KYVERNO_NAMESPACE", "kyverno")
// KyvernoDeploymentName is the Kyverno deployment name
KyvernoDeploymentName = osutils.GetEnvWithFallback("KYVERNO_DEPLOYMENT", "kyverno")
//KyvernoServiceName is the Kyverno service name
// KyvernoServiceName is the Kyverno service name
KyvernoServiceName = osutils.GetEnvWithFallback("KYVERNO_SVC", "kyverno-svc")
// KyvernoConfigMapName is the Kyverno configmap name
KyvernoConfigMapName = osutils.GetEnvWithFallback("INIT_CONFIG", "kyverno")
// defaultExcludeGroupRole ...
defaultExcludeGroupRole []string = []string{"system:serviceaccounts:kube-system", "system:nodes", "system:kube-scheduler"}
)
//CreateClientConfig creates client config and applies rate limit QPS and burst
func CreateClientConfig(kubeconfig string, qps float64, burst int) (*rest.Config, error) {
clientConfig, err := createClientConfig(kubeconfig)
if err != nil {
return nil, err
}
if qps > math.MaxFloat32 {
return nil, fmt.Errorf("client rate limit QPS must not be higher than %e", math.MaxFloat32)
}
clientConfig.Burst = burst
clientConfig.QPS = float32(qps)
return clientConfig, nil
// Configuration is the read interface consumers use to query the dynamic
// Kyverno configuration (resource filters, RBAC exclusions, webhook configs),
// plus Load, which applies a new ConfigMap snapshot.
type Configuration interface {
	// ToFilter reports whether the given resource matches one of the
	// configured resource filters and should be skipped.
	ToFilter(kind, namespace, name string) bool
	// GetExcludeGroupRole returns the group roles excluded from processing.
	GetExcludeGroupRole() []string
	// GetExcludeUsername returns the usernames excluded from processing.
	GetExcludeUsername() []string
	// GetGenerateSuccessEvents reports whether success events should be generated.
	GetGenerateSuccessEvents() bool
	// RestrictDevelopmentUsername returns the development usernames to restrict.
	RestrictDevelopmentUsername() []string
	// FilterNamespaces returns the namespaces that are NOT filtered out.
	FilterNamespaces(namespaces []string) []string
	// GetWebhooks returns the webhook configs.
	GetWebhooks() []WebhookConfig
	// Load applies the configuration from a ConfigMap; a nil ConfigMap
	// resets the configuration to defaults.
	Load(cm *v1.ConfigMap)
}
// createClientConfig creates client config
func createClientConfig(kubeconfig string) (*rest.Config, error) {
if kubeconfig == "" {
logger.Info("Using in-cluster configuration")
return rest.InClusterConfig()
}
logger.V(4).Info("Using specified kubeconfig", "kubeconfig", kubeconfig)
return clientcmd.BuildConfigFromFlags("", kubeconfig)
// configuration is the default Configuration implementation. All fields
// below mux are guarded by it, so getters and ConfigMap-driven reloads can
// run concurrently.
type configuration struct {
	mux sync.RWMutex // guards every field below
	// filters holds the parsed "resourceFilters" entries
	filters []filter
	// excludeGroupRole holds parsed "excludeGroupRole" plus the built-in defaults
	excludeGroupRole []string
	// excludeUsername holds the parsed "excludeUsername" entries
	excludeUsername []string
	// restrictDevelopmentUsername is fixed at construction time
	restrictDevelopmentUsername []string
	// webhooks holds the parsed "webhooks" configurations
	webhooks []WebhookConfig
	// generateSuccessEvents mirrors the "generateSuccessEvents" flag
	generateSuccessEvents bool
	// reconcilePolicyReport receives a signal when resource filters change
	reconcilePolicyReport chan<- bool
	// updateWebhookConfigurations receives a signal when webhook configs change
	updateWebhookConfigurations chan<- bool
}
// NewConfiguration builds a Configuration wired to the given signal channels
// and performs an initial load from the Kyverno ConfigMap when it already
// exists; a missing ConfigMap is not an error.
func NewConfiguration(client kubernetes.Interface, reconcilePolicyReport, updateWebhookConfigurations chan<- bool) (Configuration, error) {
	cfg := &configuration{
		reconcilePolicyReport:       reconcilePolicyReport,
		updateWebhookConfigurations: updateWebhookConfigurations,
		restrictDevelopmentUsername: []string{"minikube-user", "kubernetes-admin"},
		excludeGroupRole:            defaultExcludeGroupRole,
	}
	cm, err := client.CoreV1().ConfigMaps(KyvernoNamespace).Get(context.TODO(), KyvernoConfigMapName, metav1.GetOptions{})
	switch {
	case err == nil:
		cfg.load(cm)
	case !errors.IsNotFound(err):
		return nil, err
	}
	return cfg, nil
}
// ToFilter reports whether the given kind/namespace/name matches any
// configured filter. For Namespace resources a filter whose Kind is
// "Namespace" or "*" also matches against the namespace name itself.
func (cd *configuration) ToFilter(kind, namespace, name string) bool {
	cd.mux.RLock()
	defer cd.mux.RUnlock()
	for _, f := range cd.filters {
		fullMatch := wildcard.Match(f.Kind, kind) &&
			wildcard.Match(f.Namespace, namespace) &&
			wildcard.Match(f.Name, name)
		if fullMatch {
			return true
		}
		if kind != "Namespace" {
			continue
		}
		// [Namespace,kube-system,*] || [*,kube-system,*]
		if (f.Kind == "Namespace" || f.Kind == "*") && wildcard.Match(f.Namespace, name) {
			return true
		}
	}
	return false
}
// GetExcludeGroupRole returns the group roles excluded from processing.
func (cd *configuration) GetExcludeGroupRole() []string {
	cd.mux.RLock()
	roles := cd.excludeGroupRole
	cd.mux.RUnlock()
	return roles
}
// RestrictDevelopmentUsername returns the development usernames to restrict.
func (cd *configuration) RestrictDevelopmentUsername() []string {
	cd.mux.RLock()
	names := cd.restrictDevelopmentUsername
	cd.mux.RUnlock()
	return names
}
// GetExcludeUsername returns the usernames excluded from processing.
func (cd *configuration) GetExcludeUsername() []string {
	cd.mux.RLock()
	users := cd.excludeUsername
	cd.mux.RUnlock()
	return users
}
// GetGenerateSuccessEvents reports whether success events should be generated.
func (cd *configuration) GetGenerateSuccessEvents() bool {
	cd.mux.RLock()
	enabled := cd.generateSuccessEvents
	cd.mux.RUnlock()
	return enabled
}
// FilterNamespaces returns the namespaces that do NOT match any configured
// filter. The result is nil when every namespace is filtered out, matching
// the zero-value slice convention.
func (cd *configuration) FilterNamespaces(namespaces []string) []string {
	var kept []string
	for i := range namespaces {
		if cd.ToFilter("", namespaces[i], "") {
			continue
		}
		kept = append(kept, namespaces[i])
	}
	return kept
}
// GetWebhooks returns the webhook configs.
func (cd *configuration) GetWebhooks() []WebhookConfig {
	cd.mux.RLock()
	hooks := cd.webhooks
	cd.mux.RUnlock()
	return hooks
}
// Load applies the given ConfigMap to the configuration, or resets it to
// defaults when cm is nil (ConfigMap deleted), then signals the policy
// report and webhook channels when the corresponding settings changed.
// Note: an unload always signals both channels.
func (cd *configuration) Load(cm *v1.ConfigMap) {
	filtersChanged, webhooksChanged := true, true
	if cm == nil {
		logger.Info("unload config")
		cd.unload()
	} else {
		logger.Info("load config", "name", cm.Name, "namespace", cm.Namespace)
		filtersChanged, webhooksChanged = cd.load(cm)
	}
	if filtersChanged {
		logger.Info("resource filters changed, sending reconcile signal to the policy controller")
		cd.reconcilePolicyReport <- true
	}
	if webhooksChanged {
		logger.Info("webhook configurations changed, updating webhook configurations")
		cd.updateWebhookConfigurations <- true
	}
}
// load parses the ConfigMap data and updates the in-memory configuration.
// It returns whether the resource filters / RBAC settings changed
// (reconcilePolicyReport) and whether the webhook configurations changed
// (updateWebhook) so the caller can fan out the corresponding signals.
// The write lock is held for the whole update so readers never observe a
// partially applied ConfigMap.
func (cd *configuration) load(cm *v1.ConfigMap) (reconcilePolicyReport, updateWebhook bool) {
	logger := logger.WithValues("name", cm.Name, "namespace", cm.Namespace)
	if cm.Data == nil {
		logger.V(4).Info("configuration: No data defined in ConfigMap")
		return
	}
	cd.mux.Lock()
	defer cd.mux.Unlock()
	filters, ok := cm.Data["resourceFilters"]
	if !ok {
		logger.V(4).Info("configuration: No resourceFilters defined in ConfigMap")
	} else {
		newFilters := parseKinds(filters)
		if reflect.DeepEqual(newFilters, cd.filters) {
			logger.V(4).Info("resourceFilters did not change")
		} else {
			logger.V(2).Info("Updated resource filters", "oldFilters", cd.filters, "newFilters", newFilters)
			cd.filters = newFilters
			reconcilePolicyReport = true
		}
	}
	// NOTE(review): unlike the other keys, excludeGroupRole is processed even
	// when the key is absent (excludeGroupRole is then ""), which resets the
	// list back to the defaults — presumably intentional, confirm.
	excludeGroupRole, ok := cm.Data["excludeGroupRole"]
	if !ok {
		logger.V(4).Info("configuration: No excludeGroupRole defined in ConfigMap")
	}
	newExcludeGroupRoles := parseRbac(excludeGroupRole)
	newExcludeGroupRoles = append(newExcludeGroupRoles, defaultExcludeGroupRole...)
	if reflect.DeepEqual(newExcludeGroupRoles, cd.excludeGroupRole) {
		logger.V(4).Info("excludeGroupRole did not change")
	} else {
		logger.V(2).Info("Updated resource excludeGroupRoles", "oldExcludeGroupRole", cd.excludeGroupRole, "newExcludeGroupRole", newExcludeGroupRoles)
		cd.excludeGroupRole = newExcludeGroupRoles
		reconcilePolicyReport = true
	}
	excludeUsername, ok := cm.Data["excludeUsername"]
	if !ok {
		logger.V(4).Info("configuration: No excludeUsername defined in ConfigMap")
	} else {
		excludeUsernames := parseRbac(excludeUsername)
		if reflect.DeepEqual(excludeUsernames, cd.excludeUsername) {
			// fix: previously logged "excludeGroupRole did not change" (copy-paste error)
			logger.V(4).Info("excludeUsername did not change")
		} else {
			logger.V(2).Info("Updated resource excludeUsernames", "oldExcludeUsername", cd.excludeUsername, "newExcludeUsername", excludeUsernames)
			cd.excludeUsername = excludeUsernames
			reconcilePolicyReport = true
		}
	}
	webhooks, ok := cm.Data["webhooks"]
	if !ok {
		if len(cd.webhooks) > 0 {
			cd.webhooks = nil
			updateWebhook = true
			logger.V(4).Info("configuration: Setting namespaceSelector to empty in the webhook configurations")
		} else {
			logger.V(4).Info("configuration: No webhook configurations defined in ConfigMap")
		}
	} else {
		cfgs, err := parseWebhooks(webhooks)
		if err != nil {
			// an unparsable webhooks entry aborts the rest of the load;
			// keys already applied above are kept
			logger.Error(err, "unable to parse webhooks configurations")
			return
		}
		if reflect.DeepEqual(cfgs, cd.webhooks) {
			logger.V(4).Info("webhooks did not change")
		} else {
			// fix: log key typo "newWebhookd" -> "newWebhooks"
			logger.Info("Updated webhooks configurations", "oldWebhooks", cd.webhooks, "newWebhooks", cfgs)
			cd.webhooks = cfgs
			updateWebhook = true
		}
	}
	generateSuccessEvents, ok := cm.Data["generateSuccessEvents"]
	if !ok {
		logger.V(4).Info("configuration: No generateSuccessEvents defined in ConfigMap")
	} else {
		generateSuccessEvents, err := strconv.ParseBool(generateSuccessEvents)
		if err != nil {
			logger.V(4).Info("configuration: generateSuccessEvents must be either true/false")
		} else if generateSuccessEvents == cd.generateSuccessEvents {
			logger.V(4).Info("generateSuccessEvents did not change")
		} else {
			logger.V(2).Info("Updated generateSuccessEvents", "oldGenerateSuccessEvents", cd.generateSuccessEvents, "newGenerateSuccessEvents", generateSuccessEvents)
			cd.generateSuccessEvents = generateSuccessEvents
			reconcilePolicyReport = true
		}
	}
	return
}
// unload resets the configuration to its built-in defaults; used when the
// Kyverno ConfigMap is deleted.
func (cd *configuration) unload() {
	cd.mux.Lock()
	defer cd.mux.Unlock()
	cd.filters = []filter{}
	cd.excludeGroupRole = append([]string{}, defaultExcludeGroupRole...)
	cd.excludeUsername = []string{}
	cd.generateSuccessEvents = false
}

View file

@ -1,382 +0,0 @@
package config
import (
"encoding/json"
"os"
"reflect"
"regexp"
"strconv"
"strings"
"sync"
wildcard "github.com/kyverno/go-wildcard"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
// read the conifgMap with name in env:INIT_CONFIG
// this configmap stores the resources that are to be filtered
const cmNameEnv string = "INIT_CONFIG"
var defaultExcludeGroupRole []string = []string{"system:serviceaccounts:kube-system", "system:nodes", "system:kube-scheduler"}
type WebhookConfig struct {
NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"`
ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"`
}
// ConfigData stores the configuration
type ConfigData struct {
client kubernetes.Interface
cmName string
mux sync.RWMutex
filters []k8Resource
excludeGroupRole []string
excludeUsername []string
restrictDevelopmentUsername []string
webhooks []WebhookConfig
generateSuccessEvents bool
reconcilePolicyReport chan<- bool
updateWebhookConfigurations chan<- bool
}
// ToFilter checks if the given resource is set to be filtered in the configuration
func (cd *ConfigData) ToFilter(kind, namespace, name string) bool {
cd.mux.RLock()
defer cd.mux.RUnlock()
for _, f := range cd.filters {
if wildcard.Match(f.Kind, kind) && wildcard.Match(f.Namespace, namespace) && wildcard.Match(f.Name, name) {
return true
}
if kind == "Namespace" {
// [Namespace,kube-system,*] || [*,kube-system,*]
if (f.Kind == "Namespace" || f.Kind == "*") && wildcard.Match(f.Namespace, name) {
return true
}
}
}
return false
}
// GetExcludeGroupRole return exclude roles
func (cd *ConfigData) GetExcludeGroupRole() []string {
cd.mux.RLock()
defer cd.mux.RUnlock()
return cd.excludeGroupRole
}
// RestrictDevelopmentUsername return exclude development username
func (cd *ConfigData) RestrictDevelopmentUsername() []string {
cd.mux.RLock()
defer cd.mux.RUnlock()
return cd.restrictDevelopmentUsername
}
// GetExcludeUsername return exclude username
func (cd *ConfigData) GetExcludeUsername() []string {
cd.mux.RLock()
defer cd.mux.RUnlock()
return cd.excludeUsername
}
// GetGenerateSuccessEvents return if should generate success events
func (cd *ConfigData) GetGenerateSuccessEvents() bool {
cd.mux.RLock()
defer cd.mux.RUnlock()
return cd.generateSuccessEvents
}
// FilterNamespaces filters exclude namespace
func (cd *ConfigData) FilterNamespaces(namespaces []string) []string {
var results []string
for _, ns := range namespaces {
if !cd.ToFilter("", ns, "") {
results = append(results, ns)
}
}
return results
}
// GetWebhooks returns the webhook configs
func (cd *ConfigData) GetWebhooks() []WebhookConfig {
cd.mux.RLock()
defer cd.mux.RUnlock()
return cd.webhooks
}
// GetInitConfigMapName returns the init configmap name
func (cd *ConfigData) GetInitConfigMapName() string {
return cd.cmName
}
// Interface to be used by consumer to check filters
type Interface interface {
ToFilter(kind, namespace, name string) bool
GetExcludeGroupRole() []string
GetExcludeUsername() []string
GetGenerateSuccessEvents() bool
RestrictDevelopmentUsername() []string
FilterNamespaces(namespaces []string) []string
GetWebhooks() []WebhookConfig
GetInitConfigMapName() string
}
// NewConfigData ...
func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, reconcilePolicyReport, updateWebhookConfigurations chan<- bool) *ConfigData {
// environment var is read at start only
if cmNameEnv == "" {
logger.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
}
cd := ConfigData{
client: rclient,
cmName: os.Getenv(cmNameEnv),
reconcilePolicyReport: reconcilePolicyReport,
updateWebhookConfigurations: updateWebhookConfigurations,
restrictDevelopmentUsername: []string{"minikube-user", "kubernetes-admin"},
}
cd.initRbac("excludeRoles", "")
cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cd.addCM,
UpdateFunc: cd.updateCM,
DeleteFunc: cd.deleteCM,
})
return &cd
}
// Run checks syncing
func (cd *ConfigData) Run(stopCh <-chan struct{}) {
}
func (cd *ConfigData) addCM(obj interface{}) {
cm := obj.(*v1.ConfigMap)
if cm.Name != cd.cmName {
return
}
cd.load(*cm)
}
func (cd *ConfigData) updateCM(old, cur interface{}) {
cm := cur.(*v1.ConfigMap)
if cm.Name != cd.cmName {
return
}
// if data has not changed then dont load configmap
reconcilePolicyReport, updateWebhook := cd.load(*cm)
if reconcilePolicyReport {
logger.Info("resource filters changed, sending reconcile signal to the policy controller")
cd.reconcilePolicyReport <- true
}
if updateWebhook {
logger.Info("webhook configurations changed, updating webhook configurations")
cd.updateWebhookConfigurations <- true
}
}
func (cd *ConfigData) deleteCM(obj interface{}) {
cm, ok := obj.(*v1.ConfigMap)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
logger.Info("failed to get object from tombstone")
return
}
cm, ok = tombstone.Obj.(*v1.ConfigMap)
if !ok {
logger.Info("Tombstone contained object that is not a ConfigMap", "object", obj)
return
}
}
if cm.Name != cd.cmName {
return
}
// remove the configuration parameters
cd.unload(*cm)
}
func (cd *ConfigData) load(cm v1.ConfigMap) (reconcilePolicyReport, updateWebhook bool) {
logger := logger.WithValues("name", cm.Name, "namespace", cm.Namespace)
if cm.Data == nil {
logger.V(4).Info("configuration: No data defined in ConfigMap")
return
}
cd.mux.Lock()
defer cd.mux.Unlock()
filters, ok := cm.Data["resourceFilters"]
if !ok {
logger.V(4).Info("configuration: No resourceFilters defined in ConfigMap")
} else {
newFilters := parseKinds(filters)
if reflect.DeepEqual(newFilters, cd.filters) {
logger.V(4).Info("resourceFilters did not change")
} else {
logger.V(2).Info("Updated resource filters", "oldFilters", cd.filters, "newFilters", newFilters)
cd.filters = newFilters
reconcilePolicyReport = true
}
}
excludeGroupRole, ok := cm.Data["excludeGroupRole"]
if !ok {
logger.V(4).Info("configuration: No excludeGroupRole defined in ConfigMap")
}
newExcludeGroupRoles := parseRbac(excludeGroupRole)
newExcludeGroupRoles = append(newExcludeGroupRoles, defaultExcludeGroupRole...)
if reflect.DeepEqual(newExcludeGroupRoles, cd.excludeGroupRole) {
logger.V(4).Info("excludeGroupRole did not change")
} else {
logger.V(2).Info("Updated resource excludeGroupRoles", "oldExcludeGroupRole", cd.excludeGroupRole, "newExcludeGroupRole", newExcludeGroupRoles)
cd.excludeGroupRole = newExcludeGroupRoles
reconcilePolicyReport = true
}
excludeUsername, ok := cm.Data["excludeUsername"]
if !ok {
logger.V(4).Info("configuration: No excludeUsername defined in ConfigMap")
} else {
excludeUsernames := parseRbac(excludeUsername)
if reflect.DeepEqual(excludeUsernames, cd.excludeUsername) {
logger.V(4).Info("excludeGroupRole did not change")
} else {
logger.V(2).Info("Updated resource excludeUsernames", "oldExcludeUsername", cd.excludeUsername, "newExcludeUsername", excludeUsernames)
cd.excludeUsername = excludeUsernames
reconcilePolicyReport = true
}
}
webhooks, ok := cm.Data["webhooks"]
if !ok {
if len(cd.webhooks) > 0 {
cd.webhooks = nil
updateWebhook = true
logger.V(4).Info("configuration: Setting namespaceSelector to empty in the webhook configurations")
} else {
logger.V(4).Info("configuration: No webhook configurations defined in ConfigMap")
}
} else {
cfgs, err := parseWebhooks(webhooks)
if err != nil {
logger.Error(err, "unable to parse webhooks configurations")
return
}
if reflect.DeepEqual(cfgs, cd.webhooks) {
logger.V(4).Info("webhooks did not change")
} else {
logger.Info("Updated webhooks configurations", "oldWebhooks", cd.webhooks, "newWebhookd", cfgs)
cd.webhooks = cfgs
updateWebhook = true
}
}
generateSuccessEvents, ok := cm.Data["generateSuccessEvents"]
if !ok {
logger.V(4).Info("configuration: No generateSuccessEvents defined in ConfigMap")
} else {
generateSuccessEvents, err := strconv.ParseBool(generateSuccessEvents)
if err != nil {
logger.V(4).Info("configuration: generateSuccessEvents must be either true/false")
} else if generateSuccessEvents == cd.generateSuccessEvents {
logger.V(4).Info("generateSuccessEvents did not change")
} else {
logger.V(2).Info("Updated generateSuccessEvents", "oldGenerateSuccessEvents", cd.generateSuccessEvents, "newGenerateSuccessEvents", generateSuccessEvents)
cd.generateSuccessEvents = generateSuccessEvents
reconcilePolicyReport = true
}
}
return
}
// initFilters parses the filter string from the configuration and
// installs the resulting resource filters, replacing any previous set.
func (cd *ConfigData) initFilters(filters string) {
	cd.mux.Lock()
	defer cd.mux.Unlock()
	parsed := parseKinds(filters)
	logger.V(2).Info("Init resource filters", "filters", parsed)
	cd.filters = parsed
}
// initRbac parses the comma separated RBAC exclusion list and installs it
// under the field selected by action: "excludeRoles" targets the excluded
// group roles (merged with the built-in defaults), anything else targets
// the excluded usernames.
func (cd *ConfigData) initRbac(action, exclude string) {
	cd.mux.Lock()
	defer cd.mux.Unlock()
	rbac := parseRbac(exclude)
	logger.V(2).Info("Init resource ", action, exclude)
	switch action {
	case "excludeRoles":
		// the default exclusions always apply on top of the configured ones
		cd.excludeGroupRole = append(rbac, defaultExcludeGroupRole...)
	default:
		cd.excludeUsername = rbac
	}
}
// unload resets the dynamic configuration to its defaults; it is invoked
// when the kyverno config map is deleted.
func (cd *ConfigData) unload(cm v1.ConfigMap) {
	logger.Info("ConfigMap deleted, removing configuration filters", "name", cm.Name, "namespace", cm.Namespace)
	cd.mux.Lock()
	defer cd.mux.Unlock()
	cd.filters = []k8Resource{}
	// keep only the built-in default group role exclusions
	cd.excludeGroupRole = append([]string{}, defaultExcludeGroupRole...)
	cd.excludeUsername = []string{}
	cd.generateSuccessEvents = false
}
// k8Resource describes one resource filter entry: a kind/namespace/name
// triple where empty fields act as wildcards.
type k8Resource struct {
	Kind      string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVK, then GV need to be added
	Namespace string
	Name      string
}

// parseKinds parses a resource filter string of the form
// "[Kind,Namespace,Name][Kind,Namespace][Kind]" into k8Resource entries.
// Entries with more than three comma separated fields are malformed and
// skipped (previously such entries appended a stale value from the
// preceding iteration).
func parseKinds(list string) []k8Resource {
	resources := []k8Resource{}
	re := regexp.MustCompile(`\[([^\[\]]*)\]`)
	for _, element := range re.FindAllString(list, -1) {
		// strip the surrounding brackets matched by the regexp
		element = strings.TrimPrefix(element, "[")
		element = strings.TrimSuffix(element, "]")
		elements := strings.Split(element, ",")
		var resource k8Resource
		switch len(elements) {
		case 1:
			resource = k8Resource{Kind: elements[0]}
		case 2:
			resource = k8Resource{Kind: elements[0], Namespace: elements[1]}
		case 3:
			resource = k8Resource{Kind: elements[0], Namespace: elements[1], Name: elements[2]}
		default:
			continue
		}
		resources = append(resources, resource)
	}
	return resources
}
// parseRbac splits a comma separated RBAC exclusion list into its entries.
func parseRbac(list string) []string {
	entries := strings.Split(list, ",")
	return entries
}
// parseWebhooks unmarshals the JSON webhook configuration string from the
// config map into a slice of WebhookConfig.
func parseWebhooks(webhooks string) ([]WebhookConfig, error) {
	configs := make([]WebhookConfig, 0, 10)
	err := json.Unmarshal([]byte(webhooks), &configs)
	if err != nil {
		return nil, err
	}
	return configs, nil
}

40
pkg/config/filter.go Normal file
View file

@ -0,0 +1,40 @@
package config
import (
"regexp"
"strings"
)
// filter describes one resource filter entry: a kind/namespace/name
// triple where empty fields act as wildcards.
type filter struct {
	Kind      string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVK, then GV need to be added
	Namespace string
	Name      string
}

// parseKinds parses a resource filter string of the form
// "[Kind,Namespace,Name][Kind,Namespace][Kind]" into filter entries.
// Entries with more than three comma separated fields are malformed and
// skipped (previously such entries appended a stale value from the
// preceding iteration).
func parseKinds(list string) []filter {
	resources := []filter{}
	re := regexp.MustCompile(`\[([^\[\]]*)\]`)
	for _, element := range re.FindAllString(list, -1) {
		// strip the surrounding brackets matched by the regexp
		element = strings.TrimPrefix(element, "[")
		element = strings.TrimSuffix(element, "]")
		elements := strings.Split(element, ",")
		var resource filter
		switch len(elements) {
		case 1:
			resource = filter{Kind: elements[0]}
		case 2:
			resource = filter{Kind: elements[0], Namespace: elements[1]}
		case 3:
			resource = filter{Kind: elements[0], Namespace: elements[1], Name: elements[2]}
		default:
			continue
		}
		resources = append(resources, resource)
	}
	return resources
}

9
pkg/config/rbac.go Normal file
View file

@ -0,0 +1,9 @@
package config
import (
"strings"
)
// parseRbac splits a comma separated RBAC exclusion list into its entries.
func parseRbac(list string) []string {
	entries := strings.Split(list, ",")
	return entries
}

21
pkg/config/webhook.go Normal file
View file

@ -0,0 +1,21 @@
package config
import (
"encoding/json"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// WebhookConfig holds the dynamic webhook settings loaded from the kyverno
// config map: the selectors applied to the managed webhook configurations.
type WebhookConfig struct {
	// NamespaceSelector restricts the namespaces the webhook applies to.
	NamespaceSelector *metav1.LabelSelector `json:"namespaceSelector,omitempty" protobuf:"bytes,5,opt,name=namespaceSelector"`
	// ObjectSelector restricts the objects the webhook applies to.
	ObjectSelector *metav1.LabelSelector `json:"objectSelector,omitempty" protobuf:"bytes,11,opt,name=objectSelector"`
}
// parseWebhooks unmarshals the JSON webhook configuration string from the
// config map into a slice of WebhookConfig.
func parseWebhooks(webhooks string) ([]WebhookConfig, error) {
	configs := make([]WebhookConfig, 0, 10)
	err := json.Unmarshal([]byte(webhooks), &configs)
	if err != nil {
		return nil, err
	}
	return configs, nil
}

View file

@ -0,0 +1,129 @@
package config
import (
"time"
"github.com/kyverno/kyverno/pkg/config"
kubeutils "github.com/kyverno/kyverno/pkg/utils/kube"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
corev1informers "k8s.io/client-go/informers/core/v1"
corev1listers "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
	// maxRetries is the number of times a key is requeued before being dropped.
	maxRetries = 10
	// workers is the number of concurrent reconciliation workers.
	workers = 3
)
// controller watches the kyverno config map and reloads the dynamic
// configuration whenever the config map changes or is deleted.
type controller struct {
	// configuration is the live configuration reloaded on each reconcile.
	configuration config.Configuration
	// listers
	configmapLister corev1listers.ConfigMapLister
	// queue of config map keys pending reconciliation
	queue workqueue.RateLimitingInterface
}
// NewController builds a config controller that watches config maps through
// the given informer and reloads configuration on changes.
func NewController(configmapInformer corev1informers.ConfigMapInformer, configuration config.Configuration) *controller {
	ctrl := &controller{
		configuration:   configuration,
		configmapLister: configmapInformer.Lister(),
		queue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "config-controller"),
	}
	handlers := cache.ResourceEventHandlerFuncs{
		AddFunc:    ctrl.add,
		UpdateFunc: ctrl.update,
		DeleteFunc: ctrl.delete,
	}
	configmapInformer.Informer().AddEventHandler(handlers)
	return ctrl
}
// add enqueues a newly created config map.
func (c *controller) add(obj interface{}) {
	cm := obj.(*corev1.ConfigMap)
	c.enqueue(cm)
}
// update enqueues the current version of a modified config map.
func (c *controller) update(old, cur interface{}) {
	cm := cur.(*corev1.ConfigMap)
	c.enqueue(cm)
}
// delete enqueues a removed config map, unwrapping the tombstone the
// informer may hand us on deletion.
func (c *controller) delete(obj interface{}) {
	if cm, ok := kubeutils.GetObjectWithTombstone(obj).(*corev1.ConfigMap); ok {
		c.enqueue(cm)
		return
	}
	logger.Info("Failed to get deleted object", "obj", obj)
}
// enqueue adds the config map's namespace/name key to the work queue.
func (c *controller) enqueue(obj *corev1.ConfigMap) {
	key, err := cache.MetaNamespaceKeyFunc(obj)
	if err != nil {
		logger.Error(err, "failed to compute key name")
		return
	}
	c.queue.Add(key)
}
// handleErr decides what to do with a key after reconciliation: forget it
// on success or not-found, retry with rate limiting up to maxRetries,
// otherwise drop it for good.
func (c *controller) handleErr(err error, key interface{}) {
	if err == nil {
		c.queue.Forget(key)
		return
	}
	if errors.IsNotFound(err) {
		logger.V(4).Info("Dropping update request from the queue", "key", key, "error", err.Error())
		c.queue.Forget(key)
		return
	}
	if c.queue.NumRequeues(key) < maxRetries {
		logger.V(3).Info("retrying update request", "key", key, "error", err.Error())
		c.queue.AddRateLimited(key)
		return
	}
	logger.Error(err, "failed to process update request", "key", key)
	c.queue.Forget(key)
}
// processNextWorkItem pops one key from the queue, reconciles it, and
// reports whether the worker should keep going (false once the queue is
// shutting down).
func (c *controller) processNextWorkItem() bool {
	key, quit := c.queue.Get()
	if quit {
		return false
	}
	defer c.queue.Done(key)
	c.handleErr(c.reconcile(key.(string)), key)
	return true
}
// worker drains the queue until it is shut down.
func (c *controller) worker() {
	for {
		if !c.processNextWorkItem() {
			return
		}
	}
}
// Run launches the reconciliation workers and blocks until stopCh closes.
func (c *controller) Run(stopCh <-chan struct{}) {
	defer runtime.HandleCrash()
	logger.Info("start")
	defer logger.Info("shutting down")
	for w := 0; w < workers; w++ {
		go wait.Until(c.worker, time.Second, stopCh)
	}
	<-stopCh
}
// reconcile reloads the configuration from the kyverno config map named by
// key. Keys for any other config map are ignored; a deleted config map
// resets the configuration to its defaults.
func (c *controller) reconcile(key string) error {
	logger.Info("reconciling ...", "key", key)
	namespace, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		return err
	}
	// only the kyverno config map is of interest
	if namespace != config.KyvernoNamespace || name != config.KyvernoConfigMapName {
		return nil
	}
	cm, err := c.configmapLister.ConfigMaps(namespace).Get(name)
	if err == nil {
		c.configuration.Load(cm.DeepCopy())
		return nil
	}
	if errors.IsNotFound(err) {
		// the config map was deleted: fall back to defaults
		c.configuration.Load(nil)
	}
	return err
}

View file

@ -0,0 +1,5 @@
package config
import "sigs.k8s.io/controller-runtime/pkg/log"
var logger = log.Log.WithName("config-controller")

View file

@ -187,7 +187,7 @@ func (pc *PolicyController) match(r unstructured.Unstructured, rule kyverno.Rule
}
// ExcludeResources ...
func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, configHandler config.Interface, log logr.Logger) {
func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, configHandler config.Configuration, log logr.Logger) {
if reflect.DeepEqual(exclude, (kyverno.ResourceDescription{})) {
return
}

View file

@ -80,7 +80,7 @@ type PolicyController struct {
rm resourceManager
// helpers to validate against current loaded configuration
configHandler config.Interface
configHandler config.Configuration
// policy report generator
prGenerator policyreport.GeneratorInterface
@ -102,7 +102,7 @@ func NewPolicyController(
pInformer kyvernoinformer.ClusterPolicyInformer,
npInformer kyvernoinformer.PolicyInformer,
urInformer urkyvernoinformer.UpdateRequestInformer,
configHandler config.Interface,
configHandler config.Configuration,
eventGen event.Interface,
prGenerator policyreport.GeneratorInterface,
policyReportEraser policyreport.PolicyReportEraser,

View file

@ -23,7 +23,7 @@ const (
)
//GetRoleRef gets the list of roles and cluster roles for the incoming api-request
func GetRoleRef(rbLister rbaclister.RoleBindingLister, crbLister rbaclister.ClusterRoleBindingLister, request *admissionv1.AdmissionRequest, dynamicConfig config.Interface) ([]string, []string, error) {
func GetRoleRef(rbLister rbaclister.RoleBindingLister, crbLister rbaclister.ClusterRoleBindingLister, request *admissionv1.AdmissionRequest, dynamicConfig config.Configuration) ([]string, []string, error) {
keys := append(request.UserInfo.Groups, request.UserInfo.Username)
if utils.SliceContains(keys, dynamicConfig.GetExcludeGroupRole()...) {
return nil, nil, nil

View file

@ -196,7 +196,7 @@ func (wrc *Register) Remove(cleanUp chan<- struct{}) {
//
// it currently updates namespaceSelector only, can be extend to update other fields
// +deprecated
func (wrc *Register) UpdateWebhookConfigurations(configHandler config.Interface) {
func (wrc *Register) UpdateWebhookConfigurations(configHandler config.Configuration) {
logger := wrc.log.WithName("UpdateWebhookConfigurations")
for {
<-wrc.UpdateWebhookChan

View file

@ -76,7 +76,7 @@ func Admission(logger logr.Logger, inner AdmissionHandler) http.HandlerFunc {
}
}
func Filter(c config.Interface, inner AdmissionHandler) AdmissionHandler {
func Filter(c config.Configuration, inner AdmissionHandler) AdmissionHandler {
return func(request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
if c.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
return nil

View file

@ -63,7 +63,7 @@ type WebhookServer struct {
webhookRegister *webhookconfig.Register
// helpers to validate against current loaded configuration
configHandler config.Interface
configHandler config.Configuration
// channel for cleanup notification
cleanUp chan<- struct{}
@ -107,7 +107,7 @@ func NewWebhookServer(
pCache policycache.Interface,
webhookRegistrationClient *webhookconfig.Register,
webhookMonitor *webhookconfig.Monitor,
configHandler config.Interface,
configHandler config.Configuration,
prGenerator policyreport.GeneratorInterface,
urGenerator webhookgenerate.Interface,
auditHandler AuditHandler,

View file

@ -53,7 +53,7 @@ type auditHandler struct {
nsLister listerv1.NamespaceLister
log logr.Logger
configHandler config.Interface
configHandler config.Configuration
promConfig *metrics.PromConfig
}
@ -65,7 +65,7 @@ func NewValidateAuditHandler(pCache policycache.Interface,
crbInformer rbacinformer.ClusterRoleBindingInformer,
namespaces informers.NamespaceInformer,
log logr.Logger,
dynamicConfig config.Interface,
dynamicConfig config.Configuration,
client client.Interface,
promConfig *metrics.PromConfig) AuditHandler {