mirror of https://github.com/kyverno/kyverno.git
Revert "261 dynamic config"
parent a5a30b1a3c
commit fd90b25755
11 changed files with 135 additions and 288 deletions
@@ -278,15 +278,6 @@ subjects:
    name: kyverno-service-account
    namespace: kyverno
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: init-config
  namespace: kyverno
data:
  # resource types to be skipped by kyverno policy engine
  resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
---
apiVersion: apps/v1
kind: Deployment
metadata:
@@ -308,14 +299,14 @@ spec:
      containers:
        - name: kyverno
          image: nirmata/kyverno:latest
          # args:
          args:
          - "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
          # customize webhook timeout
          # - "--webhooktimeout=4"
          # enable one of the profiling flags here
          # - "--cpu=true"
          ports:
          - containerPort: 443
          env:
          - name: INIT_CONFIG
            value: init-config
          securityContext:
            privileged: true
@@ -238,13 +238,4 @@ spec:
              namespace:
                type: string
              creationBlocked:
                type: boolean
---
apiVersion: v1
kind: ConfigMap
metadata:
  name: init-config
  namespace: kyverno
data:
  # resource types to be skipped by kyverno policy engine
  resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
@@ -128,23 +128,10 @@ The [Kyverno CLI](documentation/testing-policies.md#test-using-the-kyverno-cli)

# Filter kubernetes resources that the admission webhook should not process
The admission webhook checks if a policy is applicable on all admission requests. The kubernetes kinds that should not be processed can be filtered by adding the configmap named `init-config` in namespace `kyverno` and specifying the resources to be filtered under `data.resourceFilters`.

The configmap name is read from the environment variable `INIT_CONFIG` passed to the kyverno deployment spec. The resourceFilters configuration can be updated dynamically at runtime.
The admission webhook checks if a policy is applicable on all admission requests. The kubernetes kinds that should not be processed can be filtered by using the command line argument `filterK8Resources`.

```
apiVersion: v1
kind: ConfigMap
metadata:
  name: init-config
  namespace: kyverno
data:
  # resource types to be skipped by kyverno policy engine
  resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
```

By default we have specified Nodes, Events, APIService & SubjectAccessReview as the kinds to be skipped in the default configmap
[install.yaml](https://github.com/nirmata/kyverno/raw/master/definitions/init_configMap.yaml).
By default we have specified Nodes, Events, APIService & SubjectAccessReview as the kinds to be skipped in the [install.yaml](https://github.com/nirmata/kyverno/raw/master/definitions/install.yaml).

---
<small>*Read Next >> [Writing Policies](/documentation/writing-policies.md)*</small>
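The bracketed entries in `resourceFilters` are matched with glob-style wildcards on kind, namespace, and name. As a minimal standalone sketch (illustrative only, not part of this commit), the matching semantics can be reproduced with the same `github.com/minio/minio/pkg/wildcard` package the implementation below uses:

```go
package main

import (
	"fmt"

	"github.com/minio/minio/pkg/wildcard"
)

// filterEntry models one parsed [kind,namespace,name] triple.
type filterEntry struct{ kind, namespace, name string }

// matches reports whether a resource is covered by this entry.
func (f filterEntry) matches(kind, ns, name string) bool {
	return wildcard.Match(f.kind, kind) &&
		wildcard.Match(f.namespace, ns) &&
		wildcard.Match(f.name, name)
}

func main() {
	// "[*,kube-system,*]" skips every resource in kube-system
	f := filterEntry{kind: "*", namespace: "kube-system", name: "*"}
	fmt.Println(f.matches("Pod", "kube-system", "coredns-5c98db65d4")) // true
	fmt.Println(f.matches("Pod", "default", "nginx"))                  // false
}
```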
33 main.go
@@ -21,11 +21,12 @@ import (
)

var (
	kubeconfig     string
	serverIP       string
	cpu            bool
	memory         bool
	webhookTimeout int
	kubeconfig        string
	serverIP          string
	filterK8Resources string
	cpu               bool
	memory            bool
	webhookTimeout    int
)

// TODO: tune resync time differently for each informer
@@ -36,10 +37,8 @@ func main() {
	printVersionInfo()
	// profile cpu and memory consumption
	prof = enableProfiling(cpu, memory)
	// cleanUp channel
	// cleanUp Channel
	cleanUp := make(chan struct{})
	// SIGINT & SIGTERM channel
	stopCh := signals.SetupSignalHandler()
	// CLIENT CONFIG
	clientConfig, err := createClientConfig(kubeconfig)
	if err != nil {
@@ -91,12 +90,6 @@ func main() {
	// - cache resync time: 10 seconds
	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second)

	// Configuration Data
	// dynamically load the configuration from the configMap
	// - resource filters
	// if the configMap is updated, the configuration will be updated :D
	configData := config.NewConfigData(kubeClient, kubeInformer.Core().V1().ConfigMaps())

	// EVENT GENERATOR
	// - generate event with retry mechanism
	egen := event.NewEventGenerator(client, pInformer.Kyverno().V1alpha1().ClusterPolicies())
@@ -106,7 +99,7 @@ func main() {
	// - process policy on existing resources
	// - status aggregator: receives stats when a policy is applied
	//   & updates the policy status
	pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, configData)
	pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, filterK8Resources)
	if err != nil {
		glog.Fatalf("error creating policy controller: %v\n", err)
	}
@@ -121,7 +114,7 @@ func main() {

	// GENERATE CONTROLLER
	// - watches for Namespace resources and generates resources based on the policy generate rule
	nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData)
	nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, filterK8Resources)

	// CONFIGURE CERTIFICATES
	tlsPair, err := initTLSPemPair(clientConfig, client)
@@ -144,17 +137,16 @@ func main() {
	// -- annotations on resources with update details on mutation JSON patches
	// -- generate policy violation resource
	// -- generate events on policy and resource
	server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, cleanUp)
	server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), filterK8Resources, cleanUp)
	if err != nil {
		glog.Fatalf("Unable to create webhook server: %v\n", err)
	}

	stopCh := signals.SetupSignalHandler()

	// Start the components
	pInformer.Start(stopCh)
	kubeInformer.Start(stopCh)
	if err := configData.Run(kubeInformer.Core().V1().ConfigMaps(), stopCh); err != nil {
		glog.Fatalf("Unable to load dynamic configuration: %v\n", err)
	}
	go pc.Run(1, stopCh)
	go pvc.Run(1, stopCh)
	go egen.Run(1, stopCh)
@@ -182,6 +174,7 @@ func init() {
	flag.IntVar(&webhookTimeout, "webhooktimeout", 2, "timeout for webhook configurations")
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
	flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8s resources in the format [kind,namespace,name] for which policy is not evaluated by the admission webhook. Example: --filterK8Resources \"[Deployment, kyverno, kyverno]\" --filterK8Resources \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
	config.LogDefaultFlags()
	flag.Parse()
}
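For illustration, here is a hypothetical, self-contained sketch of how a `--filterK8Resources` value is split into bracketed triples, using the same regular expression as `utils.ParseKinds` in this commit (the flag default below is made up for the example):

```go
package main

import (
	"flag"
	"fmt"
	"regexp"
	"strings"
)

func main() {
	// Hypothetical invocation:
	//   kyverno --filterK8Resources "[Deployment, kyverno, kyverno],[Events, *, *]"
	var filterK8Resources string
	flag.StringVar(&filterK8Resources, "filterK8Resources", "[Events, *, *]", "resources the webhook should skip")
	flag.Parse()

	// extract each bracketed [kind,namespace,name] triple, as ParseKinds does
	re := regexp.MustCompile(`\[([^\[\]]*)\]`)
	for _, m := range re.FindAllString(filterK8Resources, -1) {
		parts := strings.Split(strings.Trim(m, "[]"), ",")
		fmt.Println(parts) // e.g. [Events  *  *]
	}
}
```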
@@ -1,192 +0,0 @@
package config

import (
	"fmt"
	"os"
	"reflect"
	"regexp"
	"strings"
	"sync"

	"github.com/golang/glog"
	"github.com/minio/minio/pkg/wildcard"
	v1 "k8s.io/api/core/v1"
	informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/cache"
)

// read the configMap with the name given in env:INIT_CONFIG
// this configmap stores the resources that are to be filtered
const cmNameEnv string = "INIT_CONFIG"
const cmDataField string = "resourceFilters"

type ConfigData struct {
	client kubernetes.Interface
	// configMap Name
	cmName string
	// lock configuration
	mux sync.RWMutex
	// configuration data
	filters []k8Resource
}

// ToFilter checks if the given resource is set to be filtered in the configuration
func (cd *ConfigData) ToFilter(kind, namespace, name string) bool {
	cd.mux.RLock()
	defer cd.mux.RUnlock()
	for _, f := range cd.filters {
		if wildcard.Match(f.Kind, kind) && wildcard.Match(f.Namespace, namespace) && wildcard.Match(f.Name, name) {
			return true
		}
	}
	return false
}

// Interface to be used by consumers to check filters
type Interface interface {
	ToFilter(kind, namespace, name string) bool
}

// NewConfigData ...
func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer) *ConfigData {
	// the environment variable is read at start only
	if os.Getenv(cmNameEnv) == "" {
		glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
	}
	cd := ConfigData{
		client: rclient,
		cmName: os.Getenv(cmNameEnv),
	}

	cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    cd.addCM,
		UpdateFunc: cd.updateCM,
		DeleteFunc: cd.deleteCM,
	})
	return &cd
}

func (cd *ConfigData) Run(cmInformer informers.ConfigMapInformer, stopCh <-chan struct{}) error {
	// wait for the cache to populate the first time
	if !cache.WaitForCacheSync(stopCh, cmInformer.Informer().HasSynced) {
		return fmt.Errorf("Configuration: Failed to sync informer cache")
	}
	return nil
}

func (cd *ConfigData) addCM(obj interface{}) {
	cm := obj.(*v1.ConfigMap)
	if cm.Name != cd.cmName {
		return
	}
	// load the configuration
	cd.load(*cm)
}

func (cd *ConfigData) updateCM(old, cur interface{}) {
	cm := cur.(*v1.ConfigMap)
	if cm.Name != cd.cmName {
		return
	}
	// if the data has not changed, load() will skip the update
	cd.load(*cm)
}

func (cd *ConfigData) deleteCM(obj interface{}) {
	cm, ok := obj.(*v1.ConfigMap)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		_, ok = tombstone.Obj.(*v1.ConfigMap)
		if !ok {
			glog.Info(fmt.Errorf("Tombstone contained object that is not a ConfigMap %#v", obj))
			return
		}
	}

	if cm.Name != cd.cmName {
		return
	}
	// remove the configuration parameters
	cd.unload(*cm)
}

func (cd *ConfigData) load(cm v1.ConfigMap) {
	if cm.Data == nil {
		glog.Infof("Configuration: No data defined in ConfigMap %s", cm.Name)
		return
	}
	// get resource filters
	filters, ok := cm.Data["resourceFilters"]
	if !ok {
		glog.Infof("Configuration: No resourceFilters defined in ConfigMap %s", cm.Name)
		return
	}
	// filters is a string
	if filters == "" {
		glog.Infof("Configuration: resourceFilters is empty in ConfigMap %s", cm.Name)
		return
	}

	// parse and load the configuration
	cd.mux.Lock()
	defer cd.mux.Unlock()

	newFilters := parseKinds(filters)
	if reflect.DeepEqual(newFilters, cd.filters) {
		glog.Infof("Configuration: resourceFilters did not change in ConfigMap %s", cm.Name)
		return
	}
	glog.V(4).Infof("Configuration: Old resource filters %v", cd.filters)
	glog.Infof("Configuration: New resource filters %v", newFilters)
	// update filters
	cd.filters = newFilters
}

func (cd *ConfigData) unload(cm v1.ConfigMap) {
	glog.Infof("Configuration: Removing all resource filters as ConfigMap %s was deleted", cm.Name)
	cd.mux.Lock()
	defer cd.mux.Unlock()
	cd.filters = []k8Resource{}
}

type k8Resource struct {
	Kind      string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVKs, then the GV needs to be added
	Namespace string
	Name      string
}

//parseKinds parses the kinds if a single string contains comma separated kinds
// {"1,2,3","4","5"} => {"1","2","3","4","5"}
func parseKinds(list string) []k8Resource {
	resources := []k8Resource{}
	var resource k8Resource
	re := regexp.MustCompile(`\[([^\[\]]*)\]`)
	submatchall := re.FindAllString(list, -1)
	for _, element := range submatchall {
		element = strings.Trim(element, "[")
		element = strings.Trim(element, "]")
		elements := strings.Split(element, ",")
		//TODO: wildcards for namespace and name
		if len(elements) == 0 {
			continue
		}
		if len(elements) == 3 {
			resource = k8Resource{Kind: elements[0], Namespace: elements[1], Name: elements[2]}
		}
		if len(elements) == 2 {
			resource = k8Resource{Kind: elements[0], Namespace: elements[1]}
		}
		if len(elements) == 1 {
			resource = k8Resource{Kind: elements[0]}
		}
		resources = append(resources, resource)
	}
	return resources
}
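To make the reload-on-change guard in `load` concrete, here is a minimal standalone sketch (the example values are assumed) of the `reflect.DeepEqual` comparison that skips an update when the parsed filters are unchanged:

```go
package main

import (
	"fmt"
	"reflect"
)

type k8Resource struct{ Kind, Namespace, Name string }

func main() {
	current := []k8Resource{{Kind: "Event", Namespace: "*", Name: "*"}}
	updated := []k8Resource{{Kind: "Event", Namespace: "*", Name: "*"}}
	// load() keeps the old filters when nothing changed
	if reflect.DeepEqual(updated, current) {
		fmt.Println("resourceFilters did not change: skipping update")
	}
}
```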
@@ -7,10 +7,10 @@ import (

	"github.com/golang/glog"
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
	"github.com/nirmata/kyverno/pkg/config"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/policy"
	"github.com/nirmata/kyverno/pkg/utils"
	"k8s.io/apimachinery/pkg/api/errors"

	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@@ -55,8 +55,8 @@ type NamespaceController struct {
	queue workqueue.RateLimitingInterface
	// Resource manager, manages the mapping for already processed resources
	rm resourceManager
	// helpers to validate against the currently loaded configuration
	configHandler config.Interface
	// filter the resources defined in the list
	filterK8Resources []utils.K8Resource
}

//NewNamespaceController returns a new Controller to manage generation rules
@@ -67,15 +67,15 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
	policyStatus policy.PolicyStatusInterface,
	eventGen event.Interface,
	configHandler config.Interface) *NamespaceController {
	filterK8Resources string) *NamespaceController {
	//TODO: do we need an event recorder for this controller?
	// create the controller
	nsc := &NamespaceController{
		client:        client,
		kyvernoClient: kyvernoClient,
		eventGen:      eventGen,
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
		configHandler: configHandler,
		client:            client,
		kyvernoClient:     kyvernoClient,
		eventGen:          eventGen,
		queue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
		filterK8Resources: utils.ParseKinds(filterK8Resources),
	}

	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@@ -231,7 +231,7 @@ func (nsc *NamespaceController) syncNamespace(key string) error {

	// skip processing the namespace if it has been filtered
	// exclude the filtered resources
	if nsc.configHandler.ToFilter("Namespace", "", namespace.Name) {
	if utils.SkipFilteredResources("Namespace", "", namespace.Name, nsc.filterK8Resources) {
		//TODO: improve the text
		glog.V(4).Infof("excluding namespace %s as it is a filtered resource", namespace.Name)
		return nil
@@ -13,9 +13,9 @@ import (
	"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
	"github.com/nirmata/kyverno/pkg/config"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/utils"
	"github.com/nirmata/kyverno/pkg/webhookconfig"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/errors"
@@ -73,16 +73,15 @@ type PolicyController struct {
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
	// Resource manager, manages the mapping for already processed resources
	rm resourceManager
	// helpers to validate against the currently loaded configuration
	configHandler config.Interface
	// filter the resources defined in the list
	filterK8Resources []utils.K8Resource
	// receives stats and aggregates details
	statusAggregator *PolicyStatusAggregator
}

// NewPolicyController creates a new PolicyController
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
	eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
	configHandler config.Interface) (*PolicyController, error) {
	eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient, filterK8Resources string) (*PolicyController, error) {
	// Event broadcaster
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
@@ -99,7 +98,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
	eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
	queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
	webhookRegistrationClient: webhookRegistrationClient,
	configHandler:             configHandler,
	filterK8Resources:         utils.ParseKinds(filterK8Resources),
	}

	pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}
@@ -8,7 +7,6 @@ import (
	"github.com/golang/glog"
	"github.com/minio/minio/pkg/wildcard"
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
	"github.com/nirmata/kyverno/pkg/config"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/engine"
	"github.com/nirmata/kyverno/pkg/utils"
@@ -23,7 +22,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
	pc.rm.Drop()
	var engineResponses []engine.EngineResponse
	// get the resources that satisfy the resource description defined in the rules
	resourceMap := listResources(pc.client, policy, pc.configHandler)
	resourceMap := listResources(pc.client, policy, pc.filterK8Resources)
	for _, resource := range resourceMap {
		// pre-processing, check if the policy and resource version has been processed before
		if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {
@@ -41,7 +40,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
	return engineResponses
}

func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHandler config.Interface) map[string]unstructured.Unstructured {
func listResources(client *client.Client, policy kyverno.ClusterPolicy, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured {
	// key uid
	resourceMap := map[string]unstructured.Unstructured{}

@@ -70,7 +69,7 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHa

	// get resources in the namespaces
	for _, ns := range namespaces {
		rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler)
		rMap := getResourcesPerNamespace(k, client, ns, rule, filterK8Resources)
		mergeresources(resourceMap, rMap)
	}

@@ -79,7 +78,7 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHa
	return resourceMap
}

func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, configHandler config.Interface) map[string]unstructured.Unstructured {
func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured {
	resourceMap := map[string]unstructured.Unstructured{}
	// merge include and exclude label selector values
	ls := rule.MatchResources.Selector
@@ -101,7 +100,7 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri
		}
	}
	// Skip the filtered resources
	if configHandler.ToFilter(r.GetKind(), r.GetNamespace(), r.GetName()) {
	if utils.SkipFilteredResources(r.GetKind(), r.GetNamespace(), r.GetName(), filterK8Resources) {
		continue
	}

@@ -111,12 +110,12 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri

	// exclude the resources
	// skip resources to be filtered
	excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, configHandler)
	excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, filterK8Resources)
	// glog.V(4).Infof("resource map: %v", resourceMap)
	return resourceMap
}

func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, configHandler config.Interface) {
func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, filterK8Resources []utils.K8Resource) {
	if reflect.DeepEqual(exclude, (kyverno.ResourceDescription{})) {
		return
	}
@@ -197,7 +196,7 @@ func excludeResources(included map[string]unstructured.Unstructured, exclude kyv
	excludeEval = append(excludeEval, ret)
	}
	// exclude the filtered resources
	if configHandler.ToFilter(resource.GetKind(), resource.GetNamespace(), resource.GetName()) {
	if utils.SkipFilteredResources(resource.GetKind(), resource.GetNamespace(), resource.GetName(), filterK8Resources) {
		//TODO: improve the text
		glog.V(4).Infof("excluding resource %s/%s/%s as it matches the resource filters", resource.GetKind(), resource.GetNamespace(), resource.GetName())
		delete(included, uid)
20 pkg/utils/informer.go (new file)
@@ -0,0 +1,20 @@
package utils

import (
	"github.com/golang/glog"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

//NewKubeInformerFactory returns a kube informer factory
func NewKubeInformerFactory(cfg *rest.Config) kubeinformers.SharedInformerFactory {
	// kubernetes client
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		glog.Errorf("error building kubernetes client: %s", err)
	}

	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
	return kubeInformerFactory
}
@@ -2,16 +2,25 @@ package utils

import (
	"reflect"
	"regexp"
	"strings"

	"github.com/golang/glog"

	"github.com/minio/minio/pkg/wildcard"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

type K8Resource struct {
	Kind      string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVKs, then the GV needs to be added
	Namespace string
	Name      string
}

//contains checks if a string is contained in a list of strings
func contains(list []string, element string, fn func(string, string) bool) bool {
	for _, e := range list {
@@ -40,6 +49,58 @@ func compareString(str, name string) bool {
	return str == name
}

//SkipFilteredResourcesReq checks if a request should be skipped based on the filtered kinds
func SkipFilteredResourcesReq(request *v1beta1.AdmissionRequest, filterK8Resources []K8Resource) bool {
	kind := request.Kind.Kind
	namespace := request.Namespace
	name := request.Name
	for _, r := range filterK8Resources {
		if wildcard.Match(r.Kind, kind) && wildcard.Match(r.Namespace, namespace) && wildcard.Match(r.Name, name) {
			return true
		}
	}
	return false
}

//SkipFilteredResources checks if a resource should be skipped based on the filtered kinds
func SkipFilteredResources(kind, namespace, name string, filterK8Resources []K8Resource) bool {
	for _, r := range filterK8Resources {
		if wildcard.Match(r.Kind, kind) && wildcard.Match(r.Namespace, namespace) && wildcard.Match(r.Name, name) {
			return true
		}
	}
	return false
}

//ParseKinds parses the kinds if a single string contains comma separated kinds
// {"1,2,3","4","5"} => {"1","2","3","4","5"}
func ParseKinds(list string) []K8Resource {
	resources := []K8Resource{}
	var resource K8Resource
	re := regexp.MustCompile(`\[([^\[\]]*)\]`)
	submatchall := re.FindAllString(list, -1)
	for _, element := range submatchall {
		element = strings.Trim(element, "[")
		element = strings.Trim(element, "]")
		elements := strings.Split(element, ",")
		//TODO: wildcards for namespace and name
		if len(elements) == 0 {
			continue
		}
		if len(elements) == 3 {
			resource = K8Resource{Kind: elements[0], Namespace: elements[1], Name: elements[2]}
		}
		if len(elements) == 2 {
			resource = K8Resource{Kind: elements[0], Namespace: elements[1]}
		}
		if len(elements) == 1 {
			resource = K8Resource{Kind: elements[0]}
		}
		resources = append(resources, resource)
	}
	return resources
}

//NewKubeClient returns a new kubernetes client
func NewKubeClient(config *rest.Config) (kubernetes.Interface, error) {
	kclient, err := kubernetes.NewForConfig(config)
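A short usage sketch of the helpers above (assuming the `github.com/nirmata/kyverno/pkg/utils` package path from this commit; the request values are made up): an admission request in `kube-system` matches the `[*,kube-system,*]` entry, so the webhook would skip it.

```go
package main

import (
	"fmt"

	"github.com/nirmata/kyverno/pkg/utils"
	"k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	filters := utils.ParseKinds("[Event,*,*][*,kube-system,*]")
	req := &v1beta1.AdmissionRequest{
		Kind:      metav1.GroupVersionKind{Kind: "Pod"},
		Namespace: "kube-system",
		Name:      "kube-proxy-x2xbt",
	}
	// matches [*,kube-system,*] → the webhook skips this request
	fmt.Println(utils.SkipFilteredResourcesReq(req, filters)) // true
}
```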
@@ -19,6 +19,7 @@ import (
	"github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/policy"
	tlsutils "github.com/nirmata/kyverno/pkg/tls"
	"github.com/nirmata/kyverno/pkg/utils"
	"github.com/nirmata/kyverno/pkg/webhookconfig"
	v1beta1 "k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -38,11 +39,9 @@ type WebhookServer struct {
	eventGen event.Interface
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
	// API to send policy stats for aggregation
	policyStatus policy.PolicyStatusInterface
	// helpers to validate against the currently loaded configuration
	configHandler config.Interface
	// channel for cleanup notification
	cleanUp chan<- struct{}
	policyStatus      policy.PolicyStatusInterface
	filterK8Resources []utils.K8Resource
	cleanUp           chan<- struct{}
}

// NewWebhookServer creates a new instance of WebhookServer according to the given configuration
@@ -56,7 +55,7 @@ func NewWebhookServer(
	eventGen event.Interface,
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
	policyStatus policy.PolicyStatusInterface,
	configHandler config.Interface,
	filterK8Resources string,
	cleanUp chan<- struct{}) (*WebhookServer, error) {

	if tlsPair == nil {
@@ -81,7 +80,7 @@ func NewWebhookServer(
	eventGen:                  eventGen,
	webhookRegistrationClient: webhookRegistrationClient,
	policyStatus:              policyStatus,
	configHandler:             configHandler,
	filterK8Resources:         utils.ParseKinds(filterK8Resources),
	cleanUp:                   cleanUp,
	}
	mux := http.NewServeMux()
@@ -113,21 +112,20 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
	}

	// Do not process admission requests for kinds listed in the resource filters
	request := admissionReview.Request
	if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
	if !utils.SkipFilteredResourcesReq(admissionReview.Request, ws.filterK8Resources) {
		// Resource CREATE
		// Resource UPDATE
		switch r.URL.Path {
		case config.MutatingWebhookServicePath:
			admissionReview.Response = ws.handleAdmissionRequest(request)
			admissionReview.Response = ws.handleAdmissionRequest(admissionReview.Request)
		case config.PolicyValidatingWebhookServicePath:
			admissionReview.Response = ws.handlePolicyValidation(request)
			admissionReview.Response = ws.handlePolicyValidation(admissionReview.Request)
		case config.PolicyMutatingWebhookServicePath:
			admissionReview.Response = ws.handlePolicyMutation(request)
			admissionReview.Response = ws.handlePolicyMutation(admissionReview.Request)
		}
	}

	admissionReview.Response.UID = request.UID
	admissionReview.Response.UID = admissionReview.Request.UID

	responseJSON, err := json.Marshal(admissionReview)
	if err != nil {