1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2024-12-15 17:51:20 +00:00

initial commit

This commit is contained in:
shivkumar dudhani 2019-10-18 17:38:46 -07:00
parent 37c25daa17
commit 64eab3d1d6
9 changed files with 264 additions and 127 deletions

View file

@ -0,0 +1,8 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: init-config
namespace: kyverno
data:
# resource types to be skipped by kyverno policy engine
resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"

34
main.go
View file

@ -21,12 +21,12 @@ import (
) )
var ( var (
kubeconfig string kubeconfig string
serverIP string serverIP string
filterK8Resources string // filterK8Resources string
cpu bool cpu bool
memory bool memory bool
webhookTimeout int webhookTimeout int
) )
// TODO: tune resync time differently for each informer // TODO: tune resync time differently for each informer
@ -37,8 +37,10 @@ func main() {
printVersionInfo() printVersionInfo()
// profile cpu and memory consumption // profile cpu and memory consumption
prof = enableProfiling(cpu, memory) prof = enableProfiling(cpu, memory)
// cleanUp Channel // cleanUp channel
cleanUp := make(chan struct{}) cleanUp := make(chan struct{})
// SIGINT & SIGTERM channel
stopCh := signals.SetupSignalHandler()
// CLIENT CONFIG // CLIENT CONFIG
clientConfig, err := createClientConfig(kubeconfig) clientConfig, err := createClientConfig(kubeconfig)
if err != nil { if err != nil {
@ -90,6 +92,14 @@ func main() {
// - cache resync time: 10 seconds // - cache resync time: 10 seconds
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second) kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second)
// Configuration Data
// dynamically load the configuration from configMap
// - resource filters
// if the configMap is updated, the configuration will be updated
configData, err := config.NewConfigData(clientConfig, kubeInformer.Core().V1().ConfigMaps(), stopCh)
if err != nil {
glog.Fatalf("error loading dynamic configuration: %v", err)
}
// EVENT GENERATOR // EVENT GENERATOR
// - generate event with retry mechanism // - generate event with retry mechanism
egen := event.NewEventGenerator(client, pInformer.Kyverno().V1alpha1().ClusterPolicies()) egen := event.NewEventGenerator(client, pInformer.Kyverno().V1alpha1().ClusterPolicies())
@ -99,7 +109,7 @@ func main() {
// - process policy on existing resources // - process policy on existing resources
// - status aggregator: receives stats when a policy is applied // - status aggregator: receives stats when a policy is applied
// & updates the policy status // & updates the policy status
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, filterK8Resources) pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, configData)
if err != nil { if err != nil {
glog.Fatalf("error creating policy controller: %v\n", err) glog.Fatalf("error creating policy controller: %v\n", err)
} }
@ -114,7 +124,7 @@ func main() {
// GENERATE CONTROLLER // GENERATE CONTROLLER
// - watches for Namespace resource and generates resource based on the policy generate rule // - watches for Namespace resource and generates resource based on the policy generate rule
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, filterK8Resources) nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData)
// CONFIGURE CERTIFICATES // CONFIGURE CERTIFICATES
tlsPair, err := initTLSPemPair(clientConfig, client) tlsPair, err := initTLSPemPair(clientConfig, client)
@ -137,13 +147,11 @@ func main() {
// -- annotations on resources with update details on mutation JSON patches // -- annotations on resources with update details on mutation JSON patches
// -- generate policy violation resource // -- generate policy violation resource
// -- generate events on policy and resource // -- generate events on policy and resource
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), filterK8Resources, cleanUp) server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, cleanUp)
if err != nil { if err != nil {
glog.Fatalf("Unable to create webhook server: %v\n", err) glog.Fatalf("Unable to create webhook server: %v\n", err)
} }
stopCh := signals.SetupSignalHandler()
// Start the components // Start the components
pInformer.Start(stopCh) pInformer.Start(stopCh)
kubeInformer.Start(stopCh) kubeInformer.Start(stopCh)
@ -174,7 +182,7 @@ func init() {
flag.IntVar(&webhookTimeout, "webhooktimeout", 2, "timeout for webhook configurations") flag.IntVar(&webhookTimeout, "webhooktimeout", 2, "timeout for webhook configurations")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.") flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.") flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"") // flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
config.LogDefaultFlags() config.LogDefaultFlags()
flag.Parse() flag.Parse()
} }

194
pkg/config/dynamicconfig.go Normal file
View file

@ -0,0 +1,194 @@
package config
import (
"fmt"
"os"
"reflect"
"regexp"
"strings"
"sync"
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
v1 "k8s.io/api/core/v1"
informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
// read the configMap with name in env:INIT_CONFIG
// this configmap stores the resources that are to be filtered

// cmNameEnv is the environment variable that holds the name of the ConfigMap
// carrying the dynamic configuration; it is read once at startup.
const cmNameEnv string = "INIT_CONFIG"

// cmDataField is the ConfigMap data key under which the resource filter
// list is stored.
const cmDataField string = "resourceFilters"
// ConfigData holds the dynamically loaded configuration (resource filters)
// read from the ConfigMap named by env:INIT_CONFIG, and keeps it in sync
// via informer event handlers.
type ConfigData struct {
	client kubernetes.Interface
	// configMap Name (read from env at construction time)
	cmName string
	// lock configuration: RWMutex because reads (ToFilter) vastly outnumber writes (load/unload)
	mux sync.RWMutex
	// configuration data: parsed resource filters, guarded by mux
	filters []k8Resource
}
// ToFilter checks if the given resource is set to be filtered in the configuration
func (cd *ConfigData) ToFilter(kind, namespace, name string) bool {
cd.mux.RLock()
defer cd.mux.RUnlock()
for _, f := range cd.filters {
if wildcard.Match(f.Kind, kind) && wildcard.Match(f.Namespace, namespace) && wildcard.Match(f.Name, name) {
return true
}
}
return false
}
// Interface to be used by consumer to check filters
type Interface interface {
ToFilter(kind, namespace, name string) bool
}
// NewConfigData ...
func NewConfigData(restconfig *rest.Config, cmInformer informers.ConfigMapInformer, stopCh <-chan struct{}) (*ConfigData, error) {
// get the client
kclient, err := kubernetes.NewForConfig(restconfig)
if err != nil {
return nil, err
}
// environment var is read at start only
if cmNameEnv == "" {
glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
}
cd := ConfigData{
client: kclient,
cmName: os.Getenv(cmNameEnv),
}
cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cd.addCM,
UpdateFunc: cd.updateCM,
DeleteFunc: cd.deleteCM,
})
// wait for cache to populate first time
if !cache.WaitForCacheSync(stopCh, cmInformer.Informer().HasSynced) {
return nil, fmt.Errorf("Configuration: Failed to sync informer cache")
}
return &cd, nil
}
// addCM handles ConfigMap add events: only the configured ConfigMap
// (matched by name) is loaded; all other ConfigMaps are ignored.
func (cd *ConfigData) addCM(obj interface{}) {
	configMap := obj.(*v1.ConfigMap)
	if configMap.Name == cd.cmName {
		// load the configuration from the watched ConfigMap
		cd.load(*configMap)
	}
}
// updateCM handles ConfigMap update events for the configured ConfigMap.
func (cd *ConfigData) updateCM(old, cur interface{}) {
	configMap := cur.(*v1.ConfigMap)
	if configMap.Name == cd.cmName {
		// load() is itself a no-op when the filter data has not changed
		cd.load(*configMap)
	}
}
// deleteCM handles ConfigMap delete events, including the tombstone case
// where the informer delivers a DeletedFinalStateUnknown wrapper. If the
// configured ConfigMap is deleted, all resource filters are removed.
func (cd *ConfigData) deleteCM(obj interface{}) {
	cm, ok := obj.(*v1.ConfigMap)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		// BUG FIX: previously the tombstone's object was discarded
		// (assigned to _), leaving cm nil and panicking at cm.Name below.
		cm, ok = tombstone.Obj.(*v1.ConfigMap)
		if !ok {
			glog.Info(fmt.Errorf("Tombstone contained object that is not a ConfigMap %#v", obj))
			return
		}
	}
	if cm.Name != cd.cmName {
		return
	}
	// remove the configuration parameters
	cd.unload(*cm)
}
// load reads the resource filter string from the ConfigMap and replaces the
// in-memory filter list when it has changed. Missing or empty data is a
// logged no-op; an unchanged filter list is also a no-op.
func (cd *ConfigData) load(cm v1.ConfigMap) {
	if cm.Data == nil {
		glog.Infof("Configuration: No data defined in ConfigMap %s", cm.Name)
		return
	}
	// get resource filters
	// CONSISTENCY FIX: use the declared cmDataField constant instead of
	// repeating the "resourceFilters" literal.
	filters, ok := cm.Data[cmDataField]
	if !ok {
		glog.Infof("Configuration: No resourceFilters defined in ConfigMap %s", cm.Name)
		return
	}
	// filters is a string
	if filters == "" {
		glog.Infof("Configuration: resourceFilters is empty in ConfigMap %s", cm.Name)
		return
	}
	// parse and load the configuration
	cd.mux.Lock()
	defer cd.mux.Unlock()
	newFilters := parseKinds(filters)
	if reflect.DeepEqual(newFilters, cd.filters) {
		glog.Infof("Configuration: resourceFilters did not change in ConfigMap %s", cm.Name)
		return
	}
	glog.V(4).Infof("Configuration: Old resource filters %v", cd.filters)
	glog.Infof("Configuration: New resource filters to %v", newFilters)
	// update filters
	cd.filters = newFilters
}
// unload removes all resource filters; invoked when the watched ConfigMap
// is deleted. (Resolves the TODO: the same event was previously logged twice.)
func (cd *ConfigData) unload(cm v1.ConfigMap) {
	glog.Infof("Configuration: Removing all resource filters as ConfigMap %s deleted", cm.Name)
	cd.mux.Lock()
	defer cd.mux.Unlock()
	cd.filters = []k8Resource{}
}
// k8Resource identifies a resource to be filtered by kind, namespace, and name.
type k8Resource struct {
	Kind      string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVK, then GV need to be added
	Namespace string
	Name      string
}

// resourceFilterRE matches one bracket-delimited filter group, e.g. "[Event,*,*]".
// Compiled once at package init instead of on every parseKinds call.
var resourceFilterRE = regexp.MustCompile(`\[([^\[\]]*)\]`)

// parseKinds parses a filter string of the form
// "[Kind,namespace,name][Kind,namespace][Kind]..." into k8Resource entries.
// Missing trailing fields are left empty; groups with more than three
// comma-separated fields are skipped.
func parseKinds(list string) []k8Resource {
	resources := []k8Resource{}
	for _, element := range resourceFilterRE.FindAllString(list, -1) {
		element = strings.Trim(element, "[")
		element = strings.Trim(element, "]")
		elements := strings.Split(element, ",")
		//TODO: wildcards for namespace and name
		// strings.Split always yields at least one element, so only the
		// 1/2/3-field cases are valid.
		switch len(elements) {
		case 1:
			resources = append(resources, k8Resource{Kind: elements[0]})
		case 2:
			resources = append(resources, k8Resource{Kind: elements[0], Namespace: elements[1]})
		case 3:
			resources = append(resources, k8Resource{Kind: elements[0], Namespace: elements[1], Name: elements[2]})
		default:
			// BUG FIX: previously a malformed group (4+ fields) re-appended
			// the stale resource value from the prior iteration; skip it.
			continue
		}
	}
	return resources
}

View file

@ -7,10 +7,10 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient" client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event" "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy" "github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/utils"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned" kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@ -56,7 +56,9 @@ type NamespaceController struct {
// Resource manager, manages the mapping for already processed resource // Resource manager, manages the mapping for already processed resource
rm resourceManager rm resourceManager
// filter the resources defined in the list // filter the resources defined in the list
filterK8Resources []utils.K8Resource // filterK8Resources []utils.K8Resource
// helpers to validate against current loaded configuration
configHandler config.Interface
} }
//NewNamespaceController returns a new Controller to manage generation rules //NewNamespaceController returns a new Controller to manage generation rules
@ -67,15 +69,15 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
policyStatus policy.PolicyStatusInterface, policyStatus policy.PolicyStatusInterface,
eventGen event.Interface, eventGen event.Interface,
filterK8Resources string) *NamespaceController { configHandler config.Interface) *NamespaceController {
//TODO: do we need to event recorder for this controller? //TODO: do we need to event recorder for this controller?
// create the controller // create the controller
nsc := &NamespaceController{ nsc := &NamespaceController{
client: client, client: client,
kyvernoClient: kyvernoClient, kyvernoClient: kyvernoClient,
eventGen: eventGen, eventGen: eventGen,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
filterK8Resources: utils.ParseKinds(filterK8Resources), configHandler: configHandler,
} }
nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -231,7 +233,8 @@ func (nsc *NamespaceController) syncNamespace(key string) error {
// skip processing namespace if its been filtered // skip processing namespace if its been filtered
// exclude the filtered resources // exclude the filtered resources
if utils.SkipFilteredResources("Namespace", "", namespace.Name, nsc.filterK8Resources) { if nsc.configHandler.ToFilter("Namespace", "", namespace.Name) {
// if utils.SkipFilteredResources("Namespace", "", namespace.Name, nsc.filterK8Resources) {
//TODO: improve the text //TODO: improve the text
glog.V(4).Infof("excluding namespace %s as its a filtered resource", namespace.Name) glog.V(4).Infof("excluding namespace %s as its a filtered resource", namespace.Name)
return nil return nil

View file

@ -13,9 +13,9 @@ import (
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme" "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1" kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1" kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient" client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event" "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/webhookconfig" "github.com/nirmata/kyverno/pkg/webhookconfig"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/errors"
@ -73,15 +73,16 @@ type PolicyController struct {
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
// Resource manager, manages the mapping for already processed resource // Resource manager, manages the mapping for already processed resource
rm resourceManager rm resourceManager
// filter the resources defined in the list // helpers to validate against current loaded configuration
filterK8Resources []utils.K8Resource configHandler config.Interface
// receives stats and aggregates details // receives stats and aggregates details
statusAggregator *PolicyStatusAggregator statusAggregator *PolicyStatusAggregator
} }
// NewPolicyController create a new PolicyController // NewPolicyController create a new PolicyController
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer, func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient, filterK8Resources string) (*PolicyController, error) { eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
configHandler config.Interface) (*PolicyController, error) {
// Event broad caster // Event broad caster
eventBroadcaster := record.NewBroadcaster() eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof) eventBroadcaster.StartLogging(glog.Infof)
@ -98,7 +99,8 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}), eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
webhookRegistrationClient: webhookRegistrationClient, webhookRegistrationClient: webhookRegistrationClient,
filterK8Resources: utils.ParseKinds(filterK8Resources), // filterK8Resources: utils.ParseKinds(filterK8Resources),
configHandler: configHandler,
} }
pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder} pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}

View file

@ -8,6 +8,7 @@ import (
"github.com/golang/glog" "github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard" "github.com/minio/minio/pkg/wildcard"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient" client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine" "github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/utils" "github.com/nirmata/kyverno/pkg/utils"
@ -22,7 +23,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
pc.rm.Drop() pc.rm.Drop()
var engineResponses []engine.EngineResponse var engineResponses []engine.EngineResponse
// get resource that are satisfy the resource description defined in the rules // get resource that are satisfy the resource description defined in the rules
resourceMap := listResources(pc.client, policy, pc.filterK8Resources) resourceMap := listResources(pc.client, policy, pc.configHandler)
for _, resource := range resourceMap { for _, resource := range resourceMap {
// pre-processing, check if the policy and resource version has been processed before // pre-processing, check if the policy and resource version has been processed before
if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) { if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {
@ -40,7 +41,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
return engineResponses return engineResponses
} }
func listResources(client *client.Client, policy kyverno.ClusterPolicy, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured { func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHandler config.Interface) map[string]unstructured.Unstructured {
// key uid // key uid
resourceMap := map[string]unstructured.Unstructured{} resourceMap := map[string]unstructured.Unstructured{}
@ -69,7 +70,7 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, filterK8
// get resources in the namespaces // get resources in the namespaces
for _, ns := range namespaces { for _, ns := range namespaces {
rMap := getResourcesPerNamespace(k, client, ns, rule, filterK8Resources) rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler)
mergeresources(resourceMap, rMap) mergeresources(resourceMap, rMap)
} }
@ -78,7 +79,7 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, filterK8
return resourceMap return resourceMap
} }
func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured { func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, configHandler config.Interface) map[string]unstructured.Unstructured {
resourceMap := map[string]unstructured.Unstructured{} resourceMap := map[string]unstructured.Unstructured{}
// merge include and exclude label selector values // merge include and exclude label selector values
ls := rule.MatchResources.Selector ls := rule.MatchResources.Selector
@ -100,7 +101,7 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri
} }
} }
// Skip the filtered resources // Skip the filtered resources
if utils.SkipFilteredResources(r.GetKind(), r.GetNamespace(), r.GetName(), filterK8Resources) { if configHandler.ToFilter(r.GetKind(), r.GetNamespace(), r.GetName()) {
continue continue
} }
@ -110,12 +111,12 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri
// exclude the resources // exclude the resources
// skip resources to be filtered // skip resources to be filtered
excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, filterK8Resources) excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, configHandler)
// glog.V(4).Infof("resource map: %v", resourceMap) // glog.V(4).Infof("resource map: %v", resourceMap)
return resourceMap return resourceMap
} }
func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, filterK8Resources []utils.K8Resource) { func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, configHandler config.Interface) {
if reflect.DeepEqual(exclude, (kyverno.ResourceDescription{})) { if reflect.DeepEqual(exclude, (kyverno.ResourceDescription{})) {
return return
} }
@ -196,7 +197,7 @@ func excludeResources(included map[string]unstructured.Unstructured, exclude kyv
excludeEval = append(excludeEval, ret) excludeEval = append(excludeEval, ret)
} }
// exclude the filtered resources // exclude the filtered resources
if utils.SkipFilteredResources(resource.GetKind(), resource.GetNamespace(), resource.GetName(), filterK8Resources) { if configHandler.ToFilter(resource.GetKind(), resource.GetNamespace(), resource.GetName()) {
//TODO: improve the text //TODO: improve the text
glog.V(4).Infof("excluding resource %s/%s/%s as its satisfies the filtered resources", resource.GetKind(), resource.GetNamespace(), resource.GetName()) glog.V(4).Infof("excluding resource %s/%s/%s as its satisfies the filtered resources", resource.GetKind(), resource.GetNamespace(), resource.GetName())
delete(included, uid) delete(included, uid)

View file

@ -1,20 +0,0 @@
package utils
import (
"github.com/golang/glog"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
//NewKubeInformerFactory returns a kubeinformer built from the given rest config.
// NOTE(review): if client creation fails, the error is only logged and
// kubeClient is nil here; the returned factory would then panic on first use.
// Confirm callers tolerate this, or consider failing fast instead.
func NewKubeInformerFactory(cfg *rest.Config) kubeinformers.SharedInformerFactory {
	// kubernetes client
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		glog.Errorf("error building kubernetes client: %s", err)
	}
	// resync period 0 disables periodic resync of the informer caches
	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
	return kubeInformerFactory
}

View file

@ -2,25 +2,16 @@ package utils
import ( import (
"reflect" "reflect"
"regexp"
"strings"
"github.com/golang/glog" "github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard" "github.com/minio/minio/pkg/wildcard"
client "github.com/nirmata/kyverno/pkg/dclient" client "github.com/nirmata/kyverno/pkg/dclient"
"k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes" "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest" "k8s.io/client-go/rest"
) )
type K8Resource struct {
Kind string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVK, then GV need to be added
Namespace string
Name string
}
//Contains checks if a string is contained in a list of strings //Contains checks if a string is contained in a list of strings
func contains(list []string, element string, fn func(string, string) bool) bool { func contains(list []string, element string, fn func(string, string) bool) bool {
for _, e := range list { for _, e := range list {
@ -49,58 +40,6 @@ func compareString(str, name string) bool {
return str == name return str == name
} }
//SkipFilteredResourcesReq checks if request is to be skipped based on filtered kinds
func SkipFilteredResourcesReq(request *v1beta1.AdmissionRequest, filterK8Resources []K8Resource) bool {
kind := request.Kind.Kind
namespace := request.Namespace
name := request.Name
for _, r := range filterK8Resources {
if wildcard.Match(r.Kind, kind) && wildcard.Match(r.Namespace, namespace) && wildcard.Match(r.Name, name) {
return true
}
}
return false
}
//SkipFilteredResources checks if the resource is to be skipped based on filtered kinds
func SkipFilteredResources(kind, namespace, name string, filterK8Resources []K8Resource) bool {
for _, r := range filterK8Resources {
if wildcard.Match(r.Kind, kind) && wildcard.Match(r.Namespace, namespace) && wildcard.Match(r.Name, name) {
return true
}
}
return false
}
//ParseKinds parses the kinds if a single string contains comma seperated kinds
// {"1,2,3","4","5"} => {"1","2","3","4","5"}
func ParseKinds(list string) []K8Resource {
resources := []K8Resource{}
var resource K8Resource
re := regexp.MustCompile(`\[([^\[\]]*)\]`)
submatchall := re.FindAllString(list, -1)
for _, element := range submatchall {
element = strings.Trim(element, "[")
element = strings.Trim(element, "]")
elements := strings.Split(element, ",")
//TODO: wildcards for namespace and name
if len(elements) == 0 {
continue
}
if len(elements) == 3 {
resource = K8Resource{Kind: elements[0], Namespace: elements[1], Name: elements[2]}
}
if len(elements) == 2 {
resource = K8Resource{Kind: elements[0], Namespace: elements[1]}
}
if len(elements) == 1 {
resource = K8Resource{Kind: elements[0]}
}
resources = append(resources, resource)
}
return resources
}
//NewKubeClient returns a new kubernetes client //NewKubeClient returns a new kubernetes client
func NewKubeClient(config *rest.Config) (kubernetes.Interface, error) { func NewKubeClient(config *rest.Config) (kubernetes.Interface, error) {
kclient, err := kubernetes.NewForConfig(config) kclient, err := kubernetes.NewForConfig(config)

View file

@ -19,7 +19,6 @@ import (
"github.com/nirmata/kyverno/pkg/event" "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy" "github.com/nirmata/kyverno/pkg/policy"
tlsutils "github.com/nirmata/kyverno/pkg/tls" tlsutils "github.com/nirmata/kyverno/pkg/tls"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/webhookconfig" "github.com/nirmata/kyverno/pkg/webhookconfig"
v1beta1 "k8s.io/api/admission/v1beta1" v1beta1 "k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -39,9 +38,11 @@ type WebhookServer struct {
eventGen event.Interface eventGen event.Interface
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
// API to send policy stats for aggregation // API to send policy stats for aggregation
policyStatus policy.PolicyStatusInterface policyStatus policy.PolicyStatusInterface
filterK8Resources []utils.K8Resource // helpers to validate against current loaded configuration
cleanUp chan<- struct{} configHandler config.Interface
// channel for cleanup notification
cleanUp chan<- struct{}
} }
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration // NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
@ -55,7 +56,7 @@ func NewWebhookServer(
eventGen event.Interface, eventGen event.Interface,
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
policyStatus policy.PolicyStatusInterface, policyStatus policy.PolicyStatusInterface,
filterK8Resources string, configHandler config.Interface,
cleanUp chan<- struct{}) (*WebhookServer, error) { cleanUp chan<- struct{}) (*WebhookServer, error) {
if tlsPair == nil { if tlsPair == nil {
@ -80,7 +81,7 @@ func NewWebhookServer(
eventGen: eventGen, eventGen: eventGen,
webhookRegistrationClient: webhookRegistrationClient, webhookRegistrationClient: webhookRegistrationClient,
policyStatus: policyStatus, policyStatus: policyStatus,
filterK8Resources: utils.ParseKinds(filterK8Resources), configHandler: configHandler,
cleanUp: cleanUp, cleanUp: cleanUp,
} }
mux := http.NewServeMux() mux := http.NewServeMux()
@ -112,20 +113,21 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
} }
// Do not process the admission requests for kinds that are in filterKinds for filtering // Do not process the admission requests for kinds that are in filterKinds for filtering
if !utils.SkipFilteredResourcesReq(admissionReview.Request, ws.filterK8Resources) { request := admissionReview.Request
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
// Resource CREATE // Resource CREATE
// Resource UPDATE // Resource UPDATE
switch r.URL.Path { switch r.URL.Path {
case config.MutatingWebhookServicePath: case config.MutatingWebhookServicePath:
admissionReview.Response = ws.handleAdmissionRequest(admissionReview.Request) admissionReview.Response = ws.handleAdmissionRequest(request)
case config.PolicyValidatingWebhookServicePath: case config.PolicyValidatingWebhookServicePath:
admissionReview.Response = ws.handlePolicyValidation(admissionReview.Request) admissionReview.Response = ws.handlePolicyValidation(request)
case config.PolicyMutatingWebhookServicePath: case config.PolicyMutatingWebhookServicePath:
admissionReview.Response = ws.handlePolicyMutation(admissionReview.Request) admissionReview.Response = ws.handlePolicyMutation(request)
} }
} }
admissionReview.Response.UID = admissionReview.Request.UID admissionReview.Response.UID = request.UID
responseJSON, err := json.Marshal(admissionReview) responseJSON, err := json.Marshal(admissionReview)
if err != nil { if err != nil {