
Merge branch 'master' of github.com:nirmata/kyverno into 391_feature

shivkumar dudhani 2019-10-29 12:01:15 -07:00
commit c7787eff8d
19 changed files with 349 additions and 514 deletions


@ -1,18 +0,0 @@
# Example of a MutatingWebhookConfiguration that can be used for debugging when the controller is placed on the master node
# To register the debug webhook, replace ${CA_BUNDLE} with the corresponding data from ~/.kube/config and create this resource
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: nirmata-kyverno-webhook-cfg-debug
labels:
app: kyverno
webhooks:
- name: webhook.nirmata.kyverno
clientConfig:
url: "https://localhost/mutate"
caBundle: ${CA_BUNDLE}
rules:
- operations: [ "CREATE" ]
resources: [ "*/*" ]
apiGroups: [ "*" ]
apiVersions: [ "*" ]


@ -1,8 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
name: docker-registry-key
namespace: kube-system
data:
.dockerconfigjson: DOCKER_CONFIG_JSON_IN_BASE64
type: kubernetes.io/dockerconfigjson


@ -232,15 +232,13 @@ spec:
type: string
managedResource:
type: object
required:
- kind
properties:
kind:
type: string
namespace:
type: string
creationBlocked:
type: bool
type: boolean
---
kind: Namespace
apiVersion: v1
@ -280,6 +278,15 @@ subjects:
name: kyverno-service-account
namespace: kyverno
---
apiVersion: v1
kind: ConfigMap
metadata:
name: init-config
namespace: kyverno
data:
# resource types to be skipped by kyverno policy engine
resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
---
apiVersion: apps/v1
kind: Deployment
metadata:
@ -300,10 +307,15 @@ spec:
serviceAccountName: kyverno-service-account
containers:
- name: kyverno
image: nirmata/kyverno:v0.10.0
args: ["--filterK8Resources","[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"]
image: nirmata/kyverno:latest
args:
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
# customize webhook timeout
# - "--webhooktimeout=4"
# enable one of the profiling flags here
# - "--cpu=true"
ports:
- containerPort: 443
securityContext:
privileged: true
env:
- name: INIT_CONFIG
value: init-config
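# the INIT_CONFIG env var points Kyverno at the ConfigMap above that holds the dynamic resourceFilters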


@ -1,249 +0,0 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterpolicies.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Cluster
names:
kind: ClusterPolicy
plural: clusterpolicies
singular: clusterpolicy
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
required:
- rules
properties:
# default values to be handled by user
validationFailureAction:
type: string
enum:
- enforce # blocks the resource API request if a rule fails. Default behavior
- audit # allows resource creation and reports the failed validation rules as violations
rules:
type: array
items:
type: object
required:
- name
- match
properties:
name:
type: string
match:
type: object
required:
- resources
properties:
resources:
type: object
required:
- kinds
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespace:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
exclude:
type: object
required:
- resources
properties:
resources:
type: object
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespace:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
mutate:
type: object
properties:
overlay:
AnyValue: {}
patches:
type: array
items:
type: object
required:
- path
- op
properties:
path:
type: string
op:
type: string
enum:
- add
- replace
- remove
value:
AnyValue: {}
validate:
type: object
required:
- pattern
properties:
message:
type: string
pattern:
AnyValue: {}
generate:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
clone:
type: object
required:
- namespace
- name
properties:
namespace:
type: string
name:
type: string
data:
AnyValue: {}
---
kind: Namespace
apiVersion: v1
metadata:
name: "kyverno"
---
apiVersion: v1
kind: Service
metadata:
namespace: kyverno
name: kyverno-svc
labels:
app: kyverno
spec:
ports:
- port: 443
targetPort: 443
selector:
app: kyverno
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kyverno-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: kyverno
name: kyverno
labels:
app: kyverno
spec:
selector:
matchLabels:
app: kyverno
replicas: 1
template:
metadata:
labels:
app: kyverno
spec:
serviceAccountName: kyverno-service-account
containers:
- name: kyverno
image: nirmata/kyverno:latest
args:
- "--webhooktimeout=4"
# enable one of the profiling flags here
- "--cpu=true"
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*]Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
ports:
- containerPort: 443
securityContext:
privileged: true
volumeMounts:
- mountPath: /opt/nirmata
name: profiling-volume
volumes:
- name: profiling-volume
hostPath:
path: /opt/nirmata
type: Directory


@ -27,7 +27,7 @@ spec:
type: string
enum:
- enforce # blocks the resource API request if a rule fails.
- audit # allows resource creationg and reports the failed validation rules as violations. Default
- audit # allows resource creation and reports the failed validation rules as violations. Default
rules:
type: array
items:
@ -206,8 +206,8 @@ spec:
resource:
type: object
required:
- kind
- name
- kind
- name
properties:
kind:
type: string
@ -220,6 +220,31 @@ spec:
items:
type: object
required:
- name
- type
- message
- name
- type
- message
properties:
name:
type: string
type:
type: string
message:
type: string
managedResource:
type: object
properties:
kind:
type: string
namespace:
type: string
creationBlocked:
type: boolean
---
apiVersion: v1
kind: ConfigMap
metadata:
name: init-config
namespace: kyverno
data:
# resource types to be skipped by kyverno policy engine
resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"


@ -1,85 +0,0 @@
apiVersion : policy.nirmata.io/v1alpha1
kind : clusterPolicy
metadata :
name : examples
spec :
# failurePolicy is optional. Defaults to stopOnError
failurePolicy: continueOnError, stopOnError
# The spec contains a list of rules. Each rule has a resource and an operation.
# Allowed operations are:
# - patch, configMapGenerator, secretGenerator
rules:
- resource:
# Allowed resource kinds:
# ConfigMap
# CronJob
# DaemonSet
# Deployment
# Endpoint
# HorizontalPodAutoscaler
# Ingress
# Job
# LimitRange
# Namespace
# NetworkPolicy
# PersistentVolumeClaim
# PodDisruptionBudget
# PodTemplate
# ResourceQuota
# Secret
# Service
# StatefulSet
kind : PodTemplateSpec
# name is optional. Either a name or selector must be specified.
name:
# selector is optional. Either a name or a selector must be specified.
selector:
matchLabels :
nirmata.io/environment.name : production
matchExpressions:
# a patch contains multiple entries of path, operation, value
# path: starts with the resource. if blank selects resource.
# operation: Add | Replace | Delete
# value: text, number, JSON string
patch :
- path : spec/terminationGracePeriodSeconds
operation : Add
value : 20
- path : spec/containers/0/imagePullPolicy
operation : Add
value : Always
- resource:
# Kind needs to be Namespace, when operation is configMapGenerator
kind: Namespace
name: foo
configMapGenerator :
name: game-config-env-file
# copyFrom is optional. If specified, must point to a valid config map
copyFrom:
namespace: some-ns
name: some-other-config-map
data:
foo: bar
app.properties: /
foo1=bar1
foo2=bar2
ui.properties: /
foo1=bar1
foo2=bar2
- resource:
# Kind needs to be Namespace, when operation is secretGenerator
kind: Namespace
name: foo
secretGenerator :
name: game-secrets
copyFrom:
namespace: some-ns
name: some-other-secrets
data: # data is optional
status:
events:
# log of applied policies. We will need a way to distinguish between failed
# and succeeded operations


@ -118,9 +118,9 @@ To build Kyverno in a development environment see: https://github.com/nirmata/ky
To run the controller in this mode you should prepare a TLS key/certificate pair for the debug webhook, then start the controller with a kubeconfig and the server address.
1. Run scripts/deploy-controller-debug.sh --service=localhost --serverIP=<server_IP>, where <server_IP> is the IP address of the host where controller runs. This scripts will generate TLS certificate for debug webhook server and register this webhook in the cluster. Also it registers CustomResource Policy.
1. Run `scripts/deploy-controller-debug.sh --service=localhost --serverIP=<server_IP>`, where <server_IP> is the IP address of the host where the controller runs. This script will generate a TLS certificate for the debug webhook server and register the webhook in the cluster. It also registers the Policy CustomResource.
2. Start the controller using the following command: sudo kyverno --kubeconfig=~/.kube/config --serverIP=<server_IP>
2. Start the controller using the following command: `sudo kyverno --kubeconfig=~/.kube/config --serverIP=<server_IP>`
# Try Kyverno without a Kubernetes cluster
@ -128,10 +128,23 @@ The [Kyverno CLI](documentation/testing-policies.md#test-using-the-kyverno-cli)
# Filter Kubernetes resources that the admission webhook should not process
The admission webhook checks if a policy is applicable on all admission requests. The Kubernetes kinds that should not be processed can be filtered by adding a ConfigMap named `init-config` in the `kyverno` namespace and specifying the resources to be filtered under `data.resourceFilters`.
The admission webhook checks if a policy is applicable on all admission requests. The kubernetes kinds that are not be processed can be filtered by using the command line argument 'filterKind'.
The ConfigMap name is read from the environment variable `INIT_CONFIG` passed in the Kyverno deployment spec. The resourceFilters configuration can be updated dynamically at runtime.
By default we have specified Nodes, Events, APIService & SubjectAccessReview as the kinds to be skipped in the [install.yaml](https://github.com/nirmata/kyverno/raw/master/definitions/install.yaml).
```
apiVersion: v1
kind: ConfigMap
metadata:
  name: init-config
  namespace: kyverno
data:
  # resource types to be skipped by kyverno policy engine
  resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
```
By default we have specified Nodes, Events, APIService & SubjectAccessReview as the kinds to be skipped in the default ConfigMap
[init_configMap.yaml](https://github.com/nirmata/kyverno/raw/master/definitions/init_configMap.yaml).
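Each `resourceFilters` entry has the form `[kind,namespace,name]`, where `*` is a wildcard for any field; for example, `[*,kube-system,*]` skips every resource in the `kube-system` namespace. Because the ConfigMap is watched at runtime, filter changes (for example via `kubectl -n kyverno edit configmap init-config`) take effect without restarting Kyverno.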
---
<small>*Read Next >> [Writing Policies](/documentation/writing-policies.md)*</small>

main.go

@ -21,12 +21,14 @@ import (
)
var (
kubeconfig string
serverIP string
kubeconfig string
serverIP string
cpu bool
memory bool
webhookTimeout int
//TODO: added for backward compatibility with command-line arguments;
// will be removed in future, and the configuration will be set only via configmaps
filterK8Resources string
cpu bool
memory bool
webhookTimeout int
)
// TODO: tune resync time differently for each informer
@ -39,6 +41,8 @@ func main() {
prof = enableProfiling(cpu, memory)
// cleanUp Channel
cleanUp := make(chan struct{})
// SIGINT & SIGTERM channel
stopCh := signals.SetupSignalHandler()
// CLIENT CONFIG
clientConfig, err := createClientConfig(kubeconfig)
if err != nil {
@ -90,6 +94,12 @@ func main() {
// - cache resync time: 10 seconds
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second)
// Configuration Data
// dynamically load the configuration from the configMap
// - resource filters
// if the configMap is updated, the configuration is reloaded
configData := config.NewConfigData(kubeClient, kubeInformer.Core().V1().ConfigMaps(), filterK8Resources)
// EVENT GENERATOR
// - generate event with retry mechanism
egen := event.NewEventGenerator(client, pInformer.Kyverno().V1alpha1().ClusterPolicies())
@ -99,7 +109,7 @@ func main() {
// - process policy on existing resources
// - status aggregator: receives stats when a policy is applied
// & updates the policy status
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, filterK8Resources)
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, configData)
if err != nil {
glog.Fatalf("error creating policy controller: %v\n", err)
}
@ -114,7 +124,7 @@ func main() {
// GENERATE CONTROLLER
// - watches for Namespace resource and generates resource based on the policy generate rule
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, filterK8Resources)
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData)
// CONFIGURE CERTIFICATES
tlsPair, err := initTLSPemPair(clientConfig, client)
@ -137,16 +147,16 @@ func main() {
// -- annotations on resources with update details on mutation JSON patches
// -- generate policy violation resource
// -- generate events on policy and resource
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), filterK8Resources, cleanUp)
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, cleanUp)
if err != nil {
glog.Fatalf("Unable to create webhook server: %v\n", err)
}
stopCh := signals.SetupSignalHandler()
// Start the components
pInformer.Start(stopCh)
kubeInformer.Start(stopCh)
if err := configData.Run(kubeInformer.Core().V1().ConfigMaps(), stopCh); err != nil {
glog.Fatalf("Unable loading dynamic configuration: %v\n", err)
}
go pc.Run(1, stopCh)
go pvc.Run(1, stopCh)
go egen.Run(1, stopCh)
@ -170,8 +180,10 @@ func init() {
// by default is to profile cpu
flag.BoolVar(&cpu, "cpu", false, "cpu profiling feature gate, defaults to false || cpu and memory profiling cannot be enabled at the same time")
flag.BoolVar(&memory, "memory", false, "memory profiling feature gate, defaults to false || cpu and memory profiling cannot be enabled at the same time")
flag.IntVar(&webhookTimeout, "webhooktimeout", 2, "timeout for webhook configurations")
//TODO: added for backward compatibility with command-line arguments;
// will be removed in future, and the configuration will be set only via configmaps
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8s resources in the format [kind,namespace,name] for which policy is not evaluated by the admission webhook, e.g. --filterK8Resources \"[Deployment, kyverno, kyverno]\" --filterK8Resources \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")


@ -173,13 +173,13 @@ type ResourceSpec struct {
// ViolatedRule stores the information regarding the rule
type ViolatedRule struct {
Name string `json:"name"`
Type string `json:"type"`
Message string `json:"message"`
ManagedResource ManagedResource `json:"managedResource,omitempty"`
Name string `json:"name"`
Type string `json:"type"`
Message string `json:"message"`
ManagedResource ManagedResourceSpec `json:"managedResource,omitempty"`
}
type ManagedResource struct {
type ManagedResourceSpec struct {
Kind string `json:"kind,omitempty"`
Namespace string `json:"namespace,omitempty"`
CreationBlocked bool `json:"creationBlocked,omitempty"`


@ -191,17 +191,17 @@ func (in *Generation) DeepCopy() *Generation {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ManagedResource) DeepCopyInto(out *ManagedResource) {
func (in *ManagedResourceSpec) DeepCopyInto(out *ManagedResourceSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResource.
func (in *ManagedResource) DeepCopy() *ManagedResource {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ManagedResourceSpec.
func (in *ManagedResourceSpec) DeepCopy() *ManagedResourceSpec {
if in == nil {
return nil
}
out := new(ManagedResource)
out := new(ManagedResourceSpec)
in.DeepCopyInto(out)
return out
}

pkg/config/dynamicconfig.go (new file)

@ -0,0 +1,210 @@
package config
import (
"fmt"
"os"
"reflect"
"regexp"
"strings"
"sync"
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
v1 "k8s.io/api/core/v1"
informers "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
// read the configMap with the name given in env:INIT_CONFIG
// this configmap stores the resources that are to be filtered
const cmNameEnv string = "INIT_CONFIG"
const cmDataField string = "resourceFilters"
type ConfigData struct {
client kubernetes.Interface
// configMap Name
cmName string
// lock configuration
mux sync.RWMutex
// configuration data
filters []k8Resource
}
// ToFilter checks if the given resource is set to be filtered in the configuration
func (cd *ConfigData) ToFilter(kind, namespace, name string) bool {
cd.mux.RLock()
defer cd.mux.RUnlock()
for _, f := range cd.filters {
if wildcard.Match(f.Kind, kind) && wildcard.Match(f.Namespace, namespace) && wildcard.Match(f.Name, name) {
return true
}
}
return false
}
// Interface to be used by consumers to check filters
type Interface interface {
ToFilter(kind, namespace, name string) bool
}
// NewConfigData creates a ConfigData that tracks the init ConfigMap for filter updates
func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, filterK8Resources string) *ConfigData {
// the environment variable is read once at start
if os.Getenv(cmNameEnv) == "" {
glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
}
cd := ConfigData{
client: rclient,
cmName: os.Getenv(cmNameEnv),
}
//TODO: added for backward compatibility with command-line arguments;
// will be removed in future, and the configuration will be set only via configmaps
if filterK8Resources != "" {
glog.Info("Init configuration from commandline arguments")
cd.initFilters(filterK8Resources)
}
cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cd.addCM,
UpdateFunc: cd.updateCM,
DeleteFunc: cd.deleteCM,
})
return &cd
}
func (cd *ConfigData) Run(cmInformer informers.ConfigMapInformer, stopCh <-chan struct{}) error {
// wait for cache to populate first time
if !cache.WaitForCacheSync(stopCh, cmInformer.Informer().HasSynced) {
return fmt.Errorf("Configuration: Failed to sync informer cache")
}
return nil
}
func (cd *ConfigData) addCM(obj interface{}) {
cm := obj.(*v1.ConfigMap)
if cm.Name != cd.cmName {
return
}
// load the configuration
cd.load(*cm)
}
func (cd *ConfigData) updateCM(old, cur interface{}) {
cm := cur.(*v1.ConfigMap)
if cm.Name != cd.cmName {
return
}
// load() skips the update if the configmap data has not changed
cd.load(*cm)
}
func (cd *ConfigData) deleteCM(obj interface{}) {
cm, ok := obj.(*v1.ConfigMap)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
cm, ok = tombstone.Obj.(*v1.ConfigMap) // recover the ConfigMap from the tombstone
if !ok {
glog.Info(fmt.Errorf("Tombstone contained object that is not a ConfigMap %#v", obj))
return
}
}
if cm.Name != cd.cmName {
return
}
// remove the configuration parameters
cd.unload(*cm)
}
func (cd *ConfigData) load(cm v1.ConfigMap) {
if cm.Data == nil {
glog.Infof("Configuration: No data defined in ConfigMap %s", cm.Name)
return
}
// get resource filters
filters, ok := cm.Data[cmDataField]
if !ok {
glog.Infof("Configuration: No resourceFilters defined in ConfigMap %s", cm.Name)
return
}
// filters is a string
if filters == "" {
glog.Infof("Configuration: resourceFilters is empty in ConfigMap %s", cm.Name)
return
}
// parse and load the configuration
cd.mux.Lock()
defer cd.mux.Unlock()
newFilters := parseKinds(filters)
if reflect.DeepEqual(newFilters, cd.filters) {
glog.Infof("Configuration: resourceFilters did not change in ConfigMap %s", cm.Name)
return
}
glog.V(4).Infof("Configuration: Old resource filters %v", cd.filters)
glog.Infof("Configuration: New resource filters to %v", newFilters)
// update filters
cd.filters = newFilters
}
//TODO: added for backward compatibility with command-line arguments;
// will be removed in future, and the configuration will be set only via configmaps
func (cd *ConfigData) initFilters(filters string) {
// parse and load the configuration
cd.mux.Lock()
defer cd.mux.Unlock()
newFilters := parseKinds(filters)
glog.Infof("Configuration: Init resource filters to %v", newFilters)
// update filters
cd.filters = newFilters
}
func (cd *ConfigData) unload(cm v1.ConfigMap) {
glog.Infof("Configuration: ConfigMap %s deleted, removing all resource filters", cm.Name)
cd.mux.Lock()
defer cd.mux.Unlock()
cd.filters = []k8Resource{}
}
type k8Resource struct {
Kind string //TODO: we currently support only one GVK version, so only the kind is used; if multiple GVKs are supported, the group/version needs to be added
Namespace string
Name string
}
//parseKinds parses a string of bracketed filter entries into k8Resource filters,
// e.g. "[Event,*,*][*,kube-system,*]" => [{Event * *} {* kube-system *}]
func parseKinds(list string) []k8Resource {
resources := []k8Resource{}
var resource k8Resource
re := regexp.MustCompile(`\[([^\[\]]*)\]`)
submatchall := re.FindAllString(list, -1)
for _, element := range submatchall {
element = strings.Trim(element, "[")
element = strings.Trim(element, "]")
elements := strings.Split(element, ",")
//TODO: wildcards for namespace and name
if len(elements) == 0 {
continue
}
if len(elements) == 3 {
resource = k8Resource{Kind: elements[0], Namespace: elements[1], Name: elements[2]}
}
if len(elements) == 2 {
resource = k8Resource{Kind: elements[0], Namespace: elements[1]}
}
if len(elements) == 1 {
resource = k8Resource{Kind: elements[0]}
}
resources = append(resources, resource)
}
return resources
}
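As a reference for the semantics above, here is a minimal sketch of a unit test for `parseKinds` and `ToFilter` (hypothetical, not part of this commit; it assumes it sits in the same `config` package so it can reach the unexported identifiers):

```
package config

import "testing"

// Illustrative test for the filter semantics implemented above.
func TestToFilter(t *testing.T) {
	cd := ConfigData{
		filters: parseKinds("[Event,*,*][*,kube-system,*][Node,*,*]"),
	}
	cases := []struct {
		kind, namespace, name string
		want                  bool
	}{
		{"Event", "default", "my-event", true},       // kind matches, namespace/name wildcarded
		{"Pod", "kube-system", "kube-proxy-x", true}, // any kind in kube-system
		{"Node", "", "node-1", true},                 // cluster-scoped: "*" also matches ""
		{"Pod", "default", "nginx", false},           // no filter matches
	}
	for _, c := range cases {
		if got := cd.ToFilter(c.kind, c.namespace, c.name); got != c.want {
			t.Errorf("ToFilter(%q, %q, %q) = %v, want %v", c.kind, c.namespace, c.name, got, c.want)
		}
	}
}
```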


@ -10,7 +10,7 @@ import (
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/config"
"k8s.io/apimachinery/pkg/api/errors"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@ -55,8 +55,8 @@ type NamespaceController struct {
queue workqueue.RateLimitingInterface
// Resource manager, manages the mapping for already processed resource
rm resourceManager
// filter the resources defined in the list
filterK8Resources []utils.K8Resource
// helpers to validate against the currently loaded configuration
configHandler config.Interface
}
//NewNamespaceController returns a new Controller to manage generation rules
@ -67,15 +67,15 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
policyStatus policy.PolicyStatusInterface,
eventGen event.Interface,
filterK8Resources string) *NamespaceController {
//TODO: do we need to event recorder for this controller?
configHandler config.Interface) *NamespaceController {
//TODO: do we need an event recorder for this controller?
// create the controller
nsc := &NamespaceController{
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
filterK8Resources: utils.ParseKinds(filterK8Resources),
configHandler: configHandler,
}
nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -231,7 +231,7 @@ func (nsc *NamespaceController) syncNamespace(key string) error {
// skip processing the namespace if it has been filtered out
if utils.SkipFilteredResources("Namespace", "", namespace.Name, nsc.filterK8Resources) {
if nsc.configHandler.ToFilter("Namespace", "", namespace.Name) {
glog.V(4).Infof("excluding namespace %s as it is a filtered resource", namespace.Name)
return nil


@ -13,9 +13,9 @@ import (
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/webhookconfig"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -73,15 +73,16 @@ type PolicyController struct {
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
// Resource manager, manages the mapping for already processed resource
rm resourceManager
// filter the resources defined in the list
filterK8Resources []utils.K8Resource
// helpers to validate against the currently loaded configuration
configHandler config.Interface
// receives stats and aggregates details
statusAggregator *PolicyStatusAggregator
}
// NewPolicyController create a new PolicyController
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient, filterK8Resources string) (*PolicyController, error) {
eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
configHandler config.Interface) (*PolicyController, error) {
// Event broadcaster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
@ -98,7 +99,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
webhookRegistrationClient: webhookRegistrationClient,
filterK8Resources: utils.ParseKinds(filterK8Resources),
configHandler: configHandler,
}
pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}


@ -8,6 +8,7 @@ import (
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/utils"
@ -22,7 +23,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
pc.rm.Drop()
var engineResponses []engine.EngineResponse
// get resources that satisfy the resource description defined in the rules
resourceMap := listResources(pc.client, policy, pc.filterK8Resources)
resourceMap := listResources(pc.client, policy, pc.configHandler)
for _, resource := range resourceMap {
// pre-processing, check if the policy and resource version has been processed before
if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {
@ -40,7 +41,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
return engineResponses
}
func listResources(client *client.Client, policy kyverno.ClusterPolicy, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured {
func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHandler config.Interface) map[string]unstructured.Unstructured {
// key uid
resourceMap := map[string]unstructured.Unstructured{}
@ -69,7 +70,7 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, filterK8
// get resources in the namespaces
for _, ns := range namespaces {
rMap := getResourcesPerNamespace(k, client, ns, rule, filterK8Resources)
rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler)
mergeresources(resourceMap, rMap)
}
@ -78,7 +79,7 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, filterK8
return resourceMap
}
func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured {
func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, configHandler config.Interface) map[string]unstructured.Unstructured {
resourceMap := map[string]unstructured.Unstructured{}
// merge include and exclude label selector values
ls := rule.MatchResources.Selector
@ -100,7 +101,7 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri
}
}
// Skip the filtered resources
if utils.SkipFilteredResources(r.GetKind(), r.GetNamespace(), r.GetName(), filterK8Resources) {
if configHandler.ToFilter(r.GetKind(), r.GetNamespace(), r.GetName()) {
continue
}
@ -110,12 +111,12 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri
// exclude resources per the rule's exclude block and skip resources matching the filters
excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, filterK8Resources)
excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, configHandler)
// glog.V(4).Infof("resource map: %v", resourceMap)
return resourceMap
}
func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, filterK8Resources []utils.K8Resource) {
func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, configHandler config.Interface) {
if reflect.DeepEqual(exclude, (kyverno.ResourceDescription{})) {
return
}
@ -196,7 +197,7 @@ func excludeResources(included map[string]unstructured.Unstructured, exclude kyv
excludeEval = append(excludeEval, ret)
}
// exclude the filtered resources
if utils.SkipFilteredResources(resource.GetKind(), resource.GetNamespace(), resource.GetName(), filterK8Resources) {
if configHandler.ToFilter(resource.GetKind(), resource.GetNamespace(), resource.GetName()) {
glog.V(4).Infof("excluding resource %s/%s/%s as it matches a resource filter", resource.GetKind(), resource.GetNamespace(), resource.GetName())
delete(included, uid)


@ -259,7 +259,7 @@ func (pvc *PolicyViolationController) syncActiveResource(curPv *kyverno.ClusterP
// when rejected resource created in the cluster
func (pvc *PolicyViolationController) syncBlockedResource(curPv *kyverno.ClusterPolicyViolation) error {
for _, violatedRule := range curPv.Spec.ViolatedRules {
if reflect.DeepEqual(violatedRule.ManagedResource, kyverno.ManagedResource{}) {
if reflect.DeepEqual(violatedRule.ManagedResource, kyverno.ManagedResourceSpec{}) {
continue
}


@ -248,7 +248,7 @@ func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvRes
func newViolatedRules(er engine.EngineResponse, msg string) (violatedRules []kyverno.ViolatedRule) {
unstr := er.PatchedResource
dependant := kyverno.ManagedResource{
dependant := kyverno.ManagedResourceSpec{
Kind: unstr.GetKind(),
Namespace: unstr.GetNamespace(),
CreationBlocked: true,


@ -1,20 +0,0 @@
package utils
import (
"github.com/golang/glog"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
//NewKubeInformerFactory returns a kubeinformer
func NewKubeInformerFactory(cfg *rest.Config) kubeinformers.SharedInformerFactory {
// kubernetes client
kubeClient, err := kubernetes.NewForConfig(cfg)
if err != nil {
glog.Errorf("error building kubernetes client: %s", err)
}
kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
return kubeInformerFactory
}


@ -2,25 +2,16 @@ package utils
import (
"reflect"
"regexp"
"strings"
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
client "github.com/nirmata/kyverno/pkg/dclient"
"k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
type K8Resource struct {
Kind string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVK, then GV need to be added
Namespace string
Name string
}
//contains checks if a string is contained in a list of strings
func contains(list []string, element string, fn func(string, string) bool) bool {
for _, e := range list {
@ -49,58 +40,6 @@ func compareString(str, name string) bool {
return str == name
}
//SkipFilteredResourcesReq checks if request is to be skipped based on filtered kinds
func SkipFilteredResourcesReq(request *v1beta1.AdmissionRequest, filterK8Resources []K8Resource) bool {
kind := request.Kind.Kind
namespace := request.Namespace
name := request.Name
for _, r := range filterK8Resources {
if wildcard.Match(r.Kind, kind) && wildcard.Match(r.Namespace, namespace) && wildcard.Match(r.Name, name) {
return true
}
}
return false
}
//SkipFilteredResources checks if the resource is to be skipped based on filtered kinds
func SkipFilteredResources(kind, namespace, name string, filterK8Resources []K8Resource) bool {
for _, r := range filterK8Resources {
if wildcard.Match(r.Kind, kind) && wildcard.Match(r.Namespace, namespace) && wildcard.Match(r.Name, name) {
return true
}
}
return false
}
//ParseKinds parses the kinds if a single string contains comma seperated kinds
// {"1,2,3","4","5"} => {"1","2","3","4","5"}
func ParseKinds(list string) []K8Resource {
resources := []K8Resource{}
var resource K8Resource
re := regexp.MustCompile(`\[([^\[\]]*)\]`)
submatchall := re.FindAllString(list, -1)
for _, element := range submatchall {
element = strings.Trim(element, "[")
element = strings.Trim(element, "]")
elements := strings.Split(element, ",")
//TODO: wildcards for namespace and name
if len(elements) == 0 {
continue
}
if len(elements) == 3 {
resource = K8Resource{Kind: elements[0], Namespace: elements[1], Name: elements[2]}
}
if len(elements) == 2 {
resource = K8Resource{Kind: elements[0], Namespace: elements[1]}
}
if len(elements) == 1 {
resource = K8Resource{Kind: elements[0]}
}
resources = append(resources, resource)
}
return resources
}
//NewKubeClient returns a new kubernetes client
func NewKubeClient(config *rest.Config) (kubernetes.Interface, error) {
kclient, err := kubernetes.NewForConfig(config)


@ -19,7 +19,6 @@ import (
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy"
tlsutils "github.com/nirmata/kyverno/pkg/tls"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/webhookconfig"
v1beta1 "k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -39,9 +38,11 @@ type WebhookServer struct {
eventGen event.Interface
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
// API to send policy stats for aggregation
policyStatus policy.PolicyStatusInterface
filterK8Resources []utils.K8Resource
cleanUp chan<- struct{}
policyStatus policy.PolicyStatusInterface
// helpers to validate against the currently loaded configuration
configHandler config.Interface
// channel for cleanup notification
cleanUp chan<- struct{}
}
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
@ -55,7 +56,7 @@ func NewWebhookServer(
eventGen event.Interface,
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
policyStatus policy.PolicyStatusInterface,
filterK8Resources string,
configHandler config.Interface,
cleanUp chan<- struct{}) (*WebhookServer, error) {
if tlsPair == nil {
@ -80,7 +81,7 @@ func NewWebhookServer(
eventGen: eventGen,
webhookRegistrationClient: webhookRegistrationClient,
policyStatus: policyStatus,
filterK8Resources: utils.ParseKinds(filterK8Resources),
configHandler: configHandler,
cleanUp: cleanUp,
}
mux := http.NewServeMux()
@ -112,20 +113,21 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
}
// Do not process admission requests for resources that match the configured filters
if !utils.SkipFilteredResourcesReq(admissionReview.Request, ws.filterK8Resources) {
request := admissionReview.Request
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
// Resource CREATE
// Resource UPDATE
switch r.URL.Path {
case config.MutatingWebhookServicePath:
admissionReview.Response = ws.handleAdmissionRequest(admissionReview.Request)
admissionReview.Response = ws.handleAdmissionRequest(request)
case config.PolicyValidatingWebhookServicePath:
admissionReview.Response = ws.handlePolicyValidation(admissionReview.Request)
admissionReview.Response = ws.handlePolicyValidation(request)
case config.PolicyMutatingWebhookServicePath:
admissionReview.Response = ws.handlePolicyMutation(admissionReview.Request)
admissionReview.Response = ws.handlePolicyMutation(request)
}
}
admissionReview.Response.UID = admissionReview.Request.UID
admissionReview.Response.UID = request.UID
responseJSON, err := json.Marshal(admissionReview)
if err != nil {