1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-28 18:38:40 +00:00

configurable rules added ()

* configurable rules added

* fix exclude group logic from code

* flag added in yaml

* exclude username added

* exclude username added

* config interface implemented

* configure exclude username

* get role ref

* test case fixed

* panic fix

* move from interface to slice

* exclude added in mutate

* trim strings

* configmap changes added

* kustomize changes for configmap

* k8s resources added
This commit is contained in:
Yuvraj 2020-08-07 17:09:24 -07:00 committed by GitHub
parent a14828246d
commit 73840e3c5f
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
30 changed files with 554 additions and 375 deletions

View file

@ -98,7 +98,8 @@ config:
- "[SubjectAccessReview,*,*]"
- "[*,kyverno,*]"
# Or give the name of an existing config map (ignores default/provided resourceFilters)
existingConfig:
existingConfig: ''
excludeGroupRole: 'system:serviceaccounts:kube-system,system:nodes,system:kube-scheduler'
# existingConfig: init-config
service:

View file

@ -46,6 +46,9 @@ var (
//TODO: this has been added to backward support command line arguments
// will be removed in future and the configuration will be set only via configmaps
filterK8Resources string
excludeGroupRole string
excludeUsername string
// User FQDN as CSR CN
fqdncn bool
setupLog = log.Log.WithName("setup")
@ -55,6 +58,8 @@ func main() {
klog.InitFlags(nil)
log.SetLogger(klogr.New())
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
flag.StringVar(&excludeGroupRole, "excludeGroupRole","","")
flag.StringVar(&excludeUsername, "excludeUsername","","")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
@ -151,6 +156,8 @@ func main() {
kubeClient,
kubeInformer.Core().V1().ConfigMaps(),
filterK8Resources,
excludeGroupRole,
excludeUsername,
log.Log.WithName("ConfigData"),
)
@ -212,6 +219,7 @@ func main() {
kubedynamicInformer,
statusSync.Listener,
log.Log.WithName("GenerateController"),
configData,
)
// GENERATE REQUEST CLEANUP
@ -238,6 +246,7 @@ func main() {
kubeInformer.Rbac().V1().RoleBindings(),
kubeInformer.Rbac().V1().ClusterRoleBindings(),
log.Log.WithName("ValidateAuditHandler"),
configData,
)
// CONFIGURE CERTIFICATES

View file

@ -3,4 +3,4 @@ kind: Kustomization
resources:
- ../crds/
- ../rbac/
- ../k8s-resource/

View file

@ -731,6 +731,7 @@ subjects:
---
apiVersion: v1
data:
excludeGroupRole: system:serviceaccounts:kube-system,system:nodes,system:kube-scheduler
resourceFilters: '[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]'
kind: ConfigMap
metadata:
@ -771,6 +772,7 @@ spec:
containers:
- args:
- --filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]
- --excludeGroupRole="system:serviceaccounts:kube-system,system:nodes,system:kube-scheduler"
- -v=2
env:
- name: INIT_CONFIG

View file

@ -731,6 +731,7 @@ subjects:
---
apiVersion: v1
data:
excludeGroupRole: system:serviceaccounts:kube-system,system:nodes,system:kube-scheduler
resourceFilters: '[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]'
kind: ConfigMap
metadata:

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
resourceFilters: '[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]'
excludeGroupRole: 'system:serviceaccounts:kube-system,system:nodes,system:kube-scheduler'
kind: ConfigMap
metadata:
name: init-config
namespace: kyverno

View file

@ -2,4 +2,5 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./rbac.yaml
- ./rbac.yaml
- ./configmap.yaml

View file

@ -0,0 +1,259 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: "kyverno"
---
apiVersion: v1
kind: Service
metadata:
namespace: kyverno
name: kyverno-svc
labels:
app: kyverno
spec:
ports:
- port: 443
targetPort: 443
selector:
app: kyverno
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyviolations
rules:
- apiGroups: ["kyverno.io"]
resources:
- policyviolations
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:webhook
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:userinfo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:userinfo
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:customresources
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:customresources
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:policycontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:policycontroller
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:generatecontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:generatecontroller
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:webhook
rules:
# Dynamic creation of webhooks, events & certs
- apiGroups:
- '*'
resources:
- events
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
- certificatesigningrequests
- certificatesigningrequests/approval
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
- certificatesigningrequests/approval
- certificatesigningrequests/status
resourceNames:
- kubernetes.io/legacy-unknown
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- certificates.k8s.io
resources:
- signers
resourceNames:
- kubernetes.io/legacy-unknown
verbs:
- approve
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:userinfo
rules:
# get the roleRef for incoming api-request user
- apiGroups:
- "*"
resources:
- roles
- clusterroles
- rolebindings
- clusterrolebindings
- configmaps
verbs:
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:customresources
rules:
# Kyverno CRs
- apiGroups:
- '*'
resources:
- clusterpolicies
- clusterpolicies/status
- clusterpolicyviolations
- clusterpolicyviolations/status
- policyviolations
- policyviolations/status
- generaterequests
- generaterequests/status
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:policycontroller
rules:
# background processing, identify all existing resources
- apiGroups:
- '*'
resources:
- '*'
verbs:
- get
- list
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:generatecontroller
rules:
# process generate rules to generate resources
- apiGroups:
- "*"
resources:
- namespaces
- networkpolicies
- secrets
- configmaps
- resourcequotas
- limitranges
- clusterroles
- rolebindings
- clusterrolebindings
verbs:
- create
- update
- delete
- get
# dynamic watches on trigger resources for generate rules
# re-evaluate the policy if the resource is updated
- apiGroups:
- '*'
resources:
- namespaces
verbs:
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:view-policyviolations
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: ["kyverno.io"]
resources:
- policyviolations
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:view-clusterpolicyviolations
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["kyverno.io"]
resources:
- clusterpolicyviolations
verbs: ["get", "list", "watch"]

View file

@ -4,4 +4,4 @@ kind: Kustomization
resources:
- ./crds/
- ./manifest/
- ./rbac/
- ./k8s-resource/

View file

@ -26,12 +26,13 @@ spec:
image: nirmata/kyverno:v1.1.8
imagePullPolicy: Always
args:
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]"
# customize webhook timeout
#- "--webhooktimeout=4"
# enable profiling
# - "--profile"
- "-v=2"
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]"
# customize webhook timeout
#- "--webhooktimeout=4"
# enable profiling
# - "--profile"
- --excludeGroupRole="system:serviceaccounts:kube-system,system:nodes,system:kube-scheduler"
- "-v=2"
ports:
- containerPort: 443
env:
@ -68,4 +69,5 @@ spec:
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 4
successThreshold: 1
successThreshold: 1

View file

@ -1,268 +0,0 @@
---
kind: Namespace
apiVersion: v1
metadata:
name: "kyverno"
---
apiVersion: v1
kind: Service
metadata:
namespace: kyverno
name: kyverno-svc
labels:
app: kyverno
spec:
ports:
- port: 443
targetPort: 443
selector:
app: kyverno
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyviolations
rules:
- apiGroups: ["kyverno.io"]
resources:
- policyviolations
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:webhook
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:userinfo
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:userinfo
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:customresources
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:customresources
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:policycontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:policycontroller
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: kyverno:generatecontroller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kyverno:generatecontroller
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:webhook
rules:
# Dynamic creation of webhooks, events & certs
- apiGroups:
- '*'
resources:
- events
- mutatingwebhookconfigurations
- validatingwebhookconfigurations
- certificatesigningrequests
- certificatesigningrequests/approval
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- certificates.k8s.io
resources:
- certificatesigningrequests
- certificatesigningrequests/approval
- certificatesigningrequests/status
resourceNames:
- kubernetes.io/legacy-unknown
verbs:
- create
- delete
- get
- update
- watch
- apiGroups:
- certificates.k8s.io
resources:
- signers
resourceNames:
- kubernetes.io/legacy-unknown
verbs:
- approve
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:userinfo
rules:
# get the roleRef for incoming api-request user
- apiGroups:
- "*"
resources:
- roles
- clusterroles
- rolebindings
- clusterrolebindings
- configmaps
verbs:
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:customresources
rules:
# Kyverno CRs
- apiGroups:
- '*'
resources:
- clusterpolicies
- clusterpolicies/status
- clusterpolicyviolations
- clusterpolicyviolations/status
- policyviolations
- policyviolations/status
- generaterequests
- generaterequests/status
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:policycontroller
rules:
# background processing, identify all existing resources
- apiGroups:
- '*'
resources:
- '*'
verbs:
- get
- list
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: kyverno:generatecontroller
rules:
# process generate rules to generate resources
- apiGroups:
- "*"
resources:
- namespaces
- networkpolicies
- secrets
- configmaps
- resourcequotas
- limitranges
- clusterroles
- rolebindings
- clusterrolebindings
verbs:
- create
- update
- delete
- get
# dynamic watches on trigger resources for generate rules
# re-evaluate the policy if the resource is updated
- apiGroups:
- '*'
resources:
- namespaces
verbs:
- watch
---
apiVersion: v1
kind: ConfigMap
metadata:
name: init-config
namespace: kyverno
data:
# resource types to be skipped by kyverno policy engine
resourceFilters: "[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:view-policyviolations
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: ["kyverno.io"]
resources:
- policyviolations
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:view-clusterpolicyviolations
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["kyverno.io"]
resources:
- clusterpolicyviolations
verbs: ["get", "list", "watch"]

View file

@ -28,6 +28,15 @@ type ConfigData struct {
mux sync.RWMutex
// configuration data
filters []k8Resource
// excludeGroupRole Role
excludeGroupRole []string
//excludeUsername exclude username
excludeUsername []string
//restrictDevelopmentUsername exclude dev username like minikube and kind
restrictDevelopmentUsername []string
// hasynced
cmSycned cache.InformerSynced
log logr.Logger
@ -45,13 +54,37 @@ func (cd *ConfigData) ToFilter(kind, namespace, name string) bool {
return false
}
// GetExcludeGroupRole returns the configured list of groups/roles that are
// excluded from policy rule matching. The read is guarded by the shared
// RWMutex because the list can be replaced by a ConfigMap update.
func (cd *ConfigData) GetExcludeGroupRole() []string {
	cd.mux.RLock()
	roles := cd.excludeGroupRole
	cd.mux.RUnlock()
	return roles
}
// RestrictDevelopmentUsername returns the built-in development usernames
// (e.g. minikube/kind admin users) that are always excluded.
func (cd *ConfigData) RestrictDevelopmentUsername() []string {
	cd.mux.RLock()
	usernames := cd.restrictDevelopmentUsername
	cd.mux.RUnlock()
	return usernames
}
// GetExcludeUsername returns the configured list of usernames that are
// excluded from policy rule matching, under a read lock.
func (cd *ConfigData) GetExcludeUsername() []string {
	cd.mux.RLock()
	usernames := cd.excludeUsername
	cd.mux.RUnlock()
	return usernames
}
// Interface to be used by consumer to check filters
// and the dynamic exclusion lists loaded from the Kyverno ConfigMap
// or command-line flags.
type Interface interface {
	// ToFilter reports whether the resource identified by kind/namespace/name
	// matches a configured resource filter and should be skipped.
	ToFilter(kind, namespace, name string) bool
	// GetExcludeGroupRole returns the groups/roles excluded from rule matching.
	GetExcludeGroupRole() []string
	// GetExcludeUsername returns the usernames excluded from rule matching.
	GetExcludeUsername() []string
	// RestrictDevelopmentUsername returns built-in development usernames
	// (set to "minikube-user" and "kubernetes-admin" at construction).
	RestrictDevelopmentUsername() []string
}
// NewConfigData ...
func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, filterK8Resources string, log logr.Logger) *ConfigData {
func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, filterK8Resources,excludeGroupRole,excludeUsername string, log logr.Logger) *ConfigData {
// environment var is read at start only
if cmNameEnv == "" {
log.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
@ -62,13 +95,25 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
cmSycned: cmInformer.Informer().HasSynced,
log: log,
}
cd.restrictDevelopmentUsername = []string{"minikube-user", "kubernetes-admin"}
//TODO: this has been added to backward support command line arguments
// will be removed in future and the configuration will be set only via configmaps
if filterK8Resources != "" {
cd.log.Info("init configuration from commandline arguments")
cd.log.Info("init configuration from commandline arguments for filterK8Resources")
cd.initFilters(filterK8Resources)
}
if excludeGroupRole != "" {
cd.log.Info("init configuration from commandline arguments for excludeGroupRole")
cd.initRbac("excludeRoles",excludeGroupRole)
}
if excludeUsername != "" {
cd.log.Info("init configuration from commandline arguments for excludeUsername")
cd.initRbac("excludeUsername",excludeUsername)
}
cmInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: cd.addCM,
UpdateFunc: cd.updateCM,
@ -139,11 +184,33 @@ func (cd *ConfigData) load(cm v1.ConfigMap) {
logger.V(4).Info("configuration: No resourceFilters defined in ConfigMap")
return
}
// get resource filters
excludeGroupRole, ok := cm.Data["excludeGroupRole"]
if !ok {
logger.V(4).Info("configuration: No excludeGroupRole defined in ConfigMap")
return
}
// get resource filters
excludeUsername, ok := cm.Data["excludeUsername"]
if !ok {
logger.V(4).Info("configuration: No excludeUsername defined in ConfigMap")
return
}
// filters is a string
if filters == "" {
logger.V(4).Info("configuration: resourceFilters is empty in ConfigMap")
return
}
if excludeGroupRole == "" {
logger.V(4).Info("configuration: excludeGroupRole is empty in ConfigMap")
return
}
if excludeUsername == "" {
logger.V(4).Info("configuration: excludeUsername is empty in ConfigMap")
return
}
// parse and load the configuration
cd.mux.Lock()
defer cd.mux.Unlock()
@ -151,11 +218,29 @@ func (cd *ConfigData) load(cm v1.ConfigMap) {
newFilters := parseKinds(filters)
if reflect.DeepEqual(newFilters, cd.filters) {
logger.V(4).Info("resourceFilters did not change")
return
}else{
logger.V(2).Info("Updated resource filters", "oldFilters", cd.filters, "newFilters", newFilters)
// update filters
cd.filters = newFilters
}
logger.V(2).Info("Updated resource filters", "oldFilters", cd.filters, "newFilters", newFilters)
// update filters
cd.filters = newFilters
excludeGroupRoles := parseRbac(excludeGroupRole)
if reflect.DeepEqual(excludeGroupRoles, cd.excludeGroupRole) {
logger.V(4).Info("excludeGroupRole did not change")
}else{
logger.V(2).Info("Updated resource excludeGroupRoles", "oldExcludeGroupRole", cd.excludeGroupRole, "newExcludeGroupRole", excludeGroupRoles)
// update filters
cd.excludeGroupRole = excludeGroupRoles
}
excludeUsernames := parseRbac(excludeUsername)
if reflect.DeepEqual(excludeUsernames, cd.excludeUsername) {
logger.V(4).Info("excludeGroupRole did not change")
}else{
logger.V(2).Info("Updated resource excludeUsernames", "oldExcludeUsername", cd.excludeUsername, "newExcludeUsername", excludeUsernames)
// update filters
cd.excludeUsername = excludeUsernames
}
}
//TODO: this has been added to backward support command line arguments
@ -172,12 +257,31 @@ func (cd *ConfigData) initFilters(filters string) {
cd.filters = newFilters
}
// initRbac seeds an exclusion list from a command-line argument.
// action selects the target list: "excludeRoles" populates excludeGroupRole,
// any other value populates excludeUsername. Writes are done under the lock.
func (cd *ConfigData) initRbac(action, exclude string) {
	logger := cd.log
	cd.mux.Lock()
	defer cd.mux.Unlock()
	entries := parseRbac(exclude)
	logger.V(2).Info("Init resource ", action, exclude)
	switch action {
	case "excludeRoles":
		cd.excludeGroupRole = entries
	default:
		cd.excludeUsername = entries
	}
}
// unload clears all dynamic configuration when the backing ConfigMap is
// deleted. Lists are reset to empty (non-nil) slices, matching the original
// semantics, so consumers see "no entries" rather than nil.
func (cd *ConfigData) unload(cm v1.ConfigMap) {
	cd.log.Info("ConfigMap deleted, removing configuration filters", "name", cm.Name, "namespace", cm.Namespace)
	cd.mux.Lock()
	cd.filters = []k8Resource{}
	cd.excludeGroupRole = []string{}
	cd.excludeUsername = []string{}
	cd.mux.Unlock()
}
type k8Resource struct {
@ -214,3 +318,12 @@ func parseKinds(list string) []k8Resource {
}
return resources
}
// parseRbac splits a comma-separated exclusion list (group roles or
// usernames) into its individual entries. Note that an empty input yields
// a single empty entry, per strings.Split semantics — callers guard against
// empty input before calling.
func parseRbac(list string) []string {
	// strings.Split already returns a freshly allocated slice with exactly
	// these elements; the previous element-by-element append loop was a
	// redundant second copy.
	return strings.Split(list, ",")
}

View file

@ -22,18 +22,19 @@ func Generate(policyContext PolicyContext) (resp response.EngineResponse) {
resource := policyContext.NewResource
admissionInfo := policyContext.AdmissionInfo
ctx := policyContext.Context
logger := log.Log.WithName("Generate").WithValues("policy", policy.Name, "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
return filterRules(policy, resource, admissionInfo, ctx, logger)
return filterRules(policy, resource, admissionInfo, ctx, logger,policyContext.ExcludeGroupRole)
}
func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger) *response.RuleResponse {
func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger,excludeGroupRole []string) *response.RuleResponse {
if !rule.HasGenerate() {
return nil
}
startTime := time.Now()
if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil {
if err := MatchesResourceDescription(resource, rule, admissionInfo,excludeGroupRole); err != nil {
return nil
}
// operate on the copy of the conditions, as we perform variable substitution
@ -55,7 +56,7 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
}
}
func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger) response.EngineResponse {
func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger,excludeGroupRole []string) response.EngineResponse {
resp := response.EngineResponse{
PolicyResponse: response.PolicyResponse{
Policy: policy.Name,
@ -66,9 +67,8 @@ func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
},
},
}
for _, rule := range policy.Spec.Rules {
if ruleResp := filterRule(rule, resource, admissionInfo, ctx, log); ruleResp != nil {
if ruleResp := filterRule(rule, resource, admissionInfo, ctx, log,excludeGroupRole); ruleResp != nil {
resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, *ruleResp)
}
}

View file

@ -51,7 +51,11 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
// check if the resource satisfies the filter conditions defined in the rule
//TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
// dont satisfy a policy rule resource description
if err := MatchesResourceDescription(patchedResource, rule, policyContext.AdmissionInfo); err != nil {
excludeResource := []string{}
if len(policyContext.ExcludeGroupRole) > 0 {
excludeResource = policyContext.ExcludeGroupRole
}
if err := MatchesResourceDescription(patchedResource, rule, policyContext.AdmissionInfo,excludeResource); err != nil {
logger.V(3).Info("resource not matched", "reason", err.Error())
continue
}

View file

@ -79,6 +79,7 @@ func Test_VariableSubstitutionOverlay(t *testing.T) {
t.Error(err)
}
value, err := ctx.Query("request.object.metadata.name")
t.Log(value)
if err != nil {
t.Error(err)

View file

@ -20,4 +20,6 @@ type PolicyContext struct {
Client *client.Client
// Contexts to store resources
Context context.EvalInterface
// Config handler
ExcludeGroupRole []string
}

View file

@ -18,8 +18,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
)
var ExcludeUserInfo = []string{"system:nodes", "system:serviceaccounts:kube-system", "system:kube-scheduler"}
//EngineStats stores in the statistics for a single application of resource
type EngineStats struct {
// average time required to process the policy rules on a resource
@ -80,7 +78,7 @@ func checkSelector(labelSelector *metav1.LabelSelector, resourceLabels map[strin
// should be: AND across attibutes but an OR inside attributes that of type list
// To filter out the targeted resources with UserInfo, the check
// should be: OR (accross & inside) attributes
func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription, userInfo kyverno.UserInfo, admissionInfo kyverno.RequestInfo, resource unstructured.Unstructured) []error {
func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription, userInfo kyverno.UserInfo, admissionInfo kyverno.RequestInfo, resource unstructured.Unstructured,dynamicConfig []string) []error {
var errs []error
if len(conditionBlock.Kinds) > 0 {
if !checkKind(conditionBlock.Kinds, resource.GetKind()) {
@ -111,7 +109,7 @@ func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription,
keys := append(admissionInfo.AdmissionUserInfo.Groups, admissionInfo.AdmissionUserInfo.Username)
var userInfoErrors []error
var checkedItem int
if len(userInfo.Roles) > 0 && !utils.SliceContains(keys, ExcludeUserInfo...) {
if len(userInfo.Roles) > 0 && !utils.SliceContains(keys, dynamicConfig...) {
checkedItem++
if !utils.SliceContains(userInfo.Roles, admissionInfo.Roles...) {
@ -121,7 +119,7 @@ func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription,
}
}
if len(userInfo.ClusterRoles) > 0 && !utils.SliceContains(keys, ExcludeUserInfo...) {
if len(userInfo.ClusterRoles) > 0 && !utils.SliceContains(keys, dynamicConfig...) {
checkedItem++
if !utils.SliceContains(userInfo.ClusterRoles, admissionInfo.ClusterRoles...) {
@ -134,7 +132,7 @@ func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription,
if len(userInfo.Subjects) > 0 {
checkedItem++
if !matchSubjects(userInfo.Subjects, admissionInfo.AdmissionUserInfo) {
if !matchSubjects(userInfo.Subjects, admissionInfo.AdmissionUserInfo,dynamicConfig) {
userInfoErrors = append(userInfoErrors, fmt.Errorf("user info does not match subject for the given conditionBlock"))
} else {
return errs
@ -149,17 +147,17 @@ func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription,
}
// matchSubjects return true if one of ruleSubjects exist in userInfo
func matchSubjects(ruleSubjects []rbacv1.Subject, userInfo authenticationv1.UserInfo) bool {
func matchSubjects(ruleSubjects []rbacv1.Subject, userInfo authenticationv1.UserInfo,dynamicConfig []string) bool {
const SaPrefix = "system:serviceaccount:"
userGroups := append(userInfo.Groups, userInfo.Username)
// TODO: see issue https://github.com/nirmata/kyverno/issues/861
ruleSubjects = append(ruleSubjects,
rbacv1.Subject{Kind: "Group", Name: "system:serviceaccounts:kube-system"},
rbacv1.Subject{Kind: "Group", Name: "system:nodes"},
rbacv1.Subject{Kind: "Group", Name: "system:kube-scheduler"},
)
for _,e := range dynamicConfig {
ruleSubjects = append(ruleSubjects,
rbacv1.Subject{Kind: "Group", Name: e},
)
}
for _, subject := range ruleSubjects {
switch subject.Kind {
@ -182,7 +180,8 @@ func matchSubjects(ruleSubjects []rbacv1.Subject, userInfo authenticationv1.User
}
//MatchesResourceDescription checks if the resource matches resource description of the rule or not
func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef kyverno.Rule, admissionInfoRef kyverno.RequestInfo) error {
func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef kyverno.Rule, admissionInfoRef kyverno.RequestInfo,dynamicConfig []string) error {
rule := *ruleRef.DeepCopy()
resource := *resourceRef.DeepCopy()
admissionInfo := *admissionInfoRef.DeepCopy()
@ -196,7 +195,7 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
// checking if resource matches the rule
if !reflect.DeepEqual(rule.MatchResources.ResourceDescription, kyverno.ResourceDescription{}) ||
!reflect.DeepEqual(rule.MatchResources.UserInfo, kyverno.UserInfo{}) {
matchErrs := doesResourceMatchConditionBlock(rule.MatchResources.ResourceDescription, rule.MatchResources.UserInfo, admissionInfo, resource)
matchErrs := doesResourceMatchConditionBlock(rule.MatchResources.ResourceDescription, rule.MatchResources.UserInfo, admissionInfo, resource,dynamicConfig)
reasonsForFailure = append(reasonsForFailure, matchErrs...)
} else {
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("match cannot be empty"))
@ -205,7 +204,7 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
// checking if resource has been excluded
if !reflect.DeepEqual(rule.ExcludeResources.ResourceDescription, kyverno.ResourceDescription{}) ||
!reflect.DeepEqual(rule.ExcludeResources.UserInfo, kyverno.UserInfo{}) {
excludeErrs := doesResourceMatchConditionBlock(rule.ExcludeResources.ResourceDescription, rule.ExcludeResources.UserInfo, admissionInfo, resource)
excludeErrs := doesResourceMatchConditionBlock(rule.ExcludeResources.ResourceDescription, rule.ExcludeResources.UserInfo, admissionInfo, resource,dynamicConfig)
if excludeErrs == nil {
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("resource excluded"))
}

View file

@ -70,7 +70,7 @@ func TestMatchesResourceDescription(t *testing.T) {
resource, _ := utils.ConvertToUnstructured(tc.Resource)
for _, rule := range policy.Spec.Rules {
err := MatchesResourceDescription(*resource, rule, tc.AdmissionInfo)
err := MatchesResourceDescription(*resource, rule, tc.AdmissionInfo,[]string{})
if err != nil {
if !tc.areErrorsExpected {
t.Errorf("Testcase %d Unexpected error: %v", i+1, err)
@ -138,7 +138,7 @@ func TestResourceDescriptionMatch_MultipleKind(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{},[]string{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
@ -199,7 +199,7 @@ func TestResourceDescriptionMatch_Name(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{},[]string{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
@ -259,7 +259,7 @@ func TestResourceDescriptionMatch_Name_Regex(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{},[]string{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
@ -327,7 +327,7 @@ func TestResourceDescriptionMatch_Label_Expression_NotMatch(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{},[]string{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
@ -396,7 +396,7 @@ func TestResourceDescriptionMatch_Label_Expression_Match(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{},[]string{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
@ -476,7 +476,7 @@ func TestResourceDescriptionExclude_Label_Expression_Match(t *testing.T) {
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription},
ExcludeResources: kyverno.ExcludeResources{ResourceDescription: resourceDescriptionExclude}}
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err == nil {
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{},[]string{}); err == nil {
t.Errorf("Testcase has failed due to the following:\n Function has returned no error, even though it was suposed to fail")
}
}

View file

@ -62,19 +62,19 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) {
// If request is delete, newR will be empty
if reflect.DeepEqual(newR, unstructured.Unstructured{}) {
return *isRequestDenied(logger, ctx, policy, oldR, admissionInfo)
return *isRequestDenied(logger, ctx, policy, oldR, admissionInfo,policyContext.ExcludeGroupRole)
}
if denyResp := isRequestDenied(logger, ctx, policy, newR, admissionInfo); !denyResp.IsSuccessful() {
if denyResp := isRequestDenied(logger, ctx, policy, newR, admissionInfo,policyContext.ExcludeGroupRole); !denyResp.IsSuccessful() {
return *denyResp
}
if reflect.DeepEqual(oldR, unstructured.Unstructured{}) {
return *validateResource(logger, ctx, policy, newR, admissionInfo)
return *validateResource(logger, ctx, policy, newR, admissionInfo,policyContext.ExcludeGroupRole)
}
oldResponse := validateResource(logger, ctx, policy, oldR, admissionInfo)
newResponse := validateResource(logger, ctx, policy, newR, admissionInfo)
oldResponse := validateResource(logger, ctx, policy, oldR, admissionInfo,policyContext.ExcludeGroupRole)
newResponse := validateResource(logger, ctx, policy, newR, admissionInfo,policyContext.ExcludeGroupRole)
if !isSameResponse(oldResponse, newResponse) {
return *newResponse
}
@ -102,19 +102,22 @@ func incrementAppliedCount(resp *response.EngineResponse) {
resp.PolicyResponse.RulesAppliedCount++
}
func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.EngineResponse {
func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo,excludeGroupRole []string) *response.EngineResponse {
resp := &response.EngineResponse{}
if policy.HasAutoGenAnnotation() && excludePod(resource) {
log.V(5).Info("Skip applying policy, Pod has ownerRef set", "policy", policy.GetName())
return resp
}
excludeResource := []string{}
if len(excludeGroupRole) > 0 {
excludeResource = excludeGroupRole
}
for _, rule := range policy.Spec.Rules {
if !rule.HasValidate() {
continue
}
if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil {
if err := MatchesResourceDescription(resource, rule, admissionInfo,excludeResource); err != nil {
log.V(4).Info("resource fails the match description", "reason", err.Error())
continue
}
@ -144,7 +147,7 @@ func isRequestDenied(log logr.Logger, ctx context.EvalInterface, policy kyverno.
return resp
}
func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.EngineResponse {
func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo,excludeGroupRole []string) *response.EngineResponse {
resp := &response.EngineResponse{}
if policy.HasAutoGenAnnotation() && excludePod(resource) {
@ -152,6 +155,11 @@ func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno
return resp
}
excludeResource := []string{}
if len(excludeGroupRole)>0 {
excludeResource = excludeGroupRole
}
for _, rule := range policy.Spec.Rules {
if !rule.HasValidate() {
continue
@ -160,7 +168,7 @@ func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno
// check if the resource satisfies the filter conditions defined in the rule
// TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
// dont satisfy a policy rule resource description
if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil {
if err := MatchesResourceDescription(resource, rule, admissionInfo,excludeResource); err != nil {
log.V(4).Info("resource fails the match description", "reason", err.Error())
continue
}

View file

@ -58,6 +58,8 @@ type Controller struct {
nsInformer informers.GenericInformer
policyStatusListener policystatus.Listener
log logr.Logger
Config config.Interface
}
//NewController returns an instance of the Generate-Request Controller
@ -70,6 +72,7 @@ func NewController(
dynamicInformer dynamicinformer.DynamicSharedInformerFactory,
policyStatus policystatus.Listener,
log logr.Logger,
dynamicConfig config.Interface,
) *Controller {
c := Controller{
client: client,
@ -81,6 +84,7 @@ func NewController(
dynamicInformer: dynamicInformer,
log: log,
policyStatusListener: policyStatus,
Config: dynamicConfig,
}
c.statusControl = StatusControl{client: kyvernoclient}

View file

@ -97,6 +97,7 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern
Policy: *policy,
Context: ctx,
AdmissionInfo: gr.Spec.Context.UserRequestInfo,
ExcludeGroupRole : c.Config.GetExcludeGroupRole(),
}
// check if the policy still applies to the resource

View file

@ -19,7 +19,7 @@ import (
// applyPolicy applies policy on a resource
//TODO: generation rules
func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, logger logr.Logger) (responses []response.EngineResponse) {
func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, logger logr.Logger,excludeGroupRole []string) (responses []response.EngineResponse) {
startTime := time.Now()
defer func() {
name := resource.GetKind() + "/" + resource.GetName()
@ -47,7 +47,7 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
}
//VALIDATION
engineResponseValidation = engine.Validate(engine.PolicyContext{Policy: policy, Context: ctx, NewResource: resource})
engineResponseValidation = engine.Validate(engine.PolicyContext{Policy: policy, Context: ctx, NewResource: resource,ExcludeGroupRole: excludeGroupRole})
engineResponses = append(engineResponses, mergeRuleRespose(engineResponseMutation, engineResponseValidation))
//TODO: GENERATION

View file

@ -47,7 +47,7 @@ func (pc *PolicyController) processExistingResources(policy *kyverno.ClusterPoli
}
// apply the policy on each
engineResponse := applyPolicy(*policy, resource, logger)
engineResponse := applyPolicy(*policy, resource, logger,pc.configHandler.GetExcludeGroupRole())
// get engine response for mutation & validation independently
engineResponses = append(engineResponses, engineResponse...)
// post-processing, register the resource as processed

View file

@ -128,7 +128,7 @@ func runTestCase(t *testing.T, tc scaseT) bool {
var er response.EngineResponse
er = engine.Mutate(engine.PolicyContext{Policy: *policy, NewResource: *resource})
er = engine.Mutate(engine.PolicyContext{Policy: *policy, NewResource: *resource,ExcludeGroupRole: []string{}})
t.Log("---Mutation---")
validateResource(t, er.PatchedResource, tc.Expected.Mutation.PatchedResource)
validateResponse(t, er.PolicyResponse, tc.Expected.Mutation.PolicyResponse)
@ -138,7 +138,7 @@ func runTestCase(t *testing.T, tc scaseT) bool {
resource = &er.PatchedResource
}
er = engine.Validate(engine.PolicyContext{Policy: *policy, NewResource: *resource})
er = engine.Validate(engine.PolicyContext{Policy: *policy, NewResource: *resource,ExcludeGroupRole: []string{}})
t.Log("---Validation---")
validateResponse(t, er.PolicyResponse, tc.Expected.Validation.PolicyResponse)
@ -156,6 +156,7 @@ func runTestCase(t *testing.T, tc scaseT) bool {
NewResource: *resource,
Policy: *policy,
Client: client,
ExcludeGroupRole: []string{},
}
er = engine.Generate(policyContext)

View file

@ -4,7 +4,7 @@ import (
"fmt"
"strings"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/config"
"github.com/nirmata/kyverno/pkg/utils"
v1beta1 "k8s.io/api/admission/v1beta1"
authenticationv1 "k8s.io/api/authentication/v1"
@ -18,14 +18,20 @@ const (
clusterrolekind = "ClusterRole"
rolekind = "Role"
SaPrefix = "system:serviceaccount:"
KyvernoSuffix = "kyverno:"
)
var defaultSuffixs = []string{"system:", "kyverno:"}
type allRolesStruct struct {
RoleType string
Role []string
}
var allRoles []allRolesStruct
//GetRoleRef gets the list of roles and cluster roles for the incoming api-request
func GetRoleRef(rbLister rbaclister.RoleBindingLister, crbLister rbaclister.ClusterRoleBindingLister, request *v1beta1.AdmissionRequest) (roles []string, clusterRoles []string, err error) {
func GetRoleRef(rbLister rbaclister.RoleBindingLister, crbLister rbaclister.ClusterRoleBindingLister, request *v1beta1.AdmissionRequest,dynamicConfig config.Interface) (roles []string, clusterRoles []string, err error) {
keys := append(request.UserInfo.Groups, request.UserInfo.Username)
if utils.SliceContains(keys, engine.ExcludeUserInfo...) {
if utils.SliceContains(keys, dynamicConfig.GetExcludeGroupRole()...) {
return
}
@ -131,53 +137,68 @@ func matchUserOrGroup(subject rbacv1.Subject, userInfo authenticationv1.UserInfo
}
//IsRoleAuthorize is role authorize or not
func IsRoleAuthorize(rbLister rbaclister.RoleBindingLister, crbLister rbaclister.ClusterRoleBindingLister, rLister rbaclister.RoleLister, crLister rbaclister.ClusterRoleLister, request *v1beta1.AdmissionRequest) (bool, error) {
func IsRoleAuthorize(rbLister rbaclister.RoleBindingLister, crbLister rbaclister.ClusterRoleBindingLister, rLister rbaclister.RoleLister, crLister rbaclister.ClusterRoleLister, request *v1beta1.AdmissionRequest,dynamicConfig config.Interface) (bool, error) {
if strings.Contains(request.UserInfo.Username, SaPrefix) {
roles, clusterRoles, err := GetRoleRef(rbLister, crbLister, request)
roles, clusterRoles, err := GetRoleRef(rbLister, crbLister, request,dynamicConfig)
if err != nil {
return false, err
}
for _, e := range clusterRoles {
if strings.Contains(e, "kyverno:") {
return true, nil
}
role, err := crLister.Get(e)
if err != nil {
return false, err
}
labels := role.GetLabels()
if labels["kubernetes.io/bootstrapping"] == "rbac-defaults" {
return true, nil
}
}
for _, e := range roles {
roleData := strings.Split(e, ":")
role, err := rLister.Roles(roleData[0]).Get(roleData[1])
if err != nil {
return false, err
}
labels := role.GetLabels()
if !strings.Contains(e, "kyverno:") {
if labels["kubernetes.io/bootstrapping"] == "rbac-defaults" {
allRoles := append(allRoles,allRolesStruct{
RoleType: "ClusterRole",
Role : clusterRoles,
},allRolesStruct{
RoleType: "Role",
Role : roles,
})
for _, r := range allRoles {
for _,e := range r.Role {
if strings.Contains(e, KyvernoSuffix) {
return true, nil
}
var labels map[string]string
if r.RoleType == "Role" {
roleData := strings.Split(e, ":")
role, err := rLister.Roles(roleData[0]).Get(strings.Join(roleData[1:],":"))
if err != nil {
return false, err
}
labels = role.GetLabels()
}else{
role, err := crLister.Get(e)
if err != nil {
return false, err
}
labels = role.GetLabels()
}
if !strings.Contains(e, KyvernoSuffix) {
if labels["kubernetes.io/bootstrapping"] == "rbac-defaults" {
return true, nil
}
}
}
}
return true, nil
}
// User or Group
excludeDevelopmentRole := []string{"minikube-user", "kubernetes-admin"}
for _, e := range excludeDevelopmentRole {
for _, e := range dynamicConfig.GetExcludeUsername() {
if strings.Contains(request.UserInfo.Username, e) {
return true, nil
}
}
// Restrict Development Roles
for _, e := range dynamicConfig.RestrictDevelopmentUsername() {
if strings.Contains(request.UserInfo.Username, strings.TrimSpace(e)) {
return false, nil
}
}
var matchedRoles []bool
excludeGroupRule := append(dynamicConfig.GetExcludeGroupRole(),KyvernoSuffix)
for _, e := range request.UserInfo.Groups {
for _, defaultSuffix := range defaultSuffixs {
if strings.Contains(e, defaultSuffix) {
for _, defaultSuffix := range excludeGroupRule {
if strings.Contains(strings.TrimSpace(e), strings.TrimSpace(defaultSuffix)) {
matchedRoles = append(matchedRoles, true)
break
}

View file

@ -2,6 +2,7 @@ package webhooks
import (
"fmt"
"github.com/nirmata/kyverno/pkg/config"
"reflect"
"sort"
"strings"
@ -20,7 +21,7 @@ import (
)
//HandleGenerate handles admission-requests for policies with generate rules
func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo) {
func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo,dynamicConfig config.Interface) {
logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
logger.V(4).Info("incoming request")
var engineResponses []response.EngineResponse
@ -42,6 +43,7 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
NewResource: *resource,
AdmissionInfo: userRequestInfo,
Context: ctx,
ExcludeGroupRole : dynamicConfig.GetExcludeGroupRole(),
}
// engine.Generate returns a list of rules that are applicable on this resource

View file

@ -42,6 +42,7 @@ func (ws *WebhookServer) HandleMutation(
NewResource: resource,
AdmissionInfo: userRequestInfo,
Context: ctx,
ExcludeGroupRole: ws.configHandler.GetExcludeGroupRole(),
}
if request.Operation == v1beta1.Update {

View file

@ -280,7 +280,7 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
var roles, clusterRoles []string
var err error
if containRBACinfo(mutatePolicies, validatePolicies, generatePolicies) {
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request)
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request,ws.configHandler)
if err != nil {
// TODO(shuting): continue apply policy if error getting roleRef?
logger.Error(err, "failed to get RBAC infromation for request")
@ -342,7 +342,7 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
ws.auditHandler.Add(request.DeepCopy())
// VALIDATION
ok, msg := HandleValidation(request, validatePolicies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log)
ok, msg := HandleValidation(request, validatePolicies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log,ws.configHandler)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{
@ -364,7 +364,7 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
// Failed -> Failed to create Generate Request CR
if request.Operation == v1beta1.Create || request.Operation == v1beta1.Update {
go ws.HandleGenerate(request.DeepCopy(), generatePolicies, ctx, userRequestInfo)
go ws.HandleGenerate(request.DeepCopy(), generatePolicies, ctx, userRequestInfo,ws.configHandler)
}
// Succesful processing of mutation & validation rules in policy
@ -427,7 +427,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
var err error
// getRoleRef only if policy has roles/clusterroles defined
if containRBACinfo(policies) {
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request)
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request,ws.configHandler)
if err != nil {
logger.Error(err, "failed to get RBAC information for request")
return &v1beta1.AdmissionResponse{
@ -463,7 +463,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
logger.Error(err, "failed to load service account in context")
}
ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log)
ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.log,ws.configHandler)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{
@ -577,7 +577,7 @@ func (ws *WebhookServer) excludeKyvernoResources(request *v1beta1.AdmissionReque
labels := resource.GetLabels()
if labels != nil {
if labels["app.kubernetes.io/managed-by"] == "kyverno" && labels["policy.kyverno.io/synchronize"] == "enable" {
isAuthorized, err := userinfo.IsRoleAuthorize(ws.rbLister, ws.crbLister, ws.rLister, ws.crLister, request)
isAuthorized, err := userinfo.IsRoleAuthorize(ws.rbLister, ws.crbLister, ws.rLister, ws.crLister, request,ws.configHandler)
if err != nil {
return fmt.Errorf("failed to get RBAC infromation for request %v", err)
}

View file

@ -5,6 +5,7 @@ import (
"github.com/minio/minio/cmd/logger"
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
"github.com/nirmata/kyverno/pkg/config"
"github.com/nirmata/kyverno/pkg/constant"
enginectx "github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/event"
@ -50,6 +51,7 @@ type auditHandler struct {
crbSynced cache.InformerSynced
log logr.Logger
configHandler config.Interface
}
// NewValidateAuditHandler returns a new instance of audit policy handler
@ -59,7 +61,8 @@ func NewValidateAuditHandler(pCache policycache.Interface,
pvGenerator policyviolation.GeneratorInterface,
rbInformer rbacinformer.RoleBindingInformer,
crbInformer rbacinformer.ClusterRoleBindingInformer,
log logr.Logger) AuditHandler {
log logr.Logger,
dynamicConfig config.Interface) AuditHandler {
return &auditHandler{
pCache: pCache,
@ -72,6 +75,7 @@ func NewValidateAuditHandler(pCache policycache.Interface,
crbLister: crbInformer.Lister(),
crbSynced: crbInformer.Informer().HasSynced,
log: log,
configHandler : dynamicConfig,
}
}
@ -134,7 +138,7 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
// getRoleRef only if policy has roles/clusterroles defined
if containRBACinfo(policies) {
roles, clusterRoles, err = userinfo.GetRoleRef(h.rbLister, h.crbLister, request)
roles, clusterRoles, err = userinfo.GetRoleRef(h.rbLister, h.crbLister, request,h.configHandler)
if err != nil {
logger.Error(err, "failed to get RBAC information for request")
}
@ -161,7 +165,7 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
return errors.Wrap(err, "failed to load service account in context")
}
HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.pvGenerator, logger)
HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.pvGenerator, logger,h.configHandler)
return nil
}

View file

@ -1,6 +1,7 @@
package webhooks
import (
"github.com/nirmata/kyverno/pkg/config"
"reflect"
"sort"
"time"
@ -33,7 +34,8 @@ func HandleValidation(
statusListener policystatus.Listener,
eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
log logr.Logger) (bool, string) {
log logr.Logger,
dynamicConfig config.Interface) (bool, string) {
if len(policies) == 0 {
return true, ""
@ -70,6 +72,7 @@ func HandleValidation(
OldResource: oldR,
Context: ctx,
AdmissionInfo: userRequestInfo,
ExcludeGroupRole : dynamicConfig.GetExcludeGroupRole(),
}
var engineResponses []response.EngineResponse