
644 updating changes with a revised understanding of the issue; also removed a lot of dead code to simplify the changes

shravan 2020-02-03 18:51:18 +05:30
parent 3b37a61f5d
commit 0d4b256d13
8 changed files with 525 additions and 586 deletions
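
The recurring change across the files below is a consolidation: the standalone rbac.MatchAdmissionInfo check that every engine path had to pair with the resource filter is folded into MatchesResourceDescription, which now takes the admission info as a third argument. A minimal sketch of the call-site pattern, using only names that appear in the hunks below:

// Before: each caller had to remember both checks.
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
	continue
}
if !MatchesResourceDescription(resource, rule) {
	continue
}

// After: one call; the RBAC gate now runs inside the matcher.
if !MatchesResourceDescription(resource, rule, admissionInfo) {
	continue
}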

View file

@@ -6,7 +6,6 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/utils"
"github.com/nirmata/kyverno/pkg/engine/variables"
@@ -29,10 +28,7 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
if !rule.HasGenerate() {
return nil
}
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
return nil
}
if !MatchesResourceDescription(resource, rule) {
if !MatchesResourceDescription(resource, rule, admissionInfo) {
return nil
}

View file

@@ -9,7 +9,6 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/mutate"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/utils"
"github.com/nirmata/kyverno/pkg/engine/variables"
@@ -57,17 +56,12 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
}
startTime := time.Now()
if !rbac.MatchAdmissionInfo(rule, policyContext.AdmissionInfo) {
glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), policyContext.AdmissionInfo)
continue
}
glog.V(4).Infof("Time: Mutate matchAdmissionInfo %v", time.Since(startTime))
// check if the resource satisfies the filter conditions defined in the rule
//TODO: this needs to be extracted, to filter the resources so that we can avoid passing resources that
// don't satisfy a policy rule's resource description
ok := MatchesResourceDescription(resource, rule)
ok := MatchesResourceDescription(resource, rule, policyContext.AdmissionInfo)
if !ok {
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
continue

View file

@@ -5,6 +5,8 @@ import (
"strings"
"time"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
@@ -27,10 +29,16 @@
}
//MatchesResourceDescription checks if the resource matches the resource description of the rule
func MatchesResourceDescription(resource unstructured.Unstructured, rule kyverno.Rule) bool {
func MatchesResourceDescription(resource unstructured.Unstructured, rule kyverno.Rule, admissionInfo kyverno.RequestInfo) bool {
matches := rule.MatchResources.ResourceDescription
exclude := rule.ExcludeResources.ResourceDescription
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), admissionInfo)
return false
}
if !findKind(matches.Kinds, resource.GetKind()) {
return false
}
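
Condensed, the consolidated matcher now has this shape. findKind is referenced by the hunk but defined elsewhere in the file, so the plain linear scan below is an assumption:

package engine

import (
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/engine/rbac"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// matchesSketch shows the order of checks after this commit: the RBAC gate
// runs first, then the kind check; the name, namespace, label-selector, and
// exclude checks follow in the parts of the function this hunk does not show.
func matchesSketch(resource unstructured.Unstructured, rule kyverno.Rule, admissionInfo kyverno.RequestInfo) bool {
	if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
		return false
	}
	matches := rule.MatchResources.ResourceDescription
	if !findKind(matches.Kinds, resource.GetKind()) {
		return false
	}
	// ... remaining match and exclude checks ...
	return true
}

// findKind (assumed implementation): report whether the rule lists the kind.
func findKind(kinds []string, kind string) bool {
	for _, k := range kinds {
		if k == kind {
			return true
		}
	}
	return false
}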

View file

@@ -67,7 +67,7 @@ func TestResourceDescriptionMatch_MultipleKind(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
}
// Match resource name
@@ -125,7 +125,7 @@ func TestResourceDescriptionMatch_Name(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
}
// Match resource regex
@@ -183,7 +183,7 @@ func TestResourceDescriptionMatch_Name_Regex(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
}
// Match expressions for labels to not match
@@ -249,7 +249,7 @@ func TestResourceDescriptionMatch_Label_Expression_NotMatch(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
}
// Match label expression in matching set
@@ -316,7 +316,7 @@ func TestResourceDescriptionMatch_Label_Expression_Match(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
}
// check for exclude conditions
@@ -394,7 +394,7 @@ func TestResourceDescriptionExclude_Label_Expression_Match(t *testing.T) {
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription},
ExcludeResources: kyverno.ExcludeResources{ResourceDescription: resourceDescriptionExclude}}
assert.Assert(t, !MatchesResourceDescription(*resource, rule))
assert.Assert(t, !MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
}
func Test_validateGeneralRuleInfoVariables(t *testing.T) {

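
All of the updated tests pass an empty kyverno.RequestInfo, which leaves the new RBAC gate permissive. A hypothetical test that actually exercises the gate could constrain the rule to a cluster role; the UserInfo and ClusterRoles field names below are assumptions, as neither type definition appears in this diff:

// Hypothetical: a rule limited to the "admin" cluster role should match only
// when the admission request carries that role. The UserInfo and ClusterRoles
// field names are assumed, not taken from this commit.
func TestResourceDescriptionMatch_AdmissionInfo(t *testing.T) {
	resource := &unstructured.Unstructured{Object: map[string]interface{}{
		"kind":     "Deployment",
		"metadata": map[string]interface{}{"name": "nginx"},
	}}
	rule := kyverno.Rule{MatchResources: kyverno.MatchResources{
		UserInfo:            kyverno.UserInfo{ClusterRoles: []string{"admin"}},
		ResourceDescription: kyverno.ResourceDescription{Kinds: []string{"Deployment"}},
	}}
	assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{ClusterRoles: []string{"admin"}}))
	assert.Assert(t, !MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
}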
View file

@@ -10,7 +10,6 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/utils"
"github.com/nirmata/kyverno/pkg/engine/validate"
@@ -101,18 +100,12 @@ func validateResource(ctx context.EvalInterface, policy kyverno.ClusterPolicy, r
newPathNotPresentRuleResponse(rule.Name, utils.Validation.String(), fmt.Sprintf("path not present: %s", paths)))
continue
}
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), admissionInfo)
continue
}
glog.V(4).Infof("Time: Validate matchAdmissionInfo %v", time.Since(startTime))
// check if the resource satisfies the filter conditions defined in the rule
// TODO: this needs to be extracted, to filter the resources so that we can avoid passing resources that
// don't satisfy a policy rule's resource description
ok := MatchesResourceDescription(resource, rule)
ok := MatchesResourceDescription(resource, rule, admissionInfo)
if !ok {
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
continue

View file

@@ -1,250 +1,225 @@
package namespace
import (
"time"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"k8s.io/apimachinery/pkg/api/errors"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
v1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
v1Informer "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
// maxRetries is the number of times a Namespace will be processed for a policy before it is dropped from the queue
maxRetries = 15
)
//NamespaceController watches 'Namespace' resource creation/update and applies the generate rules to them
type NamespaceController struct {
client *client.Client
kyvernoClient *kyvernoclient.Clientset
syncHandler func(nsKey string) error
enqueueNs func(ns *v1.Namespace)
//nsLister provides expansion to the namespace lister to inject GVK for the resource
nsLister NamespaceListerExpansion
// nsSynced returns true if the Namespace store has been synced at least once
nsSynced cache.InformerSynced
// pLister can list/get cluster policies from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// pSynced returns true if the Policy store has been synced at least once
pSynced cache.InformerSynced
// API to send policy stats for aggregation
policyStatus policy.PolicyStatusInterface
// eventGen provides an interface to generate events
eventGen event.Interface
// Namespaces that need to be synced
queue workqueue.RateLimitingInterface
// Resource manager, manages the mapping for already processed resources
rm resourceManager
// helpers to validate against current loaded configuration
configHandler config.Interface
// store to hold policy meta data for faster lookup
pMetaStore policystore.LookupInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
}
//NewNamespaceController returns a new Controller to manage generation rules
func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
client *client.Client,
nsInformer v1Informer.NamespaceInformer,
pInformer kyvernoinformer.ClusterPolicyInformer,
policyStatus policy.PolicyStatusInterface,
eventGen event.Interface,
configHandler config.Interface,
pvGenerator policyviolation.GeneratorInterface,
pMetaStore policystore.LookupInterface) *NamespaceController {
//TODO: do we need an event recorder for this controller?
// create the controller
nsc := &NamespaceController{
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
configHandler: configHandler,
pMetaStore: pMetaStore,
pvGenerator: pvGenerator,
}
nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nsc.addNamespace,
UpdateFunc: nsc.updateNamespace,
DeleteFunc: nsc.deleteNamespace,
})
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nsc.addPolicy,
UpdateFunc: nsc.updatePolicy,
})
nsc.enqueueNs = nsc.enqueue
nsc.syncHandler = nsc.syncNamespace
nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
nsc.nsSynced = nsInformer.Informer().HasSynced
nsc.pLister = pInformer.Lister()
nsc.pSynced = pInformer.Informer().HasSynced
nsc.policyStatus = policyStatus
// resource manager
// rebuild after 300 seconds/ 5 mins
nsc.rm = NewResourceManager(300)
return nsc
}
func (nsc *NamespaceController) addPolicy(obj interface{}) {
p := obj.(*kyverno.ClusterPolicy)
// check if the policy has generate rule
if generateRuleExists(p) {
// process policy
nsc.processPolicy(p)
}
}
func (nsc *NamespaceController) updatePolicy(old, cur interface{}) {
curP := cur.(*kyverno.ClusterPolicy)
// check if the policy has generate rule
if generateRuleExists(curP) {
// process policy
nsc.processPolicy(curP)
}
}
func (nsc *NamespaceController) addNamespace(obj interface{}) {
ns := obj.(*v1.Namespace)
glog.V(4).Infof("Adding Namespace %s", ns.Name)
nsc.enqueueNs(ns)
}
func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
oldNs := old.(*v1.Namespace)
curNs := cur.(*v1.Namespace)
if curNs.ResourceVersion == oldNs.ResourceVersion {
// Periodic resync will send update events for all known Namespaces.
// Two different versions of the same Namespace will always have different RVs.
return
}
glog.V(4).Infof("Updating Namesapce %s", curNs.Name)
//TODO: anything to be done here?
}
func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
ns, _ := obj.(*v1.Namespace)
glog.V(4).Infof("Deleting Namespace %s", ns.Name)
//TODO: anything to be done here?
}
func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
key, err := cache.MetaNamespaceKeyFunc(ns)
if err != nil {
glog.Error(err)
return
}
nsc.queue.Add(key)
}
//Run to run the controller
func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer nsc.queue.ShutDown()
glog.Info("Starting namespace controller")
defer glog.Info("Shutting down namespace controller")
if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
glog.Error("namespace generator: failed to sync cache")
return
}
for i := 0; i < workers; i++ {
go wait.Until(nsc.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (nsc *NamespaceController) worker() {
for nsc.processNextWorkItem() {
}
}
func (nsc *NamespaceController) processNextWorkItem() bool {
key, quit := nsc.queue.Get()
if quit {
return false
}
defer nsc.queue.Done(key)
err := nsc.syncHandler(key.(string))
nsc.handleErr(err, key)
return true
}
func (nsc *NamespaceController) handleErr(err error, key interface{}) {
if err == nil {
nsc.queue.Forget(key)
return
}
if nsc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing namespace %v: %v", key, err)
nsc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
nsc.queue.Forget(key)
}
func (nsc *NamespaceController) syncNamespace(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()
namespace, err := nsc.nsLister.GetResource(key)
if errors.IsNotFound(err) {
glog.V(2).Infof("namespace %v has been deleted", key)
return nil
}
if err != nil {
return err
}
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
n := namespace.DeepCopy()
// skip processing the namespace if it has been filtered
// exclude the filtered resources
if nsc.configHandler.ToFilter("", namespace.Name, "") {
//TODO: improve the text
glog.V(4).Infof("excluding namespace %s as its a filtered resource", namespace.Name)
return nil
}
// process generate rules
engineResponses := nsc.processNamespace(*n)
// report errors
nsc.report(engineResponses)
return nil
}
//const (
// // maxRetries is the number of times a Namespace will be processed for a policy before it is dropped from the queue
// maxRetries = 15
//)
//
////NamespaceController watches 'Namespace' resource creation/update and applies the generate rules to them
//type NamespaceController struct {
// client *client.Client
// kyvernoClient *kyvernoclient.Clientset
// syncHandler func(nsKey string) error
// enqueueNs func(ns *v1.Namespace)
//
// //nsLister provides expansion to the namespace lister to inject GVK for the resource
// nsLister NamespaceListerExpansion
// // nsSynced returns true if the Namespace store has been synced at least once
// nsSynced cache.InformerSynced
// // pLister can list/get cluster policies from the shared informer's store
// pLister kyvernolister.ClusterPolicyLister
// // pSynced returns true if the Policy store has been synced at least once
// pSynced cache.InformerSynced
// // API to send policy stats for aggregation
// policyStatus policy.PolicyStatusInterface
// // eventGen provides an interface to generate events
// eventGen event.Interface
// // Namespaces that need to be synced
// queue workqueue.RateLimitingInterface
// // Resource manager, manages the mapping for already processed resources
// rm resourceManager
// // helpers to validate against current loaded configuration
// configHandler config.Interface
// // store to hold policy meta data for faster lookup
// pMetaStore policystore.LookupInterface
// // policy violation generator
// pvGenerator policyviolation.GeneratorInterface
//}
//
////NewNamespaceController returns a new Controller to manage generation rules
//func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
// client *client.Client,
// nsInformer v1Informer.NamespaceInformer,
// pInformer kyvernoinformer.ClusterPolicyInformer,
// policyStatus policy.PolicyStatusInterface,
// eventGen event.Interface,
// configHandler config.Interface,
// pvGenerator policyviolation.GeneratorInterface,
// pMetaStore policystore.LookupInterface) *NamespaceController {
// //TODO: do we need an event recorder for this controller?
// // create the controller
// nsc := &NamespaceController{
// client: client,
// kyvernoClient: kyvernoClient,
// eventGen: eventGen,
// queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
// configHandler: configHandler,
// pMetaStore: pMetaStore,
// pvGenerator: pvGenerator,
// }
//
// nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
// AddFunc: nsc.addNamespace,
// UpdateFunc: nsc.updateNamespace,
// DeleteFunc: nsc.deleteNamespace,
// })
//
// pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
// AddFunc: nsc.addPolicy,
// UpdateFunc: nsc.updatePolicy,
// })
//
// nsc.enqueueNs = nsc.enqueue
// nsc.syncHandler = nsc.syncNamespace
//
// nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
// nsc.nsSynced = nsInformer.Informer().HasSynced
// nsc.pLister = pInformer.Lister()
// nsc.pSynced = pInformer.Informer().HasSynced
// nsc.policyStatus = policyStatus
//
// // resource manager
// // rebuild after 300 seconds/ 5 mins
// nsc.rm = NewResourceManager(300)
//
// return nsc
//}
//func (nsc *NamespaceController) addPolicy(obj interface{}) {
// p := obj.(*kyverno.ClusterPolicy)
// // check if the policy has generate rule
// if generateRuleExists(p) {
// // process policy
// nsc.processPolicy(p)
// }
//}
//
//func (nsc *NamespaceController) updatePolicy(old, cur interface{}) {
// curP := cur.(*kyverno.ClusterPolicy)
// // check if the policy has generate rule
// if generateRuleExists(curP) {
// // process policy
// nsc.processPolicy(curP)
// }
//}
//
//func (nsc *NamespaceController) addNamespace(obj interface{}) {
// ns := obj.(*v1.Namespace)
// glog.V(4).Infof("Adding Namespace %s", ns.Name)
// nsc.enqueueNs(ns)
//}
//
//func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
// oldNs := old.(*v1.Namespace)
// curNs := cur.(*v1.Namespace)
// if curNs.ResourceVersion == oldNs.ResourceVersion {
// // Periodic resync will send update events for all known Namespaces.
// // Two different versions of the same Namespace will always have different RVs.
// return
// }
// glog.V(4).Infof("Updating Namesapce %s", curNs.Name)
// //TODO: anything to be done here?
//}
//
//func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
// ns, _ := obj.(*v1.Namespace)
// glog.V(4).Infof("Deleting Namespace %s", ns.Name)
// //TODO: anything to be done here?
//}
//
//func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
// key, err := cache.MetaNamespaceKeyFunc(ns)
// if err != nil {
// glog.Error(err)
// return
// }
// nsc.queue.Add(key)
//}
//
////Run to run the controller
//func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
// defer utilruntime.HandleCrash()
// defer nsc.queue.ShutDown()
//
// glog.Info("Starting namespace controller")
// defer glog.Info("Shutting down namespace controller")
//
// if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
// glog.Error("namespace generator: failed to sync cache")
// return
// }
//
// for i := 0; i < workers; i++ {
// go wait.Until(nsc.worker, time.Second, stopCh)
// }
// <-stopCh
//}
//
//// worker runs a worker thread that just dequeues items, processes them, and marks them done.
//// It enforces that the syncHandler is never invoked concurrently with the same key.
//func (nsc *NamespaceController) worker() {
// for nsc.processNextWorkItem() {
// }
//}
//
//func (nsc *NamespaceController) processNextWorkItem() bool {
// key, quit := nsc.queue.Get()
// if quit {
// return false
// }
// defer nsc.queue.Done(key)
//
// err := nsc.syncHandler(key.(string))
// nsc.handleErr(err, key)
//
// return true
//}
//
//func (nsc *NamespaceController) handleErr(err error, key interface{}) {
// if err == nil {
// nsc.queue.Forget(key)
// return
// }
//
// if nsc.queue.NumRequeues(key) < maxRetries {
// glog.V(2).Infof("Error syncing namespace %v: %v", key, err)
// nsc.queue.AddRateLimited(key)
// return
// }
//
// utilruntime.HandleError(err)
// glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
// nsc.queue.Forget(key)
//}
//
//func (nsc *NamespaceController) syncNamespace(key string) error {
// startTime := time.Now()
// glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
// defer func() {
// glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
// }()
// namespace, err := nsc.nsLister.GetResource(key)
// if errors.IsNotFound(err) {
// glog.V(2).Infof("namespace %v has been deleted", key)
// return nil
// }
// if err != nil {
// return err
// }
// // Deep-copy otherwise we are mutating our cache.
// // TODO: Deep-copy only when needed.
// n := namespace.DeepCopy()
//
// // skip processing the namespace if it has been filtered
// // exclude the filtered resources
// if nsc.configHandler.ToFilter("", namespace.Name, "") {
// //TODO: improve the text
// glog.V(4).Infof("excluding namespace %s as its a filtered resource", namespace.Name)
// return nil
// }
//
// // process generate rules
// engineResponses := nsc.processNamespace(*n)
// // report errors
// nsc.report(engineResponses)
// return nil
//}
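
For context, wiring this controller into a process looks roughly like the sketch below; the informer, client, and store arguments are assumed to come from the caller's setup:

// Construct the controller and run two workers until stopCh closes.
// Run waits for the informer caches to sync before starting the workers.
nsc := NewNamespaceController(kyvernoClient, client, nsInformer, pInformer,
	policyStatus, eventGen, configHandler, pvGenerator, pMetaStore)
stopCh := make(chan struct{})
go nsc.Run(2, stopCh)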

View file

@@ -1,252 +1,234 @@
package namespace
import (
"sync"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/response"
policyctr "github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/utils"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
type resourceManager interface {
ProcessResource(policy, pv, kind, ns, name, rv string) bool
//TODO removeResource(kind, ns, name string) error
RegisterResource(policy, pv, kind, ns, name, rv string)
// reload
Drop()
}
// ResourceManager stores the details on already processed resources for caching
type ResourceManager struct {
// we drop and re-build the cache
// based on the memory consumed by the map
data map[string]interface{}
mux sync.RWMutex
time time.Time
rebuildTime int64 // after how many seconds should we rebuild the cache
}
//NewResourceManager returns a new ResourceManager
func NewResourceManager(rebuildTime int64) *ResourceManager {
rm := ResourceManager{
data: make(map[string]interface{}),
time: time.Now(),
rebuildTime: rebuildTime,
}
// set time it was built
return &rm
}
var empty struct{}
//RegisterResource records that the policy was processed on this resource version
func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
rm.mux.Lock()
defer rm.mux.Unlock()
// add the resource
key := buildKey(policy, pv, kind, ns, name, rv)
rm.data[key] = empty
}
//ProcessResource returns true if the policy was not applied on the resource
func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
rm.mux.RLock()
defer rm.mux.RUnlock()
key := buildKey(policy, pv, kind, ns, name, rv)
_, ok := rm.data[key]
return !ok
}
//Drop drops the cache once the rebuild interval has elapsed
//TODO: or drop based on the size
func (rm *ResourceManager) Drop() {
timeSince := time.Since(rm.time)
glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince)
glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second)
if timeSince > time.Duration(rm.rebuildTime)*time.Second {
rm.mux.Lock()
defer rm.mux.Unlock()
rm.data = map[string]interface{}{}
rm.time = time.Now()
glog.V(4).Infof("dropping cache at time %v", rm.time)
}
}
func buildKey(policy, pv, kind, ns, name, rv string) string {
return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
}
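// Usage sketch for the cache above: ProcessResource reports whether this
// (policy version, resource version) pair still needs work, and Drop rebuilds
// the map once rebuildTime seconds have passed, bounding its growth. The
// literal arguments are illustrative only:
//
//	rm := NewResourceManager(300) // rebuild the "seen" set every 300s / 5 min
//	rm.Drop()                     // no-op until the rebuild window elapses
//	if rm.ProcessResource("gen-policy", "42", "Namespace", "", "dev", "1001") {
//		// first sighting of this combination: apply the policy, then record it
//		rm.RegisterResource("gen-policy", "42", "Namespace", "", "dev", "1001")
//	}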
func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []response.EngineResponse {
// convert to unstructured
unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&namespace)
if err != nil {
glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
return nil
}
nsc.rm.Drop()
ns := unstructured.Unstructured{Object: unstr}
// get all the policies that have a generate rule and resource description satisfies the namespace
// apply policy on resource
policies := listpolicies(ns, nsc.pMetaStore)
var engineResponses []response.EngineResponse
for _, policy := range policies {
// pre-processing, check if the policy and resource version has been processed before
if !nsc.rm.ProcessResource(policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion()) {
glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
continue
}
engineResponse := applyPolicy(nsc.client, ns, policy, nsc.policyStatus)
engineResponses = append(engineResponses, engineResponse)
// post-processing, register the resource as processed
nsc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
}
return engineResponses
}
func generateRuleExists(policy *kyverno.ClusterPolicy) bool {
for _, rule := range policy.Spec.Rules {
if rule.Generation != (kyverno.Generation{}) {
return true
}
}
return false
}
func (nsc *NamespaceController) processPolicy(policy *kyverno.ClusterPolicy) {
filteredNamespaces := []*corev1.Namespace{}
// get namespaces that policy applies on
namespaces, err := nsc.nsLister.ListResources(labels.NewSelector())
if err != nil {
glog.Errorf("failed to get list namespaces: %v", err)
return
}
for _, namespace := range namespaces {
// convert to unstructured
unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(namespace)
if err != nil {
glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
continue
}
ns := unstructured.Unstructured{Object: unstr}
for _, rule := range policy.Spec.Rules {
if rule.Generation == (kyverno.Generation{}) {
continue
}
ok := engine.MatchesResourceDescription(ns, rule)
if !ok {
glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
continue
}
glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
filteredNamespaces = append(filteredNamespaces, namespace)
}
}
// list of namespaces that the policy applies on
for _, ns := range filteredNamespaces {
glog.V(4).Infof("policy %s with generate rule: namespace %s to be processed ", policy.Name, ns.Name)
nsc.addNamespace(ns)
}
}
func listpolicies(ns unstructured.Unstructured, pMetaStore policystore.LookupInterface) []kyverno.ClusterPolicy {
var filteredpolicies []kyverno.ClusterPolicy
glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
policies, err := pMetaStore.LookUp(ns.GetKind(), ns.GetNamespace())
if err != nil {
glog.Errorf("failed to get list policies: %v", err)
return nil
}
for _, policy := range policies {
for _, rule := range policy.Spec.Rules {
if rule.Generation == (kyverno.Generation{}) {
continue
}
ok := engine.MatchesResourceDescription(ns, rule)
if !ok {
glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
continue
}
glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
filteredpolicies = append(filteredpolicies, policy)
}
}
return filteredpolicies
}
func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.ClusterPolicy, policyStatus policyctr.PolicyStatusInterface) response.EngineResponse {
var policyStats []policyctr.PolicyStat
// gather stats from the engine response
gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
ps := policyctr.PolicyStat{}
ps.PolicyName = policyName
ps.Stats.GenerationExecutionTime = policyResponse.ProcessingTime
ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
// capture rule level stats
for _, rule := range policyResponse.Rules {
rs := policyctr.RuleStatinfo{}
rs.RuleName = rule.Name
rs.ExecutionTime = rule.RuleStats.ProcessingTime
if rule.Success {
rs.RuleAppliedCount++
} else {
rs.RulesFailedCount++
}
ps.Stats.Rules = append(ps.Stats.Rules, rs)
}
policyStats = append(policyStats, ps)
}
// send stats for aggregation
sendStat := func(blocked bool) {
for _, stat := range policyStats {
stat.Stats.ResourceBlocked = utils.Btoi(blocked)
//SEND
policyStatus.SendStat(stat)
}
}
startTime := time.Now()
glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
defer func() {
glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
}()
// build context
ctx := context.NewContext()
ctx.AddResource(transformResource(resource))
policyContext := engine.PolicyContext{
NewResource: resource,
Policy: p,
Client: client,
Context: ctx,
}
engineResponse := engine.Generate(policyContext)
// gather stats
gatherStat(p.Name, engineResponse.PolicyResponse)
//send stats
sendStat(false)
return engineResponse
}
func transformResource(resource unstructured.Unstructured) []byte {
data, err := resource.MarshalJSON()
if err != nil {
glog.Errorf("failed to marshall resource %v: %v", resource, err)
return nil
}
return data
}
//
//type resourceManager interface {
// ProcessResource(policy, pv, kind, ns, name, rv string) bool
// //TODO removeResource(kind, ns, name string) error
// RegisterResource(policy, pv, kind, ns, name, rv string)
// // reload
// Drop()
//}
//
//// ResourceManager stores the details on already processed resources for caching
//type ResourceManager struct {
// // we drop and re-build the cache
// // based on the memory consumed by the map
// data map[string]interface{}
// mux sync.RWMutex
// time time.Time
// rebuildTime int64 // after how many seconds should we rebuild the cache
//}
//
////NewResourceManager returns a new ResourceManager
//func NewResourceManager(rebuildTime int64) *ResourceManager {
// rm := ResourceManager{
// data: make(map[string]interface{}),
// time: time.Now(),
// rebuildTime: rebuildTime,
// }
// // set time it was built
// return &rm
//}
//
//var empty struct{}
//
////RegisterResource records that the policy was processed on this resource version
//func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
// rm.mux.Lock()
// defer rm.mux.Unlock()
// // add the resource
// key := buildKey(policy, pv, kind, ns, name, rv)
// rm.data[key] = empty
//}
//
////ProcessResource returns true if the policy was not applied on the resource
//func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
// rm.mux.RLock()
// defer rm.mux.RUnlock()
//
// key := buildKey(policy, pv, kind, ns, name, rv)
// _, ok := rm.data[key]
// return !ok
//}
//
////Drop drops the cache once the rebuild interval has elapsed
////TODO: or drop based on the size
//func (rm *ResourceManager) Drop() {
// timeSince := time.Since(rm.time)
// glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince)
// glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second)
// if timeSince > time.Duration(rm.rebuildTime)*time.Second {
// rm.mux.Lock()
// defer rm.mux.Unlock()
// rm.data = map[string]interface{}{}
// rm.time = time.Now()
// glog.V(4).Infof("dropping cache at time %v", rm.time)
// }
//}
//func buildKey(policy, pv, kind, ns, name, rv string) string {
// return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
//}
//
//func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []response.EngineResponse {
// // convert to unstructured
// unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&namespace)
// if err != nil {
// glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
// return nil
// }
// nsc.rm.Drop()
//
// ns := unstructured.Unstructured{Object: unstr}
//
// // get all the policies that have a generate rule and resource description satisfies the namespace
// // apply policy on resource
// policies := listpolicies(ns, nsc.pMetaStore)
// var engineResponses []response.EngineResponse
// for _, policy := range policies {
// // pre-processing, check if the policy and resource version has been processed before
// if !nsc.rm.ProcessResource(policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion()) {
// glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
// continue
// }
// engineResponse := applyPolicy(nsc.client, ns, policy, nsc.policyStatus)
// engineResponses = append(engineResponses, engineResponse)
//
// // post-processing, register the resource as processed
// nsc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
// }
// return engineResponses
//}
//
//func generateRuleExists(policy *kyverno.ClusterPolicy) bool {
// for _, rule := range policy.Spec.Rules {
// if rule.Generation != (kyverno.Generation{}) {
// return true
// }
// }
// return false
//}
//
//func (nsc *NamespaceController) processPolicy(policy *kyverno.ClusterPolicy) {
// filteredNamespaces := []*corev1.Namespace{}
// // get namespaces that policy applies on
// namespaces, err := nsc.nsLister.ListResources(labels.NewSelector())
// if err != nil {
// glog.Errorf("failed to get list namespaces: %v", err)
// return
// }
// for _, namespace := range namespaces {
// // convert to unstructured
// unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(namespace)
// if err != nil {
// glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
// continue
// }
// ns := unstructured.Unstructured{Object: unstr}
// for _, rule := range policy.Spec.Rules {
// if rule.Generation == (kyverno.Generation{}) {
// continue
// }
// ok := engine.MatchesResourceDescription(ns, rule)
// if !ok {
// glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
// continue
// }
// glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
// filteredNamespaces = append(filteredNamespaces, namespace)
// }
// }
// // list of namespaces that the policy applies on
// for _, ns := range filteredNamespaces {
// glog.V(4).Infof("policy %s with generate rule: namespace %s to be processed ", policy.Name, ns.Name)
// nsc.addNamespace(ns)
// }
//}
//
//func listpolicies(ns unstructured.Unstructured, pMetaStore policystore.LookupInterface) []kyverno.ClusterPolicy {
// var filteredpolicies []kyverno.ClusterPolicy
// glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
// policies, err := pMetaStore.LookUp(ns.GetKind(), ns.GetNamespace())
// if err != nil {
// glog.Errorf("failed to get list policies: %v", err)
// return nil
// }
// for _, policy := range policies {
// for _, rule := range policy.Spec.Rules {
// if rule.Generation == (kyverno.Generation{}) {
// continue
// }
// ok := engine.MatchesResourceDescription(ns, rule)
// if !ok {
// glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
// continue
// }
// glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
// filteredpolicies = append(filteredpolicies, policy)
// }
// }
// return filteredpolicies
//}
//
//func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.ClusterPolicy, policyStatus policyctr.PolicyStatusInterface) response.EngineResponse {
// var policyStats []policyctr.PolicyStat
// // gather stats from the engine response
// gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
// ps := policyctr.PolicyStat{}
// ps.PolicyName = policyName
// ps.Stats.GenerationExecutionTime = policyResponse.ProcessingTime
// ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
// // capture rule level stats
// for _, rule := range policyResponse.Rules {
// rs := policyctr.RuleStatinfo{}
// rs.RuleName = rule.Name
// rs.ExecutionTime = rule.RuleStats.ProcessingTime
// if rule.Success {
// rs.RuleAppliedCount++
// } else {
// rs.RulesFailedCount++
// }
// ps.Stats.Rules = append(ps.Stats.Rules, rs)
// }
// policyStats = append(policyStats, ps)
// }
// // send stats for aggregation
// sendStat := func(blocked bool) {
// for _, stat := range policyStats {
// stat.Stats.ResourceBlocked = utils.Btoi(blocked)
// //SEND
// policyStatus.SendStat(stat)
// }
// }
//
// startTime := time.Now()
// glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
// defer func() {
// glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
// }()
// // build context
// ctx := context.NewContext()
// ctx.AddResource(transformResource(resource))
//
// policyContext := engine.PolicyContext{
// NewResource: resource,
// Policy: p,
// Client: client,
// Context: ctx,
// }
// engineResponse := engine.Generate(policyContext)
// // gather stats
// gatherStat(p.Name, engineResponse.PolicyResponse)
// //send stats
// sendStat(false)
//
// return engineResponse
//}
//
//func transformResource(resource unstructured.Unstructured) []byte {
// data, err := resource.MarshalJSON()
// if err != nil {
// glog.Errorf("failed to marshall resource %v: %v", resource, err)
// return nil
// }
// return data
//}

View file

@@ -1,63 +1,54 @@
package namespace
import (
"fmt"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyviolation"
)
func (nsc *NamespaceController) report(engineResponses []response.EngineResponse) {
// generate events
eventInfos := generateEvents(engineResponses)
nsc.eventGen.Add(eventInfos...)
// generate policy violations
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
nsc.pvGenerator.Add(pvInfos...)
}
func generateEvents(ers []response.EngineResponse) []event.Info {
var eventInfos []event.Info
for _, er := range ers {
if er.IsSuccesful() {
continue
}
eventInfos = append(eventInfos, generateEventsPerEr(er)...)
}
return eventInfos
}
func generateEventsPerEr(er response.EngineResponse) []event.Info {
var eventInfos []event.Info
glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
for _, rule := range er.PolicyResponse.Rules {
if rule.Success {
continue
}
// generate an event on the resource for each failed rule
glog.V(4).Infof("generating event on resource '%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = er.PolicyResponse.Resource.Kind
e.Namespace = "" // event generated on the namespace resource
e.Name = er.PolicyResponse.Resource.Name
e.Reason = "Failure"
e.Source = event.GeneratePolicyController
e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' not satisfied. %v", er.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
eventInfos = append(eventInfos, e)
}
if er.IsSuccesful() {
return eventInfos
}
// generate an event on the policy for all failed rules
glog.V(4).Infof("generating event on policy '%s'", er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = "ClusterPolicy"
e.Namespace = ""
e.Name = er.PolicyResponse.Policy
e.Reason = "Failure"
e.Source = event.GeneratePolicyController
e.Message = fmt.Sprintf("policy '%s' rules '%v' on resource '%s/%s/%s' not satisfied", er.PolicyResponse.Policy, er.GetFailedRules(), er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
return eventInfos
}
//func (nsc *NamespaceController) report(engineResponses []response.EngineResponse) {
// // generate events
// eventInfos := generateEvents(engineResponses)
// nsc.eventGen.Add(eventInfos...)
// // generate policy violations
// pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
// nsc.pvGenerator.Add(pvInfos...)
//}
//
//func generateEvents(ers []response.EngineResponse) []event.Info {
// var eventInfos []event.Info
// for _, er := range ers {
// if er.IsSuccesful() {
// continue
// }
// eventInfos = append(eventInfos, generateEventsPerEr(er)...)
// }
// return eventInfos
//}
//
//func generateEventsPerEr(er response.EngineResponse) []event.Info {
// var eventInfos []event.Info
// glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
// for _, rule := range er.PolicyResponse.Rules {
// if rule.Success {
// continue
// }
// // generate an event on the resource for each failed rule
// glog.V(4).Infof("generating event on resource '%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
// e := event.Info{}
// e.Kind = er.PolicyResponse.Resource.Kind
// e.Namespace = "" // event generated on the namespace resource
// e.Name = er.PolicyResponse.Resource.Name
// e.Reason = "Failure"
// e.Source = event.GeneratePolicyController
// e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' not satisfied. %v", er.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
// eventInfos = append(eventInfos, e)
// }
// if er.IsSuccesful() {
// return eventInfos
// }
// // generate an event on the policy for all failed rules
// glog.V(4).Infof("generating event on policy '%s'", er.PolicyResponse.Policy)
// e := event.Info{}
// e.Kind = "ClusterPolicy"
// e.Namespace = ""
// e.Name = er.PolicyResponse.Policy
// e.Reason = "Failure"
// e.Source = event.GeneratePolicyController
// e.Message = fmt.Sprintf("policy '%s' rules '%v' on resource '%s/%s/%s' not satisfied", er.PolicyResponse.Policy, er.GetFailedRules(), er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
// return eventInfos
//}