Mirror of https://github.com/kyverno/kyverno.git, synced 2025-03-31 03:45:17 +00:00
644 updating changes with revised understanding of the issue, also removed a lot of dead code to make changes
This commit is contained in:
parent 3b37a61f5d
commit 0d4b256d13
8 changed files with 525 additions and 586 deletions
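The functional change is visible in the engine hunks below: the RBAC admission check (rbac.MatchAdmissionInfo) moves inside MatchesResourceDescription, which now also receives a kyverno.RequestInfo, so the generate, mutate, and validate paths make a single call instead of two. A minimal, self-contained sketch of that shape, using stand-in types rather than the real kyverno API:

package main

import "fmt"

// Stand-ins for kyverno.RequestInfo, kyverno.Rule and a resource; not the real types.
type RequestInfo struct{ ClusterRoles []string }

type Rule struct {
  Name         string
  MatchKinds   []string
  RequiredRole string // stand-in for the rule's RBAC constraint
}

type Resource struct{ Kind, Namespace, Name string }

// Stand-in for rbac.MatchAdmissionInfo: does the requester satisfy the rule's RBAC constraint?
func matchAdmissionInfo(rule Rule, info RequestInfo) bool {
  if rule.RequiredRole == "" {
    return true
  }
  for _, role := range info.ClusterRoles {
    if role == rule.RequiredRole {
      return true
    }
  }
  return false
}

// New shape: the admission check happens inside the resource-description match,
// so callers pass the request info and make one call.
func MatchesResourceDescription(res Resource, rule Rule, info RequestInfo) bool {
  if !matchAdmissionInfo(rule, info) {
    return false
  }
  for _, kind := range rule.MatchKinds {
    if kind == res.Kind {
      return true
    }
  }
  return false
}

func main() {
  rule := Rule{Name: "gen-ns", MatchKinds: []string{"Namespace"}, RequiredRole: "cluster-admin"}
  res := Resource{Kind: "Namespace", Name: "prod"}
  info := RequestInfo{ClusterRoles: []string{"cluster-admin"}}
  fmt.Println(MatchesResourceDescription(res, rule, info)) // prints true
}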
@@ -6,7 +6,6 @@ import (
   "github.com/golang/glog"
   kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
   "github.com/nirmata/kyverno/pkg/engine/context"
-  "github.com/nirmata/kyverno/pkg/engine/rbac"
   "github.com/nirmata/kyverno/pkg/engine/response"
   "github.com/nirmata/kyverno/pkg/engine/utils"
   "github.com/nirmata/kyverno/pkg/engine/variables"
@@ -29,10 +28,7 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
   if !rule.HasGenerate() {
     return nil
   }
-  if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
-    return nil
-  }
-  if !MatchesResourceDescription(resource, rule) {
+  if !MatchesResourceDescription(resource, rule, admissionInfo) {
     return nil
   }
 
@@ -9,7 +9,6 @@ import (
   "github.com/golang/glog"
   kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
   "github.com/nirmata/kyverno/pkg/engine/mutate"
-  "github.com/nirmata/kyverno/pkg/engine/rbac"
   "github.com/nirmata/kyverno/pkg/engine/response"
   "github.com/nirmata/kyverno/pkg/engine/utils"
   "github.com/nirmata/kyverno/pkg/engine/variables"
@@ -57,17 +56,12 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
   }
 
   startTime := time.Now()
-  if !rbac.MatchAdmissionInfo(rule, policyContext.AdmissionInfo) {
-    glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
-      rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), policyContext.AdmissionInfo)
-    continue
-  }
   glog.V(4).Infof("Time: Mutate matchAdmissionInfo %v", time.Since(startTime))
 
   // check if the resource satisfies the filter conditions defined in the rule
   //TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
   // dont statisfy a policy rule resource description
-  ok := MatchesResourceDescription(resource, rule)
+  ok := MatchesResourceDescription(resource, rule, policyContext.AdmissionInfo)
   if !ok {
     glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
     continue
@@ -5,6 +5,8 @@ import (
   "strings"
   "time"
 
+  "github.com/nirmata/kyverno/pkg/engine/rbac"
+
   "github.com/golang/glog"
 
   "github.com/minio/minio/pkg/wildcard"
@@ -27,10 +29,16 @@ type EngineStats struct {
 }
 
 //MatchesResourceDescription checks if the resource matches resource desription of the rule or not
-func MatchesResourceDescription(resource unstructured.Unstructured, rule kyverno.Rule) bool {
+func MatchesResourceDescription(resource unstructured.Unstructured, rule kyverno.Rule, admissionInfo kyverno.RequestInfo) bool {
   matches := rule.MatchResources.ResourceDescription
   exclude := rule.ExcludeResources.ResourceDescription
 
+  if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
+    glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
+      rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), admissionInfo)
+    return false
+  }
+
   if !findKind(matches.Kinds, resource.GetKind()) {
     return false
   }
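With the signature above, each caller passes its admission info directly and drops the separate RBAC check. Abridged from the mutate and validate hunks in this commit, the call site reduces to a sketch like this (policyContext.AdmissionInfo is the caller's kyverno.RequestInfo; the test updates below pass an empty kyverno.RequestInfo{} where no admission context applies):

ok := MatchesResourceDescription(resource, rule, policyContext.AdmissionInfo)
if !ok {
  glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule", resource.GetNamespace(), resource.GetName())
  continue
}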
@@ -67,7 +67,7 @@ func TestResourceDescriptionMatch_MultipleKind(t *testing.T) {
   }
   rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
 
-  assert.Assert(t, MatchesResourceDescription(*resource, rule))
+  assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
 }
 
 // Match resource name
@@ -125,7 +125,7 @@ func TestResourceDescriptionMatch_Name(t *testing.T) {
   }
   rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
 
-  assert.Assert(t, MatchesResourceDescription(*resource, rule))
+  assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
 }
 
 // Match resource regex
@@ -183,7 +183,7 @@ func TestResourceDescriptionMatch_Name_Regex(t *testing.T) {
   }
   rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
 
-  assert.Assert(t, MatchesResourceDescription(*resource, rule))
+  assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
 }
 
 // Match expressions for labels to not match
@@ -249,7 +249,7 @@ func TestResourceDescriptionMatch_Label_Expression_NotMatch(t *testing.T) {
   }
   rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
 
-  assert.Assert(t, MatchesResourceDescription(*resource, rule))
+  assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
 }
 
 // Match label expression in matching set
@@ -316,7 +316,7 @@ func TestResourceDescriptionMatch_Label_Expression_Match(t *testing.T) {
   }
   rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
 
-  assert.Assert(t, MatchesResourceDescription(*resource, rule))
+  assert.Assert(t, MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
 }
 
 // check for exclude conditions
@@ -394,7 +394,7 @@ func TestResourceDescriptionExclude_Label_Expression_Match(t *testing.T) {
   rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription},
     ExcludeResources: kyverno.ExcludeResources{ResourceDescription: resourceDescriptionExclude}}
 
-  assert.Assert(t, !MatchesResourceDescription(*resource, rule))
+  assert.Assert(t, !MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}))
 }
 
 func Test_validateGeneralRuleInfoVariables(t *testing.T) {
@@ -10,7 +10,6 @@ import (
   "github.com/golang/glog"
   kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
   "github.com/nirmata/kyverno/pkg/engine/context"
-  "github.com/nirmata/kyverno/pkg/engine/rbac"
   "github.com/nirmata/kyverno/pkg/engine/response"
   "github.com/nirmata/kyverno/pkg/engine/utils"
   "github.com/nirmata/kyverno/pkg/engine/validate"
@@ -101,18 +100,12 @@ func validateResource(ctx context.EvalInterface, policy kyverno.ClusterPolicy, r
         newPathNotPresentRuleResponse(rule.Name, utils.Validation.String(), fmt.Sprintf("path not present: %s", paths)))
       continue
     }
-
-    if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
-      glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
-        rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), admissionInfo)
-      continue
-    }
     glog.V(4).Infof("Time: Validate matchAdmissionInfo %v", time.Since(startTime))
 
     // check if the resource satisfies the filter conditions defined in the rule
     // TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
     // dont statisfy a policy rule resource description
-    ok := MatchesResourceDescription(resource, rule)
+    ok := MatchesResourceDescription(resource, rule, admissionInfo)
    if !ok {
       glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
       continue
@@ -1,250 +1,225 @@
 package namespace
 
-import (
-  "time"
-
-  "k8s.io/apimachinery/pkg/util/wait"
-
-  "github.com/golang/glog"
-  kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
-  "github.com/nirmata/kyverno/pkg/config"
-  client "github.com/nirmata/kyverno/pkg/dclient"
-  "github.com/nirmata/kyverno/pkg/event"
-  "github.com/nirmata/kyverno/pkg/policy"
-  "github.com/nirmata/kyverno/pkg/policystore"
-  "github.com/nirmata/kyverno/pkg/policyviolation"
-  "k8s.io/apimachinery/pkg/api/errors"
-
-  kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
-  kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
-  kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
-  v1 "k8s.io/api/core/v1"
-  utilruntime "k8s.io/apimachinery/pkg/util/runtime"
-  v1Informer "k8s.io/client-go/informers/core/v1"
-  "k8s.io/client-go/tools/cache"
-  "k8s.io/client-go/util/workqueue"
-)
-
-const (
-  // maxRetries is the number of times a Namespace will be processed for a policy before its dropped from the queue
-  maxRetries = 15
-)
-
-//NamespaceController watches the 'Namespace' resource creation/update and applied the generation rules on them
-type NamespaceController struct {
-  client *client.Client
-  kyvernoClient *kyvernoclient.Clientset
-  syncHandler func(nsKey string) error
-  enqueueNs func(ns *v1.Namespace)
-
-  //nsLister provides expansion to the namespace lister to inject GVK for the resource
-  nsLister NamespaceListerExpansion
-  // nsSynced returns true if the Namespace store has been synced at least once
-  nsSynced cache.InformerSynced
-  // pvLister can list/get policy violation from the shared informer's store
-  pLister kyvernolister.ClusterPolicyLister
-  // pSynced retrns true if the Policy store has been synced at least once
-  pSynced cache.InformerSynced
-  // API to send policy stats for aggregation
-  policyStatus policy.PolicyStatusInterface
-  // eventGen provides interface to generate evenets
-  eventGen event.Interface
-  // Namespaces that need to be synced
-  queue workqueue.RateLimitingInterface
-  // Resource manager, manages the mapping for already processed resource
-  rm resourceManager
-  // helpers to validate against current loaded configuration
-  configHandler config.Interface
-  // store to hold policy meta data for faster lookup
-  pMetaStore policystore.LookupInterface
-  // policy violation generator
-  pvGenerator policyviolation.GeneratorInterface
-}
-
-//NewNamespaceController returns a new Controller to manage generation rules
-func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
-  client *client.Client,
-  nsInformer v1Informer.NamespaceInformer,
-  pInformer kyvernoinformer.ClusterPolicyInformer,
-  policyStatus policy.PolicyStatusInterface,
-  eventGen event.Interface,
-  configHandler config.Interface,
-  pvGenerator policyviolation.GeneratorInterface,
-  pMetaStore policystore.LookupInterface) *NamespaceController {
-  //TODO: do we need to event recorder for this controller?
-  // create the controller
-  nsc := &NamespaceController{
-    client: client,
-    kyvernoClient: kyvernoClient,
-    eventGen: eventGen,
-    queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
-    configHandler: configHandler,
-    pMetaStore: pMetaStore,
-    pvGenerator: pvGenerator,
-  }
-
-  nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-    AddFunc: nsc.addNamespace,
-    UpdateFunc: nsc.updateNamespace,
-    DeleteFunc: nsc.deleteNamespace,
-  })
-
-  pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-    AddFunc: nsc.addPolicy,
-    UpdateFunc: nsc.updatePolicy,
-  })
-
-  nsc.enqueueNs = nsc.enqueue
-  nsc.syncHandler = nsc.syncNamespace
-
-  nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
-  nsc.nsSynced = nsInformer.Informer().HasSynced
-  nsc.pLister = pInformer.Lister()
-  nsc.pSynced = pInformer.Informer().HasSynced
-  nsc.policyStatus = policyStatus
-
-  // resource manager
-  // rebuild after 300 seconds/ 5 mins
-  nsc.rm = NewResourceManager(300)
-
-  return nsc
-}
-func (nsc *NamespaceController) addPolicy(obj interface{}) {
-  p := obj.(*kyverno.ClusterPolicy)
-  // check if the policy has generate rule
-  if generateRuleExists(p) {
-    // process policy
-    nsc.processPolicy(p)
-  }
-}
-
-func (nsc *NamespaceController) updatePolicy(old, cur interface{}) {
-  curP := cur.(*kyverno.ClusterPolicy)
-  // check if the policy has generate rule
-  if generateRuleExists(curP) {
-    // process policy
-    nsc.processPolicy(curP)
-  }
-}
-
-func (nsc *NamespaceController) addNamespace(obj interface{}) {
-  ns := obj.(*v1.Namespace)
-  glog.V(4).Infof("Adding Namespace %s", ns.Name)
-  nsc.enqueueNs(ns)
-}
-
-func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
-  oldNs := old.(*v1.Namespace)
-  curNs := cur.(*v1.Namespace)
-  if curNs.ResourceVersion == oldNs.ResourceVersion {
-    // Periodic resync will send update events for all known Namespace.
-    // Two different versions of the same replica set will always have different RVs.
-    return
-  }
-  glog.V(4).Infof("Updating Namesapce %s", curNs.Name)
-  //TODO: anything to be done here?
-}
-
-func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
-  ns, _ := obj.(*v1.Namespace)
-  glog.V(4).Infof("Deleting Namespace %s", ns.Name)
-  //TODO: anything to be done here?
-}
-
-func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
-  key, err := cache.MetaNamespaceKeyFunc(ns)
-  if err != nil {
-    glog.Error(err)
-    return
-  }
-  nsc.queue.Add(key)
-}
-
-//Run to run the controller
-func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
-  defer utilruntime.HandleCrash()
-  defer nsc.queue.ShutDown()
-
-  glog.Info("Starting namespace controller")
-  defer glog.Info("Shutting down namespace controller")
-
-  if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
-    glog.Error("namespace generator: failed to sync cache")
-    return
-  }
-
-  for i := 0; i < workers; i++ {
-    go wait.Until(nsc.worker, time.Second, stopCh)
-  }
-  <-stopCh
-}
-
-// worker runs a worker thread that just dequeues items, processes them, and marks them done.
-// It enforces that the syncHandler is never invoked concurrently with the same key.
-func (nsc *NamespaceController) worker() {
-  for nsc.processNextWorkItem() {
-  }
-}
-
-func (nsc *NamespaceController) processNextWorkItem() bool {
-  key, quit := nsc.queue.Get()
-  if quit {
-    return false
-  }
-  defer nsc.queue.Done(key)
-
-  err := nsc.syncHandler(key.(string))
-  nsc.handleErr(err, key)
-
-  return true
-}
-
-func (nsc *NamespaceController) handleErr(err error, key interface{}) {
-  if err == nil {
-    nsc.queue.Forget(key)
-    return
-  }
-
-  if nsc.queue.NumRequeues(key) < maxRetries {
-    glog.V(2).Infof("Error syncing namespace %v: %v", key, err)
-    nsc.queue.AddRateLimited(key)
-    return
-  }
-
-  utilruntime.HandleError(err)
-  glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
-  nsc.queue.Forget(key)
-}
-
-func (nsc *NamespaceController) syncNamespace(key string) error {
-  startTime := time.Now()
-  glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
-  defer func() {
-    glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
-  }()
-  namespace, err := nsc.nsLister.GetResource(key)
-  if errors.IsNotFound(err) {
-    glog.V(2).Infof("namespace %v has been deleted", key)
-    return nil
-  }
-  if err != nil {
-    return err
-  }
-  // Deep-copy otherwise we are mutating our cache.
-  // TODO: Deep-copy only when needed.
-  n := namespace.DeepCopy()
-
-  // skip processing namespace if its been filtered
-  // exclude the filtered resources
-  if nsc.configHandler.ToFilter("", namespace.Name, "") {
-    //TODO: improve the text
-    glog.V(4).Infof("excluding namespace %s as its a filtered resource", namespace.Name)
-    return nil
-  }
-
-  // process generate rules
-  engineResponses := nsc.processNamespace(*n)
-  // report errors
-  nsc.report(engineResponses)
-  return nil
-}
+//const (
+//  // maxRetries is the number of times a Namespace will be processed for a policy before its dropped from the queue
+//  maxRetries = 15
+//)
+//
+////NamespaceController watches the 'Namespace' resource creation/update and applied the generation rules on them
+//type NamespaceController struct {
+//  client *client.Client
+//  kyvernoClient *kyvernoclient.Clientset
+//  syncHandler func(nsKey string) error
+//  enqueueNs func(ns *v1.Namespace)
+//
+//  //nsLister provides expansion to the namespace lister to inject GVK for the resource
+//  nsLister NamespaceListerExpansion
+//  // nsSynced returns true if the Namespace store has been synced at least once
+//  nsSynced cache.InformerSynced
+//  // pvLister can list/get policy violation from the shared informer's store
+//  pLister kyvernolister.ClusterPolicyLister
+//  // pSynced retrns true if the Policy store has been synced at least once
+//  pSynced cache.InformerSynced
+//  // API to send policy stats for aggregation
+//  policyStatus policy.PolicyStatusInterface
+//  // eventGen provides interface to generate evenets
+//  eventGen event.Interface
+//  // Namespaces that need to be synced
+//  queue workqueue.RateLimitingInterface
+//  // Resource manager, manages the mapping for already processed resource
+//  rm resourceManager
+//  // helpers to validate against current loaded configuration
+//  configHandler config.Interface
+//  // store to hold policy meta data for faster lookup
+//  pMetaStore policystore.LookupInterface
+//  // policy violation generator
+//  pvGenerator policyviolation.GeneratorInterface
+//}
+//
+////NewNamespaceController returns a new Controller to manage generation rules
+//func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
+//  client *client.Client,
+//  nsInformer v1Informer.NamespaceInformer,
+//  pInformer kyvernoinformer.ClusterPolicyInformer,
+//  policyStatus policy.PolicyStatusInterface,
+//  eventGen event.Interface,
+//  configHandler config.Interface,
+//  pvGenerator policyviolation.GeneratorInterface,
+//  pMetaStore policystore.LookupInterface) *NamespaceController {
+//  //TODO: do we need to event recorder for this controller?
+//  // create the controller
+//  nsc := &NamespaceController{
+//  client: client,
+//  kyvernoClient: kyvernoClient,
+//  eventGen: eventGen,
+//  queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
+//  configHandler: configHandler,
+//  pMetaStore: pMetaStore,
+//  pvGenerator: pvGenerator,
+//  }
+//
+//  nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+//  AddFunc: nsc.addNamespace,
+//  UpdateFunc: nsc.updateNamespace,
+//  DeleteFunc: nsc.deleteNamespace,
+//  })
+//
+//  pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
+//  AddFunc: nsc.addPolicy,
+//  UpdateFunc: nsc.updatePolicy,
+//  })
+//
+//  nsc.enqueueNs = nsc.enqueue
+//  nsc.syncHandler = nsc.syncNamespace
+//
+//  nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
+//  nsc.nsSynced = nsInformer.Informer().HasSynced
+//  nsc.pLister = pInformer.Lister()
+//  nsc.pSynced = pInformer.Informer().HasSynced
+//  nsc.policyStatus = policyStatus
+//
+//  // resource manager
+//  // rebuild after 300 seconds/ 5 mins
+//  nsc.rm = NewResourceManager(300)
+//
+//  return nsc
+//}
+//func (nsc *NamespaceController) addPolicy(obj interface{}) {
+//  p := obj.(*kyverno.ClusterPolicy)
+//  // check if the policy has generate rule
+//  if generateRuleExists(p) {
+//  // process policy
+//  nsc.processPolicy(p)
+//  }
+//}
+//
+//func (nsc *NamespaceController) updatePolicy(old, cur interface{}) {
+//  curP := cur.(*kyverno.ClusterPolicy)
+//  // check if the policy has generate rule
+//  if generateRuleExists(curP) {
+//  // process policy
+//  nsc.processPolicy(curP)
+//  }
+//}
+//
+//func (nsc *NamespaceController) addNamespace(obj interface{}) {
+//  ns := obj.(*v1.Namespace)
+//  glog.V(4).Infof("Adding Namespace %s", ns.Name)
+//  nsc.enqueueNs(ns)
+//}
+//
+//func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
+//  oldNs := old.(*v1.Namespace)
+//  curNs := cur.(*v1.Namespace)
+//  if curNs.ResourceVersion == oldNs.ResourceVersion {
+//  // Periodic resync will send update events for all known Namespace.
+//  // Two different versions of the same replica set will always have different RVs.
+//  return
+//  }
+//  glog.V(4).Infof("Updating Namesapce %s", curNs.Name)
+//  //TODO: anything to be done here?
+//}
+//
+//func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
+//  ns, _ := obj.(*v1.Namespace)
+//  glog.V(4).Infof("Deleting Namespace %s", ns.Name)
+//  //TODO: anything to be done here?
+//}
+//
+//func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
+//  key, err := cache.MetaNamespaceKeyFunc(ns)
+//  if err != nil {
+//  glog.Error(err)
+//  return
+//  }
+//  nsc.queue.Add(key)
+//}
+//
+////Run to run the controller
+//func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
+//  defer utilruntime.HandleCrash()
+//  defer nsc.queue.ShutDown()
+//
+//  glog.Info("Starting namespace controller")
+//  defer glog.Info("Shutting down namespace controller")
+//
+//  if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
+//  glog.Error("namespace generator: failed to sync cache")
+//  return
+//  }
+//
+//  for i := 0; i < workers; i++ {
+//  go wait.Until(nsc.worker, time.Second, stopCh)
+//  }
+//  <-stopCh
+//}
+//
+//// worker runs a worker thread that just dequeues items, processes them, and marks them done.
+//// It enforces that the syncHandler is never invoked concurrently with the same key.
+//func (nsc *NamespaceController) worker() {
+//  for nsc.processNextWorkItem() {
+//  }
+//}
+//
+//func (nsc *NamespaceController) processNextWorkItem() bool {
+//  key, quit := nsc.queue.Get()
+//  if quit {
+//  return false
+//  }
+//  defer nsc.queue.Done(key)
+//
+//  err := nsc.syncHandler(key.(string))
+//  nsc.handleErr(err, key)
+//
+//  return true
+//}
+//
+//func (nsc *NamespaceController) handleErr(err error, key interface{}) {
+//  if err == nil {
+//  nsc.queue.Forget(key)
+//  return
+//  }
+//
+//  if nsc.queue.NumRequeues(key) < maxRetries {
+//  glog.V(2).Infof("Error syncing namespace %v: %v", key, err)
+//  nsc.queue.AddRateLimited(key)
+//  return
+//  }
+//
+//  utilruntime.HandleError(err)
+//  glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
+//  nsc.queue.Forget(key)
+//}
+//
+//func (nsc *NamespaceController) syncNamespace(key string) error {
+//  startTime := time.Now()
+//  glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
+//  defer func() {
+//  glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
+//  }()
+//  namespace, err := nsc.nsLister.GetResource(key)
+//  if errors.IsNotFound(err) {
+//  glog.V(2).Infof("namespace %v has been deleted", key)
+//  return nil
+//  }
+//  if err != nil {
+//  return err
+//  }
+//  // Deep-copy otherwise we are mutating our cache.
+//  // TODO: Deep-copy only when needed.
+//  n := namespace.DeepCopy()
+//
+//  // skip processing namespace if its been filtered
+//  // exclude the filtered resources
+//  if nsc.configHandler.ToFilter("", namespace.Name, "") {
+//  //TODO: improve the text
+//  glog.V(4).Infof("excluding namespace %s as its a filtered resource", namespace.Name)
+//  return nil
+//  }
+//
+//  // process generate rules
+//  engineResponses := nsc.processNamespace(*n)
+//  // report errors
+//  nsc.report(engineResponses)
+//  return nil
+//}
@@ -1,252 +1,234 @@
 package namespace
 
-import (
-  "sync"
-  "time"
-
-  "github.com/golang/glog"
-  kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
-  client "github.com/nirmata/kyverno/pkg/dclient"
-  "github.com/nirmata/kyverno/pkg/engine"
-  "github.com/nirmata/kyverno/pkg/engine/context"
-  "github.com/nirmata/kyverno/pkg/engine/response"
-  policyctr "github.com/nirmata/kyverno/pkg/policy"
-  "github.com/nirmata/kyverno/pkg/policystore"
-  "github.com/nirmata/kyverno/pkg/utils"
-  corev1 "k8s.io/api/core/v1"
-  "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
-  "k8s.io/apimachinery/pkg/labels"
-  "k8s.io/apimachinery/pkg/runtime"
-)
-
-type resourceManager interface {
-  ProcessResource(policy, pv, kind, ns, name, rv string) bool
-  //TODO removeResource(kind, ns, name string) error
-  RegisterResource(policy, pv, kind, ns, name, rv string)
-  // reload
-  Drop()
-}
-
-// ResourceManager stores the details on already processed resources for caching
-type ResourceManager struct {
-  // we drop and re-build the cache
-  // based on the memory consumer of by the map
-  data map[string]interface{}
-  mux sync.RWMutex
-  time time.Time
-  rebuildTime int64 // after how many seconds should we rebuild the cache
-}
-
-//NewResourceManager returns a new ResourceManager
-func NewResourceManager(rebuildTime int64) *ResourceManager {
-  rm := ResourceManager{
-    data: make(map[string]interface{}),
-    time: time.Now(),
-    rebuildTime: rebuildTime,
-  }
-  // set time it was built
-  return &rm
-}
-
-var empty struct{}
-
-//RegisterResource stores if the policy is processed on this resource version
-func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
-  rm.mux.Lock()
-  defer rm.mux.Unlock()
-  // add the resource
-  key := buildKey(policy, pv, kind, ns, name, rv)
-  rm.data[key] = empty
-}
-
-//ProcessResource returns true if the policy was not applied on the resource
-func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
-  rm.mux.RLock()
-  defer rm.mux.RUnlock()
-
-  key := buildKey(policy, pv, kind, ns, name, rv)
-  _, ok := rm.data[key]
-  return !ok
-}
-
-//Drop drop the cache after every rebuild interval mins
-//TODO: or drop based on the size
-func (rm *ResourceManager) Drop() {
-  timeSince := time.Since(rm.time)
-  glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince)
-  glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second)
-  if timeSince > time.Duration(rm.rebuildTime)*time.Second {
-    rm.mux.Lock()
-    defer rm.mux.Unlock()
-    rm.data = map[string]interface{}{}
-    rm.time = time.Now()
-    glog.V(4).Infof("dropping cache at time %v", rm.time)
-  }
-}
-func buildKey(policy, pv, kind, ns, name, rv string) string {
-  return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
-}
-
-func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []response.EngineResponse {
-  // convert to unstructured
-  unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&namespace)
-  if err != nil {
-    glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
-    return nil
-  }
-  nsc.rm.Drop()
-
-  ns := unstructured.Unstructured{Object: unstr}
-
-  // get all the policies that have a generate rule and resource description satisfies the namespace
-  // apply policy on resource
-  policies := listpolicies(ns, nsc.pMetaStore)
-  var engineResponses []response.EngineResponse
-  for _, policy := range policies {
-    // pre-processing, check if the policy and resource version has been processed before
-    if !nsc.rm.ProcessResource(policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion()) {
-      glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
-      continue
-    }
-    engineResponse := applyPolicy(nsc.client, ns, policy, nsc.policyStatus)
-    engineResponses = append(engineResponses, engineResponse)
-
-    // post-processing, register the resource as processed
-    nsc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
-  }
-  return engineResponses
-}
-
-func generateRuleExists(policy *kyverno.ClusterPolicy) bool {
-  for _, rule := range policy.Spec.Rules {
-    if rule.Generation != (kyverno.Generation{}) {
-      return true
-    }
-  }
-  return false
-}
-
-func (nsc *NamespaceController) processPolicy(policy *kyverno.ClusterPolicy) {
-  filteredNamespaces := []*corev1.Namespace{}
-  // get namespaces that policy applies on
-  namespaces, err := nsc.nsLister.ListResources(labels.NewSelector())
-  if err != nil {
-    glog.Errorf("failed to get list namespaces: %v", err)
-    return
-  }
-  for _, namespace := range namespaces {
-    // convert to unstructured
-    unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(namespace)
-    if err != nil {
-      glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
-      continue
-    }
-    ns := unstructured.Unstructured{Object: unstr}
-    for _, rule := range policy.Spec.Rules {
-      if rule.Generation == (kyverno.Generation{}) {
-        continue
-      }
-      ok := engine.MatchesResourceDescription(ns, rule)
-      if !ok {
-        glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
-        continue
-      }
-      glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
-      filteredNamespaces = append(filteredNamespaces, namespace)
-    }
-  }
-  // list of namespaces that the policy applies on
-  for _, ns := range filteredNamespaces {
-    glog.V(4).Infof("policy %s with generate rule: namespace %s to be processed ", policy.Name, ns.Name)
-    nsc.addNamespace(ns)
-  }
-}
-
-func listpolicies(ns unstructured.Unstructured, pMetaStore policystore.LookupInterface) []kyverno.ClusterPolicy {
-  var filteredpolicies []kyverno.ClusterPolicy
-  glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
-  policies, err := pMetaStore.LookUp(ns.GetKind(), ns.GetNamespace())
-  if err != nil {
-    glog.Errorf("failed to get list policies: %v", err)
-    return nil
-  }
-  for _, policy := range policies {
-    for _, rule := range policy.Spec.Rules {
-      if rule.Generation == (kyverno.Generation{}) {
-        continue
-      }
-      ok := engine.MatchesResourceDescription(ns, rule)
-      if !ok {
-        glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
-        continue
-      }
-      glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
-      filteredpolicies = append(filteredpolicies, policy)
-    }
-  }
-  return filteredpolicies
-}
-
-func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.ClusterPolicy, policyStatus policyctr.PolicyStatusInterface) response.EngineResponse {
-  var policyStats []policyctr.PolicyStat
-  // gather stats from the engine response
-  gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
-    ps := policyctr.PolicyStat{}
-    ps.PolicyName = policyName
-    ps.Stats.GenerationExecutionTime = policyResponse.ProcessingTime
-    ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
-    // capture rule level stats
-    for _, rule := range policyResponse.Rules {
-      rs := policyctr.RuleStatinfo{}
-      rs.RuleName = rule.Name
-      rs.ExecutionTime = rule.RuleStats.ProcessingTime
-      if rule.Success {
-        rs.RuleAppliedCount++
-      } else {
-        rs.RulesFailedCount++
-      }
-      ps.Stats.Rules = append(ps.Stats.Rules, rs)
-    }
-    policyStats = append(policyStats, ps)
-  }
-  // send stats for aggregation
-  sendStat := func(blocked bool) {
-    for _, stat := range policyStats {
-      stat.Stats.ResourceBlocked = utils.Btoi(blocked)
-      //SEND
-      policyStatus.SendStat(stat)
-    }
-  }
-
-  startTime := time.Now()
-  glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
-  defer func() {
-    glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
-  }()
-  // build context
-  ctx := context.NewContext()
-  ctx.AddResource(transformResource(resource))
-
-  policyContext := engine.PolicyContext{
-    NewResource: resource,
-    Policy: p,
-    Client: client,
-    Context: ctx,
-  }
-  engineResponse := engine.Generate(policyContext)
-  // gather stats
-  gatherStat(p.Name, engineResponse.PolicyResponse)
-  //send stats
-  sendStat(false)
-
-  return engineResponse
-}
-
-func transformResource(resource unstructured.Unstructured) []byte {
-  data, err := resource.MarshalJSON()
-  if err != nil {
-    glog.Errorf("failed to marshall resource %v: %v", resource, err)
-    return nil
-  }
-  return data
-}
+//
+//type resourceManager interface {
+//  ProcessResource(policy, pv, kind, ns, name, rv string) bool
+//  //TODO removeResource(kind, ns, name string) error
+//  RegisterResource(policy, pv, kind, ns, name, rv string)
+//  // reload
+//  Drop()
+//}
+//
+//// ResourceManager stores the details on already processed resources for caching
+//type ResourceManager struct {
+//  // we drop and re-build the cache
+//  // based on the memory consumer of by the map
+//  data map[string]interface{}
+//  mux sync.RWMutex
+//  time time.Time
+//  rebuildTime int64 // after how many seconds should we rebuild the cache
+//}
+//
+////NewResourceManager returns a new ResourceManager
+//func NewResourceManager(rebuildTime int64) *ResourceManager {
+//  rm := ResourceManager{
+//  data: make(map[string]interface{}),
+//  time: time.Now(),
+//  rebuildTime: rebuildTime,
+//  }
+//  // set time it was built
+//  return &rm
+//}
+//
+//var empty struct{}
+//
+////RegisterResource stores if the policy is processed on this resource version
+//func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
+//  rm.mux.Lock()
+//  defer rm.mux.Unlock()
+//  // add the resource
+//  key := buildKey(policy, pv, kind, ns, name, rv)
+//  rm.data[key] = empty
+//}
+//
+////ProcessResource returns true if the policy was not applied on the resource
+//func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
+//  rm.mux.RLock()
+//  defer rm.mux.RUnlock()
+//
+//  key := buildKey(policy, pv, kind, ns, name, rv)
+//  _, ok := rm.data[key]
+//  return !ok
+//}
+//
+////Drop drop the cache after every rebuild interval mins
+////TODO: or drop based on the size
+//func (rm *ResourceManager) Drop() {
+//  timeSince := time.Since(rm.time)
+//  glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince)
+//  glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second)
+//  if timeSince > time.Duration(rm.rebuildTime)*time.Second {
+//  rm.mux.Lock()
+//  defer rm.mux.Unlock()
+//  rm.data = map[string]interface{}{}
+//  rm.time = time.Now()
+//  glog.V(4).Infof("dropping cache at time %v", rm.time)
+//  }
+//}
+//func buildKey(policy, pv, kind, ns, name, rv string) string {
+//  return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
+//}
+//
+//func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []response.EngineResponse {
+//  // convert to unstructured
+//  unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&namespace)
+//  if err != nil {
+//  glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
+//  return nil
+//  }
+//  nsc.rm.Drop()
+//
+//  ns := unstructured.Unstructured{Object: unstr}
+//
+//  // get all the policies that have a generate rule and resource description satisfies the namespace
+//  // apply policy on resource
+//  policies := listpolicies(ns, nsc.pMetaStore)
+//  var engineResponses []response.EngineResponse
+//  for _, policy := range policies {
+//  // pre-processing, check if the policy and resource version has been processed before
+//  if !nsc.rm.ProcessResource(policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion()) {
+//  glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
+//  continue
+//  }
+//  engineResponse := applyPolicy(nsc.client, ns, policy, nsc.policyStatus)
+//  engineResponses = append(engineResponses, engineResponse)
+//
+//  // post-processing, register the resource as processed
+//  nsc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
+//  }
+//  return engineResponses
+//}
+//
+//func generateRuleExists(policy *kyverno.ClusterPolicy) bool {
+//  for _, rule := range policy.Spec.Rules {
+//  if rule.Generation != (kyverno.Generation{}) {
+//  return true
+//  }
+//  }
+//  return false
+//}
+//
+//func (nsc *NamespaceController) processPolicy(policy *kyverno.ClusterPolicy) {
+//  filteredNamespaces := []*corev1.Namespace{}
+//  // get namespaces that policy applies on
+//  namespaces, err := nsc.nsLister.ListResources(labels.NewSelector())
+//  if err != nil {
+//  glog.Errorf("failed to get list namespaces: %v", err)
+//  return
+//  }
+//  for _, namespace := range namespaces {
+//  // convert to unstructured
+//  unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(namespace)
+//  if err != nil {
+//  glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
+//  continue
+//  }
+//  ns := unstructured.Unstructured{Object: unstr}
+//  for _, rule := range policy.Spec.Rules {
+//  if rule.Generation == (kyverno.Generation{}) {
+//  continue
+//  }
+//  ok := engine.MatchesResourceDescription(ns, rule)
+//  if !ok {
+//  glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
+//  continue
+//  }
+//  glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
+//  filteredNamespaces = append(filteredNamespaces, namespace)
+//  }
+//  }
+//  // list of namespaces that the policy applies on
+//  for _, ns := range filteredNamespaces {
+//  glog.V(4).Infof("policy %s with generate rule: namespace %s to be processed ", policy.Name, ns.Name)
+//  nsc.addNamespace(ns)
+//  }
+//}
+//
+//func listpolicies(ns unstructured.Unstructured, pMetaStore policystore.LookupInterface) []kyverno.ClusterPolicy {
+//  var filteredpolicies []kyverno.ClusterPolicy
+//  glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
+//  policies, err := pMetaStore.LookUp(ns.GetKind(), ns.GetNamespace())
+//  if err != nil {
+//  glog.Errorf("failed to get list policies: %v", err)
+//  return nil
+//  }
+//  for _, policy := range policies {
+//  for _, rule := range policy.Spec.Rules {
+//  if rule.Generation == (kyverno.Generation{}) {
+//  continue
+//  }
+//  ok := engine.MatchesResourceDescription(ns, rule)
+//  if !ok {
+//  glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
+//  continue
+//  }
+//  glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
+//  filteredpolicies = append(filteredpolicies, policy)
+//  }
+//  }
+//  return filteredpolicies
+//}
+//
+//func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.ClusterPolicy, policyStatus policyctr.PolicyStatusInterface) response.EngineResponse {
+//  var policyStats []policyctr.PolicyStat
+//  // gather stats from the engine response
+//  gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
+//  ps := policyctr.PolicyStat{}
+//  ps.PolicyName = policyName
+//  ps.Stats.GenerationExecutionTime = policyResponse.ProcessingTime
+//  ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
+//  // capture rule level stats
+//  for _, rule := range policyResponse.Rules {
+//  rs := policyctr.RuleStatinfo{}
+//  rs.RuleName = rule.Name
+//  rs.ExecutionTime = rule.RuleStats.ProcessingTime
+//  if rule.Success {
+//  rs.RuleAppliedCount++
+//  } else {
+//  rs.RulesFailedCount++
+//  }
+//  ps.Stats.Rules = append(ps.Stats.Rules, rs)
+//  }
+//  policyStats = append(policyStats, ps)
+//  }
+//  // send stats for aggregation
+//  sendStat := func(blocked bool) {
+//  for _, stat := range policyStats {
+//  stat.Stats.ResourceBlocked = utils.Btoi(blocked)
+//  //SEND
+//  policyStatus.SendStat(stat)
+//  }
+//  }
+//
+//  startTime := time.Now()
+//  glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
+//  defer func() {
+//  glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
+//  }()
+//  // build context
+//  ctx := context.NewContext()
+//  ctx.AddResource(transformResource(resource))
+//
+//  policyContext := engine.PolicyContext{
+//  NewResource: resource,
+//  Policy: p,
+//  Client: client,
+//  Context: ctx,
+//  }
+//  engineResponse := engine.Generate(policyContext)
+//  // gather stats
+//  gatherStat(p.Name, engineResponse.PolicyResponse)
+//  //send stats
+//  sendStat(false)
+//
+//  return engineResponse
+//}
+//
+//func transformResource(resource unstructured.Unstructured) []byte {
+//  data, err := resource.MarshalJSON()
+//  if err != nil {
+//  glog.Errorf("failed to marshall resource %v: %v", resource, err)
+//  return nil
+//  }
+//  return data
+//}
@@ -1,63 +1,54 @@
 package namespace
 
-import (
-  "fmt"
-
-  "github.com/golang/glog"
-  "github.com/nirmata/kyverno/pkg/engine/response"
-  "github.com/nirmata/kyverno/pkg/event"
-  "github.com/nirmata/kyverno/pkg/policyviolation"
-)
-
-func (nsc *NamespaceController) report(engineResponses []response.EngineResponse) {
-  // generate events
-  eventInfos := generateEvents(engineResponses)
-  nsc.eventGen.Add(eventInfos...)
-  // generate policy violations
-  pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
-  nsc.pvGenerator.Add(pvInfos...)
-}
-
-func generateEvents(ers []response.EngineResponse) []event.Info {
-  var eventInfos []event.Info
-  for _, er := range ers {
-    if er.IsSuccesful() {
-      continue
-    }
-    eventInfos = append(eventInfos, generateEventsPerEr(er)...)
-  }
-  return eventInfos
-}
-
-func generateEventsPerEr(er response.EngineResponse) []event.Info {
-  var eventInfos []event.Info
-  glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
-  for _, rule := range er.PolicyResponse.Rules {
-    if rule.Success {
-      continue
-    }
-    // generate event on resource for each failed rule
-    glog.V(4).Infof("generation event on resource '%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
-    e := event.Info{}
-    e.Kind = er.PolicyResponse.Resource.Kind
-    e.Namespace = "" // event generate on namespace resource
-    e.Name = er.PolicyResponse.Resource.Name
-    e.Reason = "Failure"
-    e.Source = event.GeneratePolicyController
-    e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' not satisfied. %v", er.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
-    eventInfos = append(eventInfos, e)
-  }
-  if er.IsSuccesful() {
-    return eventInfos
-  }
-  // generate a event on policy for all failed rules
-  glog.V(4).Infof("generation event on policy '%s'", er.PolicyResponse.Policy)
-  e := event.Info{}
-  e.Kind = "ClusterPolicy"
-  e.Namespace = ""
-  e.Name = er.PolicyResponse.Policy
-  e.Reason = "Failure"
-  e.Source = event.GeneratePolicyController
-  e.Message = fmt.Sprintf("policy '%s' rules '%v' on resource '%s/%s/%s' not stasified", er.PolicyResponse.Policy, er.GetFailedRules(), er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
-  return eventInfos
-}
+//func (nsc *NamespaceController) report(engineResponses []response.EngineResponse) {
+//  // generate events
+//  eventInfos := generateEvents(engineResponses)
+//  nsc.eventGen.Add(eventInfos...)
+//  // generate policy violations
+//  pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
+//  nsc.pvGenerator.Add(pvInfos...)
+//}
+//
+//func generateEvents(ers []response.EngineResponse) []event.Info {
+//  var eventInfos []event.Info
+//  for _, er := range ers {
+//  if er.IsSuccesful() {
+//  continue
+//  }
+//  eventInfos = append(eventInfos, generateEventsPerEr(er)...)
+//  }
+//  return eventInfos
+//}
+//
+//func generateEventsPerEr(er response.EngineResponse) []event.Info {
+//  var eventInfos []event.Info
+//  glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
+//  for _, rule := range er.PolicyResponse.Rules {
+//  if rule.Success {
+//  continue
+//  }
+//  // generate event on resource for each failed rule
+//  glog.V(4).Infof("generation event on resource '%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
+//  e := event.Info{}
+//  e.Kind = er.PolicyResponse.Resource.Kind
+//  e.Namespace = "" // event generate on namespace resource
+//  e.Name = er.PolicyResponse.Resource.Name
+//  e.Reason = "Failure"
+//  e.Source = event.GeneratePolicyController
+//  e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' not satisfied. %v", er.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
+//  eventInfos = append(eventInfos, e)
+//  }
+//  if er.IsSuccesful() {
+//  return eventInfos
+//  }
+//  // generate a event on policy for all failed rules
+//  glog.V(4).Infof("generation event on policy '%s'", er.PolicyResponse.Policy)
+//  e := event.Info{}
+//  e.Kind = "ClusterPolicy"
+//  e.Namespace = ""
+//  e.Name = er.PolicyResponse.Policy
+//  e.Reason = "Failure"
+//  e.Source = event.GeneratePolicyController
+//  e.Message = fmt.Sprintf("policy '%s' rules '%v' on resource '%s/%s/%s' not stasified", er.PolicyResponse.Policy, er.GetFailedRules(), er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
+//  return eventInfos
+//}