Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-28 10:28:36 +00:00)
namespace controller redesign
This commit is contained in: parent aed0ed0dc1, commit 05e1f128c7
11 changed files with 659 additions and 383 deletions
main.go (18 changed lines)

@@ -9,9 +9,12 @@ import (
	"github.com/nirmata/kyverno/pkg/config"
	client "github.com/nirmata/kyverno/pkg/dclient"
	event "github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/namespace"
	"github.com/nirmata/kyverno/pkg/policy"
	"github.com/nirmata/kyverno/pkg/policyviolation"
	"github.com/nirmata/kyverno/pkg/utils"
	"github.com/nirmata/kyverno/pkg/webhooks"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/sample-controller/pkg/signals"
)

@@ -73,6 +76,19 @@ func main() {
		glog.Fatalf("error creating policy violation controller: %v\n", err)
	}

	// NAMESPACE INFORMER
	// watches namespace resource
	// - cache resync time: 30 seconds
	kubeClient, err := utils.NewKubeClient(clientConfig)
	if err != nil {
		glog.Fatalf("Error creating kubernetes client: %v\n", err)
	}
	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 30)

	// GENERATE CONTROLLER
	// - watches for Namespace resource and generates resource based on the policy generate rule
	nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().Policies(), pInformer.Kyverno().V1alpha1().PolicyViolations(), egen)

	tlsPair, err := initTLSPemPair(clientConfig, client)
	if err != nil {
		glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)

@@ -94,9 +110,11 @@ func main() {
	}

	pInformer.Start(stopCh)
	kubeInformer.Start(stopCh)
	go pc.Run(1, stopCh)
	go pvc.Run(1, stopCh)
	go egen.Run(1, stopCh)
	go nsc.Run(1, stopCh)

	//TODO add WG for the go routines?
	server.RunAsync()
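A note on the informer wiring above: the second argument of kubeinformers.NewSharedInformerFactoryWithOptions is a time.Duration resync period, so the bare literal 30 means 30 nanoseconds rather than the 30 seconds the comment describes. A minimal sketch of the presumably intended call (an assumption, not what this commit contains; it would also require importing the time package):

	// Hypothetical adjustment: express the 30-second resync period as a time.Duration.
	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 30*time.Second)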
Modified file (engine — generate rules):

@@ -14,22 +14,24 @@ import (
)

//Generate applies generation rules on a resource
func Generate(client *client.Client, policy *kyverno.Policy, ns unstructured.Unstructured) []info.RuleInfo {
func Generate(client *client.Client, policy kyverno.Policy, ns unstructured.Unstructured) []info.RuleInfo {
	ris := []info.RuleInfo{}
	for _, rule := range policy.Spec.Rules {
		if rule.Generation == (kyverno.Generation{}) {
			continue
		}
		glog.V(4).Infof("applying policy %s generate rule %s on resource %s/%s/%s", policy.Name, rule.Name, ns.GetKind(), ns.GetNamespace(), ns.GetName())
		ri := info.NewRuleInfo(rule.Name, info.Generation)
		err := applyRuleGenerator(client, ns, rule.Generation)
		if err != nil {
			ri.Fail()
			ri.Addf("Rule %s: Failed to apply rule generator, err %v.", rule.Name, err)
			ri.Addf("Failed to apply rule generator, err %v.", rule.Name, err)
			glog.Infof("failed to apply policy %s rule %s on resource %s/%s/%s: %v", policy.Name, rule.Name, ns.GetKind(), ns.GetNamespace(), ns.GetName(), err)
		} else {
			ri.Addf("Rule %s: Generation successfully.", rule.Name)
			ri.Addf("Generation successfully.", rule.Name)
			glog.Infof("successfully applied policy %s rule %s on resource %s/%s/%s", policy.Name, rule.Name, ns.GetKind(), ns.GetNamespace(), ns.GetName())
		}
		ris = append(ris, ri)

	}
	return ris
}

@@ -40,19 +42,24 @@ func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen
	var rdata map[string]interface{}

	if gen.Data != nil {
		glog.V(4).Info("generate rule: creates new resource")
		// 1> Check if resource exists
		obj, err := client.GetResource(gen.Kind, ns.GetName(), gen.Name)
		if err == nil {
			glog.V(4).Infof("generate rule: resource %s/%s/%s already present. checking if it contains the required configuration", gen.Kind, ns.GetName(), gen.Name)
			// 2> If already exists, then verify the content is contained
			// found the resource
			// check if the rule is create, if yes, then verify if the specified configuration is present in the resource
			ok, err := checkResource(gen.Data, obj)
			if err != nil {
				glog.V(4).Infof("generate rule:: unable to check if configuration %v, is present in resource %s/%s/%s", gen.Data, gen.Kind, ns.GetName(), gen.Name)
				return err
			}
			if !ok {
				glog.V(4).Infof("generate rule:: configuration %v not present in resource %s/%s/%s", gen.Data, gen.Kind, ns.GetName(), gen.Name)
				return errors.New("rule configuration not present in resource")
			}
			glog.V(4).Infof("generate rule: required configuration %v is present in resource %s/%s/%s", gen.Data, gen.Kind, ns.GetName(), gen.Name)
			return nil
		}
		rdata, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&gen.Data)

@@ -62,16 +69,20 @@ func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen
		}
	}
	if gen.Clone != (kyverno.CloneFrom{}) {
		glog.V(4).Info("generate rule: clone resource")
		// 1> Check if resource exists
		_, err := client.GetResource(gen.Kind, ns.GetName(), gen.Name)
		if err == nil {
			glog.V(4).Infof("generate rule: resource %s/%s/%s already present", gen.Kind, ns.GetName(), gen.Name)
			return nil
		}
		// 2> If already exists return
		// 2> If clone already exists return
		resource, err = client.GetResource(gen.Kind, gen.Clone.Namespace, gen.Clone.Name)
		if err != nil {
			glog.V(4).Infof("generate rule: clone reference resource %s/%s/%s not present: %v", gen.Kind, gen.Clone.Namespace, gen.Clone.Name, err)
			return err
		}
		glog.V(4).Infof("generate rule: clone reference resource %s/%s/%s present", gen.Kind, gen.Clone.Namespace, gen.Clone.Name)
		rdata = resource.UnstructuredContent()
	}
	resource.SetUnstructuredContent(rdata)

@@ -82,11 +93,14 @@ func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen

	_, err = client.CreateResource(gen.Kind, ns.GetName(), resource, false)
	if err != nil {
		glog.V(4).Infof("generate rule: unable to create resource %s/%s/%s: %v", gen.Kind, ns.GetKind(), ns.GetNamespace(), ns.GetName(), err)
		return err
	}
	glog.V(4).Infof("generate rule: created resource %s/%s/%s", gen.Kind, ns.GetKind(), ns.GetNamespace(), ns.GetName())
	return nil
}

//checkResource checks if the config is present in the resource
func checkResource(config interface{}, resource *unstructured.Unstructured) (bool, error) {
	var err error
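The checkResource hunk is truncated in this view. As an illustration only of the idea described in the comments above (the configured data must be contained in the live resource), a containment check could look roughly like the sketch below; the helper is hypothetical, needs only the standard reflect package, and is not the function from this commit:

	// containsConfig is a hypothetical helper: it reports whether every key/value
	// in config is present, recursively, in the actual resource content.
	func containsConfig(config, actual interface{}) bool {
		cfgMap, ok := config.(map[string]interface{})
		if !ok {
			// leaf values must match exactly
			return reflect.DeepEqual(config, actual)
		}
		actualMap, ok := actual.(map[string]interface{})
		if !ok {
			return false
		}
		for k, v := range cfgMap {
			if !containsConfig(v, actualMap[k]) {
				return false
			}
		}
		return true
	}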
Deleted file (package gencontroller):

@@ -1,160 +0,0 @@
package gencontroller

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/golang/glog"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/event"
	"k8s.io/apimachinery/pkg/api/errors"

	v1Informer "k8s.io/client-go/informers/core/v1"
	v1CoreLister "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

//Controller watches the 'Namespace' resource creation/update and applied the generation rules on them
type Controller struct {
	client                *client.Client
	namespaceLister       v1CoreLister.NamespaceLister
	namespaceSynced       cache.InformerSynced
	policyLister          policyLister.PolicyLister
	eventController       event.Generator
	violationBuilder      violation.Generator
	annotationsController annotations.Controller
	workqueue             workqueue.RateLimitingInterface
}

//NewGenController returns a new Controller to manage generation rules
func NewGenController(client *client.Client,
	eventController event.Generator,
	policyInformer policySharedInformer.PolicyInformer,
	violationBuilder violation.Generator,
	namespaceInformer v1Informer.NamespaceInformer,
	annotationsController annotations.Controller) *Controller {

	// create the controller
	controller := &Controller{
		client:                client,
		namespaceLister:       namespaceInformer.Lister(),
		namespaceSynced:       namespaceInformer.Informer().HasSynced,
		policyLister:          policyInformer.GetLister(),
		eventController:       eventController,
		violationBuilder:      violationBuilder,
		annotationsController: annotationsController,
		workqueue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), wqNamespace),
	}
	namespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    controller.createNamespaceHandler,
		UpdateFunc: controller.updateNamespaceHandler,
	})

	return controller
}
func (c *Controller) createNamespaceHandler(resource interface{}) {
	c.enqueueNamespace(resource)
}

func (c *Controller) updateNamespaceHandler(oldResoruce, newResource interface{}) {
	// DO we need to anything if the namespace is modified ?
}

func (c *Controller) enqueueNamespace(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		glog.Error(err)
		return
	}
	c.workqueue.Add(key)
}

//Run to run the controller
func (c *Controller) Run(stopCh <-chan struct{}) error {

	if ok := cache.WaitForCacheSync(stopCh, c.namespaceSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	for i := 0; i < workerCount; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	glog.Info("started namespace controller workers")
	return nil
}

//Stop to stop the controller
func (c *Controller) Stop() {
	defer c.workqueue.ShutDown()
	glog.Info("shutting down namespace controller workers")
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		defer c.workqueue.Done(obj)
		err := c.syncHandler(obj)
		c.handleErr(err, obj)
		return nil
	}(obj)
	if err != nil {
		glog.Error(err)
		return true
	}
	return true
}

func (c *Controller) handleErr(err error, key interface{}) {
	if err == nil {
		c.workqueue.Forget(key)
		return
	}
	if c.workqueue.NumRequeues(key) < wqRetryLimit {
		glog.Warningf("Error syncing events %v: %v", key, err)
		c.workqueue.AddRateLimited(key)
		return
	}
	c.workqueue.Forget(key)
	glog.Error(err)
	glog.Warningf("Dropping the key %q out of the queue: %v", key, err)
}

func (c *Controller) syncHandler(obj interface{}) error {
	var key string
	var ok bool
	if key, ok = obj.(string); !ok {
		return fmt.Errorf("expected string in workqueue but got %v", obj)
	}
	// Namespace is cluster wide resource
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		glog.Errorf("invalid namespace key: %s", key)
		return err
	}
	// Get Namespace
	ns, err := c.namespaceLister.Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			glog.Errorf("namespace '%s' in work queue no longer exists", key)
			return nil
		}
	}

	//TODO: need to find a way to store the policy such that we can directly queury the
	// policies with generation policies
	// PolicyListerExpansion
	c.processNamespace(ns)
	return nil
}
Deleted file (package gencontroller):

@@ -1,155 +0,0 @@
package gencontroller

import (
	"encoding/json"
	"fmt"
	"strings"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"

	"github.com/golang/glog"
	"github.com/nirmata/kyverno/pkg/engine"
	event "github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/info"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
)

func (c *Controller) processNamespace(ns *corev1.Namespace) error {
	//Get all policies and then verify if the namespace matches any of the defined selectors
	policies, err := c.listPolicies(ns)
	if err != nil {
		return err
	}
	// process policy on namespace
	for _, p := range policies {
		c.processPolicy(ns, p)
	}

	return nil
}

func (c *Controller) listPolicies(ns *corev1.Namespace) ([]*v1alpha1.Policy, error) {
	var fpolicies []*v1alpha1.Policy
	policies, err := c.policyLister.List(labels.NewSelector())
	if err != nil {
		glog.Error("Unable to connect to policy controller. Unable to access policies not applying GENERATION rules")
		return nil, err
	}
	for _, p := range policies {
		// Check if the policy contains a generatoin rule
		for _, r := range p.Spec.Rules {
			if r.Generation != nil {
				// Check if the resource meets the description
				data, err := json.Marshal(ns)
				if err != nil {
					glog.Error(err)
					continue
				}
				// convert types of GVK
				nsGvk := schema.FromAPIVersionAndKind("v1", "Namespace")
				// Hardcode as we have a informer on specified gvk
				gvk := metav1.GroupVersionKind{Group: nsGvk.Group, Kind: nsGvk.Kind, Version: nsGvk.Version}
				if engine.ResourceMeetsDescription(data, r.MatchResources.ResourceDescription, r.ExcludeResources.ResourceDescription, gvk) {
					fpolicies = append(fpolicies, p)
					break
				}
			}
		}
	}

	return fpolicies, nil
}

func (c *Controller) processPolicy(ns *corev1.Namespace, p *v1alpha1.Policy) {
	var eventInfo *event.Info
	var onViolation bool
	var msg string

	policyInfo := info.NewPolicyInfo(p.Name,
		"Namespace",
		ns.Name,
		"",
		p.Spec.ValidationFailureAction) // Namespace has no namespace..WOW

	// convert to unstructured
	unstrMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ns)
	if err != nil {
		glog.Error(err)
		return
	}
	unstObj := unstructured.Unstructured{Object: unstrMap}
	ruleInfos := engine.Generate(c.client, p, unstObj)
	policyInfo.AddRuleInfos(ruleInfos)

	// generate annotations on namespace
	c.createAnnotations(policyInfo)
	//TODO generate namespace on created resources

	if !policyInfo.IsSuccessful() {
		glog.Infof("Failed to apply policy %s on resource %s %s", p.Name, ns.Kind, ns.Name)
		for _, r := range ruleInfos {
			glog.Warning(r.Msgs)

			if msg = strings.Join(r.Msgs, " "); strings.Contains(msg, "rule configuration not present in resource") {
				onViolation = true
				msg = fmt.Sprintf(`Resource creation violates generate rule '%s' of policy '%s'`, r.Name, policyInfo.Name)
			}
		}

		if onViolation {
			glog.Infof("Adding violation for generation rule of policy %s\n", policyInfo.Name)
			// Policy Violation
			v := violation.BuldNewViolation(policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName, event.PolicyViolation.String(), policyInfo.FailedRules())
			c.violationBuilder.Add(v)
		} else {
			// Event
			eventInfo = event.NewEvent(policyKind, "", policyInfo.Name, event.RequestBlocked,
				event.FPolicyApplyBlockCreate, policyInfo.RName, policyInfo.GetRuleNames(false))

			glog.V(2).Infof("Request blocked event info has prepared for %s/%s\n", policyKind, policyInfo.Name)

			c.eventController.Add(eventInfo)
		}
		return
	}

	glog.Infof("Generation from policy %s has succesfully applied to %s/%s", p.Name, policyInfo.RKind, policyInfo.RName)

	eventInfo = event.NewEvent(policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName,
		event.PolicyApplied, event.SRulesApply, policyInfo.GetRuleNames(true), policyInfo.Name)

	glog.V(2).Infof("Success event info has prepared for %s/%s\n", policyInfo.RKind, policyInfo.RName)

	c.eventController.Add(eventInfo)
}

func (c *Controller) createAnnotations(pi *info.PolicyInfo) {
	//get resource
	obj, err := c.client.GetResource(pi.RKind, pi.RNamespace, pi.RName)
	if err != nil {
		glog.Error(err)
		return
	}
	// add annotation for policy application
	ann := obj.GetAnnotations()
	// Generation rules
	ann, gpatch, err := annotations.AddPolicyJSONPatch(ann, pi, info.Generation)
	if err != nil {
		glog.Error(err)
		return
	}
	if gpatch == nil {
		// nothing to patch
		return
	}
	// add the anotation to the resource
	_, err = c.client.PatchResource(pi.RKind, pi.RNamespace, pi.RName, gpatch)
	if err != nil {
		glog.Error(err)
		return
	}
}
Deleted file (package gencontroller):

@@ -1,62 +0,0 @@
package gencontroller

import (
	"github.com/minio/minio/pkg/wildcard"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

const (
	wqNamespace  string = "namespace"
	workerCount  int    = 1
	wqRetryLimit int    = 5
	policyKind   string = "Policy"
)

func namespaceMeetsRuleDescription(ns *corev1.Namespace, resourceDescription v1alpha1.ResourceDescription) bool {
	//REWORK Not needed but verify the 'Namespace' is defined in the list of supported kinds
	if !findKind(resourceDescription.Kinds, "Namespace") {
		return false
	}
	if resourceDescription.Name != nil {
		if !wildcard.Match(*resourceDescription.Name, ns.Name) {
			return false
		}
	}

	if resourceDescription.Selector != nil {
		selector, err := metav1.LabelSelectorAsSelector(resourceDescription.Selector)
		if err != nil {
			return false
		}

		labelSet := convertLabelsToLabelSet(ns.Labels)
		// labels
		if !selector.Matches(labelSet) {
			return false
		}
	}
	return true
}

func convertLabelsToLabelSet(labelMap map[string]string) labels.Set {
	labelSet := make(labels.Set, len(labelMap))
	// REWORK: check if the below works
	// if x, ok := labelMap.(labels.Set); !ok {

	// }
	for k, v := range labelMap {
		labelSet[k] = v
	}
	return labelSet
}

func findKind(kinds []string, kindGVK string) bool {
	for _, kind := range kinds {
		if kind == kindGVK {
			return true
		}
	}
	return false
}
pkg/namespace/controller.go (new file, +199)

@@ -0,0 +1,199 @@
package namespace

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/golang/glog"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/event"
	"k8s.io/apimachinery/pkg/api/errors"

	kyvernoclient "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned"
	informer "github.com/nirmata/kyverno/pkg/clientNew/informers/externalversions/kyverno/v1alpha1"
	lister "github.com/nirmata/kyverno/pkg/clientNew/listers/kyverno/v1alpha1"
	v1 "k8s.io/api/core/v1"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	v1Informer "k8s.io/client-go/informers/core/v1"
	v1CoreLister "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

const (
	// maxRetries is the number of times a Namespace will be processed for a policy before its dropped from the queue
	maxRetries = 15
)

//NamespaceController watches the 'Namespace' resource creation/update and applies the generation rules on them
type NamespaceController struct {
	client        *client.Client
	kyvernoClient *kyvernoclient.Clientset
	syncHandler   func(nsKey string) error
	enqueueNs     func(ns *v1.Namespace)

	// nsLister can list/get namespaces from the shared informer's store
	nsLister v1CoreLister.NamespaceLister
	// nsListerSynced returns true if the Namespace store has been synced at least once
	nsListerSynced cache.InformerSynced
	// pLister can list/get policies from the shared informer's store
	pLister lister.PolicyLister
	// pvListerSynced returns true if the Policy store has been synced at least once
	pvListerSynced cache.InformerSynced
	// pvLister can list/get policy violation from the shared informer's store
	pvLister lister.PolicyViolationLister

	// eventGen provides interface to generate events
	eventGen event.Interface
	// Namespaces that need to be synced
	queue workqueue.RateLimitingInterface
	// Resource manager, manages the mapping for already processed resource
	rm resourceManager
}

//NewNamespaceController returns a new Controller to manage generation rules
func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
	client *client.Client,
	nsInformer v1Informer.NamespaceInformer,
	pInformer informer.PolicyInformer,
	pvInformer informer.PolicyViolationInformer,
	eventGen event.Interface) *NamespaceController {
	//TODO: do we need an event recorder for this controller?
	// create the controller
	nsc := &NamespaceController{
		client:        client,
		kyvernoClient: kyvernoClient,
		eventGen:      eventGen,
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
	}

	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    nsc.addNamespace,
		UpdateFunc: nsc.updateNamespace,
		DeleteFunc: nsc.deleteNamespace,
	})

	nsc.enqueueNs = nsc.enqueue
	nsc.syncHandler = nsc.syncNamespace

	nsc.nsLister = nsInformer.Lister()
	nsc.nsListerSynced = nsInformer.Informer().HasSynced
	nsc.pLister = pInformer.Lister()
	nsc.pvListerSynced = pInformer.Informer().HasSynced
	nsc.pvLister = pvInformer.Lister()

	return nsc
}
func (nsc *NamespaceController) addNamespace(obj interface{}) {
	ns := obj.(*v1.Namespace)
	glog.V(4).Infof("Adding Namespace %s", ns.Name)
	nsc.enqueueNs(ns)
}

func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
	oldNs := old.(*v1.Namespace)
	curNs := cur.(*v1.Namespace)
	if curNs.ResourceVersion == oldNs.ResourceVersion {
		// Periodic resync will send update events for all known Namespace.
		// Two different versions of the same resource will always have different RVs.
		return
	}
	glog.V(4).Infof("Updating Namespace %s", curNs.Name)
	//TODO: anything to be done here?
}

func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
	ns, _ := obj.(*v1.Namespace)
	glog.V(4).Infof("Deleting Namespace %s", ns.Name)
	//TODO: anything to be done here?
}

func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
	key, err := cache.MetaNamespaceKeyFunc(ns)
	if err != nil {
		glog.Error(err)
		return
	}
	nsc.queue.Add(key)
}

//Run to run the controller
func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer nsc.queue.ShutDown()

	glog.Info("Starting namespace controller")
	defer glog.Info("Shutting down namespace controller")

	if ok := cache.WaitForCacheSync(stopCh, nsc.nsListerSynced); !ok {
		return
	}

	for i := 0; i < workerCount; i++ {
		go wait.Until(nsc.worker, time.Second, stopCh)
	}
	<-stopCh
}

// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (nsc *NamespaceController) worker() {
	for nsc.processNextWorkItem() {
	}
}

func (nsc *NamespaceController) processNextWorkItem() bool {
	key, quit := nsc.queue.Get()
	if quit {
		return false
	}
	defer nsc.queue.Done(key)

	err := nsc.syncHandler(key.(string))
	nsc.handleErr(err, key)

	return true
}

func (nsc *NamespaceController) handleErr(err error, key interface{}) {
	if err == nil {
		nsc.queue.Forget(key)
		return
	}

	if nsc.queue.NumRequeues(key) < maxRetries {
		glog.V(2).Infof("Error syncing Namespace %v: %v", key, err)
		nsc.queue.AddRateLimited(key)
		return
	}

	utilruntime.HandleError(err)
	glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
	nsc.queue.Forget(key)
}

func (nsc *NamespaceController) syncNamespace(key string) error {
	startTime := time.Now()
	glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
	defer func() {
		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
	}()
	namespace, err := nsc.nsLister.Get(key)
	if errors.IsNotFound(err) {
		glog.V(2).Infof("Namespace %v has been deleted", key)
		return nil
	}
	if err != nil {
		return err
	}
	// Deep-copy otherwise we are mutating our cache.
	// TODO: Deep-copy only when needed.
	n := namespace.DeepCopy()

	// process generate rules
	policyInfos := nsc.processNamespace(*n)
	// report errors
	nsc.report(policyInfos)
	return nil
}
pkg/namespace/generation.go (new file, +298)

@@ -0,0 +1,298 @@
package namespace

import (
	"sync"
	"time"

	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/engine"

	"github.com/golang/glog"

	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"

	lister "github.com/nirmata/kyverno/pkg/clientNew/listers/kyverno/v1alpha1"
	"github.com/nirmata/kyverno/pkg/info"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/runtime"
)

type resourceManager interface {
	ProcessResource(policy, pv, kind, ns, name, rv string) bool
	//TODO removeResource(kind, ns, name string) error
	RegisterResource(policy, pv, kind, ns, name, rv string)
	// reload
	Drop()
}

// ResourceManager stores the details on already processed resources for caching
type ResourceManager struct {
	// we drop and re-build the cache
	// based on the memory consumed by the map
	data        map[string]interface{}
	mux         sync.RWMutex
	time        time.Time
	rebuildTime int64 // after how many seconds should we rebuild the cache
}

//NewResourceManager returns a new ResourceManager
func NewResourceManager(rebuildTime int64) *ResourceManager {
	rm := ResourceManager{
		data:        make(map[string]interface{}),
		time:        time.Now(),
		rebuildTime: rebuildTime,
	}
	// set time it was built
	return &rm
}

var empty struct{}

//RegisterResource stores if the policy is processed on this resource version
func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
	rm.mux.Lock()
	defer rm.mux.Unlock()
	// add the resource
	key := buildKey(policy, pv, kind, ns, name, rv)
	rm.data[key] = empty
}

//ProcessResource returns true if the policy was not applied on the resource
func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
	rm.mux.RLock()
	defer rm.mux.RUnlock()

	key := buildKey(policy, pv, kind, ns, name, rv)
	_, ok := rm.data[key]
	return ok == false
}

func buildKey(policy, pv, kind, ns, name, rv string) string {
	return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
}
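The resourceManager above is declared on the controller (the rm field) but, in the code shown in this diff, it is never populated or consulted. A minimal sketch of how its ProcessResource/RegisterResource methods could be used to avoid re-applying a policy to a namespace at the same resource version; the function name and wiring below are hypothetical, not part of this commit:

	// applyWithCache is a hypothetical helper: it skips policies that were already
	// applied to this namespace at its current resource version and records the
	// ones it does apply.
	func (nsc *NamespaceController) applyWithCache(ns unstructured.Unstructured, policies []*kyverno.Policy) []info.PolicyInfo {
		var policyInfos []info.PolicyInfo
		rv := ns.GetResourceVersion()
		for _, policy := range policies {
			// ProcessResource returns true when the policy has not yet been applied
			// to this resource version, so false means "already done, skip".
			if !nsc.rm.ProcessResource(policy.Name, "", "Namespace", "", ns.GetName(), rv) {
				continue
			}
			policyInfos = append(policyInfos, applyPolicy(nsc.client, ns, *policy))
			nsc.rm.RegisterResource(policy.Name, "", "Namespace", "", ns.GetName(), rv)
		}
		return policyInfos
	}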
// func (nsc *NamespaceController) listPolicies(ns *corev1.Namespace) ([]*v1alpha1.Policy, error) {
// 	var fpolicies []*v1alpha1.Policy
// 	policies, err := c.policyLister.List(labels.NewSelector())
// 	if err != nil {
// 		glog.Error("Unable to connect to policy controller. Unable to access policies not applying GENERATION rules")
// 		return nil, err
// 	}
// 	for _, p := range policies {
// 		// Check if the policy contains a generatoin rule
// 		for _, r := range p.Spec.Rules {
// 			if r.Generation != nil {
// 				// Check if the resource meets the description
// 				data, err := json.Marshal(ns)
// 				if err != nil {
// 					glog.Error(err)
// 					continue
// 				}
// 				// convert types of GVK
// 				nsGvk := schema.FromAPIVersionAndKind("v1", "Namespace")
// 				// Hardcode as we have a informer on specified gvk
// 				gvk := metav1.GroupVersionKind{Group: nsGvk.Group, Kind: nsGvk.Kind, Version: nsGvk.Version}
// 				if engine.ResourceMeetsDescription(data, r.MatchResources.ResourceDescription, r.ExcludeResources.ResourceDescription, gvk) {
// 					fpolicies = append(fpolicies, p)
// 					break
// 				}
// 			}
// 		}
// 	}

// 	return fpolicies, nil
// }

// func (nsc *NamespaceController) processNamespace(ns *corev1.Namespace) error {
// 	//Get all policies and then verify if the namespace matches any of the defined selectors
// 	policies, err := c.listPolicies(ns)
// 	if err != nil {
// 		return err
// 	}
// 	// process policy on namespace
// 	for _, p := range policies {
// 		c.processPolicy(ns, p)
// 	}

// 	return nil
// }

func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []info.PolicyInfo {
	var policyInfos []info.PolicyInfo
	// convert to unstructured
	unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(namespace)
	if err != nil {
		glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
		return policyInfos
	}
	ns := unstructured.Unstructured{Object: unstr}

	// get all the policies that have a generate rule and apply on the namespace
	// apply policy on resource

	policies := listpolicies(ns, nsc.pLister)
	for _, policy := range policies {
		policyInfo := applyPolicy(nsc.client, ns, *policy)
		policyInfos = append(policyInfos, policyInfo)
	}
	return policyInfos
}

func listpolicies(ns unstructured.Unstructured, pLister lister.PolicyLister) []*kyverno.Policy {
	var filteredpolicies []*kyverno.Policy
	glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
	policies, err := pLister.List(labels.NewSelector())
	if err != nil {
		glog.Errorf("failed to list policies: %v", err)
		return nil
	}
	for _, policy := range policies {
		for _, rule := range policy.Spec.Rules {
			if rule.Generation == (kyverno.Generation{}) {
				continue
			}
			ok := engine.MatchesResourceDescription(ns, rule)
			if !ok {
				glog.V(4).Infof("namespace %s does not satisfy the resource description for the rule", ns.GetName())
				continue
			}
			filteredpolicies = append(filteredpolicies, policy)
		}
	}

	return filteredpolicies
}

func applyPolicy(client *client.Client, resource unstructured.Unstructured, policy kyverno.Policy) info.PolicyInfo {
	startTime := time.Now()
	glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
	defer func() {
		glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
	}()
	policyInfo := info.NewPolicyInfo(policy.Name, resource.GetKind(), resource.GetName(), resource.GetNamespace(), policy.Spec.ValidationFailureAction)
	ruleInfos := engine.Generate(client, policy, resource)
	policyInfo.AddRuleInfos(ruleInfos)

	return policyInfo
}

// // func (nsc *NamespaceController) listPolicies(ns *corev1.Namespace) ([]*v1alpha1.Policy, error) {
// // 	var fpolicies []*v1alpha1.Policy
// // 	policies, err := c.policyLister.List(labels.NewSelector())
// // 	if err != nil {
// // 		glog.Error("Unable to connect to policy controller. Unable to access policies not applying GENERATION rules")
// // 		return nil, err
// // 	}
// // 	for _, p := range policies {
// // 		// Check if the policy contains a generatoin rule
// // 		for _, r := range p.Spec.Rules {
// // 			if r.Generation != nil {
// // 				// Check if the resource meets the description
// // 				data, err := json.Marshal(ns)
// // 				if err != nil {
// // 					glog.Error(err)
// // 					continue
// // 				}
// // 				// convert types of GVK
// // 				nsGvk := schema.FromAPIVersionAndKind("v1", "Namespace")
// // 				// Hardcode as we have a informer on specified gvk
// // 				gvk := metav1.GroupVersionKind{Group: nsGvk.Group, Kind: nsGvk.Kind, Version: nsGvk.Version}
// // 				if engine.ResourceMeetsDescription(data, r.MatchResources.ResourceDescription, r.ExcludeResources.ResourceDescription, gvk) {
// // 					fpolicies = append(fpolicies, p)
// // 					break
// // 				}
// // 			}
// // 		}
// // 	}

// // 	return fpolicies, nil
// // }

// func (nsc *NamespaceController) processPolicy(ns *corev1.Namespace, p *v1alpha1.Policy) {
// 	var eventInfo *event.Info
// 	var onViolation bool
// 	var msg string

// 	policyInfo := info.NewPolicyInfo(p.Name,
// 		"Namespace",
// 		ns.Name,
// 		"",
// 		p.Spec.ValidationFailureAction) // Namespace has no namespace..WOW

// 	// convert to unstructured
// 	unstrMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ns)
// 	if err != nil {
// 		glog.Error(err)
// 		return
// 	}
// 	unstObj := unstructured.Unstructured{Object: unstrMap}
// 	ruleInfos := engine.Generate(c.client, p, unstObj)
// 	policyInfo.AddRuleInfos(ruleInfos)

// 	// generate annotations on namespace
// 	c.createAnnotations(policyInfo)
// 	//TODO generate namespace on created resources

// 	if !policyInfo.IsSuccessful() {
// 		glog.Infof("Failed to apply policy %s on resource %s %s", p.Name, ns.Kind, ns.Name)
// 		for _, r := range ruleInfos {
// 			glog.Warning(r.Msgs)

// 			if msg = strings.Join(r.Msgs, " "); strings.Contains(msg, "rule configuration not present in resource") {
// 				onViolation = true
// 				msg = fmt.Sprintf(`Resource creation violates generate rule '%s' of policy '%s'`, r.Name, policyInfo.Name)
// 			}
// 		}

// 		if onViolation {
// 			glog.Infof("Adding violation for generation rule of policy %s\n", policyInfo.Name)
// 			// Policy Violation
// 			v := violation.BuldNewViolation(policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName, event.PolicyViolation.String(), policyInfo.FailedRules())
// 			c.violationBuilder.Add(v)
// 		} else {
// 			// Event
// 			eventInfo = event.NewEvent(policyKind, "", policyInfo.Name, event.RequestBlocked,
// 				event.FPolicyApplyBlockCreate, policyInfo.RName, policyInfo.GetRuleNames(false))

// 			glog.V(2).Infof("Request blocked event info has prepared for %s/%s\n", policyKind, policyInfo.Name)

// 			c.eventController.Add(eventInfo)
// 		}
// 		return
// 	}

// 	glog.Infof("Generation from policy %s has succesfully applied to %s/%s", p.Name, policyInfo.RKind, policyInfo.RName)

// 	eventInfo = event.NewEvent(policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName,
// 		event.PolicyApplied, event.SRulesApply, policyInfo.GetRuleNames(true), policyInfo.Name)

// 	glog.V(2).Infof("Success event info has prepared for %s/%s\n", policyInfo.RKind, policyInfo.RName)

// 	c.eventController.Add(eventInfo)
// }

// func (nsc *NamespaceController) createAnnotations(pi *info.PolicyInfo) {
// 	//get resource
// 	obj, err := c.client.GetResource(pi.RKind, pi.RNamespace, pi.RName)
// 	if err != nil {
// 		glog.Error(err)
// 		return
// 	}
// 	// add annotation for policy application
// 	ann := obj.GetAnnotations()
// 	// Generation rules
// 	ann, gpatch, err := annotations.AddPolicyJSONPatch(ann, pi, info.Generation)
// 	if err != nil {
// 		glog.Error(err)
// 		return
// 	}
// 	if gpatch == nil {
// 		// nothing to patch
// 		return
// 	}
// 	// add the anotation to the resource
// 	_, err = c.client.PatchResource(pi.RKind, pi.RNamespace, pi.RName, gpatch)
// 	if err != nil {
// 		glog.Error(err)
// 		return
// 	}
// }
pkg/namespace/report.go (new file, +59)

@@ -0,0 +1,59 @@
package namespace

import (
	"fmt"

	"github.com/golang/glog"
	"github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/info"
	"github.com/nirmata/kyverno/pkg/policyviolation"
)

func (nsc *NamespaceController) report(policyInfos []info.PolicyInfo) {
	// generate events
	// generate policy violations
	for _, policyInfo := range policyInfos {
		// events
		// success - policy applied on resource
		// failure - policy/rule failed to apply on the resource
		reportEvents(policyInfo, nsc.eventGen)
		// policy violations
		// failure - policy/rule failed to apply on the resource
	}

	// generate policy violation
	policyviolation.GeneratePolicyViolations(nsc.pvListerSynced, nsc.pvLister, nsc.kyvernoClient, policyInfos)

}

//reportEvents generates events for the failed resources
func reportEvents(policyInfo info.PolicyInfo, eventGen event.Interface) {

	if policyInfo.IsSuccessful() {
		return
	}
	glog.V(4).Infof("reporting results for policy %s application on resource %s/%s/%s", policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName)
	for _, rule := range policyInfo.Rules {
		if rule.IsSuccessful() {
			continue
		}

		// generate event on resource for each failed rule
		e := &event.Info{}
		e.Kind = policyInfo.RKind
		e.Namespace = policyInfo.RNamespace
		e.Name = policyInfo.RName
		e.Reason = "Failure"
		e.Message = fmt.Sprintf("policy %s (%s) rule %s failed to apply. %v", policyInfo.Name, rule.RuleType.String(), rule.Name, rule.GetErrorString())
		eventGen.Add(e)

	}
	// generate an event on the policy for all failed rules
	e := &event.Info{}
	e.Kind = "Policy"
	e.Namespace = ""
	e.Name = policyInfo.Name
	e.Reason = "Failure"
	e.Message = fmt.Sprintf("failed to apply rules %s on resource %s/%s/%s", policyInfo.FailedRules(), policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName)
	eventGen.Add(e)
}
pkg/namespace/utils.go (new file, +55)

@@ -0,0 +1,55 @@
package namespace

const (
	wqNamespace  string = "namespace"
	workerCount  int    = 1
	wqRetryLimit int    = 5
	policyKind   string = "Policy"
)

// func namespaceMeetsRuleDescription(ns *corev1.Namespace, resourceDescription v1alpha1.ResourceDescription) bool {
// 	//REWORK Not needed but verify the 'Namespace' is defined in the list of supported kinds
// 	if !findKind(resourceDescription.Kinds, "Namespace") {
// 		return false
// 	}
// 	if resourceDescription.Name != nil {
// 		if !wildcard.Match(*resourceDescription.Name, ns.Name) {
// 			return false
// 		}
// 	}

// 	if resourceDescription.Selector != nil {
// 		selector, err := metav1.LabelSelectorAsSelector(resourceDescription.Selector)
// 		if err != nil {
// 			return false
// 		}

// 		labelSet := convertLabelsToLabelSet(ns.Labels)
// 		// labels
// 		if !selector.Matches(labelSet) {
// 			return false
// 		}
// 	}
// 	return true
// }

// func convertLabelsToLabelSet(labelMap map[string]string) labels.Set {
// 	labelSet := make(labels.Set, len(labelMap))
// 	// REWORK: check if the below works
// 	// if x, ok := labelMap.(labels.Set); !ok {

// 	// }
// 	for k, v := range labelMap {
// 		labelSet[k] = v
// 	}
// 	return labelSet
// }

// func findKind(kinds []string, kindGVK string) bool {
// 	for _, kind := range kinds {
// 		if kind == kindGVK {
// 			return true
// 		}
// 	}
// 	return false
// }
Modified file (policy controller):

@@ -393,7 +393,6 @@ func (pc *PolicyController) syncPolicy(key string) error {
	// Deep-copy otherwise we are mutating our cache.
	// TODO: Deep-copy only when needed.
	p := policy.DeepCopy()
	// TODO: Update Status to update ObserverdGeneration

	pvList, err := pc.getPolicyViolationsForPolicy(p)
	if err != nil {
Modified file (package utils):

@@ -6,6 +6,8 @@ import (

	"github.com/minio/minio/pkg/wildcard"
	"k8s.io/api/admission/v1beta1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

type K8Resource struct {

@@ -75,3 +77,12 @@ func ParseKinds(list string) []K8Resource {
	}
	return resources
}

//NewKubeClient returns a new kubernetes client
func NewKubeClient(config *rest.Config) (kubernetes.Interface, error) {
	kclient, err := kubernetes.NewForConfig(config)
	if err != nil {
		return nil, err
	}
	return kclient, nil
}