Mirror of https://github.com/kyverno/kyverno.git (synced 2025-04-18 02:06:52 +00:00)

Commit 63c1874016 (parent b490d614f2): initial commit

7 changed files with 424 additions and 4 deletions
main.go (22 lines changed)
@@ -2,13 +2,16 @@ package main

import (
	"flag"
	"fmt"

	"github.com/golang/glog"
	"github.com/nirmata/kyverno/pkg/config"
	controller "github.com/nirmata/kyverno/pkg/controller"
	client "github.com/nirmata/kyverno/pkg/dclient"
	event "github.com/nirmata/kyverno/pkg/event"
	gencontroller "github.com/nirmata/kyverno/pkg/gencontroller"
	"github.com/nirmata/kyverno/pkg/sharedinformer"
	"github.com/nirmata/kyverno/pkg/utils"
	"github.com/nirmata/kyverno/pkg/violation"
	"github.com/nirmata/kyverno/pkg/webhooks"
	"k8s.io/sample-controller/pkg/signals"
@@ -33,11 +36,25 @@ func main() {
	if err != nil {
		glog.Fatalf("Error creating client: %v\n", err)
	}
	// test code: fetch a ConfigMap through the dynamic client and print it as JSON
	rGVR := client.DiscoveryClient.GetGVRFromKind("ConfigMap")
	obj, err := client.GetResource(rGVR.Resource, "ns2", "default-config")
	if err != nil {
		fmt.Println(err)
	}
	data, err := obj.MarshalJSON()
	if err != nil {
		fmt.Println(err)
	}
	fmt.Println(string(data))
	// end test code

	policyInformerFactory, err := sharedinformer.NewSharedInformerFactory(clientConfig)
	if err != nil {
		glog.Fatalf("Error creating policy shared informer: %v\n", err)
	}
	kubeInformer := utils.NewKubeInformerFactory(clientConfig)
	eventController := event.NewEventController(client, policyInformerFactory)
	violationBuilder := violation.NewPolicyViolationBuilder(client, policyInformerFactory, eventController)
@@ -47,6 +64,7 @@ func main() {
		violationBuilder,
		eventController)

	genController := gencontroller.NewGenController(client, eventController, policyInformerFactory, violationBuilder, kubeInformer.Core().V1().Namespaces())
	tlsPair, err := initTLSPemPair(clientConfig, client)
	if err != nil {
		glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
@@ -64,8 +82,9 @@ func main() {
	stopCh := signals.SetupSignalHandler()

	policyInformerFactory.Run(stopCh)
	kubeInformer.Start(stopCh)
	eventController.Run(stopCh)

	genController.Run(stopCh)
	if err = policyController.Run(stopCh); err != nil {
		glog.Fatalf("Error running PolicyController: %v\n", err)
	}
@@ -77,6 +96,7 @@ func main() {
	server.RunAsync()
	<-stopCh
	server.Stop()
	genController.Stop()
	eventController.Stop()
	policyController.Stop()
}
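The test block above exercises the dynamic client's Kind-to-GVR resolution end to end. As a minimal sketch of the same flow (the dumpResource helper below is hypothetical and not part of this commit; it reuses only the GetGVRFromKind, GetResource, and MarshalJSON calls shown above), the snippet could be factored into a reusable function:

// dumpResource is a hypothetical helper that fetches a resource by
// kind/namespace/name via the dynamic client and prints it as JSON,
// mirroring the test code in main.go.
func dumpResource(c *client.Client, kind, namespace, name string) error {
	// resolve the Kind (e.g. "ConfigMap") to its resource name (e.g. "configmaps")
	gvr := c.DiscoveryClient.GetGVRFromKind(kind)
	if gvr.Resource == "" {
		return fmt.Errorf("no resource mapping found for kind %s", kind)
	}
	obj, err := c.GetResource(gvr.Resource, namespace, name)
	if err != nil {
		return err
	}
	data, err := obj.MarshalJSON()
	if err != nil {
		return err
	}
	fmt.Println(string(data))
	return nil
}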
@@ -25,6 +25,10 @@ func ProcessExisting(client *client.Client, policy *types.Policy) []*info.Policy

	for _, rule := range policy.Spec.Rules {
		for _, k := range rule.Kinds {
			if k == "Namespace" {
				// REWORK: will be handled by the namespace controller
				continue
			}
			// kind -> resource
			gvr := client.DiscoveryClient.GetGVRFromKind(k)
			// label selectors
@@ -87,9 +91,9 @@ func applyPolicy(client *client.Client, policy *types.Policy, res *resourceInfo)
	if err != nil {
		return nil, err
	}
	// Generate
	gruleInfos := Generate(client, *policy, rawResource, *res.gvk, false)
	policyInfo.AddRuleInfos(gruleInfos)
	// // Generate
	// gruleInfos := Generate(client, *policy, rawResource, *res.gvk, false)
	// policyInfo.AddRuleInfos(gruleInfos)

	return policyInfo, nil
}
pkg/engine/generation_new.go (new file, 86 lines)
@@ -0,0 +1,86 @@
package engine

import (
	"fmt"

	"github.com/golang/glog"
	v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/info"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// GenerateNew applies the generation rules of a policy to a namespace
func GenerateNew(client *client.Client, policy *v1alpha1.Policy, ns *corev1.Namespace, processExisting bool) []*info.RuleInfo {
	ris := []*info.RuleInfo{}
	for _, rule := range policy.Spec.Rules {
		if rule.Generation == nil {
			continue
		}
		ri := info.NewRuleInfo(rule.Name, info.Generation)
		err := applyRuleGeneratorNew(client, ns, rule.Generation, processExisting)
		if err != nil {
			ri.Fail()
			ri.Addf("Rule %s: Failed to apply rule generator, err %v.", rule.Name, err)
		} else {
			ri.Addf("Rule %s: Generation applied successfully.", rule.Name)
		}
		ris = append(ris, ri)
	}
	return ris
}

func applyRuleGeneratorNew(client *client.Client, ns *corev1.Namespace, gen *v1alpha1.Generation, processExisting bool) error {
	var err error
	resource := &unstructured.Unstructured{}
	// resolve the resource name from the kind
	rGVR := client.DiscoveryClient.GetGVRFromKind(gen.Kind)
	if rGVR.Resource == "" {
		return fmt.Errorf("kind to resource name conversion failed for %s", gen.Kind)
	}
	// If processing an existing resource, we only check whether the resource
	// already exists
	if processExisting {
		obj, err := client.GetResource(rGVR.Resource, ns.Name, gen.Name)
		if err != nil {
			return err
		}
		// serialize the existing object for inspection
		data, err := obj.MarshalJSON()
		if err != nil {
			fmt.Println(err)
		}
		fmt.Println(string(data))
	}

	var rdata map[string]interface{}
	// data -> create a new resource from the inline definition
	if gen.Data != nil {
		rdata, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&gen.Data)
		if err != nil {
			glog.Error(err)
			return err
		}
	}
	// clone -> copy from an existing resource
	if gen.Clone != nil {
		resource, err = client.GetResource(rGVR.Resource, gen.Clone.Namespace, gen.Clone.Name)
		if err != nil {
			return err
		}
		rdata = resource.UnstructuredContent()
	}
	resource.SetUnstructuredContent(rdata)
	resource.SetName(gen.Name)
	resource.SetNamespace(ns.Name)
	// Reset the resource version so the object can be created
	resource.SetResourceVersion("")

	_, err = client.CreateResource(rGVR.Resource, ns.Name, resource, false)
	if err != nil {
		return err
	}
	return nil
}
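applyRuleGeneratorNew supports two sources for the generated resource: inline data (gen.Data) and a clone of an existing resource (gen.Clone). A hedged usage sketch follows, assuming the Generation fields referenced above (Kind, Name, Data, Clone.Namespace, Clone.Name); the clone-block type name CloneFrom and its pointer-ness are assumptions, since only the field names appear in this commit:

// Hypothetical rule: clone the "default-config" ConfigMap from the
// "default" namespace into the namespace being processed.
gen := &v1alpha1.Generation{
	Kind: "ConfigMap",          // resolved to "configmaps" via GetGVRFromKind
	Name: "default-config",     // name of the resource to create in ns
	Clone: &v1alpha1.CloneFrom{ // assumed type name for the clone block
		Namespace: "default",
		Name:      "default-config",
	},
}
if err := applyRuleGeneratorNew(client, ns, gen, false); err != nil {
	glog.Error(err)
}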
pkg/gencontroller/controller.go (new file, 157 lines)
@@ -0,0 +1,157 @@
package gencontroller

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/golang/glog"
	policyLister "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/event"
	policySharedInformer "github.com/nirmata/kyverno/pkg/sharedinformer"
	"github.com/nirmata/kyverno/pkg/violation"
	"k8s.io/apimachinery/pkg/api/errors"

	v1Informer "k8s.io/client-go/informers/core/v1"
	v1CoreLister "k8s.io/client-go/listers/core/v1"
	"k8s.io/client-go/tools/cache"
	"k8s.io/client-go/util/workqueue"
)

// Controller watches 'Namespace' creation/update events and applies the generation rules to them
type Controller struct {
	client          *client.Client
	namespaceLister v1CoreLister.NamespaceLister
	namespaceSynced cache.InformerSynced
	policyLister    policyLister.PolicyLister
	workqueue       workqueue.RateLimitingInterface
}

// NewGenController returns a new Controller to manage generation rules
func NewGenController(client *client.Client,
	eventController event.Generator,
	policyInformer policySharedInformer.PolicyInformer,
	violationBuilder violation.Generator,
	namespaceInformer v1Informer.NamespaceInformer) *Controller {

	// create the controller
	controller := &Controller{
		client:          client,
		namespaceLister: namespaceInformer.Lister(),
		namespaceSynced: namespaceInformer.Informer().HasSynced,
		policyLister:    policyInformer.GetLister(),
		workqueue:       workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), wqNamespace),
	}
	namespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    controller.createNamespaceHandler,
		UpdateFunc: controller.updateNamespaceHandler,
	})

	return controller
}

func (c *Controller) createNamespaceHandler(resource interface{}) {
	c.enqueueNamespace(resource)
}

func (c *Controller) updateNamespaceHandler(oldResource, newResource interface{}) {
	// Do we need to do anything if the namespace is modified?
}

func (c *Controller) enqueueNamespace(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		glog.Error(err)
		return
	}
	c.workqueue.Add(key)
}

// Run starts the controller workers
func (c *Controller) Run(stopCh <-chan struct{}) error {

	if ok := cache.WaitForCacheSync(stopCh, c.namespaceSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}

	for i := 0; i < workerCount; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	glog.Info("started namespace controller workers")
	return nil
}

// Stop shuts down the controller workqueue
func (c *Controller) Stop() {
	defer c.workqueue.ShutDown()
	glog.Info("shutting down namespace controller workers")
}

func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}

func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		defer c.workqueue.Done(obj)
		err := c.syncHandler(obj)
		c.handleErr(err, obj)
		return nil
	}(obj)
	if err != nil {
		glog.Error(err)
		return true
	}
	return true
}

func (c *Controller) handleErr(err error, key interface{}) {
	if err == nil {
		c.workqueue.Forget(key)
		return
	}
	if c.workqueue.NumRequeues(key) < wqRetryLimit {
		glog.Warningf("Error syncing events %v: %v", key, err)
		c.workqueue.AddRateLimited(key)
		return
	}
	c.workqueue.Forget(key)
	glog.Error(err)
	glog.Warningf("Dropping the key %q out of the queue: %v", key, err)
}

func (c *Controller) syncHandler(obj interface{}) error {
	var key string
	var ok bool
	if key, ok = obj.(string); !ok {
		return fmt.Errorf("expected string in workqueue but got %v", obj)
	}
	// Namespace is a cluster-wide resource
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		glog.Errorf("invalid namespace key: %s", key)
		return err
	}
	// Get the Namespace
	ns, err := c.namespaceLister.Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			glog.Errorf("namespace '%s' in work queue no longer exists", key)
			return nil
		}
		// return other errors so the key is retried instead of
		// proceeding with a nil namespace
		return err
	}

	glog.Info("applying generation policies to the namespace")
	//TODO: need to find a way to store the policies such that we can directly query the
	// policies with generation rules
	// PolicyListerExpansion
	c.processNamespace(ns)
	return nil
}
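Condensing the wiring from the main.go diff above: the controller follows the standard informer pattern, so it must be constructed against a namespace informer whose factory is started before Run, otherwise WaitForCacheSync blocks forever. A minimal sketch using only calls that appear in this commit (unlike main.go, it also checks Run's error):

genController := gencontroller.NewGenController(client, eventController,
	policyInformerFactory, violationBuilder,
	kubeInformer.Core().V1().Namespaces())

stopCh := signals.SetupSignalHandler()
// start the factory before Run so the namespace cache can sync
kubeInformer.Start(stopCh)
if err := genController.Run(stopCh); err != nil {
	glog.Fatalf("Error running gen controller: %v\n", err)
}
<-stopCh
genController.Stop()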
pkg/gencontroller/generation.go (new file, 72 lines)
@@ -0,0 +1,72 @@
package gencontroller

import (
	"fmt"

	"github.com/golang/glog"
	v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
	"github.com/nirmata/kyverno/pkg/engine"
	"github.com/nirmata/kyverno/pkg/info"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/labels"
)

func (c *Controller) processNamespace(ns *corev1.Namespace) error {
	// Get all policies and then verify if the namespace matches any of the defined selectors
	fmt.Println(ns.Name)
	policies, err := c.listPolicies(ns)
	if err != nil {
		return err
	}
	// process each policy on the namespace
	for _, p := range policies {
		c.processPolicy(ns, p)
	}

	return nil
}

func (c *Controller) listPolicies(ns *corev1.Namespace) ([]*v1alpha1.Policy, error) {
	var fpolicies []*v1alpha1.Policy
	policies, err := c.policyLister.List(labels.NewSelector())
	if err != nil {
		glog.Error("Unable to list policies from the policy lister; generation rules will not be applied")
		return nil, err
	}
	for _, p := range policies {
		// Check if the policy contains a generation rule
		for _, r := range p.Spec.Rules {
			if r.Generation != nil {
				fmt.Println(p.Name)
				// Check if the resource meets the description
				if namespaceMeetsRuleDescription(ns, r.ResourceDescription) {
					fpolicies = append(fpolicies, p)
					break
				}
			}
		}
	}

	return fpolicies, nil
}

func (c *Controller) processPolicy(ns *corev1.Namespace, p *v1alpha1.Policy) error {
	policyInfo := info.NewPolicyInfo(p.Name,
		ns.Kind,
		ns.Name,
		"") // Namespace is cluster-scoped, so there is no namespace field

	ruleInfos := engine.GenerateNew(c.client, p, ns, false)
	policyInfo.AddRuleInfos(ruleInfos)
	if !policyInfo.IsSuccessful() {
		glog.Infof("Failed to apply policy %s on resource %s %s", p.Name, ns.Kind, ns.Name)
		for _, r := range ruleInfos {
			glog.Warning(r.Msgs)
		}
	} else {
		glog.Infof("Generation rules from policy %s were successfully applied to %s %s", p.Name, ns.Kind, ns.Name)
	}

	//TODO: generate policy violations and corresponding events based on policyInfo
	return nil
}
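listPolicies scans every rule of every policy on each namespace event; the TODO in syncHandler notes that policies with generation rules should eventually be directly queryable. As a hedged sketch of that idea (hasGenerationRule is a hypothetical helper, not part of this commit), the per-rule check could be factored out so a lister expansion or index can reuse it:

// hasGenerationRule reports whether a policy contains at least one
// generation rule, letting namespace events skip policies without any.
func hasGenerationRule(p *v1alpha1.Policy) bool {
	for _, r := range p.Spec.Rules {
		if r.Generation != nil {
			return true
		}
	}
	return false
}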
pkg/gencontroller/utils.go (new file, 62 lines)
@@ -0,0 +1,62 @@
package gencontroller

import (
	"github.com/minio/minio/pkg/wildcard"
	v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
)

const (
	wqNamespace  string = "namespace"
	workerCount  int    = 1
	wqRetryLimit int    = 5
)

func namespaceMeetsRuleDescription(ns *corev1.Namespace, resourceDescription v1alpha1.ResourceDescription) bool {
	// REWORK: not strictly needed, but verify that 'Namespace' is in the list of supported kinds
	if !findKind(resourceDescription.Kinds, "Namespace") {
		return false
	}
	// name (supports wildcards)
	if resourceDescription.Name != nil {
		if !wildcard.Match(*resourceDescription.Name, ns.Name) {
			return false
		}
	}

	// label selector
	if resourceDescription.Selector != nil {
		selector, err := metav1.LabelSelectorAsSelector(resourceDescription.Selector)
		if err != nil {
			return false
		}

		labelSet := convertLabelsToLabelSet(ns.Labels)
		if !selector.Matches(labelSet) {
			return false
		}
	}
	return true
}

func convertLabelsToLabelSet(labelMap map[string]string) labels.Set {
	labelSet := make(labels.Set, len(labelMap))
	// REWORK: check whether labels.Set(labelMap) can be used as a direct conversion
	for k, v := range labelMap {
		labelSet[k] = v
	}
	return labelSet
}

func findKind(kinds []string, kindGVK string) bool {
	for _, kind := range kinds {
		if kind == kindGVK {
			return true
		}
	}
	return false
}
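namespaceMeetsRuleDescription combines three checks: the kind list must include "Namespace", the namespace name must match the wildcard-capable pattern, and the namespace labels must satisfy the selector. A hedged usage sketch, assuming only the ResourceDescription fields referenced above (Kinds, Name, Selector) and illustrative names and labels:

name := "prod-*" // wildcard pattern, as supported by wildcard.Match
ns := &corev1.Namespace{
	ObjectMeta: metav1.ObjectMeta{
		Name:   "prod-team-a",
		Labels: map[string]string{"env": "prod"},
	},
}
rd := v1alpha1.ResourceDescription{
	Kinds:    []string{"Namespace"},
	Name:     &name,
	Selector: &metav1.LabelSelector{MatchLabels: map[string]string{"env": "prod"}},
}
// all three checks pass, so the namespace matches the rule description
fmt.Println(namespaceMeetsRuleDescription(ns, rd)) // true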
pkg/utils/informer.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package utils

import (
	"github.com/golang/glog"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

// NewKubeInformerFactory builds a shared informer factory over a typed kubernetes client
func NewKubeInformerFactory(cfg *rest.Config) kubeinformers.SharedInformerFactory {
	// kubernetes client
	kubeClient, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		glog.Errorf("error building kubernetes client: %s", err)
	}

	kubeInformerFactory := kubeinformers.NewSharedInformerFactory(kubeClient, 0)
	return kubeInformerFactory
}
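Matching its use in the main.go diff above: build the factory once, request the informers the controllers need (which registers them with the factory), then start the factory with the stop channel:

kubeInformer := utils.NewKubeInformerFactory(clientConfig)
// requesting the informer registers it with the factory
nsInformer := kubeInformer.Core().V1().Namespaces()
// start all registered informers; they stop when stopCh is closed
kubeInformer.Start(stopCh)
// nsInformer is then passed to gencontroller.NewGenController (see main.go)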