From 0babf1f25dda3e189243d272432b9ada581b2c0c Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Mon, 6 May 2019 09:12:37 -0700 Subject: [PATCH 01/17] redesign event & violation builders. kubeclient to provide resource handlers --- controller/controller.go | 129 ++++--- .../controller_interfaces.go | 18 + .../MutatingWebhookConfiguration_debug.yaml | 2 +- definitions/install.yaml | 5 +- kubeclient/kubeclient.go | 194 +++++++++++ main.go | 10 +- pkg/apis/policy/v1alpha1/types.go | 8 +- pkg/event/builder.go | 161 +++++++++ .../internalinterfaces/builder_interfaces.go | 12 + pkg/event/utils/util.go | 15 + pkg/resourceClient/client.go | 159 --------- .../violation_interfaces.go | 11 + pkg/violation/util.go | 37 -- pkg/violation/utils/util.go | 10 + pkg/violation/violation.go | 315 +++++------------- scripts/compile-image.sh | 2 +- scripts/deploy-controller.sh | 11 +- scripts/update-codegen.sh | 2 +- webhooks/admission.go | 24 +- webhooks/mutation.go | 8 +- 20 files changed, 610 insertions(+), 523 deletions(-) create mode 100755 controller/internalinterfaces/controller_interfaces.go create mode 100644 pkg/event/builder.go create mode 100644 pkg/event/internalinterfaces/builder_interfaces.go create mode 100644 pkg/event/utils/util.go delete mode 100644 pkg/resourceClient/client.go create mode 100644 pkg/violation/internalinterfaces/violation_interfaces.go create mode 100644 pkg/violation/utils/util.go diff --git a/controller/controller.go b/controller/controller.go index c2b687a67c..496f745314 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -2,36 +2,53 @@ package controller import ( "errors" + "fmt" "log" "os" "sort" "time" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" - + internalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + kubeClient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" policies "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/typed/policy/v1alpha1" informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions" lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" + event "github.com/nirmata/kube-policy/pkg/event" + eventinternalinterfaces "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" + eventutils "github.com/nirmata/kube-policy/pkg/event/utils" violation "github.com/nirmata/kube-policy/pkg/violation" + violationinternalinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + mergetypes "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" ) -// PolicyController for CRD -type PolicyController struct { +// PolicyController API +type PolicyController interface { + internalinterfaces.PolicyGetter + createPolicyHandler(resource interface{}) + updatePolicyHandler(oldResource, newResource interface{}) + deletePolicyHandler(resource interface{}) + getResourceKey(resource interface{}) string +} + +//policyController for CRD +type policyController struct { policyInformerFactory informers.SharedInformerFactory policyLister lister.PolicyLister policiesInterface policies.PolicyInterface logger *log.Logger - violationBuilder 
*violation.Builder + violationBuilder violationinternalinterfaces.ViolationGenerator + eventBuilder eventinternalinterfaces.BuilderInternal } // NewPolicyController from cmd args -func NewPolicyController(config *rest.Config, logger *log.Logger) (*PolicyController, error) { +func NewPolicyController(config *rest.Config, logger *log.Logger, kubeClient *kubeClient.KubeClient) (PolicyController, error) { if logger == nil { logger = log.New(os.Stdout, "Policy Controller: ", log.LstdFlags|log.Lshortfile) } @@ -44,54 +61,55 @@ func NewPolicyController(config *rest.Config, logger *log.Logger) (*PolicyContro if err != nil { return nil, err } - // Initialize Kube Client - kubeClient, err := kubernetes.NewForConfig(config) - if err != nil { - return nil, err - } - policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, time.Second*30) + policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, 0) policyInformer := policyInformerFactory.Nirmata().V1alpha1().Policies() - // generate Violation builder - builder, err := violation.NewViolationHelper(kubeClient, policyClientset, logger, policyInformer) + // generate Event builder + eventBuilder, err := event.NewEventBuilder(kubeClient, logger) if err != nil { return nil, err } - controller := &PolicyController{ + + // generate Violation builer + violationBuilder, err := violation.NewViolationBuilder(kubeClient, eventBuilder, logger) + + controller := &policyController{ policyInformerFactory: policyInformerFactory, policyLister: policyInformer.Lister(), policiesInterface: policyClientset.NirmataV1alpha1().Policies("default"), logger: logger, - violationBuilder: builder, + violationBuilder: violationBuilder, + eventBuilder: eventBuilder, } - policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.createPolicyHandler, UpdateFunc: controller.updatePolicyHandler, DeleteFunc: controller.deletePolicyHandler, }) - + // Set the controller + eventBuilder.SetController(controller) + violationBuilder.SetController(controller) return controller, nil } -// Run is main controller thread -func (c *PolicyController) Run(stopCh <-chan struct{}) { - c.policyInformerFactory.Start(stopCh) - // Un-comment to run the violation Builder - c.violationBuilder.Run(1, stopCh) +func (c *policyController) GetCacheInformerSync() cache.InformerSynced { + return c.policyInformerFactory.Nirmata().V1alpha1().Policies().Informer().HasSynced } -// GetPolicies retrieves all policy resources -// from cache. 
Cache is refreshed by informer -func (c *PolicyController) GetPolicies() []types.Policy { +// Run is main controller thread +func (c *policyController) Run(stopCh <-chan struct{}) { + c.policyInformerFactory.Start(stopCh) + c.eventBuilder.Run(eventutils.EventWorkerThreadCount, stopCh) +} + +func (c *policyController) GetPolicies() ([]types.Policy, error) { // Create nil Selector to grab all the policies selector := labels.NewSelector() cachedPolicies, err := c.policyLister.List(selector) - if err != nil { c.logger.Printf("Error: %v", err) - return nil + return nil, err } var policies []types.Policy @@ -102,17 +120,16 @@ func (c *PolicyController) GetPolicies() []types.Policy { sort.Slice(policies, func(i, j int) bool { return policies[i].CreationTimestamp.Time.Before(policies[j].CreationTimestamp.Time) }) - - return policies + return policies, nil } // Writes error message to the policy logs in status section -func (c *PolicyController) LogPolicyError(name, text string) { +func (c *policyController) LogPolicyError(name, text string) { c.addPolicyLog(name, "[ERROR] "+text) } // Writes info message to the policy logs in status section -func (c *PolicyController) LogPolicyInfo(name, text string) { +func (c *policyController) LogPolicyInfo(name, text string) { c.addPolicyLog(name, "[ INFO] "+text) } @@ -121,7 +138,7 @@ func (c *PolicyController) LogPolicyInfo(name, text string) { const policyLogMaxRecords int = 50 // Appends given log text to the status/logs array. -func (c *PolicyController) addPolicyLog(name, text string) { +func (c *policyController) addPolicyLog(name, text string) { getOptions := metav1.GetOptions{ ResourceVersion: "1", IncludeUninitialized: true, @@ -134,12 +151,12 @@ func (c *PolicyController) addPolicyLog(name, text string) { // Add new log record text = time.Now().Format("2006 Jan 02 15:04:05.999 ") + text - //policy.Status.Logs = append(policy.Status.Logs, text) - // Pop front extra log records - // logsCount := len(policy.Status.Logs) - // if logsCount > policyLogMaxRecords { - // policy.Status.Logs = policy.Status.Logs[logsCount-policyLogMaxRecords:] - // } + policy.Status.Logs = append(policy.Status.Logs, text) + // Pop front extra log records + logsCount := len(policy.Status.Logs) + if logsCount > policyLogMaxRecords { + policy.Status.Logs = policy.Status.Logs[logsCount-policyLogMaxRecords:] + } // Save logs to policy object _, err = c.policiesInterface.UpdateStatus(policy) if err != nil { @@ -147,29 +164,43 @@ func (c *PolicyController) addPolicyLog(name, text string) { } } -func (c *PolicyController) createPolicyHandler(resource interface{}) { +func (c *policyController) createPolicyHandler(resource interface{}) { key := c.getResourceKey(resource) c.logger.Printf("Policy created: %s", key) } -func (c *PolicyController) updatePolicyHandler(oldResource, newResource interface{}) { +func (c *policyController) updatePolicyHandler(oldResource, newResource interface{}) { oldKey := c.getResourceKey(oldResource) newKey := c.getResourceKey(newResource) - c.logger.Printf("Policy %s updated to %s", oldKey, newKey) } -func (c *PolicyController) deletePolicyHandler(resource interface{}) { +func (c *policyController) deletePolicyHandler(resource interface{}) { key := c.getResourceKey(resource) c.logger.Printf("Policy deleted: %s", key) } -func (c *PolicyController) getResourceKey(resource interface{}) string { +func (c *policyController) getResourceKey(resource interface{}) string { if key, err := cache.MetaNamespaceKeyFunc(resource); err != nil { c.logger.Fatalf("Error 
retrieving policy key: %v", err) } else { return key } - return "" } +func (c *policyController) GetPolicy(name string) (*types.Policy, error) { + policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(name) + if err != nil { + utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", name)) + return nil, err + } + return c.getPolicyInterface(policyNamespace).Get(policyName) +} + +func (c *policyController) getPolicyInterface(namespace string) lister.PolicyNamespaceLister { + return c.policyLister.Policies(namespace) +} + +func (c *policyController) PatchPolicy(policy string, pt mergetypes.PatchType, data []byte) (*types.Policy, error) { + return c.policiesInterface.Patch(policy, pt, data) +} diff --git a/controller/internalinterfaces/controller_interfaces.go b/controller/internalinterfaces/controller_interfaces.go new file mode 100755 index 0000000000..791dae6ea5 --- /dev/null +++ b/controller/internalinterfaces/controller_interfaces.go @@ -0,0 +1,18 @@ +package internalinterfaces + +import ( + policytypes "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + types "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/tools/cache" +) + +// PolicyGetter interface for external API +type PolicyGetter interface { + GetPolicies() ([]policytypes.Policy, error) + GetPolicy(name string) (*policytypes.Policy, error) + GetCacheInformerSync() cache.InformerSynced + PatchPolicy(policy string, pt types.PatchType, data []byte) (*policytypes.Policy, error) + Run(stopCh <-chan struct{}) + LogPolicyError(name, text string) + LogPolicyInfo(name, text string) +} diff --git a/definitions/MutatingWebhookConfiguration_debug.yaml b/definitions/MutatingWebhookConfiguration_debug.yaml index 713fb9cb40..a3d7cfdcba 100644 --- a/definitions/MutatingWebhookConfiguration_debug.yaml +++ b/definitions/MutatingWebhookConfiguration_debug.yaml @@ -10,7 +10,7 @@ webhooks: - name: webhook.nirmata.kube-policy clientConfig: url: "https://localhost/mutate" - caBundle: ${CA_BUNDLE} + caBundle: MIIC5zCCAc+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5pa3ViZUNBMB4XDTE5MDQxMDIxMjM1OFoXDTI5MDQwODIxMjM1OFowFTETMBEGA1UEAxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALV1uHt50QVtVnGiGc4nrMFhsuT+R/KpU0qq3hNV6xPWiBcfUZNqz0iEAbh9YpZ3np2d2gHniBkbUfZwcI541SYacwPfVkdnBMKvVqBYGk1wz2GVyd8fas6guerchmUO7XtU5VfGr9TbKGp9vo/d+NWwGlp9/x7Ni8rnK1D1oMoFmQ02Q6N0xdsBiCEs+MUVqUlu/Xtx+rITD+fYOWpB+z1+KOEysFGaLl8nayxanhCqcRR+T8SeJ+hXIDHNoOqFDf1Y7XmzLtTlynoVNLh0gMy4cWgoFYuklz7JlYvpLJFt9cSziwIKfG56T6RQZ77z5w4TO5bfTvYlHCztY5zSiNkCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCqaqjPJTDU0U2pkewyCGFGVAzdnDgGozjeCP2rRojhJOiOMaBBgVIpJxCRz2BKfXHW+B4hKCli08t7lPe1ab12hM6wmlLxkkmbjxW4H9Coo/OAaoIz6bfmMhBUjl6tuTsgTkHjarG7W12rFb7Xkj6zSd17EJsUoRx8t1GD65HXFindEMvjEGQ4MPfX3XqLT1NNIsFHF6e7RXpYPWQ/eT3Z/9ia+7vZzXzEmjXYedEeggyqg6QrL+DX3BQF8TcTLmxSRA6MMqOXEjlmU85TOTjP39PBhzCz78m8ZSM9KHQyjOv1xhR0CxZMyxfiN2bvA3aJAtMLOwLjKIYAkLm3W2hp rules: - operations: [ "CREATE" ] resources: [ "*/*" ] diff --git a/definitions/install.yaml b/definitions/install.yaml index 44c1214c00..c4081e8f1d 100644 --- a/definitions/install.yaml +++ b/definitions/install.yaml @@ -8,7 +8,7 @@ spec: - name: v1alpha1 served: true storage: true - scope: Cluster + scope: Namespaced names: kind: Policy plural: policies @@ -34,6 +34,7 @@ spec: type: object required: - resource + - name properties: resource: type: object @@ -103,6 +104,8 @@ spec: - remove value: AnyValue: {} + name: + type: string 
configMapGenerator: type: object required: diff --git a/kubeclient/kubeclient.go b/kubeclient/kubeclient.go index 44eb018658..2068f04217 100644 --- a/kubeclient/kubeclient.go +++ b/kubeclient/kubeclient.go @@ -1,6 +1,7 @@ package kubeclient import ( + "fmt" "log" "os" "time" @@ -11,8 +12,12 @@ import ( v1 "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/kubernetes" + event "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" + "k8s.io/client-go/tools/cache" ) // KubeClient is the api-client for core Kubernetes objects @@ -38,6 +43,10 @@ func NewKubeClient(config *rest.Config, logger *log.Logger) (*KubeClient, error) }, nil } +func (kc *KubeClient) GetEventsInterface(namespace string) event.EventInterface { + return kc.client.CoreV1().Events(namespace) +} + func (kc *KubeClient) GetKubePolicyDeployment() (*apps.Deployment, error) { kubePolicyDeployment, err := kc.client. Apps(). @@ -175,3 +184,188 @@ func (kc *KubeClient) createSecretAfterNamespaceIsCreated(secret v1.Secret, name kc.logger.Printf("Can't create a secret: %s", err) } } + +var rMapper = map[string]getter{ + "ConfigMap": configMapGetter, + "Pods": podsGetter, + "Deploymeny": deploymentGetter, + "CronJob": cronJobGetter, + "Endpoints": endpointsbGetter, + "HorizontalPodAutoscaler": horizontalPodAutoscalerGetter, + "Ingress": ingressGetter, + "Job": jobGetter, + "LimitRange": limitRangeGetter, + "Namespace": namespaceGetter, + "NetworkPolicy": networkPolicyGetter, + "PersistentVolumeClaim": persistentVolumeClaimGetter, + "PodDisruptionBudget": podDisruptionBudgetGetter, + "PodTemplate": podTemplateGetter, + "ResourceQuota": resourceQuotaGetter, + "Secret": secretGetter, + "Service": serviceGetter, + "StatefulSet": statefulSetGetter, +} + +type getter func(*kubernetes.Clientset, string, string) (runtime.Object, error) + +//GetResource get the resource object +func (kc *KubeClient) GetResource(kind string, resource string) (runtime.Object, error) { + namespace, name, err := cache.SplitMetaNamespaceKey(resource) + if err != nil { + utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", resource)) + return nil, err + } + // runtime.Object -> Actual object + return rMapper[kind](kc.client, namespace, name) +} + +//GetSupportedResourceTypes provides list of supported types +func GetSupportedResourceTypes() (rTypes []string) { + for k := range rMapper { + rTypes = append(rTypes, k) + } + return rTypes +} + +func configMapGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} +func podsGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func deploymentGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func cronJobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.BatchV1beta1().CronJobs(namespace).Get(name, 
metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func endpointsbGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func horizontalPodAutoscalerGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func ingressGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.ExtensionsV1beta1().Ingresses(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func jobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func limitRangeGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().LimitRanges(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func namespaceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func networkPolicyGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.NetworkingV1().NetworkPolicies(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func persistentVolumeClaimGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().PersistentVolumeClaims(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func podDisruptionBudgetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func podTemplateGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().PodTemplates(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func resourceQuotaGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func secretGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func serviceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + 
return obj, nil +} + +func statefulSetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { + obj, err := clientSet.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} diff --git a/main.go b/main.go index e0ba632a68..6059b5019f 100644 --- a/main.go +++ b/main.go @@ -24,16 +24,16 @@ func main() { log.Fatalf("Error building kubeconfig: %v\n", err) } - controller, err := controller.NewPolicyController(clientConfig, nil) - if err != nil { - log.Fatalf("Error creating PolicyController: %s\n", err) - } - kubeclient, err := kubeclient.NewKubeClient(clientConfig, nil) if err != nil { log.Fatalf("Error creating kubeclient: %v\n", err) } + controller, err := controller.NewPolicyController(clientConfig, nil, kubeclient) + if err != nil { + log.Fatalf("Error creating PolicyController: %s\n", err) + } + mutationWebhook, err := webhooks.CreateMutationWebhook(clientConfig, kubeclient, controller, nil) if err != nil { log.Fatalf("Error creating mutation webhook: %v\n", err) diff --git a/pkg/apis/policy/v1alpha1/types.go b/pkg/apis/policy/v1alpha1/types.go index b503a1fa86..97e2214d52 100644 --- a/pkg/apis/policy/v1alpha1/types.go +++ b/pkg/apis/policy/v1alpha1/types.go @@ -12,9 +12,9 @@ import ( type Policy struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec PolicySpec `json:"spec"` - // Status PolicyStatus `json:"status"` - Status PolicyViolations `json:"status,omitempty"` + Spec PolicySpec `json:"spec"` + Status PolicyStatus `json:"status"` + PolicyViolation PolicyViolations `json:"policyviolation,omitempty"` } type PolicyViolations struct { @@ -23,9 +23,9 @@ type PolicyViolations struct { type Violation struct { Kind string `json:"kind,omitempty"` Resource string `json:"resource,omitempty"` - Source string `json:"source,omitempty"` Rule string `json:"rule,omitempty"` Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty` } // Specification of the Policy. 
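
The redesigned builders in this patch are consumed through the new interfaces rather than concrete types: a caller builds a ViolationInfo, hands it to the violation builder's Create, and the violation builder records it on the Policy object while delegating Kubernetes Event creation to the event builder added below. A minimal usage sketch, assuming the wiring shown in main.go and controller.go in this patch; reportRuleFailure and its parameters are illustrative and not part of the change:

package example

import (
	"log"

	violationinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces"
	violationutils "github.com/nirmata/kube-policy/pkg/violation/utils"
)

// reportRuleFailure (illustrative) hands a rule failure to the violation
// builder, which records it on the Policy resource and uses the event
// builder for any related Kubernetes Events.
func reportRuleFailure(vb violationinterfaces.ViolationGenerator, policyKey, rule, kind, resourceKey, reason, message string) {
	info := violationutils.ViolationInfo{
		Policy:   policyKey,   // "<namespace>/<name>" key of the Policy object
		Rule:     rule,        // name of the rule that failed
		Kind:     kind,        // kind of the offending resource, e.g. "Deployment"
		Resource: resourceKey, // "<namespace>/<name>" key of the offending resource
		Reason:   reason,
		Message:  message,
	}
	if err := vb.Create(info); err != nil {
		log.Printf("failed to record violation for %s: %v", resourceKey, err)
	}
}
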
diff --git a/pkg/event/builder.go b/pkg/event/builder.go new file mode 100644 index 0000000000..f43231b42b --- /dev/null +++ b/pkg/event/builder.go @@ -0,0 +1,161 @@ +package event + +import ( + "errors" + "fmt" + "log" + "time" + + controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + kubeClient "github.com/nirmata/kube-policy/kubeclient" + "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" + policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" + "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" + utils "github.com/nirmata/kube-policy/pkg/event/utils" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" +) + +type builder struct { + kubeClient *kubeClient.KubeClient + controller controllerinternalinterfaces.PolicyGetter + workqueue workqueue.RateLimitingInterface + recorder record.EventRecorder + logger *log.Logger + policySynced cache.InformerSynced +} + +type Builder interface { + internalinterfaces.BuilderInternal + SyncHandler(key utils.EventInfo) error + ProcessNextWorkItem() bool + RunWorker() +} + +func NewEventBuilder(kubeClient *kubeClient.KubeClient, + logger *log.Logger, +) (Builder, error) { + builder := &builder{ + kubeClient: kubeClient, + workqueue: initWorkqueue(), + recorder: initRecorder(kubeClient), + logger: logger, + } + + return builder, nil +} + +func initRecorder(kubeClient *kubeClient.KubeClient) record.EventRecorder { + // Initliaze Event Broadcaster + policyscheme.AddToScheme(scheme.Scheme) + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(log.Printf) + eventBroadcaster.StartRecordingToSink( + &typedcorev1.EventSinkImpl{ + + Interface: kubeClient.GetEventsInterface("")}) + recorder := eventBroadcaster.NewRecorder( + scheme.Scheme, + v1.EventSource{Component: utils.EventSource}) + return recorder +} + +func initWorkqueue() workqueue.RateLimitingInterface { + return workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), utils.EventWorkQueueName) +} + +func (b *builder) SetController(controller controllerinternalinterfaces.PolicyGetter) { + b.controller = controller + b.policySynced = controller.GetCacheInformerSync() +} + +func (b *builder) AddEvent(info utils.EventInfo) { + b.workqueue.Add(info) +} + +// Run : Initialize the worker routines to process the event creation +func (b *builder) Run(threadiness int, stopCh <-chan struct{}) error { + if b.controller == nil { + return errors.New("Controller has not be set") + } + defer utilruntime.HandleCrash() + defer b.workqueue.ShutDown() + log.Println("Starting violation builder") + + fmt.Println(("Wait for informer cache to sync")) + if ok := cache.WaitForCacheSync(stopCh, b.policySynced); !ok { + fmt.Println("Unable to sync the cache") + } + log.Println("Starting workers") + + for i := 0; i < threadiness; i++ { + go wait.Until(b.RunWorker, time.Second, stopCh) + } + log.Println("Started workers") + <-stopCh + log.Println("Shutting down workers") + return nil + +} + +func (b *builder) RunWorker() { + for b.ProcessNextWorkItem() { + } +} + +func (b *builder) ProcessNextWorkItem() bool { + obj, shutdown := b.workqueue.Get() + if shutdown { + return false + } + err := func(obj interface{}) error { + defer 
b.workqueue.Done(obj) + var key utils.EventInfo + var ok bool + if key, ok = obj.(utils.EventInfo); !ok { + b.workqueue.Forget(obj) + log.Printf("Expecting type info by got %v", obj) + return nil + } + + // Run the syncHandler, passing the resource and the policy + if err := b.SyncHandler(key); err != nil { + b.workqueue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error()) + } + + return nil + }(obj) + + if err != nil { + log.Println((err)) + } + return true +} + +func (b *builder) SyncHandler(key utils.EventInfo) error { + var resource runtime.Object + var err error + switch key.Kind { + case "Policy": + resource, err = b.controller.GetPolicy(key.Resource) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to create event for policy %s, will retry ", key.Resource)) + return err + } + default: + resource, err = b.kubeClient.GetResource(key.Kind, key.Resource) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry ", key.Resource)) + return err + } + } + b.recorder.Event(resource, v1.EventTypeNormal, key.Reason, key.Message) + return nil +} diff --git a/pkg/event/internalinterfaces/builder_interfaces.go b/pkg/event/internalinterfaces/builder_interfaces.go new file mode 100644 index 0000000000..b020824df5 --- /dev/null +++ b/pkg/event/internalinterfaces/builder_interfaces.go @@ -0,0 +1,12 @@ +package internalinterfaces + +import ( + internalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + utils "github.com/nirmata/kube-policy/pkg/event/utils" +) + +type BuilderInternal interface { + SetController(controller internalinterfaces.PolicyGetter) + Run(threadiness int, stopCh <-chan struct{}) error + AddEvent(info utils.EventInfo) +} diff --git a/pkg/event/utils/util.go b/pkg/event/utils/util.go new file mode 100644 index 0000000000..de21252a3f --- /dev/null +++ b/pkg/event/utils/util.go @@ -0,0 +1,15 @@ +package utils + +const EventSource = "policy-controller" + +const EventWorkQueueName = "policy-controller-events" + +type EventInfo struct { + Kind string + Resource string + Rule string + Reason string + Message string +} + +const EventWorkerThreadCount = 1 diff --git a/pkg/resourceClient/client.go b/pkg/resourceClient/client.go deleted file mode 100644 index 23a7af0e54..0000000000 --- a/pkg/resourceClient/client.go +++ /dev/null @@ -1,159 +0,0 @@ -package resourceClient - -import ( - meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/client-go/kubernetes" -) - -func GetResouce(clientSet *kubernetes.Clientset, kind string, resourceNamespace string, resourceName string) (runtime.Object, error) { - switch kind { - case "Deployment": - { - obj, err := clientSet.AppsV1().Deployments(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "Pods": - { - obj, err := clientSet.CoreV1().Pods(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "ConfigMap": - { - obj, err := clientSet.CoreV1().ConfigMaps(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "CronJob": - { - obj, err := clientSet.BatchV1beta1().CronJobs(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "Endpoints": - { - obj, err := 
clientSet.CoreV1().Endpoints(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "HorizontalPodAutoscaler": - { - obj, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "Ingress": - { - obj, err := clientSet.ExtensionsV1beta1().Ingresses(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "Job": - { - obj, err := clientSet.BatchV1().Jobs(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "LimitRange": - { - obj, err := clientSet.CoreV1().LimitRanges(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "Namespace": - { - obj, err := clientSet.CoreV1().Namespaces().Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "NetworkPolicy": - { - obj, err := clientSet.NetworkingV1().NetworkPolicies(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "PersistentVolumeClaim": - { - obj, err := clientSet.CoreV1().PersistentVolumeClaims(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "PodDisruptionBudget": - { - obj, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "PodTemplate": - { - obj, err := clientSet.CoreV1().PodTemplates(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "ResourceQuota": - { - obj, err := clientSet.CoreV1().ResourceQuotas(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "Secret": - { - obj, err := clientSet.CoreV1().Secrets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "Service": - { - obj, err := clientSet.CoreV1().Services(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - case "StatefulSet": - { - obj, err := clientSet.AppsV1().StatefulSets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) - if err != nil { - return nil, err - } - return obj, nil - } - - default: - return nil, nil - } -} diff --git a/pkg/violation/internalinterfaces/violation_interfaces.go b/pkg/violation/internalinterfaces/violation_interfaces.go new file mode 100644 index 0000000000..7863bc8906 --- /dev/null +++ b/pkg/violation/internalinterfaces/violation_interfaces.go @@ -0,0 +1,11 @@ +package internalinterfaces + +import ( + "github.com/nirmata/kube-policy/controller/internalinterfaces" + utils "github.com/nirmata/kube-policy/pkg/violation/utils" +) + +type ViolationGenerator interface { + SetController(controller internalinterfaces.PolicyGetter) + Create(info utils.ViolationInfo) error +} diff --git a/pkg/violation/util.go b/pkg/violation/util.go index c5c3bcbe2b..00ad937cfa 100644 --- a/pkg/violation/util.go +++ b/pkg/violation/util.go @@ -1,34 +1,5 @@ package violation -// Mode to identify the CRUD event when the 
violation was identified -type Mode string - -const ( - // Create resource - Create Mode = "create" - // Update resource - Update Mode = "update" - // Delete resource - Delete Mode = "delete" -) - -// ResourceMode to identify the source of violatino check -type ResourceMode string - -const ( - // Resource type is kubernetes resource - Resource ResourceMode = "resource" - // Policy type is policy custom resource - Policy ResourceMode = "policy" -) - -type Target int - -const ( - ResourceTarget Target = 1 - PolicyTarget Target = 2 -) - // Source for the events recorder const violationEventSource = "policy-controller" @@ -38,14 +9,6 @@ const workqueueViolationName = "Policy-Violations" // Event Reason const violationEventResrouce = "Violation" -type EventInfo struct { - Resource string - Kind string - Reason string - Source string - ResourceTarget Target -} - // Info input details type Info struct { Kind string diff --git a/pkg/violation/utils/util.go b/pkg/violation/utils/util.go new file mode 100644 index 0000000000..b200752345 --- /dev/null +++ b/pkg/violation/utils/util.go @@ -0,0 +1,10 @@ +package utils + +type ViolationInfo struct { + Kind string + Resource string + Policy string + Rule string + Reason string + Message string +} diff --git a/pkg/violation/violation.go b/pkg/violation/violation.go index f4c1c1a21d..860a12366d 100644 --- a/pkg/violation/violation.go +++ b/pkg/violation/violation.go @@ -1,284 +1,129 @@ package violation import ( + "encoding/json" "fmt" "log" - "time" + jsonpatch "github.com/evanphx/json-patch" + controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + kubeClient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" - policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" - informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1" - lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" - resourceClient "github.com/nirmata/kube-policy/pkg/resourceClient" - v1 "k8s.io/api/core/v1" + "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" + eventinternalinterfaces "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" + eventutils "github.com/nirmata/kube-policy/pkg/event/utils" + violationinternalinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces" + utils "github.com/nirmata/kube-policy/pkg/violation/utils" + mergetypes "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/kubernetes/scheme" - typedcc1orev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" ) -type Violations []Violation - -type Violation struct { +type builder struct { + kubeClient *kubeClient.KubeClient + controller controllerinternalinterfaces.PolicyGetter + eventBuilder eventinternalinterfaces.BuilderInternal + logger *log.Logger } -// Builder to generate violations -type Builder struct { - kubeClient *kubernetes.Clientset - policyClientset *clientset.Clientset - workqueue workqueue.RateLimitingInterface - logger *log.Logger - recorder record.EventRecorder - policyLister lister.PolicyLister - policySynced cache.InformerSynced +type Builder interface { + 
violationinternalinterfaces.ViolationGenerator + ProcessViolation(info utils.ViolationInfo) error + Patch(policy *types.Policy, updatedPolicy *types.Policy) error + IsActive(kind string, resource string) (bool, error) } -func NewViolationHelper(kubeClient *kubernetes.Clientset, policyClientSet *clientset.Clientset, logger *log.Logger, policyInformer informers.PolicyInformer) (*Builder, error) { +func NewViolationBuilder( + kubeClient *kubeClient.KubeClient, + eventBuilder internalinterfaces.BuilderInternal, + logger *log.Logger) (Builder, error) { - // Initialize Event Broadcaster - policyscheme.AddToScheme(scheme.Scheme) - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(log.Printf) - eventBroadcaster.StartRecordingToSink( - &typedcc1orev1.EventSinkImpl{ - Interface: kubeClient.CoreV1().Events("")}) - recorder := eventBroadcaster.NewRecorder( - scheme.Scheme, - v1.EventSource{Component: violationEventSource}) - // Build the builder - builder := &Builder{ - kubeClient: kubeClient, - policyClientset: policyClientSet, - workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workqueueViolationName), - logger: logger, - recorder: recorder, - policyLister: policyInformer.Lister(), - policySynced: policyInformer.Informer().HasSynced, - } + builder := &builder{} return builder, nil } -// Create Violation -> (Info) - -// Create to generate violation jsonpatch script & -// queue events to generate events -// TODO: create should validate the rule number and update the violation if one exists -func (b *Builder) Create(info Info) error { - // generate patch - // we can generate the patch as the policy resource will alwasy exist - // Apply Patch - err := b.patchViolation(info) +func (b *builder) Create(info utils.ViolationInfo) error { + err := b.ProcessViolation(info) if err != nil { return err } - - // Generate event for policy - b.workqueue.Add( - EventInfo{ - Resource: info.Policy, - Reason: info.Reason, - ResourceTarget: PolicyTarget, - }) - // Generat event for resource - b.workqueue.Add( - EventInfo{ - Kind: info.Kind, - Resource: info.Resource, - Reason: info.Reason, - ResourceTarget: ResourceTarget, - }) - return nil } -// Remove the violation -func (b *Builder) Remove(info Info) ([]byte, error) { - b.workqueue.Add(info) - return nil, nil +func (b *builder) SetController(controller controllerinternalinterfaces.PolicyGetter) { + b.controller = controller } -func (b *Builder) patchViolation(info Info) error { - // policy-controller handlers are post events - // adm-ctr will always have policy resource created - // Get Policy namespace and name - policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(info.Policy) - if err != nil { - utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", info.Policy)) - return err - } - // Try to access the policy - // Try to access the resource - // if the above resource objects have not been created then we reque the request to create the event - policy, err := b.policyLister.Policies(policyNamespace).Get(policyName) +func (b *builder) ProcessViolation(info utils.ViolationInfo) error { + // Get the policy + policy, err := b.controller.GetPolicy(info.Policy) if err != nil { utilruntime.HandleError(err) return err } - // Add violation - updatedPolicy := policy.DeepCopy() - // var update bool - // inactiveViolationindex := []int{} - updatedViolations := []types.Violation{} - // Check if the violation with the same rule exists for the same resource and rule name - for _, violation := range 
updatedPolicy.Status.Violations { + modifiedPolicy := policy.DeepCopy() + modifiedViolations := []types.Violation{} - if ok, err := b.IsActive(violation); ok { - if err != nil { - fmt.Println(err) - } - updatedViolations = append(updatedViolations, violation) - } else { - fmt.Println("Remove violation") - b.workqueue.Add( - EventInfo{ - Resource: info.Policy, - Reason: "Removing violation for rule " + info.RuleName, - ResourceTarget: PolicyTarget, - }) + for _, violation := range modifiedPolicy.PolicyViolation.Violations { + ok, err := b.IsActive(info.Kind, info.Resource) + if err != nil { + utilruntime.HandleError(err) + continue } + if !ok { + // Remove the violation + // Create a removal event + b.eventBuilder.AddEvent(eventutils.EventInfo{ + Kind: "Policy", + Resource: info.Resource, + Rule: info.Rule, + Reason: info.Reason, + Message: info.Message, + }) + continue + } + // If violation already exists for this rule, we update the violation + if violation.Kind == info.Kind && + violation.Resource == info.Resource && + violation.Rule == info.Rule { + violation.Reason = info.Reason + violation.Message = info.Message + } + modifiedViolations = append(modifiedViolations, violation) } - // Rule is updated TO-DO - // Dont validate if the resouce is active as a new Violation will not be created if it did not - updatedViolations = append(updatedViolations, - types.Violation{ - Kind: info.Kind, - Resource: info.Resource, - Rule: info.RuleName, - Reason: info.Reason, - }) - updatedPolicy.Status.Violations = updatedViolations - // Patch - return b.patch(policy, updatedPolicy) + modifiedPolicy.PolicyViolation.Violations = modifiedViolations + return b.Patch(policy, modifiedPolicy) + } -func (b *Builder) getPolicyEvent(info Info) EventInfo { - return EventInfo{Resource: info.Resource} -} - -func (b *Builder) IsActive(violation types.Violation) (bool, error) { - if ok, err := b.ValidationResourceActive(violation); !ok { +func (b *builder) IsActive(kind string, resource string) (bool, error) { + // Generate Merge Patch + _, err := b.kubeClient.GetResource(kind, resource) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to get resource %s ", resource)) return false, err } return true, nil } -func (b *Builder) ValidationResourceActive(violation types.Violation) (bool, error) { - resourceNamespace, resourceName, err := cache.SplitMetaNamespaceKey(violation.Resource) +// ProcessViolation(info utils.ViolationInfo) error +func (b *builder) Patch(policy *types.Policy, updatedPolicy *types.Policy) error { + originalData, err := json.Marshal(policy) if err != nil { - utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", violation.Resource)) - // Remove the corresponding violation - return false, err + return err } - - // Check if the corresponding resource is still present - _, err = resourceClient.GetResouce(b.kubeClient, violation.Kind, resourceNamespace, resourceName) + modifiedData, err := json.Marshal(updatedPolicy) if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to get resource %s ", violation.Resource)) - return false, err + return err } - - return true, nil -} - -func (b *Builder) patch(policy *types.Policy, updatedPolicy *types.Policy) error { - _, err := b.policyClientset.Nirmata().Policies(updatedPolicy.Namespace).UpdateStatus(updatedPolicy) + // generate merge patch + patchBytes, err := jsonpatch.CreateMergePatch(originalData, modifiedData) if err != nil { return err } + _, err = b.controller.PatchPolicy(policy.Name, mergetypes.MergePatchType, patchBytes) + if 
err != nil { + // Unable to patch + return err + } return nil } - -// Run : Initialize the worker routines to process the event creation -func (b *Builder) Run(threadiness int, stopCh <-chan struct{}) error { - defer utilruntime.HandleCrash() - defer b.workqueue.ShutDown() - log.Println("Starting violation builder") - - fmt.Println(("Wait for informer cache to sync")) - if ok := cache.WaitForCacheSync(stopCh, b.policySynced); !ok { - fmt.Println("Unable to sync the cache") - } - - log.Println("Starting workers") - for i := 0; i < threadiness; i++ { - go wait.Until(b.runWorker, time.Second, stopCh) - } - log.Println("Started workers") - <-stopCh - log.Println("Shutting down workers") - return nil -} - -func (b *Builder) runWorker() { - for b.processNextWorkItem() { - } -} - -func (b *Builder) processNextWorkItem() bool { - // get info object - obj, shutdown := b.workqueue.Get() - if shutdown { - return false - } - err := func(obj interface{}) error { - defer b.workqueue.Done(obj) - var key EventInfo - var ok bool - if key, ok = obj.(EventInfo); !ok { - b.workqueue.Forget(obj) - log.Printf("Expecting type info but got %v", obj) - return nil - } - - // Run the syncHandler, passing the resource and the policy - if err := b.syncHandler(key); err != nil { - b.workqueue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error()) - } - - return nil - }(obj) - - if err != nil { - log.Println((err)) - } - return true - -} - -// TO-DO: how to handle events if the resource has been delted, and clean the dirty object -func (b *Builder) syncHandler(key EventInfo) error { - fmt.Println(key) - // Get Policy namespace and name - namespace, name, err := cache.SplitMetaNamespaceKey(key.Resource) - if err != nil { - utilruntime.HandleError(fmt.Errorf("invalid policy key: %s", key.Resource)) - return nil - } - if key.ResourceTarget == ResourceTarget { - // Resource Event - resource, err := resourceClient.GetResouce(b.kubeClient, key.Kind, namespace, name) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry ", key.Resource)) - return err - } - b.recorder.Event(resource, v1.EventTypeNormal, violationEventResrouce, key.Reason) - } else { - // Policy Event - policy, err := b.policyLister.Policies(namespace).Get(name) - if err != nil { - // TO-DO: this scenario will not exist as the policy will always exist - // unless the namespace and resource name are invalid - utilruntime.HandleError(err) - return err - } - b.recorder.Event(policy, v1.EventTypeNormal, violationEventResrouce, key.Reason) - } - - return nil -} diff --git a/scripts/compile-image.sh b/scripts/compile-image.sh index 1f13b7efc6..f46d28bb1c 100755 --- a/scripts/compile-image.sh +++ b/scripts/compile-image.sh @@ -22,4 +22,4 @@ tag="${hub_user_name}/${project_name}:${version}" docker build --no-cache -t "${tag}" . || exit 4 echo "# Pushing image to repository..." -docker push "${tag}" || exit 5 +#docker push "${tag}" || exit 5 diff --git a/scripts/deploy-controller.sh b/scripts/deploy-controller.sh index a9c4a31581..6fd13566c4 100755 --- a/scripts/deploy-controller.sh +++ b/scripts/deploy-controller.sh @@ -34,11 +34,14 @@ if [ -z "${namespace}" ]; then # controller should be launched locally ${certsGenerator} "--service=${service_name}" "--serverIp=${serverIp}" || exit 2 echo "Applying webhook..." 
- kubectl delete -f crd/MutatingWebhookConfiguration_local.yaml - kubectl create -f crd/MutatingWebhookConfiguration_local.yaml || exit 3 + kubectl delete -f definitions/MutatingWebhookConfiguration_debug.yaml + kubectl create -f definitions/MutatingWebhookConfiguration_debug.yaml || exit 3 - kubectl delete -f crd/crd.yaml - kubectl create -f crd/crd.yaml || exit 3 +# kubectl delete -f definitions/PolicyValidationConfiguration.yaml +# kubectl create -f definitions/PolicyValidationConfiguration.yaml || exit 3 + + kubectl delete -f definitions/install.yaml + kubectl create -f definitions/install.yaml || exit 3 echo -e "\n### You can build and run kube-policy project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)." diff --git a/scripts/update-codegen.sh b/scripts/update-codegen.sh index 331be38ff7..ed6e2212ff 100755 --- a/scripts/update-codegen.sh +++ b/scripts/update-codegen.sh @@ -5,7 +5,7 @@ set -o pipefail # get nirmata root NIRMATA_DIR=$(dirname ${BASH_SOURCE})/.. -NIRMATA_ROOT=$(readlink -f ${NIRMATA_DIR}) +NIRMATA_ROOT=$(greadlink -f ${NIRMATA_DIR}) # get relative path to code generation script CODEGEN_PKG=${NIRMATA_DIR}/vendor/k8s.io/code-generator diff --git a/webhooks/admission.go b/webhooks/admission.go index 9dcca827ef..8e7f94bd61 100644 --- a/webhooks/admission.go +++ b/webhooks/admission.go @@ -2,34 +2,14 @@ package webhooks import ( "github.com/minio/minio/pkg/wildcard" + kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) -var supportedKinds = [...]string{ - "ConfigMap", - "CronJob", - "DaemonSet", - "Deployment", - "Endpoints", - "HorizontalPodAutoscaler", - "Ingress", - "Job", - "LimitRange", - "Namespace", - "NetworkPolicy", - "PersistentVolumeClaim", - "PodDisruptionBudget", - "PodTemplate", - "ResourceQuota", - "Secret", - "Service", - "StatefulSet", -} - func kindIsSupported(kind string) bool { - for _, k := range supportedKinds { + for _, k := range kubeclient.GetSupportedResourceTypes() { if k == kind { return true } diff --git a/webhooks/mutation.go b/webhooks/mutation.go index cb12602bcc..5b29b031be 100644 --- a/webhooks/mutation.go +++ b/webhooks/mutation.go @@ -6,7 +6,7 @@ import ( "log" "os" - controller "github.com/nirmata/kube-policy/controller" + controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" v1beta1 "k8s.io/api/admission/v1beta1" @@ -18,13 +18,13 @@ import ( // business logic for resource mutation type MutationWebhook struct { kubeclient *kubeclient.KubeClient - controller *controller.PolicyController + controller controllerinternalinterfaces.PolicyGetter registration *MutationWebhookRegistration logger *log.Logger } // Registers mutation webhook in cluster and creates object for this webhook -func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, controller *controller.PolicyController, logger *log.Logger) (*MutationWebhook, error) { +func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, controller controllerinternalinterfaces.PolicyGetter, logger *log.Logger) (*MutationWebhook, error) { if clientConfig == nil || kubeclient == nil || controller == nil { return nil, errors.New("Some parameters 
are not set") } @@ -55,7 +55,7 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad mw.logger.Printf("AdmissionReview for Kind=%v, Namespace=%v Name=%v UID=%v patchOperation=%v UserInfo=%v", request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation, request.UserInfo) - policies := mw.controller.GetPolicies() + policies, _ := mw.controller.GetPolicies() if len(policies) == 0 { return nil } From d43e6b81a01bb76fc44319ddd2f8414928efc97a Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Mon, 6 May 2019 09:27:48 -0700 Subject: [PATCH 02/17] correct changes --- definitions/MutatingWebhookConfiguration_debug.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/definitions/MutatingWebhookConfiguration_debug.yaml b/definitions/MutatingWebhookConfiguration_debug.yaml index a3d7cfdcba..713fb9cb40 100644 --- a/definitions/MutatingWebhookConfiguration_debug.yaml +++ b/definitions/MutatingWebhookConfiguration_debug.yaml @@ -10,7 +10,7 @@ webhooks: - name: webhook.nirmata.kube-policy clientConfig: url: "https://localhost/mutate" - caBundle: MIIC5zCCAc+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5pa3ViZUNBMB4XDTE5MDQxMDIxMjM1OFoXDTI5MDQwODIxMjM1OFowFTETMBEGA1UEAxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALV1uHt50QVtVnGiGc4nrMFhsuT+R/KpU0qq3hNV6xPWiBcfUZNqz0iEAbh9YpZ3np2d2gHniBkbUfZwcI541SYacwPfVkdnBMKvVqBYGk1wz2GVyd8fas6guerchmUO7XtU5VfGr9TbKGp9vo/d+NWwGlp9/x7Ni8rnK1D1oMoFmQ02Q6N0xdsBiCEs+MUVqUlu/Xtx+rITD+fYOWpB+z1+KOEysFGaLl8nayxanhCqcRR+T8SeJ+hXIDHNoOqFDf1Y7XmzLtTlynoVNLh0gMy4cWgoFYuklz7JlYvpLJFt9cSziwIKfG56T6RQZ77z5w4TO5bfTvYlHCztY5zSiNkCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCqaqjPJTDU0U2pkewyCGFGVAzdnDgGozjeCP2rRojhJOiOMaBBgVIpJxCRz2BKfXHW+B4hKCli08t7lPe1ab12hM6wmlLxkkmbjxW4H9Coo/OAaoIz6bfmMhBUjl6tuTsgTkHjarG7W12rFb7Xkj6zSd17EJsUoRx8t1GD65HXFindEMvjEGQ4MPfX3XqLT1NNIsFHF6e7RXpYPWQ/eT3Z/9ia+7vZzXzEmjXYedEeggyqg6QrL+DX3BQF8TcTLmxSRA6MMqOXEjlmU85TOTjP39PBhzCz78m8ZSM9KHQyjOv1xhR0CxZMyxfiN2bvA3aJAtMLOwLjKIYAkLm3W2hp + caBundle: ${CA_BUNDLE} rules: - operations: [ "CREATE" ] resources: [ "*/*" ] From bd1a84190b0f4ce0cb2b794147c95abc7aa3a246 Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Mon, 6 May 2019 10:07:47 -0700 Subject: [PATCH 03/17] undo corrections --- definitions/install.yaml | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/definitions/install.yaml b/definitions/install.yaml index c4081e8f1d..ded3a1e38c 100644 --- a/definitions/install.yaml +++ b/definitions/install.yaml @@ -8,7 +8,7 @@ spec: - name: v1alpha1 served: true storage: true - scope: Namespaced + scope: Cluster names: kind: Policy plural: policies @@ -34,7 +34,6 @@ spec: type: object required: - resource - - name properties: resource: type: object @@ -62,8 +61,6 @@ spec: - Secret - Service - StatefulSet - name: - type: string selector: properties: matchLabels: @@ -104,8 +101,6 @@ spec: - remove value: AnyValue: {} - name: - type: string configMapGenerator: type: object required: From 8af86ab002e35797a109489d09c424911172a787 Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Mon, 6 May 2019 10:30:44 -0700 Subject: [PATCH 04/17] rename file, move append events --- pkg/violation/{violation.go => builder.go} | 23 +++++++++++++--------- 1 file changed, 14 insertions(+), 9 deletions(-) rename pkg/violation/{violation.go => builder.go} (89%) diff --git a/pkg/violation/violation.go b/pkg/violation/builder.go similarity index 89% rename from 
pkg/violation/violation.go rename to pkg/violation/builder.go index 860a12366d..98b66ab4b5 100644 --- a/pkg/violation/violation.go +++ b/pkg/violation/builder.go @@ -63,8 +63,16 @@ func (b *builder) ProcessViolation(info utils.ViolationInfo) error { modifiedPolicy := policy.DeepCopy() modifiedViolations := []types.Violation{} + // Create new violation + newViolation := types.Violation{ + Kind: info.Kind, + Resource: info.Resource, + Rule: info.Rule, + Reason: info.Reason, + Message: info.Message, + } for _, violation := range modifiedPolicy.PolicyViolation.Violations { - ok, err := b.IsActive(info.Kind, info.Resource) + ok, err := b.IsActive(info.Kind, violation.Resource) if err != nil { utilruntime.HandleError(err) continue @@ -74,7 +82,7 @@ func (b *builder) ProcessViolation(info utils.ViolationInfo) error { // Create a removal event b.eventBuilder.AddEvent(eventutils.EventInfo{ Kind: "Policy", - Resource: info.Resource, + Resource: info.Policy, Rule: info.Rule, Reason: info.Reason, Message: info.Message, @@ -82,14 +90,10 @@ func (b *builder) ProcessViolation(info utils.ViolationInfo) error { continue } // If violation already exists for this rule, we update the violation - if violation.Kind == info.Kind && - violation.Resource == info.Resource && - violation.Rule == info.Rule { - violation.Reason = info.Reason - violation.Message = info.Message - } - modifiedViolations = append(modifiedViolations, violation) + //TODO: update violation, instead of re-creating one every time } + modifiedViolations = append(modifiedViolations, newViolation) + modifiedPolicy.PolicyViolation.Violations = modifiedViolations return b.Patch(policy, modifiedPolicy) @@ -122,6 +126,7 @@ func (b *builder) Patch(policy *types.Policy, updatedPolicy *types.Policy) error } _, err = b.controller.PatchPolicy(policy.Name, mergetypes.MergePatchType, patchBytes) if err != nil { + // Unable to patch return err } From 644d044fe670fb3f376ffd75876295204756a62f Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Mon, 6 May 2019 12:08:31 -0700 Subject: [PATCH 05/17] correct arguments to violation builder --- kubeclient/kubeclient.go | 3 +-- pkg/violation/builder.go | 6 +++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/kubeclient/kubeclient.go b/kubeclient/kubeclient.go index 2068f04217..978fb75b1e 100644 --- a/kubeclient/kubeclient.go +++ b/kubeclient/kubeclient.go @@ -188,7 +188,7 @@ func (kc *KubeClient) createSecretAfterNamespaceIsCreated(secret v1.Secret, name var rMapper = map[string]getter{ "ConfigMap": configMapGetter, "Pods": podsGetter, - "Deploymeny": deploymentGetter, + "Deployment": deploymentGetter, "CronJob": cronJobGetter, "Endpoints": endpointsbGetter, "HorizontalPodAutoscaler": horizontalPodAutoscalerGetter, @@ -215,7 +215,6 @@ func (kc *KubeClient) GetResource(kind string, resource string) (runtime.Object, utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", resource)) return nil, err } - // runtime.Object -> Actual object return rMapper[kind](kc.client, namespace, name) } diff --git a/pkg/violation/builder.go b/pkg/violation/builder.go index 98b66ab4b5..abeaf9a6b9 100644 --- a/pkg/violation/builder.go +++ b/pkg/violation/builder.go @@ -37,7 +37,11 @@ func NewViolationBuilder( eventBuilder internalinterfaces.BuilderInternal, logger *log.Logger) (Builder, error) { - builder := &builder{} + builder := &builder{ + kubeClient: kubeClient, + eventBuilder: eventBuilder, + logger: logger, + } return builder, nil } From 9d4541ac02f1c0e4dd68b41c2917e4f97906b017 Mon Sep 17 00:00:00 2001 From: 
shivdudhani Date: Mon, 6 May 2019 17:03:37 -0700 Subject: [PATCH 06/17] move policy violation type inside status subresource --- controller/controller.go | 5 ++++ .../controller_interfaces.go | 1 + pkg/apis/policy/v1alpha1/types.go | 28 +++++++++---------- pkg/violation/builder.go | 10 ++++--- 4 files changed, 25 insertions(+), 19 deletions(-) diff --git a/controller/controller.go b/controller/controller.go index 496f745314..3145b07785 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -204,3 +204,8 @@ func (c *policyController) getPolicyInterface(namespace string) lister.PolicyNam func (c *policyController) PatchPolicy(policy string, pt mergetypes.PatchType, data []byte) (*types.Policy, error) { return c.policiesInterface.Patch(policy, pt, data) } + +func (c *policyController) UpdatePolicyViolations(updatedPolicy *types.Policy) error { + _, err := c.policiesInterface.UpdateStatus(updatedPolicy) + return err +} diff --git a/controller/internalinterfaces/controller_interfaces.go b/controller/internalinterfaces/controller_interfaces.go index 791dae6ea5..2352077394 100755 --- a/controller/internalinterfaces/controller_interfaces.go +++ b/controller/internalinterfaces/controller_interfaces.go @@ -12,6 +12,7 @@ type PolicyGetter interface { GetPolicy(name string) (*policytypes.Policy, error) GetCacheInformerSync() cache.InformerSynced PatchPolicy(policy string, pt types.PatchType, data []byte) (*policytypes.Policy, error) + UpdatePolicyViolations(updatedPolicy *policytypes.Policy) error Run(stopCh <-chan struct{}) LogPolicyError(name, text string) LogPolicyInfo(name, text string) diff --git a/pkg/apis/policy/v1alpha1/types.go b/pkg/apis/policy/v1alpha1/types.go index 97e2214d52..18dd87909c 100644 --- a/pkg/apis/policy/v1alpha1/types.go +++ b/pkg/apis/policy/v1alpha1/types.go @@ -12,20 +12,8 @@ import ( type Policy struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` - Spec PolicySpec `json:"spec"` - Status PolicyStatus `json:"status"` - PolicyViolation PolicyViolations `json:"policyviolation,omitempty"` -} - -type PolicyViolations struct { - Violations []Violation `json:"violations,omitempty"` -} -type Violation struct { - Kind string `json:"kind,omitempty"` - Resource string `json:"resource,omitempty"` - Rule string `json:"rule,omitempty"` - Reason string `json:"reason,omitempty"` - Message string `json:"message,omitempty` + Spec PolicySpec `json:"spec"` + Status PolicyStatus `json:"status"` } // Specification of the Policy. 
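A condensed sketch of the violation data as it is shaped once this commit moves it under the status subresource; it is an assumption-free restatement of the fields visible in the diff, except that the Message struct tag is written with its closing quote (the tag in this commit's hunks, `json:"message,omitempty`, is unterminated, which go vet flags as a malformed struct tag).

// Sketch only: fields condensed to what this commit's diff shows.
package v1alpha1

// PolicyStatus now owns the violations, so the controller can persist them
// through the status subresource instead of patching the whole policy object.
type PolicyStatus struct {
	Logs       []string    `json:"log"`
	Violations []Violation `json:"violations,omitempty"`
}

// Violation records a single rule failure against a resource.
type Violation struct {
	Kind     string `json:"kind,omitempty"`
	Resource string `json:"resource,omitempty"`
	Rule     string `json:"rule,omitempty"`
	Reason   string `json:"reason,omitempty"`
	Message  string `json:"message,omitempty"`
}

The matching controller change below exposes UpdatePolicyViolations, which just calls UpdateStatus on the modified policy, so the violation builder no longer needs to build and apply a merge patch itself.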
@@ -86,7 +74,8 @@ type PolicyCopyFrom struct { // Contains logs about policy application type PolicyStatus struct { - Logs []string `json:"log"` + Logs []string `json:"log"` + Violations []Violation `json:"violations,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object @@ -97,3 +86,12 @@ type PolicyList struct { metav1.ListMeta `json:"metadata"` Items []Policy `json:"items"` } + +// Violation for the policy +type Violation struct { + Kind string `json:"kind,omitempty"` + Resource string `json:"resource,omitempty"` + Rule string `json:"rule,omitempty"` + Reason string `json:"reason,omitempty"` + Message string `json:"message,omitempty` +} diff --git a/pkg/violation/builder.go b/pkg/violation/builder.go index abeaf9a6b9..09ae07c4e0 100644 --- a/pkg/violation/builder.go +++ b/pkg/violation/builder.go @@ -75,7 +75,8 @@ func (b *builder) ProcessViolation(info utils.ViolationInfo) error { Reason: info.Reason, Message: info.Message, } - for _, violation := range modifiedPolicy.PolicyViolation.Violations { + + for _, violation := range modifiedPolicy.Status.Violations { ok, err := b.IsActive(info.Kind, violation.Resource) if err != nil { utilruntime.HandleError(err) @@ -98,9 +99,10 @@ func (b *builder) ProcessViolation(info utils.ViolationInfo) error { } modifiedViolations = append(modifiedViolations, newViolation) - modifiedPolicy.PolicyViolation.Violations = modifiedViolations - return b.Patch(policy, modifiedPolicy) - + modifiedPolicy.Status.Violations = modifiedViolations + // return b.Patch(policy, modifiedPolicy) + // Violations are part of the status sub resource, so we can use the Update Status api instead of updating the policy object + return b.controller.UpdatePolicyViolations(modifiedPolicy) } func (b *builder) IsActive(kind string, resource string) (bool, error) { From 2b4ac9d07b2080f737c17e04822e7335650838b4 Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Tue, 7 May 2019 13:26:54 -0700 Subject: [PATCH 07/17] code review changes --- controller/controller.go | 40 +++++++++---------- .../controller_interfaces.go | 11 +++-- kubeclient/kubeclient.go | 4 +- pkg/event/builder.go | 10 ++--- .../builder_interfaces.go | 6 +-- pkg/violation/builder.go | 18 ++++----- .../interfaces/violation_interfaces.go | 11 +++++ .../violation_interfaces.go | 11 ----- pkg/violation/utils/util.go | 10 ++--- webhooks/admission.go | 2 +- webhooks/mutation.go | 13 ++++-- 11 files changed, 70 insertions(+), 66 deletions(-) rename controller/{internalinterfaces => interfaces}/controller_interfaces.go (69%) rename pkg/event/{internalinterfaces => interfaces}/builder_interfaces.go (52%) create mode 100644 pkg/violation/interfaces/violation_interfaces.go delete mode 100644 pkg/violation/internalinterfaces/violation_interfaces.go diff --git a/controller/controller.go b/controller/controller.go index 3145b07785..3ccde81b97 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -8,7 +8,7 @@ import ( "sort" "time" - internalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeClient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" @@ -16,10 +16,10 @@ import ( informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions" lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" event 
"github.com/nirmata/kube-policy/pkg/event" - eventinternalinterfaces "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" + eventinterfaces "github.com/nirmata/kube-policy/pkg/event/interfaces" eventutils "github.com/nirmata/kube-policy/pkg/event/utils" violation "github.com/nirmata/kube-policy/pkg/violation" - violationinternalinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces" + violationinterfaces "github.com/nirmata/kube-policy/pkg/violation/interfaces" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" mergetypes "k8s.io/apimachinery/pkg/types" @@ -30,11 +30,9 @@ import ( // PolicyController API type PolicyController interface { - internalinterfaces.PolicyGetter - createPolicyHandler(resource interface{}) - updatePolicyHandler(oldResource, newResource interface{}) - deletePolicyHandler(resource interface{}) - getResourceKey(resource interface{}) string + controllerinterfaces.PolicyGetter + controllerinterfaces.PolicyHandlers + Run(stopCh <-chan struct{}) } //policyController for CRD @@ -43,8 +41,8 @@ type policyController struct { policyLister lister.PolicyLister policiesInterface policies.PolicyInterface logger *log.Logger - violationBuilder violationinternalinterfaces.ViolationGenerator - eventBuilder eventinternalinterfaces.BuilderInternal + violationBuilder violationinterfaces.ViolationGenerator + eventBuilder eventinterfaces.BuilderInternal } // NewPolicyController from cmd args @@ -83,9 +81,9 @@ func NewPolicyController(config *rest.Config, logger *log.Logger, kubeClient *ku eventBuilder: eventBuilder, } policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.createPolicyHandler, - UpdateFunc: controller.updatePolicyHandler, - DeleteFunc: controller.deletePolicyHandler, + AddFunc: controller.CreatePolicyHandler, + UpdateFunc: controller.UpdatePolicyHandler, + DeleteFunc: controller.DeletePolicyHandler, }) // Set the controller eventBuilder.SetController(controller) @@ -164,23 +162,23 @@ func (c *policyController) addPolicyLog(name, text string) { } } -func (c *policyController) createPolicyHandler(resource interface{}) { - key := c.getResourceKey(resource) +func (c *policyController) CreatePolicyHandler(resource interface{}) { + key := c.GetResourceKey(resource) c.logger.Printf("Policy created: %s", key) } -func (c *policyController) updatePolicyHandler(oldResource, newResource interface{}) { - oldKey := c.getResourceKey(oldResource) - newKey := c.getResourceKey(newResource) +func (c *policyController) UpdatePolicyHandler(oldResource, newResource interface{}) { + oldKey := c.GetResourceKey(oldResource) + newKey := c.GetResourceKey(newResource) c.logger.Printf("Policy %s updated to %s", oldKey, newKey) } -func (c *policyController) deletePolicyHandler(resource interface{}) { - key := c.getResourceKey(resource) +func (c *policyController) DeletePolicyHandler(resource interface{}) { + key := c.GetResourceKey(resource) c.logger.Printf("Policy deleted: %s", key) } -func (c *policyController) getResourceKey(resource interface{}) string { +func (c *policyController) GetResourceKey(resource interface{}) string { if key, err := cache.MetaNamespaceKeyFunc(resource); err != nil { c.logger.Fatalf("Error retrieving policy key: %v", err) } else { diff --git a/controller/internalinterfaces/controller_interfaces.go b/controller/interfaces/controller_interfaces.go similarity index 69% rename from controller/internalinterfaces/controller_interfaces.go rename to 
controller/interfaces/controller_interfaces.go index 2352077394..8b3911bab2 100755 --- a/controller/internalinterfaces/controller_interfaces.go +++ b/controller/interfaces/controller_interfaces.go @@ -1,4 +1,4 @@ -package internalinterfaces +package interfaces import ( policytypes "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" @@ -6,14 +6,19 @@ import ( "k8s.io/client-go/tools/cache" ) -// PolicyGetter interface for external API type PolicyGetter interface { GetPolicies() ([]policytypes.Policy, error) GetPolicy(name string) (*policytypes.Policy, error) GetCacheInformerSync() cache.InformerSynced PatchPolicy(policy string, pt types.PatchType, data []byte) (*policytypes.Policy, error) UpdatePolicyViolations(updatedPolicy *policytypes.Policy) error - Run(stopCh <-chan struct{}) LogPolicyError(name, text string) LogPolicyInfo(name, text string) } + +type PolicyHandlers interface { + CreatePolicyHandler(resource interface{}) + UpdatePolicyHandler(oldResource, newResource interface{}) + DeletePolicyHandler(resource interface{}) + GetResourceKey(resource interface{}) string +} diff --git a/kubeclient/kubeclient.go b/kubeclient/kubeclient.go index 978fb75b1e..36436d608d 100644 --- a/kubeclient/kubeclient.go +++ b/kubeclient/kubeclient.go @@ -218,8 +218,8 @@ func (kc *KubeClient) GetResource(kind string, resource string) (runtime.Object, return rMapper[kind](kc.client, namespace, name) } -//GetSupportedResourceTypes provides list of supported types -func GetSupportedResourceTypes() (rTypes []string) { +//GetSupportedKinds provides list of supported types +func GetSupportedKinds() (rTypes []string) { for k := range rMapper { rTypes = append(rTypes, k) } diff --git a/pkg/event/builder.go b/pkg/event/builder.go index f43231b42b..93db5795ec 100644 --- a/pkg/event/builder.go +++ b/pkg/event/builder.go @@ -6,11 +6,11 @@ import ( "log" "time" - controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeClient "github.com/nirmata/kube-policy/kubeclient" "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" - "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" + eventinterfaces "github.com/nirmata/kube-policy/pkg/event/interfaces" utils "github.com/nirmata/kube-policy/pkg/event/utils" v1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/runtime" @@ -24,7 +24,7 @@ import ( type builder struct { kubeClient *kubeClient.KubeClient - controller controllerinternalinterfaces.PolicyGetter + controller controllerinterfaces.PolicyGetter workqueue workqueue.RateLimitingInterface recorder record.EventRecorder logger *log.Logger @@ -32,7 +32,7 @@ type builder struct { } type Builder interface { - internalinterfaces.BuilderInternal + eventinterfaces.BuilderInternal SyncHandler(key utils.EventInfo) error ProcessNextWorkItem() bool RunWorker() @@ -70,7 +70,7 @@ func initWorkqueue() workqueue.RateLimitingInterface { return workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), utils.EventWorkQueueName) } -func (b *builder) SetController(controller controllerinternalinterfaces.PolicyGetter) { +func (b *builder) SetController(controller controllerinterfaces.PolicyGetter) { b.controller = controller b.policySynced = controller.GetCacheInformerSync() } diff --git a/pkg/event/internalinterfaces/builder_interfaces.go b/pkg/event/interfaces/builder_interfaces.go similarity index 
52% rename from pkg/event/internalinterfaces/builder_interfaces.go rename to pkg/event/interfaces/builder_interfaces.go index b020824df5..94a685f719 100644 --- a/pkg/event/internalinterfaces/builder_interfaces.go +++ b/pkg/event/interfaces/builder_interfaces.go @@ -1,12 +1,12 @@ -package internalinterfaces +package interfaces import ( - internalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" utils "github.com/nirmata/kube-policy/pkg/event/utils" ) type BuilderInternal interface { - SetController(controller internalinterfaces.PolicyGetter) + SetController(controller controllerinterfaces.PolicyGetter) Run(threadiness int, stopCh <-chan struct{}) error AddEvent(info utils.EventInfo) } diff --git a/pkg/violation/builder.go b/pkg/violation/builder.go index 09ae07c4e0..26acc643c6 100644 --- a/pkg/violation/builder.go +++ b/pkg/violation/builder.go @@ -6,13 +6,12 @@ import ( "log" jsonpatch "github.com/evanphx/json-patch" - controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeClient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" - eventinternalinterfaces "github.com/nirmata/kube-policy/pkg/event/internalinterfaces" + eventinterfaces "github.com/nirmata/kube-policy/pkg/event/interfaces" eventutils "github.com/nirmata/kube-policy/pkg/event/utils" - violationinternalinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces" + violationinterfaces "github.com/nirmata/kube-policy/pkg/violation/interfaces" utils "github.com/nirmata/kube-policy/pkg/violation/utils" mergetypes "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -20,13 +19,13 @@ import ( type builder struct { kubeClient *kubeClient.KubeClient - controller controllerinternalinterfaces.PolicyGetter - eventBuilder eventinternalinterfaces.BuilderInternal + controller controllerinterfaces.PolicyGetter + eventBuilder eventinterfaces.BuilderInternal logger *log.Logger } type Builder interface { - violationinternalinterfaces.ViolationGenerator + violationinterfaces.ViolationGenerator ProcessViolation(info utils.ViolationInfo) error Patch(policy *types.Policy, updatedPolicy *types.Policy) error IsActive(kind string, resource string) (bool, error) @@ -34,7 +33,7 @@ type Builder interface { func NewViolationBuilder( kubeClient *kubeClient.KubeClient, - eventBuilder internalinterfaces.BuilderInternal, + eventBuilder eventinterfaces.BuilderInternal, logger *log.Logger) (Builder, error) { builder := &builder{ @@ -53,7 +52,7 @@ func (b *builder) Create(info utils.ViolationInfo) error { return nil } -func (b *builder) SetController(controller controllerinternalinterfaces.PolicyGetter) { +func (b *builder) SetController(controller controllerinterfaces.PolicyGetter) { b.controller = controller } @@ -115,7 +114,6 @@ func (b *builder) IsActive(kind string, resource string) (bool, error) { return true, nil } -// ProcessViolation(info utils.ViolationInfo) error func (b *builder) Patch(policy *types.Policy, updatedPolicy *types.Policy) error { originalData, err := json.Marshal(policy) if err != nil { diff --git a/pkg/violation/interfaces/violation_interfaces.go b/pkg/violation/interfaces/violation_interfaces.go new file mode 100644 index 0000000000..f74cd28c6f --- 
/dev/null +++ b/pkg/violation/interfaces/violation_interfaces.go @@ -0,0 +1,11 @@ +package interfaces + +import ( + controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" + utils "github.com/nirmata/kube-policy/pkg/violation/utils" +) + +type ViolationGenerator interface { + SetController(controller controllerinterfaces.PolicyGetter) + Create(info utils.ViolationInfo) error +} diff --git a/pkg/violation/internalinterfaces/violation_interfaces.go b/pkg/violation/internalinterfaces/violation_interfaces.go deleted file mode 100644 index 7863bc8906..0000000000 --- a/pkg/violation/internalinterfaces/violation_interfaces.go +++ /dev/null @@ -1,11 +0,0 @@ -package internalinterfaces - -import ( - "github.com/nirmata/kube-policy/controller/internalinterfaces" - utils "github.com/nirmata/kube-policy/pkg/violation/utils" -) - -type ViolationGenerator interface { - SetController(controller internalinterfaces.PolicyGetter) - Create(info utils.ViolationInfo) error -} diff --git a/pkg/violation/utils/util.go b/pkg/violation/utils/util.go index b200752345..1d3db344f4 100644 --- a/pkg/violation/utils/util.go +++ b/pkg/violation/utils/util.go @@ -1,10 +1,8 @@ package utils +import policytype "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + type ViolationInfo struct { - Kind string - Resource string - Policy string - Rule string - Reason string - Message string + Policy string + policytype.Violation } diff --git a/webhooks/admission.go b/webhooks/admission.go index 8e7f94bd61..652dcc3c34 100644 --- a/webhooks/admission.go +++ b/webhooks/admission.go @@ -9,7 +9,7 @@ import ( ) func kindIsSupported(kind string) bool { - for _, k := range kubeclient.GetSupportedResourceTypes() { + for _, k := range kubeclient.GetSupportedKinds() { if k == kind { return true } diff --git a/webhooks/mutation.go b/webhooks/mutation.go index 5b29b031be..ad651c329d 100644 --- a/webhooks/mutation.go +++ b/webhooks/mutation.go @@ -6,11 +6,12 @@ import ( "log" "os" - controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces" + controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" v1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" rest "k8s.io/client-go/rest" ) @@ -18,13 +19,13 @@ import ( // business logic for resource mutation type MutationWebhook struct { kubeclient *kubeclient.KubeClient - controller controllerinternalinterfaces.PolicyGetter + controller controllerinterfaces.PolicyGetter registration *MutationWebhookRegistration logger *log.Logger } // Registers mutation webhook in cluster and creates object for this webhook -func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, controller controllerinternalinterfaces.PolicyGetter, logger *log.Logger) (*MutationWebhook, error) { +func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, controller controllerinterfaces.PolicyGetter, logger *log.Logger) (*MutationWebhook, error) { if clientConfig == nil || kubeclient == nil || controller == nil { return nil, errors.New("Some parameters are not set") } @@ -55,7 +56,11 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad mw.logger.Printf("AdmissionReview for Kind=%v, Namespace=%v Name=%v UID=%v patchOperation=%v UserInfo=%v", request.Kind.Kind, 
request.Namespace, request.Name, request.UID, request.Operation, request.UserInfo) - policies, _ := mw.controller.GetPolicies() + policies, err := mw.controller.GetPolicies() + if err != nil { + utilruntime.HandleError(err) + return nil + } if len(policies) == 0 { return nil } From 0c744db0e996ca006ce209920a0f097939553e2b Mon Sep 17 00:00:00 2001 From: shuting Date: Tue, 7 May 2019 16:50:39 -0700 Subject: [PATCH 08/17] move webhooks/patches.go webhooks/utils.go to pkg/policymanager/ --- {webhooks => pkg/policymanager}/patches.go | 12 +++++- .../policymanager}/patches_test.go | 33 ++++++++------ {webhooks => pkg/policymanager}/utils.go | 14 +++--- {webhooks => pkg/policymanager}/utils_test.go | 2 +- pkg/policymanager/validate.go | 43 +++++++++++++++++++ webhooks/admission.go | 41 +----------------- webhooks/mutation.go | 34 ++++++--------- 7 files changed, 96 insertions(+), 83 deletions(-) rename {webhooks => pkg/policymanager}/patches.go (85%) rename {webhooks => pkg/policymanager}/patches_test.go (72%) rename {webhooks => pkg/policymanager}/utils.go (77%) rename {webhooks => pkg/policymanager}/utils_test.go (96%) create mode 100644 pkg/policymanager/validate.go diff --git a/webhooks/patches.go b/pkg/policymanager/patches.go similarity index 85% rename from webhooks/patches.go rename to pkg/policymanager/patches.go index 8e7e09cb27..863873e732 100644 --- a/webhooks/patches.go +++ b/pkg/policymanager/patches.go @@ -1,4 +1,4 @@ -package webhooks +package policymanager import ( "encoding/json" @@ -20,6 +20,15 @@ const ( type PatchBytes []byte +func GetPolicyPatchingSets(policy types.Policy) PatchingSets { + // failurePolicy property is the only available way for now to define behavior on patching error. + // TODO: define new failurePolicy values specific for patching and other policy features. + if policy.Spec.FailurePolicy != nil && *policy.Spec.FailurePolicy == "continueOnError" { + return PatchingSetsContinueAlways + } + return PatchingSetsDefault +} + // Test patches on given document according to given sets. // Returns array from separate patches that can be applied to the document // Returns error ONLY in case when creation of resource should be denied. 
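A short usage sketch of the helpers this commit exports from pkg/policymanager. Only GetPolicyPatchingSets, ProcessPatches, PatchBytes and the types package come from the diff; the function name and variables around them are placeholders, and the call pattern mirrors what webhooks/mutation.go does later in this same commit. (Later commits in this series rename the package again, to pkg/mutation and then pkg/policyengine/mutation, but the calls are unchanged.)

// Hypothetical caller sketch, assuming the signatures shown in this commit.
// GetPolicyPatchingSets derives error handling from spec.failurePolicy, and
// ProcessPatches returns one JSON patch per rule patch that applied cleanly,
// erroring only when creation of the resource should be denied.
package example

import (
	"log"

	types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
	policymanager "github.com/nirmata/kube-policy/pkg/policymanager"
)

func applyRulePatches(policy types.Policy, rawResource []byte) ([]policymanager.PatchBytes, error) {
	sets := policymanager.GetPolicyPatchingSets(policy)

	var all []policymanager.PatchBytes
	for idx, rule := range policy.Spec.Rules {
		patches, err := policymanager.ProcessPatches(rule.Patches, rawResource, sets)
		if err != nil {
			// Only returned when the request should be denied.
			return nil, err
		}
		log.Printf("rule %d produced %d patches", idx, len(patches))
		all = append(all, patches...)
	}
	return all, nil
}

Moving these helpers out of the webhooks package is what lets both the admission webhook and, in the later PolicyEngine commit, the background controller reuse the same patching logic.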
@@ -27,7 +36,6 @@ func ProcessPatches(patches []types.PolicyPatch, originalDocument []byte, sets P if len(originalDocument) == 0 { return nil, errors.New("Source document for patching is empty") } - var appliedPatches []PatchBytes patchedDocument := originalDocument for _, patch := range patches { diff --git a/webhooks/patches_test.go b/pkg/policymanager/patches_test.go similarity index 72% rename from webhooks/patches_test.go rename to pkg/policymanager/patches_test.go index 0c1c29d6be..40fcc14286 100644 --- a/webhooks/patches_test.go +++ b/pkg/policymanager/patches_test.go @@ -1,10 +1,9 @@ -package webhooks_test +package policymanager import ( - "gotest.tools/assert" "testing" - "github.com/nirmata/kube-policy/webhooks" + "gotest.tools/assert" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" ) @@ -36,7 +35,7 @@ const endpointsDocument string = `{ func TestProcessPatches_EmptyPatches(t *testing.T) { var empty []types.PolicyPatch - patches, err := webhooks.ProcessPatches(empty, []byte(endpointsDocument), webhooks.PatchingSetsDefault) + patches, err := ProcessPatches(empty, []byte(endpointsDocument), PatchingSetsDefault) assert.NilError(t, err) assert.Assert(t, len(patches) == 0) } @@ -52,13 +51,13 @@ func makeAddIsMutatedLabelPatch() types.PolicyPatch { func TestProcessPatches_EmptyDocument(t *testing.T) { var patches []types.PolicyPatch patches = append(patches, makeAddIsMutatedLabelPatch()) - patchesBytes, err := webhooks.ProcessPatches(patches, nil, webhooks.PatchingSetsDefault) + patchesBytes, err := ProcessPatches(patches, nil, PatchingSetsDefault) assert.Assert(t, err != nil) assert.Assert(t, len(patchesBytes) == 0) } func TestProcessPatches_AllEmpty(t *testing.T) { - patchesBytes, err := webhooks.ProcessPatches(nil, nil, webhooks.PatchingSetsDefault) + patchesBytes, err := ProcessPatches(nil, nil, PatchingSetsDefault) assert.Assert(t, err != nil) assert.Assert(t, len(patchesBytes) == 0) } @@ -67,7 +66,7 @@ func TestProcessPatches_AddPathDoesntExist_StopOnError(t *testing.T) { patch := makeAddIsMutatedLabelPatch() patch.Path = "/metadata/additional/is-mutated" patches := []types.PolicyPatch{patch} - patchesBytes, err := webhooks.ProcessPatches(patches, []byte(endpointsDocument), webhooks.PatchingSetsStopOnError) + patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsStopOnError) assert.Assert(t, err != nil) assert.Assert(t, len(patchesBytes) == 0) } @@ -76,7 +75,7 @@ func TestProcessPatches_AddPathDoesntExist_ContinueOnError(t *testing.T) { patch := makeAddIsMutatedLabelPatch() patch.Path = "/metadata/additional/is-mutated" patches := []types.PolicyPatch{patch} - patchesBytes, err := webhooks.ProcessPatches(patches, []byte(endpointsDocument), webhooks.PatchingSetsContinueAlways) + patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsContinueAlways) assert.NilError(t, err) assert.Assert(t, len(patchesBytes) == 0) } @@ -84,7 +83,7 @@ func TestProcessPatches_AddPathDoesntExist_ContinueOnError(t *testing.T) { func TestProcessPatches_RemovePathDoesntExist_StopOnError(t *testing.T) { patch := types.PolicyPatch{Path: "/metadata/labels/is-mutated", Operation: "remove"} patches := []types.PolicyPatch{patch} - patchesBytes, err := webhooks.ProcessPatches(patches, []byte(endpointsDocument), webhooks.PatchingSetsStopOnError) + patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsStopOnError) assert.Assert(t, err != nil) assert.Assert(t, len(patchesBytes) == 0) } @@ -93,7 +92,7 @@ func 
TestProcessPatches_AddAndRemovePathsDontExist_ContinueOnError_EmptyResult(t patch1 := types.PolicyPatch{Path: "/metadata/labels/is-mutated", Operation: "remove"} patch2 := types.PolicyPatch{Path: "/spec/labels/label3", Operation: "add", Value: "label3Value"} patches := []types.PolicyPatch{patch1, patch2} - patchesBytes, err := webhooks.ProcessPatches(patches, []byte(endpointsDocument), webhooks.PatchingSetsContinueAlways) + patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsContinueAlways) assert.NilError(t, err) assert.Assert(t, len(patchesBytes) == 0) } @@ -103,7 +102,7 @@ func TestProcessPatches_AddAndRemovePathsDontExist_ContinueOnError_NotEmptyResul patch2 := types.PolicyPatch{Path: "/spec/labels/label2", Operation: "remove", Value: "label2Value"} patch3 := types.PolicyPatch{Path: "/metadata/labels/label3", Operation: "add", Value: "label3Value"} patches := []types.PolicyPatch{patch1, patch2, patch3} - patchesBytes, err := webhooks.ProcessPatches(patches, []byte(endpointsDocument), webhooks.PatchingSetsContinueAlways) + patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsContinueAlways) assert.NilError(t, err) assert.Assert(t, len(patchesBytes) == 1) assertEqStringAndData(t, `{"path":"/metadata/labels/label3","op":"add","value":"label3Value"}`, patchesBytes[0]) @@ -112,7 +111,7 @@ func TestProcessPatches_AddAndRemovePathsDontExist_ContinueOnError_NotEmptyResul func TestProcessPatches_RemovePathDoesntExist_IgnoreRemoveFailures_EmptyResult(t *testing.T) { patch := types.PolicyPatch{Path: "/metadata/labels/is-mutated", Operation: "remove"} patches := []types.PolicyPatch{patch} - patchesBytes, err := webhooks.ProcessPatches(patches, []byte(endpointsDocument), webhooks.PatchingSetsContinueOnRemoveFailure) + patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsContinueOnRemoveFailure) assert.NilError(t, err) assert.Assert(t, len(patchesBytes) == 0) } @@ -121,8 +120,16 @@ func TestProcessPatches_RemovePathDoesntExist_IgnoreRemoveFailures_NotEmptyResul patch1 := types.PolicyPatch{Path: "/metadata/labels/is-mutated", Operation: "remove"} patch2 := types.PolicyPatch{Path: "/metadata/labels/label2", Operation: "add", Value: "label2Value"} patches := []types.PolicyPatch{patch1, patch2} - patchesBytes, err := webhooks.ProcessPatches(patches, []byte(endpointsDocument), webhooks.PatchingSetsContinueOnRemoveFailure) + patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsContinueOnRemoveFailure) assert.NilError(t, err) assert.Assert(t, len(patchesBytes) == 1) assertEqStringAndData(t, `{"path":"/metadata/labels/label2","op":"add","value":"label2Value"}`, patchesBytes[0]) } + +// func TestProcessSamePatch_AddAndRemovePathsDontExist_ContinueOnError_EmptyResult(t *testing.T) { +// patch1 := types.PolicyPatch{Path: "/metadata/labels/label3", Operation: "add", Value: "label3Value"} +// patches := []types.PolicyPatch{patch1} +// patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsContinueAlways) +// assert.NilError(t, err) +// assert.Assert(t, len(patchesBytes) == 1) +// } diff --git a/webhooks/utils.go b/pkg/policymanager/utils.go similarity index 77% rename from webhooks/utils.go rename to pkg/policymanager/utils.go index 0f163514ba..3455aece1d 100644 --- a/webhooks/utils.go +++ b/pkg/policymanager/utils.go @@ -1,4 +1,4 @@ -package webhooks +package policymanager import ( "encoding/json" @@ -7,21 +7,21 @@ import ( "k8s.io/apimachinery/pkg/labels" ) 
-func parseMetadataFromObject(bytes []byte) map[string]interface{} { +func ParseMetadataFromObject(bytes []byte) map[string]interface{} { var objectJSON map[string]interface{} json.Unmarshal(bytes, &objectJSON) return objectJSON["metadata"].(map[string]interface{}) } -func parseKindFromObject(bytes []byte) string { +func ParseKindFromObject(bytes []byte) string { var objectJSON map[string]interface{} json.Unmarshal(bytes, &objectJSON) return objectJSON["kind"].(string) } -func parseLabelsFromMetadata(meta map[string]interface{}) labels.Set { +func ParseLabelsFromMetadata(meta map[string]interface{}) labels.Set { if interfaceMap, ok := meta["labels"].(map[string]interface{}); ok { labelMap := make(labels.Set, len(interfaceMap)) @@ -33,7 +33,7 @@ func parseLabelsFromMetadata(meta map[string]interface{}) labels.Set { return nil } -func parseNameFromObject(bytes []byte) string { +func ParseNameFromObject(bytes []byte) string { var objectJSON map[string]interface{} json.Unmarshal(bytes, &objectJSON) @@ -45,7 +45,7 @@ func parseNameFromObject(bytes []byte) string { return "" } -func parseNamespaceFromObject(bytes []byte) string { +func ParseNamespaceFromObject(bytes []byte) string { var objectJSON map[string]interface{} json.Unmarshal(bytes, &objectJSON) @@ -58,7 +58,7 @@ func parseNamespaceFromObject(bytes []byte) string { } // returns true if policyResourceName is a regexp -func parseRegexPolicyResourceName(policyResourceName string) (string, bool) { +func ParseRegexPolicyResourceName(policyResourceName string) (string, bool) { regex := strings.Split(policyResourceName, "regex:") if len(regex) == 1 { return regex[0], false diff --git a/webhooks/utils_test.go b/pkg/policymanager/utils_test.go similarity index 96% rename from webhooks/utils_test.go rename to pkg/policymanager/utils_test.go index ef000ecb08..275c8c5db6 100644 --- a/webhooks/utils_test.go +++ b/pkg/policymanager/utils_test.go @@ -1,4 +1,4 @@ -package webhooks_test +package policymanager import ( "testing" diff --git a/pkg/policymanager/validate.go b/pkg/policymanager/validate.go new file mode 100644 index 0000000000..ff5eeb0e72 --- /dev/null +++ b/pkg/policymanager/validate.go @@ -0,0 +1,43 @@ +package policymanager + +import ( + "github.com/minio/minio/pkg/wildcard" + types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// kind is the type of object being manipulated +// Checks requests kind, name and labels to fit the policy +func IsRuleApplicableToResource(kind string, resourceRaw []byte, policyResource types.PolicyResource) (bool, error) { + if policyResource.Kind != kind { + return false, nil + } + + if resourceRaw != nil { + meta := ParseMetadataFromObject(resourceRaw) + name := ParseNameFromObject(resourceRaw) + + if policyResource.Name != nil { + + if !wildcard.Match(*policyResource.Name, name) { + return false, nil + } + } + + if policyResource.Selector != nil { + selector, err := metav1.LabelSelectorAsSelector(policyResource.Selector) + + if err != nil { + return false, err + } + + labelMap := ParseLabelsFromMetadata(meta) + + if !selector.Matches(labelMap) { + return false, nil + } + + } + } + return true, nil +} diff --git a/webhooks/admission.go b/webhooks/admission.go index 652dcc3c34..aecdb3148a 100644 --- a/webhooks/admission.go +++ b/webhooks/admission.go @@ -1,11 +1,10 @@ package webhooks import ( - "github.com/minio/minio/pkg/wildcard" kubeclient "github.com/nirmata/kube-policy/kubeclient" types 
"github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + policymanager "github.com/nirmata/kube-policy/pkg/policymanager" "k8s.io/api/admission/v1beta1" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func kindIsSupported(kind string) bool { @@ -25,41 +24,5 @@ func AdmissionIsRequired(request *v1beta1.AdmissionRequest) bool { // Checks requests kind, name and labels to fit the policy func IsRuleApplicableToRequest(policyResource types.PolicyResource, request *v1beta1.AdmissionRequest) (bool, error) { - return IsRuleApplicableToResource(request.Kind.Kind, request.Object.Raw, policyResource) -} - -// kind is the type of object being manipulated -// Checks requests kind, name and labels to fit the policy -func IsRuleApplicableToResource(kind string, resourceRaw []byte, policyResource types.PolicyResource) (bool, error) { - if policyResource.Kind != kind { - return false, nil - } - - if resourceRaw != nil { - meta := parseMetadataFromObject(resourceRaw) - name := parseNameFromObject(resourceRaw) - - if policyResource.Name != nil { - - if !wildcard.Match(*policyResource.Name, name) { - return false, nil - } - } - - if policyResource.Selector != nil { - selector, err := metav1.LabelSelectorAsSelector(policyResource.Selector) - - if err != nil { - return false, err - } - - labelMap := parseLabelsFromMetadata(meta) - - if !selector.Matches(labelMap) { - return false, nil - } - - } - } - return true, nil + return policymanager.IsRuleApplicableToResource(request.Kind.Kind, request.Object.Raw, policyResource) } diff --git a/webhooks/mutation.go b/webhooks/mutation.go index ad651c329d..a553cbeeaa 100644 --- a/webhooks/mutation.go +++ b/webhooks/mutation.go @@ -9,6 +9,7 @@ import ( controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + policymanager "github.com/nirmata/kube-policy/pkg/policymanager" v1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -65,7 +66,7 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad return nil } - var allPatches []PatchBytes + var allPatches []policymanager.PatchBytes for _, policy := range policies { mw.logger.Printf("Applying policy %s with %d rules", policy.ObjectMeta.Name, len(policy.Spec.Rules)) @@ -79,8 +80,8 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad } if len(policyPatches) > 0 { - namespace := parseNamespaceFromObject(request.Object.Raw) - name := parseNameFromObject(request.Object.Raw) + namespace := policymanager.ParseNamespaceFromObject(request.Object.Raw) + name := policymanager.ParseNameFromObject(request.Object.Raw) mw.controller.LogPolicyInfo(policy.Name, fmt.Sprintf("Applied to %s %s/%s", request.Kind.Kind, namespace, name)) mw.logger.Printf("%s applied to %s %s/%s", policy.Name, request.Kind.Kind, namespace, name) @@ -91,31 +92,22 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad patchType := v1beta1.PatchTypeJSONPatch return &v1beta1.AdmissionResponse{ Allowed: true, - Patch: JoinPatches(allPatches), + Patch: policymanager.JoinPatches(allPatches), PatchType: &patchType, } } -func getPolicyPatchingSets(policy types.Policy) PatchingSets { - // failurePolicy property is the only available way for now to define behavior on patching error. 
- // TODO: define new failurePolicy values specific for patching and other policy features. - if policy.Spec.FailurePolicy != nil && *policy.Spec.FailurePolicy == "continueOnError" { - return PatchingSetsContinueAlways - } - return PatchingSetsDefault -} - // Applies all policy rules to the created object and returns list of processed JSON patches. // May return nil patches if it is not necessary to create patches for requested object. // Returns error ONLY in case when creation of resource should be denied. -func (mw *MutationWebhook) applyPolicyRules(request *v1beta1.AdmissionRequest, policy types.Policy) ([]PatchBytes, error) { +func (mw *MutationWebhook) applyPolicyRules(request *v1beta1.AdmissionRequest, policy types.Policy) ([]policymanager.PatchBytes, error) { return mw.applyPolicyRulesOnResource(request.Kind.Kind, request.Object.Raw, policy) } // kind is the type of object being manipulated -func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource []byte, policy types.Policy) ([]PatchBytes, error) { - patchingSets := getPolicyPatchingSets(policy) - var policyPatches []PatchBytes +func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource []byte, policy types.Policy) ([]policymanager.PatchBytes, error) { + patchingSets := policymanager.GetPolicyPatchingSets(policy) + var policyPatches []policymanager.PatchBytes for ruleIdx, rule := range policy.Spec.Rules { err := rule.Validate() @@ -124,7 +116,7 @@ func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource [ continue } - if ok, err := IsRuleApplicableToResource(kind, rawResource, rule.Resource); !ok { + if ok, err := policymanager.IsRuleApplicableToResource(kind, rawResource, rule.Resource); !ok { mw.logger.Printf("Rule %d of policy %s is not applicable to the request", ruleIdx, policy.Name) return nil, err } @@ -132,12 +124,12 @@ func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource [ // configMapGenerator and secretGenerator can be applied only to namespaces if kind == "Namespace" { err = mw.applyRuleGenerators(rawResource, rule) - if err != nil && patchingSets == PatchingSetsStopOnError { + if err != nil && patchingSets == policymanager.PatchingSetsStopOnError { return nil, fmt.Errorf("Failed to apply generators from rule #%d: %s", ruleIdx, err) } } - rulePatchesProcessed, err := ProcessPatches(rule.Patches, rawResource, patchingSets) + rulePatchesProcessed, err := policymanager.ProcessPatches(rule.Patches, rawResource, patchingSets) if err != nil { return nil, fmt.Errorf("Failed to process patches from rule #%d: %s", ruleIdx, err) } @@ -160,7 +152,7 @@ func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource [ // Applies "configMapGenerator" and "secretGenerator" described in PolicyRule func (mw *MutationWebhook) applyRuleGenerators(rawResource []byte, rule types.PolicyRule) error { - namespaceName := parseNameFromObject(rawResource) + namespaceName := policymanager.ParseNameFromObject(rawResource) err := mw.applyConfigGenerator(rule.ConfigMapGenerator, namespaceName, "ConfigMap") if err == nil { From eecc9db2d466b9be9557aead800d65667e4b9da2 Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Tue, 7 May 2019 17:33:19 -0700 Subject: [PATCH 09/17] undo the removal of name field --- definitions/install.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/definitions/install.yaml b/definitions/install.yaml index ded3a1e38c..44c1214c00 100644 --- a/definitions/install.yaml +++ b/definitions/install.yaml @@ -61,6 +61,8 @@ 
spec: - Secret - Service - StatefulSet + name: + type: string selector: properties: matchLabels: From 653a733a4b49d5377cd0bba9583aa8482d271048 Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Wed, 8 May 2019 07:38:42 -0700 Subject: [PATCH 10/17] code review changes: undo scripts --- scripts/compile-image.sh | 2 +- scripts/deploy-controller.sh | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/scripts/compile-image.sh b/scripts/compile-image.sh index f46d28bb1c..1f13b7efc6 100755 --- a/scripts/compile-image.sh +++ b/scripts/compile-image.sh @@ -22,4 +22,4 @@ tag="${hub_user_name}/${project_name}:${version}" docker build --no-cache -t "${tag}" . || exit 4 echo "# Pushing image to repository..." -#docker push "${tag}" || exit 5 +docker push "${tag}" || exit 5 diff --git a/scripts/deploy-controller.sh b/scripts/deploy-controller.sh index 6fd13566c4..ff9bd3f0d5 100755 --- a/scripts/deploy-controller.sh +++ b/scripts/deploy-controller.sh @@ -37,9 +37,6 @@ if [ -z "${namespace}" ]; then # controller should be launched locally kubectl delete -f definitions/MutatingWebhookConfiguration_debug.yaml kubectl create -f definitions/MutatingWebhookConfiguration_debug.yaml || exit 3 -# kubectl delete -f definitions/PolicyValidationConfiguration.yaml -# kubectl create -f definitions/PolicyValidationConfiguration.yaml || exit 3 - kubectl delete -f definitions/install.yaml kubectl create -f definitions/install.yaml || exit 3 From 3ef72addd81eef7f0956742f18c2d75e2f1dc33f Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Wed, 8 May 2019 07:41:26 -0700 Subject: [PATCH 11/17] code review changes: remove extra code --- pkg/violation/builder.go | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/pkg/violation/builder.go b/pkg/violation/builder.go index 26acc643c6..9011aa6b1b 100644 --- a/pkg/violation/builder.go +++ b/pkg/violation/builder.go @@ -45,11 +45,7 @@ func NewViolationBuilder( } func (b *builder) Create(info utils.ViolationInfo) error { - err := b.ProcessViolation(info) - if err != nil { - return err - } - return nil + return b.ProcessViolation(info) } func (b *builder) SetController(controller controllerinterfaces.PolicyGetter) { From e8e33732cf999539353836584882231a63123d7c Mon Sep 17 00:00:00 2001 From: shuting Date: Wed, 8 May 2019 10:01:41 -0700 Subject: [PATCH 12/17] move patch logic to pkg/mutation --- pkg/{policymanager => mutation}/patches.go | 2 +- .../patches_test.go | 2 +- pkg/{policymanager => mutation}/utils.go | 2 +- pkg/{policymanager => mutation}/utils_test.go | 2 +- pkg/{policymanager => mutation}/validate.go | 2 +- webhooks/admission.go | 4 +-- webhooks/mutation.go | 26 +++++++++---------- 7 files changed, 20 insertions(+), 20 deletions(-) rename pkg/{policymanager => mutation}/patches.go (99%) rename pkg/{policymanager => mutation}/patches_test.go (99%) rename pkg/{policymanager => mutation}/utils.go (98%) rename pkg/{policymanager => mutation}/utils_test.go (96%) rename pkg/{policymanager => mutation}/validate.go (97%) diff --git a/pkg/policymanager/patches.go b/pkg/mutation/patches.go similarity index 99% rename from pkg/policymanager/patches.go rename to pkg/mutation/patches.go index 863873e732..83f66863dc 100644 --- a/pkg/policymanager/patches.go +++ b/pkg/mutation/patches.go @@ -1,4 +1,4 @@ -package policymanager +package mutation import ( "encoding/json" diff --git a/pkg/policymanager/patches_test.go b/pkg/mutation/patches_test.go similarity index 99% rename from pkg/policymanager/patches_test.go rename to pkg/mutation/patches_test.go index 
40fcc14286..7453781c51 100644 --- a/pkg/policymanager/patches_test.go +++ b/pkg/mutation/patches_test.go @@ -1,4 +1,4 @@ -package policymanager +package mutation import ( "testing" diff --git a/pkg/policymanager/utils.go b/pkg/mutation/utils.go similarity index 98% rename from pkg/policymanager/utils.go rename to pkg/mutation/utils.go index 3455aece1d..5abc2ee03c 100644 --- a/pkg/policymanager/utils.go +++ b/pkg/mutation/utils.go @@ -1,4 +1,4 @@ -package policymanager +package mutation import ( "encoding/json" diff --git a/pkg/policymanager/utils_test.go b/pkg/mutation/utils_test.go similarity index 96% rename from pkg/policymanager/utils_test.go rename to pkg/mutation/utils_test.go index 275c8c5db6..f8473ae287 100644 --- a/pkg/policymanager/utils_test.go +++ b/pkg/mutation/utils_test.go @@ -1,4 +1,4 @@ -package policymanager +package mutation import ( "testing" diff --git a/pkg/policymanager/validate.go b/pkg/mutation/validate.go similarity index 97% rename from pkg/policymanager/validate.go rename to pkg/mutation/validate.go index ff5eeb0e72..b02c4639a9 100644 --- a/pkg/policymanager/validate.go +++ b/pkg/mutation/validate.go @@ -1,4 +1,4 @@ -package policymanager +package mutation import ( "github.com/minio/minio/pkg/wildcard" diff --git a/webhooks/admission.go b/webhooks/admission.go index aecdb3148a..91c2eb983a 100644 --- a/webhooks/admission.go +++ b/webhooks/admission.go @@ -3,7 +3,7 @@ package webhooks import ( kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - policymanager "github.com/nirmata/kube-policy/pkg/policymanager" + mutation "github.com/nirmata/kube-policy/pkg/mutation" "k8s.io/api/admission/v1beta1" ) @@ -24,5 +24,5 @@ func AdmissionIsRequired(request *v1beta1.AdmissionRequest) bool { // Checks requests kind, name and labels to fit the policy func IsRuleApplicableToRequest(policyResource types.PolicyResource, request *v1beta1.AdmissionRequest) (bool, error) { - return policymanager.IsRuleApplicableToResource(request.Kind.Kind, request.Object.Raw, policyResource) + return mutation.IsRuleApplicableToResource(request.Kind.Kind, request.Object.Raw, policyResource) } diff --git a/webhooks/mutation.go b/webhooks/mutation.go index a553cbeeaa..ef0b4c6b72 100644 --- a/webhooks/mutation.go +++ b/webhooks/mutation.go @@ -9,7 +9,7 @@ import ( controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - policymanager "github.com/nirmata/kube-policy/pkg/policymanager" + mutation "github.com/nirmata/kube-policy/pkg/mutation" v1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -66,7 +66,7 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad return nil } - var allPatches []policymanager.PatchBytes + var allPatches []mutation.PatchBytes for _, policy := range policies { mw.logger.Printf("Applying policy %s with %d rules", policy.ObjectMeta.Name, len(policy.Spec.Rules)) @@ -80,8 +80,8 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad } if len(policyPatches) > 0 { - namespace := policymanager.ParseNamespaceFromObject(request.Object.Raw) - name := policymanager.ParseNameFromObject(request.Object.Raw) + namespace := mutation.ParseNamespaceFromObject(request.Object.Raw) + name := mutation.ParseNameFromObject(request.Object.Raw) 
mw.controller.LogPolicyInfo(policy.Name, fmt.Sprintf("Applied to %s %s/%s", request.Kind.Kind, namespace, name)) mw.logger.Printf("%s applied to %s %s/%s", policy.Name, request.Kind.Kind, namespace, name) @@ -92,7 +92,7 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad patchType := v1beta1.PatchTypeJSONPatch return &v1beta1.AdmissionResponse{ Allowed: true, - Patch: policymanager.JoinPatches(allPatches), + Patch: mutation.JoinPatches(allPatches), PatchType: &patchType, } } @@ -100,14 +100,14 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad // Applies all policy rules to the created object and returns list of processed JSON patches. // May return nil patches if it is not necessary to create patches for requested object. // Returns error ONLY in case when creation of resource should be denied. -func (mw *MutationWebhook) applyPolicyRules(request *v1beta1.AdmissionRequest, policy types.Policy) ([]policymanager.PatchBytes, error) { +func (mw *MutationWebhook) applyPolicyRules(request *v1beta1.AdmissionRequest, policy types.Policy) ([]mutation.PatchBytes, error) { return mw.applyPolicyRulesOnResource(request.Kind.Kind, request.Object.Raw, policy) } // kind is the type of object being manipulated -func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource []byte, policy types.Policy) ([]policymanager.PatchBytes, error) { - patchingSets := policymanager.GetPolicyPatchingSets(policy) - var policyPatches []policymanager.PatchBytes +func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource []byte, policy types.Policy) ([]mutation.PatchBytes, error) { + patchingSets := mutation.GetPolicyPatchingSets(policy) + var policyPatches []mutation.PatchBytes for ruleIdx, rule := range policy.Spec.Rules { err := rule.Validate() @@ -116,7 +116,7 @@ func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource [ continue } - if ok, err := policymanager.IsRuleApplicableToResource(kind, rawResource, rule.Resource); !ok { + if ok, err := mutation.IsRuleApplicableToResource(kind, rawResource, rule.Resource); !ok { mw.logger.Printf("Rule %d of policy %s is not applicable to the request", ruleIdx, policy.Name) return nil, err } @@ -124,12 +124,12 @@ func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource [ // configMapGenerator and secretGenerator can be applied only to namespaces if kind == "Namespace" { err = mw.applyRuleGenerators(rawResource, rule) - if err != nil && patchingSets == policymanager.PatchingSetsStopOnError { + if err != nil && patchingSets == mutation.PatchingSetsStopOnError { return nil, fmt.Errorf("Failed to apply generators from rule #%d: %s", ruleIdx, err) } } - rulePatchesProcessed, err := policymanager.ProcessPatches(rule.Patches, rawResource, patchingSets) + rulePatchesProcessed, err := mutation.ProcessPatches(rule.Patches, rawResource, patchingSets) if err != nil { return nil, fmt.Errorf("Failed to process patches from rule #%d: %s", ruleIdx, err) } @@ -152,7 +152,7 @@ func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource [ // Applies "configMapGenerator" and "secretGenerator" described in PolicyRule func (mw *MutationWebhook) applyRuleGenerators(rawResource []byte, rule types.PolicyRule) error { - namespaceName := policymanager.ParseNameFromObject(rawResource) + namespaceName := mutation.ParseNameFromObject(rawResource) err := mw.applyConfigGenerator(rule.ConfigMapGenerator, namespaceName, "ConfigMap") if err == nil { 
From 7c82ea428460425371f5212728961e1f424409a5 Mon Sep 17 00:00:00 2001 From: shuting Date: Thu, 9 May 2019 22:26:22 -0700 Subject: [PATCH 13/17] Add PolicyEngine --- controller/controller.go | 21 +- controller/processPolicy.go | 120 +++++++++ kubeclient/kubeclient.go | 242 ++++++++++++++++++ pkg/policyengine/mutation.go | 96 +++++++ .../mutation/checkRules.go} | 9 +- pkg/{ => policyengine}/mutation/patches.go | 0 .../mutation/patches_test.go | 0 pkg/{ => policyengine}/mutation/utils.go | 0 pkg/{ => policyengine}/mutation/utils_test.go | 0 pkg/policyengine/policyengine.go | 129 ++++++++++ pkg/policyengine/validation.go | 5 + webhooks/admission.go | 4 +- webhooks/mutation.go | 13 +- 13 files changed, 626 insertions(+), 13 deletions(-) create mode 100644 controller/processPolicy.go create mode 100644 pkg/policyengine/mutation.go rename pkg/{mutation/validate.go => policyengine/mutation/checkRules.go} (79%) rename pkg/{ => policyengine}/mutation/patches.go (100%) rename pkg/{ => policyengine}/mutation/patches_test.go (100%) rename pkg/{ => policyengine}/mutation/utils.go (100%) rename pkg/{ => policyengine}/mutation/utils_test.go (100%) create mode 100644 pkg/policyengine/policyengine.go create mode 100644 pkg/policyengine/validation.go diff --git a/controller/controller.go b/controller/controller.go index 3ccde81b97..06e1f4305b 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -18,12 +18,12 @@ import ( event "github.com/nirmata/kube-policy/pkg/event" eventinterfaces "github.com/nirmata/kube-policy/pkg/event/interfaces" eventutils "github.com/nirmata/kube-policy/pkg/event/utils" + "github.com/nirmata/kube-policy/pkg/policyengine" violation "github.com/nirmata/kube-policy/pkg/violation" violationinterfaces "github.com/nirmata/kube-policy/pkg/violation/interfaces" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" mergetypes "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/client-go/rest" "k8s.io/client-go/tools/cache" ) @@ -43,6 +43,9 @@ type policyController struct { logger *log.Logger violationBuilder violationinterfaces.ViolationGenerator eventBuilder eventinterfaces.BuilderInternal + + policyEngine policyengine.PolicyEngine + kubeClient *kubeClient.KubeClient } // NewPolicyController from cmd args @@ -71,6 +74,12 @@ func NewPolicyController(config *rest.Config, logger *log.Logger, kubeClient *ku // generate Violation builer violationBuilder, err := violation.NewViolationBuilder(kubeClient, eventBuilder, logger) + if err != nil { + return nil, err + } + + // generate Policy Engine + policyEngine := policyengine.NewPolicyEngine(kubeClient, logger) controller := &policyController{ policyInformerFactory: policyInformerFactory, @@ -79,15 +88,20 @@ func NewPolicyController(config *rest.Config, logger *log.Logger, kubeClient *ku logger: logger, violationBuilder: violationBuilder, eventBuilder: eventBuilder, + policyEngine: policyEngine, + kubeClient: kubeClient, } + policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ AddFunc: controller.CreatePolicyHandler, UpdateFunc: controller.UpdatePolicyHandler, DeleteFunc: controller.DeletePolicyHandler, }) + // Set the controller eventBuilder.SetController(controller) violationBuilder.SetController(controller) + return controller, nil } @@ -165,6 +179,7 @@ func (c *policyController) addPolicyLog(name, text string) { func (c *policyController) CreatePolicyHandler(resource interface{}) { key := c.GetResourceKey(resource) c.logger.Printf("Policy 
created: %s", key) + // c.runForPolicy(key) } func (c *policyController) UpdatePolicyHandler(oldResource, newResource interface{}) { @@ -186,11 +201,11 @@ func (c *policyController) GetResourceKey(resource interface{}) string { } return "" } + func (c *policyController) GetPolicy(name string) (*types.Policy, error) { policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(name) if err != nil { - utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", name)) - return nil, err + return nil, fmt.Errorf("error when SplitMetaNamespaceKey: %s, err: %v", name, err) } return c.getPolicyInterface(policyNamespace).Get(policyName) } diff --git a/controller/processPolicy.go b/controller/processPolicy.go new file mode 100644 index 0000000000..37fca9fad2 --- /dev/null +++ b/controller/processPolicy.go @@ -0,0 +1,120 @@ +package controller + +import ( + "encoding/json" + "fmt" + + types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + eventutils "github.com/nirmata/kube-policy/pkg/event/utils" + "github.com/nirmata/kube-policy/pkg/policyengine/mutation" + violationutils "github.com/nirmata/kube-policy/pkg/violation/utils" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" +) + +func (c *policyController) runForPolicy(key string) { + //policy, err := c.GetPolicy(key) + policy, err := c.getPolicyByKey(key) + if err != nil { + utilruntime.HandleError(fmt.Errorf("invalid resource key: %s, err: %v", key, err)) + return + } + + if policy == nil { + c.logger.Printf("Counld not find policy by key %s", key) + return + } + + violations, events, err := c.processPolicy(*policy) + if err != nil { + // add Error processing policy event + } + + c.logger.Printf("%v, %v", violations, events) + // TODO: + // create violations + // create events + +} + +// processPolicy process the policy to all the matched resources +func (c *policyController) processPolicy(policy types.Policy) ( + violations []violationutils.ViolationInfo, events []eventutils.EventInfo, err error) { + + for _, rule := range policy.Spec.Rules { + resources, err := c.filterResourceByRule(rule) + if err != nil { + c.logger.Printf("Failed to filter resources by rule %s, err: %v\n", rule.Name, err) + } + + for _, resource := range resources { + rawResource, err := json.Marshal(resource) + if err != nil { + c.logger.Printf("Failed to marshal resources map to rule %s, err: %v\n", rule.Name, err) + continue + } + + violation, eventInfos, err := c.policyEngine.ProcessExisting(policy, rawResource) + if err != nil { + c.logger.Printf("Failed to process rule %s, err: %v\n", rule.Name, err) + continue + } + + violations = append(violations, violation...) + events = append(events, eventInfos...) 
+ } + } + return violations, events, nil +} + +func (c *policyController) filterResourceByRule(rule types.PolicyRule) ([]runtime.Object, error) { + var targetResources []runtime.Object + // TODO: make this namespace all + var namespace = "default" + if err := rule.Validate(); err != nil { + return nil, fmt.Errorf("invalid rule detected: %s, err: %v", rule.Name, err) + } + + // Get the resource list from kind + resources, err := c.kubeClient.ListResource(rule.Resource.Kind, namespace) + if err != nil { + return nil, err + } + + for _, resource := range resources { + // TODO: + rawResource, err := json.Marshal(resource) + // objKind := resource.GetObjectKind() + // codecFactory := serializer.NewCodecFactory(runtime.NewScheme()) + // codecFactory.EncoderForVersion() + + if err != nil { + c.logger.Printf("failed to marshal object %v", resource) + continue + } + + // filter the resource by name and label + if ok, _ := mutation.IsRuleApplicableToResource(rawResource, rule.Resource); ok { + targetResources = append(targetResources, resource) + } + } + return targetResources, nil +} + +func (c *policyController) getPolicyByKey(key string) (*types.Policy, error) { + // Create nil Selector to grab all the policies + selector := labels.NewSelector() + cachedPolicies, err := c.policyLister.List(selector) + if err != nil { + return nil, err + } + + for _, elem := range cachedPolicies { + if elem.Name == key { + return elem, nil + } + } + + return nil, nil +} diff --git a/kubeclient/kubeclient.go b/kubeclient/kubeclient.go index 36436d608d..2edc317900 100644 --- a/kubeclient/kubeclient.go +++ b/kubeclient/kubeclient.go @@ -206,7 +206,34 @@ var rMapper = map[string]getter{ "StatefulSet": statefulSetGetter, } +var lMapper = map[string]lister{ + "ConfigMap": configMapLister, + "Pods": podLister, + "Deployment": deploymentLister, + "CronJob": cronJobLister, + "Endpoints": endpointsLister, + "HorizontalPodAutoscaler": horizontalPodAutoscalerLister, + "Ingress": ingressLister, + "Job": jobLister, + "LimitRange": limitRangeLister, + "Namespace": namespaceLister, + "NetworkPolicy": networkPolicyLister, + "PersistentVolumeClaim": persistentVolumeClaimLister, + "PodDisruptionBudget": podDisruptionBudgetLister, + "PodTemplate": podTemplateLister, + "ResourceQuota": resourceQuotaLister, + "Secret": secretLister, + "Service": serviceLister, + "StatefulSet": statefulSetLister, +} + type getter func(*kubernetes.Clientset, string, string) (runtime.Object, error) +type lister func(*kubernetes.Clientset, string) ([]runtime.Object, error) + +//ListResource to return resource list +func (kc *KubeClient) ListResource(kind string, namespace string) ([]runtime.Object, error) { + return lMapper[kind](kc.client, namespace) +} //GetResource get the resource object func (kc *KubeClient) GetResource(kind string, resource string) (runtime.Object, error) { @@ -233,6 +260,19 @@ func configMapGetter(clientSet *kubernetes.Clientset, namespace string, name str } return obj, nil } + +func configMapLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func podsGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -241,6 
+281,18 @@ func podsGetter(clientSet *kubernetes.Clientset, namespace string, name string) return obj, nil } +func podLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().Pods(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func deploymentGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -248,6 +300,17 @@ func deploymentGetter(clientSet *kubernetes.Clientset, namespace string, name st } return obj, nil } +func deploymentLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.AppsV1().Deployments(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} func cronJobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{}) @@ -257,6 +320,18 @@ func cronJobGetter(clientSet *kubernetes.Clientset, namespace string, name strin return obj, nil } +func cronJobLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.BatchV1beta1().CronJobs(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func endpointsbGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -265,6 +340,18 @@ func endpointsbGetter(clientSet *kubernetes.Clientset, namespace string, name st return obj, nil } +func endpointsLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().Endpoints(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func horizontalPodAutoscalerGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -273,6 +360,18 @@ func horizontalPodAutoscalerGetter(clientSet *kubernetes.Clientset, namespace st return obj, nil } +func horizontalPodAutoscalerLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func ingressGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.ExtensionsV1beta1().Ingresses(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -281,6 +380,18 @@ func ingressGetter(clientSet *kubernetes.Clientset, 
namespace string, name strin return obj, nil } +func ingressLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.ExtensionsV1beta1().Ingresses(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func jobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -289,6 +400,18 @@ func jobGetter(clientSet *kubernetes.Clientset, namespace string, name string) ( return obj, nil } +func jobLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.BatchV1().Jobs(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func limitRangeGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().LimitRanges(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -296,6 +419,17 @@ func limitRangeGetter(clientSet *kubernetes.Clientset, namespace string, name st } return obj, nil } +func limitRangeLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().LimitRanges(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} func namespaceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().Namespaces().Get(name, metav1.GetOptions{}) @@ -305,6 +439,18 @@ func namespaceGetter(clientSet *kubernetes.Clientset, namespace string, name str return obj, nil } +func namespaceLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().Namespaces().List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func networkPolicyGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.NetworkingV1().NetworkPolicies(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -313,6 +459,18 @@ func networkPolicyGetter(clientSet *kubernetes.Clientset, namespace string, name return obj, nil } +func networkPolicyLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.NetworkingV1().NetworkPolicies(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func persistentVolumeClaimGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().PersistentVolumeClaims(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -321,6 +479,18 @@ func persistentVolumeClaimGetter(clientSet *kubernetes.Clientset, namespace stri return obj, nil } +func persistentVolumeClaimLister(clientSet 
*kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().PersistentVolumeClaims(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func podDisruptionBudgetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -329,6 +499,18 @@ func podDisruptionBudgetGetter(clientSet *kubernetes.Clientset, namespace string return obj, nil } +func podDisruptionBudgetLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func podTemplateGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().PodTemplates(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -337,6 +519,18 @@ func podTemplateGetter(clientSet *kubernetes.Clientset, namespace string, name s return obj, nil } +func podTemplateLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().PodTemplates(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func resourceQuotaGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -345,6 +539,18 @@ func resourceQuotaGetter(clientSet *kubernetes.Clientset, namespace string, name return obj, nil } +func resourceQuotaLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func secretGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -353,6 +559,18 @@ func secretGetter(clientSet *kubernetes.Clientset, namespace string, name string return obj, nil } +func secretLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.CoreV1().Secrets(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func serviceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.CoreV1().Services(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -361,6 +579,18 @@ func serviceGetter(clientSet *kubernetes.Clientset, namespace string, name strin return obj, nil } +func serviceLister(clientSet *kubernetes.Clientset, namespace string) 
([]runtime.Object, error) { + list, err := clientSet.CoreV1().Services(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} + func statefulSetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) { obj, err := clientSet.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{}) if err != nil { @@ -368,3 +598,15 @@ func statefulSetGetter(clientSet *kubernetes.Clientset, namespace string, name s } return obj, nil } + +func statefulSetLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) { + list, err := clientSet.AppsV1().StatefulSets(namespace).List(metav1.ListOptions{}) + if err != nil { + return nil, err + } + objList := []runtime.Object{} + for _, obj := range list.Items { + objList = append(objList, &obj) + } + return objList, nil +} diff --git a/pkg/policyengine/mutation.go b/pkg/policyengine/mutation.go new file mode 100644 index 0000000000..85483bf53a --- /dev/null +++ b/pkg/policyengine/mutation.go @@ -0,0 +1,96 @@ +package policyengine + +import ( + "errors" + "fmt" + + types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + "github.com/nirmata/kube-policy/pkg/policyengine/mutation" +) + +func (p *policyEngine) ProcessMutation(policy types.Policy, rawResource []byte) ([]mutation.PatchBytes, error) { + patchingSets := mutation.GetPolicyPatchingSets(policy) + var policyPatches []mutation.PatchBytes + + for ruleIdx, rule := range policy.Spec.Rules { + err := rule.Validate() + if err != nil { + p.logger.Printf("Invalid rule detected: #%s in policy %s, err: %v\n", rule.Name, policy.ObjectMeta.Name, err) + continue + } + + if ok, err := mutation.IsRuleApplicableToResource(rawResource, rule.Resource); !ok { + p.logger.Printf("Rule %d of policy %s is not applicable to the request", ruleIdx, policy.Name) + return nil, err + } + + err = p.applyRuleGenerators(rawResource, rule) + if err != nil && patchingSets == mutation.PatchingSetsStopOnError { + return nil, fmt.Errorf("Failed to apply generators from rule #%s: %v", rule.Name, err) + } + + rulePatchesProcessed, err := mutation.ProcessPatches(rule.Patches, rawResource, patchingSets) + if err != nil { + return nil, fmt.Errorf("Failed to process patches from rule #%s: %v", rule.Name, err) + } + + if rulePatchesProcessed != nil { + policyPatches = append(policyPatches, rulePatchesProcessed...) 
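Two hardening notes on the kubeclient listers above, shown as sketches within the same kubeclient package (lMapper, KubeClient, and the client-go imports from this file plus fmt are assumed). First, ListResource indexes lMapper directly, so an unregistered kind calls a nil function value and panics; a guarded lookup can return an error instead. Second, the listers append &obj from the range variable; with the Go toolchains of this era (loop variables became per-iteration only in Go 1.22) every stored pointer refers to the same reused variable, so the returned slice holds N pointers to the last item. Taking the address of the slice element avoids that, illustrated with the ConfigMap lister shape:

//ListResource returns the resource list for a kind, or an error for kinds
//that have no registered lister rather than calling a nil map entry.
func (kc *KubeClient) ListResource(kind string, namespace string) ([]runtime.Object, error) {
	l, ok := lMapper[kind]
	if !ok {
		return nil, fmt.Errorf("unsupported kind %q for list operation", kind)
	}
	return l(kc.client, namespace)
}

func configMapLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
	list, err := clientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{})
	if err != nil {
		return nil, err
	}
	objList := make([]runtime.Object, 0, len(list.Items))
	for i := range list.Items {
		// take the address of the slice element, not of the reused range variable
		objList = append(objList, &list.Items[i])
	}
	return objList, nil
}

The same element-addressing change applies to each lister added in this hunk.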
+ p.logger.Printf("Rule %d: prepared %d patches", ruleIdx, len(rulePatchesProcessed)) + // TODO: add PolicyApplied events per rule for policy and resource + } else { + p.logger.Printf("Rule %d: no patches prepared", ruleIdx) + } + } + + // empty patch, return error to deny resource creation + if policyPatches == nil { + return nil, fmt.Errorf("no patches prepared") + } + + return policyPatches, nil +} + +// Applies "configMapGenerator" and "secretGenerator" described in PolicyRule +func (p *policyEngine) applyRuleGenerators(rawResource []byte, rule types.PolicyRule) error { + kind := mutation.ParseKindFromObject(rawResource) + + // configMapGenerator and secretGenerator can be applied only to namespaces + if kind == "Namespace" { + namespaceName := mutation.ParseNameFromObject(rawResource) + + err := p.applyConfigGenerator(rule.ConfigMapGenerator, namespaceName, "ConfigMap") + if err == nil { + err = p.applyConfigGenerator(rule.SecretGenerator, namespaceName, "Secret") + } + return err + } + return nil +} + +// Creates resourceKind (ConfigMap or Secret) with parameters specified in generator in cluster specified in request. +func (p *policyEngine) applyConfigGenerator(generator *types.PolicyConfigGenerator, namespace string, configKind string) error { + if generator == nil { + return nil + } + + err := generator.Validate() + if err != nil { + return errors.New(fmt.Sprintf("Generator for '%s' is invalid: %s", configKind, err)) + } + + switch configKind { + case "ConfigMap": + err = p.kubeClient.GenerateConfigMap(*generator, namespace) + case "Secret": + err = p.kubeClient.GenerateSecret(*generator, namespace) + default: + err = errors.New(fmt.Sprintf("Unsupported config Kind '%s'", configKind)) + } + + if err != nil { + return errors.New(fmt.Sprintf("Unable to apply generator for %s '%s/%s' : %s", configKind, namespace, generator.Name, err)) + } + + return nil +} diff --git a/pkg/mutation/validate.go b/pkg/policyengine/mutation/checkRules.go similarity index 79% rename from pkg/mutation/validate.go rename to pkg/policyengine/mutation/checkRules.go index b02c4639a9..ecb7f7ff35 100644 --- a/pkg/mutation/validate.go +++ b/pkg/policyengine/mutation/checkRules.go @@ -8,10 +8,11 @@ import ( // kind is the type of object being manipulated // Checks requests kind, name and labels to fit the policy -func IsRuleApplicableToResource(kind string, resourceRaw []byte, policyResource types.PolicyResource) (bool, error) { - if policyResource.Kind != kind { - return false, nil - } +func IsRuleApplicableToResource(resourceRaw []byte, policyResource types.PolicyResource) (bool, error) { + // kind := ParseKindFromObject(resourceRaw) + // if policyResource.Kind != kind { + // return false, nil + // } if resourceRaw != nil { meta := ParseMetadataFromObject(resourceRaw) diff --git a/pkg/mutation/patches.go b/pkg/policyengine/mutation/patches.go similarity index 100% rename from pkg/mutation/patches.go rename to pkg/policyengine/mutation/patches.go diff --git a/pkg/mutation/patches_test.go b/pkg/policyengine/mutation/patches_test.go similarity index 100% rename from pkg/mutation/patches_test.go rename to pkg/policyengine/mutation/patches_test.go diff --git a/pkg/mutation/utils.go b/pkg/policyengine/mutation/utils.go similarity index 100% rename from pkg/mutation/utils.go rename to pkg/policyengine/mutation/utils.go diff --git a/pkg/mutation/utils_test.go b/pkg/policyengine/mutation/utils_test.go similarity index 100% rename from pkg/mutation/utils_test.go rename to pkg/policyengine/mutation/utils_test.go diff 
--git a/pkg/policyengine/policyengine.go b/pkg/policyengine/policyengine.go new file mode 100644 index 0000000000..1bf585d184 --- /dev/null +++ b/pkg/policyengine/policyengine.go @@ -0,0 +1,129 @@ +package policyengine + +import ( + "fmt" + "log" + + kubeClient "github.com/nirmata/kube-policy/kubeclient" + policytype "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + eventutils "github.com/nirmata/kube-policy/pkg/event/utils" + "github.com/nirmata/kube-policy/pkg/policyengine/mutation" + violationutils "github.com/nirmata/kube-policy/pkg/violation/utils" +) + +type PolicyEngine interface { + // ProcessMutation should be called from admission contoller + // when there is an creation / update of the resource + // ProcessMutation(policy types.Policy, rawResource []byte) (patchBytes []byte, events []Events, err error) + ProcessMutation(policy types.Policy, rawResource []byte) ([]mutation.PatchBytes, error) + + // ProcessValidation should be called from admission contoller + // when there is an creation / update of the resource + ProcessValidation(policy types.Policy, rawResource []byte) + + // ProcessExisting should be called from policy controller + // when there is an create / update of the policy + // we should process the policy on matched resources, generate violations accordingly + ProcessExisting(policy types.Policy, rawResource []byte) ([]violationutils.ViolationInfo, []eventutils.EventInfo, error) +} + +type policyEngine struct { + kubeClient *kubeClient.KubeClient + // controller controllerinterfaces.PolicyGetter + logger *log.Logger +} + +func NewPolicyEngine(kubeClient *kubeClient.KubeClient, logger *log.Logger) PolicyEngine { + return &policyEngine{ + kubeClient: kubeClient, + logger: logger, + } +} + +func (p *policyEngine) ProcessExisting(policy types.Policy, rawResource []byte) ([]violationutils.ViolationInfo, []eventutils.EventInfo, error) { + var violations []violationutils.ViolationInfo + var events []eventutils.EventInfo + + patchingSets := mutation.GetPolicyPatchingSets(policy) + + for _, rule := range policy.Spec.Rules { + err := rule.Validate() + if err != nil { + p.logger.Printf("Invalid rule detected: #%s in policy %s, err: %v\n", rule.Name, policy.ObjectMeta.Name, err) + continue + } + + if ok, err := mutation.IsRuleApplicableToResource(rawResource, rule.Resource); !ok { + p.logger.Printf("Rule %s of policy %s is not applicable to the request", rule.Name, policy.Name) + return nil, nil, err + } + + violation, eventInfos, err := p.processRuleOnResource(policy.Name, rule, rawResource, patchingSets) + if err != nil { + p.logger.Printf("Failed to process rule %s, err: %v\n", rule.Name, err) + continue + } + // } else { + // policyPatches = append(policyPatches, processedPatches...) + // } + violations = append(violations, violation) + events = append(events, eventInfos...) 
+ } + return violations, events, nil +} + +func (p *policyEngine) processRuleOnResource(policyName string, rule types.PolicyRule, rawResource []byte, patchingSets mutation.PatchingSets) ( + violationutils.ViolationInfo, []eventutils.EventInfo, error) { + + var violationInfo violationutils.ViolationInfo + var eventInfos []eventutils.EventInfo + + resourceKind := mutation.ParseKindFromObject(rawResource) + resourceName := mutation.ParseNameFromObject(rawResource) + resourceNamespace := mutation.ParseNamespaceFromObject(rawResource) + + rulePatchesProcessed, err := mutation.ProcessPatches(rule.Patches, nil, patchingSets) + if err != nil { + return violationInfo, eventInfos, fmt.Errorf("Failed to process patches from rule %s: %v", rule.Name, err) + } + + if rulePatchesProcessed != nil { + log.Printf("Rule %s: prepared %d patches", rule.Name, len(rulePatchesProcessed)) + + // add a violation to queue + violationInfo = violationutils.ViolationInfo{ + Policy: policyName, + Violation: policytype.Violation{ + Kind: resourceKind, + Resource: resourceNamespace + "/" + resourceName, + Rule: rule.Name, + // TODO: + Reason: "testing violation reason", + Message: "testing violation message", + }, + } + + // add an event to policy + eventInfos = append(eventInfos, eventutils.EventInfo{ + Kind: "Policy", + Resource: policyName, + Rule: rule.Name, + // TODO: + Reason: "PolicyViolation", + Message: "testing event message for policy", + }) + + // add an event to resource + eventInfos = append(eventInfos, eventutils.EventInfo{ + Kind: resourceKind, + Resource: resourceNamespace + "/" + resourceName, + Rule: rule.Name, + // TODO: + Reason: "PolicyViolation", + Message: "testing event message for policy", + }) + } + + return violationInfo, eventInfos, nil +} diff --git a/pkg/policyengine/validation.go b/pkg/policyengine/validation.go new file mode 100644 index 0000000000..282a4496a4 --- /dev/null +++ b/pkg/policyengine/validation.go @@ -0,0 +1,5 @@ +package policyengine + +import types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + +func (p *policyEngine) ProcessValidation(policy types.Policy, rawResource []byte) {} diff --git a/webhooks/admission.go b/webhooks/admission.go index 91c2eb983a..ba2913588f 100644 --- a/webhooks/admission.go +++ b/webhooks/admission.go @@ -3,7 +3,7 @@ package webhooks import ( kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - mutation "github.com/nirmata/kube-policy/pkg/mutation" + mutation "github.com/nirmata/kube-policy/pkg/policyengine/mutation" "k8s.io/api/admission/v1beta1" ) @@ -24,5 +24,5 @@ func AdmissionIsRequired(request *v1beta1.AdmissionRequest) bool { // Checks requests kind, name and labels to fit the policy func IsRuleApplicableToRequest(policyResource types.PolicyResource, request *v1beta1.AdmissionRequest) (bool, error) { - return mutation.IsRuleApplicableToResource(request.Kind.Kind, request.Object.Raw, policyResource) + return mutation.IsRuleApplicableToResource(request.Object.Raw, policyResource) } diff --git a/webhooks/mutation.go b/webhooks/mutation.go index ef0b4c6b72..3bb66a3d02 100644 --- a/webhooks/mutation.go +++ b/webhooks/mutation.go @@ -9,7 +9,8 @@ import ( controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - mutation "github.com/nirmata/kube-policy/pkg/mutation" + policyengine "github.com/nirmata/kube-policy/pkg/policyengine" + 
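In ProcessExisting above, a rule that does not match the resource returns immediately, which also skips every later rule of the policy. If the intent is to evaluate rules independently, filtering first keeps the loop going; a sketch of such a helper inside the policyengine package, using only types and functions already present in this patch:

// applicableRules returns the rules of a policy that pass validation and match
// the given raw resource, so each rule can be evaluated on its own instead of
// aborting the whole policy on the first non-matching rule. Illustrative helper.
func (p *policyEngine) applicableRules(policy types.Policy, rawResource []byte) []types.PolicyRule {
	var out []types.PolicyRule
	for _, rule := range policy.Spec.Rules {
		if err := rule.Validate(); err != nil {
			p.logger.Printf("Invalid rule detected: #%s in policy %s, err: %v\n", rule.Name, policy.ObjectMeta.Name, err)
			continue
		}
		if ok, err := mutation.IsRuleApplicableToResource(rawResource, rule.Resource); !ok || err != nil {
			// skip only this rule; later rules are still evaluated
			continue
		}
		out = append(out, rule)
	}
	return out
}

ProcessExisting (and ProcessMutation) could then range over p.applicableRules(policy, rawResource) and drop the early return.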
mutation "github.com/nirmata/kube-policy/pkg/policyengine/mutation" v1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -20,6 +21,7 @@ import ( // business logic for resource mutation type MutationWebhook struct { kubeclient *kubeclient.KubeClient + policyEngine policyengine.PolicyEngine controller controllerinterfaces.PolicyGetter registration *MutationWebhookRegistration logger *log.Logger @@ -44,8 +46,11 @@ func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.Kub if logger == nil { logger = log.New(os.Stdout, "Mutation WebHook: ", log.LstdFlags|log.Lshortfile) } + policyengine := policyengine.NewPolicyEngine(kubeclient, logger) + return &MutationWebhook{ kubeclient: kubeclient, + policyEngine: policyengine, controller: controller, registration: registration, logger: logger, @@ -101,10 +106,10 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad // May return nil patches if it is not necessary to create patches for requested object. // Returns error ONLY in case when creation of resource should be denied. func (mw *MutationWebhook) applyPolicyRules(request *v1beta1.AdmissionRequest, policy types.Policy) ([]mutation.PatchBytes, error) { - return mw.applyPolicyRulesOnResource(request.Kind.Kind, request.Object.Raw, policy) + return mw.policyEngine.ProcessMutation(policy, request.Object.Raw) } -// kind is the type of object being manipulated +// kind is the type of object being manipulated, e.g. request.Kind.kind func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource []byte, policy types.Policy) ([]mutation.PatchBytes, error) { patchingSets := mutation.GetPolicyPatchingSets(policy) var policyPatches []mutation.PatchBytes @@ -116,7 +121,7 @@ func (mw *MutationWebhook) applyPolicyRulesOnResource(kind string, rawResource [ continue } - if ok, err := mutation.IsRuleApplicableToResource(kind, rawResource, rule.Resource); !ok { + if ok, err := mutation.IsRuleApplicableToResource(rawResource, rule.Resource); !ok { mw.logger.Printf("Rule %d of policy %s is not applicable to the request", ruleIdx, policy.Name) return nil, err } From e25f7a41e48af3f1bb6b719d0a964a5fe3744a2f Mon Sep 17 00:00:00 2001 From: shuting Date: Thu, 9 May 2019 22:27:44 -0700 Subject: [PATCH 14/17] Update policyengine.go --- pkg/policyengine/policyengine.go | 1 - 1 file changed, 1 deletion(-) diff --git a/pkg/policyengine/policyengine.go b/pkg/policyengine/policyengine.go index 1bf585d184..71e4d23d60 100644 --- a/pkg/policyengine/policyengine.go +++ b/pkg/policyengine/policyengine.go @@ -30,7 +30,6 @@ type PolicyEngine interface { type policyEngine struct { kubeClient *kubeClient.KubeClient - // controller controllerinterfaces.PolicyGetter logger *log.Logger } From 9a7be9493008a9e580221a42c3cca792d734b536 Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Fri, 10 May 2019 00:05:21 -0700 Subject: [PATCH 15/17] initial feature proposal --- controller/controller.go | 209 ------------------ .../interfaces/controller_interfaces.go | 24 -- main.go | 46 +++- pkg/event/builder.go | 161 -------------- pkg/event/eventcontroller.go | 169 ++++++++++++++ pkg/event/eventmsgbuilder.go | 45 ++++ pkg/event/eventmsgbuilder_test.go | 23 ++ pkg/event/interfaces/builder_interfaces.go | 12 - pkg/event/reason.go | 21 ++ pkg/event/util.go | 7 + pkg/event/utils/util.go | 15 -- pkg/violation/builder.go | 124 +++++------ .../interfaces/violation_interfaces.go | 11 - pkg/violation/util.go | 12 
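With applyPolicyRules now delegating to PolicyEngine.ProcessMutation, the webhook still needs to fold the returned []mutation.PatchBytes into the single JSON-patch document carried by the admission response. A sketch of one way to do that, assuming mutation.PatchBytes is a []byte holding one JSON-patch operation; the helper name is illustrative and the repository may already provide an equivalent:

package webhooks

import (
	"bytes"

	mutation "github.com/nirmata/kube-policy/pkg/policyengine/mutation"
)

// joinPatches packs individual JSON-patch operations into one JSON array
// suitable for AdmissionResponse.Patch. Illustrative helper, not part of the diff.
func joinPatches(patches []mutation.PatchBytes) []byte {
	var out bytes.Buffer
	out.WriteString("[")
	for i, p := range patches {
		if i > 0 {
			out.WriteString(",")
		}
		out.Write(p)
	}
	out.WriteString("]")
	return out.Bytes()
}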
+- pkg/violation/utils/util.go | 8 - policycontroller/policycontroller.go | 194 ++++++++++++++++ .../policycontroller_test.go | 5 +- policycontroller/utils.go | 7 + webhooks/mutation.go | 59 +++-- 19 files changed, 603 insertions(+), 549 deletions(-) delete mode 100644 controller/controller.go delete mode 100755 controller/interfaces/controller_interfaces.go delete mode 100644 pkg/event/builder.go create mode 100644 pkg/event/eventcontroller.go create mode 100644 pkg/event/eventmsgbuilder.go create mode 100644 pkg/event/eventmsgbuilder_test.go delete mode 100644 pkg/event/interfaces/builder_interfaces.go create mode 100644 pkg/event/reason.go create mode 100644 pkg/event/util.go delete mode 100644 pkg/event/utils/util.go delete mode 100644 pkg/violation/interfaces/violation_interfaces.go delete mode 100644 pkg/violation/utils/util.go create mode 100644 policycontroller/policycontroller.go rename controller/controller_test.go => policycontroller/policycontroller_test.go (99%) create mode 100644 policycontroller/utils.go diff --git a/controller/controller.go b/controller/controller.go deleted file mode 100644 index 3ccde81b97..0000000000 --- a/controller/controller.go +++ /dev/null @@ -1,209 +0,0 @@ -package controller - -import ( - "errors" - "fmt" - "log" - "os" - "sort" - "time" - - controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" - kubeClient "github.com/nirmata/kube-policy/kubeclient" - types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" - policies "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/typed/policy/v1alpha1" - informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions" - lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" - event "github.com/nirmata/kube-policy/pkg/event" - eventinterfaces "github.com/nirmata/kube-policy/pkg/event/interfaces" - eventutils "github.com/nirmata/kube-policy/pkg/event/utils" - violation "github.com/nirmata/kube-policy/pkg/violation" - violationinterfaces "github.com/nirmata/kube-policy/pkg/violation/interfaces" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/labels" - mergetypes "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/cache" -) - -// PolicyController API -type PolicyController interface { - controllerinterfaces.PolicyGetter - controllerinterfaces.PolicyHandlers - Run(stopCh <-chan struct{}) -} - -//policyController for CRD -type policyController struct { - policyInformerFactory informers.SharedInformerFactory - policyLister lister.PolicyLister - policiesInterface policies.PolicyInterface - logger *log.Logger - violationBuilder violationinterfaces.ViolationGenerator - eventBuilder eventinterfaces.BuilderInternal -} - -// NewPolicyController from cmd args -func NewPolicyController(config *rest.Config, logger *log.Logger, kubeClient *kubeClient.KubeClient) (PolicyController, error) { - if logger == nil { - logger = log.New(os.Stdout, "Policy Controller: ", log.LstdFlags|log.Lshortfile) - } - - if config == nil { - return nil, errors.New("Client Config should be set for controller") - } - - policyClientset, err := clientset.NewForConfig(config) - if err != nil { - return nil, err - } - - policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, 0) - policyInformer := policyInformerFactory.Nirmata().V1alpha1().Policies() - - // generate Event 
builder - eventBuilder, err := event.NewEventBuilder(kubeClient, logger) - if err != nil { - return nil, err - } - - // generate Violation builer - violationBuilder, err := violation.NewViolationBuilder(kubeClient, eventBuilder, logger) - - controller := &policyController{ - policyInformerFactory: policyInformerFactory, - policyLister: policyInformer.Lister(), - policiesInterface: policyClientset.NirmataV1alpha1().Policies("default"), - logger: logger, - violationBuilder: violationBuilder, - eventBuilder: eventBuilder, - } - policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ - AddFunc: controller.CreatePolicyHandler, - UpdateFunc: controller.UpdatePolicyHandler, - DeleteFunc: controller.DeletePolicyHandler, - }) - // Set the controller - eventBuilder.SetController(controller) - violationBuilder.SetController(controller) - return controller, nil -} - -func (c *policyController) GetCacheInformerSync() cache.InformerSynced { - return c.policyInformerFactory.Nirmata().V1alpha1().Policies().Informer().HasSynced -} - -// Run is main controller thread -func (c *policyController) Run(stopCh <-chan struct{}) { - c.policyInformerFactory.Start(stopCh) - c.eventBuilder.Run(eventutils.EventWorkerThreadCount, stopCh) -} - -func (c *policyController) GetPolicies() ([]types.Policy, error) { - // Create nil Selector to grab all the policies - selector := labels.NewSelector() - cachedPolicies, err := c.policyLister.List(selector) - if err != nil { - c.logger.Printf("Error: %v", err) - return nil, err - } - - var policies []types.Policy - for _, elem := range cachedPolicies { - policies = append(policies, *elem.DeepCopy()) - } - - sort.Slice(policies, func(i, j int) bool { - return policies[i].CreationTimestamp.Time.Before(policies[j].CreationTimestamp.Time) - }) - return policies, nil -} - -// Writes error message to the policy logs in status section -func (c *policyController) LogPolicyError(name, text string) { - c.addPolicyLog(name, "[ERROR] "+text) -} - -// Writes info message to the policy logs in status section -func (c *policyController) LogPolicyInfo(name, text string) { - c.addPolicyLog(name, "[ INFO] "+text) -} - -// This is the maximum number of records that can be written to the log object of the policy. -// If this number is exceeded, the older entries will be deleted. -const policyLogMaxRecords int = 50 - -// Appends given log text to the status/logs array. 
-func (c *policyController) addPolicyLog(name, text string) { - getOptions := metav1.GetOptions{ - ResourceVersion: "1", - IncludeUninitialized: true, - } - policy, err := c.policiesInterface.Get(name, getOptions) - if err != nil { - c.logger.Printf("Unable to get policy %s: %s", name, err) - return - } - - // Add new log record - text = time.Now().Format("2006 Jan 02 15:04:05.999 ") + text - policy.Status.Logs = append(policy.Status.Logs, text) - // Pop front extra log records - logsCount := len(policy.Status.Logs) - if logsCount > policyLogMaxRecords { - policy.Status.Logs = policy.Status.Logs[logsCount-policyLogMaxRecords:] - } - // Save logs to policy object - _, err = c.policiesInterface.UpdateStatus(policy) - if err != nil { - c.logger.Printf("Unable to update logs for policy %s: %s", name, err) - } -} - -func (c *policyController) CreatePolicyHandler(resource interface{}) { - key := c.GetResourceKey(resource) - c.logger.Printf("Policy created: %s", key) -} - -func (c *policyController) UpdatePolicyHandler(oldResource, newResource interface{}) { - oldKey := c.GetResourceKey(oldResource) - newKey := c.GetResourceKey(newResource) - c.logger.Printf("Policy %s updated to %s", oldKey, newKey) -} - -func (c *policyController) DeletePolicyHandler(resource interface{}) { - key := c.GetResourceKey(resource) - c.logger.Printf("Policy deleted: %s", key) -} - -func (c *policyController) GetResourceKey(resource interface{}) string { - if key, err := cache.MetaNamespaceKeyFunc(resource); err != nil { - c.logger.Fatalf("Error retrieving policy key: %v", err) - } else { - return key - } - return "" -} -func (c *policyController) GetPolicy(name string) (*types.Policy, error) { - policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(name) - if err != nil { - utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", name)) - return nil, err - } - return c.getPolicyInterface(policyNamespace).Get(policyName) -} - -func (c *policyController) getPolicyInterface(namespace string) lister.PolicyNamespaceLister { - return c.policyLister.Policies(namespace) -} - -func (c *policyController) PatchPolicy(policy string, pt mergetypes.PatchType, data []byte) (*types.Policy, error) { - return c.policiesInterface.Patch(policy, pt, data) -} - -func (c *policyController) UpdatePolicyViolations(updatedPolicy *types.Policy) error { - _, err := c.policiesInterface.UpdateStatus(updatedPolicy) - return err -} diff --git a/controller/interfaces/controller_interfaces.go b/controller/interfaces/controller_interfaces.go deleted file mode 100755 index 8b3911bab2..0000000000 --- a/controller/interfaces/controller_interfaces.go +++ /dev/null @@ -1,24 +0,0 @@ -package interfaces - -import ( - policytypes "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - types "k8s.io/apimachinery/pkg/types" - "k8s.io/client-go/tools/cache" -) - -type PolicyGetter interface { - GetPolicies() ([]policytypes.Policy, error) - GetPolicy(name string) (*policytypes.Policy, error) - GetCacheInformerSync() cache.InformerSynced - PatchPolicy(policy string, pt types.PatchType, data []byte) (*policytypes.Policy, error) - UpdatePolicyViolations(updatedPolicy *policytypes.Policy) error - LogPolicyError(name, text string) - LogPolicyInfo(name, text string) -} - -type PolicyHandlers interface { - CreatePolicyHandler(resource interface{}) - UpdatePolicyHandler(oldResource, newResource interface{}) - DeletePolicyHandler(resource interface{}) - GetResourceKey(resource interface{}) string -} diff --git a/main.go b/main.go index 
6059b5019f..32e7ccf6c7 100644 --- a/main.go +++ b/main.go @@ -4,12 +4,17 @@ import ( "flag" "log" - "github.com/nirmata/kube-policy/controller" "github.com/nirmata/kube-policy/kubeclient" + "github.com/nirmata/kube-policy/policycontroller" "github.com/nirmata/kube-policy/server" "github.com/nirmata/kube-policy/webhooks" - signals "k8s.io/sample-controller/pkg/signals" + policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" + informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions" + violation "github.com/nirmata/kube-policy/pkg/violation" + + event "github.com/nirmata/kube-policy/pkg/event" + "k8s.io/sample-controller/pkg/signals" ) var ( @@ -29,12 +34,29 @@ func main() { log.Fatalf("Error creating kubeclient: %v\n", err) } - controller, err := controller.NewPolicyController(clientConfig, nil, kubeclient) + policyClientset, err := policyclientset.NewForConfig(clientConfig) if err != nil { - log.Fatalf("Error creating PolicyController: %s\n", err) + log.Fatalf("Error creating policyClient: %v\n", err) } - mutationWebhook, err := webhooks.CreateMutationWebhook(clientConfig, kubeclient, controller, nil) + //TODO wrap the policyInformer inside a factory + policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, 0) + policyInformer := policyInformerFactory.Nirmata().V1alpha1().Policies() + + eventController := event.NewEventController(kubeclient, policyInformer.Lister(), nil) + violationBuilder := violation.NewPolicyViolationBuilder(kubeclient, policyInformer.Lister(), policyClientset, eventController, nil) + + policyController := policycontroller.NewPolicyController(policyClientset, + policyInformer, + violationBuilder, + nil, + kubeclient) + + mutationWebhook, err := webhooks.CreateMutationWebhook(clientConfig, + kubeclient, + policyInformer.Lister(), + violationBuilder, + nil) if err != nil { log.Fatalf("Error creating mutation webhook: %v\n", err) } @@ -51,17 +73,17 @@ func main() { server.RunAsync() stopCh := signals.SetupSignalHandler() - controller.Run(stopCh) - - if err != nil { - log.Fatalf("Error running PolicyController: %s\n", err) + policyInformerFactory.Start(stopCh) + if err = eventController.Run(stopCh); err != nil { + log.Fatalf("Error running EventController: %v\n", err) + } + + if err = policyController.Run(stopCh); err != nil { + log.Fatalf("Error running PolicyController: %v\n", err) } - log.Println("Policy Controller has started") <-stopCh - server.Stop() - log.Println("Policy Controller has stopped") } func init() { diff --git a/pkg/event/builder.go b/pkg/event/builder.go deleted file mode 100644 index 93db5795ec..0000000000 --- a/pkg/event/builder.go +++ /dev/null @@ -1,161 +0,0 @@ -package event - -import ( - "errors" - "fmt" - "log" - "time" - - controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" - kubeClient "github.com/nirmata/kube-policy/kubeclient" - "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" - policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" - eventinterfaces "github.com/nirmata/kube-policy/pkg/event/interfaces" - utils "github.com/nirmata/kube-policy/pkg/event/utils" - v1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" - "k8s.io/client-go/tools/cache" - "k8s.io/client-go/tools/record" - "k8s.io/client-go/util/workqueue" -) - -type builder struct { - 
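One wiring note on the new main.go above: eventController.Run blocks on the stop channel (its <-stopCh appears later in this patch), so the policyController.Run call that follows it is only reached at shutdown. If both controllers are meant to run side by side, starting the event controller in its own goroutine is one option; a sketch of the tail of main using the names from the diff above, with error handling kept as-is:

	stopCh := signals.SetupSignalHandler()
	policyInformerFactory.Start(stopCh)

	// eventController.Run blocks on <-stopCh, so start it in its own goroutine;
	// otherwise policyController.Run below is only reached at shutdown
	go func() {
		if err := eventController.Run(stopCh); err != nil {
			log.Fatalf("Error running EventController: %v\n", err)
		}
	}()

	if err := policyController.Run(stopCh); err != nil {
		log.Fatalf("Error running PolicyController: %v\n", err)
	}

	<-stopCh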
kubeClient *kubeClient.KubeClient - controller controllerinterfaces.PolicyGetter - workqueue workqueue.RateLimitingInterface - recorder record.EventRecorder - logger *log.Logger - policySynced cache.InformerSynced -} - -type Builder interface { - eventinterfaces.BuilderInternal - SyncHandler(key utils.EventInfo) error - ProcessNextWorkItem() bool - RunWorker() -} - -func NewEventBuilder(kubeClient *kubeClient.KubeClient, - logger *log.Logger, -) (Builder, error) { - builder := &builder{ - kubeClient: kubeClient, - workqueue: initWorkqueue(), - recorder: initRecorder(kubeClient), - logger: logger, - } - - return builder, nil -} - -func initRecorder(kubeClient *kubeClient.KubeClient) record.EventRecorder { - // Initliaze Event Broadcaster - policyscheme.AddToScheme(scheme.Scheme) - eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(log.Printf) - eventBroadcaster.StartRecordingToSink( - &typedcorev1.EventSinkImpl{ - - Interface: kubeClient.GetEventsInterface("")}) - recorder := eventBroadcaster.NewRecorder( - scheme.Scheme, - v1.EventSource{Component: utils.EventSource}) - return recorder -} - -func initWorkqueue() workqueue.RateLimitingInterface { - return workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), utils.EventWorkQueueName) -} - -func (b *builder) SetController(controller controllerinterfaces.PolicyGetter) { - b.controller = controller - b.policySynced = controller.GetCacheInformerSync() -} - -func (b *builder) AddEvent(info utils.EventInfo) { - b.workqueue.Add(info) -} - -// Run : Initialize the worker routines to process the event creation -func (b *builder) Run(threadiness int, stopCh <-chan struct{}) error { - if b.controller == nil { - return errors.New("Controller has not be set") - } - defer utilruntime.HandleCrash() - defer b.workqueue.ShutDown() - log.Println("Starting violation builder") - - fmt.Println(("Wait for informer cache to sync")) - if ok := cache.WaitForCacheSync(stopCh, b.policySynced); !ok { - fmt.Println("Unable to sync the cache") - } - log.Println("Starting workers") - - for i := 0; i < threadiness; i++ { - go wait.Until(b.RunWorker, time.Second, stopCh) - } - log.Println("Started workers") - <-stopCh - log.Println("Shutting down workers") - return nil - -} - -func (b *builder) RunWorker() { - for b.ProcessNextWorkItem() { - } -} - -func (b *builder) ProcessNextWorkItem() bool { - obj, shutdown := b.workqueue.Get() - if shutdown { - return false - } - err := func(obj interface{}) error { - defer b.workqueue.Done(obj) - var key utils.EventInfo - var ok bool - if key, ok = obj.(utils.EventInfo); !ok { - b.workqueue.Forget(obj) - log.Printf("Expecting type info by got %v", obj) - return nil - } - - // Run the syncHandler, passing the resource and the policy - if err := b.SyncHandler(key); err != nil { - b.workqueue.AddRateLimited(key) - return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error()) - } - - return nil - }(obj) - - if err != nil { - log.Println((err)) - } - return true -} - -func (b *builder) SyncHandler(key utils.EventInfo) error { - var resource runtime.Object - var err error - switch key.Kind { - case "Policy": - resource, err = b.controller.GetPolicy(key.Resource) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to create event for policy %s, will retry ", key.Resource)) - return err - } - default: - resource, err = b.kubeClient.GetResource(key.Kind, key.Resource) - if err != nil { - utilruntime.HandleError(fmt.Errorf("unable to create 
event for resource %s, will retry ", key.Resource)) - return err - } - } - b.recorder.Event(resource, v1.EventTypeNormal, key.Reason, key.Message) - return nil -} diff --git a/pkg/event/eventcontroller.go b/pkg/event/eventcontroller.go new file mode 100644 index 0000000000..250712f5bd --- /dev/null +++ b/pkg/event/eventcontroller.go @@ -0,0 +1,169 @@ +package event + +import ( + "fmt" + "log" + "time" + + kubeClient "github.com/nirmata/kube-policy/kubeclient" + "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" + policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme" + policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" + v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/tools/record" + "k8s.io/client-go/util/workqueue" +) + +type eventController struct { + kubeClient *kubeClient.KubeClient + policyLister policylister.PolicyLister + queue workqueue.RateLimitingInterface + recorder record.EventRecorder + logger *log.Logger +} + +// EventGenertor to generate event +type EventGenerator interface { + Add(kind string, resource string, reason Reason, message EventMsg, args ...interface{}) +} +type EventController interface { + EventGenerator + Run(stopCh <-chan struct{}) error +} + +func NewEventController(kubeClient *kubeClient.KubeClient, + policyLister policylister.PolicyLister, + logger *log.Logger) EventController { + controller := &eventController{ + kubeClient: kubeClient, + policyLister: policyLister, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName), + recorder: initRecorder(kubeClient), + logger: logger, + } + return controller +} + +func initRecorder(kubeClient *kubeClient.KubeClient) record.EventRecorder { + // Initliaze Event Broadcaster + policyscheme.AddToScheme(scheme.Scheme) + eventBroadcaster := record.NewBroadcaster() + eventBroadcaster.StartLogging(log.Printf) + eventBroadcaster.StartRecordingToSink( + &typedcorev1.EventSinkImpl{ + Interface: kubeClient.GetEventsInterface("")}) + recorder := eventBroadcaster.NewRecorder( + scheme.Scheme, + v1.EventSource{Component: eventSource}) + return recorder +} + +func (eb *eventController) Add(kind string, resource string, reason Reason, message EventMsg, args ...interface{}) { + eb.queue.Add(eb.newEvent( + kind, + resource, + reason, + message, + )) +} + +// Run : Initialize the worker routines to process the event creation +func (eb *eventController) Run(stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer eb.queue.ShutDown() + + log.Println("starting eventbuilder controller") + + log.Println("Starting eventbuilder controller workers") + for i := 0; i < eventWorkerThreadCount; i++ { + go wait.Until(eb.runWorker, time.Second, stopCh) + } + log.Println("Started eventbuilder controller workers") + <-stopCh + log.Println("Shutting down eventbuilder controller workers") + return nil +} + +func (eb *eventController) runWorker() { + for eb.processNextWorkItem() { + } +} + +func (eb *eventController) processNextWorkItem() bool { + obj, shutdown := eb.queue.Get() + if shutdown { + return false + } + err := func(obj interface{}) error { + defer eb.queue.Done(obj) + var key eventInfo + var ok bool + if key, ok = obj.(eventInfo); !ok { + eb.queue.Forget(obj) + log.Printf("Expecting 
type info by got %v", obj) + return nil + } + // Run the syncHandler, passing the resource and the policy + if err := eb.SyncHandler(key); err != nil { + eb.queue.AddRateLimited(key) + return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error()) + } + return nil + }(obj) + + if err != nil { + log.Println((err)) + } + return true +} + +func (eb *eventController) SyncHandler(key eventInfo) error { + var resource runtime.Object + var err error + switch key.Kind { + case "Policy": + namespace, name, err := cache.SplitMetaNamespaceKey(key.Resource) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to extract namespace and name for %s", key.Resource)) + return err + } + resource, err = eb.policyLister.Policies(namespace).Get(name) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to create event for policy %s, will retry ", key.Resource)) + return err + } + default: + resource, err = eb.kubeClient.GetResource(key.Kind, key.Resource) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry ", key.Resource)) + return err + } + } + eb.recorder.Event(resource, v1.EventTypeNormal, key.Reason, key.Message) + return nil +} + +type eventInfo struct { + Kind string + Resource string + Reason string + Message string +} + +func (eb *eventController) newEvent(kind string, resource string, reason Reason, message EventMsg, args ...interface{}) eventInfo { + msgText, err := getEventMsg(message, args) + if err != nil { + utilruntime.HandleError(err) + } + return eventInfo{ + Kind: kind, + Resource: resource, + Reason: reason.String(), + Message: msgText, + } +} diff --git a/pkg/event/eventmsgbuilder.go b/pkg/event/eventmsgbuilder.go new file mode 100644 index 0000000000..39d95d82df --- /dev/null +++ b/pkg/event/eventmsgbuilder.go @@ -0,0 +1,45 @@ +package event + +import ( + "fmt" + "regexp" +) + +//Key to describe the event +type EventMsg int + +const ( + FResourcePolcy EventMsg = iota + FProcessRule + SPolicyApply + SRuleApply + FPolicyApplyBlockCreate + FPolicyApplyBlockUpdate + FPolicyApplyBlockUpdateRule +) + +func (k EventMsg) String() string { + return [...]string{ + "Failed to satisfy policy on resource %s.The following rules %s failed to apply. Created Policy Violation", + "Failed to process rule %s of policy %s. Created Policy Violation %s", + "Policy applied successfully on the resource %s", + "Rule %s of Policy %s applied successfull", + "Failed to apply policy, blocked creation of resource %s. The following rules %s failed to apply", + "Failed to apply rule %s of policy %s Blocked update of the resource", + "Failed to apply policy on resource %s.Blocked update of the resource. 
The following rules %s failed to apply", + }[k] +} + +const argRegex = "%[s,d,v]" + +//GetEventMsg return the application message based on the message id and the arguments, +// if the number of arguments passed to the message are incorrect generate an error +func getEventMsg(key EventMsg, args ...interface{}) (string, error) { + // Verify the number of arguments + re := regexp.MustCompile(argRegex) + argsCount := len(re.FindAllString(key.String(), -1)) + if argsCount != len(args) { + return "", fmt.Errorf("message expects %d arguments, but %d arguments passed", argsCount, len(args)) + } + return fmt.Sprintf(key.String(), args...), nil +} diff --git a/pkg/event/eventmsgbuilder_test.go b/pkg/event/eventmsgbuilder_test.go new file mode 100644 index 0000000000..dcedd1e377 --- /dev/null +++ b/pkg/event/eventmsgbuilder_test.go @@ -0,0 +1,23 @@ +package event + +import ( + "fmt" + "testing" + + "gotest.tools/assert" +) + +func TestPositive(t *testing.T) { + resourceName := "test_resource" + expectedMsg := fmt.Sprintf("Policy applied successfully on the resource %s", resourceName) + msg, err := getEventMsg(SPolicyApply, resourceName) + assert.NilError(t, err) + assert.Equal(t, expectedMsg, msg) +} + +// passing incorrect args +func TestIncorrectArgs(t *testing.T) { + resourceName := "test_resource" + _, err := getEventMsg(SPolicyApply, resourceName, "extra_args") + assert.Error(t, err, "message expects 1 arguments, but 2 arguments passed") +} diff --git a/pkg/event/interfaces/builder_interfaces.go b/pkg/event/interfaces/builder_interfaces.go deleted file mode 100644 index 94a685f719..0000000000 --- a/pkg/event/interfaces/builder_interfaces.go +++ /dev/null @@ -1,12 +0,0 @@ -package interfaces - -import ( - controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" - utils "github.com/nirmata/kube-policy/pkg/event/utils" -) - -type BuilderInternal interface { - SetController(controller controllerinterfaces.PolicyGetter) - Run(threadiness int, stopCh <-chan struct{}) error - AddEvent(info utils.EventInfo) -} diff --git a/pkg/event/reason.go b/pkg/event/reason.go new file mode 100644 index 0000000000..ceac4cb0d9 --- /dev/null +++ b/pkg/event/reason.go @@ -0,0 +1,21 @@ +package event + +//Reason types of Event Reasons +type Reason int + +const ( + //PolicyViolation there is a violation of policy + PolicyViolation Reason = iota + //PolicyApplied policy applied + PolicyApplied + //RequestBlocked the request to create/update the resource was blocked( generated from admission-controller) + RequestBlocked +) + +func (r Reason) String() string { + return [...]string{ + "PolicyViolation", + "PolicyApplied", + "RequestBlocked", + }[r] +} diff --git a/pkg/event/util.go b/pkg/event/util.go new file mode 100644 index 0000000000..27470b71e6 --- /dev/null +++ b/pkg/event/util.go @@ -0,0 +1,7 @@ +package event + +const eventSource = "policy-controller" + +const eventWorkQueueName = "policy-controller-events" + +const eventWorkerThreadCount = 1 diff --git a/pkg/event/utils/util.go b/pkg/event/utils/util.go deleted file mode 100644 index de21252a3f..0000000000 --- a/pkg/event/utils/util.go +++ /dev/null @@ -1,15 +0,0 @@ -package utils - -const EventSource = "policy-controller" - -const EventWorkQueueName = "policy-controller-events" - -type EventInfo struct { - Kind string - Resource string - Rule string - Reason string - Message string -} - -const EventWorkerThreadCount = 1 diff --git a/pkg/violation/builder.go b/pkg/violation/builder.go index 9011aa6b1b..c872ce4702 100644 --- 
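Two details in the event controller above interact badly with the argument counting in getEventMsg: Add drops its variadic args entirely, and newEvent passes the args slice as a single variadic value, so the verb-count check always sees one argument and fmt.Sprintf formats the whole slice. Forwarding with args... in both places keeps the check meaningful; a sketch reusing the types defined in this patch:

func (eb *eventController) Add(kind string, resource string, reason Reason, message EventMsg, args ...interface{}) {
	// forward args so the message format verbs can be filled in
	eb.queue.Add(eb.newEvent(kind, resource, reason, message, args...))
}

func (eb *eventController) newEvent(kind string, resource string, reason Reason, message EventMsg, args ...interface{}) eventInfo {
	// spread args so getEventMsg counts the caller's arguments, not the slice
	msgText, err := getEventMsg(message, args...)
	if err != nil {
		utilruntime.HandleError(err)
	}
	return eventInfo{
		Kind:     kind,
		Resource: resource,
		Reason:   reason.String(),
		Message:  msgText,
	}
}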
a/pkg/violation/builder.go +++ b/pkg/violation/builder.go @@ -1,60 +1,65 @@ package violation import ( - "encoding/json" "fmt" "log" - jsonpatch "github.com/evanphx/json-patch" - controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeClient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - eventinterfaces "github.com/nirmata/kube-policy/pkg/event/interfaces" - eventutils "github.com/nirmata/kube-policy/pkg/event/utils" - violationinterfaces "github.com/nirmata/kube-policy/pkg/violation/interfaces" - utils "github.com/nirmata/kube-policy/pkg/violation/utils" - mergetypes "k8s.io/apimachinery/pkg/types" + policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" + policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" + event "github.com/nirmata/kube-policy/pkg/event" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/tools/cache" ) -type builder struct { - kubeClient *kubeClient.KubeClient - controller controllerinterfaces.PolicyGetter - eventBuilder eventinterfaces.BuilderInternal - logger *log.Logger +type PolicyViolationGenerator interface { + Add(info ViolationInfo) error } -type Builder interface { - violationinterfaces.ViolationGenerator - ProcessViolation(info utils.ViolationInfo) error - Patch(policy *types.Policy, updatedPolicy *types.Policy) error - IsActive(kind string, resource string) (bool, error) +type policyViolationBuilder struct { + kubeClient *kubeClient.KubeClient + policyLister policylister.PolicyLister + policyInterface policyclientset.Interface + eventBuilder event.EventGenerator + logger *log.Logger } -func NewViolationBuilder( +type PolicyViolationBuilder interface { + PolicyViolationGenerator + processViolation(info ViolationInfo) error + isActive(kind string, resource string) (bool, error) +} + +func NewPolicyViolationBuilder( kubeClient *kubeClient.KubeClient, - eventBuilder eventinterfaces.BuilderInternal, - logger *log.Logger) (Builder, error) { + policyLister policylister.PolicyLister, + policyInterface policyclientset.Interface, + eventController event.EventGenerator, + logger *log.Logger) PolicyViolationBuilder { - builder := &builder{ - kubeClient: kubeClient, - eventBuilder: eventBuilder, - logger: logger, + builder := &policyViolationBuilder{ + kubeClient: kubeClient, + policyLister: policyLister, + policyInterface: policyInterface, + eventBuilder: eventController, + logger: logger, } - return builder, nil + return builder } -func (b *builder) Create(info utils.ViolationInfo) error { - return b.ProcessViolation(info) +func (pvb *policyViolationBuilder) Add(info ViolationInfo) error { + return pvb.processViolation(info) } -func (b *builder) SetController(controller controllerinterfaces.PolicyGetter) { - b.controller = controller -} - -func (b *builder) ProcessViolation(info utils.ViolationInfo) error { +func (pvb *policyViolationBuilder) processViolation(info ViolationInfo) error { // Get the policy - policy, err := b.controller.GetPolicy(info.Policy) + namespace, name, err := cache.SplitMetaNamespaceKey(info.Policy) + if err != nil { + utilruntime.HandleError(fmt.Errorf("unable to extract namespace and name for %s", info.Policy)) + return err + } + policy, err := pvb.policyLister.Policies(namespace).Get(name) if err != nil { utilruntime.HandleError(err) return err @@ -72,63 +77,34 @@ func (b *builder) ProcessViolation(info utils.ViolationInfo) error { } for _, violation := range 
modifiedPolicy.Status.Violations { - ok, err := b.IsActive(info.Kind, violation.Resource) + ok, err := pvb.isActive(info.Kind, violation.Resource) if err != nil { utilruntime.HandleError(err) continue } if !ok { - // Remove the violation - // Create a removal event - b.eventBuilder.AddEvent(eventutils.EventInfo{ - Kind: "Policy", - Resource: info.Policy, - Rule: info.Rule, - Reason: info.Reason, - Message: info.Message, - }) - continue + pvb.logger.Printf("removed violation ") } - // If violation already exists for this rule, we update the violation - //TODO: update violation, instead of re-creating one every time } + // If violation already exists for this rule, we update the violation + //TODO: update violation, instead of re-creating one every time modifiedViolations = append(modifiedViolations, newViolation) modifiedPolicy.Status.Violations = modifiedViolations - // return b.Patch(policy, modifiedPolicy) // Violations are part of the status sub resource, so we can use the Update Status api instead of updating the policy object - return b.controller.UpdatePolicyViolations(modifiedPolicy) + _, err = pvb.policyInterface.NirmataV1alpha1().Policies(namespace).UpdateStatus(modifiedPolicy) + if err != nil { + return err + } + return nil } -func (b *builder) IsActive(kind string, resource string) (bool, error) { +func (pvb *policyViolationBuilder) isActive(kind string, resource string) (bool, error) { // Generate Merge Patch - _, err := b.kubeClient.GetResource(kind, resource) + _, err := pvb.kubeClient.GetResource(kind, resource) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to get resource %s ", resource)) return false, err } return true, nil } - -func (b *builder) Patch(policy *types.Policy, updatedPolicy *types.Policy) error { - originalData, err := json.Marshal(policy) - if err != nil { - return err - } - modifiedData, err := json.Marshal(updatedPolicy) - if err != nil { - return err - } - // generate merge patch - patchBytes, err := jsonpatch.CreateMergePatch(originalData, modifiedData) - if err != nil { - return err - } - _, err = b.controller.PatchPolicy(policy.Name, mergetypes.MergePatchType, patchBytes) - if err != nil { - - // Unable to patch - return err - } - return nil -} diff --git a/pkg/violation/interfaces/violation_interfaces.go b/pkg/violation/interfaces/violation_interfaces.go deleted file mode 100644 index f74cd28c6f..0000000000 --- a/pkg/violation/interfaces/violation_interfaces.go +++ /dev/null @@ -1,11 +0,0 @@ -package interfaces - -import ( - controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" - utils "github.com/nirmata/kube-policy/pkg/violation/utils" -) - -type ViolationGenerator interface { - SetController(controller controllerinterfaces.PolicyGetter) - Create(info utils.ViolationInfo) error -} diff --git a/pkg/violation/util.go b/pkg/violation/util.go index 00ad937cfa..406a8a707d 100644 --- a/pkg/violation/util.go +++ b/pkg/violation/util.go @@ -1,5 +1,7 @@ package violation +import policytype "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + // Source for the events recorder const violationEventSource = "policy-controller" @@ -9,11 +11,7 @@ const workqueueViolationName = "Policy-Violations" // Event Reason const violationEventResrouce = "Violation" -// Info input details -type Info struct { - Kind string - Resource string - Policy string - RuleName string - Reason string +type ViolationInfo struct { + Policy string + policytype.Violation } diff --git a/pkg/violation/utils/util.go b/pkg/violation/utils/util.go deleted 
file mode 100644 index 1d3db344f4..0000000000 --- a/pkg/violation/utils/util.go +++ /dev/null @@ -1,8 +0,0 @@ -package utils - -import policytype "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" - -type ViolationInfo struct { - Policy string - policytype.Violation -} diff --git a/policycontroller/policycontroller.go b/policycontroller/policycontroller.go new file mode 100644 index 0000000000..c13c8755b9 --- /dev/null +++ b/policycontroller/policycontroller.go @@ -0,0 +1,194 @@ +package policycontroller + +import ( + "fmt" + "log" + "time" + + kubeClient "github.com/nirmata/kube-policy/kubeclient" + types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" + infomertypes "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1" + lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" + violation "github.com/nirmata/kube-policy/pkg/violation" + "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/tools/cache" + "k8s.io/client-go/util/workqueue" +) + +//PolicyController for CRD +type PolicyController struct { + kubeClient *kubeClient.KubeClient + policyLister lister.PolicyLister + policyInterface policyclientset.Interface + policySynced cache.InformerSynced + violationBuilder violation.PolicyViolationGenerator + logger *log.Logger + queue workqueue.RateLimitingInterface +} + +// NewPolicyController from cmd args +func NewPolicyController(policyInterface policyclientset.Interface, + policyInformer infomertypes.PolicyInformer, + violationBuilder violation.PolicyViolationGenerator, + logger *log.Logger, + kubeClient *kubeClient.KubeClient) *PolicyController { + + controller := &PolicyController{ + kubeClient: kubeClient, + policyLister: policyInformer.Lister(), + policyInterface: policyInterface, + policySynced: policyInformer.Informer().HasSynced, + violationBuilder: violationBuilder, + logger: logger, + queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), policyWorkQueueName), + //TODO Event Builder, this will used to record events with policy cannot be processed, using eventBuilder as we can restrict the event types + } + + policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ + AddFunc: controller.createPolicyHandler, + UpdateFunc: controller.updatePolicyHandler, + DeleteFunc: controller.deletePolicyHandler, + }) + return controller +} + +func (pc *PolicyController) createPolicyHandler(resource interface{}) { + pc.enqueuePolicy(resource) +} + +func (pc *PolicyController) updatePolicyHandler(oldResource, newResource interface{}) { + newPolicy := newResource.(*types.Policy) + oldPolicy := oldResource.(*types.Policy) + if newPolicy.ResourceVersion == oldPolicy.ResourceVersion { + return + } + pc.enqueuePolicy(newResource) +} +func (pc *PolicyController) deletePolicyHandler(resource interface{}) { + var object metav1.Object + var ok bool + if object, ok = resource.(metav1.Object); !ok { + utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type")) + return + } + pc.logger.Printf("policy deleted: %s", object.GetName()) +} + +func (pc *PolicyController) enqueuePolicy(obj interface{}) { + var key string + var err error + if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil { + utilruntime.HandleError(err) + return + } + pc.queue.Add(key) +} 
+ +// Run is main controller thread +func (pc *PolicyController) Run(stopCh <-chan struct{}) error { + defer utilruntime.HandleCrash() + defer pc.queue.ShutDown() + + pc.logger.Printf("starting policy controller") + + pc.logger.Printf("waiting for infomer caches to sync") + if ok := cache.WaitForCacheSync(stopCh, pc.policySynced); !ok { + return fmt.Errorf("failed to wait for caches to sync") + } + + pc.logger.Println("starting policy controller workers") + for i := 0; i < policyControllerWorkerCount; i++ { + go wait.Until(pc.runWorker, time.Second, stopCh) + } + + pc.logger.Println("started policy controller workers") + <-stopCh + pc.logger.Println("shutting down policy controller workers") + return nil +} + +// runWorker is a long-running function that will continually call the +// processNextWorkItem function in order to read and process a message on the +// workqueue. +func (pc *PolicyController) runWorker() { + for pc.processNextWorkItem() { + } +} + +// processNextWorkItem will read a single work item off the workqueue and +// attempt to process it, by calling the syncHandler. +func (pc *PolicyController) processNextWorkItem() bool { + obj, shutdown := pc.queue.Get() + if shutdown { + return false + } + + err := func(obj interface{}) error { + defer pc.queue.Done(obj) + err := pc.syncHandler(obj) + pc.handleErr(err, obj) + return nil + }(obj) + if err != nil { + utilruntime.HandleError(err) + return true + } + return true +} + +func (pc *PolicyController) handleErr(err error, key interface{}) { + if err == nil { + pc.queue.Forget(key) + return + } + + // This controller retries 5 times if something goes wrong. After that, it stops trying. + if pc.queue.NumRequeues(key) < policyWorkQueueRetryLimit { + + pc.logger.Printf("Error syncing events %v: %v", key, err) + + // Re-enqueue the key rate limited. Based on the rate limiter on the + // queue and the re-enqueue history, the key will be processed later again. 
+ pc.queue.AddRateLimited(key) + return + } + + pc.queue.Forget(key) + // Report to an external entity that, even after several retries, we could not successfully process this key + utilruntime.HandleError(err) + pc.logger.Printf("Dropping the key %q out of the queue: %v", key, err) +} + +func (pc *PolicyController) syncHandler(obj interface{}) error { + var key string + var ok bool + if key, ok = obj.(string); !ok { + return fmt.Errorf("expected string in workqueue but got %#v", obj) + } + // convert the namespace/name string into distinct namespace and name + namespace, name, err := cache.SplitMetaNamespaceKey(key) + if err != nil { + utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + return nil + } + + // Get Policy resource with namespace/name + policy, err := pc.policyLister.Policies(namespace).Get(name) + if err != nil { + if errors.IsNotFound(err) { + utilruntime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key)) + return nil + } + return err + } + // process policy on existing resource + // get the violations and pass to violation Builder + // get the events and pass to event Builder + fmt.Println(policy) + return nil +} diff --git a/controller/controller_test.go b/policycontroller/policycontroller_test.go similarity index 99% rename from controller/controller_test.go rename to policycontroller/policycontroller_test.go index d7eec6df8c..b4f513bc87 100644 --- a/controller/controller_test.go +++ b/policycontroller/policycontroller_test.go @@ -1,9 +1,10 @@ -package controller_test +package policycontroller import ( - "gotest.tools/assert" "testing" + "gotest.tools/assert" + types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) diff --git a/policycontroller/utils.go b/policycontroller/utils.go new file mode 100644 index 0000000000..22f11696a1 --- /dev/null +++ b/policycontroller/utils.go @@ -0,0 +1,7 @@ +package policycontroller + +const policyWorkQueueName = "policyworkqueue" + +const policyWorkQueueRetryLimit = 5 + +const policyControllerWorkerCount = 2 diff --git a/webhooks/mutation.go b/webhooks/mutation.go index ef0b4c6b72..ca7fca63c3 100644 --- a/webhooks/mutation.go +++ b/webhooks/mutation.go @@ -5,13 +5,17 @@ import ( "fmt" "log" "os" + "sort" - controllerinterfaces "github.com/nirmata/kube-policy/controller/interfaces" kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" + policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" mutation "github.com/nirmata/kube-policy/pkg/mutation" + violation "github.com/nirmata/kube-policy/pkg/violation" v1beta1 "k8s.io/api/admission/v1beta1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" utilruntime "k8s.io/apimachinery/pkg/util/runtime" rest "k8s.io/client-go/rest" ) @@ -19,15 +23,21 @@ import ( // MutationWebhook is a data type that represents // business logic for resource mutation type MutationWebhook struct { - kubeclient *kubeclient.KubeClient - controller controllerinterfaces.PolicyGetter - registration *MutationWebhookRegistration - logger *log.Logger + kubeclient *kubeclient.KubeClient + policyLister policylister.PolicyLister + registration *MutationWebhookRegistration + violationBuilder violation.PolicyViolationGenerator + logger *log.Logger } // Registers mutation webhook in cluster and creates object for this webhook -func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, 
controller controllerinterfaces.PolicyGetter, logger *log.Logger) (*MutationWebhook, error) { - if clientConfig == nil || kubeclient == nil || controller == nil { +func CreateMutationWebhook( + clientConfig *rest.Config, + kubeclient *kubeclient.KubeClient, + policyLister policylister.PolicyLister, + violationBuilder violation.PolicyViolationGenerator, + logger *log.Logger) (*MutationWebhook, error) { + if clientConfig == nil || kubeclient == nil { return nil, errors.New("Some parameters are not set") } @@ -45,19 +55,40 @@ func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.Kub logger = log.New(os.Stdout, "Mutation WebHook: ", log.LstdFlags|log.Lshortfile) } return &MutationWebhook{ - kubeclient: kubeclient, - controller: controller, - registration: registration, - logger: logger, + kubeclient: kubeclient, + policyLister: policyLister, + registration: registration, + violationBuilder: violationBuilder, + logger: logger, }, nil } +func (mw *MutationWebhook) getPolicies() ([]types.Policy, error) { + selector := labels.NewSelector() + cachedPolicies, err := mw.policyLister.List(selector) + if err != nil { + mw.logger.Printf("Error: %v", err) + return nil, err + } + + var policies []types.Policy + for _, elem := range cachedPolicies { + policies = append(policies, *elem.DeepCopy()) + } + + sort.Slice(policies, func(i, j int) bool { + return policies[i].CreationTimestamp.Time.Before(policies[j].CreationTimestamp.Time) + }) + return policies, nil + +} + // Mutate applies admission to request func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse { mw.logger.Printf("AdmissionReview for Kind=%v, Namespace=%v Name=%v UID=%v patchOperation=%v UserInfo=%v", request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation, request.UserInfo) - policies, err := mw.controller.GetPolicies() + policies, err := mw.getPolicies() if err != nil { utilruntime.HandleError(err) return nil @@ -72,7 +103,7 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad policyPatches, err := mw.applyPolicyRules(request, policy) if err != nil { - mw.controller.LogPolicyError(policy.Name, err.Error()) + //TODO Log Policy Error errStr := fmt.Sprintf("Unable to apply policy %s: %v", policy.Name, err) mw.logger.Printf("Denying the request because of error: %s", errStr) @@ -82,7 +113,7 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad if len(policyPatches) > 0 { namespace := mutation.ParseNamespaceFromObject(request.Object.Raw) name := mutation.ParseNameFromObject(request.Object.Raw) - mw.controller.LogPolicyInfo(policy.Name, fmt.Sprintf("Applied to %s %s/%s", request.Kind.Kind, namespace, name)) + //TODO Log Policy Info mw.logger.Printf("%s applied to %s %s/%s", policy.Name, request.Kind.Kind, namespace, name) allPatches = append(allPatches, policyPatches...) 
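
Both controllers introduced above (the event builder and the standalone policycontroller) follow the standard client-go rate-limited workqueue pattern: enqueue a namespace/name key, drain it in a worker, and retry a bounded number of times before dropping the key. The snippet below is a minimal, self-contained sketch of that pattern and is not part of the patch; syncKey, the queue name, and maxRetries are placeholders standing in for syncHandler, policyWorkQueueName, and policyWorkQueueRetryLimit.

package main

import (
	"log"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
)

// maxRetries stands in for policyWorkQueueRetryLimit.
const maxRetries = 5

// syncKey stands in for the controller's syncHandler (reconcile logic).
func syncKey(key string) error {
	log.Printf("processing %s", key)
	return nil
}

func runWorker(queue workqueue.RateLimitingInterface) {
	for {
		obj, shutdown := queue.Get()
		if shutdown {
			return
		}
		func(obj interface{}) {
			// Done marks the item as finished so it can be re-queued later if needed.
			defer queue.Done(obj)
			key, ok := obj.(string)
			if !ok {
				queue.Forget(obj)
				return
			}
			if err := syncKey(key); err != nil {
				if queue.NumRequeues(obj) < maxRetries {
					// Retry later, subject to the queue's rate limiter.
					queue.AddRateLimited(obj)
					return
				}
				// Retry budget exhausted: drop the key.
				queue.Forget(obj)
				log.Printf("dropping %q: %v", key, err)
				return
			}
			// Success: clear the rate-limiter history for this key.
			queue.Forget(obj)
		}(obj)
	}
}

func main() {
	queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example-policy-queue")
	defer queue.ShutDown()

	stopCh := make(chan struct{})
	go wait.Until(func() { runWorker(queue) }, time.Second, stopCh)

	queue.Add("default/sample-policy")
	time.Sleep(2 * time.Second)
	close(stopCh)
}

Calling Forget on success (or when giving up) is what resets the rate-limiter history for a key; skipping it makes every later retry of that key back off more aggressively.
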
From 04f5716f7ba2627d6351bc878aaea013cd9bd148 Mon Sep 17 00:00:00 2001 From: shivdudhani Date: Fri, 10 May 2019 10:38:38 -0700 Subject: [PATCH 16/17] rename internal components --- main.go | 6 +- pkg/event/eventcontroller.go | 63 +++++++++---------- pkg/event/eventmsgbuilder.go | 17 +---- pkg/event/util.go | 20 ++++++ pkg/{violation => policyviolation}/builder.go | 39 ++++++------ pkg/{violation => policyviolation}/util.go | 3 +- policycontroller/policycontroller.go | 32 ++++------ webhooks/mutation.go | 10 ++- 8 files changed, 98 insertions(+), 92 deletions(-) rename pkg/{violation => policyviolation}/builder.go (72%) rename pkg/{violation => policyviolation}/util.go (83%) diff --git a/main.go b/main.go index 32e7ccf6c7..716d79f18b 100644 --- a/main.go +++ b/main.go @@ -11,7 +11,7 @@ import ( policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions" - violation "github.com/nirmata/kube-policy/pkg/violation" + policyviolation "github.com/nirmata/kube-policy/pkg/policyviolation" event "github.com/nirmata/kube-policy/pkg/event" "k8s.io/sample-controller/pkg/signals" @@ -44,11 +44,12 @@ func main() { policyInformer := policyInformerFactory.Nirmata().V1alpha1().Policies() eventController := event.NewEventController(kubeclient, policyInformer.Lister(), nil) - violationBuilder := violation.NewPolicyViolationBuilder(kubeclient, policyInformer.Lister(), policyClientset, eventController, nil) + violationBuilder := policyviolation.NewPolicyViolationBuilder(kubeclient, policyInformer.Lister(), policyClientset, eventController, nil) policyController := policycontroller.NewPolicyController(policyClientset, policyInformer, violationBuilder, + eventController, nil, kubeclient) @@ -56,6 +57,7 @@ func main() { kubeclient, policyInformer.Lister(), violationBuilder, + eventController, nil) if err != nil { log.Fatalf("Error creating mutation webhook: %v\n", err) diff --git a/pkg/event/eventcontroller.go b/pkg/event/eventcontroller.go index 250712f5bd..6033d398b7 100644 --- a/pkg/event/eventcontroller.go +++ b/pkg/event/eventcontroller.go @@ -19,7 +19,7 @@ import ( "k8s.io/client-go/util/workqueue" ) -type eventController struct { +type controller struct { kubeClient *kubeClient.KubeClient policyLister policylister.PolicyLister queue workqueue.RateLimitingInterface @@ -27,19 +27,22 @@ type eventController struct { logger *log.Logger } -// EventGenertor to generate event -type EventGenerator interface { - Add(kind string, resource string, reason Reason, message EventMsg, args ...interface{}) +//Generator to generate event +type Generator interface { + Add(kind string, resource string, reason Reason, message MsgKey, args ...interface{}) } -type EventController interface { - EventGenerator + +//Controller api +type Controller interface { + Generator Run(stopCh <-chan struct{}) error } +//NewEventController to generate a new event controller func NewEventController(kubeClient *kubeClient.KubeClient, policyLister policylister.PolicyLister, - logger *log.Logger) EventController { - controller := &eventController{ + logger *log.Logger) Controller { + controller := &controller{ kubeClient: kubeClient, policyLister: policyLister, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName), @@ -63,8 +66,8 @@ func initRecorder(kubeClient *kubeClient.KubeClient) record.EventRecorder { return recorder } -func (eb *eventController) Add(kind string, resource string, reason 
Reason, message EventMsg, args ...interface{}) { - eb.queue.Add(eb.newEvent( +func (c *controller) Add(kind string, resource string, reason Reason, message MsgKey, args ...interface{}) { + c.queue.Add(c.newEvent( kind, resource, reason, @@ -72,16 +75,15 @@ func (eb *eventController) Add(kind string, resource string, reason Reason, mess )) } -// Run : Initialize the worker routines to process the event creation -func (eb *eventController) Run(stopCh <-chan struct{}) error { +func (c *controller) Run(stopCh <-chan struct{}) error { defer utilruntime.HandleCrash() - defer eb.queue.ShutDown() + defer c.queue.ShutDown() log.Println("starting eventbuilder controller") log.Println("Starting eventbuilder controller workers") for i := 0; i < eventWorkerThreadCount; i++ { - go wait.Until(eb.runWorker, time.Second, stopCh) + go wait.Until(c.runWorker, time.Second, stopCh) } log.Println("Started eventbuilder controller workers") <-stopCh @@ -89,28 +91,28 @@ func (eb *eventController) Run(stopCh <-chan struct{}) error { return nil } -func (eb *eventController) runWorker() { - for eb.processNextWorkItem() { +func (c *controller) runWorker() { + for c.processNextWorkItem() { } } -func (eb *eventController) processNextWorkItem() bool { - obj, shutdown := eb.queue.Get() +func (c *controller) processNextWorkItem() bool { + obj, shutdown := c.queue.Get() if shutdown { return false } err := func(obj interface{}) error { - defer eb.queue.Done(obj) + defer c.queue.Done(obj) var key eventInfo var ok bool if key, ok = obj.(eventInfo); !ok { - eb.queue.Forget(obj) + c.queue.Forget(obj) log.Printf("Expecting type info by got %v", obj) return nil } // Run the syncHandler, passing the resource and the policy - if err := eb.SyncHandler(key); err != nil { - eb.queue.AddRateLimited(key) + if err := c.SyncHandler(key); err != nil { + c.queue.AddRateLimited(key) return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error()) } return nil @@ -122,7 +124,7 @@ func (eb *eventController) processNextWorkItem() bool { return true } -func (eb *eventController) SyncHandler(key eventInfo) error { +func (c *controller) SyncHandler(key eventInfo) error { var resource runtime.Object var err error switch key.Kind { @@ -132,30 +134,23 @@ func (eb *eventController) SyncHandler(key eventInfo) error { utilruntime.HandleError(fmt.Errorf("unable to extract namespace and name for %s", key.Resource)) return err } - resource, err = eb.policyLister.Policies(namespace).Get(name) + resource, err = c.policyLister.Policies(namespace).Get(name) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to create event for policy %s, will retry ", key.Resource)) return err } default: - resource, err = eb.kubeClient.GetResource(key.Kind, key.Resource) + resource, err = c.kubeClient.GetResource(key.Kind, key.Resource) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry ", key.Resource)) return err } } - eb.recorder.Event(resource, v1.EventTypeNormal, key.Reason, key.Message) + c.recorder.Event(resource, v1.EventTypeNormal, key.Reason, key.Message) return nil } -type eventInfo struct { - Kind string - Resource string - Reason string - Message string -} - -func (eb *eventController) newEvent(kind string, resource string, reason Reason, message EventMsg, args ...interface{}) eventInfo { +func (c *controller) newEvent(kind string, resource string, reason Reason, message MsgKey, args ...interface{}) eventInfo { msgText, err := getEventMsg(message, args) if err != 
nil { utilruntime.HandleError(err) diff --git a/pkg/event/eventmsgbuilder.go b/pkg/event/eventmsgbuilder.go index 39d95d82df..1e06c3c5a4 100644 --- a/pkg/event/eventmsgbuilder.go +++ b/pkg/event/eventmsgbuilder.go @@ -5,20 +5,7 @@ import ( "regexp" ) -//Key to describe the event -type EventMsg int - -const ( - FResourcePolcy EventMsg = iota - FProcessRule - SPolicyApply - SRuleApply - FPolicyApplyBlockCreate - FPolicyApplyBlockUpdate - FPolicyApplyBlockUpdateRule -) - -func (k EventMsg) String() string { +func (k MsgKey) String() string { return [...]string{ "Failed to satisfy policy on resource %s.The following rules %s failed to apply. Created Policy Violation", "Failed to process rule %s of policy %s. Created Policy Violation %s", @@ -34,7 +21,7 @@ const argRegex = "%[s,d,v]" //GetEventMsg return the application message based on the message id and the arguments, // if the number of arguments passed to the message are incorrect generate an error -func getEventMsg(key EventMsg, args ...interface{}) (string, error) { +func getEventMsg(key MsgKey, args ...interface{}) (string, error) { // Verify the number of arguments re := regexp.MustCompile(argRegex) argsCount := len(re.FindAllString(key.String(), -1)) diff --git a/pkg/event/util.go b/pkg/event/util.go index 27470b71e6..b56bad7f67 100644 --- a/pkg/event/util.go +++ b/pkg/event/util.go @@ -5,3 +5,23 @@ const eventSource = "policy-controller" const eventWorkQueueName = "policy-controller-events" const eventWorkerThreadCount = 1 + +type eventInfo struct { + Kind string + Resource string + Reason string + Message string +} + +//MsgKey is an identified to determine the preset message formats +type MsgKey int + +const ( + FResourcePolcy MsgKey = iota + FProcessRule + SPolicyApply + SRuleApply + FPolicyApplyBlockCreate + FPolicyApplyBlockUpdate + FPolicyApplyBlockUpdateRule +) diff --git a/pkg/violation/builder.go b/pkg/policyviolation/builder.go similarity index 72% rename from pkg/violation/builder.go rename to pkg/policyviolation/builder.go index c872ce4702..043a87bee4 100644 --- a/pkg/violation/builder.go +++ b/pkg/policyviolation/builder.go @@ -1,4 +1,4 @@ -package violation +package policyviolation import ( "fmt" @@ -13,32 +13,35 @@ import ( "k8s.io/client-go/tools/cache" ) -type PolicyViolationGenerator interface { +//Generator to generate policy violation +type Generator interface { Add(info ViolationInfo) error } -type policyViolationBuilder struct { +type builder struct { kubeClient *kubeClient.KubeClient policyLister policylister.PolicyLister policyInterface policyclientset.Interface - eventBuilder event.EventGenerator + eventBuilder event.Generator logger *log.Logger } -type PolicyViolationBuilder interface { - PolicyViolationGenerator +//Builder is to build policy violations +type Builder interface { + Generator processViolation(info ViolationInfo) error isActive(kind string, resource string) (bool, error) } +//NewPolicyViolationBuilder returns new violation builder func NewPolicyViolationBuilder( kubeClient *kubeClient.KubeClient, policyLister policylister.PolicyLister, policyInterface policyclientset.Interface, - eventController event.EventGenerator, - logger *log.Logger) PolicyViolationBuilder { + eventController event.Generator, + logger *log.Logger) Builder { - builder := &policyViolationBuilder{ + builder := &builder{ kubeClient: kubeClient, policyLister: policyLister, policyInterface: policyInterface, @@ -48,18 +51,18 @@ func NewPolicyViolationBuilder( return builder } -func (pvb *policyViolationBuilder) Add(info 
ViolationInfo) error { - return pvb.processViolation(info) +func (b *builder) Add(info ViolationInfo) error { + return b.processViolation(info) } -func (pvb *policyViolationBuilder) processViolation(info ViolationInfo) error { +func (b *builder) processViolation(info ViolationInfo) error { // Get the policy namespace, name, err := cache.SplitMetaNamespaceKey(info.Policy) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to extract namespace and name for %s", info.Policy)) return err } - policy, err := pvb.policyLister.Policies(namespace).Get(name) + policy, err := b.policyLister.Policies(namespace).Get(name) if err != nil { utilruntime.HandleError(err) return err @@ -77,13 +80,13 @@ func (pvb *policyViolationBuilder) processViolation(info ViolationInfo) error { } for _, violation := range modifiedPolicy.Status.Violations { - ok, err := pvb.isActive(info.Kind, violation.Resource) + ok, err := b.isActive(info.Kind, violation.Resource) if err != nil { utilruntime.HandleError(err) continue } if !ok { - pvb.logger.Printf("removed violation ") + b.logger.Printf("removed violation") } } // If violation already exists for this rule, we update the violation @@ -92,16 +95,16 @@ func (pvb *policyViolationBuilder) processViolation(info ViolationInfo) error { modifiedPolicy.Status.Violations = modifiedViolations // Violations are part of the status sub resource, so we can use the Update Status api instead of updating the policy object - _, err = pvb.policyInterface.NirmataV1alpha1().Policies(namespace).UpdateStatus(modifiedPolicy) + _, err = b.policyInterface.NirmataV1alpha1().Policies(namespace).UpdateStatus(modifiedPolicy) if err != nil { return err } return nil } -func (pvb *policyViolationBuilder) isActive(kind string, resource string) (bool, error) { +func (b *builder) isActive(kind string, resource string) (bool, error) { // Generate Merge Patch - _, err := pvb.kubeClient.GetResource(kind, resource) + _, err := b.kubeClient.GetResource(kind, resource) if err != nil { utilruntime.HandleError(fmt.Errorf("unable to get resource %s ", resource)) return false, err diff --git a/pkg/violation/util.go b/pkg/policyviolation/util.go similarity index 83% rename from pkg/violation/util.go rename to pkg/policyviolation/util.go index 406a8a707d..8cfaaf99b3 100644 --- a/pkg/violation/util.go +++ b/pkg/policyviolation/util.go @@ -1,4 +1,4 @@ -package violation +package policyviolation import policytype "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" @@ -11,6 +11,7 @@ const workqueueViolationName = "Policy-Violations" // Event Reason const violationEventResrouce = "Violation" +//ViolationInfo describes the policyviolation details type ViolationInfo struct { Policy string policytype.Violation diff --git a/policycontroller/policycontroller.go b/policycontroller/policycontroller.go index c13c8755b9..921c88e76c 100644 --- a/policycontroller/policycontroller.go +++ b/policycontroller/policycontroller.go @@ -10,23 +10,25 @@ import ( policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned" infomertypes "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1" lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" - violation "github.com/nirmata/kube-policy/pkg/violation" + event "github.com/nirmata/kube-policy/pkg/event" + policyviolation "github.com/nirmata/kube-policy/pkg/policyviolation" + "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" 
"k8s.io/apimachinery/pkg/util/wait" "k8s.io/client-go/tools/cache" "k8s.io/client-go/util/workqueue" ) -//PolicyController for CRD +//PolicyController to manage Policy CRD type PolicyController struct { kubeClient *kubeClient.KubeClient policyLister lister.PolicyLister policyInterface policyclientset.Interface policySynced cache.InformerSynced - violationBuilder violation.PolicyViolationGenerator + violationBuilder policyviolation.Generator + eventBuilder event.Generator logger *log.Logger queue workqueue.RateLimitingInterface } @@ -34,7 +36,8 @@ type PolicyController struct { // NewPolicyController from cmd args func NewPolicyController(policyInterface policyclientset.Interface, policyInformer infomertypes.PolicyInformer, - violationBuilder violation.PolicyViolationGenerator, + violationBuilder policyviolation.Generator, + eventController event.Generator, logger *log.Logger, kubeClient *kubeClient.KubeClient) *PolicyController { @@ -44,9 +47,9 @@ func NewPolicyController(policyInterface policyclientset.Interface, policyInterface: policyInterface, policySynced: policyInformer.Informer().HasSynced, violationBuilder: violationBuilder, + eventBuilder: eventController, logger: logger, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), policyWorkQueueName), - //TODO Event Builder, this will used to record events with policy cannot be processed, using eventBuilder as we can restrict the event types } policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{ @@ -69,6 +72,7 @@ func (pc *PolicyController) updatePolicyHandler(oldResource, newResource interfa } pc.enqueuePolicy(newResource) } + func (pc *PolicyController) deletePolicyHandler(resource interface{}) { var object metav1.Object var ok bool @@ -112,16 +116,11 @@ func (pc *PolicyController) Run(stopCh <-chan struct{}) error { return nil } -// runWorker is a long-running function that will continually call the -// processNextWorkItem function in order to read and process a message on the -// workqueue. func (pc *PolicyController) runWorker() { for pc.processNextWorkItem() { } } -// processNextWorkItem will read a single work item off the workqueue and -// attempt to process it, by calling the syncHandler. func (pc *PolicyController) processNextWorkItem() bool { obj, shutdown := pc.queue.Get() if shutdown { @@ -146,20 +145,15 @@ func (pc *PolicyController) handleErr(err error, key interface{}) { pc.queue.Forget(key) return } - - // This controller retries 5 times if something goes wrong. After that, it stops trying. + // This controller retries if something goes wrong. After that, it stops trying. if pc.queue.NumRequeues(key) < policyWorkQueueRetryLimit { - pc.logger.Printf("Error syncing events %v: %v", key, err) - // Re-enqueue the key rate limited. Based on the rate limiter on the // queue and the re-enqueue history, the key will be processed later again. 
pc.queue.AddRateLimited(key) return } - pc.queue.Forget(key) - // Report to an external entity that, even after several retries, we could not successfully process this key utilruntime.HandleError(err) pc.logger.Printf("Dropping the key %q out of the queue: %v", key, err) } @@ -173,7 +167,7 @@ func (pc *PolicyController) syncHandler(obj interface{}) error { // convert the namespace/name string into distinct namespace and name namespace, name, err := cache.SplitMetaNamespaceKey(key) if err != nil { - utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key)) + utilruntime.HandleError(fmt.Errorf("invalid policy key: %s", key)) return nil } @@ -181,7 +175,7 @@ func (pc *PolicyController) syncHandler(obj interface{}) error { policy, err := pc.policyLister.Policies(namespace).Get(name) if err != nil { if errors.IsNotFound(err) { - utilruntime.HandleError(fmt.Errorf("foo '%s' in work queue no longer exists", key)) + utilruntime.HandleError(fmt.Errorf("policy '%s' in work queue no longer exists", key)) return nil } return err diff --git a/webhooks/mutation.go b/webhooks/mutation.go index ca7fca63c3..13f5f0ee93 100644 --- a/webhooks/mutation.go +++ b/webhooks/mutation.go @@ -10,8 +10,9 @@ import ( kubeclient "github.com/nirmata/kube-policy/kubeclient" types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1" policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1" + event "github.com/nirmata/kube-policy/pkg/event" mutation "github.com/nirmata/kube-policy/pkg/mutation" - violation "github.com/nirmata/kube-policy/pkg/violation" + policyviolation "github.com/nirmata/kube-policy/pkg/policyviolation" v1beta1 "k8s.io/api/admission/v1beta1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -26,7 +27,8 @@ type MutationWebhook struct { kubeclient *kubeclient.KubeClient policyLister policylister.PolicyLister registration *MutationWebhookRegistration - violationBuilder violation.PolicyViolationGenerator + violationBuilder policyviolation.Generator + eventBuilder event.Generator logger *log.Logger } @@ -35,7 +37,8 @@ func CreateMutationWebhook( clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, policyLister policylister.PolicyLister, - violationBuilder violation.PolicyViolationGenerator, + violationBuilder policyviolation.Generator, + eventController event.Generator, logger *log.Logger) (*MutationWebhook, error) { if clientConfig == nil || kubeclient == nil { return nil, errors.New("Some parameters are not set") @@ -59,6 +62,7 @@ func CreateMutationWebhook( policyLister: policyLister, registration: registration, violationBuilder: violationBuilder, + eventBuilder: eventController, logger: logger, }, nil } From 10e693d318539faf7072ee025318322bd8f32d72 Mon Sep 17 00:00:00 2001 From: shuting Date: Sun, 12 May 2019 16:29:31 -0500 Subject: [PATCH 17/17] Remove commented code --- pkg/policyengine/mutation/patches_test.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/pkg/policyengine/mutation/patches_test.go b/pkg/policyengine/mutation/patches_test.go index 7453781c51..986b15594f 100644 --- a/pkg/policyengine/mutation/patches_test.go +++ b/pkg/policyengine/mutation/patches_test.go @@ -125,11 +125,3 @@ func TestProcessPatches_RemovePathDoesntExist_IgnoreRemoveFailures_NotEmptyResul assert.Assert(t, len(patchesBytes) == 1) assertEqStringAndData(t, `{"path":"/metadata/labels/label2","op":"add","value":"label2Value"}`, patchesBytes[0]) } - -// func TestProcessSamePatch_AddAndRemovePathsDontExist_ContinueOnError_EmptyResult(t *testing.T) { -// patch1 := 
types.PolicyPatch{Path: "/metadata/labels/label3", Operation: "add", Value: "label3Value"} -// patches := []types.PolicyPatch{patch1} -// patchesBytes, err := ProcessPatches(patches, []byte(endpointsDocument), PatchingSetsContinueAlways) -// assert.NilError(t, err) -// assert.Assert(t, len(patchesBytes) == 1) -// }
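
For reference, the message construction that patch 16 consolidates into pkg/event can be exercised in isolation. The sketch below mirrors the argument-count check performed by getEventMsg under the same %s/%d/%v verb convention; it is not repository code, the helper names are placeholders, and the regular expression is a simplified stand-in for the argRegex constant.

package main

import (
	"fmt"
	"regexp"
)

// countVerbs counts the %s/%d/%v format verbs in a message template,
// mirroring the intent of the argRegex check in eventmsgbuilder.go.
func countVerbs(template string) int {
	re := regexp.MustCompile(`%[sdv]`)
	return len(re.FindAllString(template, -1))
}

// formatMsg fills the template only when the argument count matches,
// otherwise it returns an error, as getEventMsg does.
func formatMsg(template string, args ...interface{}) (string, error) {
	if n := countVerbs(template); n != len(args) {
		return "", fmt.Errorf("message expects %d arguments, but %d arguments passed", n, len(args))
	}
	return fmt.Sprintf(template, args...), nil
}

func main() {
	// Template taken from the SPolicyApply case exercised in eventmsgbuilder_test.go.
	const sPolicyApply = "Policy applied successfully on the resource %s"

	if msg, err := formatMsg(sPolicyApply, "default/nginx"); err == nil {
		fmt.Println(msg) // Policy applied successfully on the resource default/nginx
	}

	// One verb in the template, two arguments supplied: rejected before formatting.
	if _, err := formatMsg(sPolicyApply, "default/nginx", "extra"); err != nil {
		fmt.Println(err)
	}
}

The second call fails in the same way TestIncorrectArgs expects: one format verb in the template, two arguments passed.
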