
redesign event & violation builders. kubeclient to provide resource handlers

This commit is contained in:
shivdudhani 2019-05-06 09:12:37 -07:00
parent 7eb3225bd7
commit 0babf1f25d
20 changed files with 610 additions and 523 deletions
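The redesign replaces the concrete violation builder with interface-based event and violation builders that are constructed inside the controller and wired back to it via SetController. A minimal sketch of the resulting startup sequence, assuming only the constructors shown in this diff (the kubeconfig path is a placeholder):

package main

import (
    "log"

    controller "github.com/nirmata/kube-policy/controller"
    kubeclient "github.com/nirmata/kube-policy/kubeclient"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    // Build the rest.Config from a kubeconfig file (path is hypothetical).
    clientConfig, err := clientcmd.BuildConfigFromFlags("", "/path/to/kubeconfig")
    if err != nil {
        log.Fatalf("Error building kubeconfig: %v", err)
    }
    // The kube client is now created first and handed to the controller,
    // which builds the event and violation builders internally and calls
    // SetController on each of them.
    kc, err := kubeclient.NewKubeClient(clientConfig, nil)
    if err != nil {
        log.Fatalf("Error creating kubeclient: %v", err)
    }
    ctrl, err := controller.NewPolicyController(clientConfig, nil, kc)
    if err != nil {
        log.Fatalf("Error creating PolicyController: %v", err)
    }
    stopCh := make(chan struct{})
    ctrl.Run(stopCh) // starts the informer factory and the event workers
}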

View file

@ -2,36 +2,53 @@ package controller
import (
"errors"
"fmt"
"log"
"os"
"sort"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
internalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces"
kubeClient "github.com/nirmata/kube-policy/kubeclient"
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
policies "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/typed/policy/v1alpha1"
informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions"
lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
event "github.com/nirmata/kube-policy/pkg/event"
eventinternalinterfaces "github.com/nirmata/kube-policy/pkg/event/internalinterfaces"
eventutils "github.com/nirmata/kube-policy/pkg/event/utils"
violation "github.com/nirmata/kube-policy/pkg/violation"
violationinternalinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
mergetypes "k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
// PolicyController for CRD
type PolicyController struct {
// PolicyController API
type PolicyController interface {
internalinterfaces.PolicyGetter
createPolicyHandler(resource interface{})
updatePolicyHandler(oldResource, newResource interface{})
deletePolicyHandler(resource interface{})
getResourceKey(resource interface{}) string
}
//policyController for CRD
type policyController struct {
policyInformerFactory informers.SharedInformerFactory
policyLister lister.PolicyLister
policiesInterface policies.PolicyInterface
logger *log.Logger
violationBuilder *violation.Builder
violationBuilder violationinternalinterfaces.ViolationGenerator
eventBuilder eventinternalinterfaces.BuilderInternal
}
// NewPolicyController from cmd args
func NewPolicyController(config *rest.Config, logger *log.Logger) (*PolicyController, error) {
func NewPolicyController(config *rest.Config, logger *log.Logger, kubeClient *kubeClient.KubeClient) (PolicyController, error) {
if logger == nil {
logger = log.New(os.Stdout, "Policy Controller: ", log.LstdFlags|log.Lshortfile)
}
@ -44,54 +61,55 @@ func NewPolicyController(config *rest.Config, logger *log.Logger) (*PolicyContro
if err != nil {
return nil, err
}
// Initialize Kube Client
kubeClient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, time.Second*30)
policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, 0)
policyInformer := policyInformerFactory.Nirmata().V1alpha1().Policies()
// generate Violation builder
builder, err := violation.NewViolationHelper(kubeClient, policyClientset, logger, policyInformer)
// generate Event builder
eventBuilder, err := event.NewEventBuilder(kubeClient, logger)
if err != nil {
return nil, err
}
controller := &PolicyController{
// generate Violation builder
violationBuilder, err := violation.NewViolationBuilder(kubeClient, eventBuilder, logger)
if err != nil {
return nil, err
}
controller := &policyController{
policyInformerFactory: policyInformerFactory,
policyLister: policyInformer.Lister(),
policiesInterface: policyClientset.NirmataV1alpha1().Policies("default"),
logger: logger,
violationBuilder: builder,
violationBuilder: violationBuilder,
eventBuilder: eventBuilder,
}
policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: controller.createPolicyHandler,
UpdateFunc: controller.updatePolicyHandler,
DeleteFunc: controller.deletePolicyHandler,
})
// Set the controller
eventBuilder.SetController(controller)
violationBuilder.SetController(controller)
return controller, nil
}
// Run is main controller thread
func (c *PolicyController) Run(stopCh <-chan struct{}) {
c.policyInformerFactory.Start(stopCh)
// Un-comment to run the violation Builder
c.violationBuilder.Run(1, stopCh)
func (c *policyController) GetCacheInformerSync() cache.InformerSynced {
return c.policyInformerFactory.Nirmata().V1alpha1().Policies().Informer().HasSynced
}
// GetPolicies retrieves all policy resources
// from cache. Cache is refreshed by informer
func (c *PolicyController) GetPolicies() []types.Policy {
// Run is main controller thread
func (c *policyController) Run(stopCh <-chan struct{}) {
c.policyInformerFactory.Start(stopCh)
c.eventBuilder.Run(eventutils.EventWorkerThreadCount, stopCh)
}
func (c *policyController) GetPolicies() ([]types.Policy, error) {
// Create nil Selector to grab all the policies
selector := labels.NewSelector()
cachedPolicies, err := c.policyLister.List(selector)
if err != nil {
c.logger.Printf("Error: %v", err)
return nil
return nil, err
}
var policies []types.Policy
@ -102,17 +120,16 @@ func (c *PolicyController) GetPolicies() []types.Policy {
sort.Slice(policies, func(i, j int) bool {
return policies[i].CreationTimestamp.Time.Before(policies[j].CreationTimestamp.Time)
})
return policies
return policies, nil
}
// Writes error message to the policy logs in status section
func (c *PolicyController) LogPolicyError(name, text string) {
func (c *policyController) LogPolicyError(name, text string) {
c.addPolicyLog(name, "[ERROR] "+text)
}
// Writes info message to the policy logs in status section
func (c *PolicyController) LogPolicyInfo(name, text string) {
func (c *policyController) LogPolicyInfo(name, text string) {
c.addPolicyLog(name, "[ INFO] "+text)
}
@ -121,7 +138,7 @@ func (c *PolicyController) LogPolicyInfo(name, text string) {
const policyLogMaxRecords int = 50
// Appends given log text to the status/logs array.
func (c *PolicyController) addPolicyLog(name, text string) {
func (c *policyController) addPolicyLog(name, text string) {
getOptions := metav1.GetOptions{
ResourceVersion: "1",
IncludeUninitialized: true,
@ -134,12 +151,12 @@ func (c *PolicyController) addPolicyLog(name, text string) {
// Add new log record
text = time.Now().Format("2006 Jan 02 15:04:05.999 ") + text
//policy.Status.Logs = append(policy.Status.Logs, text)
// Pop front extra log records
// logsCount := len(policy.Status.Logs)
// if logsCount > policyLogMaxRecords {
// policy.Status.Logs = policy.Status.Logs[logsCount-policyLogMaxRecords:]
// }
policy.Status.Logs = append(policy.Status.Logs, text)
// Pop front extra log records
logsCount := len(policy.Status.Logs)
if logsCount > policyLogMaxRecords {
policy.Status.Logs = policy.Status.Logs[logsCount-policyLogMaxRecords:]
}
// Save logs to policy object
_, err = c.policiesInterface.UpdateStatus(policy)
if err != nil {
@ -147,29 +164,43 @@ func (c *PolicyController) addPolicyLog(name, text string) {
}
}
func (c *PolicyController) createPolicyHandler(resource interface{}) {
func (c *policyController) createPolicyHandler(resource interface{}) {
key := c.getResourceKey(resource)
c.logger.Printf("Policy created: %s", key)
}
func (c *PolicyController) updatePolicyHandler(oldResource, newResource interface{}) {
func (c *policyController) updatePolicyHandler(oldResource, newResource interface{}) {
oldKey := c.getResourceKey(oldResource)
newKey := c.getResourceKey(newResource)
c.logger.Printf("Policy %s updated to %s", oldKey, newKey)
}
func (c *PolicyController) deletePolicyHandler(resource interface{}) {
func (c *policyController) deletePolicyHandler(resource interface{}) {
key := c.getResourceKey(resource)
c.logger.Printf("Policy deleted: %s", key)
}
func (c *PolicyController) getResourceKey(resource interface{}) string {
func (c *policyController) getResourceKey(resource interface{}) string {
if key, err := cache.MetaNamespaceKeyFunc(resource); err != nil {
c.logger.Fatalf("Error retrieving policy key: %v", err)
} else {
return key
}
return ""
}
func (c *policyController) GetPolicy(name string) (*types.Policy, error) {
policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(name)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", name))
return nil, err
}
return c.getPolicyInterface(policyNamespace).Get(policyName)
}
func (c *policyController) getPolicyInterface(namespace string) lister.PolicyNamespaceLister {
return c.policyLister.Policies(namespace)
}
func (c *policyController) PatchPolicy(policy string, pt mergetypes.PatchType, data []byte) (*types.Policy, error) {
return c.policiesInterface.Patch(policy, pt, data)
}

View file

@ -0,0 +1,18 @@
package internalinterfaces
import (
policytypes "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
types "k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/cache"
)
// PolicyGetter interface for external API
type PolicyGetter interface {
GetPolicies() ([]policytypes.Policy, error)
GetPolicy(name string) (*policytypes.Policy, error)
GetCacheInformerSync() cache.InformerSynced
PatchPolicy(policy string, pt types.PatchType, data []byte) (*policytypes.Policy, error)
Run(stopCh <-chan struct{})
LogPolicyError(name, text string)
LogPolicyInfo(name, text string)
}
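Consumers such as the mutation webhook can now depend on this narrow interface instead of the concrete controller type. A sketch of a caller, assuming a hypothetical helper around any PolicyGetter implementation:

package example // hypothetical consumer package

import (
    "log"

    internalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces"
)

// listAndLog exercises the read side of the PolicyGetter surface.
func listAndLog(g internalinterfaces.PolicyGetter) {
    policies, err := g.GetPolicies()
    if err != nil {
        log.Printf("unable to list policies: %v", err)
        return
    }
    for _, p := range policies {
        g.LogPolicyInfo(p.Name, "policy processed")
    }
}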

View file

@ -10,7 +10,7 @@ webhooks:
- name: webhook.nirmata.kube-policy
clientConfig:
url: "https://localhost/mutate"
caBundle: ${CA_BUNDLE}
caBundle: MIIC5zCCAc+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5pa3ViZUNBMB4XDTE5MDQxMDIxMjM1OFoXDTI5MDQwODIxMjM1OFowFTETMBEGA1UEAxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBALV1uHt50QVtVnGiGc4nrMFhsuT+R/KpU0qq3hNV6xPWiBcfUZNqz0iEAbh9YpZ3np2d2gHniBkbUfZwcI541SYacwPfVkdnBMKvVqBYGk1wz2GVyd8fas6guerchmUO7XtU5VfGr9TbKGp9vo/d+NWwGlp9/x7Ni8rnK1D1oMoFmQ02Q6N0xdsBiCEs+MUVqUlu/Xtx+rITD+fYOWpB+z1+KOEysFGaLl8nayxanhCqcRR+T8SeJ+hXIDHNoOqFDf1Y7XmzLtTlynoVNLh0gMy4cWgoFYuklz7JlYvpLJFt9cSziwIKfG56T6RQZ77z5w4TO5bfTvYlHCztY5zSiNkCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQCqaqjPJTDU0U2pkewyCGFGVAzdnDgGozjeCP2rRojhJOiOMaBBgVIpJxCRz2BKfXHW+B4hKCli08t7lPe1ab12hM6wmlLxkkmbjxW4H9Coo/OAaoIz6bfmMhBUjl6tuTsgTkHjarG7W12rFb7Xkj6zSd17EJsUoRx8t1GD65HXFindEMvjEGQ4MPfX3XqLT1NNIsFHF6e7RXpYPWQ/eT3Z/9ia+7vZzXzEmjXYedEeggyqg6QrL+DX3BQF8TcTLmxSRA6MMqOXEjlmU85TOTjP39PBhzCz78m8ZSM9KHQyjOv1xhR0CxZMyxfiN2bvA3aJAtMLOwLjKIYAkLm3W2hp
rules:
- operations: [ "CREATE" ]
resources: [ "*/*" ]

View file

@ -8,7 +8,7 @@ spec:
- name: v1alpha1
served: true
storage: true
scope: Cluster
scope: Namespaced
names:
kind: Policy
plural: policies
@ -34,6 +34,7 @@ spec:
type: object
required:
- resource
- name
properties:
resource:
type: object
@ -103,6 +104,8 @@ spec:
- remove
value:
AnyValue: {}
name:
type: string
configMapGenerator:
type: object
required:

View file

@ -1,6 +1,7 @@
package kubeclient
import (
"fmt"
"log"
"os"
"time"
@ -11,8 +12,12 @@ import (
v1 "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
event "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
// KubeClient is the api-client for core Kubernetes objects
@ -38,6 +43,10 @@ func NewKubeClient(config *rest.Config, logger *log.Logger) (*KubeClient, error)
}, nil
}
func (kc *KubeClient) GetEventsInterface(namespace string) event.EventInterface {
return kc.client.CoreV1().Events(namespace)
}
func (kc *KubeClient) GetKubePolicyDeployment() (*apps.Deployment, error) {
kubePolicyDeployment, err := kc.client.
Apps().
@ -175,3 +184,188 @@ func (kc *KubeClient) createSecretAfterNamespaceIsCreated(secret v1.Secret, name
kc.logger.Printf("Can't create a secret: %s", err)
}
}
var rMapper = map[string]getter{
"ConfigMap": configMapGetter,
"Pods": podsGetter,
"Deploymeny": deploymentGetter,
"CronJob": cronJobGetter,
"Endpoints": endpointsbGetter,
"HorizontalPodAutoscaler": horizontalPodAutoscalerGetter,
"Ingress": ingressGetter,
"Job": jobGetter,
"LimitRange": limitRangeGetter,
"Namespace": namespaceGetter,
"NetworkPolicy": networkPolicyGetter,
"PersistentVolumeClaim": persistentVolumeClaimGetter,
"PodDisruptionBudget": podDisruptionBudgetGetter,
"PodTemplate": podTemplateGetter,
"ResourceQuota": resourceQuotaGetter,
"Secret": secretGetter,
"Service": serviceGetter,
"StatefulSet": statefulSetGetter,
}
type getter func(*kubernetes.Clientset, string, string) (runtime.Object, error)
// GetResource gets the resource object for the given kind and "namespace/name" key
func (kc *KubeClient) GetResource(kind string, resource string) (runtime.Object, error) {
namespace, name, err := cache.SplitMetaNamespaceKey(resource)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", resource))
return nil, err
}
// Look up the getter for this kind; indexing the map directly would panic on unsupported kinds
getter, ok := rMapper[kind]
if !ok {
return nil, fmt.Errorf("unsupported resource kind %s", kind)
}
return getter(kc.client, namespace, name)
}
// GetSupportedResourceTypes provides the list of supported resource kinds
func GetSupportedResourceTypes() (rTypes []string) {
for k := range rMapper {
rTypes = append(rTypes, k)
}
return rTypes
}
func configMapGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func podsGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func deploymentGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func cronJobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func endpointsGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func horizontalPodAutoscalerGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func ingressGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.ExtensionsV1beta1().Ingresses(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func jobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func limitRangeGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().LimitRanges(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func namespaceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func networkPolicyGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.NetworkingV1().NetworkPolicies(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func persistentVolumeClaimGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().PersistentVolumeClaims(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func podDisruptionBudgetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func podTemplateGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().PodTemplates(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func resourceQuotaGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func secretGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func serviceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
func statefulSetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
obj, err := clientSet.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
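The rMapper table replaces the old switch-based resourceClient: callers resolve any supported kind from a "namespace/name" key. A sketch of the caller side, assuming an initialized KubeClient and a hypothetical pod name:

package example // hypothetical caller

import (
    "fmt"
    "log"

    kubeclient "github.com/nirmata/kube-policy/kubeclient"
)

func dumpResource(kc *kubeclient.KubeClient) {
    // Keys follow the "namespace/name" form split by cache.SplitMetaNamespaceKey;
    // note the map uses "Pods" (plural) as the kind key.
    obj, err := kc.GetResource("Pods", "default/my-pod")
    if err != nil {
        log.Printf("lookup failed: %v", err)
        return
    }
    fmt.Printf("got %T\n", obj)
    fmt.Println("supported kinds:", kubeclient.GetSupportedResourceTypes())
}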

main.go
View file

@ -24,16 +24,16 @@ func main() {
log.Fatalf("Error building kubeconfig: %v\n", err)
}
controller, err := controller.NewPolicyController(clientConfig, nil)
if err != nil {
log.Fatalf("Error creating PolicyController: %s\n", err)
}
kubeclient, err := kubeclient.NewKubeClient(clientConfig, nil)
if err != nil {
log.Fatalf("Error creating kubeclient: %v\n", err)
}
controller, err := controller.NewPolicyController(clientConfig, nil, kubeclient)
if err != nil {
log.Fatalf("Error creating PolicyController: %s\n", err)
}
mutationWebhook, err := webhooks.CreateMutationWebhook(clientConfig, kubeclient, controller, nil)
if err != nil {
log.Fatalf("Error creating mutation webhook: %v\n", err)

View file

@ -12,9 +12,9 @@ import (
type Policy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PolicySpec `json:"spec"`
// Status PolicyStatus `json:"status"`
Status PolicyViolations `json:"status,omitempty"`
Spec PolicySpec `json:"spec"`
Status PolicyStatus `json:"status"`
PolicyViolation PolicyViolations `json:"policyviolation,omitempty"`
}
type PolicyViolations struct {
@ -23,9 +23,9 @@ type PolicyViolations struct {
type Violation struct {
Kind string `json:"kind,omitempty"`
Resource string `json:"resource,omitempty"`
Source string `json:"source,omitempty"`
Rule string `json:"rule,omitempty"`
Reason string `json:"reason,omitempty"`
Message string `json:"message,omitempty"`
}
// Specification of the Policy.

pkg/event/builder.go Normal file
View file

@ -0,0 +1,161 @@
package event
import (
"errors"
"fmt"
"log"
"time"
controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces"
kubeClient "github.com/nirmata/kube-policy/kubeclient"
"github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme"
policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme"
"github.com/nirmata/kube-policy/pkg/event/internalinterfaces"
utils "github.com/nirmata/kube-policy/pkg/event/utils"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
type builder struct {
kubeClient *kubeClient.KubeClient
controller controllerinternalinterfaces.PolicyGetter
workqueue workqueue.RateLimitingInterface
recorder record.EventRecorder
logger *log.Logger
policySynced cache.InformerSynced
}
type Builder interface {
internalinterfaces.BuilderInternal
SyncHandler(key utils.EventInfo) error
ProcessNextWorkItem() bool
RunWorker()
}
func NewEventBuilder(kubeClient *kubeClient.KubeClient,
logger *log.Logger,
) (Builder, error) {
builder := &builder{
kubeClient: kubeClient,
workqueue: initWorkqueue(),
recorder: initRecorder(kubeClient),
logger: logger,
}
return builder, nil
}
func initRecorder(kubeClient *kubeClient.KubeClient) record.EventRecorder {
// Initialize Event Broadcaster
policyscheme.AddToScheme(scheme.Scheme)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(log.Printf)
eventBroadcaster.StartRecordingToSink(
&typedcorev1.EventSinkImpl{
Interface: kubeClient.GetEventsInterface("")})
recorder := eventBroadcaster.NewRecorder(
scheme.Scheme,
v1.EventSource{Component: utils.EventSource})
return recorder
}
func initWorkqueue() workqueue.RateLimitingInterface {
return workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), utils.EventWorkQueueName)
}
func (b *builder) SetController(controller controllerinternalinterfaces.PolicyGetter) {
b.controller = controller
b.policySynced = controller.GetCacheInformerSync()
}
func (b *builder) AddEvent(info utils.EventInfo) {
b.workqueue.Add(info)
}
// Run initializes the worker routines that process event creation requests
func (b *builder) Run(threadiness int, stopCh <-chan struct{}) error {
if b.controller == nil {
return errors.New("Controller has not be set")
}
defer utilruntime.HandleCrash()
defer b.workqueue.ShutDown()
log.Println("Starting violation builder")
fmt.Println(("Wait for informer cache to sync"))
if ok := cache.WaitForCacheSync(stopCh, b.policySynced); !ok {
fmt.Println("Unable to sync the cache")
}
log.Println("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(b.RunWorker, time.Second, stopCh)
}
log.Println("Started workers")
<-stopCh
log.Println("Shutting down workers")
return nil
}
func (b *builder) RunWorker() {
for b.ProcessNextWorkItem() {
}
}
func (b *builder) ProcessNextWorkItem() bool {
obj, shutdown := b.workqueue.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer b.workqueue.Done(obj)
var key utils.EventInfo
var ok bool
if key, ok = obj.(utils.EventInfo); !ok {
b.workqueue.Forget(obj)
log.Printf("Expecting type info by got %v", obj)
return nil
}
// Run the syncHandler, passing the resource and the policy
if err := b.SyncHandler(key); err != nil {
b.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error())
}
return nil
}(obj)
if err != nil {
log.Println((err))
}
return true
}
func (b *builder) SyncHandler(key utils.EventInfo) error {
var resource runtime.Object
var err error
switch key.Kind {
case "Policy":
resource, err = b.controller.GetPolicy(key.Resource)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to create event for policy %s, will retry ", key.Resource))
return err
}
default:
resource, err = b.kubeClient.GetResource(key.Kind, key.Resource)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry ", key.Resource))
return err
}
}
b.recorder.Event(resource, v1.EventTypeNormal, key.Reason, key.Message)
return nil
}
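Producers no longer talk to an event recorder directly: they enqueue an EventInfo, and the worker goroutines started by Run resolve the target object (via the controller for policies, via the kube client for everything else) and record the event. A sketch of a producer, with hypothetical field values:

package example // hypothetical producer

import (
    eventinternalinterfaces "github.com/nirmata/kube-policy/pkg/event/internalinterfaces"
    eventutils "github.com/nirmata/kube-policy/pkg/event/utils"
)

// reportMutation queues an event; the builder's workers will look up the
// Deployment and emit a normal-type Kubernetes event against it.
func reportMutation(b eventinternalinterfaces.BuilderInternal) {
    b.AddEvent(eventutils.EventInfo{
        Kind:     "Deployment",
        Resource: "default/nginx", // "namespace/name" key, hypothetical
        Rule:     "check-labels",  // hypothetical rule name
        Reason:   "PolicyApplied",
        Message:  "mutation patch applied successfully",
    })
}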

View file

@ -0,0 +1,12 @@
package internalinterfaces
import (
internalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces"
utils "github.com/nirmata/kube-policy/pkg/event/utils"
)
type BuilderInternal interface {
SetController(controller internalinterfaces.PolicyGetter)
Run(threadiness int, stopCh <-chan struct{}) error
AddEvent(info utils.EventInfo)
}

pkg/event/utils/util.go Normal file
View file

@ -0,0 +1,15 @@
package utils
const EventSource = "policy-controller"
const EventWorkQueueName = "policy-controller-events"
type EventInfo struct {
Kind string
Resource string
Rule string
Reason string
Message string
}
const EventWorkerThreadCount = 1

View file

@ -1,159 +0,0 @@
package resourceClient
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes"
)
func GetResouce(clientSet *kubernetes.Clientset, kind string, resourceNamespace string, resourceName string) (runtime.Object, error) {
switch kind {
case "Deployment":
{
obj, err := clientSet.AppsV1().Deployments(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Pods":
{
obj, err := clientSet.CoreV1().Pods(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "ConfigMap":
{
obj, err := clientSet.CoreV1().ConfigMaps(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "CronJob":
{
obj, err := clientSet.BatchV1beta1().CronJobs(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Endpoints":
{
obj, err := clientSet.CoreV1().Endpoints(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "HorizontalPodAutoscaler":
{
obj, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Ingress":
{
obj, err := clientSet.ExtensionsV1beta1().Ingresses(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Job":
{
obj, err := clientSet.BatchV1().Jobs(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "LimitRange":
{
obj, err := clientSet.CoreV1().LimitRanges(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Namespace":
{
obj, err := clientSet.CoreV1().Namespaces().Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "NetworkPolicy":
{
obj, err := clientSet.NetworkingV1().NetworkPolicies(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "PersistentVolumeClaim":
{
obj, err := clientSet.CoreV1().PersistentVolumeClaims(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "PodDisruptionBudget":
{
obj, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "PodTemplate":
{
obj, err := clientSet.CoreV1().PodTemplates(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "ResourceQuota":
{
obj, err := clientSet.CoreV1().ResourceQuotas(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Secret":
{
obj, err := clientSet.CoreV1().Secrets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Service":
{
obj, err := clientSet.CoreV1().Services(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "StatefulSet":
{
obj, err := clientSet.AppsV1().StatefulSets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
default:
return nil, nil
}
}

View file

@ -0,0 +1,11 @@
package internalinterfaces
import (
"github.com/nirmata/kube-policy/controller/internalinterfaces"
utils "github.com/nirmata/kube-policy/pkg/violation/utils"
)
type ViolationGenerator interface {
SetController(controller internalinterfaces.PolicyGetter)
Create(info utils.ViolationInfo) error
}

View file

@ -1,34 +1,5 @@
package violation
// Mode to identify the CRUD event when the violation was identified
type Mode string
const (
// Create resource
Create Mode = "create"
// Update resource
Update Mode = "update"
// Delete resource
Delete Mode = "delete"
)
// ResourceMode identifies the source of the violation check
type ResourceMode string
const (
// Resource type is kubernetes resource
Resource ResourceMode = "resource"
// Policy type is policy custom resource
Policy ResourceMode = "policy"
)
type Target int
const (
ResourceTarget Target = 1
PolicyTarget Target = 2
)
// Source for the events recorder
const violationEventSource = "policy-controller"
@ -38,14 +9,6 @@ const workqueueViolationName = "Policy-Violations"
// Event Reason
const violationEventResrouce = "Violation"
type EventInfo struct {
Resource string
Kind string
Reason string
Source string
ResourceTarget Target
}
// Info input details
type Info struct {
Kind string

View file

@ -0,0 +1,10 @@
package utils
type ViolationInfo struct {
Kind string
Resource string
Policy string
Rule string
Reason string
Message string
}
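Violation producers go through the ViolationGenerator interface; Create records the violation on the policy and queues the matching events. A sketch with hypothetical values:

package example // hypothetical producer

import (
    violationinternalinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces"
    utils "github.com/nirmata/kube-policy/pkg/violation/utils"
)

// reportViolation wraps the new Create entry point.
func reportViolation(v violationinternalinterfaces.ViolationGenerator) error {
    return v.Create(utils.ViolationInfo{
        Kind:     "Deployment",
        Resource: "default/nginx",        // "namespace/name" key, hypothetical
        Policy:   "default/check-labels", // policy key, hypothetical
        Rule:     "require-team-label",   // hypothetical rule name
        Reason:   "Violation",
        Message:  "required label is missing",
    })
}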

View file

@ -1,284 +1,129 @@
package violation
import (
"encoding/json"
"fmt"
"log"
"time"
jsonpatch "github.com/evanphx/json-patch"
controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces"
kubeClient "github.com/nirmata/kube-policy/kubeclient"
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme"
informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1"
lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
resourceClient "github.com/nirmata/kube-policy/pkg/resourceClient"
v1 "k8s.io/api/core/v1"
"github.com/nirmata/kube-policy/pkg/event/internalinterfaces"
eventinternalinterfaces "github.com/nirmata/kube-policy/pkg/event/internalinterfaces"
eventutils "github.com/nirmata/kube-policy/pkg/event/utils"
violationinternalinterfaces "github.com/nirmata/kube-policy/pkg/violation/internalinterfaces"
utils "github.com/nirmata/kube-policy/pkg/violation/utils"
mergetypes "k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/kubernetes/scheme"
typedcc1orev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
type Violations []Violation
type Violation struct {
type builder struct {
kubeClient *kubeClient.KubeClient
controller controllerinternalinterfaces.PolicyGetter
eventBuilder eventinternalinterfaces.BuilderInternal
logger *log.Logger
}
// Builder to generate violations
type Builder struct {
kubeClient *kubernetes.Clientset
policyClientset *clientset.Clientset
workqueue workqueue.RateLimitingInterface
logger *log.Logger
recorder record.EventRecorder
policyLister lister.PolicyLister
policySynced cache.InformerSynced
type Builder interface {
violationinternalinterfaces.ViolationGenerator
ProcessViolation(info utils.ViolationInfo) error
Patch(policy *types.Policy, updatedPolicy *types.Policy) error
IsActive(kind string, resource string) (bool, error)
}
func NewViolationHelper(kubeClient *kubernetes.Clientset, policyClientSet *clientset.Clientset, logger *log.Logger, policyInformer informers.PolicyInformer) (*Builder, error) {
func NewViolationBuilder(
kubeClient *kubeClient.KubeClient,
eventBuilder eventinternalinterfaces.BuilderInternal,
logger *log.Logger) (Builder, error) {
// Initialize Event Broadcaster
policyscheme.AddToScheme(scheme.Scheme)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(log.Printf)
eventBroadcaster.StartRecordingToSink(
&typedcc1orev1.EventSinkImpl{
Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(
scheme.Scheme,
v1.EventSource{Component: violationEventSource})
// Build the builder
builder := &Builder{
kubeClient: kubeClient,
policyClientset: policyClientSet,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workqueueViolationName),
logger: logger,
recorder: recorder,
policyLister: policyInformer.Lister(),
policySynced: policyInformer.Informer().HasSynced,
}
builder := &builder{}
return builder, nil
}
// Create Violation -> (Info)
// Create to generate violation jsonpatch script &
// queue events to generate events
// TODO: create should validate the rule number and update the violation if one exists
func (b *Builder) Create(info Info) error {
// generate patch
// we can generate the patch as the policy resource will always exist
// Apply Patch
err := b.patchViolation(info)
func (b *builder) Create(info utils.ViolationInfo) error {
err := b.ProcessViolation(info)
if err != nil {
return err
}
// Generate event for policy
b.workqueue.Add(
EventInfo{
Resource: info.Policy,
Reason: info.Reason,
ResourceTarget: PolicyTarget,
})
// Generate event for resource
b.workqueue.Add(
EventInfo{
Kind: info.Kind,
Resource: info.Resource,
Reason: info.Reason,
ResourceTarget: ResourceTarget,
})
return nil
}
// Remove the violation
func (b *Builder) Remove(info Info) ([]byte, error) {
b.workqueue.Add(info)
return nil, nil
func (b *builder) SetController(controller controllerinternalinterfaces.PolicyGetter) {
b.controller = controller
}
func (b *Builder) patchViolation(info Info) error {
// policy-controller handlers are post events
// adm-ctr will always have policy resource created
// Get Policy namespace and name
policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(info.Policy)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", info.Policy))
return err
}
// Try to access the policy
// Try to access the resource
// if the above resource objects have not been created then we requeue the request to create the event
policy, err := b.policyLister.Policies(policyNamespace).Get(policyName)
func (b *builder) ProcessViolation(info utils.ViolationInfo) error {
// Get the policy
policy, err := b.controller.GetPolicy(info.Policy)
if err != nil {
utilruntime.HandleError(err)
return err
}
// Add violation
updatedPolicy := policy.DeepCopy()
// var update bool
// inactiveViolationindex := []int{}
updatedViolations := []types.Violation{}
// Check if the violation with the same rule exists for the same resource and rule name
for _, violation := range updatedPolicy.Status.Violations {
modifiedPolicy := policy.DeepCopy()
modifiedViolations := []types.Violation{}
if ok, err := b.IsActive(violation); ok {
if err != nil {
fmt.Println(err)
}
updatedViolations = append(updatedViolations, violation)
} else {
fmt.Println("Remove violation")
b.workqueue.Add(
EventInfo{
Resource: info.Policy,
Reason: "Removing violation for rule " + info.RuleName,
ResourceTarget: PolicyTarget,
})
for _, violation := range modifiedPolicy.PolicyViolation.Violations {
ok, err := b.IsActive(info.Kind, info.Resource)
if err != nil {
utilruntime.HandleError(err)
continue
}
if !ok {
// Remove the violation
// Create a removal event
b.eventBuilder.AddEvent(eventutils.EventInfo{
Kind: "Policy",
Resource: info.Resource,
Rule: info.Rule,
Reason: info.Reason,
Message: info.Message,
})
continue
}
// If violation already exists for this rule, we update the violation
if violation.Kind == info.Kind &&
violation.Resource == info.Resource &&
violation.Rule == info.Rule {
violation.Reason = info.Reason
violation.Message = info.Message
}
modifiedViolations = append(modifiedViolations, violation)
}
// Rule is updated TO-DO
// Don't validate if the resource is active as a new Violation will not be created if it did not
updatedViolations = append(updatedViolations,
types.Violation{
Kind: info.Kind,
Resource: info.Resource,
Rule: info.RuleName,
Reason: info.Reason,
})
updatedPolicy.Status.Violations = updatedViolations
// Patch
return b.patch(policy, updatedPolicy)
modifiedPolicy.PolicyViolation.Violations = modifiedViolations
return b.Patch(policy, modifiedPolicy)
}
func (b *Builder) getPolicyEvent(info Info) EventInfo {
return EventInfo{Resource: info.Resource}
}
func (b *Builder) IsActive(violation types.Violation) (bool, error) {
if ok, err := b.ValidationResourceActive(violation); !ok {
func (b *builder) IsActive(kind string, resource string) (bool, error) {
// Check whether the resource still exists
_, err := b.kubeClient.GetResource(kind, resource)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to get resource %s ", resource))
return false, err
}
return true, nil
}
func (b *Builder) ValidationResourceActive(violation types.Violation) (bool, error) {
resourceNamespace, resourceName, err := cache.SplitMetaNamespaceKey(violation.Resource)
// ProcessViolation(info utils.ViolationInfo) error
func (b *builder) Patch(policy *types.Policy, updatedPolicy *types.Policy) error {
originalData, err := json.Marshal(policy)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", violation.Resource))
// Remove the corresponding violation
return false, err
return err
}
// Check if the corresponding resource is still present
_, err = resourceClient.GetResouce(b.kubeClient, violation.Kind, resourceNamespace, resourceName)
modifiedData, err := json.Marshal(updatedPolicy)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to get resource %s ", violation.Resource))
return false, err
return err
}
return true, nil
}
func (b *Builder) patch(policy *types.Policy, updatedPolicy *types.Policy) error {
_, err := b.policyClientset.Nirmata().Policies(updatedPolicy.Namespace).UpdateStatus(updatedPolicy)
// generate merge patch
patchBytes, err := jsonpatch.CreateMergePatch(originalData, modifiedData)
if err != nil {
return err
}
_, err = b.controller.PatchPolicy(policy.Name, mergetypes.MergePatchType, patchBytes)
if err != nil {
// Unable to patch
return err
}
return nil
}
// Run : Initialize the worker routines to process the event creation
func (b *Builder) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer b.workqueue.ShutDown()
log.Println("Starting violation builder")
fmt.Println(("Wait for informer cache to sync"))
if ok := cache.WaitForCacheSync(stopCh, b.policySynced); !ok {
fmt.Println("Unable to sync the cache")
}
log.Println("Starting workers")
for i := 0; i < threadiness; i++ {
go wait.Until(b.runWorker, time.Second, stopCh)
}
log.Println("Started workers")
<-stopCh
log.Println("Shutting down workers")
return nil
}
func (b *Builder) runWorker() {
for b.processNextWorkItem() {
}
}
func (b *Builder) processNextWorkItem() bool {
// get info object
obj, shutdown := b.workqueue.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer b.workqueue.Done(obj)
var key EventInfo
var ok bool
if key, ok = obj.(EventInfo); !ok {
b.workqueue.Forget(obj)
log.Printf("Expecting type info but got %v", obj)
return nil
}
// Run the syncHandler, passing the resource and the policy
if err := b.syncHandler(key); err != nil {
b.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error())
}
return nil
}(obj)
if err != nil {
log.Println((err))
}
return true
}
// TO-DO: how to handle events if the resource has been deleted, and clean the dirty object
func (b *Builder) syncHandler(key EventInfo) error {
fmt.Println(key)
// Get Policy namespace and name
namespace, name, err := cache.SplitMetaNamespaceKey(key.Resource)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid policy key: %s", key.Resource))
return nil
}
if key.ResourceTarget == ResourceTarget {
// Resource Event
resource, err := resourceClient.GetResouce(b.kubeClient, key.Kind, namespace, name)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry ", key.Resource))
return err
}
b.recorder.Event(resource, v1.EventTypeNormal, violationEventResrouce, key.Reason)
} else {
// Policy Event
policy, err := b.policyLister.Policies(namespace).Get(name)
if err != nil {
// TO-DO: this scenario will not exist as the policy will always exist
// unless the namespace and resource name are invalid
utilruntime.HandleError(err)
return err
}
b.recorder.Event(policy, v1.EventTypeNormal, violationEventResrouce, key.Reason)
}
return nil
}
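The builder now persists violation changes with a JSON merge patch computed from the original and modified policy snapshots, instead of calling UpdateStatus. A standalone sketch of the same jsonpatch flow, using plain JSON documents in place of the serialized Policy objects:

package main

import (
    "fmt"

    jsonpatch "github.com/evanphx/json-patch"
)

func main() {
    // Stand-ins for json.Marshal(policy) and json.Marshal(updatedPolicy).
    original := []byte(`{"policyviolation":{"violations":[]}}`)
    modified := []byte(`{"policyviolation":{"violations":[{"kind":"Deployment","rule":"check-labels"}]}}`)

    // CreateMergePatch computes the minimal RFC 7386 merge patch between the
    // two; the builder sends it via PatchPolicy(name, types.MergePatchType, patch).
    patch, err := jsonpatch.CreateMergePatch(original, modified)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(patch))
}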

View file

@ -22,4 +22,4 @@ tag="${hub_user_name}/${project_name}:${version}"
docker build --no-cache -t "${tag}" . || exit 4
echo "# Pushing image to repository..."
docker push "${tag}" || exit 5
#docker push "${tag}" || exit 5

View file

@ -34,11 +34,14 @@ if [ -z "${namespace}" ]; then # controller should be launched locally
${certsGenerator} "--service=${service_name}" "--serverIp=${serverIp}" || exit 2
echo "Applying webhook..."
kubectl delete -f crd/MutatingWebhookConfiguration_local.yaml
kubectl create -f crd/MutatingWebhookConfiguration_local.yaml || exit 3
kubectl delete -f definitions/MutatingWebhookConfiguration_debug.yaml
kubectl create -f definitions/MutatingWebhookConfiguration_debug.yaml || exit 3
kubectl delete -f crd/crd.yaml
kubectl create -f crd/crd.yaml || exit 3
# kubectl delete -f definitions/PolicyValidationConfiguration.yaml
# kubectl create -f definitions/PolicyValidationConfiguration.yaml || exit 3
kubectl delete -f definitions/install.yaml
kubectl create -f definitions/install.yaml || exit 3
echo -e "\n### You can build and run kube-policy project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)."

View file

@ -5,7 +5,7 @@ set -o pipefail
# get nirmata root
NIRMATA_DIR=$(dirname ${BASH_SOURCE})/..
NIRMATA_ROOT=$(readlink -f ${NIRMATA_DIR})
NIRMATA_ROOT=$(greadlink -f ${NIRMATA_DIR})
# get relative path to code generation script
CODEGEN_PKG=${NIRMATA_DIR}/vendor/k8s.io/code-generator

View file

@ -2,34 +2,14 @@ package webhooks
import (
"github.com/minio/minio/pkg/wildcard"
kubeclient "github.com/nirmata/kube-policy/kubeclient"
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
"k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var supportedKinds = [...]string{
"ConfigMap",
"CronJob",
"DaemonSet",
"Deployment",
"Endpoints",
"HorizontalPodAutoscaler",
"Ingress",
"Job",
"LimitRange",
"Namespace",
"NetworkPolicy",
"PersistentVolumeClaim",
"PodDisruptionBudget",
"PodTemplate",
"ResourceQuota",
"Secret",
"Service",
"StatefulSet",
}
func kindIsSupported(kind string) bool {
for _, k := range supportedKinds {
for _, k := range kubeclient.GetSupportedResourceTypes() {
if k == kind {
return true
}

View file

@ -6,7 +6,7 @@ import (
"log"
"os"
controller "github.com/nirmata/kube-policy/controller"
controllerinternalinterfaces "github.com/nirmata/kube-policy/controller/internalinterfaces"
kubeclient "github.com/nirmata/kube-policy/kubeclient"
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
v1beta1 "k8s.io/api/admission/v1beta1"
@ -18,13 +18,13 @@ import (
// business logic for resource mutation
type MutationWebhook struct {
kubeclient *kubeclient.KubeClient
controller *controller.PolicyController
controller controllerinternalinterfaces.PolicyGetter
registration *MutationWebhookRegistration
logger *log.Logger
}
// Registers mutation webhook in cluster and creates object for this webhook
func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, controller *controller.PolicyController, logger *log.Logger) (*MutationWebhook, error) {
func CreateMutationWebhook(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient, controller controllerinternalinterfaces.PolicyGetter, logger *log.Logger) (*MutationWebhook, error) {
if clientConfig == nil || kubeclient == nil || controller == nil {
return nil, errors.New("Some parameters are not set")
}
@ -55,7 +55,7 @@ func (mw *MutationWebhook) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.Ad
mw.logger.Printf("AdmissionReview for Kind=%v, Namespace=%v Name=%v UID=%v patchOperation=%v UserInfo=%v",
request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation, request.UserInfo)
policies := mw.controller.GetPolicies()
policies, _ := mw.controller.GetPolicies()
if len(policies) == 0 {
return nil
}