
violation framework updates

Shivkumar Dudhani 2019-04-30 17:13:44 -07:00 committed by shuting
parent 91b3269aa1
commit 9a6738e272
4 changed files with 353 additions and 103 deletions


@@ -12,15 +12,16 @@ import (
type Policy struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec PolicySpec `json:"spec"`
Status PolicyStatus `json:"status"`
Violations PolicyViolations `json:"policyviolations,omitempty"`
Spec PolicySpec `json:"spec"`
// Status PolicyStatus `json:"status"`
Status PolicyViolations `json:"status,omitempty"`
}
type PolicyViolations struct {
Violations []Violation `json:"violations,omitempty"`
}
type Violation struct {
Kind string `json:"kind,omitempty"`
Resource string `json:"resource,omitempty"`
Source string `json:"source,omitempty"`
Rule string `json:"rule,omitempty"`
@@ -37,6 +38,7 @@ type PolicySpec struct {
// The rule of mutation for the single resource definition.
// Details are listed in the description of each of the substructures.
type PolicyRule struct {
Name string `json:"name"`
Resource PolicyResource `json:"resource"`
Patches []PolicyPatch `json:"patch,omitempty"`
ConfigMapGenerator *PolicyConfigGenerator `json:"configMapGenerator,omitempty"`
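
Net effect of this file's change: violations now hang off the policy's status field instead of a separate top-level field. Below is a minimal sketch of the resulting status JSON, using local copies of the updated types for illustration only (all field values are hypothetical):

package main

import (
	"encoding/json"
	"fmt"
)

// Local copies of the updated types, for illustration only; the real
// definitions live in pkg/apis/policy/v1alpha1.
type Violation struct {
	Kind     string `json:"kind,omitempty"`
	Resource string `json:"resource,omitempty"`
	Source   string `json:"source,omitempty"`
	Rule     string `json:"rule,omitempty"`
}

type PolicyViolations struct {
	Violations []Violation `json:"violations,omitempty"`
}

func main() {
	// A policy's status after one violation has been recorded.
	status := PolicyViolations{
		Violations: []Violation{{
			Kind:     "Deployment",
			Resource: "default/nginx", // namespace/name key, hypothetical
			Rule:     "check-labels",
		}},
	}
	out, _ := json.MarshalIndent(status, "", "  ")
	fmt.Println(string(out))
}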


@@ -0,0 +1,161 @@
package resourceClient
import (
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/apimachinery/pkg/runtime"
)
func GetResource(clientSet *kubernetes.Clientset, kind string, resourceNamespace string, resourceName string) (runtime.Object, error) {
switch kind {
case "Deployment":
{
obj, err := clientSet.AppsV1().Deployments(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) // Deployment
if err != nil {
return nil, err
}
return obj, nil
}
case "Pods":
{
obj, err := clientSet.CoreV1().Pods(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "ConfigMap":
{
obj, err := clientSet.CoreV1().ConfigMaps(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "CronJob":
{
obj, err := clientSet.BatchV1beta1().CronJobs(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Endpoints":
{
obj, err := clientSet.CoreV1().Endpoints(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "HorizontalPodAutoscaler":
{
obj, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Ingress":
{
obj, err := clientSet.ExtensionsV1beta1().Ingresses(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Job":
{
obj, err := clientSet.BatchV1().Jobs(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "LimitRange":
{
obj, err := clientSet.CoreV1().LimitRanges(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Namespace":
{
obj, err := clientSet.CoreV1().Namespaces().Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "NetworkPolicy":
{
obj, err := clientSet.NetworkingV1().NetworkPolicies(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "PersistentVolumeClaim":
{
obj, err := clientSet.CoreV1().PersistentVolumeClaims(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "PodDisruptionBudget":
{
obj, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "PodTemplate":
{
obj, err := clientSet.CoreV1().PodTemplates(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "ResourceQuota":
{
obj, err := clientSet.CoreV1().ResourceQuotas(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Secret":
{
obj, err := clientSet.CoreV1().Secrets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "Service":
{
obj, err := clientSet.CoreV1().Services(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
case "StatefulSet":
{
obj, err := clientSet.AppsV1().StatefulSets(resourceNamespace).Get(resourceName, meta_v1.GetOptions{})
if err != nil {
return nil, err
}
return obj, nil
}
default:
return nil, nil
}
}
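
A minimal caller-side sketch for the helper above; the kubeconfig source and the resource names are assumptions for illustration:

package main

import (
	"fmt"
	"log"
	"os"

	resourceClient "github.com/nirmata/kube-policy/pkg/resourceClient"
	"k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a clientset from a kubeconfig (path via env is an assumption).
	config, err := clientcmd.BuildConfigFromFlags("", os.Getenv("KUBECONFIG"))
	if err != nil {
		log.Fatal(err)
	}
	clientSet, err := kubernetes.NewForConfig(config)
	if err != nil {
		log.Fatal(err)
	}
	// Look up a Deployment by kind, namespace, and name.
	obj, err := resourceClient.GetResource(clientSet, "Deployment", "default", "nginx")
	if err != nil {
		log.Fatal(err)
	}
	// meta.Accessor works for any of the typed objects returned above.
	acc, err := meta.Accessor(obj)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("found %s/%s\n", acc.GetNamespace(), acc.GetName())
}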

pkg/violation/util.go (new file, 56 lines)

@@ -0,0 +1,56 @@
package violation
// Mode to identify the CRUD event when the violation was identified
type Mode string
const (
// Create resource
Create Mode = "create"
// Update resource
Update Mode = "update"
// Delete resource
Delete Mode = "delete"
)
// ResourceMode to identify the source of the violation check
type ResourceMode string
const (
// Resource type is kubernetes resource
Resource ResourceMode = "resource"
// Policy type is policy custom resource
Policy ResourceMode = "policy"
)
type Target int
const (
ResourceTarget Target = 1
PolicyTarget Target = 2
)
// Source for the events recorder
const violationEventSource = "policy-controller"
// Name for the workqueue to store the events
const workqueueViolationName = "Policy-Violations"
// Event Reason
const violationEventResource = "Violation"
type EventInfo struct {
Resource string
Kind string
Reason string
Source string
ResourceTarget Target
}
// Info input details
type Info struct {
Kind string
Resource string
Policy string
RuleName string
Reason string
}
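
Note that Resource and Policy carry informer-style namespace/name keys; the builder later splits them with cache.SplitMetaNamespaceKey. A small sketch of the expected key format (all values hypothetical):

package main

import (
	"fmt"

	"github.com/nirmata/kube-policy/pkg/violation"
	"k8s.io/client-go/tools/cache"
)

func main() {
	info := violation.Info{
		Kind:     "Deployment",
		Resource: "default/nginx",        // namespace/name key
		Policy:   "default/check-labels", // namespace/name key
		RuleName: "require-app-label",
		Reason:   "missing required label app",
	}
	// The builder recovers namespace and name the same way informers do.
	ns, name, err := cache.SplitMetaNamespaceKey(info.Resource)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println(ns, name) // prints: default nginx
}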


@@ -1,18 +1,14 @@
package violation
import (
"encoding/json"
"fmt"
jsonpatch "github.com/evanphx/json-patch"
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme"
informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1"
lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
// "github.com/nirmata/kube-policy/webhooks"
resourceClient "github.com/nirmata/kube-policy/pkg/resourceClient"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// patchTypes "k8s.io/apimachinery/pkg/types"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
@@ -30,24 +26,6 @@ type Violations []Violation
type Violation struct {
}
// Mode for policy types
type Mode int
const (
Mutate Mode = 0
Violate Mode = 1
)
// Info input details
type Info struct {
Resource string
Policy string
rule string
Mode Mode
Reason string
crud string
}
// Builder to generate violations
type Builder struct {
kubeClient *kubernetes.Clientset
@@ -61,8 +39,8 @@ type Builder struct {
func NewViolationHelper(kubeClient *kubernetes.Clientset, policyClientSet *clientset.Clientset, logger *log.Logger, policyInformer informers.PolicyInformer) (*Builder, error) {
policyscheme.AddToScheme(scheme.Scheme)
// Initialize Event Broadcaster
policyscheme.AddToScheme(scheme.Scheme)
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(log.Printf)
eventBroadcaster.StartRecordingToSink(
@@ -70,12 +48,12 @@ func NewViolationHelper(kubeClient *kubernetes.Clientset, policyClientSet *clien
Interface: kubeClient.CoreV1().Events("")})
recorder := eventBroadcaster.NewRecorder(
scheme.Scheme,
v1.EventSource{Component: "policy-controller"})
v1.EventSource{Component: violationEventSource})
// Build the builder
builder := &Builder{
kubeClient: kubeClient,
policyClientset: policyClientSet,
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "Policy-Violations"),
workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workqueueViolationName),
logger: logger,
recorder: recorder,
policyLister: policyInformer.Lister(),
@@ -84,73 +62,141 @@ func NewViolationHelper(kubeClient *kubernetes.Clientset, policyClientSet *clien
return builder, nil
}
// Create Violation -> (Info)
// Create generates the violation patch and
// queues the events to be created
// TO-DO: Create should validate the rule number and update the violation if one exists
func (b *Builder) Create(info Info) ([]byte, error) {
func (b *Builder) Create(info Info) error {
// generate patch
patchBytes, err := b.generateViolationPatch(info)
// we can generate the patch as the policy resource will always exist
// Apply Patch
err := b.patchViolation(info)
if err != nil {
return nil, err
return err
}
// generate event
// add to queue
b.workqueue.Add(info)
return patchBytes, nil
// Generate event for policy
b.workqueue.Add(
EventInfo{
Resource: info.Policy,
Reason: info.Reason,
ResourceTarget: PolicyTarget,
})
// Generate event for resource
b.workqueue.Add(
EventInfo{
Kind: info.Kind,
Resource: info.Resource,
Reason: info.Reason,
ResourceTarget: ResourceTarget,
})
return nil
}
// Remove the violation
func (b *Builder) Remove(info Info) ([]byte, error) {
b.workqueue.Add(info)
return nil, nil
}
func (b *Builder) generateViolationPatch(info Info) ([]byte, error) {
func (b *Builder) patchViolation(info Info) error {
// policy-controller handlers run after admission events,
// so the admission controller will already have created the policy resource
// Get Policy namespace and name
policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(info.Policy)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", info.Policy))
return nil, err
return err
}
// Try to access the policy
// Try to access the resource
// if the above resource objects have not been created then we requeue the request to create the event
policy, err := b.policyLister.Policies(policyNamespace).Get(policyName)
if err != nil {
fmt.Println(err)
return nil, err
utilruntime.HandleError(err)
return err
}
// Add violation
updatedPolicy := policy.DeepCopy()
updatedPolicy.Status.Logs = append(updatedPolicy.Status.Logs, info.Reason)
// var update bool
// inactiveViolationindex := []int{}
updatedViolations := []types.Violation{}
// Check if the violation with the same rule exists for the same resource and rule name
for _, violation := range updatedPolicy.Status.Violations {
if ok, err := b.IsActive(violation); ok {
if err != nil {
fmt.Println(err)
}
updatedViolations = append(updatedViolations, violation)
} else {
fmt.Println("Remove violation")
b.workqueue.Add(
EventInfo{
Resource: info.Policy,
Reason: "Removing violation for rule " + info.RuleName,
ResourceTarget: PolicyTarget,
})
}
}
// Rule is updated TO-DO
// Don't validate whether the resource is active, as a new Violation will not be created if it is not
updatedViolations = append(updatedViolations,
types.Violation{
Kind: info.Kind,
Resource: info.Resource,
Rule: info.RuleName,
Reason: info.Reason,
})
updatedPolicy.Status.Violations = updatedViolations
// Patch
return b.patch(policy, updatedPolicy)
}
func (b *Builder) patch(policy *types.Policy, updatedPolicy *types.Policy) ([]byte, error) {
originalPolicy, err := json.Marshal(policy)
if err != nil {
return nil, err
}
modifiedPolicy, err := json.Marshal(updatedPolicy)
if err != nil {
return nil, err
}
patchBytes, err := jsonpatch.CreateMergePatch(originalPolicy, modifiedPolicy)
if err != nil {
return nil, err
}
return patchBytes, nil
// _, err = b.PolicyClientset.Nirmata().Policies(policy.Namespace).Patch(policy.Name, patchTypes.MergePatchType, patchBytes)
// if err != nil {
// return err
// }
// return nil
func (b *Builder) getPolicyEvent(info Info) EventInfo {
return EventInfo{Resource: info.Resource}
}
func (b *Builder) IsActive(violation types.Violation) (bool, error) {
if ok, err := b.ValidationResourceActive(violation); !ok {
return false, err
}
return true, nil
}
func (b *Builder) ValidationResourceActive(violation types.Violation) (bool, error) {
resourceNamespace, resourceName, err := cache.SplitMetaNamespaceKey(violation.Resource)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", violation.Resource))
// Remove the corresponding violation
return false, err
}
// Check if the corresponding resource is still present
_, err = resourceClient.GetResource(b.kubeClient, violation.Kind, resourceNamespace, resourceName)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to get resource %s", violation.Resource))
return false, err
}
return true, nil
}
func (b *Builder) patch(policy *types.Policy, updatedPolicy *types.Policy) error {
_, err := b.policyClientset.Nirmata().Policies(updatedPolicy.Namespace).UpdateStatus(updatedPolicy)
// _, err = b.policyClientset.Nirmata().Policies(policy.Namespace).Patch(policy.Name, patchTypes.MergePatchType, patchBytes)
if err != nil {
return err
}
return nil
}
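
The commented-out Patch call above hints at a merge-patch alternative to UpdateStatus. A sketch of what that could look like, reusing the marshaling from the removed generateViolationPatch; this helper is hypothetical and not part of the commit:

package violation

import (
	"encoding/json"

	jsonpatch "github.com/evanphx/json-patch"
	types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
	patchTypes "k8s.io/apimachinery/pkg/types"
)

// patchViaMergePatch is a hypothetical alternative to patch: diff the old
// and new policy objects and send the result as a JSON merge patch.
func (b *Builder) patchViaMergePatch(policy, updatedPolicy *types.Policy) error {
	originalPolicy, err := json.Marshal(policy)
	if err != nil {
		return err
	}
	modifiedPolicy, err := json.Marshal(updatedPolicy)
	if err != nil {
		return err
	}
	patchBytes, err := jsonpatch.CreateMergePatch(originalPolicy, modifiedPolicy)
	if err != nil {
		return err
	}
	_, err = b.policyClientset.Nirmata().Policies(policy.Namespace).Patch(
		policy.Name, patchTypes.MergePatchType, patchBytes)
	return err
}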
// Run : Initialize the worker routines to process the event creation
func (b *Builder) Run(threadiness int, stopCh <-chan struct{}) error {
defer utilruntime.HandleCrash()
defer b.workqueue.ShutDown()
log.Println("Starting violation builder")
@@ -183,10 +229,9 @@ func (b *Builder) processNextWorkItem() bool {
}
err := func(obj interface{}) error {
defer b.workqueue.Done(obj)
var key Info
var key EventInfo
var ok bool
if key, ok = obj.(Info); !ok {
if key, ok = obj.(EventInfo); !ok {
b.workqueue.Forget(obj)
log.Printf("Expecting type info by got %v", obj)
return nil
@@ -195,7 +240,7 @@ func (b *Builder) processNextWorkItem() bool {
// Run the syncHandler, passing the resource and the policy
if err := b.syncHandler(key); err != nil {
b.workqueue.AddRateLimited(key)
return fmt.Errorf("error syncing '%s' & '%s': %s, requeuing", key.Resource, key.Policy, err.Error())
return fmt.Errorf("error syncing '%s' : %s, requeuing event creation request", key.Resource, err.Error())
}
return nil
@@ -209,47 +254,33 @@ func (b *Builder) processNextWorkItem() bool {
}
// TO-DO: how to handle events if the resource has been deleted, and clean up the dirty object
func (b *Builder) syncHandler(key Info) error {
func (b *Builder) syncHandler(key EventInfo) error {
fmt.Println(key)
// Get Policy namespace and name
policyNamespace, policyName, err := cache.SplitMetaNamespaceKey(key.Policy)
namespace, name, err := cache.SplitMetaNamespaceKey(key.Resource)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key.Policy))
utilruntime.HandleError(fmt.Errorf("invalid policy key: %s", key.Resource))
return nil
}
// Try to access the policy
// Try to access the resource
// if the above resource objects have not been created then we requeue the request to create the event
fmt.Println(policyNamespace)
fmt.Println(policyName)
policy, err := b.policyLister.Policies(policyNamespace).Get(policyName)
if err != nil {
fmt.Println(err)
return err
if key.ResourceTarget == ResourceTarget {
// Resource Event
resource, err := resourceClient.GetResource(b.kubeClient, key.Kind, namespace, name)
if err != nil {
utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry", key.Resource))
return err
}
b.recorder.Event(resource, v1.EventTypeNormal, violationEventResource, key.Reason)
} else {
// Policy Event
policy, err := b.policyLister.Policies(namespace).Get(name)
if err != nil {
// TO-DO: this scenario will not exist as the policy will always exist
// unless the namespace and resource name are invalid
utilruntime.HandleError(err)
return err
}
b.recorder.Event(policy, v1.EventTypeNormal, violationEventResource, key.Reason)
}
resourceNamespace, resourceName, err := cache.SplitMetaNamespaceKey(key.Resource)
fmt.Println(resourceNamespace)
fmt.Println(resourceName)
if err != nil {
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key.Resource))
return nil
}
// Get Resource namespace and name
resource, err := b.kubeClient.AppsV1().Deployments(resourceNamespace).Get(resourceName, meta_v1.GetOptions{}) // Deployment
if err != nil {
fmt.Println(err)
return err
}
// Generate events for policy
b.recorder.Event(policy, v1.EventTypeNormal, "violation", key.Reason)
// Generate events for resource
b.recorder.Event(resource, v1.EventTypeNormal, "violation", key.Reason)
return nil
}
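
End to end, a minimal sketch of how a controller might wire the builder up, assuming the caller already owns the clientsets and a started policy informer (the package name, helper name, and all values are illustrative):

package controller

import (
	"log"
	"os"

	clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
	informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1"
	"github.com/nirmata/kube-policy/pkg/violation"
	"k8s.io/client-go/kubernetes"
)

// startViolationBuilder is a hypothetical wiring helper showing the
// intended flow: construct the builder, start its workers, record one
// violation.
func startViolationBuilder(kubeClient *kubernetes.Clientset,
	policyClient *clientset.Clientset,
	policyInformer informers.PolicyInformer,
	stopCh <-chan struct{}) error {

	logger := log.New(os.Stdout, "violation: ", log.LstdFlags)
	builder, err := violation.NewViolationHelper(kubeClient, policyClient, logger, policyInformer)
	if err != nil {
		return err
	}
	// Run starts the workers that drain the queue of EventInfo items.
	go builder.Run(1, stopCh)

	// Create patches the policy status and queues one event for the
	// policy and one for the offending resource (values hypothetical).
	return builder.Create(violation.Info{
		Kind:     "Deployment",
		Resource: "default/nginx",
		Policy:   "default/check-labels",
		RuleName: "require-app-label",
		Reason:   "missing required label app",
	})
}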