1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-30 19:35:06 +00:00

introduce policy violation generator

This commit is contained in:
shivkumar dudhani 2019-11-12 14:41:29 -08:00
parent f788f0e526
commit ccbb6e33a5
14 changed files with 784 additions and 224 deletions

17
main.go
View file

@ -80,9 +80,6 @@ func main() {
glog.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
}
// Policy meta-data store
policyMetaStore := policystore.NewPolicyStore()
// KYVERNO CRD INFORMER
// watches CRD resources:
// - Policy
@ -101,16 +98,23 @@ func main() {
// if the configMap is update, the configuration will be updated :D
configData := config.NewConfigData(kubeClient, kubeInformer.Core().V1().ConfigMaps(), filterK8Resources)
// Policy meta-data store
policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1alpha1().ClusterPolicies().Lister())
// EVENT GENERATOR
// - generate event with retry mechanism
egen := event.NewEventGenerator(client, pInformer.Kyverno().V1alpha1().ClusterPolicies())
// POLICY VIOLATION GENERATOR
// -- generate policy violation
pvgen := policyviolation.NewPVGenerator(pclient, pInformer.Kyverno().V1alpha1().ClusterPolicyViolations().Lister())
// POLICY CONTROLLER
// - reconciliation policy and policy violation
// - process policy on existing resources
// - status aggregator: recieves stats when a policy is applied
// & updates the policy status
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, configData, policyMetaStore)
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, configData, pvgen, policyMetaStore)
if err != nil {
glog.Fatalf("error creating policy controller: %v\n", err)
}
@ -125,7 +129,7 @@ func main() {
// GENERATE CONTROLLER
// - watches for Namespace resource and generates resource based on the policy generate rule
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData)
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData, pvgen, policyMetaStore)
// CONFIGURE CERTIFICATES
tlsPair, err := initTLSPemPair(clientConfig, client)
@ -148,7 +152,7 @@ func main() {
// -- annotations on resources with update details on mutation JSON patches
// -- generate policy violation resource
// -- generate events on policy and resource
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, cleanUp)
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, policyMetaStore, pvgen, cleanUp)
if err != nil {
glog.Fatalf("Unable to create webhook server: %v\n", err)
}
@ -162,6 +166,7 @@ func main() {
go pvc.Run(1, stopCh)
go egen.Run(1, stopCh)
go nsc.Run(1, stopCh)
go pvgen.Run(1, stopCh)
// verifys if the admission control is enabled and active
// resync: 60 seconds

View file

@ -7,10 +7,12 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/config"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"k8s.io/apimachinery/pkg/api/errors"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@ -57,6 +59,10 @@ type NamespaceController struct {
rm resourceManager
// helpers to validate against current loaded configuration
configHandler config.Interface
// store to hold policy meta data for faster lookup
pMetaStore policystore.LookupInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
}
//NewNamespaceController returns a new Controller to manage generation rules
@ -67,15 +73,19 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
policyStatus policy.PolicyStatusInterface,
eventGen event.Interface,
configHandler config.Interface) *NamespaceController {
//TODO: do we need to event recorder for this controller?
configHandler config.Interface,
pvGenerator policyviolation.GeneratorInterface,
pMetaStore policystore.LookupInterface) *NamespaceController {
//TODO: do we need to event recorder for this controller?
// create the controller
nsc := &NamespaceController{
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
configHandler: configHandler,
pMetaStore: pMetaStore,
pvGenerator: pvGenerator,
}
nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{

View file

@ -6,10 +6,10 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
policyctr "github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/utils"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -98,7 +98,7 @@ func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []e
// get all the policies that have a generate rule and resource description satifies the namespace
// apply policy on resource
policies := listpolicies(ns, nsc.pLister)
policies := listpolicies(ns, nsc.pMetaStore)
var engineResponses []engine.EngineResponse
for _, policy := range policies {
// pre-processing, check if the policy and resource version has been processed before
@ -106,7 +106,7 @@ func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []e
glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
continue
}
engineResponse := applyPolicy(nsc.client, ns, *policy, nsc.policyStatus)
engineResponse := applyPolicy(nsc.client, ns, policy, nsc.policyStatus)
engineResponses = append(engineResponses, engineResponse)
// post-processing, register the resource as processed
@ -160,10 +160,10 @@ func (nsc *NamespaceController) processPolicy(policy *kyverno.ClusterPolicy) {
}
}
func listpolicies(ns unstructured.Unstructured, pLister kyvernolister.ClusterPolicyLister) []*kyverno.ClusterPolicy {
var filteredpolicies []*kyverno.ClusterPolicy
func listpolicies(ns unstructured.Unstructured, pMetaStore policystore.LookupInterface) []kyverno.ClusterPolicy {
var filteredpolicies []kyverno.ClusterPolicy
glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
policies, err := pLister.List(labels.NewSelector())
policies, err := pMetaStore.LookUp(ns.GetKind(), ns.GetNamespace())
if err != nil {
glog.Errorf("failed to get list policies: %v", err)
return nil

View file

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyviolation"
@ -11,47 +12,96 @@ import (
func (nsc *NamespaceController) report(engineResponses []engine.EngineResponse) {
// generate events
eventInfos := generateEvents(engineResponses)
nsc.eventGen.Add(eventInfos...)
// generate policy violations
for _, er := range engineResponses {
// events
// success - policy applied on resource
// failure - policy/rule failed to apply on the resource
reportEvents(er, nsc.eventGen)
// policy violations
// failure - policy/rule failed to apply on the resource
}
// generate policy violation
policyviolation.CreatePV(nsc.pvLister, nsc.kyvernoClient, engineResponses)
pvInfos := generatePVs(engineResponses)
nsc.pvGenerator.Add(pvInfos...)
}
//reportEvents generates events for the failed resources
func reportEvents(engineResponse engine.EngineResponse, eventGen event.Interface) {
if engineResponse.IsSuccesful() {
return
func generatePVs(ers []engine.EngineResponse) []policyviolation.Info {
var pvInfos []policyviolation.Info
for _, er := range ers {
// ignore creation of PV for resoruces that are yet to be assigned a name
if er.PolicyResponse.Resource.Name == "" {
glog.V(4).Infof("resource %v, has not been assigned a name, not creating a policy violation for it", er.PolicyResponse.Resource)
continue
}
if er.IsSuccesful() {
continue
}
glog.V(4).Infof("Building policy violation for engine response %v", er)
// build policy violation info
pvInfos = append(pvInfos, buildPVInfo(er))
}
glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", engineResponse.PolicyResponse.Policy, engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Namespace, engineResponse.PolicyResponse.Resource.Name)
for _, rule := range engineResponse.PolicyResponse.Rules {
return pvInfos
}
// buildPVInfo converts a failed engine response into a policy-violation
// request, capturing the policy name, the patched resource, and the set of
// rules that failed to apply. Blocked is false on this path: these requests
// come from the background namespace controller, not from a denied admission.
func buildPVInfo(er engine.EngineResponse) policyviolation.Info {
	return policyviolation.Info{
		Blocked:    false,
		PolicyName: er.PolicyResponse.Policy,
		Resource:   er.PatchedResource,
		Rules:      buildViolatedRules(er),
	}
}
// buildViolatedRules collects every rule of the engine response that did not
// succeed, converting each into a kyverno.ViolatedRule carrying the rule's
// name, type and failure message. Successful rules are excluded.
func buildViolatedRules(er engine.EngineResponse) []kyverno.ViolatedRule {
	var violated []kyverno.ViolatedRule
	for _, r := range er.PolicyResponse.Rules {
		if !r.Success {
			violated = append(violated, kyverno.ViolatedRule{
				Name:    r.Name,
				Type:    r.Type,
				Message: r.Message,
			})
		}
	}
	return violated
}
// generateEvents builds event requests for every engine response that failed;
// responses where all rules succeeded produce no events.
func generateEvents(ers []engine.EngineResponse) []event.Info {
	var infos []event.Info
	for _, er := range ers {
		if !er.IsSuccesful() {
			infos = append(infos, generateEventsPerEr(er)...)
		}
	}
	return infos
}
func generateEventsPerEr(er engine.EngineResponse) []event.Info {
var eventInfos []event.Info
glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
for _, rule := range er.PolicyResponse.Rules {
if rule.Success {
continue
}
// generate event on resource for each failed rule
glog.V(4).Infof("generation event on resource '%s/%s' for policy '%s'", engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Name, engineResponse.PolicyResponse.Policy)
glog.V(4).Infof("generation event on resource '%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = engineResponse.PolicyResponse.Resource.Kind
e.Kind = er.PolicyResponse.Resource.Kind
e.Namespace = "" // event generate on namespace resource
e.Name = engineResponse.PolicyResponse.Resource.Name
e.Name = er.PolicyResponse.Resource.Name
e.Reason = "Failure"
e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' failed to apply. %v", engineResponse.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
eventGen.Add(e)
e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' failed to apply. %v", er.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
eventInfos = append(eventInfos, e)
}
if er.IsSuccesful() {
return eventInfos
}
// generate a event on policy for all failed rules
glog.V(4).Infof("generation event on policy '%s'", engineResponse.PolicyResponse.Policy)
glog.V(4).Infof("generation event on policy '%s'", er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = "ClusterPolicy"
e.Namespace = ""
e.Name = engineResponse.PolicyResponse.Policy
e.Name = er.PolicyResponse.Policy
e.Reason = "Failure"
e.Message = fmt.Sprintf("failed to apply policy '%s' rules '%v' on resource '%s/%s/%s'", engineResponse.PolicyResponse.Policy, engineResponse.GetFailedRules(), engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Namespace, engineResponse.PolicyResponse.Resource.Name)
eventGen.Add(e)
e.Message = fmt.Sprintf("failed to apply policy '%s' rules '%v' on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.GetFailedRules(), er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
return eventInfos
}

View file

@ -17,6 +17,7 @@ import (
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/webhookconfig"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
@ -79,14 +80,17 @@ type PolicyController struct {
// recieves stats and aggregates details
statusAggregator *PolicyStatusAggregator
// store to hold policy meta data for faster lookup
pMetaStore policystore.Interface
pMetaStore policystore.UpdateInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
}
// NewPolicyController create a new PolicyController
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
configHandler config.Interface,
pMetaStore policystore.Interface) (*PolicyController, error) {
pvGenerator policyviolation.GeneratorInterface,
pMetaStore policystore.UpdateInterface) (*PolicyController, error) {
// Event broad caster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
@ -105,6 +109,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
webhookRegistrationClient: webhookRegistrationClient,
configHandler: configHandler,
pMetaStore: pMetaStore,
pvGenerator: pvGenerator,
}
pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}

View file

@ -4,6 +4,7 @@ import (
"fmt"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyviolation"
@ -13,52 +14,112 @@ import (
// - has violation -> report
// - no violation -> cleanup policy violations(resource or resource owner)
func (pc *PolicyController) cleanupAndReport(engineResponses []engine.EngineResponse) {
for _, eResponse := range engineResponses {
if !eResponse.IsSuccesful() {
// failure - policy/rule failed to apply on the resource
reportEvents(eResponse, pc.eventGen)
// generate policy violation
// Only created on resource, not resource owners
policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
} else {
// cleanup existing violations if any
// if there is any error in clean up, we dont re-queue the resource
// it will be re-tried in the next controller cache resync
pc.cleanUpPolicyViolation(eResponse.PolicyResponse)
// generate Events
eventInfos := generateEvents(engineResponses)
pc.eventGen.Add(eventInfos...)
// create policy violation
pvInfos := generatePVs(engineResponses)
pc.pvGenerator.Add(pvInfos...)
// cleanup existing violations if any
// if there is any error in clean up, we dont re-queue the resource
// it will be re-tried in the next controller cache resync
pc.cleanUp(engineResponses)
}
// cleanUp removes existing policy violations for resources whose engine
// responses were successful: once a policy applies cleanly, any previously
// recorded violation for that resource is stale and must be deleted.
//
// NOTE(fix): the previous body skipped successful responses and cleaned up
// the failed ones, inverting the intent. The pre-refactor code (the removed
// cleanupAndReport branch) called cleanUpPolicyViolation only in the
// successful case, and the caller's comment ("cleanup existing violations if
// any") describes the same behavior; failed responses instead get new PVs
// via generatePVs.
func (pc *PolicyController) cleanUp(ers []engine.EngineResponse) {
	for _, er := range ers {
		if !er.IsSuccesful() {
			continue
		}
		pc.cleanUpPolicyViolation(er.PolicyResponse)
	}
}
//reportEvents generates events for the failed resources
func reportEvents(engineResponse engine.EngineResponse, eventGen event.Interface) {
if engineResponse.IsSuccesful() {
return
}
glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", engineResponse.PolicyResponse.Policy, engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Namespace, engineResponse.PolicyResponse.Resource.Name)
for _, rule := range engineResponse.PolicyResponse.Rules {
if rule.Success {
return
func generatePVs(ers []engine.EngineResponse) []policyviolation.Info {
var pvInfos []policyviolation.Info
for _, er := range ers {
// ignore creation of PV for resoruces that are yet to be assigned a name
if er.PolicyResponse.Resource.Name == "" {
glog.V(4).Infof("resource %v, has not been assigned a name, not creating a policy violation for it", er.PolicyResponse.Resource)
continue
}
// generate event on resource for each failed rule
glog.V(4).Infof("generation event on resource '%s/%s/%s' for policy '%s'", engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Namespace, engineResponse.PolicyResponse.Resource.Name, engineResponse.PolicyResponse.Policy)
e := event.Info{}
e.Kind = engineResponse.PolicyResponse.Resource.Kind
e.Namespace = engineResponse.PolicyResponse.Resource.Namespace
e.Name = engineResponse.PolicyResponse.Resource.Name
e.Reason = "Failure"
e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' failed to apply. %v", engineResponse.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
eventGen.Add(e)
if er.IsSuccesful() {
continue
}
glog.V(4).Infof("Building policy violation for engine response %v", er)
// build policy violation info
pvInfos = append(pvInfos, buildPVInfo(er))
}
return pvInfos
}
// buildPVInfo builds a policyviolation.Info request from a failed engine
// response. Blocked is hard-coded false here: this path runs in the policy
// controller's background reconciliation, where no admission request was
// denied.
func buildPVInfo(er engine.EngineResponse) policyviolation.Info {
	info := policyviolation.Info{
		Blocked:    false,
		PolicyName: er.PolicyResponse.Policy,
		Resource:   er.PatchedResource,
		Rules:      buildViolatedRules(er),
	}
	return info
}
// buildViolatedRules returns one ViolatedRule entry for each rule in the
// engine response that did not succeed; successful rules are skipped.
func buildViolatedRules(er engine.EngineResponse) []kyverno.ViolatedRule {
	var violatedRules []kyverno.ViolatedRule
	for _, rule := range er.PolicyResponse.Rules {
		if rule.Success {
			continue
		}
		vrule := kyverno.ViolatedRule{
			Name:    rule.Name,
			Type:    rule.Type,
			Message: rule.Message,
		}
		violatedRules = append(violatedRules, vrule)
	}
	return violatedRules
}
// generateEvents collects event requests for all failed engine responses;
// fully successful responses generate no events.
func generateEvents(ers []engine.EngineResponse) []event.Info {
	var eventInfos []event.Info
	for _, er := range ers {
		if er.IsSuccesful() {
			continue
		}
		eventInfos = append(eventInfos, generateEventsPerEr(er)...)
	}
	return eventInfos
}
func generateEventsPerEr(er engine.EngineResponse) []event.Info {
var eventInfos []event.Info
glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
for _, rule := range er.PolicyResponse.Rules {
if rule.Success {
continue
}
// generate event on resource for each failed rule
glog.V(4).Infof("generation event on resource '%s/%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = er.PolicyResponse.Resource.Kind
e.Namespace = er.PolicyResponse.Resource.Namespace
e.Name = er.PolicyResponse.Resource.Name
e.Reason = "Failure"
e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' failed to apply. %v", er.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
eventInfos = append(eventInfos, e)
}
if er.IsSuccesful() {
return eventInfos
}
// generate a event on policy for all failed rules
glog.V(4).Infof("generation event on policy '%s'", engineResponse.PolicyResponse.Policy)
glog.V(4).Infof("generation event on policy '%s'", er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = "ClusterPolicy"
e.Namespace = ""
e.Name = engineResponse.PolicyResponse.Policy
e.Name = er.PolicyResponse.Policy
e.Reason = "Failure"
e.Message = fmt.Sprintf("failed to apply policy '%s' rules '%v' on resource '%s/%s/%s'", engineResponse.PolicyResponse.Policy, engineResponse.GetFailedRules(), engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Namespace, engineResponse.PolicyResponse.Resource.Name)
eventGen.Add(e)
e.Message = fmt.Sprintf("failed to apply policy '%s' rules '%v' on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.GetFailedRules(), er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
eventInfos = append(eventInfos, e)
return eventInfos
}

View file

@ -4,61 +4,43 @@ import (
"sync"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
)
type PolicyElement struct {
Name string
Rule string
}
//Operation defines the operation that a rule is performing
// we can only have a single operation per rule
type Operation string
const (
//Mutation : mutation rules
Mutation Operation = "Mutation"
//Validation : validation rules
Validation Operation = "Validation"
//Generation : generation rules
Generation Operation = "Generation"
)
type policyMap map[PolicyElement]interface{}
type policyMap map[string]interface{}
type namespaceMap map[string]policyMap
type kindMap map[string]namespaceMap
//PolicyStore Store the meta-data information to faster lookup policies
type PolicyStore struct {
data map[Operation]map[string]map[string]policyMap
mu sync.RWMutex
data map[string]namespaceMap
mu sync.RWMutex
pLister kyvernolister.ClusterPolicyLister
}
type Interface interface {
//UpdateInterface provides api to update policies
type UpdateInterface interface {
// Register a new policy
Register(policy kyverno.ClusterPolicy)
// Remove policy information
UnRegister(policy kyverno.ClusterPolicy) error
}
//LookupInterface provides api to lookup policies
type LookupInterface interface {
// Lookup based on kind and namespaces
LookUp(operation Operation, kind, namespace string) []PolicyElement
LookUp(kind, namespace string) ([]kyverno.ClusterPolicy, error)
}
// NewPolicyStore returns a new policy store
func NewPolicyStore() *PolicyStore {
func NewPolicyStore(pLister kyvernolister.ClusterPolicyLister) *PolicyStore {
ps := PolicyStore{
data: make(map[Operation]map[string]map[string]policyMap),
data: make(kindMap),
pLister: pLister,
}
return &ps
}
func operation(rule kyverno.Rule) Operation {
if rule.HasMutate() {
return Mutation
} else if rule.HasValidate() {
return Validation
} else {
return Generation
}
}
//Register a new policy
func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
ps.mu.Lock()
@ -66,13 +48,9 @@ func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
var pmap policyMap
// add an entry for each rule in policy
for _, rule := range policy.Spec.Rules {
// get operation
operation := operation(rule)
operationMap := ps.addOperation(operation)
// rule.MatchResources.Kinds - List - mandatory - atleast on entry
for _, kind := range rule.MatchResources.Kinds {
kindMap := addKind(operationMap, kind)
kindMap := ps.addKind(kind)
// namespaces
if len(rule.MatchResources.Namespaces) == 0 {
// all namespaces - *
@ -83,22 +61,34 @@ func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
}
}
// add policy to the pmap
addPolicyElement(pmap, policy.Name, rule.Name)
addPolicyElement(pmap, policy.Name)
}
}
}
//LookUp returns the policies matching the given kind and namespace. It
//resolves the policy names recorded in the meta-store against the lister;
//the first lister error aborts the lookup and is returned to the caller.
func (ps *PolicyStore) LookUp(kind, namespace string) ([]kyverno.ClusterPolicy, error) {
	policies := []kyverno.ClusterPolicy{}
	for _, name := range ps.lookUp(kind, namespace) {
		p, err := ps.pLister.Get(name)
		if err != nil {
			return nil, err
		}
		policies = append(policies, *p)
	}
	return policies, nil
}
//UnRegister Remove policy information
func (ps *PolicyStore) UnRegister(policy kyverno.ClusterPolicy) error {
ps.mu.Lock()
defer ps.mu.Unlock()
for _, rule := range policy.Spec.Rules {
// get operation
operation := operation(rule)
operationMap := ps.getOperation(operation)
for _, kind := range rule.MatchResources.Kinds {
// get kind Map
kindMap := getKind(operationMap, kind)
kindMap := ps.getKind(kind)
if kindMap == nil {
// kind does not exist
return nil
@ -107,12 +97,12 @@ func (ps *PolicyStore) UnRegister(policy kyverno.ClusterPolicy) error {
namespace := "*"
pmap := getNamespace(kindMap, namespace)
// remove element
delete(pmap, PolicyElement{Name: policy.Name, Rule: rule.Name})
delete(pmap, policy.Name)
} else {
for _, ns := range rule.MatchResources.Namespaces {
pmap := getNamespace(kindMap, ns)
// remove element
delete(pmap, PolicyElement{Name: policy.Name, Rule: rule.Name})
delete(pmap, policy.Name)
}
}
}
@ -122,20 +112,15 @@ func (ps *PolicyStore) UnRegister(policy kyverno.ClusterPolicy) error {
//LookUp lookups up the policies for kind and namespace
// returns a list of <policy, rule> that statisfy the filters
func (ps *PolicyStore) LookUp(operation Operation, kind, namespace string) []PolicyElement {
func (ps *PolicyStore) lookUp(kind, namespace string) []string {
ps.mu.RLock()
defer ps.mu.RUnlock()
var policyMap policyMap
var ret []PolicyElement
// operation
operationMap := ps.getOperation(operation)
if operationMap == nil {
return []PolicyElement{}
}
var ret []string
// kind
kindMap := getKind(operationMap, kind)
kindMap := ps.getKind(kind)
if kindMap == nil {
return []PolicyElement{}
return []string{}
}
// get namespace specific policies
policyMap = kindMap[namespace]
@ -143,42 +128,41 @@ func (ps *PolicyStore) LookUp(operation Operation, kind, namespace string) []Pol
// get policies on all namespaces
policyMap = kindMap["*"]
ret = append(ret, transform(policyMap)...)
return ret
return unique(ret)
}
// unique returns the elements of items in first-seen order with duplicates
// removed. The result is always non-nil (an empty slice for empty or nil
// input), matching the original behavior.
//
// Fixes: the parameter was misnamed `intSlice` for a []string; the seen-map
// is now pre-sized to the input length; and the membership test uses the
// direct map-value form instead of the convoluted `_, value := keys[entry]`.
func unique(items []string) []string {
	seen := make(map[string]bool, len(items))
	list := []string{}
	for _, entry := range items {
		if !seen[entry] {
			seen[entry] = true
			list = append(list, entry)
		}
	}
	return list
}
// generates a copy
func transform(pmap policyMap) []PolicyElement {
ret := []PolicyElement{}
func transform(pmap policyMap) []string {
ret := []string{}
for k := range pmap {
ret = append(ret, k)
}
return ret
}
func (ps *PolicyStore) addOperation(operation Operation) map[string]map[string]policyMap {
operationMap, ok := ps.data[operation]
if ok {
return operationMap
}
ps.data[operation] = make(map[string]map[string]policyMap)
return ps.data[operation]
}
func (ps *PolicyStore) getOperation(operation Operation) map[string]map[string]policyMap {
return ps.data[operation]
}
func addKind(operationMap map[string]map[string]policyMap, kind string) map[string]policyMap {
val, ok := operationMap[kind]
func (ps *PolicyStore) addKind(kind string) namespaceMap {
val, ok := ps.data[kind]
if ok {
return val
}
operationMap[kind] = make(map[string]policyMap)
return operationMap[kind]
ps.data[kind] = make(namespaceMap)
return ps.data[kind]
}
func getKind(operationMap map[string]map[string]policyMap, kind string) map[string]policyMap {
return operationMap[kind]
func (ps *PolicyStore) getKind(kind string) namespaceMap {
return ps.data[kind]
}
func addNamespace(kindMap map[string]policyMap, namespace string) policyMap {
@ -194,13 +178,10 @@ func getNamespace(kindMap map[string]policyMap, namespace string) policyMap {
return kindMap[namespace]
}
func addPolicyElement(pmap policyMap, name, rule string) {
func addPolicyElement(pmap policyMap, name string) {
var emptyInterface interface{}
key := PolicyElement{
Name: name,
Rule: rule,
}
if _, ok := pmap[key]; !ok {
pmap[key] = emptyInterface
if _, ok := pmap[name]; !ok {
pmap[name] = emptyInterface
}
}

View file

@ -2,12 +2,18 @@ package policystore
import (
"encoding/json"
"reflect"
"testing"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/fake"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/kubernetes/scheme"
)
func Test_Add(t *testing.T) {
func Test_Operations(t *testing.T) {
rawPolicy1 := []byte(`
{
"apiVersion": "kyverno.io/v1alpha1",
@ -143,20 +149,127 @@ func Test_Add(t *testing.T) {
]
}
}`)
var policy1 kyverno.ClusterPolicy
json.Unmarshal(rawPolicy1, &policy1)
var policy2 kyverno.ClusterPolicy
json.Unmarshal(rawPolicy2, &policy2)
var store Interface
store = NewPolicyStore()
rawPolicy3 := []byte(`
{
"apiVersion": "kyverno.io/v1alpha1",
"kind": "ClusterPolicy",
"metadata": {
"name": "test-policy3"
},
"spec": {
"rules": [
{
"name": "r4",
"match": {
"resources": {
"kinds": [
"Service"
]
}
},
"mutate": {
"overlay": "temp"
}
}
]
}
}`)
var policy1 v1alpha1.ClusterPolicy
json.Unmarshal(rawPolicy1, &policy1)
var policy2 v1alpha1.ClusterPolicy
json.Unmarshal(rawPolicy2, &policy2)
var policy3 v1alpha1.ClusterPolicy
json.Unmarshal(rawPolicy3, &policy3)
scheme.Scheme.AddKnownTypes(v1alpha1.SchemeGroupVersion,
&v1alpha1.ClusterPolicy{},
)
var obj runtime.Object
var err error
var retPolicies []v1alpha1.ClusterPolicy
polices := []runtime.Object{}
// list of runtime objects
decode := scheme.Codecs.UniversalDeserializer().Decode
obj, _, err = decode(rawPolicy1, nil, nil)
if err != nil {
t.Error(err)
}
polices = append(polices, obj)
obj, _, err = decode(rawPolicy2, nil, nil)
if err != nil {
t.Error(err)
}
polices = append(polices, obj)
obj, _, err = decode(rawPolicy3, nil, nil)
if err != nil {
t.Error(err)
}
polices = append(polices, obj)
// Mock Lister
client := fake.NewSimpleClientset(polices...)
fakeLister := &FakeLister{client: client}
store := NewPolicyStore(fakeLister)
// Test Operations
// Add
store.Register(policy1)
// Add
store.Register(policy2)
t.Log(store.LookUp(Mutation, "Pod", ""))
// Add
store.Register(policy3)
// Lookup
retPolicies, err = store.LookUp("Pod", "")
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(retPolicies, []v1alpha1.ClusterPolicy{policy1, policy2}) {
t.Error("not matching")
}
// Remove
store.UnRegister(policy1)
t.Log(store.LookUp(Mutation, "Pod", ""))
retPolicies, err = store.LookUp("Pod", "")
if err != nil {
t.Error(err)
}
// Lookup
if !reflect.DeepEqual(retPolicies, []v1alpha1.ClusterPolicy{policy2}) {
t.Error("not matching")
}
// Add
store.Register(policy1)
t.Log(store.LookUp(Mutation, "Pod", ""))
t.Fail()
retPolicies, err = store.LookUp("Pod", "")
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(retPolicies, []v1alpha1.ClusterPolicy{policy1, policy2}) {
t.Error("not matching")
}
retPolicies, err = store.LookUp("Service", "")
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(retPolicies, []v1alpha1.ClusterPolicy{policy3}) {
t.Error("not matching")
}
}
type FakeLister struct {
client *fake.Clientset
}
func (fk *FakeLister) List(selector labels.Selector) (ret []*v1alpha1.ClusterPolicy, err error) {
return nil, nil
}
func (fk *FakeLister) Get(name string) (*v1alpha1.ClusterPolicy, error) {
return fk.client.KyvernoV1alpha1().ClusterPolicies().Get(name, v1.GetOptions{})
}
func (fk *FakeLister) GetPolicyForPolicyViolation(pv *v1alpha1.ClusterPolicyViolation) ([]*v1alpha1.ClusterPolicy, error) {
return nil, nil
}
// ListResources is a stub; not needed by the store tests.
func (fk *FakeLister) ListResources(selector labels.Selector) (ret []*v1alpha1.ClusterPolicy, err error) {
	// named results default to nil/nil
	return
}

View file

@ -0,0 +1,284 @@
package policyviolation
import (
"fmt"
"reflect"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
pvInterface "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
dclient "github.com/nirmata/kyverno/pkg/dclient"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
// workQueueName identifies the policy-violation work queue in logs/metrics.
const workQueueName = "policy-violation-controller"
// workQueueRetryLimit is the number of rate-limited retries before a failed
// item is dropped from the queue (see handleErr).
const workQueueRetryLimit = 3
//Generator creates PV
type Generator struct {
	// dclient resolves owner references when a blocked resource's violations
	// must be attached to its owners.
	// NOTE(review): no constructor sets this field — NewPVGenerator does not
	// accept a dclient, so buildPVWithOwners receives nil on the blocked
	// path; confirm before relying on that code path.
	dclient *dclient.Client
	// pvInterface performs create/update calls for ClusterPolicyViolations.
	pvInterface pvInterface.ClusterPolicyViolationInterface
	// pvLister reads existing violations from the shared informer cache.
	pvLister kyvernolister.ClusterPolicyViolationLister
	// queue holds pending violation-creation requests.
	queue workqueue.RateLimitingInterface
}
//Info is a request to create PV
type Info struct {
	// Blocked reports whether admission was denied; if so, the violation is
	// attached to the resource's owners rather than the resource itself.
	Blocked bool
	// PolicyName is the policy whose rules were violated.
	PolicyName string
	// Resource is the (patched) resource the policy was applied to.
	Resource unstructured.Unstructured
	// Rules are the rules that failed for this resource.
	Rules []kyverno.ViolatedRule
}
// NOTE(review): Info holds a slice and an unstructured map, so it is not
// comparable/hashable and cannot be used directly as a workqueue key — see
// the mismatch documented on enqueue.
//GeneratorInterface provides API to create PVs
type GeneratorInterface interface {
	Add(infos ...Info)
}
// NewPVGenerator returns a new instance of policy violation generator
func NewPVGenerator(client *kyvernoclient.Clientset,
	pvLister kyvernolister.ClusterPolicyViolationLister) *Generator {
	// return the generator directly; note the dclient field is left unset
	return &Generator{
		pvInterface: client.KyvernoV1alpha1().ClusterPolicyViolations(),
		pvLister:    pvLister,
		queue:       workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
	}
}
// enqueue adds a violation-creation request to the work queue.
//
// NOTE(review): cache.MetaNamespaceKeyFunc expects a Kubernetes object with
// ObjectMeta; Info has none, so this call is expected to fail and the request
// is silently dropped after logging. Even if a key string were produced, the
// worker (processNextWorkitem) type-asserts queue items back to Info, which a
// string key can never satisfy — so no violation would ever be created.
// Confirm the intended keying strategy; Info likely needs to be serialized to
// a string key with the full Info kept in a side store.
func (gen *Generator) enqueue(info Info) {
	key, err := cache.MetaNamespaceKeyFunc(info)
	if err != nil {
		glog.Error(err)
		return
	}
	gen.queue.Add(key)
}
//Add queues a policy violation create request
func (gen *Generator) Add(infos ...Info) {
	// enqueue each request individually
	for i := range infos {
		gen.enqueue(infos[i])
	}
}
// Run starts the requested number of worker goroutines and blocks until
// stopCh is closed.
func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	// fixed: "violaion" typo and a copy-pasted "event generator" shutdown
	// message that misidentified this component
	glog.Info("Start policy violation generator")
	defer glog.Info("Shutting down policy violation generator")
	for i := 0; i < workers; i++ {
		go wait.Until(gen.runWorker, time.Second, stopCh)
	}
	<-stopCh
}
// runWorker drains the queue until it is shut down.
func (gen *Generator) runWorker() {
	for {
		if !gen.processNextWorkitem() {
			return
		}
	}
}
// handleErr implements the retry policy for a processed queue item: forget on
// success, rate-limited requeue up to workQueueRetryLimit, then drop.
func (gen *Generator) handleErr(err error, key interface{}) {
	if err == nil {
		gen.queue.Forget(key)
		// fixed: the original fell through here, so every SUCCESSFUL item was
		// immediately re-enqueued via AddRateLimited below
		return
	}
	// retries requests if there is error
	if gen.queue.NumRequeues(key) < workQueueRetryLimit {
		glog.Warningf("Error syncing policy violation %v: %v", key, err)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		gen.queue.AddRateLimited(key)
		return
	}
	// retry budget exhausted: drop the item so it does not poison the queue
	gen.queue.Forget(key)
	glog.Error(err)
	glog.Warningf("Dropping the key out of the queue: %v", err)
}
// processNextWorkitem pulls one item off the queue, processes it, and reports
// whether the worker loop should continue (false only on queue shutdown).
func (gen *Generator) processNextWorkitem() bool {
	obj, shutdown := gen.queue.Get()
	if shutdown {
		return false
	}
	func(obj interface{}) {
		// mark the item done regardless of outcome
		defer gen.queue.Done(obj)
		info, ok := obj.(Info)
		if !ok {
			// NOTE(review): enqueue currently stores string keys, not Info
			// values, so every item lands in this branch — see enqueue.
			gen.queue.Forget(obj)
			glog.Warningf("Expecting type Info but got %v\n", obj)
			return
		}
		// sync and let handleErr decide whether to retry or forget
		gen.handleErr(gen.syncHandler(info), obj)
	}(obj)
	// the original also returned true on its (unreachable) error path, so the
	// dead error check has been removed
	return true
}
// syncHandler builds the policy violation(s) for one request and creates or
// updates them through the API. Always returns nil (see NOTE below).
func (gen *Generator) syncHandler(info Info) error {
	var pvs []kyverno.ClusterPolicyViolation
	if !info.Blocked {
		// resource was admitted: attach the violation to the resource itself
		pvs = append(pvs, buildPV(info))
	} else {
		// blocked
		// get owners
		// NOTE(review): gen.dclient is never initialized by NewPVGenerator,
		// so this passes a nil client to buildPVWithOwners — confirm.
		pvs = buildPVWithOwners(gen.dclient, info)
	}
	// create policy violation
	// NOTE(review): per-PV create/update errors are logged inside createPVS
	// but not surfaced here, so handleErr never gets a chance to retry them.
	createPVS(pvs, gen.pvLister, gen.pvInterface)
	return nil
}
// createPVS creates or updates each policy violation in pvs.
func createPVS(pvs []kyverno.ClusterPolicyViolation, pvLister kyvernolister.ClusterPolicyViolationLister, pvInterface pvInterface.ClusterPolicyViolationInterface) {
	for i := range pvs {
		createPVNew(pvs[i], pvLister, pvInterface)
	}
}
// createPVNew creates the policy violation if none exists yet for the same
// policy/resource pair, or updates the existing one when its spec changed.
func createPVNew(pv kyverno.ClusterPolicyViolation, pvLister kyvernolister.ClusterPolicyViolationLister, pvInterface pvInterface.ClusterPolicyViolationInterface) error {
	// check whether a PV already exists for this policy/resource pair
	ePV, err := getExistingPVIfAny(pvLister, pv)
	if err != nil {
		glog.Error(err)
		return err
	}
	if ePV == nil {
		// Create a New PV
		glog.V(4).Infof("creating new policy violation for policy %s & resource %s/%s/%s", pv.Spec.Policy, pv.Spec.ResourceSpec.Kind, pv.Spec.ResourceSpec.Namespace, pv.Spec.ResourceSpec.Name)
		if _, err = pvInterface.Create(&pv); err != nil {
			glog.Error(err)
			return err
		}
		glog.Infof("policy violation created for resource %v", pv.Spec.ResourceSpec)
		return nil
	}
	// Update existing PV if there any changes
	if reflect.DeepEqual(pv.Spec, ePV.Spec) {
		glog.V(4).Infof("policy violation spec %v did not change so not updating it", pv.Spec)
		return nil
	}
	// fixed: pv only carries a GenerateName; an Update without the existing
	// object's Name (and ResourceVersion) is rejected by the API server, so
	// carry the identity over from the object we just looked up
	pv.SetName(ePV.Name)
	pv.SetResourceVersion(ePV.ResourceVersion)
	if _, err = pvInterface.Update(&pv); err != nil {
		glog.Error(err)
		return err
	}
	glog.Infof("policy violation updated for resource %v", pv.Spec.ResourceSpec)
	return nil
}
// getExistingPVIfAny looks up an existing policy violation for the same
// policy/resource pair via the {policy, resource} label selector.
// Returns (nil, nil) when none exists.
func getExistingPVIfAny(pvLister kyvernolister.ClusterPolicyViolationLister, pv kyverno.ClusterPolicyViolation) (*kyverno.ClusterPolicyViolation, error) {
	labelMap := map[string]string{"policy": pv.Spec.Policy, "resource": pv.Spec.ResourceSpec.ToKey()}
	pvSelector, err := converLabelToSelector(labelMap)
	if err != nil {
		// fixed: error message said "label sector" and read awkwardly
		return nil, fmt.Errorf("failed to generate label selector for policy %s: %v", pv.Spec.Policy, err)
	}
	pvs, err := pvLister.List(pvSelector)
	if err != nil {
		glog.Errorf("unable to list policy violations with label selector %v: %v", pvSelector, err)
		return nil, err
	}
	if len(pvs) == 0 {
		glog.Infof("policy violation does not exist with labels %v", labelMap)
		return nil, nil
	}
	// There should be only one policy violation per pair; duplicates indicate
	// a labeling race and are only logged
	if len(pvs) > 1 {
		glog.Errorf("more than one policy violation exists with labels %v", labelMap)
	}
	// return the first PV
	return pvs[0], nil
}
// buildPV constructs a policy violation attached directly to the resource
// (the non-blocked case, where the resource itself exists).
func buildPV(info Info) kyverno.ClusterPolicyViolation {
	resourceSpec := kyverno.ResourceSpec{
		Kind:      info.Resource.GetKind(),
		Namespace: info.Resource.GetNamespace(),
		Name:      info.Resource.GetName(),
	}
	pv := buildPVObj(info.PolicyName, resourceSpec, info.Rules)
	// the API server assigns the final name from this prefix
	pv.SetGenerateName("pv-")
	return pv
}
// buildPVObj assembles a ClusterPolicyViolation spec from its parts.
func buildPVObj(policyName string, resourceSpec kyverno.ResourceSpec, rules []kyverno.ViolatedRule) kyverno.ClusterPolicyViolation {
	return kyverno.ClusterPolicyViolation{
		Spec: kyverno.PolicyViolationSpec{
			Policy:        policyName,
			ResourceSpec:  resourceSpec,
			ViolatedRules: rules,
		},
	}
}
// buildPVWithOwners constructs one policy violation per top-level owner of
// the resource (used when the resource itself was blocked by admission and
// therefore does not exist to attach a violation to).
func buildPVWithOwners(dclient *client.Client, info Info) []kyverno.ClusterPolicyViolation {
	// collect the set of top-level owners
	owners := map[kyverno.ResourceSpec]interface{}{}
	getOwner(dclient, owners, info.Resource)
	// emit one violation per owner
	var pvs []kyverno.ClusterPolicyViolation
	for owner := range owners {
		pvs = append(pvs, buildPVObj(info.PolicyName, owner, info.Rules))
	}
	return pvs
}
// getOwner walks ownerReferences recursively and records every top-level
// owner (a resource with no owners of its own) of resource in ownerMap.
// Owners that cannot be fetched are logged and skipped.
//
// fixed: the original only recorded top-level resources in ownerMap and used
// that same map as its recursion guard, so intermediate resources were never
// marked as seen — an ownerReference cycle (A -> B -> A) recursed forever.
// A separate visited set now guards the traversal.
func getOwner(dclient *client.Client, ownerMap map[kyverno.ResourceSpec]interface{}, resource unstructured.Unstructured) {
	var emptyInterface interface{}
	visited := map[kyverno.ResourceSpec]bool{}
	var walk func(res unstructured.Unstructured)
	walk = func(res unstructured.Unstructured) {
		resourceSpec := kyverno.ResourceSpec{
			Kind:      res.GetKind(),
			Namespace: res.GetNamespace(),
			Name:      res.GetName(),
		}
		if visited[resourceSpec] {
			// already processed (also breaks ownerReference cycles)
			return
		}
		visited[resourceSpec] = true
		rOwners := res.GetOwnerReferences()
		// if there are no resource owners then its top level resource
		if len(rOwners) == 0 {
			ownerMap[resourceSpec] = emptyInterface
			return
		}
		for _, rOwner := range rOwners {
			// lookup resource via client
			// owner has to be in same namespace
			owner, err := dclient.GetResource(rOwner.Kind, res.GetNamespace(), rOwner.Name)
			if err != nil {
				glog.Errorf("Failed to get resource owner for %s/%s/%s, err: %v", rOwner.Kind, res.GetNamespace(), rOwner.Name, err)
				// as we want to process other owners
				continue
			}
			walk(*owner)
		}
	}
	walk(resource)
}

View file

@ -29,8 +29,6 @@ func BuildPolicyViolation(policy string, resource kyverno.ResourceSpec, fRules [
ViolatedRules: fRules,
},
}
//TODO: check if this can be removed or use unstructured?
// pv.Kind = "PolicyViolation"
pv.SetGenerateName("pv-")
return pv
}
@ -153,22 +151,11 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
//TODO: change the name
func getExistingPolicyViolationIfAny(pvListerSynced cache.InformerSynced, pvLister kyvernolister.ClusterPolicyViolationLister, newPv kyverno.ClusterPolicyViolation) (*kyverno.ClusterPolicyViolation, error) {
// TODO: check for existing ov using label selectors on resource and policy
// TODO: there can be duplicates, as the labels have not been assigned to the policy violation yet
labelMap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
policyViolationSelector, err := converLabelToSelector(labelMap)
if err != nil {
return nil, fmt.Errorf("failed to generate label sector of Policy name %s: %v", newPv.Spec.Policy, err)
}
//TODO: sync the cache before reading from it ?
// check is this is needed ?
// stopCh := make(chan struct{}, 0)
// if !cache.WaitForCacheSync(stopCh, pvListerSynced) {
// //TODO: can this be handled or avoided ?
// glog.Info("unable to sync policy violation shared informer cache, might be out of sync")
// }
pvs, err := pvLister.List(policyViolationSelector)
if err != nil {
glog.Errorf("unable to list policy violations with label selector %v: %v", policyViolationSelector, err)
@ -324,6 +311,5 @@ func validDependantForDeployment(client appsv1.AppsV1Interface, curPv kyverno.Cl
return true
}
}
return false
}

View file

@ -6,7 +6,6 @@ import (
policyctr "github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/utils"
v1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@ -61,7 +60,8 @@ func (ws *WebhookServer) handleMutation(request *v1beta1.AdmissionRequest) (bool
//TODO: check if the name and namespace is also passed right in the resource?
// if not then set it from the api request
resource.SetGroupVersionKind(schema.GroupVersionKind{Group: request.Kind.Group, Version: request.Kind.Version, Kind: request.Kind.Kind})
policies, err := ws.pLister.List(labels.NewSelector())
// lookup policies based on operation,kind, namespace
policies, err := ws.pMetaStore.LookUp(resource.GetKind(), resource.GetNamespace())
if err != nil {
//TODO check if the CRD is created ?
// Unable to connect to policy Lister to access policies
@ -69,19 +69,13 @@ func (ws *WebhookServer) handleMutation(request *v1beta1.AdmissionRequest) (bool
glog.Warning(err)
return true, nil, ""
}
var engineResponses []engine.EngineResponse
for _, policy := range policies {
// check if policy has a rule for the admission request kind
if !utils.ContainsString(getApplicableKindsForPolicy(policy), request.Kind.Kind) {
continue
}
glog.V(2).Infof("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation)
// TODO: this can be
engineResponse := engine.Mutate(*policy, *resource)
engineResponse := engine.Mutate(policy, *resource)
engineResponses = append(engineResponses, engineResponse)
// Gather policy application statistics
gatherStat(policy.Name, engineResponse.PolicyResponse)
@ -91,10 +85,7 @@ func (ws *WebhookServer) handleMutation(request *v1beta1.AdmissionRequest) (bool
}
// gather patches
patches = append(patches, engineResponse.GetPatches()...)
glog.V(4).Infof("Mutation from policy %s has applied succesfully to %s %s/%s", policy.Name, request.Kind.Kind, resource.GetNamespace(), resource.GetName())
//TODO: check if there is an order to policy application on resource
// resource = &engineResponse.PatchedResource
}
// generate annotations

View file

@ -1,10 +1,12 @@
package webhooks
import (
"fmt"
"strings"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/event"
@ -94,3 +96,62 @@ func generateEvents(engineResponses []engine.EngineResponse, onUpdate bool) []ev
}
return events
}
// generatePV builds a policy-violation request for every failed engine
// response that has a named resource.
func generatePV(ers []engine.EngineResponse, blocked bool) []policyviolation.Info {
	var pvInfos []policyviolation.Info
	// generate PV for each
	for _, er := range ers {
		// ignore creation of PV for resources that are yet to be assigned a
		// name (e.g. generateName on CREATE) — a violation cannot reference a
		// nameless resource
		if er.PolicyResponse.Resource.Name == "" {
			glog.V(4).Infof("resource %v, has not been assigned a name, not creating a policy violation for it", er.PolicyResponse.Resource)
			continue
		}
		// only failed responses produce violations
		if er.IsSuccesful() {
			continue
		}
		glog.V(4).Infof("Building policy violation for engine response %v", er)
		// build policy violation info
		pvInfos = append(pvInfos, buildPVInfo(er, blocked))
	}
	// fixed: the original returned nil here, discarding every collected info
	// so no policy violation was ever generated
	return pvInfos
}
// buildPVInfo converts one engine response into a policy-violation request.
func buildPVInfo(er engine.EngineResponse, blocked bool) policyviolation.Info {
	return policyviolation.Info{
		Blocked:    blocked,
		PolicyName: er.PolicyResponse.Policy,
		Resource:   er.PatchedResource,
		Rules:      buildViolatedRules(er, blocked),
	}
}
// buildViolatedRules collects the failed rules from an engine response.
// When the request was blocked, each rule's message is prefixed with a
// blocked notice and the rule records the blocked managed resource.
func buildViolatedRules(er engine.EngineResponse, blocked bool) []kyverno.ViolatedRule {
	blockMsg := fmt.Sprintf("Request Blocked for resource %s/%s; ", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name)
	// dependent-resource spec attached to each rule when creation was blocked
	// (no Name is set — the blocked resource was never created)
	managed := kyverno.ManagedResourceSpec{
		Kind:            er.PolicyResponse.Resource.Kind,
		Namespace:       er.PolicyResponse.Resource.Namespace,
		CreationBlocked: true,
	}
	var violatedRules []kyverno.ViolatedRule
	for _, rule := range er.PolicyResponse.Rules {
		if rule.Success {
			continue
		}
		vrule := kyverno.ViolatedRule{
			Name: rule.Name,
			Type: rule.Type,
		}
		if blocked {
			vrule.Message = blockMsg + rule.Message
			vrule.ManagedResource = managed
		} else {
			vrule.Message = rule.Message
		}
		violatedRules = append(violatedRules, vrule)
	}
	return violatedRules
}

View file

@ -20,6 +20,7 @@ import (
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
tlsutils "github.com/nirmata/kyverno/pkg/tls"
"github.com/nirmata/kyverno/pkg/webhookconfig"
v1beta1 "k8s.io/api/admission/v1beta1"
@ -48,7 +49,9 @@ type WebhookServer struct {
// last request time
lastReqTime *checker.LastReqTime
// store to hold policy meta data for faster lookup
pMetaStore policystore.Interface
pMetaStore policystore.LookupInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
}
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
@ -63,7 +66,8 @@ func NewWebhookServer(
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
policyStatus policy.PolicyStatusInterface,
configHandler config.Interface,
pMetaStore policystore.Interface,
pMetaStore policystore.LookupInterface,
pvGenerator policyviolation.GeneratorInterface,
cleanUp chan<- struct{}) (*WebhookServer, error) {
if tlsPair == nil {
@ -91,6 +95,7 @@ func NewWebhookServer(
configHandler: configHandler,
cleanUp: cleanUp,
lastReqTime: checker.NewLastReqTime(),
pvGenerator: pvGenerator,
pMetaStore: pMetaStore,
}
mux := http.NewServeMux()
@ -112,6 +117,7 @@ func NewWebhookServer(
// Main server endpoint for all requests
func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
// for every request recieved on the ep update last request time,
// this is used to verify admission control
ws.lastReqTime.SetTime(time.Now())
@ -119,6 +125,9 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
if admissionReview == nil {
return
}
defer func() {
glog.V(4).Infof("request: %v %s/%s/%s", time.Since(startTime), admissionReview.Request.Kind, admissionReview.Request.Namespace, admissionReview.Request.Name)
}()
admissionReview.Response = &v1beta1.AdmissionResponse{
Allowed: true,

View file

@ -1,13 +1,13 @@
package webhooks
import (
"time"
"github.com/golang/glog"
engine "github.com/nirmata/kyverno/pkg/engine"
policyctr "github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/utils"
v1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
)
@ -19,7 +19,7 @@ func (ws *WebhookServer) handleValidation(request *v1beta1.AdmissionRequest, pat
request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)
var policyStats []policyctr.PolicyStat
evalTime := time.Now()
// gather stats from the engine response
gatherStat := func(policyName string, policyResponse engine.PolicyResponse) {
ps := policyctr.PolicyStat{}
@ -71,7 +71,8 @@ func (ws *WebhookServer) handleValidation(request *v1beta1.AdmissionRequest, pat
// resource namespace is empty for the first CREATE operation
resource.SetNamespace(request.Namespace)
policies, err := ws.pLister.List(labels.NewSelector())
// lookup policies based on operation,kind, namespace
policies, err := ws.pMetaStore.LookUp(resource.GetKind(), resource.GetNamespace())
if err != nil {
//TODO check if the CRD is created ?
// Unable to connect to policy Lister to access policies
@ -82,15 +83,10 @@ func (ws *WebhookServer) handleValidation(request *v1beta1.AdmissionRequest, pat
var engineResponses []engine.EngineResponse
for _, policy := range policies {
if !utils.ContainsString(getApplicableKindsForPolicy(policy), request.Kind.Kind) {
continue
}
glog.V(2).Infof("Handling validation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation)
engineResponse := engine.Validate(*policy, *resource)
engineResponse := engine.Validate(policy, *resource)
engineResponses = append(engineResponses, engineResponse)
// Gather policy application statistics
gatherStat(policy.Name, engineResponse.PolicyResponse)
@ -99,6 +95,9 @@ func (ws *WebhookServer) handleValidation(request *v1beta1.AdmissionRequest, pat
continue
}
}
glog.V(4).Infof("eval: %v %s/%s/%s %s", time.Since(evalTime), request.Kind, request.Namespace, request.Name, toBlockResource(engineResponses))
// report time
reportTime := time.Now()
// ADD EVENTS
events := generateEvents(engineResponses, (request.Operation == v1beta1.Update))
ws.eventGen.Add(events...)
@ -107,15 +106,20 @@ func (ws *WebhookServer) handleValidation(request *v1beta1.AdmissionRequest, pat
// violations are created with resource owner(if exist) on "enforce"
// and if there are any then we dont block the resource creation
// Even if one the policy being applied
if !isResponseSuccesful(engineResponses) && toBlockResource(engineResponses) {
policyviolation.CreatePVWhenBlocked(ws.pvLister, ws.kyvernoClient, ws.client, engineResponses)
blocked := toBlockResource(engineResponses)
if !isResponseSuccesful(engineResponses) && blocked {
glog.V(4).Infof("resource %s/%s/%s is blocked\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
pvInfos := generatePV(engineResponses, blocked)
ws.pvGenerator.Add(pvInfos...)
sendStat(true)
return false, getErrorMsg(engineResponses)
}
// ADD POLICY VIOLATIONS
// violations are created with resource on "audit"
policyviolation.CreatePV(ws.pvLister, ws.kyvernoClient, engineResponses)
pvInfos := generatePV(engineResponses, blocked)
ws.pvGenerator.Add(pvInfos...)
sendStat(false)
// report time end
glog.V(4).Infof("report: %v %s/%s/%s %s", time.Since(reportTime), request.Kind, request.Namespace, request.Name, toBlockResource(engineResponses))
return true, ""
}