
Merge pull request #1227 from realshuting/remove_violation

clean up policy violation code - remove pvGenerator
Authored by Jim Bugwadia on 2020-11-03 23:49:59 -08:00, committed by GitHub
commit 3d2bb58395
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
17 changed files with 41 additions and 1547 deletions
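
In short: engine responses are no longer turned into PolicyViolation resources by a dedicated pvGenerator; they are converted into policy report entries and queued on the existing report change request generator. A minimal sketch of the new flow, assuming the package paths and call signatures visible in the diff below (illustrative only, not a file from this commit):

package sketch

import (
	"github.com/go-logr/logr"
	"github.com/kyverno/kyverno/pkg/engine/response"
	"github.com/kyverno/kyverno/pkg/policyreport"
)

// reportResults converts engine responses into report change requests and queues
// them; the generator later aggregates them into ClusterPolicyReport / PolicyReport
// resources instead of creating (Cluster)PolicyViolation objects.
func reportResults(ers []response.EngineResponse, gen policyreport.GeneratorInterface, log logr.Logger) {
	infos := policyreport.GeneratePRsFromEngineResponse(ers, log)
	gen.Add(infos...)
}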


@ -23,7 +23,6 @@ import (
"github.com/kyverno/kyverno/pkg/policycache"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/policyviolation"
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/signal"
"github.com/kyverno/kyverno/pkg/utils"
@ -96,8 +95,9 @@ func main() {
// KYVERNO CRD CLIENT
// access CRD resources
// - Policy
// - PolicyViolation
// - ClusterPolicy, Policy
// - ClusterPolicyReport, PolicyReport
// - GenerateRequest
pclient, err := kyvernoclient.NewForConfig(clientConfig)
if err != nil {
setupLog.Error(err, "Failed to create client")
@ -122,7 +122,7 @@ func main() {
// ===========================================================
// CRD CHECK
// - verify if the CRD for Policy & PolicyViolation are available
// - verify if Kyverno CRDs are available
if !utils.CRDInstalled(client.DiscoveryClient, log.Log) {
setupLog.Error(fmt.Errorf("CRDs not installed"), "Failed to access Kyverno CRDs")
os.Exit(1)
@ -157,8 +157,10 @@ func main() {
// KYVERNO CRD INFORMER
// watches CRD resources:
// - Policy
// - PolicyViolation
// - ClusterPolicy, Policy
// - ClusterPolicyReport, PolicyReport
// - GenerateRequest
// - ClusterReportChangeRequest, ReportChangeRequest
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, resyncPeriod)
// Configuration Data
@ -191,7 +193,6 @@ func main() {
// -- generate policy report
var reportReqGen *policyreport.Generator
var prgen *policyreport.ReportGenerator
if os.Getenv("POLICY-TYPE") == common.PolicyReport {
reportReqGen = policyreport.NewReportChangeRequestGenerator(pclient,
client,
pInformer.Kyverno().V1alpha1().ReportChangeRequests(),
@ -208,22 +209,6 @@ func main() {
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("PolicyReportGenerator"),
)
}
// POLICY VIOLATION GENERATOR
// -- generate policy violation
var pvgen *policyviolation.Generator
pvgen = policyviolation.NewPVGenerator(pclient,
client,
pInformer.Kyverno().V1().ClusterPolicyViolations(),
pInformer.Kyverno().V1().PolicyViolations(),
pInformer.Policy().V1alpha1().ClusterPolicyReports(),
pInformer.Policy().V1alpha1().PolicyReports(),
statusSync.Listener,
reportReqGen,
log.Log.WithName("PolicyViolationGenerator"),
stopCh,
)
// POLICY CONTROLLER
// - reconciliation policy and policy violation
@ -233,12 +218,9 @@ func main() {
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
pInformer.Kyverno().V1().ClusterPolicyViolations(),
pInformer.Kyverno().V1().PolicyViolations(),
pInformer.Kyverno().V1().GenerateRequests(),
configData,
eventGenerator,
pvgen,
reportReqGen,
rWebhookWatcher,
kubeInformer.Core().V1().Namespaces(),
@ -290,7 +272,6 @@ func main() {
pCacheController.Cache,
eventGenerator,
statusSync.Listener,
pvgen,
reportReqGen,
kubeInformer.Rbac().V1().RoleBindings(),
kubeInformer.Rbac().V1().ClusterRoleBindings(),
@ -347,7 +328,6 @@ func main() {
webhookRegistrationClient,
statusSync.Listener,
configData,
pvgen,
reportReqGen,
grgen,
rWebhookWatcher,
@ -369,16 +349,12 @@ func main() {
kubeInformer.Start(stopCh)
kubedynamicInformer.Start(stopCh)
if os.Getenv("POLICY-TYPE") == common.PolicyReport {
go reportReqGen.Run(2, stopCh)
go prgen.Run(1, stopCh)
}
go grgen.Run(1)
go rWebhookWatcher.Run(stopCh)
go configData.Run(stopCh)
go policyCtrl.Run(2, stopCh)
go pvgen.Run(2, stopCh)
go eventGenerator.Run(3, stopCh)
go grc.Run(1, stopCh)
go grcc.Run(1, stopCh)


@ -1,104 +0,0 @@
package policy
import (
"fmt"
"reflect"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
"github.com/kyverno/kyverno/pkg/engine/response"
"k8s.io/apimachinery/pkg/labels"
)
func (pc *PolicyController) cleanUp(ers []response.EngineResponse) {
for _, er := range ers {
if !er.IsSuccessful() {
continue
}
if len(er.PolicyResponse.Rules) == 0 {
continue
}
// clean up after the policy has been corrected
pc.cleanUpPolicyViolation(er.PolicyResponse)
}
}
func (pc *PolicyController) cleanUpPolicyViolation(pResponse response.PolicyResponse) {
logger := pc.log
// - check if there is violation on resource (label:Selector)
if pResponse.Resource.Namespace == "" {
pv, err := getClusterPV(pc.cpvLister, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Name, logger)
if err != nil {
logger.Error(err, "failed to get cluster policy violation on policy and resource", "policy", pResponse.Policy, "kind", pResponse.Resource.Kind, "name", pResponse.Resource.Name)
return
}
if reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) {
return
}
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
logger.Error(err, "failed to delete cluster policy violation", "name", pv.Name)
} else {
logger.Info("deleted cluster policy violation", "name", pv.Name)
}
return
}
// namespace policy violation
nspv, err := getNamespacedPV(pc.nspvLister, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Namespace, pResponse.Resource.Name, logger)
if err != nil {
logger.Error(err, "failed to get namespaced policy violation on policy and resource", "policy", pResponse.Policy, "kind", pResponse.Resource.Kind, "namespace", pResponse.Resource.Namespace, "name", pResponse.Resource.Name)
return
}
if reflect.DeepEqual(nspv, kyverno.PolicyViolation{}) {
return
}
if err := pc.pvControl.DeleteNamespacedPolicyViolation(nspv.Namespace, nspv.Name); err != nil {
logger.Error(err, "failed to delete cluster policy violation", "name", nspv.Name, "namespace", nspv.Namespace)
} else {
logger.Info("deleted namespaced policy violation", "name", nspv.Name, "namespace", nspv.Namespace)
}
}
// Won't do the claiming of objects, just look up based on selectors
func getClusterPV(pvLister kyvernolister.ClusterPolicyViolationLister, policyName, rkind, rname string, log logr.Logger) (kyverno.ClusterPolicyViolation, error) {
var err error
// Check Violation on resource
pvs, err := pvLister.List(labels.Everything())
if err != nil {
log.Error(err, "failed to list cluster policy violations")
return kyverno.ClusterPolicyViolation{}, fmt.Errorf("failed to list cluster pv: %v", err)
}
for _, pv := range pvs {
// find a policy on same resource and policy combination
if pv.Spec.Policy == policyName &&
pv.Spec.ResourceSpec.Kind == rkind &&
pv.Spec.ResourceSpec.Name == rname {
return *pv, nil
}
}
return kyverno.ClusterPolicyViolation{}, nil
}
func getNamespacedPV(nspvLister kyvernolister.PolicyViolationLister, policyName, rkind, rnamespace, rname string, log logr.Logger) (kyverno.PolicyViolation, error) {
nspvs, err := nspvLister.PolicyViolations(rnamespace).List(labels.Everything())
if err != nil {
log.Error(err, "failed to list namespaced policy violation")
return kyverno.PolicyViolation{}, fmt.Errorf("failed to list namespaced pv: %v", err)
}
for _, nspv := range nspvs {
// find a policy on same resource and policy combination
if nspv.Spec.Policy == policyName &&
nspv.Spec.ResourceSpec.Kind == rkind &&
nspv.Spec.ResourceSpec.Name == rname {
return *nspv, nil
}
}
return kyverno.PolicyViolation{}, nil
}


@ -1,132 +0,0 @@
package policy
import (
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"k8s.io/client-go/tools/cache"
)
func (pc *PolicyController) addClusterPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.ClusterPolicyViolation)
logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
if pv.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
// show up in a state that is already pending deletion.
pc.deleteClusterPolicyViolation(pv)
return
}
ps := pc.getPolicyForClusterPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
logger.V(4).Info("Cluster Policy Violation does not belong to an active policy, will be cleaned up")
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
logger.Error(err, "failed to delete resource")
return
}
logger.V(4).Info("resource deleted")
return
}
logger.V(4).Info("resource added")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) updateClusterPolicyViolation(old, cur interface{}) {
curPV := cur.(*kyverno.ClusterPolicyViolation)
oldPV := old.(*kyverno.ClusterPolicyViolation)
if curPV.ResourceVersion == oldPV.ResourceVersion {
// Periodic resync will send update events for all known Policy Violation.
// Two different versions of the same replica set will always have different RVs.
return
}
logger := pc.log.WithValues("kind", curPV.Kind, "namespace", curPV.Namespace, "name", curPV.Name)
ps := pc.getPolicyForClusterPolicyViolation(curPV)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
logger.V(4).Info("Cluster Policy Violation does not belong to an active policy, will be cleaned up")
if err := pc.pvControl.DeleteClusterPolicyViolation(curPV.Name); err != nil {
logger.Error(err, "failed to delete resource")
return
}
logger.V(4).Info("resource deleted")
return
}
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
// deletePolicyViolation enqueues the Policy that manages a PolicyViolation when
// the PolicyViolation is deleted. obj could be a *kyverno.ClusterPolicyViolation, or
// a DeletionFinalStateUnknown marker item.
func (pc *PolicyController) deleteClusterPolicyViolation(obj interface{}) {
logger := pc.log
pv, ok := obj.(*kyverno.ClusterPolicyViolation)
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the PolicyViolation
// changed labels the new Policy will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
pv, ok = tombstone.Obj.(*kyverno.ClusterPolicyViolation)
if !ok {
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
}
logger = logger.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
ps := pc.getPolicyForClusterPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
logger.V(4).Info("Cluster Policy Violation does not belong to an active policy, will be cleaned up")
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
logger.Error(err, "failed to delete resource")
return
}
logger.V(4).Info("resource deleted")
return
}
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) getPolicyForClusterPolicyViolation(pv *kyverno.ClusterPolicyViolation) []*kyverno.ClusterPolicy {
logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
policies, err := pc.pLister.GetPolicyForPolicyViolation(pv)
if err != nil || len(policies) == 0 {
return nil
}
// Because all PolicyViolations belonging to a Policy should have a unique label key,
// there should never be more than one Policy returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers, for now we just return the older one
if len(policies) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
logger.V(4).Info("user error! more than one policy is selecting policy violation", "labels", pv.Labels, "policy", policies[0].Name)
}
return policies
}
func (pc *PolicyController) getClusterPolicyViolationForPolicy(policy string) ([]*kyverno.ClusterPolicyViolation, error) {
policySelector, err := buildPolicyLabel(policy)
if err != nil {
return nil, err
}
// Get List of cluster policy violation
cpvList, err := pc.cpvLister.List(policySelector)
if err != nil {
return nil, err
}
return cpvList, nil
}


@ -20,7 +20,6 @@ import (
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policyviolation"
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/webhookconfig"
v1 "k8s.io/api/core/v1"
@ -54,9 +53,6 @@ type PolicyController struct {
eventGen event.Interface
eventRecorder record.EventRecorder
//pvControl is used for adopting/releasing policy violations
pvControl PVControlInterface
// Policys that need to be synced
queue workqueue.RateLimitingInterface
@ -69,12 +65,6 @@ type PolicyController struct {
// grLister can list/get generate request from the shared informer's store
grLister kyvernolister.GenerateRequestLister
// pvLister can list/get policy violation from the shared informer's store
cpvLister kyvernolister.ClusterPolicyViolationLister
// nspvLister can list/get namespaced policy violation from the shared informer's store
nspvLister kyvernolister.PolicyViolationLister
// nsLister can list/get namespaces from the shared informer's store
nsLister listerv1.NamespaceLister
@ -101,10 +91,7 @@ type PolicyController struct {
// helpers to validate against current loaded configuration
configHandler config.Interface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
// policy violation generator
// policy report generator
prGenerator policyreport.GeneratorInterface
// resourceWebhookWatcher queues the webhook creation request, creates the webhook
@ -121,11 +108,9 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
client *client.Client,
pInformer kyvernoinformer.ClusterPolicyInformer,
npInformer kyvernoinformer.PolicyInformer,
cpvInformer kyvernoinformer.ClusterPolicyViolationInformer,
nspvInformer kyvernoinformer.PolicyViolationInformer,
grInformer kyvernoinformer.GenerateRequestInformer,
configHandler config.Interface, eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
configHandler config.Interface,
eventGen event.Interface,
prGenerator policyreport.GeneratorInterface,
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
namespaces informers.NamespaceInformer,
@ -148,39 +133,18 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
pvGenerator: pvGenerator,
prGenerator: prGenerator,
resourceWebhookWatcher: resourceWebhookWatcher,
log: log,
resCache: resCache,
}
pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}
if os.Getenv("POLICY-TYPE") != common.PolicyReport {
cpvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addClusterPolicyViolation,
UpdateFunc: pc.updateClusterPolicyViolation,
DeleteFunc: pc.deleteClusterPolicyViolation,
})
nspvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addNamespacedPolicyViolation,
UpdateFunc: pc.updateNamespacedPolicyViolation,
DeleteFunc: pc.deleteNamespacedPolicyViolation,
})
pc.cpvLister = cpvInformer.Lister()
pc.cpvListerSynced = cpvInformer.Informer().HasSynced
pc.nspvLister = nspvInformer.Lister()
pc.nspvListerSynced = nspvInformer.Informer().HasSynced
}
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addPolicy,
UpdateFunc: pc.updatePolicy,
DeleteFunc: pc.deletePolicy,
})
// Policy informer event handler
npInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addNsPolicy,
UpdateFunc: pc.updateNsPolicy,
@ -339,7 +303,7 @@ func (pc *PolicyController) enqueueDeletedRule(old, cur *kyverno.ClusterPolicy)
for _, rule := range old.Spec.Rules {
if !curRule[rule.Name] {
pc.pvGenerator.Add(policyviolation.Info{
pc.prGenerator.Add(policyreport.Info{
PolicyName: cur.GetName(),
Rules: []kyverno.ViolatedRule{
{Name: rule.Name},
@ -369,12 +333,7 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
logger.Info("starting")
defer logger.Info("shutting down")
cacheSyncs := []cache.InformerSynced{pc.pListerSynced, pc.npListerSynced, pc.nsListerSynced, pc.grListerSynced}
if os.Getenv("POLICY-TYPE") == common.PolicyViolation {
cacheSyncs = []cache.InformerSynced{pc.pListerSynced, pc.cpvListerSynced, pc.nspvListerSynced, pc.nsListerSynced, pc.grListerSynced}
}
if !cache.WaitForCacheSync(stopCh, cacheSyncs...) {
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.npListerSynced, pc.nsListerSynced, pc.grListerSynced) {
logger.Info("failed to sync informer cache")
return
}
@ -466,13 +425,9 @@ func (pc *PolicyController) syncPolicy(key string) error {
}
}
if os.Getenv("POLICY-TYPE") == common.PolicyReport {
go pc.removeResultsEntryFromPolicyReport(key)
return nil
}
go pc.deletePolicyViolations(key)
return nil
}
return err
}
@ -495,94 +450,7 @@ func (pc *PolicyController) syncPolicy(key string) error {
}
func (pc *PolicyController) removeResultsEntryFromPolicyReport(policyName string) {
info := policyviolation.Info{
pc.prGenerator.Add(policyreport.Info{
PolicyName: policyName,
}
pc.pvGenerator.Add(info)
}
func (pc *PolicyController) deletePolicyViolations(key string) {
cpv, err := pc.deleteClusterPolicyViolations(key)
if err != nil {
pc.log.Error(err, "failed to delete policy violations", "policy", key)
}
npv, err := pc.deleteNamespacedPolicyViolations(key)
if err != nil {
pc.log.Error(err, "failed to delete policy violations", "policy", key)
}
pc.log.Info("deleted policy violations", "policy", key, "count", cpv+npv)
}
func (pc *PolicyController) deleteClusterPolicyViolations(policy string) (int, error) {
cpvList, err := pc.getClusterPolicyViolationForPolicy(policy)
if err != nil {
return 0, err
}
count := 0
for _, cpv := range cpvList {
if err := pc.pvControl.DeleteClusterPolicyViolation(cpv.Name); err != nil {
pc.log.Error(err, "failed to delete policy violation", "name", cpv.Name)
} else {
count++
}
}
return count, nil
}
func (pc *PolicyController) deleteNamespacedPolicyViolations(policy string) (int, error) {
nspvList, err := pc.getNamespacedPolicyViolationForPolicy(policy)
if err != nil {
return 0, err
}
count := 0
for _, nspv := range nspvList {
if err := pc.pvControl.DeleteNamespacedPolicyViolation(nspv.Namespace, nspv.Name); err != nil {
pc.log.Error(err, "failed to delete policy violation", "name", nspv.Name)
} else {
count++
}
}
return count, nil
}
func (pc *PolicyController) getNamespacedPolicyViolationForPolicy(policy string) ([]*kyverno.PolicyViolation, error) {
policySelector, err := buildPolicyLabel(policy)
if err != nil {
return nil, err
}
// Get List of cluster policy violation
nspvList, err := pc.nspvLister.List(policySelector)
if err != nil {
return nil, err
}
return nspvList, nil
}
//PVControlInterface provides interface to operate on policy violation resource
type PVControlInterface interface {
DeleteClusterPolicyViolation(name string) error
DeleteNamespacedPolicyViolation(ns, name string) error
}
// RealPVControl is the default implementation of PVControlInterface.
type RealPVControl struct {
Client kyvernoclient.Interface
Recorder record.EventRecorder
}
//DeleteClusterPolicyViolation deletes the policy violation
func (r RealPVControl) DeleteClusterPolicyViolation(name string) error {
return r.Client.KyvernoV1().ClusterPolicyViolations().Delete(context.TODO(), name, metav1.DeleteOptions{})
}
//DeleteNamespacedPolicyViolation deletes the namespaced policy violation
func (r RealPVControl) DeleteNamespacedPolicyViolation(ns, name string) error {
return r.Client.KyvernoV1().PolicyViolations(ns).Delete(context.TODO(), name, metav1.DeleteOptions{})
}


@ -1,137 +0,0 @@
package policy
import (
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"k8s.io/apimachinery/pkg/api/errors"
cache "k8s.io/client-go/tools/cache"
)
func (pc *PolicyController) addNamespacedPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.PolicyViolation)
logger := pc.log.WithValues("kind", pv.GetObjectKind(), "namespace", pv.Namespace, "name", pv.Name)
if pv.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
// show up in a state that is already pending deletion.
pc.deleteNamespacedPolicyViolation(pv)
return
}
ps := pc.getPolicyForNamespacedPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this namespaced policy violation
logger.V(4).Info("namespaced policy violation does not belong to an active policy, will be cleaned up")
if err := pc.pvControl.DeleteNamespacedPolicyViolation(pv.Namespace, pv.Name); err != nil {
logger.Error(err, "failed to delete resource")
return
}
logger.V(4).Info("resource deleted")
return
}
logger.V(4).Info("resource added")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) updateNamespacedPolicyViolation(old, cur interface{}) {
curPV := cur.(*kyverno.PolicyViolation)
oldPV := old.(*kyverno.PolicyViolation)
if curPV.ResourceVersion == oldPV.ResourceVersion {
// Periodic resync will send update events for all known Policy Violation.
// Two different versions of the same replica set will always have different RVs.
return
}
logger := pc.log.WithValues("kind", curPV.Kind, "namespace", curPV.Namespace, "name", curPV.Name)
ps := pc.getPolicyForNamespacedPolicyViolation(curPV)
if len(ps) == 0 {
// there is no namespaced policy for this violation, so we can delete this namespaced policy violation
logger.V(4).Info("namespaced policy violation does not belong to an active policy, will be cleaned up")
if err := pc.pvControl.DeleteNamespacedPolicyViolation(curPV.Namespace, curPV.Name); err != nil {
logger.Error(err, "failed to delete resource")
return
}
logger.V(4).Info("resource deleted")
return
}
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) deleteNamespacedPolicyViolation(obj interface{}) {
logger := pc.log
pv, ok := obj.(*kyverno.PolicyViolation)
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the PolicyViolation
// changed labels the new Policy will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
pv, ok = tombstone.Obj.(*kyverno.PolicyViolation)
if !ok {
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
}
logger = logger.WithValues("kind", pv.GetObjectKind(), "namespace", pv.Namespace, "name", pv.Name)
ps := pc.getPolicyForNamespacedPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this namespaced policy violation
logger.V(4).Info("namespaced policy violation does not belong to an active policy, will be cleaned up")
if err := pc.pvControl.DeleteNamespacedPolicyViolation(pv.Namespace, pv.Name); err != nil {
if !errors.IsNotFound(err) {
logger.Error(err, "failed to delete resource")
return
}
}
logger.V(4).Info("resource deleted")
return
}
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) getPolicyForNamespacedPolicyViolation(pv *kyverno.PolicyViolation) []*kyverno.ClusterPolicy {
logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
// Check for NamespacePolicies
nspol, err := pc.npLister.GetPolicyForPolicyViolation(pv)
if err != nil {
logger.V(4).Info("missing namespace policy for namespaced policy violation", "reason", err.Error())
return nil
}
if len(nspol) > 0 {
return convertPoliciesToClusterPolicies(nspol)
}
policies, err := pc.pLister.GetPolicyForNamespacedPolicyViolation(pv)
if err != nil || len(policies) == 0 {
logger.V(4).Info("missing policy for namespaced policy violation", "reason", err.Error())
return nil
}
// Because all PolicyViolations belonging to a Policy should have a unique label key,
// there should never be more than one Policy returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers, for now we just return the older one
if len(policies) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
logger.V(4).Info("user error! more than one policy is selecting policy violation", "labels", pv.Labels, "policy", policies[0].Name)
}
return policies
}


@ -2,13 +2,11 @@ package policy
import (
"fmt"
"os"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/policyviolation"
"github.com/kyverno/kyverno/pkg/policyreport"
)
// for each policy-resource response
@ -20,19 +18,12 @@ func (pc *PolicyController) cleanupAndReport(engineResponses []response.EngineRe
eventInfos := generateEvents(pc.log, engineResponses)
pc.eventGen.Add(eventInfos...)
// create policy violation
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
pvInfos := policyreport.GeneratePRsFromEngineResponse(engineResponses, logger)
for i := range pvInfos {
pvInfos[i].FromSync = true
}
pc.pvGenerator.Add(pvInfos...)
// cleanup existing violations if any
// if there is any error in clean up, we dont re-queue the resource
// it will be re-tried in the next controller cache resync
if os.Getenv("POLICY-TYPE") == common.PolicyViolation {
pc.cleanUp(engineResponses)
}
pc.prGenerator.Add(pvInfos...)
}
func generateEvents(log logr.Logger, ers []response.EngineResponse) []event.Info {


@ -1,108 +0,0 @@
package policyviolation
import (
"fmt"
"os"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
report "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/engine/response"
)
//GeneratePVsFromEngineResponse generate Violations from engine responses
func GeneratePVsFromEngineResponse(ers []response.EngineResponse, log logr.Logger) (pvInfos []Info) {
for _, er := range ers {
// ignore creation of PV for resources that are yet to be assigned a name
if er.PolicyResponse.Resource.Name == "" {
log.V(4).Info("resource does not have a name assigned yet, not creating a policy violation", "resource", er.PolicyResponse.Resource)
continue
}
// skip when response succeed
if os.Getenv("POLICY-TYPE") != common.PolicyReport {
if er.IsSuccessful() {
continue
}
}
// build policy violation info
pvInfos = append(pvInfos, buildPVInfo(er))
}
return pvInfos
}
// Builder builds Policy Violation struct
// this is base type of namespaced and cluster policy violation
type Builder interface {
generate(info Info) kyverno.PolicyViolationTemplate
build(policy, kind, namespace, name string, rules []kyverno.ViolatedRule) *kyverno.PolicyViolationTemplate
}
type pvBuilder struct{}
func newPvBuilder() *pvBuilder {
return &pvBuilder{}
}
func (pvb *pvBuilder) generate(info Info) kyverno.PolicyViolationTemplate {
pv := pvb.build(info.PolicyName, info.Resource.GetKind(), info.Resource.GetNamespace(), info.Resource.GetName(), info.Rules)
return *pv
}
func (pvb *pvBuilder) build(policy, kind, namespace, name string, rules []kyverno.ViolatedRule) *kyverno.PolicyViolationTemplate {
pv := &kyverno.PolicyViolationTemplate{
Spec: kyverno.PolicyViolationSpec{
Policy: policy,
ResourceSpec: kyverno.ResourceSpec{
Kind: kind,
Name: name,
Namespace: namespace,
},
ViolatedRules: rules,
},
}
labelMap := map[string]string{
"policy": pv.Spec.Policy,
"resource": pv.Spec.ToKey(),
}
pv.SetLabels(labelMap)
if namespace != "" {
pv.SetNamespace(namespace)
}
pv.SetGenerateName(fmt.Sprintf("%s-", policy))
return pv
}
func buildPVInfo(er response.EngineResponse) Info {
info := Info{
PolicyName: er.PolicyResponse.Policy,
Resource: er.PatchedResource,
Rules: buildViolatedRules(er),
}
return info
}
func buildViolatedRules(er response.EngineResponse) []kyverno.ViolatedRule {
var violatedRules []kyverno.ViolatedRule
for _, rule := range er.PolicyResponse.Rules {
if os.Getenv("POLICY-TYPE") == common.PolicyViolation {
if rule.Success {
continue
}
}
vrule := kyverno.ViolatedRule{
Name: rule.Name,
Type: rule.Type,
Message: rule.Message,
}
vrule.Check = report.StatusFail
if rule.Success {
vrule.Check = report.StatusPass
}
violatedRules = append(violatedRules, vrule)
}
return violatedRules
}


@ -1,58 +0,0 @@
package policyviolation
import (
"testing"
"github.com/kyverno/kyverno/pkg/engine/response"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func Test_GeneratePVsFromEngineResponse_PathNotExist(t *testing.T) {
ers := []response.EngineResponse{
{
PolicyResponse: response.PolicyResponse{
Policy: "test-substitute-variable",
Resource: response.ResourceSpec{
Kind: "Pod",
Name: "test",
Namespace: "test",
},
Rules: []response.RuleResponse{
{
Name: "test-path-not-exist",
Type: "Mutation",
Message: "referenced paths are not present: request.object.metadata.name1",
Success: false,
},
{
Name: "test-path-exist",
Type: "Mutation",
Success: true,
},
},
},
},
{
PolicyResponse: response.PolicyResponse{
Policy: "test-substitute-variable2",
Resource: response.ResourceSpec{
Kind: "Pod",
Name: "test",
Namespace: "test",
},
Rules: []response.RuleResponse{
{
Name: "test-path-not-exist-across-policy",
Type: "Mutation",
Message: "referenced paths are not present: request.object.metadata.name1",
Success: true,
},
},
},
},
}
pvInfos := GeneratePVsFromEngineResponse(ers, log.Log)
assert.Assert(t, len(pvInfos) == 1)
}
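// Note (illustrative, based on GeneratePVsFromEngineResponse above): in the default
// policy-violation mode fully successful engine responses are skipped, so only the
// first response, which contains the failed "test-path-not-exist" rule, yields an
// Info entry; that is why the test expects exactly one item.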


@ -1,148 +0,0 @@
package policyviolation
import (
"context"
"fmt"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
kyvernov1 "github.com/kyverno/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/policystatus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//ClusterPV ...
type clusterPV struct {
// dynamic client
dclient *client.Client
// get/list cluster policy violation
cpvLister kyvernolister.ClusterPolicyViolationLister
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// logger
log logr.Logger
// update policy stats with violationCount
policyStatusListener policystatus.Listener
}
func newClusterPV(log logr.Logger, dclient *client.Client,
cpvLister kyvernolister.ClusterPolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
policyStatus policystatus.Listener,
) *clusterPV {
cpv := clusterPV{
dclient: dclient,
cpvLister: cpvLister,
kyvernoInterface: kyvernoInterface,
log: log,
policyStatusListener: policyStatus,
}
return &cpv
}
func (cpv *clusterPV) create(pv kyverno.PolicyViolationTemplate) error {
newPv := kyverno.ClusterPolicyViolation(pv)
// PV already exists
oldPv, err := cpv.getExisting(newPv)
if err != nil {
return err
}
if oldPv == nil {
// create a new policy violation
return cpv.createPV(&newPv)
}
// policy violation exists
// skip if there is no change, else update the violation
return cpv.updatePV(&newPv, oldPv)
}
func (cpv *clusterPV) getExisting(newPv kyverno.ClusterPolicyViolation) (*kyverno.ClusterPolicyViolation, error) {
logger := cpv.log.WithValues("namespace", newPv.Namespace, "name", newPv.Name)
var err error
// use labels
policyLabelmap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
ls, err := converLabelToSelector(policyLabelmap)
if err != nil {
return nil, err
}
pvs, err := cpv.cpvLister.List(ls)
if err != nil {
logger.Error(err, "failed to list cluster policy violations")
return nil, err
}
for _, pv := range pvs {
// find a policy on same resource and policy combination
if pv.Spec.Policy == newPv.Spec.Policy &&
pv.Spec.ResourceSpec.Kind == newPv.Spec.ResourceSpec.Kind &&
pv.Spec.ResourceSpec.Name == newPv.Spec.ResourceSpec.Name {
return pv, nil
}
}
return nil, nil
}
func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
var err error
logger := cpv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
logger.V(4).Info("creating new policy violation")
obj, err := retryGetResource(cpv.dclient, newPv.Spec.ResourceSpec)
if err != nil {
return fmt.Errorf("failed to retry getting resource for policy violation %s/%s: %v", newPv.Name, newPv.Spec.Policy, err)
}
if obj.GetDeletionTimestamp() != nil {
return nil
}
// set owner reference to resource
ownerRef, ok := createOwnerReference(obj)
if !ok {
return nil
}
newPv.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
// create resource
_, err = cpv.kyvernoInterface.ClusterPolicyViolations().Create(context.TODO(), newPv, metav1.CreateOptions{})
if err != nil {
logger.Error(err, "failed to create cluster policy violation")
return err
}
if newPv.Annotations["fromSync"] != "true" {
cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
logger.Info("cluster policy violation created")
return nil
}
func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) error {
logger := cpv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
var err error
// check if there is any update
if !hasViolationSpecChanged(newPv.Spec.DeepCopy(), oldPv.Spec.DeepCopy()) {
logger.V(4).Info("policy violation spec did not change, not updating the resource")
return nil
}
// set name
newPv.SetName(oldPv.Name)
newPv.SetResourceVersion(oldPv.ResourceVersion)
newPv.SetOwnerReferences(oldPv.GetOwnerReferences())
// update resource
_, err = cpv.kyvernoInterface.ClusterPolicyViolations().Update(context.TODO(), newPv, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update cluster policy violation: %v", err)
}
logger.Info("cluster policy violation updated")
if newPv.Annotations["fromSync"] != "true" {
cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
return nil
}


@ -1,130 +0,0 @@
package policyviolation
import (
"fmt"
"reflect"
"time"
backoff "github.com/cenkalti/backoff"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
client "github.com/kyverno/kyverno/pkg/dclient"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func createOwnerReference(resource *unstructured.Unstructured) (metav1.OwnerReference, bool) {
controllerFlag := true
blockOwnerDeletionFlag := true
apiversion := resource.GetAPIVersion()
kind := resource.GetKind()
name := resource.GetName()
uid := resource.GetUID()
if apiversion == "" || kind == "" || name == "" || uid == "" {
return metav1.OwnerReference{}, false
}
ownerRef := metav1.OwnerReference{
APIVersion: resource.GetAPIVersion(),
Kind: resource.GetKind(),
Name: resource.GetName(),
UID: resource.GetUID(),
Controller: &controllerFlag,
BlockOwnerDeletion: &blockOwnerDeletionFlag,
}
return ownerRef, true
}
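// The owner reference built above (Controller and BlockOwnerDeletion set) ties a
// violation to the offending resource, so Kubernetes garbage collection removes the
// violation together with that resource; when any identifying field is missing the
// function returns false and the caller skips creating the violation.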
func retryGetResource(client *client.Client, rspec kyverno.ResourceSpec) (*unstructured.Unstructured, error) {
var i int
var obj *unstructured.Unstructured
var err error
getResource := func() error {
obj, err = client.GetResource("", rspec.Kind, rspec.Namespace, rspec.Name)
log.Log.V(4).Info(fmt.Sprintf("retry %v getting %s/%s/%s", i, rspec.Kind, rspec.Namespace, rspec.Name))
i++
return err
}
exbackoff := &backoff.ExponentialBackOff{
InitialInterval: 500 * time.Millisecond,
RandomizationFactor: 0.5,
Multiplier: 1.5,
MaxInterval: time.Second,
MaxElapsedTime: 3 * time.Second,
Clock: backoff.SystemClock,
}
exbackoff.Reset()
err = backoff.Retry(getResource, exbackoff)
if err != nil {
return nil, err
}
return obj, nil
}
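// Illustrative timing from the settings above (approximate, jitter applies): retries
// start about 500ms apart, grow by 1.5x up to the 1s MaxInterval, and the loop gives
// up after roughly 3s (MaxElapsedTime).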
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
ls := &metav1.LabelSelector{}
err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
if err != nil {
return nil, err
}
policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
return nil, fmt.Errorf("invalid label selector: %v", err)
}
return policyViolationSelector, nil
}
type violationCount struct {
policyName string
violatedRules []v1.ViolatedRule
}
func (vc violationCount) PolicyName() string {
return vc.policyName
}
func (vc violationCount) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
var ruleNameToViolations = make(map[string]int)
for _, rule := range vc.violatedRules {
ruleNameToViolations[rule.Name]++
}
for i := range status.Rules {
status.ViolationCount += ruleNameToViolations[status.Rules[i].Name]
status.Rules[i].ViolationCount += ruleNameToViolations[status.Rules[i].Name]
}
return status
}
// hasViolationSpecChanged returns true if oldSpec and newSpec differ,
// ignoring the message field in violated rules
func hasViolationSpecChanged(new, old *kyverno.PolicyViolationSpec) bool {
if new.Policy != old.Policy {
return true
}
if new.ResourceSpec.ToKey() != old.ResourceSpec.ToKey() {
return true
}
for i := range new.ViolatedRules {
new.ViolatedRules[i].Message = ""
}
for i := range old.ViolatedRules {
old.ViolatedRules[i].Message = ""
}
return !reflect.DeepEqual(*new, *old)
}
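// Example (illustrative): two specs for the same policy and resource that differ only
// in rule messages compare as unchanged here, so the updatePV callers skip the update.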


@ -1,285 +0,0 @@
package policyviolation
import (
"errors"
"os"
"reflect"
"strconv"
"strings"
"sync"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernov1 "github.com/kyverno/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
policyreportinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/policyreport/v1alpha1"
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/constant"
dclient "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const workQueueName = "policy-violation-controller"
const workQueueRetryLimit = 3
//Generator creates PV
type Generator struct {
dclient *dclient.Client
kyvernoInterface kyvernov1.KyvernoV1Interface
// get/list cluster policy violation
cpvLister kyvernolister.ClusterPolicyViolationLister
// get/list namespaced policy violation
nspvLister kyvernolister.PolicyViolationLister
// returns true if the cluster policy violation store has been synced at least once
pvSynced cache.InformerSynced
// returns true if the namespaced policy violation store has been synced at least once
log logr.Logger
nspvSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
dataStore *dataStore
policyStatusListener policystatus.Listener
prgen *policyreport.Generator
}
//NewDataStore returns an instance of data store
func newDataStore() *dataStore {
ds := dataStore{
data: make(map[string]Info),
}
return &ds
}
type dataStore struct {
data map[string]Info
mu sync.RWMutex
}
func (ds *dataStore) add(keyHash string, info Info) {
ds.mu.Lock()
defer ds.mu.Unlock()
// queue the key hash
ds.data[keyHash] = info
}
func (ds *dataStore) lookup(keyHash string) Info {
ds.mu.RLock()
defer ds.mu.RUnlock()
return ds.data[keyHash]
}
func (ds *dataStore) delete(keyHash string) {
ds.mu.Lock()
defer ds.mu.Unlock()
delete(ds.data, keyHash)
}
//Info is a request to create PV
type Info struct {
PolicyName string
Resource unstructured.Unstructured
Rules []kyverno.ViolatedRule
FromSync bool
}
func (i Info) toKey() string {
keys := []string{
i.PolicyName,
i.Resource.GetKind(),
i.Resource.GetNamespace(),
i.Resource.GetName(),
strconv.Itoa(len(i.Rules)),
}
return strings.Join(keys, "/")
}
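// Illustrative key (hypothetical values): a policy "check-labels" applied to Pod
// default/nginx with two violated rules yields "check-labels/Pod/default/nginx/2",
// which enqueue() uses both as the work-queue key and as the dataStore index.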
// make the struct hashable
//GeneratorInterface provides API to create PVs
type GeneratorInterface interface {
Add(infos ...Info)
}
// NewPVGenerator returns a new instance of policy violation generator
func NewPVGenerator(client *kyvernoclient.Clientset,
dclient *dclient.Client,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
nspvInformer kyvernoinformer.PolicyViolationInformer,
prInformer policyreportinformer.ClusterPolicyReportInformer,
nsprInformer policyreportinformer.PolicyReportInformer,
policyStatus policystatus.Listener,
prgen *policyreport.Generator,
log logr.Logger,
stopChna <-chan struct{}) *Generator {
gen := Generator{
kyvernoInterface: client.KyvernoV1(),
dclient: dclient,
cpvLister: pvInformer.Lister(),
pvSynced: pvInformer.Informer().HasSynced,
nspvLister: nspvInformer.Lister(),
nspvSynced: nspvInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
dataStore: newDataStore(),
prgen: prgen,
log: log,
policyStatusListener: policyStatus,
}
return &gen
}
func (gen *Generator) enqueue(info Info) {
// add to data map
keyHash := info.toKey()
// add to
// queue the key hash
gen.dataStore.add(keyHash, info)
gen.queue.Add(keyHash)
}
//Add queues a policy violation create request
func (gen *Generator) Add(infos ...Info) {
if os.Getenv("POLICY-TYPE") == common.PolicyReport {
for _, v := range infos {
gen.prgen.Add(policyreport.Info(v))
}
return
}
for _, info := range infos {
gen.enqueue(info)
}
}
// Run starts the workers
func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
logger := gen.log
defer utilruntime.HandleCrash()
logger.Info("start")
defer logger.Info("shutting down")
if !cache.WaitForCacheSync(stopCh, gen.pvSynced, gen.nspvSynced) {
logger.Info("failed to sync informer cache")
}
for i := 0; i < workers; i++ {
go wait.Until(gen.runWorker, constant.PolicyViolationControllerResync, stopCh)
}
<-stopCh
}
func (gen *Generator) runWorker() {
for gen.processNextWorkItem() {
}
}
func (gen *Generator) handleErr(err error, key interface{}) {
logger := gen.log
if err == nil {
gen.queue.Forget(key)
return
}
// retries requests if there is an error
if gen.queue.NumRequeues(key) < workQueueRetryLimit {
logger.Error(err, "failed to sync policy violation", "key", key)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
gen.queue.AddRateLimited(key)
return
}
gen.queue.Forget(key)
// remove from data store
if keyHash, ok := key.(string); ok {
gen.dataStore.delete(keyHash)
}
logger.Error(err, "dropping key out of the queue", "key", key)
}
func (gen *Generator) processNextWorkItem() bool {
logger := gen.log
obj, shutdown := gen.queue.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer gen.queue.Done(obj)
var keyHash string
var ok bool
if keyHash, ok = obj.(string); !ok {
gen.queue.Forget(obj)
logger.Info("incorrect type; expecting type 'string'", "obj", obj)
return nil
}
// lookup data store
info := gen.dataStore.lookup(keyHash)
if reflect.DeepEqual(info, Info{}) {
// empty key
gen.queue.Forget(obj)
logger.Info("empty key")
return nil
}
err := gen.syncHandler(info)
gen.handleErr(err, obj)
return nil
}(obj)
if err != nil {
logger.Error(err, "failed to process item")
return true
}
return true
}
func (gen *Generator) syncHandler(info Info) error {
var handler pvGenerator
logger := gen.log
builder := newPvBuilder()
if info.Resource.GetNamespace() == "" {
// cluster scope resource generate a clusterpolicy violation
handler = newClusterPV(gen.log.WithName("ClusterPV"), gen.dclient, gen.cpvLister, gen.kyvernoInterface, gen.policyStatusListener)
} else {
// namespaced resources generated a namespaced policy violation in the namespace of the resource
handler = newNamespacedPV(gen.log.WithName("NamespacedPV"), gen.dclient, gen.nspvLister, gen.kyvernoInterface, gen.policyStatusListener)
}
failure := false
pv := builder.generate(info)
if info.FromSync {
pv.Annotations = map[string]string{
"fromSync": "true",
}
}
// Create Policy Violations
logger.V(4).Info("creating policy violation", "key", info.toKey())
if err := handler.create(pv); err != nil {
failure = true
logger.Error(err, "failed to create policy violation")
}
if failure {
// even if there is a single failure we requeue the request
return errors.New("Failed to process some policy violations, re-queuing")
}
return nil
}
// Provides an interface to generate policy violations
// implementations for namespaced and cluster PV
type pvGenerator interface {
create(policyViolation kyverno.PolicyViolationTemplate) error
}


@ -1,145 +0,0 @@
package policyviolation
import (
"context"
"fmt"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
kyvernov1 "github.com/kyverno/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/policystatus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
//NamespacedPV ...
type namespacedPV struct {
// dynamic client
dclient *client.Client
// get/list namespaced policy violation
nspvLister kyvernolister.PolicyViolationLister
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// logger
log logr.Logger
// update policy status with violationCount
policyStatusListener policystatus.Listener
}
func newNamespacedPV(log logr.Logger, dclient *client.Client,
nspvLister kyvernolister.PolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
policyStatus policystatus.Listener,
) *namespacedPV {
nspv := namespacedPV{
dclient: dclient,
nspvLister: nspvLister,
kyvernoInterface: kyvernoInterface,
log: log,
policyStatusListener: policyStatus,
}
return &nspv
}
func (nspv *namespacedPV) create(pv kyverno.PolicyViolationTemplate) error {
newPv := kyverno.PolicyViolation(pv)
// PV already exists
oldPv, err := nspv.getExisting(newPv)
if err != nil {
return err
}
if oldPv == nil {
// create a new policy violation
return nspv.createPV(&newPv)
}
// policy violation exists
// skip if there is no change, else update the violation
return nspv.updatePV(&newPv, oldPv)
}
func (nspv *namespacedPV) getExisting(newPv kyverno.PolicyViolation) (*kyverno.PolicyViolation, error) {
logger := nspv.log.WithValues("namespace", newPv.Namespace, "name", newPv.Name)
var err error
// use labels
policyLabelmap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
ls, err := converLabelToSelector(policyLabelmap)
if err != nil {
return nil, err
}
pvs, err := nspv.nspvLister.PolicyViolations(newPv.GetNamespace()).List(ls)
if err != nil {
logger.Error(err, "failed to list namespaced policy violations")
return nil, err
}
for _, pv := range pvs {
// find a policy on same resource and policy combination
if pv.Spec.Policy == newPv.Spec.Policy &&
pv.Spec.ResourceSpec.Kind == newPv.Spec.ResourceSpec.Kind &&
pv.Spec.ResourceSpec.Name == newPv.Spec.ResourceSpec.Name {
return pv, nil
}
}
return nil, nil
}
func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
var err error
logger := nspv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
logger.V(4).Info("creating new policy violation")
obj, err := retryGetResource(nspv.dclient, newPv.Spec.ResourceSpec)
if err != nil {
return fmt.Errorf("failed to retry getting resource for policy violation %s/%s: %v", newPv.Name, newPv.Spec.Policy, err)
}
if obj.GetDeletionTimestamp() != nil {
return nil
}
// set owner reference to resource
ownerRef, ok := createOwnerReference(obj)
if !ok {
return nil
}
newPv.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
// create resource
_, err = nspv.kyvernoInterface.PolicyViolations(newPv.GetNamespace()).Create(context.TODO(), newPv, metav1.CreateOptions{})
if err != nil {
logger.Error(err, "failed to create namespaced policy violation")
return err
}
if newPv.Annotations["fromSync"] != "true" {
nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
logger.Info("namespaced policy violation created")
return nil
}
func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error {
logger := nspv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
var err error
// check if there is any update
if !hasViolationSpecChanged(newPv.Spec.DeepCopy(), oldPv.Spec.DeepCopy()) {
logger.V(4).Info("policy violation spec did not change, not updating the resource")
return nil
}
// set name
newPv.SetName(oldPv.Name)
newPv.SetResourceVersion(oldPv.ResourceVersion)
newPv.SetOwnerReferences(oldPv.GetOwnerReferences())
// update resource
_, err = nspv.kyvernoInterface.PolicyViolations(newPv.GetNamespace()).Update(context.TODO(), newPv, metav1.UpdateOptions{})
if err != nil {
return fmt.Errorf("failed to update namespaced policy violation: %v", err)
}
if newPv.Annotations["fromSync"] != "true" {
nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
logger.Info("namespaced policy violation updated")
return nil
}


@ -1,74 +0,0 @@
package policyviolation
import (
"encoding/json"
"reflect"
"testing"
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
)
func Test_Stats(t *testing.T) {
testCase := struct {
violationCountStats []struct {
policyName string
violatedRules []v1.ViolatedRule
}
expectedOutput []byte
existingCache map[string]v1.PolicyStatus
}{
existingCache: map[string]v1.PolicyStatus{
"policy1": {
Rules: []v1.RuleStats{
{
Name: "rule4",
},
},
},
"policy2": {
Rules: []v1.RuleStats{
{
Name: "rule4",
},
},
},
},
expectedOutput: []byte(`{"policy1":{"violationCount":1,"ruleStatus":[{"ruleName":"rule4","violationCount":1}]},"policy2":{"violationCount":1,"ruleStatus":[{"ruleName":"rule4","violationCount":1}]}}`),
violationCountStats: []struct {
policyName string
violatedRules []v1.ViolatedRule
}{
{
policyName: "policy1",
violatedRules: []v1.ViolatedRule{
{
Name: "rule4",
},
},
},
{
policyName: "policy2",
violatedRules: []v1.ViolatedRule{
{
Name: "rule4",
},
},
},
},
}
policyNameToStatus := testCase.existingCache
for _, violationCountStat := range testCase.violationCountStats {
receiver := &violationCount{
policyName: violationCountStat.policyName,
violatedRules: violationCountStat.violatedRules,
}
policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
}
output, _ := json.Marshal(policyNameToStatus)
if !reflect.DeepEqual(output, testCase.expectedOutput) {
t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
}
}


@ -11,7 +11,6 @@ import (
"github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/response"
engineutils "github.com/kyverno/kyverno/pkg/engine/utils"
"github.com/kyverno/kyverno/pkg/policyviolation"
v1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
@ -87,11 +86,6 @@ func (ws *WebhookServer) HandleMutation(
patches = append(patches, annPatches)
}
// AUDIT
// generate violation when response fails
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
ws.pvGenerator.Add(pvInfos...)
// REPORTING EVENTS
// Scenario 1:
// some/all policies failed to apply on the resource. a policy violation is generated.


@ -26,7 +26,6 @@ import (
"github.com/kyverno/kyverno/pkg/policycache"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/policyviolation"
"github.com/kyverno/kyverno/pkg/resourcecache"
tlsutils "github.com/kyverno/kyverno/pkg/tls"
userinfo "github.com/kyverno/kyverno/pkg/userinfo"
@ -98,9 +97,6 @@ type WebhookServer struct {
// last request time
lastReqTime *checker.LastReqTime
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
// policy report generator
prGenerator policyreport.GeneratorInterface
@ -136,7 +132,6 @@ func NewWebhookServer(
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
statusSync policystatus.Listener,
configHandler config.Interface,
pvGenerator policyviolation.GeneratorInterface,
prGenerator policyreport.GeneratorInterface,
grGenerator *generate.Generator,
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
@ -180,7 +175,6 @@ func NewWebhookServer(
configHandler: configHandler,
cleanUp: cleanUp,
lastReqTime: resourceWebhookWatcher.LastReqTime,
pvGenerator: pvGenerator,
prGenerator: prGenerator,
grGenerator: grGenerator,
resourceWebhookWatcher: resourceWebhookWatcher,
@ -358,7 +352,7 @@ func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1
ws.auditHandler.Add(request.DeepCopy())
// VALIDATION
ok, msg := HandleValidation(request, validatePolicies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.prGenerator, ws.log, ws.configHandler, ws.resCache)
ok, msg := HandleValidation(request, validatePolicies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.prGenerator, ws.log, ws.configHandler, ws.resCache)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{
@ -484,7 +478,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
logger.Error(err, "failed to load service account in context")
}
ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.pvGenerator, ws.prGenerator, ws.log, ws.configHandler, ws.resCache)
ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.prGenerator, ws.log, ws.configHandler, ws.resCache)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{


@ -11,7 +11,6 @@ import (
"github.com/kyverno/kyverno/pkg/policycache"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/policyviolation"
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/userinfo"
"github.com/minio/minio/cmd/logger"
@ -45,7 +44,6 @@ type auditHandler struct {
pCache policycache.Interface
eventGen event.Interface
statusListener policystatus.Listener
pvGenerator policyviolation.GeneratorInterface
prGenerator policyreport.GeneratorInterface
rbLister rbaclister.RoleBindingLister
@ -62,7 +60,6 @@ type auditHandler struct {
func NewValidateAuditHandler(pCache policycache.Interface,
eventGen event.Interface,
statusListener policystatus.Listener,
pvGenerator policyviolation.GeneratorInterface,
prGenerator policyreport.GeneratorInterface,
rbInformer rbacinformer.RoleBindingInformer,
crbInformer rbacinformer.ClusterRoleBindingInformer,
@ -75,7 +72,6 @@ func NewValidateAuditHandler(pCache policycache.Interface,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
eventGen: eventGen,
statusListener: statusListener,
pvGenerator: pvGenerator,
rbLister: rbInformer.Lister(),
rbSynced: rbInformer.Informer().HasSynced,
crbLister: crbInformer.Lister(),
@ -175,7 +171,7 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
return errors.Wrap(err, "failed to load service account in context")
}
HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.pvGenerator, h.prGenerator, logger, h.configHandler, h.resCache)
HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.prGenerator, logger, h.configHandler, h.resCache)
return nil
}


@ -17,7 +17,6 @@ import (
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/policyviolation"
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/utils"
v1beta1 "k8s.io/api/admission/v1beta1"
@ -36,7 +35,6 @@ func HandleValidation(
userRequestInfo kyverno.RequestInfo,
statusListener policystatus.Listener,
eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
prGenerator policyreport.GeneratorInterface,
log logr.Logger,
dynamicConfig config.Interface,
@ -125,14 +123,12 @@ func HandleValidation(
return false, getEnforceFailureErrorMsg(engineResponses)
}
// ADD POLICY VIOLATIONS
// violations are created with resource on "audit"
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
pvGenerator.Add(pvInfos...)
prInfos := policyreport.GeneratePRsFromEngineResponse(engineResponses, logger)
prGenerator.Add(prInfos...)
if os.Getenv("POLICY-TYPE") == common.PolicyReport {
if request.Operation == v1beta1.Delete {
pvGenerator.Add(policyviolation.Info{
prGenerator.Add(policyreport.Info{
Resource: unstructured.Unstructured{
Object: map[string]interface{}{
"kind": oldR.GetKind(),