kyverno/pkg/policystatus/policy_status.go

package policystatus
import (
"context"
"encoding/json"
"strings"
"sync"
"time"
"github.com/go-logr/logr"
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
log "sigs.k8s.io/controller-runtime/pkg/log"
)

// The policy status implementation works as follows:
//
// The package maintains a cache of the status of each policy. Periodically
// (every 60 seconds) the policy resources are updated using the data from
// this cache.
//
// Sync exposes a Listener which accepts a statusUpdater interface that
// dictates how a status should be updated. A worker receives the interface
// on the Listener channel and updates the cached status using the methods
// the interface exposes.
//
// The implementation is designed to be thread safe, with optimised locking
// for each policy.

// statusUpdater defines a status update request: it names the target
// policy and updates the given status.
type statusUpdater interface {
PolicyName() string
UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus
}

// Listener is a channel of statusUpdater instances
type Listener chan statusUpdater

// Update queues a status update request
func (l Listener) Update(s statusUpdater) {
l <- s
}
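
// An illustrative sketch of a minimal statusUpdater implementation and how
// it could be queued on the Listener. The type name, its field, and the
// RulesAppliedCount status field used below are assumptions, not code from
// this package:
//
//	type exampleUpdater struct {
//		policy string // name of the target policy (hypothetical)
//	}
//
//	func (u exampleUpdater) PolicyName() string { return u.policy }
//
//	func (u exampleUpdater) UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus {
//		// bump a counter on the cached status (field name assumed)
//		status.RulesAppliedCount++
//		return status
//	}
//
//	// somewhere in the policy engine:
//	// statusSync.Listener.Update(exampleUpdater{policy: "require-labels"})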

// Sync is used to initialize the policy status sync. It can be considered
// the parent object of this package, since it holds access to all the
// persistent data the package uses.
type Sync struct {
cache *cache
Listener Listener
client *versioned.Clientset
lister kyvernolister.ClusterPolicyLister
nsLister kyvernolister.PolicyLister
log logr.Logger
}

// cache holds the in-memory policy status data, guarded by dataMu and a
// per-policy mutex map.
type cache struct {
dataMu sync.RWMutex
data map[string]v1.PolicyStatus
keyToMutex *keyToMutex
}

// NewSync creates a new Sync instance
func NewSync(c *versioned.Clientset, lister kyvernolister.ClusterPolicyLister, nsLister kyvernolister.PolicyLister) *Sync {
return &Sync{
cache: &cache{
dataMu: sync.RWMutex{},
data: make(map[string]v1.PolicyStatus),
keyToMutex: newKeyToMutex(),
},
client: c,
lister: lister,
nsLister: nsLister,
Listener: make(chan statusUpdater, 20),
log: log.Log.WithName("PolicyStatus"),
}
}

// Run starts workers and periodically flushes the cached status
func (s *Sync) Run(workers int, stopCh <-chan struct{}) {
for i := 0; i < workers; i++ {
go s.updateStatusCache(stopCh)
}
wait.Until(s.updatePolicyStatus, 60*time.Second, stopCh)
<-stopCh
}
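
// An illustrative sketch of how a controller could wire up the sync; only
// NewSync and Run come from this file, the client and lister construction
// shown here is assumed:
//
//	client, _ := versioned.NewForConfig(restConfig)  // restConfig assumed
//	statusSync := NewSync(client, cpolLister, polLister) // listers assumed to exist
//	stopCh := make(chan struct{})
//	go statusSync.Run(2, stopCh) // two workers draining the Listener channel
//	// ... engine components then call statusSync.Listener.Update(...)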

// updateStatusCache is a worker that updates the cached status using the
// statusUpdater interface received on the Listener channel
func (s *Sync) updateStatusCache(stopCh <-chan struct{}) {
for {
select {
case statusUpdater := <-s.Listener:
name := statusUpdater.PolicyName()
s.log.V(3).Info("received policy status update request", "policy", name)
s.cache.keyToMutex.Get(name).Lock()
s.cache.dataMu.RLock()
status, exist := s.cache.data[statusUpdater.PolicyName()]
s.cache.dataMu.RUnlock()
if !exist {
policy, _ := s.lister.Get(statusUpdater.PolicyName())
if policy != nil {
status = policy.Status
}
}
updatedStatus := statusUpdater.UpdateStatus(status)
s.cache.dataMu.Lock()
s.cache.data[statusUpdater.PolicyName()] = updatedStatus
s.cache.dataMu.Unlock()
s.cache.keyToMutex.Get(statusUpdater.PolicyName()).Unlock()
oldStatus, _ := json.Marshal(status)
newStatus, _ := json.Marshal(updatedStatus)
s.log.V(4).Info("updated policy status", "policy", statusUpdater.PolicyName(),
"oldStatus", string(oldStatus), "newStatus", string(newStatus))
case <-stopCh:
return
}
}
}

// updatePolicyStatus flushes the cached status into the policy resource
// definitions, then clears the corresponding cache entries
func (s *Sync) updatePolicyStatus() {
for key, status := range s.getCachedStatus() {
s.log.V(3).Info("updating policy status", "policy", key)
namespace, policyName := s.parseStatusKey(key)
if namespace == "" {
s.updateClusterPolicy(policyName, key, status)
} else {
s.updateNamespacedPolicyStatus(policyName, namespace, key, status)
}
}
}

// parseStatusKey splits a cache key of the form "<namespace>/<name>" into
// its namespace and policy name; cluster policy keys have no namespace
func (s *Sync) parseStatusKey(key string) (string, string) {
namespace := ""
policyName := key
index := strings.Index(key, "/")
if index != -1 {
namespace = key[:index]
policyName = key[index+1:]
}
return namespace, policyName
}
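
// For illustration (names assumed): a key "team-a/require-labels" parses to
// ("team-a", "require-labels"), while a cluster policy key "require-labels"
// parses to ("", "require-labels").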

// updateClusterPolicy writes the cached status to the ClusterPolicy resource
// and removes the cache entry
func (s *Sync) updateClusterPolicy(policyName, key string, status v1.PolicyStatus) {
defer s.deleteCachedStatus(key)
policy, err := s.lister.Get(policyName)
if err != nil {
s.log.Error(err, "failed to update policy status", "policy", policyName)
return
}
policy.Status = status
_, err = s.client.KyvernoV1().ClusterPolicies().UpdateStatus(context.TODO(), policy, metav1.UpdateOptions{})
if err != nil {
s.log.Error(err, "failed to update policy status", "policy", policyName)
}
}

// updateNamespacedPolicyStatus writes the cached status to the namespaced
// Policy resource and removes the cache entry
func (s *Sync) updateNamespacedPolicyStatus(policyName, namespace, key string, status v1.PolicyStatus) {
defer s.deleteCachedStatus(key)
policy, err := s.nsLister.Policies(namespace).Get(policyName)
if err != nil {
s.log.Error(err, "failed to update policy status", "policy", policyName)
return
}
policy.Status = status
_, err = s.client.KyvernoV1().Policies(namespace).UpdateStatus(context.TODO(), policy, metav1.UpdateOptions{})
if err != nil {
s.log.Error(err, "failed to update namespaced policy status", "policy", policyName)
}
}

// deleteCachedStatus removes a policy's entry from the status cache
func (s *Sync) deleteCachedStatus(policyName string) {
s.cache.dataMu.Lock()
defer s.cache.dataMu.Unlock()
delete(s.cache.data, policyName)
}

// getCachedStatus returns a copy of the status cache
func (s *Sync) getCachedStatus() map[string]v1.PolicyStatus {
s.cache.dataMu.Lock()
defer s.cache.dataMu.Unlock()
var nameToStatus = make(map[string]v1.PolicyStatus, len(s.cache.data))
for k, v := range s.cache.data {
nameToStatus[k] = v
}
return nameToStatus
}