Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-31 03:45:17 +00:00)
deprecate policy status (#2136)
* deprecate policy status
  Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* make fmt
  Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* remove policy status tests
  Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix generate metrics
  Signed-off-by: Jim Bugwadia <jim@nirmata.com>
parent b9e77575be
commit 003c865ab9
17 changed files with 40 additions and 959 deletions
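The replacement for the deprecated status subresource is the Prometheus metrics endpoint named in the new doc comments below. A minimal sketch of consuming it from Go; the localhost:8000 address assumes a port-forward to the Kyverno metrics service (adjust for your deployment), and the two series names are the ones referenced in the hunks that follow (kyverno_policy_rule_results_info, kyverno_policy_rule_execution_latency_milliseconds):

package main

import (
	"bufio"
	"fmt"
	"log"
	"net/http"
	"strings"
)

func main() {
	// Assumed endpoint: a local port-forward of the Kyverno metrics service.
	resp, err := http.Get("http://localhost:8000/metrics")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// Print only the policy-level series this commit points users at.
	scanner := bufio.NewScanner(resp.Body)
	for scanner.Scan() {
		line := scanner.Text()
		if strings.HasPrefix(line, "kyverno_policy_") {
			fmt.Println(line)
		}
	}
	if err := scanner.Err(); err != nil {
		log.Fatal(err)
	}
}
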
@@ -32,7 +32,6 @@ import (
 	"github.com/kyverno/kyverno/pkg/policy"
 	"github.com/kyverno/kyverno/pkg/policycache"
 	"github.com/kyverno/kyverno/pkg/policyreport"
-	"github.com/kyverno/kyverno/pkg/policystatus"
 	"github.com/kyverno/kyverno/pkg/resourcecache"
 	"github.com/kyverno/kyverno/pkg/signal"
 	ktls "github.com/kyverno/kyverno/pkg/tls"
@@ -195,12 +194,6 @@ func main() {
 		rCache,
 		log.Log.WithName("EventGenerator"))
 
-	// Policy Status Handler - deals with all logic related to policy status
-	statusSync := policystatus.NewSync(
-		pclient,
-		pInformer.Kyverno().V1().ClusterPolicies().Lister(),
-		pInformer.Kyverno().V1().Policies().Lister())
-
 	// POLICY Report GENERATOR
 	reportReqGen := policyreport.NewReportChangeRequestGenerator(pclient,
 		client,
@@ -208,7 +201,6 @@ func main() {
 		pInformer.Kyverno().V1alpha1().ClusterReportChangeRequests(),
 		pInformer.Kyverno().V1().ClusterPolicies(),
 		pInformer.Kyverno().V1().Policies(),
-		statusSync.Listener,
 		log.Log.WithName("ReportChangeRequestGenerator"),
 	)
 
@@ -300,7 +292,6 @@ func main() {
 		pInformer.Kyverno().V1().GenerateRequests(),
 		eventGenerator,
 		kubedynamicInformer,
-		statusSync.Listener,
 		log.Log.WithName("GenerateController"),
 		configData,
 		rCache,
@@ -335,7 +326,6 @@ func main() {
 	auditHandler := webhooks.NewValidateAuditHandler(
 		pCacheController.Cache,
 		eventGenerator,
-		statusSync.Listener,
 		reportReqGen,
 		kubeInformer.Rbac().V1().RoleBindings(),
 		kubeInformer.Rbac().V1().ClusterRoleBindings(),
@@ -429,7 +419,6 @@ func main() {
 		pCacheController.Cache,
 		webhookCfg,
 		webhookMonitor,
-		statusSync.Listener,
 		configData,
 		reportReqGen,
 		grgen,
@@ -486,7 +475,6 @@ func main() {
 	go configData.Run(stopCh)
 	go eventGenerator.Run(3, stopCh)
 	go grgen.Run(10, stopCh)
-	go statusSync.Run(1, stopCh)
 	go pCacheController.Run(1, stopCh)
 	go auditHandler.Run(10, stopCh)
 	if !debug {

@@ -32,6 +32,7 @@ type Policy struct {
 
 	// Status contains policy runtime information.
 	// +optional
+	// Deprecated. Policy metrics are available via the metrics endpoint
 	Status PolicyStatus `json:"status,omitempty" yaml:"status,omitempty"`
 }
 
@@ -434,6 +435,8 @@ type CloneFrom struct {
 }
 
 // PolicyStatus mostly contains runtime information related to policy execution.
+// Deprecated. Policy metrics are now available via the "/metrics" endpoint.
+// See: https://kyverno.io/docs/monitoring-kyverno-with-prometheus-metrics/
 type PolicyStatus struct {
 	// AvgExecutionTime is the average time taken to process the policy rules on a resource.
 	// +optional
@@ -469,6 +472,8 @@ type PolicyStatus struct {
 }
 
 // RuleStats provides statistics for an individual rule within a policy.
+// Deprecated. Policy metrics are now available via the "/metrics" endpoint.
+// See: https://kyverno.io/docs/monitoring-kyverno-with-prometheus-metrics/
 type RuleStats struct {
 	// Name is the rule name.
 	Name string `json:"ruleName" yaml:"ruleName"`

@@ -22,7 +22,6 @@ package v1
 
 import (
 	rbacv1 "k8s.io/api/rbac/v1"
-	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	runtime "k8s.io/apimachinery/pkg/runtime"
 )
@@ -73,14 +72,6 @@ func (in *AnyAllConditions) DeepCopy() *AnyAllConditions {
 	return out
 }
 
-// DeepCopyJSON is an autogenerated deepcopy function, copying the receiver, creating a new apiextensions.JSON.
-func (in *AnyAllConditions) DeepCopyJSON() apiextensions.JSON {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CloneFrom) DeepCopyInto(out *CloneFrom) {
 	*out = *in
@@ -220,14 +211,6 @@ func (in *Deny) DeepCopy() *Deny {
 	return out
 }
 
-// DeepCopyJSON is an autogenerated deepcopy function, copying the receiver, creating a new apiextensions.JSON.
-func (in *Deny) DeepCopyJSON() apiextensions.JSON {
-	if c := in.DeepCopy(); c != nil {
-		return c
-	}
-	return nil
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExcludeResources) DeepCopyInto(out *ExcludeResources) {
 	*out = *in
@@ -534,6 +517,11 @@ func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
 		*out = make([]string, len(*in))
 		copy(*out, *in)
 	}
+	if in.Names != nil {
+		in, out := &in.Names, &out.Names
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	if in.Namespaces != nil {
 		in, out := &in.Namespaces, &out.Namespaces
 		*out = make([]string, len(*in))

@@ -243,55 +243,9 @@ func (c *Controller) applyGeneratePolicy(log logr.Logger, policyContext *engine.
 		}
 	}
 
-	if gr.Status.State == "" && len(genResources) > 0 {
-		log.V(4).Info("updating policy status", "policy", policy.Name, "data", ruleNameToProcessingTime)
-		c.policyStatusListener.Update(generateSyncStats{
-			policyName:               policy.Name,
-			ruleNameToProcessingTime: ruleNameToProcessingTime,
-		})
-	}
-
 	return genResources, nil
 }
-
-type generateSyncStats struct {
-	policyName               string
-	ruleNameToProcessingTime map[string]time.Duration
-}
-
-func (vc generateSyncStats) PolicyName() string {
-	return vc.policyName
-}
-
-func (vc generateSyncStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
-
-	for i := range status.Rules {
-		if executionTime, exist := vc.ruleNameToProcessingTime[status.Rules[i].Name]; exist {
-			status.ResourcesGeneratedCount++
-			status.Rules[i].ResourcesGeneratedCount++
-			averageOver := int64(status.Rules[i].AppliedCount + status.Rules[i].FailedCount)
-			status.Rules[i].ExecutionTime = updateGenerateExecutionTime(
-				executionTime,
-				status.Rules[i].ExecutionTime,
-				averageOver,
-			).String()
-		}
-	}
-
-	return status
-}
-
-func updateGenerateExecutionTime(newTime time.Duration, oldAverageTimeString string, averageOver int64) time.Duration {
-	if averageOver == 0 {
-		return newTime
-	}
-	oldAverageExecutionTime, _ := time.ParseDuration(oldAverageTimeString)
-	numerator := (oldAverageExecutionTime.Nanoseconds() * averageOver) + newTime.Nanoseconds()
-	denominator := averageOver
-	newAverageTimeInNanoSeconds := numerator / denominator
-	return time.Duration(newAverageTimeInNanoSeconds) * time.Nanosecond
-}
-
 func getResourceInfo(object map[string]interface{}) (kind, name, namespace, apiversion string, err error) {
	if kind, _, err = unstructured.NestedString(object, "kind"); err != nil {
 		return "", "", "", "", err

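The removed generateSyncStats code keeps a rolling average of rule execution time, stored as a string-encoded time.Duration. Note, from the diff itself, that this variant divides by averageOver, while the updateAverageTime variant removed later in this commit divides by averageOver+1, which is the conventional incremental mean. A standalone sketch of the +1 form, simplified to pass time.Duration directly instead of a string, with a worked check:

package main

import (
	"fmt"
	"time"
)

// updateAverageTime folds a new sample into a running mean that already
// covers averageOver samples: newAvg = (oldAvg*n + sample) / (n+1).
func updateAverageTime(sample, oldAverage time.Duration, averageOver int64) time.Duration {
	if averageOver == 0 {
		return sample
	}
	numerator := (oldAverage.Nanoseconds() * averageOver) + sample.Nanoseconds()
	return time.Duration(numerator / (averageOver + 1))
}

func main() {
	// Average over {100ns, 200ns, 300ns} built incrementally: 100 -> 150 -> 200.
	avg := time.Duration(0)
	for i, d := range []time.Duration{100, 200, 300} {
		avg = updateAverageTime(d, avg, int64(i))
		fmt.Println(avg) // prints 100ns, then 150ns, then 200ns
	}
}
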
@@ -14,7 +14,6 @@ import (
 	"github.com/kyverno/kyverno/pkg/config"
 	dclient "github.com/kyverno/kyverno/pkg/dclient"
 	"github.com/kyverno/kyverno/pkg/event"
-	"github.com/kyverno/kyverno/pkg/policystatus"
 	"github.com/kyverno/kyverno/pkg/resourcecache"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -66,9 +65,8 @@ type Controller struct {
 
 	//TODO: list of generic informers
 	// only support Namespaces for re-evaluation on resource updates
-	nsInformer           informers.GenericInformer
-	policyStatusListener policystatus.Listener
-	log                  logr.Logger
+	nsInformer informers.GenericInformer
+	log        logr.Logger
 
 	Config   config.Interface
 	resCache resourcecache.ResourceCache
@@ -83,23 +81,21 @@ func NewController(
 	grInformer kyvernoinformer.GenerateRequestInformer,
 	eventGen event.Interface,
 	dynamicInformer dynamicinformer.DynamicSharedInformerFactory,
-	policyStatus policystatus.Listener,
 	log logr.Logger,
 	dynamicConfig config.Interface,
 	resourceCache resourcecache.ResourceCache,
 ) (*Controller, error) {
 
 	c := Controller{
-		client:               client,
-		kyvernoClient:        kyvernoClient,
-		policyInformer:       policyInformer,
-		eventGen:             eventGen,
-		queue:                workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "generate-request"),
-		dynamicInformer:      dynamicInformer,
-		log:                  log,
-		policyStatusListener: policyStatus,
-		Config:               dynamicConfig,
-		resCache:             resourceCache,
+		client:          client,
+		kyvernoClient:   kyvernoClient,
+		policyInformer:  policyInformer,
+		eventGen:        eventGen,
+		queue:           workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "generate-request"),
+		dynamicInformer: dynamicInformer,
+		log:             log,
+		Config:          dynamicConfig,
+		resCache:        resourceCache,
 	}
 
 	c.statusControl = StatusControl{client: kyvernoClient}

@ -1,53 +0,0 @@
|
|||
package generate
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
|
||||
)
|
||||
|
||||
func Test_Stats(t *testing.T) {
|
||||
testCase := struct {
|
||||
generatedSyncStats []generateSyncStats
|
||||
expectedOutput []byte
|
||||
existingStatus map[string]v1.PolicyStatus
|
||||
}{
|
||||
expectedOutput: []byte(`{"policy1":{"resourcesGeneratedCount":2,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"23ns","resourcesGeneratedCount":1},{"ruleName":"rule2","averageExecutionTime":"44ns","resourcesGeneratedCount":1},{"ruleName":"rule3"}]}}`),
|
||||
generatedSyncStats: []generateSyncStats{
|
||||
{
|
||||
policyName: "policy1",
|
||||
ruleNameToProcessingTime: map[string]time.Duration{
|
||||
"rule1": time.Nanosecond * 23,
|
||||
"rule2": time.Nanosecond * 44,
|
||||
},
|
||||
},
|
||||
},
|
||||
existingStatus: map[string]v1.PolicyStatus{
|
||||
"policy1": {
|
||||
Rules: []v1.RuleStats{
|
||||
{
|
||||
Name: "rule1",
|
||||
},
|
||||
{
|
||||
Name: "rule2",
|
||||
},
|
||||
{
|
||||
Name: "rule3",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
for _, generateSyncStat := range testCase.generatedSyncStats {
|
||||
testCase.existingStatus[generateSyncStat.PolicyName()] = generateSyncStat.UpdateStatus(testCase.existingStatus[generateSyncStat.PolicyName()])
|
||||
}
|
||||
|
||||
output, _ := json.Marshal(testCase.existingStatus)
|
||||
if !reflect.DeepEqual(output, testCase.expectedOutput) {
|
||||
t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
|
||||
}
|
||||
}
|
|
@@ -17,7 +17,6 @@ import (
 	requestlister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1alpha1"
 	dclient "github.com/kyverno/kyverno/pkg/dclient"
 	"github.com/kyverno/kyverno/pkg/engine/response"
-	"github.com/kyverno/kyverno/pkg/policystatus"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/tools/cache"
@@ -68,7 +67,6 @@ func NewReportChangeRequestGenerator(client *policyreportclient.Clientset,
 	clusterReportReqInformer requestinformer.ClusterReportChangeRequestInformer,
 	cpolInformer kyvernoinformer.ClusterPolicyInformer,
 	polInformer kyvernoinformer.PolicyInformer,
-	policyStatus policystatus.Listener,
 	log logr.Logger) *Generator {
 	gen := Generator{
 		dclient: dclient,

@@ -1,31 +0,0 @@
-package policystatus
-
-import "sync"
-
-// keyToMutex allows status to be updated
-//for different policies at the same time
-//while ensuring the status for same policies
-//are updated one at a time.
-type keyToMutex struct {
-	mu    sync.RWMutex
-	keyMu map[string]*sync.RWMutex
-}
-
-func newKeyToMutex() *keyToMutex {
-	return &keyToMutex{
-		mu:    sync.RWMutex{},
-		keyMu: make(map[string]*sync.RWMutex),
-	}
-}
-
-func (k *keyToMutex) Get(key string) *sync.RWMutex {
-	k.mu.Lock()
-	defer k.mu.Unlock()
-	mutex := k.keyMu[key]
-	if mutex == nil {
-		mutex = &sync.RWMutex{}
-		k.keyMu[key] = mutex
-	}
-
-	return mutex
-}

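The deleted keyToMutex serializes updates to the same policy while letting different policies update in parallel. A condensed, runnable usage sketch of the same pattern; the counters map is a stand-in for the per-policy status cache that dataMu guarded in the deleted Sync:

package main

import (
	"fmt"
	"sync"
)

// keyToMutex is a condensed copy of the deleted type: one lazily created
// mutex per key, with an outer mutex guarding the map of mutexes.
type keyToMutex struct {
	mu    sync.Mutex
	keyMu map[string]*sync.RWMutex
}

func newKeyToMutex() *keyToMutex {
	return &keyToMutex{keyMu: make(map[string]*sync.RWMutex)}
}

func (k *keyToMutex) Get(key string) *sync.RWMutex {
	k.mu.Lock()
	defer k.mu.Unlock()
	m := k.keyMu[key]
	if m == nil {
		m = &sync.RWMutex{}
		k.keyMu[key] = m
	}
	return m
}

func main() {
	locks := newKeyToMutex()
	counters := map[string]int{}
	var cMu sync.Mutex // protects the map itself, as dataMu did in the deleted Sync

	var wg sync.WaitGroup
	for i := 0; i < 100; i++ {
		key := fmt.Sprintf("policy%d", i%4)
		wg.Add(1)
		go func(key string) {
			defer wg.Done()
			locks.Get(key).Lock() // updates to the same policy serialize here
			defer locks.Get(key).Unlock()
			cMu.Lock()
			counters[key]++
			cMu.Unlock()
		}(key)
	}
	wg.Wait()
	fmt.Println(counters) // map[policy0:25 policy1:25 policy2:25 policy3:25]
}
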
@@ -1,219 +0,0 @@
-package policystatus
-
-import (
-	"context"
-	"encoding/json"
-	"reflect"
-	"strings"
-	"sync"
-	"time"
-
-	"github.com/go-logr/logr"
-
-	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
-	"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
-	kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/util/wait"
-	log "sigs.k8s.io/controller-runtime/pkg/log"
-)
-
-// Policy status implementation works in the following way,
-// Currently policy status maintains a cache of the status of each policy.
-// Every x unit of time the status of policy is updated using
-//the data from the cache.
-//The sync exposes a listener which accepts a statusUpdater
-//interface which dictates how the status should be updated.
-//The status is updated by a worker that receives the interface
-//on a channel.
-//The worker then updates the current status using the methods
-//exposed by the interface.
-//Current implementation is designed to be thread safe with optimized
-//locking for each policy.
-
-// statusUpdater defines a type to have a method which
-// updates the given status
-type statusUpdater interface {
-	PolicyName() string
-	UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus
-}
-
-// Listener is a channel of statusUpdater instances
-type Listener chan statusUpdater
-
-// Update queues an status update request
-func (l Listener) Update(s statusUpdater) {
-	l <- s
-}
-
-// Sync is the object which is used to initialize
-//the policyStatus sync, can be considered the parent object
-//since it contains access to all the persistent data present
-//in this package.
-type Sync struct {
-	cache    *cache
-	Listener Listener
-	client   *versioned.Clientset
-	lister   kyvernolister.ClusterPolicyLister
-	nsLister kyvernolister.PolicyLister
-	log      logr.Logger
-}
-
-type cache struct {
-	dataMu     sync.RWMutex
-	data       map[string]v1.PolicyStatus
-	keyToMutex *keyToMutex
-}
-
-// NewSync creates a new Sync instance
-func NewSync(c *versioned.Clientset, lister kyvernolister.ClusterPolicyLister, nsLister kyvernolister.PolicyLister) *Sync {
-	return &Sync{
-		cache: &cache{
-			dataMu:     sync.RWMutex{},
-			data:       make(map[string]v1.PolicyStatus),
-			keyToMutex: newKeyToMutex(),
-		},
-		client:   c,
-		lister:   lister,
-		nsLister: nsLister,
-		Listener: make(chan statusUpdater, 20),
-		log:      log.Log.WithName("PolicyStatus"),
-	}
-}
-
-// Run starts workers and periodically flushes the cached status
-func (s *Sync) Run(workers int, stopCh <-chan struct{}) {
-	for i := 0; i < workers; i++ {
-		go s.updateStatusCache(stopCh)
-	}
-
-	// sync the status to the existing policy every minute
-	wait.Until(s.writePolicyStatus, time.Minute, stopCh)
-	<-stopCh
-}
-
-// updateStatusCache is a worker which adds the current status
-// to the cache, using the statusUpdater interface
-func (s *Sync) updateStatusCache(stopCh <-chan struct{}) {
-	for {
-		select {
-		case statusUpdater := <-s.Listener:
-			name := statusUpdater.PolicyName()
-			s.log.V(4).Info("received policy status update request", "policy", name)
-
-			s.cache.keyToMutex.Get(name).Lock()
-
-			s.cache.dataMu.RLock()
-			status, exist := s.cache.data[statusUpdater.PolicyName()]
-			s.cache.dataMu.RUnlock()
-			if !exist {
-				policy, _ := s.lister.Get(statusUpdater.PolicyName())
-				if policy != nil {
-					status = policy.Status
-				}
-			}
-
-			updatedStatus := statusUpdater.UpdateStatus(status)
-
-			s.cache.dataMu.Lock()
-			s.cache.data[statusUpdater.PolicyName()] = updatedStatus
-			s.cache.dataMu.Unlock()
-
-			s.cache.keyToMutex.Get(statusUpdater.PolicyName()).Unlock()
-			oldStatus, _ := json.Marshal(status)
-			newStatus, _ := json.Marshal(updatedStatus)
-
-			s.log.V(5).Info("updated policy status in the cache", "policy", statusUpdater.PolicyName(),
-				"oldStatus", string(oldStatus), "newStatus", string(newStatus))
-
-		case <-stopCh:
-			return
-		}
-	}
-}
-
-// writePolicyStatus sends the update request to the APIServer
-// syncs the status (from cache) to the policy
-func (s *Sync) writePolicyStatus() {
-	for key, status := range s.getCachedStatus() {
-		s.log.V(4).Info("updating policy status", "policy", key)
-		namespace, policyName := s.parseStatusKey(key)
-		if namespace == "" {
-			s.updateClusterPolicy(policyName, key, status)
-		} else {
-			s.updateNamespacedPolicyStatus(policyName, namespace, key, status)
-		}
-	}
-}
-
-func (s *Sync) parseStatusKey(key string) (string, string) {
-	namespace := ""
-	policyName := key
-
-	index := strings.Index(key, "/")
-	if index != -1 {
-		namespace = key[:index]
-		policyName = key[index+1:]
-	}
-
-	return namespace, policyName
-}
-
-func (s *Sync) updateClusterPolicy(policyName, key string, status v1.PolicyStatus) {
-	defer s.deleteCachedStatus(key)
-
-	policy, err := s.lister.Get(policyName)
-	if err != nil {
-		s.log.Error(err, "failed to update policy status", "policy", policyName)
-		return
-	}
-
-	if reflect.DeepEqual(status, policy.Status) {
-		return
-	}
-
-	policy.Status = status
-	_, err = s.client.KyvernoV1().ClusterPolicies().UpdateStatus(context.TODO(), policy, metav1.UpdateOptions{})
-	if err != nil {
-		s.log.Error(err, "failed to update policy status", "policy", policyName)
-	}
-}
-
-func (s *Sync) updateNamespacedPolicyStatus(policyName, namespace, key string, status v1.PolicyStatus) {
-	defer s.deleteCachedStatus(key)
-
-	policy, err := s.nsLister.Policies(namespace).Get(policyName)
-	if err != nil {
-		s.log.Error(err, "failed to update policy status", "policy", policyName)
-		return
-	}
-
-	if reflect.DeepEqual(status, policy.Status) {
-		return
-	}
-
-	policy.Status = status
-	_, err = s.client.KyvernoV1().Policies(namespace).UpdateStatus(context.TODO(), policy, metav1.UpdateOptions{})
-	if err != nil {
-		s.log.Error(err, "failed to update namespaced policy status", "policy", policyName)
-	}
-}
-
-func (s *Sync) deleteCachedStatus(policyName string) {
-	s.cache.dataMu.Lock()
-	defer s.cache.dataMu.Unlock()
-
-	delete(s.cache.data, policyName)
-}
-
-func (s *Sync) getCachedStatus() map[string]v1.PolicyStatus {
-	s.cache.dataMu.Lock()
-	defer s.cache.dataMu.Unlock()
-
-	var nameToStatus = make(map[string]v1.PolicyStatus, len(s.cache.data))
-	for k, v := range s.cache.data {
-		nameToStatus[k] = v
-	}
-
-	return nameToStatus
-}

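The deleted sync follows a simple shape: producers enqueue small statusUpdater values on a buffered channel, workers fold them into a guarded cache, and wait.Until flushes the cache to the API server once a minute. A distilled, runnable sketch of that shape, flushing to stdout instead of the API server:

package main

import (
	"fmt"
	"sync"
)

// updater mirrors the deleted statusUpdater interface: a key plus a pure
// function from old status to new status (an int here, PolicyStatus there).
type updater interface {
	Key() string
	Apply(old int) int
}

type incr struct{ key string }

func (i incr) Key() string       { return i.key }
func (i incr) Apply(old int) int { return old + 1 }

func main() {
	listener := make(chan updater, 20) // buffered, like the deleted Listener

	var mu sync.Mutex
	cache := map[string]int{}

	done := make(chan struct{})
	go func() { // worker: fold queued updates into the cache
		defer close(done)
		for u := range listener {
			mu.Lock()
			cache[u.Key()] = u.Apply(cache[u.Key()])
			mu.Unlock()
		}
	}()

	for i := 0; i < 5; i++ {
		listener <- incr{key: "policy1"}
	}
	close(listener)
	<-done

	// The deleted code ran this flush every minute via wait.Until,
	// writing to the ClusterPolicy/Policy status subresource instead.
	fmt.Println(cache) // map[policy1:5]
}
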
@@ -1,86 +0,0 @@
-package policystatus
-
-import (
-	"encoding/json"
-	"fmt"
-	"testing"
-	"time"
-
-	"k8s.io/apimachinery/pkg/labels"
-
-	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
-	lv1 "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
-)
-
-type dummyStore struct {
-}
-
-func (d dummyStore) Get(policyName string) (*v1.ClusterPolicy, error) {
-	return &v1.ClusterPolicy{}, nil
-}
-
-type dummyStatusUpdater struct {
-}
-
-func (d dummyStatusUpdater) UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus {
-	status.RulesAppliedCount++
-	return status
-}
-
-func (d dummyStatusUpdater) PolicyName() string {
-	return "policy1"
-}
-
-type dummyLister struct {
-}
-
-func (dl dummyLister) List(selector labels.Selector) (ret []*v1.ClusterPolicy, err error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
-func (dl dummyLister) Get(name string) (*v1.ClusterPolicy, error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
-func (dl dummyLister) ListResources(selector labels.Selector) (ret []*v1.ClusterPolicy, err error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
-// type dymmyNsNamespace struct {}
-
-type dummyNsLister struct {
-}
-
-func (dl dummyNsLister) Policies(name string) lv1.PolicyNamespaceLister {
-	return dummyNsLister{}
-}
-
-func (dl dummyNsLister) List(selector labels.Selector) (ret []*v1.Policy, err error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
-func (dl dummyNsLister) Get(name string) (*v1.Policy, error) {
-	return nil, fmt.Errorf("not implemented")
-}
-
-func TestKeyToMutex(t *testing.T) {
-	expectedCache := `{"policy1":{"rulesAppliedCount":100}}`
-
-	stopCh := make(chan struct{})
-	s := NewSync(nil, dummyLister{}, dummyNsLister{})
-	for i := 0; i < 100; i++ {
-		go s.updateStatusCache(stopCh)
-	}
-
-	for i := 0; i < 100; i++ {
-		go s.Listener.Update(dummyStatusUpdater{})
-	}
-
-	<-time.After(time.Second * 3)
-	stopCh <- struct{}{}
-
-	cacheRaw, _ := json.Marshal(s.cache.data)
-	if string(cacheRaw) != expectedCache {
-		t.Errorf("\nTestcase Failed\nGot:\n%v\nExpected:\n%v\n", string(cacheRaw), expectedCache)
-	}
-}

@@ -4,8 +4,6 @@ import (
 	contextdefault "context"
 	"encoding/json"
 	"fmt"
-	"reflect"
-	"sort"
 	"strings"
 	"time"
 
@@ -98,10 +96,8 @@ func (ws *WebhookServer) handleGenerate(
 			// some generate rules do apply to the resource
 			engineResponses = append(engineResponses, engineResponse)
 			triggeredGeneratePolicies = append(triggeredGeneratePolicies, *policy)
-			ws.statusListener.Update(generateStats{
-				resp: engineResponse,
-			})
 		}
 
 		// registering the kyverno_policy_rule_results_info metric concurrently
 		go ws.registerPolicyRuleResultsMetricGeneration(logger, string(request.Operation), *policy, *engineResponse, admissionRequestTimestamp)
@@ -451,76 +447,6 @@ func transform(userRequestInfo kyverno.RequestInfo, er *response.EngineResponse)
 	return gr
 }
 
-type generateStats struct {
-	resp *response.EngineResponse
-}
-
-func (gs generateStats) PolicyName() string {
-	return gs.resp.PolicyResponse.Policy.Name
-}
-
-func (gs generateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
-	if reflect.DeepEqual(response.EngineResponse{}, gs.resp) {
-		return status
-	}
-
-	var nameToRule = make(map[string]v1.RuleStats)
-	for _, rule := range status.Rules {
-		nameToRule[rule.Name] = rule
-	}
-
-	for _, rule := range gs.resp.PolicyResponse.Rules {
-		ruleStat := nameToRule[rule.Name]
-		ruleStat.Name = rule.Name
-
-		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
-		ruleStat.ExecutionTime = updateAverageTime(
-			rule.ProcessingTime,
-			ruleStat.ExecutionTime,
-			averageOver).String()
-
-		if rule.Success {
-			status.RulesAppliedCount++
-			ruleStat.AppliedCount++
-		} else {
-			status.RulesFailedCount++
-			ruleStat.FailedCount++
-		}
-
-		nameToRule[rule.Name] = ruleStat
-	}
-
-	var policyAverageExecutionTime time.Duration
-	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
-	for _, ruleStat := range nameToRule {
-		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
-		if err == nil {
-			policyAverageExecutionTime += executionTime
-		}
-		ruleStats = append(ruleStats, ruleStat)
-	}
-
-	sort.Slice(ruleStats, func(i, j int) bool {
-		return ruleStats[i].Name < ruleStats[j].Name
-	})
-
-	status.AvgExecutionTime = policyAverageExecutionTime.String()
-	status.Rules = ruleStats
-
-	return status
-}
-
-func updateAverageTime(newTime time.Duration, oldAverageTimeString string, averageOver int64) time.Duration {
-	if averageOver == 0 {
-		return newTime
-	}
-	oldAverageExecutionTime, _ := time.ParseDuration(oldAverageTimeString)
-	numerator := (oldAverageExecutionTime.Nanoseconds() * averageOver) + newTime.Nanoseconds()
-	denominator := averageOver + 1
-	newAverageTimeInNanoSeconds := numerator / denominator
-	return time.Duration(newAverageTimeInNanoSeconds) * time.Nanosecond
-}
-
 type generateRequestResponse struct {
 	gr  v1.GenerateRequestSpec
 	err error

@@ -3,7 +3,6 @@ package webhooks
 import (
 	"fmt"
 	"reflect"
-	"sort"
 	"time"
 
 	"github.com/kyverno/kyverno/pkg/common"
@@ -150,10 +149,6 @@ func (ws *WebhookServer) applyMutation(request *v1beta1.AdmissionRequest, policy
 	engineResponse := engine.Mutate(policyContext)
 	policyPatches := engineResponse.GetPatches()
 
-	if engineResponse.PolicyResponse.RulesAppliedCount > 0 && len(policyPatches) > 0 {
-		ws.statusListener.Update(mutateStats{resp: engineResponse, namespace: policyContext.Policy.Namespace})
-	}
-
 	if !engineResponse.IsSuccessful() && len(engineResponse.GetFailedRules()) > 0 {
 		return nil, nil, fmt.Errorf("failed to apply policy %s rules %v", policyContext.Policy.Name, engineResponse.GetFailedRules())
 	}
@@ -185,68 +180,3 @@ func (ws *WebhookServer) registerPolicyRuleExecutionLatencyMetricMutate(logger l
 		logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
 	}
 }
-
-type mutateStats struct {
-	resp      *response.EngineResponse
-	namespace string
-}
-
-func (ms mutateStats) PolicyName() string {
-	if ms.namespace == "" {
-		return ms.resp.PolicyResponse.Policy.Name
-	}
-	return ms.namespace + "/" + ms.resp.PolicyResponse.Policy.Name
-}
-
-func (ms mutateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
-	if reflect.DeepEqual(response.EngineResponse{}, ms.resp) {
-		return status
-	}
-
-	var nameToRule = make(map[string]v1.RuleStats)
-	for _, rule := range status.Rules {
-		nameToRule[rule.Name] = rule
-	}
-
-	for _, rule := range ms.resp.PolicyResponse.Rules {
-		ruleStat := nameToRule[rule.Name]
-		ruleStat.Name = rule.Name
-
-		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
-		ruleStat.ExecutionTime = updateAverageTime(
-			rule.ProcessingTime,
-			ruleStat.ExecutionTime,
-			averageOver).String()
-
-		if rule.Success {
-			status.RulesAppliedCount++
-			status.ResourcesMutatedCount++
-			ruleStat.AppliedCount++
-			ruleStat.ResourcesMutatedCount++
-		} else {
-			status.RulesFailedCount++
-			ruleStat.FailedCount++
-		}
-
-		nameToRule[rule.Name] = ruleStat
-	}
-
-	var policyAverageExecutionTime time.Duration
-	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
-	for _, ruleStat := range nameToRule {
-		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
-		if err == nil {
-			policyAverageExecutionTime += executionTime
-		}
-		ruleStats = append(ruleStats, ruleStat)
-	}
-
-	sort.Slice(ruleStats, func(i, j int) bool {
-		return ruleStats[i].Name < ruleStats[j].Name
-	})
-
-	status.AvgExecutionTime = policyAverageExecutionTime.String()
-	status.Rules = ruleStats
-
-	return status
-}

@@ -1,211 +0,0 @@
-package webhooks
-
-import (
-	"encoding/json"
-	"reflect"
-	"testing"
-	"time"
-
-	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
-	"github.com/kyverno/kyverno/pkg/engine/response"
-)
-
-func Test_GenerateStats(t *testing.T) {
-	testCase := struct {
-		generateStats  []*response.EngineResponse
-		expectedOutput []byte
-	}{
-		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule5","averageExecutionTime":"243ns","appliedCount":1},{"ruleName":"rule6","averageExecutionTime":"251ns","failedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule5","averageExecutionTime":"222ns","appliedCount":1},{"ruleName":"rule6","averageExecutionTime":"211ns","failedCount":1}]}}`),
-		generateStats: []*response.EngineResponse{
-			{
-				PolicyResponse: response.PolicyResponse{
-					Policy: response.PolicySpec{Name: "policy1"},
-					Rules: []response.RuleResponse{
-						{
-							Name:    "rule5",
-							Success: true,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 243,
-							},
-						},
-						{
-							Name:    "rule6",
-							Success: false,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 251,
-							},
-						},
-					},
-				},
-			},
-			{
-				PolicyResponse: response.PolicyResponse{
-					Policy: response.PolicySpec{Name: "policy2"},
-					Rules: []response.RuleResponse{
-						{
-							Name:    "rule5",
-							Success: true,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 222,
-							},
-						},
-						{
-							Name:    "rule6",
-							Success: false,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 211,
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	policyNameToStatus := map[string]v1.PolicyStatus{}
-
-	for _, generateStat := range testCase.generateStats {
-		receiver := generateStats{
-			resp: generateStat,
-		}
-		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
-	}
-
-	output, _ := json.Marshal(policyNameToStatus)
-	if !reflect.DeepEqual(output, testCase.expectedOutput) {
-		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
-	}
-}
-
-func Test_MutateStats(t *testing.T) {
-	testCase := struct {
-		mutateStats    []*response.EngineResponse
-		expectedOutput []byte
-	}{
-		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesMutatedCount":1,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"243ns","appliedCount":1,"resourcesMutatedCount":1},{"ruleName":"rule2","averageExecutionTime":"251ns","failedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesMutatedCount":1,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"222ns","appliedCount":1,"resourcesMutatedCount":1},{"ruleName":"rule2","averageExecutionTime":"211ns","failedCount":1}]}}`),
-		mutateStats: []*response.EngineResponse{
-			{
-				PolicyResponse: response.PolicyResponse{
-					Policy: response.PolicySpec{Name: "policy1"},
-					Rules: []response.RuleResponse{
-						{
-							Name:    "rule1",
-							Success: true,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 243,
-							},
-						},
-						{
-							Name:    "rule2",
-							Success: false,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 251,
-							},
-						},
-					},
-				},
-			},
-			{
-				PolicyResponse: response.PolicyResponse{
-					Policy: response.PolicySpec{Name: "policy2"},
-					Rules: []response.RuleResponse{
-						{
-							Name:    "rule1",
-							Success: true,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 222,
-							},
-						},
-						{
-							Name:    "rule2",
-							Success: false,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 211,
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	policyNameToStatus := map[string]v1.PolicyStatus{}
-	for _, mutateStat := range testCase.mutateStats {
-		receiver := mutateStats{
-			resp: mutateStat,
-		}
-		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
-	}
-
-	output, _ := json.Marshal(policyNameToStatus)
-	if !reflect.DeepEqual(output, testCase.expectedOutput) {
-		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
-	}
-}
-
-func Test_ValidateStats(t *testing.T) {
-	testCase := struct {
-		validateStats  []*response.EngineResponse
-		expectedOutput []byte
-	}{
-		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesBlockedCount":1,"ruleStatus":[{"ruleName":"rule3","averageExecutionTime":"243ns","appliedCount":1},{"ruleName":"rule4","averageExecutionTime":"251ns","failedCount":1,"resourcesBlockedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule3","averageExecutionTime":"222ns","appliedCount":1},{"ruleName":"rule4","averageExecutionTime":"211ns","failedCount":1}]}}`),
-		validateStats: []*response.EngineResponse{
-			{
-				PolicyResponse: response.PolicyResponse{
-					Policy:                  response.PolicySpec{Name: "policy1"},
-					ValidationFailureAction: "enforce",
-					Rules: []response.RuleResponse{
-						{
-							Name:    "rule3",
-							Success: true,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 243,
-							},
-						},
-						{
-							Name:    "rule4",
-							Success: false,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 251,
-							},
-						},
-					},
-				},
-			},
-			{
-				PolicyResponse: response.PolicyResponse{
-					Policy: response.PolicySpec{Name: "policy2"},
-					Rules: []response.RuleResponse{
-						{
-							Name:    "rule3",
-							Success: true,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 222,
-							},
-						},
-						{
-							Name:    "rule4",
-							Success: false,
-							RuleStats: response.RuleStats{
-								ProcessingTime: time.Nanosecond * 211,
-							},
-						},
-					},
-				},
-			},
-		},
-	}
-
-	policyNameToStatus := map[string]v1.PolicyStatus{}
-	for _, validateStat := range testCase.validateStats {
-		receiver := validateStats{
-			resp: validateStat,
-		}
-		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
-	}
-
-	output, _ := json.Marshal(policyNameToStatus)
-	if !reflect.DeepEqual(output, testCase.expectedOutput) {
-		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
-	}
-}

@@ -29,7 +29,6 @@ import (
 	"github.com/kyverno/kyverno/pkg/openapi"
 	"github.com/kyverno/kyverno/pkg/policycache"
 	"github.com/kyverno/kyverno/pkg/policyreport"
-	"github.com/kyverno/kyverno/pkg/policystatus"
 	"github.com/kyverno/kyverno/pkg/resourcecache"
 	tlsutils "github.com/kyverno/kyverno/pkg/tls"
 	userinfo "github.com/kyverno/kyverno/pkg/userinfo"
@@ -97,9 +96,6 @@ type WebhookServer struct {
 	// webhook registration client
 	webhookRegister *webhookconfig.Register
 
-	// API to send policy stats for aggregation
-	statusListener policystatus.Listener
-
 	// helpers to validate against current loaded configuration
 	configHandler config.Interface
 
@@ -151,7 +147,6 @@ func NewWebhookServer(
 	pCache policycache.Interface,
 	webhookRegistrationClient *webhookconfig.Register,
 	webhookMonitor *webhookconfig.Monitor,
-	statusSync policystatus.Listener,
 	configHandler config.Interface,
 	prGenerator policyreport.GeneratorInterface,
 	grGenerator *webhookgenerate.Generator,
@@ -196,7 +191,6 @@ func NewWebhookServer(
 		eventGen:        eventGen,
 		pCache:          pCache,
 		webhookRegister: webhookRegistrationClient,
-		statusListener:  statusSync,
 		configHandler:   configHandler,
 		cleanUp:         cleanUp,
 		webhookMonitor:  webhookMonitor,
@@ -528,33 +522,21 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
 	}
 
 	vh := &validationHandler{
-		log:            ws.log,
-		statusListener: ws.statusListener,
-		eventGen:       ws.eventGen,
-		prGenerator:    ws.prGenerator,
+		log:         ws.log,
+		eventGen:    ws.eventGen,
+		prGenerator: ws.prGenerator,
 	}
 
 	ok, msg := vh.handleValidation(ws.promConfig, request, policies, policyContext, namespaceLabels, admissionRequestTimestamp)
 	if !ok {
 		logger.Info("admission request denied")
-		return &v1beta1.AdmissionResponse{
-			Allowed: false,
-			Result: &metav1.Status{
-				Status:  "Failure",
-				Message: msg,
-			},
-		}
+		return failureResponse(msg)
 	}
 
 	// push admission request to audit handler, this won't block the admission request
 	ws.auditHandler.Add(request.DeepCopy())
 
-	return &v1beta1.AdmissionResponse{
-		Allowed: true,
-		Result: &metav1.Status{
-			Status: "Success",
-		},
-	}
+	return successResponse(nil)
 }
 
 // RunAsync TLS server in separate thread and returns control immediately

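The resourceValidation hunk above replaces inline AdmissionResponse literals with failureResponse and successResponse helpers whose definitions are not part of these hunks. To preserve the replaced behavior they would look roughly like the sketch below; the exact signatures, in particular the patch parameter implied by successResponse(nil), are assumptions:

package webhooks

import (
	"k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// failureResponse denies the request with the given message, matching the
// literal it replaces in resourceValidation.
func failureResponse(message string) *v1beta1.AdmissionResponse {
	return &v1beta1.AdmissionResponse{
		Allowed: false,
		Result: &metav1.Status{
			Status:  "Failure",
			Message: message,
		},
	}
}

// successResponse allows the request; the patch parameter is an assumption
// inferred from the successResponse(nil) call site above.
func successResponse(patch []byte) *v1beta1.AdmissionResponse {
	return &v1beta1.AdmissionResponse{
		Allowed: true,
		Patch:   patch,
		Result: &metav1.Status{
			Status: "Success",
		},
	}
}
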
@@ -18,7 +18,6 @@ import (
 	"github.com/kyverno/kyverno/pkg/metrics"
 	"github.com/kyverno/kyverno/pkg/policycache"
 	"github.com/kyverno/kyverno/pkg/policyreport"
-	"github.com/kyverno/kyverno/pkg/policystatus"
 	"github.com/kyverno/kyverno/pkg/resourcecache"
 	"github.com/kyverno/kyverno/pkg/userinfo"
 	"k8s.io/api/admission/v1beta1"
@@ -47,12 +46,11 @@ type AuditHandler interface {
 }
 
 type auditHandler struct {
-	client         *client.Client
-	queue          workqueue.RateLimitingInterface
-	pCache         policycache.Interface
-	eventGen       event.Interface
-	statusListener policystatus.Listener
-	prGenerator    policyreport.GeneratorInterface
+	client      *client.Client
+	queue       workqueue.RateLimitingInterface
+	pCache      policycache.Interface
+	eventGen    event.Interface
+	prGenerator policyreport.GeneratorInterface
 
 	rbLister rbaclister.RoleBindingLister
 	rbSynced cache.InformerSynced
@@ -70,7 +68,6 @@ type auditHandler struct {
 // NewValidateAuditHandler returns a new instance of audit policy handler
 func NewValidateAuditHandler(pCache policycache.Interface,
 	eventGen event.Interface,
-	statusListener policystatus.Listener,
 	prGenerator policyreport.GeneratorInterface,
 	rbInformer rbacinformer.RoleBindingInformer,
 	crbInformer rbacinformer.ClusterRoleBindingInformer,
@@ -85,7 +82,6 @@ func NewValidateAuditHandler(pCache policycache.Interface,
 		pCache:         pCache,
 		queue:          workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
 		eventGen:       eventGen,
-		statusListener: statusListener,
 		rbLister:       rbInformer.Lister(),
 		rbSynced:       rbInformer.Informer().HasSynced,
 		crbLister:      crbInformer.Lister(),
@@ -204,10 +200,9 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
 	}
 
 	vh := &validationHandler{
-		log:            h.log,
-		statusListener: h.statusListener,
-		eventGen:       h.eventGen,
-		prGenerator:    h.prGenerator,
+		log:         h.log,
+		eventGen:    h.eventGen,
+		prGenerator: h.prGenerator,
 	}
 
 	vh.handleValidation(h.promConfig, request, policies, policyContext, namespaceLabels, admissionRequestTimestamp)

@@ -2,14 +2,11 @@ package webhooks
 
 import (
 	"github.com/kyverno/kyverno/pkg/event"
-	"github.com/kyverno/kyverno/pkg/policystatus"
 	"reflect"
-	"sort"
 	"time"
 
 	"github.com/go-logr/logr"
 	kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
-	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
 	"github.com/kyverno/kyverno/pkg/engine"
 	"github.com/kyverno/kyverno/pkg/engine/response"
 	"github.com/kyverno/kyverno/pkg/metrics"
@@ -23,10 +20,9 @@ import (
 )
 
 type validationHandler struct {
-	log            logr.Logger
-	statusListener policystatus.Listener
-	eventGen       event.Interface
-	prGenerator    policyreport.GeneratorInterface
+	log         logr.Logger
+	eventGen    event.Interface
+	prGenerator policyreport.GeneratorInterface
 }
 
 // handleValidation handles validating webhook admission request
@@ -78,11 +74,6 @@ func (v *validationHandler) handleValidation(
 
 		engineResponses = append(engineResponses, engineResponse)
 		triggeredPolicies = append(triggeredPolicies, *policy)
-		v.statusListener.Update(validateStats{
-			resp:      engineResponse,
-			namespace: policy.Namespace,
-		})
-
 		if !engineResponse.IsSuccessful() {
 			logger.V(2).Info("validation failed", "policy", policy.Name, "failed rules", engineResponse.GetFailedRules())
 			continue
@@ -185,71 +176,3 @@ func buildDeletionPrInfo(oldR unstructured.Unstructured) policyreport.Info {
 		},
 	}
 }
-
-type validateStats struct {
-	resp      *response.EngineResponse
-	namespace string
-}
-
-func (vs validateStats) PolicyName() string {
-	if vs.namespace == "" {
-		return vs.resp.PolicyResponse.Policy.Name
-	}
-	return vs.namespace + "/" + vs.resp.PolicyResponse.Policy.Name
-
-}
-
-func (vs validateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
-	if reflect.DeepEqual(response.EngineResponse{}, vs.resp) {
-		return status
-	}
-
-	var nameToRule = make(map[string]v1.RuleStats)
-	for _, rule := range status.Rules {
-		nameToRule[rule.Name] = rule
-	}
-
-	for _, rule := range vs.resp.PolicyResponse.Rules {
-		ruleStat := nameToRule[rule.Name]
-		ruleStat.Name = rule.Name
-
-		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
-		ruleStat.ExecutionTime = updateAverageTime(
-			rule.ProcessingTime,
-			ruleStat.ExecutionTime,
-			averageOver).String()
-
-		if rule.Success {
-			status.RulesAppliedCount++
-			ruleStat.AppliedCount++
-		} else {
-			status.RulesFailedCount++
-			ruleStat.FailedCount++
-			if vs.resp.PolicyResponse.ValidationFailureAction == "enforce" {
-				status.ResourcesBlockedCount++
-				ruleStat.ResourcesBlockedCount++
-			}
-		}
-
-		nameToRule[rule.Name] = ruleStat
-	}
-
-	var policyAverageExecutionTime time.Duration
-	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
-	for _, ruleStat := range nameToRule {
-		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
-		if err == nil {
-			policyAverageExecutionTime += executionTime
-		}
-		ruleStats = append(ruleStats, ruleStat)
-	}
-
-	sort.Slice(ruleStats, func(i, j int) bool {
-		return ruleStats[i].Name < ruleStats[j].Name
-	})
-
-	status.AvgExecutionTime = policyAverageExecutionTime.String()
-	status.Rules = ruleStats
-
-	return status
-}

@@ -132,22 +132,19 @@ func Test_ClusterRole_ClusterRoleBinding_Sets(t *testing.T) {
 	By("Verifying ClusterRole")
 
 	// Wait Till Creation of ClusterRole
-	err = e2e.GetWithRetry(1*time.Second, 15, func() error {
+	err = e2e.GetWithRetry(1*time.Second, 30, func() error {
 		_, err := e2eClient.GetClusteredResource(crGVR, tests.ClusterRoleName)
 		if err != nil {
 			return err
 		}
 		return nil
 	})
 
 	rRes, err := e2eClient.GetClusteredResource(crGVR, tests.ClusterRoleName)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(rRes.GetName()).To(Equal(tests.ClusterRoleName))
 	// ============================================
 
 	// ======= Verify ClusterRoleBinding Creation ========
 	By("Verifying ClusterRoleBinding")
-	err = e2e.GetWithRetry(1*time.Second, 15, func() error {
+	err = e2e.GetWithRetry(1*time.Second, 30, func() error {
 		_, err := e2eClient.GetClusteredResource(crbGVR, tests.ClusterRoleBindingName)
 		if err != nil {
 			return err
@@ -159,7 +156,6 @@ func Test_ClusterRole_ClusterRoleBinding_Sets(t *testing.T) {
 	rbRes, err := e2eClient.GetClusteredResource(crbGVR, tests.ClusterRoleBindingName)
 	Expect(err).NotTo(HaveOccurred())
 	Expect(rbRes.GetName()).To(Equal(tests.ClusterRoleBindingName))
-
 	// ============================================
 
 	// ======= CleanUp Resources =====