Mirror of https://github.com/kyverno/kyverno.git, synced 2025-03-29 10:55:05 +00:00

Commit 892f8c7040: 527 resolving merge conflicts
29 changed files with 1061 additions and 460 deletions
@@ -17,6 +17,7 @@ import (
"github.com/nirmata/kyverno/pkg/generate"
generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/signal"
@@ -137,12 +138,18 @@ func main() {
client,
pInformer.Kyverno().V1().ClusterPolicies())

// Policy Status Handler - deals with all logic related to policy status
statusSync := policystatus.NewSync(
pclient,
policyMetaStore)

// POLICY VIOLATION GENERATOR
// -- generate policy violation
pvgen := policyviolation.NewPVGenerator(pclient,
client,
pInformer.Kyverno().V1().ClusterPolicyViolations(),
pInformer.Kyverno().V1().PolicyViolations())
pInformer.Kyverno().V1().PolicyViolations(),
statusSync.Listener)

// POLICY CONTROLLER
// - reconciliation policy and policy violation
@@ -176,6 +183,7 @@ func main() {
egen,
pvgen,
kubedynamicInformer,
statusSync.Listener,
)
// GENERATE REQUEST CLEANUP
// -- cleans up the generate requests that have not been processed(i.e. state = [Pending, Failed]) for more than defined timeout
@@ -220,7 +228,7 @@ func main() {
kubeInformer.Rbac().V1().ClusterRoleBindings(),
egen,
webhookRegistrationClient,
pc.GetPolicyStatusAggregator(),
statusSync.Listener,
configData,
policyMetaStore,
pvgen,
@@ -243,6 +251,7 @@ func main() {
go grc.Run(1, stopCh)
go grcc.Run(1, stopCh)
go pvgen.Run(1, stopCh)
go statusSync.Run(1, stopCh)
go openApiSync.Run(1, stopCh)

// verifys if the admission control is enabled and active
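The wiring above passes statusSync.Listener into the violation generator, the generate controller, and the webhook server. What those components send over the listener is a value that satisfies the statusUpdater contract introduced in pkg/policystatus later in this diff. The following is a minimal sketch, not part of this commit; the admissionStats type, its fields, and the "example-policy" name are hypothetical, only the policystatus.Listener and v1.PolicyStatus APIs are taken from the commit itself.

package main

import (
	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/policystatus"
)

// admissionStats is a hypothetical statusUpdater implementation: any type
// with PolicyName() and UpdateStatus() can be sent on the listener channel.
type admissionStats struct {
	policy  string
	applied int
}

// PolicyName tells the sync which policy's cached status to update.
func (a admissionStats) PolicyName() string { return a.policy }

// UpdateStatus folds this sample into the currently cached status.
func (a admissionStats) UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus {
	status.RulesAppliedCount += a.applied
	return status
}

func main() {
	// In Kyverno itself this channel is statusSync.Listener and is drained
	// by the worker goroutines started with statusSync.Run.
	listener := make(policystatus.Listener, 1)
	listener.Send(admissionStats{policy: "example-policy", applied: 1})
}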
@@ -469,6 +469,16 @@ metadata:
name: kyverno-service-account
namespace: kyverno
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyviolations
rules:
- apiGroups: ["kyverno.io"]
resources:
- policyviolations
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
@@ -673,10 +683,10 @@ spec:
serviceAccountName: kyverno-service-account
initContainers:
- name: kyverno-pre
image: nirmata/kyvernopre:v1.1.3
image: nirmata/kyvernopre:v1.1.4-rc1
containers:
- name: kyverno
image: nirmata/kyverno:v1.1.3
image: nirmata/kyverno:v1.1.4-rc1
args:
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
# customize webhook timout
@@ -168,7 +168,7 @@ Here is a script that generates a self-signed CA, a TLS certificate-key pair, an

# Configure a namespace admin to access policy violations

During Kyverno installation, it creates a ClusterRole `policyviolation` which has the `list,get,watch` operation on resource `policyviolations`. To grant access to a namespace admin, configure the following YAML file then apply to the cluster.
During Kyverno installation, it creates a ClusterRole `kyverno:policyviolations` which has the `list,get,watch` operation on resource `policyviolations`. To grant access to a namespace admin, configure the following YAML file then apply to the cluster.

- Replace `metadata.namespace` with namespace of the admin
- Configure `subjects` field to bind admin's role to the ClusterRole `policyviolation`
@@ -227,21 +227,24 @@ type CloneFrom struct {
Name string `json:"name,omitempty"`
}

//PolicyStatus provides status for violations
// PolicyStatus mostly contains statistics related to policy
type PolicyStatus struct {
ViolationCount int `json:"violationCount"`
// average time required to process the policy rules on a resource
AvgExecutionTime string `json:"averageExecutionTime"`
// number of violations created by this policy
ViolationCount int `json:"violationCount,omitempty"`
// Count of rules that failed
RulesFailedCount int `json:"rulesFailedCount,omitempty"`
// Count of rules that were applied
RulesAppliedCount int `json:"rulesAppliedCount"`
// Count of resources for whom update/create api requests were blocked as the resoruce did not satisfy the policy rules
ResourcesBlockedCount int `json:"resourcesBlockedCount"`
// average time required to process the policy Mutation rules on a resource
AvgExecutionTimeMutation string `json:"averageMutationRulesExecutionTime"`
// average time required to process the policy Validation rules on a resource
AvgExecutionTimeValidation string `json:"averageValidationRulesExecutionTime"`
// average time required to process the policy Validation rules on a resource
AvgExecutionTimeGeneration string `json:"averageGenerationRulesExecutionTime"`
// statistics per rule
Rules []RuleStats `json:"ruleStatus"`
RulesAppliedCount int `json:"rulesAppliedCount,omitempty"`
// Count of resources that were blocked for failing a validate, across all rules
ResourcesBlockedCount int `json:"resourcesBlockedCount,omitempty"`
// Count of resources that were successfully mutated, across all rules
ResourcesMutatedCount int `json:"resourcesMutatedCount,omitempty"`
// Count of resources that were successfully generated, across all rules
ResourcesGeneratedCount int `json:"resourcesGeneratedCount,omitempty"`

Rules []RuleStats `json:"ruleStatus,omitempty"`
}

//RuleStats provides status per rule
@@ -249,13 +252,19 @@ type RuleStats struct {
// Rule name
Name string `json:"ruleName"`
// average time require to process the rule
ExecutionTime string `json:"averageExecutionTime"`
// Count of rules that were applied
AppliedCount int `json:"appliedCount"`
ExecutionTime string `json:"averageExecutionTime,omitempty"`
// number of violations created by this rule
ViolationCount int `json:"violationCount,omitempty"`
// Count of rules that failed
ViolationCount int `json:"violationCount"`
// Count of mutations
MutationCount int `json:"mutationsCount"`
FailedCount int `json:"failedCount,omitempty"`
// Count of rules that were applied
AppliedCount int `json:"appliedCount,omitempty"`
// Count of resources for whom update/create api requests were blocked as the resource did not satisfy the policy rules
ResourcesBlockedCount int `json:"resourcesBlockedCount,omitempty"`
// Count of resources that were successfully mutated
ResourcesMutatedCount int `json:"resourcesMutatedCount,omitempty"`
// Count of resources that were successfully generated
ResourcesGeneratedCount int `json:"resourcesGeneratedCount,omitempty"`
}

// PolicyList is a list of Policy resources
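Most of the counters above gained an omitempty tag, so zero-valued statistics are dropped when the status is serialized into the policy resource. A minimal sketch of that effect follows; it is not part of this commit, the main function is illustrative, and only v1.PolicyStatus and its tags are taken from the hunk above.

package main

import (
	"encoding/json"
	"fmt"

	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

func main() {
	// Only one counter is populated; every other zero-valued counter is omitted.
	status := v1.PolicyStatus{RulesAppliedCount: 3}
	out, _ := json.Marshal(status)
	// With the tags above this prints roughly:
	// {"averageExecutionTime":"","rulesAppliedCount":3}
	// averageExecutionTime has no omitempty tag, so it is always emitted.
	fmt.Println(string(out))
}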
@@ -1,6 +1,8 @@
package engine

import (
"time"

"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
@@ -25,6 +27,9 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
if !rule.HasGenerate() {
return nil
}

startTime := time.Now()

if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil {
glog.V(4).Infof(err.Error())
return nil
@@ -39,8 +44,12 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
}
// build rule Response
return &response.RuleResponse{
Name: rule.Name,
Type: "Generation",
Name: rule.Name,
Type: "Generation",
Success: true,
RuleStats: response.RuleStats{
ProcessingTime: time.Since(startTime),
},
}
}
@@ -11,6 +11,7 @@ import (
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policyviolation"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -57,6 +58,8 @@ type Controller struct {
//TODO: list of generic informers
// only support Namespaces for re-evalutation on resource updates
nsInformer informers.GenericInformer

policyStatusListener policystatus.Listener
}

//NewController returns an instance of the Generate-Request Controller
@@ -68,6 +71,7 @@ func NewController(
eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
dynamicInformer dynamicinformer.DynamicSharedInformerFactory,
policyStatus policystatus.Listener,
) *Controller {
c := Controller{
client: client,
@@ -76,8 +80,9 @@ func NewController(
pvGenerator: pvGenerator,
//TODO: do the math for worst case back off and make sure cleanup runs after that
// as we dont want a deleted GR to be re-queue
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
dynamicInformer: dynamicInformer,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
dynamicInformer: dynamicInformer,
policyStatusListener: policyStatus,
}
c.statusControl = StatusControl{client: kyvernoclient}
@@ -3,6 +3,7 @@ package generate
import (
"encoding/json"
"fmt"
"time"

"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
@@ -80,7 +81,7 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern
}

// Apply the generate rule on resource
return applyGeneratePolicy(c.client, policyContext)
return c.applyGeneratePolicy(policyContext, gr)
}

func updateStatus(statusControl StatusControlInterface, gr kyverno.GenerateRequest, err error, genResources []kyverno.ResourceSpec) error {
@@ -92,7 +93,7 @@ func updateStatus(statusControl StatusControlInterface, gr kyverno.GenerateReque
return statusControl.Success(gr, genResources)
}

func applyGeneratePolicy(client *dclient.Client, policyContext engine.PolicyContext) ([]kyverno.ResourceSpec, error) {
func (c *Controller) applyGeneratePolicy(policyContext engine.PolicyContext, gr kyverno.GenerateRequest) ([]kyverno.ResourceSpec, error) {
// List of generatedResources
var genResources []kyverno.ResourceSpec
// Get the response as the actions to be performed on the resource
@@ -107,20 +108,70 @@ func applyGeneratePolicy(client *dclient.Client, policyContext engine.PolicyCont
return rcreationTime.Before(&pcreationTime)
}()

ruleNameToProcessingTime := make(map[string]time.Duration)
for _, rule := range policy.Spec.Rules {
if !rule.HasGenerate() {
continue
}
genResource, err := applyRule(client, rule, resource, ctx, processExisting)

startTime := time.Now()
genResource, err := applyRule(c.client, rule, resource, ctx, processExisting)
if err != nil {
return nil, err
}

ruleNameToProcessingTime[rule.Name] = time.Since(startTime)
genResources = append(genResources, genResource)
}

if gr.Status.State == "" {
c.policyStatusListener.Send(generateSyncStats{
policyName: policy.Name,
ruleNameToProcessingTime: ruleNameToProcessingTime,
})
}

return genResources, nil
}

type generateSyncStats struct {
policyName string
ruleNameToProcessingTime map[string]time.Duration
}

func (vc generateSyncStats) PolicyName() string {
return vc.policyName
}

func (vc generateSyncStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {

for i := range status.Rules {
if executionTime, exist := vc.ruleNameToProcessingTime[status.Rules[i].Name]; exist {
status.ResourcesGeneratedCount += 1
status.Rules[i].ResourcesGeneratedCount += 1
averageOver := int64(status.Rules[i].AppliedCount + status.Rules[i].FailedCount)
status.Rules[i].ExecutionTime = updateGenerateExecutionTime(
executionTime,
status.Rules[i].ExecutionTime,
averageOver,
).String()
}
}

return status
}

func updateGenerateExecutionTime(newTime time.Duration, oldAverageTimeString string, averageOver int64) time.Duration {
if averageOver == 0 {
return newTime
}
oldAverageExecutionTime, _ := time.ParseDuration(oldAverageTimeString)
numerator := (oldAverageExecutionTime.Nanoseconds() * averageOver) + newTime.Nanoseconds()
denominator := averageOver
newAverageTimeInNanoSeconds := numerator / denominator
return time.Duration(newAverageTimeInNanoSeconds) * time.Nanosecond
}

func applyRule(client *dclient.Client, rule kyverno.Rule, resource unstructured.Unstructured, ctx context.EvalInterface, processExisting bool) (kyverno.ResourceSpec, error) {
var rdata map[string]interface{}
var err error
pkg/generate/policyStatus_test.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package generate

import (
"encoding/json"
"reflect"
"testing"
"time"

v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

func Test_Stats(t *testing.T) {
testCase := struct {
generatedSyncStats []generateSyncStats
expectedOutput []byte
existingStatus map[string]v1.PolicyStatus
}{
expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"","resourcesGeneratedCount":2,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"23ns","resourcesGeneratedCount":1},{"ruleName":"rule2","averageExecutionTime":"44ns","resourcesGeneratedCount":1},{"ruleName":"rule3"}]}}`),
generatedSyncStats: []generateSyncStats{
{
policyName: "policy1",
ruleNameToProcessingTime: map[string]time.Duration{
"rule1": time.Nanosecond * 23,
"rule2": time.Nanosecond * 44,
},
},
},
existingStatus: map[string]v1.PolicyStatus{
"policy1": {
Rules: []v1.RuleStats{
{
Name: "rule1",
},
{
Name: "rule2",
},
{
Name: "rule3",
},
},
},
},
}

for _, generateSyncStat := range testCase.generatedSyncStats {
testCase.existingStatus[generateSyncStat.PolicyName()] = generateSyncStat.UpdateStatus(testCase.existingStatus[generateSyncStat.PolicyName()])
}

output, _ := json.Marshal(testCase.existingStatus)
if !reflect.DeepEqual(output, testCase.expectedOutput) {
t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
}
}
@@ -19,45 +19,14 @@ import (

// applyPolicy applies policy on a resource
//TODO: generation rules
func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) (responses []response.EngineResponse) {
func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured) (responses []response.EngineResponse) {
startTime := time.Now()
var policyStats []PolicyStat

glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
defer func() {
glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
}()

// gather stats from the engine response
gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
ps := PolicyStat{}
ps.PolicyName = policyName
ps.Stats.MutationExecutionTime = policyResponse.ProcessingTime
ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
// capture rule level stats
for _, rule := range policyResponse.Rules {
rs := RuleStatinfo{}
rs.RuleName = rule.Name
rs.ExecutionTime = rule.RuleStats.ProcessingTime
if rule.Success {
rs.RuleAppliedCount++
} else {
rs.RulesFailedCount++
}
if rule.Patches != nil {
rs.MutationCount++
}
ps.Stats.Rules = append(ps.Stats.Rules, rs)
}
policyStats = append(policyStats, ps)
}
// send stats for aggregation
sendStat := func(blocked bool) {
for _, stat := range policyStats {
stat.Stats.ResourceBlocked = utils.Btoi(blocked)
//SEND
policyStatus.SendStat(stat)
}
}
var engineResponses []response.EngineResponse
var engineResponse response.EngineResponse
var err error
@@ -66,27 +35,20 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
ctx.AddResource(transformResource(resource))

//MUTATION
engineResponse, err = mutation(policy, resource, policyStatus, ctx)
engineResponse, err = mutation(policy, resource, ctx)
engineResponses = append(engineResponses, engineResponse)
if err != nil {
glog.Errorf("unable to process mutation rules: %v", err)
}
gatherStat(policy.Name, engineResponse.PolicyResponse)
//send stats
sendStat(false)

//VALIDATION
engineResponse = engine.Validate(engine.PolicyContext{Policy: policy, Context: ctx, NewResource: resource})
engineResponses = append(engineResponses, engineResponse)
// gather stats
gatherStat(policy.Name, engineResponse.PolicyResponse)
//send stats
sendStat(false)

//TODO: GENERATION
return engineResponses
}
func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface, ctx context.EvalInterface) (response.EngineResponse, error) {
func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, ctx context.EvalInterface) (response.EngineResponse, error) {

engineResponse := engine.Mutate(engine.PolicyContext{Policy: policy, NewResource: resource, Context: ctx})
if !engineResponse.IsSuccesful() {
@@ -2,7 +2,6 @@ package policy

import (
"fmt"
"reflect"
"time"

"github.com/golang/glog"
@@ -67,8 +66,6 @@ type PolicyController struct {
rm resourceManager
// helpers to validate against current loaded configuration
configHandler config.Interface
// receives stats and aggregates details
statusAggregator *PolicyStatusAggregator
// store to hold policy meta data for faster lookup
pMetaStore policystore.UpdateInterface
// policy violation generator
@@ -144,10 +141,6 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
//TODO: pass the time in seconds instead of converting it internally
pc.rm = NewResourceManager(30)

// aggregator
// pc.statusAggregator = NewPolicyStatAggregator(kyvernoClient, pInformer)
pc.statusAggregator = NewPolicyStatAggregator(kyvernoClient)

return &pc, nil
}

@@ -264,9 +257,6 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
for i := 0; i < workers; i++ {
go wait.Until(pc.worker, time.Second, stopCh)
}
// policy status aggregator
//TODO: workers required for aggergation
pc.statusAggregator.Run(1, stopCh)
<-stopCh
}

@@ -328,8 +318,6 @@ func (pc *PolicyController) syncPolicy(key string) error {
if err := pc.deleteNamespacedPolicyViolations(key); err != nil {
return err
}
// remove the recorded stats for the policy
pc.statusAggregator.RemovePolicyStats(key)

// remove webhook configurations if there are no policies
if err := pc.removeResourceWebhookConfiguration(); err != nil {
@@ -345,23 +333,12 @@ func (pc *PolicyController) syncPolicy(key string) error {

pc.resourceWebhookWatcher.RegisterResourceWebhook()

// cluster policy violations
cpvList, err := pc.getClusterPolicyViolationForPolicy(policy.Name)
if err != nil {
return err
}
// namespaced policy violation
nspvList, err := pc.getNamespacedPolicyViolationForPolicy(policy.Name)
if err != nil {
return err
}

// process policies on existing resources
engineResponses := pc.processExistingResources(*policy)
// report errors
pc.cleanupAndReport(engineResponses)
// sync active
return pc.syncStatusOnly(policy, cpvList, nspvList)

return nil
}

func (pc *PolicyController) deleteClusterPolicyViolations(policy string) error {
@@ -390,39 +367,6 @@ func (pc *PolicyController) deleteNamespacedPolicyViolations(policy string) erro
return nil
}

//syncStatusOnly updates the policy status subresource
func (pc *PolicyController) syncStatusOnly(p *kyverno.ClusterPolicy, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.PolicyViolation) error {
newStatus := pc.calculateStatus(p.Name, pvList, nspvList)
if reflect.DeepEqual(newStatus, p.Status) {
// no update to status
return nil
}
// update status
newPolicy := p
newPolicy.Status = newStatus
_, err := pc.kyvernoClient.KyvernoV1().ClusterPolicies().UpdateStatus(newPolicy)
return err
}

func (pc *PolicyController) calculateStatus(policyName string, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.PolicyViolation) kyverno.PolicyStatus {
violationCount := len(pvList) + len(nspvList)
status := kyverno.PolicyStatus{
ViolationCount: violationCount,
}
// get stats
stats := pc.statusAggregator.GetPolicyStats(policyName)
if !reflect.DeepEqual(stats, (PolicyStatInfo{})) {
status.RulesAppliedCount = stats.RulesAppliedCount
status.ResourcesBlockedCount = stats.ResourceBlocked
status.AvgExecutionTimeMutation = stats.MutationExecutionTime.String()
status.AvgExecutionTimeValidation = stats.ValidationExecutionTime.String()
status.AvgExecutionTimeGeneration = stats.GenerationExecutionTime.String()
// update rule stats
status.Rules = convertRules(stats.Rules)
}
return status
}

func (pc *PolicyController) getNamespacedPolicyViolationForPolicy(policy string) ([]*kyverno.PolicyViolation, error) {
policySelector, err := buildPolicyLabel(policy)
if err != nil {
@@ -458,19 +402,3 @@ func (r RealPVControl) DeleteClusterPolicyViolation(name string) error {
func (r RealPVControl) DeleteNamespacedPolicyViolation(ns, name string) error {
return r.Client.KyvernoV1().PolicyViolations(ns).Delete(name, &metav1.DeleteOptions{})
}

// convertRules converts the internal rule stats to one used in policy.stats struct
func convertRules(rules []RuleStatinfo) []kyverno.RuleStats {
var stats []kyverno.RuleStats
for _, r := range rules {
stat := kyverno.RuleStats{
Name: r.RuleName,
ExecutionTime: r.ExecutionTime.String(),
AppliedCount: r.RuleAppliedCount,
ViolationCount: r.RulesFailedCount,
MutationCount: r.MutationCount,
}
stats = append(stats, stat)
}
return stats
}
@@ -39,7 +39,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic

// apply the policy on each
glog.V(4).Infof("apply policy %s with resource version %s on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion())
engineResponse := applyPolicy(policy, resource, pc.statusAggregator)
engineResponse := applyPolicy(policy, resource)
// get engine response for mutation & validation independently
engineResponses = append(engineResponses, engineResponse...)
// post-processing, register the resource as processed
@@ -18,6 +18,10 @@ func (pc *PolicyController) cleanupAndReport(engineResponses []response.EngineRe
pc.eventGen.Add(eventInfos...)
// create policy violation
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
for i := range pvInfos {
pvInfos[i].FromSync = true
}

pc.pvGenerator.Add(pvInfos...)
// cleanup existing violations if any
// if there is any error in clean up, we dont re-queue the resource
@@ -1,210 +0,0 @@
package policy

import (
"sync"
"time"

"github.com/golang/glog"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
)

//PolicyStatusAggregator stores information abt aggregation
type PolicyStatusAggregator struct {
// time since we start aggregating the stats
startTime time.Time
// channel to receive stats
ch chan PolicyStat
//TODO: lock based on key, possibly sync.Map ?
//sync RW for policyData
mux sync.RWMutex
// stores aggregated stats for policy
policyData map[string]PolicyStatInfo
}

//NewPolicyStatAggregator returns a new policy status
func NewPolicyStatAggregator(client *kyvernoclient.Clientset) *PolicyStatusAggregator {
psa := PolicyStatusAggregator{
startTime: time.Now(),
ch: make(chan PolicyStat),
policyData: map[string]PolicyStatInfo{},
}
return &psa
}

//Run begins aggregator
func (psa *PolicyStatusAggregator) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.V(4).Info("Started aggregator for policy status stats")
defer func() {
glog.V(4).Info("Shutting down aggregator for policy status stats")
}()
for i := 0; i < workers; i++ {
go wait.Until(psa.process, time.Second, stopCh)
}
<-stopCh
}

func (psa *PolicyStatusAggregator) process() {
// As mutation and validation are handled separately
// ideally we need to combine the execution time from both for a policy
// but its tricky to detect here the type of rules policy contains
// so we dont combine the results, but instead compute the execution time for
// mutation & validation rules separately
for r := range psa.ch {
glog.V(4).Infof("received policy stats %v", r)
psa.aggregate(r)
}
}

func (psa *PolicyStatusAggregator) aggregate(ps PolicyStat) {
func() {
glog.V(4).Infof("write lock update policy %s", ps.PolicyName)
psa.mux.Lock()
}()
defer func() {
glog.V(4).Infof("write Unlock update policy %s", ps.PolicyName)
psa.mux.Unlock()
}()

if len(ps.Stats.Rules) == 0 {
glog.V(4).Infof("ignoring stats, as no rule was applied")
return
}

info, ok := psa.policyData[ps.PolicyName]
if !ok {
psa.policyData[ps.PolicyName] = ps.Stats
glog.V(4).Infof("added stats for policy %s", ps.PolicyName)
return
}
// aggregate policy information
info.RulesAppliedCount = info.RulesAppliedCount + ps.Stats.RulesAppliedCount
if ps.Stats.ResourceBlocked == 1 {
info.ResourceBlocked++
}
var zeroDuration time.Duration
if info.MutationExecutionTime != zeroDuration {
info.MutationExecutionTime = (info.MutationExecutionTime + ps.Stats.MutationExecutionTime) / 2
glog.V(4).Infof("updated avg mutation time %v", info.MutationExecutionTime)
} else {
info.MutationExecutionTime = ps.Stats.MutationExecutionTime
}
if info.ValidationExecutionTime != zeroDuration {
info.ValidationExecutionTime = (info.ValidationExecutionTime + ps.Stats.ValidationExecutionTime) / 2
glog.V(4).Infof("updated avg validation time %v", info.ValidationExecutionTime)
} else {
info.ValidationExecutionTime = ps.Stats.ValidationExecutionTime
}
if info.GenerationExecutionTime != zeroDuration {
info.GenerationExecutionTime = (info.GenerationExecutionTime + ps.Stats.GenerationExecutionTime) / 2
glog.V(4).Infof("updated avg generation time %v", info.GenerationExecutionTime)
} else {
info.GenerationExecutionTime = ps.Stats.GenerationExecutionTime
}
// aggregate rule details
info.Rules = aggregateRules(info.Rules, ps.Stats.Rules)
// update
psa.policyData[ps.PolicyName] = info
glog.V(4).Infof("updated stats for policy %s", ps.PolicyName)
}

func aggregateRules(old []RuleStatinfo, update []RuleStatinfo) []RuleStatinfo {
var zeroDuration time.Duration
searchRule := func(list []RuleStatinfo, key string) *RuleStatinfo {
for _, v := range list {
if v.RuleName == key {
return &v
}
}
return nil
}
newRules := []RuleStatinfo{}
// search for new rules in old rules and update it
for _, updateR := range update {
if updateR.ExecutionTime != zeroDuration {
if rule := searchRule(old, updateR.RuleName); rule != nil {
rule.ExecutionTime = (rule.ExecutionTime + updateR.ExecutionTime) / 2
rule.RuleAppliedCount = rule.RuleAppliedCount + updateR.RuleAppliedCount
rule.RulesFailedCount = rule.RulesFailedCount + updateR.RulesFailedCount
rule.MutationCount = rule.MutationCount + updateR.MutationCount
newRules = append(newRules, *rule)
} else {
newRules = append(newRules, updateR)
}
}
}
return newRules
}

//GetPolicyStats returns the policy stats
func (psa *PolicyStatusAggregator) GetPolicyStats(policyName string) PolicyStatInfo {
func() {
glog.V(4).Infof("read lock update policy %s", policyName)
psa.mux.RLock()
}()
defer func() {
glog.V(4).Infof("read Unlock update policy %s", policyName)
psa.mux.RUnlock()
}()
glog.V(4).Infof("read stats for policy %s", policyName)
return psa.policyData[policyName]
}

//RemovePolicyStats rmves policy stats records
func (psa *PolicyStatusAggregator) RemovePolicyStats(policyName string) {
func() {
glog.V(4).Infof("write lock update policy %s", policyName)
psa.mux.Lock()
}()
defer func() {
glog.V(4).Infof("write Unlock update policy %s", policyName)
psa.mux.Unlock()
}()
glog.V(4).Infof("removing stats for policy %s", policyName)
delete(psa.policyData, policyName)
}

//PolicyStatusInterface provides methods to modify policyStatus
type PolicyStatusInterface interface {
SendStat(stat PolicyStat)
// UpdateViolationCount(policyName string, pvList []*kyverno.PolicyViolation) error
}

//PolicyStat stored stats for policy
type PolicyStat struct {
PolicyName string
Stats PolicyStatInfo
}

//PolicyStatInfo provides statistics for policy
type PolicyStatInfo struct {
MutationExecutionTime time.Duration
ValidationExecutionTime time.Duration
GenerationExecutionTime time.Duration
RulesAppliedCount int
ResourceBlocked int
Rules []RuleStatinfo
}

//RuleStatinfo provides statistics for rule
type RuleStatinfo struct {
RuleName string
ExecutionTime time.Duration
RuleAppliedCount int
RulesFailedCount int
MutationCount int
}

//SendStat sends the stat information for aggregation
func (psa *PolicyStatusAggregator) SendStat(stat PolicyStat) {
glog.V(4).Infof("sending policy stats: %v", stat)
// Send over channel
psa.ch <- stat
}

//GetPolicyStatusAggregator returns interface to send policy status stats
func (pc *PolicyController) GetPolicyStatusAggregator() PolicyStatusInterface {
return pc.statusAggregator
}
pkg/policystatus/keyToMutex.go (new file, 31 lines)
@@ -0,0 +1,31 @@
package policystatus

import "sync"

// keyToMutex allows status to be updated
//for different policies at the same time
//while ensuring the status for same policies
//are updated one at a time.
type keyToMutex struct {
mu sync.RWMutex
keyMu map[string]*sync.RWMutex
}

func newKeyToMutex() *keyToMutex {
return &keyToMutex{
mu: sync.RWMutex{},
keyMu: make(map[string]*sync.RWMutex),
}
}

func (k *keyToMutex) Get(key string) *sync.RWMutex {
k.mu.Lock()
defer k.mu.Unlock()
mutex := k.keyMu[key]
if mutex == nil {
mutex = &sync.RWMutex{}
k.keyMu[key] = mutex
}

return mutex
}
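keyToMutex hands out one lock per policy name, so status updates for the same policy are serialized while updates for different policies proceed in parallel; this mirrors what updateStatusCache in pkg/policystatus/main.go below does. A minimal usage sketch follows; it is not part of this commit and the exampleUpdate helper is hypothetical.

package policystatus

// exampleUpdate serializes the read-modify-write of one policy's cached
// status without blocking updates for other policies.
func exampleUpdate(locks *keyToMutex, policyName string, update func()) {
	mu := locks.Get(policyName) // one *sync.RWMutex per policy name
	mu.Lock()
	defer mu.Unlock()
	update() // mutate the cached status for policyName here
}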
pkg/policystatus/main.go (new file, 146 lines)
@@ -0,0 +1,146 @@
package policystatus

import (
"encoding/json"
"sync"
"time"

"github.com/golang/glog"

"k8s.io/apimachinery/pkg/util/wait"

"github.com/nirmata/kyverno/pkg/client/clientset/versioned"

v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

// Policy status implementation works in the following way,
//Currently policy status maintains a cache of the status of
//each policy.
//Every x unit of time the status of policy is updated using
//the data from the cache.
//The sync exposes a listener which accepts a statusUpdater
//interface which dictates how the status should be updated.
//The status is updated by a worker that receives the interface
//on a channel.
//The worker then updates the current status using the methods
//exposed by the interface.
//Current implementation is designed to be threadsafe with optimised
//locking for each policy.

// statusUpdater defines a type to have a method which
//updates the given status
type statusUpdater interface {
PolicyName() string
UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus
}

type policyStore interface {
Get(policyName string) (*v1.ClusterPolicy, error)
}

type Listener chan statusUpdater

func (l Listener) Send(s statusUpdater) {
l <- s
}

// Sync is the object which is used to initialize
//the policyStatus sync, can be considered the parent object
//since it contains access to all the persistant data present
//in this package.
type Sync struct {
cache *cache
Listener Listener
client *versioned.Clientset
policyStore policyStore
}

type cache struct {
dataMu sync.RWMutex
data map[string]v1.PolicyStatus
keyToMutex *keyToMutex
}

func NewSync(c *versioned.Clientset, p policyStore) *Sync {
return &Sync{
cache: &cache{
dataMu: sync.RWMutex{},
data: make(map[string]v1.PolicyStatus),
keyToMutex: newKeyToMutex(),
},
client: c,
policyStore: p,
Listener: make(chan statusUpdater, 20),
}
}

func (s *Sync) Run(workers int, stopCh <-chan struct{}) {
for i := 0; i < workers; i++ {
go s.updateStatusCache(stopCh)
}

wait.Until(s.updatePolicyStatus, 2*time.Second, stopCh)
<-stopCh
}

// updateStatusCache is a worker which updates the current status
//using the statusUpdater interface
func (s *Sync) updateStatusCache(stopCh <-chan struct{}) {
for {
select {
case statusUpdater := <-s.Listener:
s.cache.keyToMutex.Get(statusUpdater.PolicyName()).Lock()

s.cache.dataMu.RLock()
status, exist := s.cache.data[statusUpdater.PolicyName()]
s.cache.dataMu.RUnlock()
if !exist {
policy, _ := s.policyStore.Get(statusUpdater.PolicyName())
if policy != nil {
status = policy.Status
}
}

updatedStatus := statusUpdater.UpdateStatus(status)

s.cache.dataMu.Lock()
s.cache.data[statusUpdater.PolicyName()] = updatedStatus
s.cache.dataMu.Unlock()

s.cache.keyToMutex.Get(statusUpdater.PolicyName()).Unlock()
oldStatus, _ := json.Marshal(status)
newStatus, _ := json.Marshal(updatedStatus)

glog.V(4).Infof("\nupdated status of policy - %v\noldStatus:\n%v\nnewStatus:\n%v\n", statusUpdater.PolicyName(), string(oldStatus), string(newStatus))
case <-stopCh:
return
}
}
}

// updatePolicyStatus updates the status in the policy resource definition
//from the status cache, syncing them
func (s *Sync) updatePolicyStatus() {
s.cache.dataMu.Lock()
var nameToStatus = make(map[string]v1.PolicyStatus, len(s.cache.data))
for k, v := range s.cache.data {
nameToStatus[k] = v
}
s.cache.dataMu.Unlock()

for policyName, status := range nameToStatus {
policy, err := s.policyStore.Get(policyName)
if err != nil {
continue
}
policy.Status = status
_, err = s.client.KyvernoV1().ClusterPolicies().UpdateStatus(policy)
if err != nil {
s.cache.dataMu.Lock()
delete(s.cache.data, policyName)
s.cache.dataMu.Unlock()
glog.V(4).Info(err)
}
}
}
pkg/policystatus/status_test.go (new file, 50 lines)
@@ -0,0 +1,50 @@
package policystatus

import (
"encoding/json"
"testing"
"time"

v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

type dummyStore struct {
}

func (d dummyStore) Get(policyName string) (*v1.ClusterPolicy, error) {
return &v1.ClusterPolicy{}, nil
}

type dummyStatusUpdater struct {
}

func (d dummyStatusUpdater) UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus {
status.RulesAppliedCount++
return status
}

func (d dummyStatusUpdater) PolicyName() string {
return "policy1"
}

func TestKeyToMutex(t *testing.T) {
expectedCache := `{"policy1":{"averageExecutionTime":"","rulesAppliedCount":100}}`

stopCh := make(chan struct{})
s := NewSync(nil, dummyStore{})
for i := 0; i < 100; i++ {
go s.updateStatusCache(stopCh)
}

for i := 0; i < 100; i++ {
go s.Listener.Send(dummyStatusUpdater{})
}

<-time.After(time.Second * 3)
stopCh <- struct{}{}

cacheRaw, _ := json.Marshal(s.cache.data)
if string(cacheRaw) != expectedCache {
t.Errorf("\nTestcase Failed\nGot:\n%v\nExpected:\n%v\n", string(cacheRaw), expectedCache)
}
}
@@ -96,6 +96,10 @@ func (ps *PolicyStore) ListAll() ([]kyverno.ClusterPolicy, error) {
return policies, nil
}

func (ps *PolicyStore) Get(policyName string) (*kyverno.ClusterPolicy, error) {
return ps.pLister.Get(policyName)
}

//UnRegister Remove policy information
func (ps *PolicyStore) UnRegister(policy kyverno.ClusterPolicy) error {
ps.mu.Lock()
@@ -9,6 +9,7 @@ import (
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/policystatus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -20,16 +21,20 @@ type clusterPV struct {
cpvLister kyvernolister.ClusterPolicyViolationLister
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// update policy stats with violationCount
policyStatusListener policystatus.Listener
}

func newClusterPV(dclient *client.Client,
cpvLister kyvernolister.ClusterPolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
policyStatus policystatus.Listener,
) *clusterPV {
cpv := clusterPV{
dclient: dclient,
cpvLister: cpvLister,
kyvernoInterface: kyvernoInterface,
dclient: dclient,
cpvLister: cpvLister,
kyvernoInterface: kyvernoInterface,
policyStatusListener: policyStatus,
}
return &cpv
}
@@ -93,6 +98,11 @@ func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
glog.V(4).Infof("failed to create Cluster Policy Violation: %v", err)
return err
}

if newPv.Annotations["fromSync"] != "true" {
cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}

glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
return nil
}
@@ -115,5 +125,8 @@ func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) err
}
glog.Infof("cluster policy violation updated for resource %v", newPv.Spec.ResourceSpec)

if newPv.Annotations["fromSync"] != "true" {
cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
return nil
}
@@ -7,6 +7,7 @@ import (
backoff "github.com/cenkalti/backoff"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -70,3 +71,27 @@ func converLabelToSelector(labelMap map[string]string) (labels.Selector, error)

return policyViolationSelector, nil
}

type violationCount struct {
policyName string
violatedRules []v1.ViolatedRule
}

func (vc violationCount) PolicyName() string {
return vc.policyName
}

func (vc violationCount) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {

var ruleNameToViolations = make(map[string]int)
for _, rule := range vc.violatedRules {
ruleNameToViolations[rule.Name]++
}

for i := range status.Rules {
status.ViolationCount += ruleNameToViolations[status.Rules[i].Name]
status.Rules[i].ViolationCount += ruleNameToViolations[status.Rules[i].Name]
}

return status
}
@@ -14,6 +14,7 @@ import (
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
"github.com/nirmata/kyverno/pkg/policystatus"

dclient "github.com/nirmata/kyverno/pkg/dclient"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -37,9 +38,10 @@ type Generator struct {
// returns true if the cluster policy store has been synced at least once
pvSynced cache.InformerSynced
// returns true if the namespaced cluster policy store has been synced at at least once
nspvSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
dataStore *dataStore
nspvSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
dataStore *dataStore
policyStatusListener policystatus.Listener
}

//NewDataStore returns an instance of data store
@@ -79,6 +81,7 @@ type Info struct {
PolicyName string
Resource unstructured.Unstructured
Rules []kyverno.ViolatedRule
FromSync bool
}

func (i Info) toKey() string {
@@ -103,16 +106,18 @@ type GeneratorInterface interface {
func NewPVGenerator(client *kyvernoclient.Clientset,
dclient *dclient.Client,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
nspvInformer kyvernoinformer.PolicyViolationInformer) *Generator {
nspvInformer kyvernoinformer.PolicyViolationInformer,
policyStatus policystatus.Listener) *Generator {
gen := Generator{
kyvernoInterface: client.KyvernoV1(),
dclient: dclient,
cpvLister: pvInformer.Lister(),
pvSynced: pvInformer.Informer().HasSynced,
nspvLister: nspvInformer.Lister(),
nspvSynced: nspvInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
dataStore: newDataStore(),
kyvernoInterface: client.KyvernoV1(),
dclient: dclient,
cpvLister: pvInformer.Lister(),
pvSynced: pvInformer.Informer().HasSynced,
nspvLister: nspvInformer.Lister(),
nspvSynced: nspvInformer.Informer().HasSynced,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
dataStore: newDataStore(),
policyStatusListener: policyStatus,
}
return &gen
}
@@ -219,15 +224,21 @@ func (gen *Generator) syncHandler(info Info) error {
builder := newPvBuilder()
if info.Resource.GetNamespace() == "" {
// cluster scope resource generate a clusterpolicy violation
handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface)
handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface, gen.policyStatusListener)
} else {
// namespaced resources generated a namespaced policy violation in the namespace of the resource
handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface)
handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface, gen.policyStatusListener)
}

failure := false
pv := builder.generate(info)

if info.FromSync {
pv.Annotations = map[string]string{
"fromSync": "true",
}
}

// Create Policy Violations
glog.V(3).Infof("Creating policy violation: %s", info.toKey())
if err := handler.create(pv); err != nil {
@@ -9,6 +9,7 @@ import (
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/policystatus"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -20,16 +21,20 @@ type namespacedPV struct {
nspvLister kyvernolister.PolicyViolationLister
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// update policy status with violationCount
policyStatusListener policystatus.Listener
}

func newNamespacedPV(dclient *client.Client,
nspvLister kyvernolister.PolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
policyStatus policystatus.Listener,
) *namespacedPV {
nspv := namespacedPV{
dclient: dclient,
nspvLister: nspvLister,
kyvernoInterface: kyvernoInterface,
dclient: dclient,
nspvLister: nspvLister,
kyvernoInterface: kyvernoInterface,
policyStatusListener: policyStatus,
}
return &nspv
}
@@ -92,6 +97,10 @@ func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
glog.V(4).Infof("failed to create Cluster Policy Violation: %v", err)
return err
}

if newPv.Annotations["fromSync"] != "true" {
nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
return nil
}
@@ -112,6 +121,9 @@ func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error
return fmt.Errorf("failed to update namespaced policy violation: %v", err)
}

if newPv.Annotations["fromSync"] != "true" {
nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
}
glog.Infof("namespaced policy violation updated for resource %v", newPv.Spec.ResourceSpec)
return nil
}
pkg/policyviolation/policyStatus_test.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package policyviolation

import (
"encoding/json"
"reflect"
"testing"

v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

func Test_Stats(t *testing.T) {
testCase := struct {
violationCountStats []struct {
policyName string
violatedRules []v1.ViolatedRule
}
expectedOutput []byte
existingCache map[string]v1.PolicyStatus
}{
existingCache: map[string]v1.PolicyStatus{
"policy1": {
Rules: []v1.RuleStats{
{
Name: "rule4",
},
},
},
"policy2": {
Rules: []v1.RuleStats{
{
Name: "rule4",
},
},
},
},
expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"","violationCount":1,"ruleStatus":[{"ruleName":"rule4","violationCount":1}]},"policy2":{"averageExecutionTime":"","violationCount":1,"ruleStatus":[{"ruleName":"rule4","violationCount":1}]}}`),
violationCountStats: []struct {
policyName string
violatedRules []v1.ViolatedRule
}{
{
policyName: "policy1",
violatedRules: []v1.ViolatedRule{
{
Name: "rule4",
},
},
},
{
policyName: "policy2",
violatedRules: []v1.ViolatedRule{
{
Name: "rule4",
},
},
},
},
}

policyNameToStatus := testCase.existingCache

for _, violationCountStat := range testCase.violationCountStats {
receiver := &violationCount{
policyName: violationCountStat.policyName,
violatedRules: violationCountStat.violatedRules,
}
policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
}

output, _ := json.Marshal(policyNameToStatus)
if !reflect.DeepEqual(output, testCase.expectedOutput) {
t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
}
}
@ -1,8 +1,13 @@
|
|||
package webhooks
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
"github.com/nirmata/kyverno/pkg/engine"
|
||||
"github.com/nirmata/kyverno/pkg/engine/context"
|
||||
"github.com/nirmata/kyverno/pkg/engine/response"
|
||||
|
@ -61,6 +66,9 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
|
|||
if len(engineResponse.PolicyResponse.Rules) > 0 {
|
||||
// some generate rules do apply to the resource
|
||||
engineResponses = append(engineResponses, engineResponse)
|
||||
ws.statusListener.Send(generateStats{
|
||||
resp: engineResponse,
|
||||
})
|
||||
}
|
||||
}
|
||||
// Adds Generate Request to a channel(queue size 1000) to generators
|
||||
|
@@ -102,3 +110,73 @@ func transform(userRequestInfo kyverno.RequestInfo, er response.EngineResponse)
	}
	return gr
}

type generateStats struct {
	resp response.EngineResponse
}

func (gs generateStats) PolicyName() string {
	return gs.resp.PolicyResponse.Policy
}

func (gs generateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
	if reflect.DeepEqual(response.EngineResponse{}, gs.resp) {
		return status
	}

	var nameToRule = make(map[string]v1.RuleStats)
	for _, rule := range status.Rules {
		nameToRule[rule.Name] = rule
	}

	for _, rule := range gs.resp.PolicyResponse.Rules {
		ruleStat := nameToRule[rule.Name]
		ruleStat.Name = rule.Name

		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
		ruleStat.ExecutionTime = updateAverageTime(
			rule.ProcessingTime,
			ruleStat.ExecutionTime,
			averageOver).String()

		if rule.Success {
			status.RulesAppliedCount++
			ruleStat.AppliedCount++
		} else {
			status.RulesFailedCount++
			ruleStat.FailedCount++
		}

		nameToRule[rule.Name] = ruleStat
	}

	var policyAverageExecutionTime time.Duration
	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
	for _, ruleStat := range nameToRule {
		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
		if err == nil {
			policyAverageExecutionTime += executionTime
		}
		ruleStats = append(ruleStats, ruleStat)
	}

	sort.Slice(ruleStats, func(i, j int) bool {
		return ruleStats[i].Name < ruleStats[j].Name
	})

	status.AvgExecutionTime = policyAverageExecutionTime.String()
	status.Rules = ruleStats

	return status
}

func updateAverageTime(newTime time.Duration, oldAverageTimeString string, averageOver int64) time.Duration {
	if averageOver == 0 {
		return newTime
	}
	oldAverageExecutionTime, _ := time.ParseDuration(oldAverageTimeString)
	numerator := (oldAverageExecutionTime.Nanoseconds() * averageOver) + newTime.Nanoseconds()
	denominator := averageOver + 1
	newAverageTimeInNanoSeconds := numerator / denominator
	return time.Duration(newAverageTimeInNanoSeconds) * time.Nanosecond
}

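Aside: updateAverageTime above is a standard incremental (running) mean over per-rule execution times, weighting the previous average by the number of executions it already covers. A self-contained sketch of the same arithmetic; the helper name runningAverage is illustrative only and not part of this patch:

```go
package main

import (
	"fmt"
	"time"
)

// runningAverage reproduces the arithmetic in updateAverageTime:
// newAvg = (oldAvg*n + sample) / (n + 1), computed in nanoseconds.
func runningAverage(oldAvg time.Duration, n int64, sample time.Duration) time.Duration {
	if n == 0 {
		return sample
	}
	return time.Duration((oldAvg.Nanoseconds()*n + sample.Nanoseconds()) / (n + 1))
}

func main() {
	avg := runningAverage(0, 0, 243*time.Nanosecond)  // first sample: 243ns
	avg = runningAverage(avg, 1, 251*time.Nanosecond) // (243 + 251) / 2 = 247ns
	fmt.Println(avg)                                  // prints 247ns
}
```
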
@@ -1,19 +1,20 @@
package webhooks

import (
	"reflect"
	"sort"
	"time"

	"github.com/nirmata/kyverno/pkg/openapi"

	"github.com/golang/glog"
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/engine"
	"github.com/nirmata/kyverno/pkg/engine/context"
	"github.com/nirmata/kyverno/pkg/engine/response"
	engineutils "github.com/nirmata/kyverno/pkg/engine/utils"
	policyctr "github.com/nirmata/kyverno/pkg/policy"
	"github.com/nirmata/kyverno/pkg/policyviolation"
	"github.com/nirmata/kyverno/pkg/utils"
	v1beta1 "k8s.io/api/admission/v1beta1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

@@ -25,40 +26,6 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
		request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)

	var patches [][]byte
	var policyStats []policyctr.PolicyStat

	// gather stats from the engine response
	gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
		ps := policyctr.PolicyStat{}
		ps.PolicyName = policyName
		ps.Stats.MutationExecutionTime = policyResponse.ProcessingTime
		ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
		// capture rule level stats
		for _, rule := range policyResponse.Rules {
			rs := policyctr.RuleStatinfo{}
			rs.RuleName = rule.Name
			rs.ExecutionTime = rule.RuleStats.ProcessingTime
			if rule.Success {
				rs.RuleAppliedCount++
			} else {
				rs.RulesFailedCount++
			}
			if rule.Patches != nil {
				rs.MutationCount++
			}
			ps.Stats.Rules = append(ps.Stats.Rules, rs)
		}
		policyStats = append(policyStats, ps)
	}
	// send stats for aggregation
	sendStat := func(blocked bool) {
		for _, stat := range policyStats {
			stat.Stats.ResourceBlocked = utils.Btoi(blocked)
			//SEND
			ws.policyStatus.SendStat(stat)
		}
	}

	var engineResponses []response.EngineResponse

	userRequestInfo := kyverno.RequestInfo{
@@ -93,12 +60,10 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
	for _, policy := range policies {
		glog.V(2).Infof("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
			resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation)

		policyContext.Policy = policy
		engineResponse := engine.Mutate(policyContext)
		engineResponses = append(engineResponses, engineResponse)
		// Gather policy application statistics
		gatherStat(policy.Name, engineResponse.PolicyResponse)
		ws.statusListener.Send(mutateStats{resp: engineResponse})
		if !engineResponse.IsSuccesful() {
			glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, resource.GetNamespace(), resource.GetName())
			continue
@@ -138,8 +103,6 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
	events := generateEvents(engineResponses, false, (request.Operation == v1beta1.Update))
	ws.eventGen.Add(events...)

	sendStat(false)

	// debug info
	func() {
		if len(patches) != 0 {
@@ -159,3 +122,64 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
	// patches holds all the successful patches, if no patch is created, it returns nil
	return engineutils.JoinPatches(patches)
}

type mutateStats struct {
	resp response.EngineResponse
}

func (ms mutateStats) PolicyName() string {
	return ms.resp.PolicyResponse.Policy
}

func (ms mutateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
	if reflect.DeepEqual(response.EngineResponse{}, ms.resp) {
		return status
	}

	var nameToRule = make(map[string]v1.RuleStats)
	for _, rule := range status.Rules {
		nameToRule[rule.Name] = rule
	}

	for _, rule := range ms.resp.PolicyResponse.Rules {
		ruleStat := nameToRule[rule.Name]
		ruleStat.Name = rule.Name

		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
		ruleStat.ExecutionTime = updateAverageTime(
			rule.ProcessingTime,
			ruleStat.ExecutionTime,
			averageOver).String()

		if rule.Success {
			status.RulesAppliedCount++
			status.ResourcesMutatedCount++
			ruleStat.AppliedCount++
			ruleStat.ResourcesMutatedCount++
		} else {
			status.RulesFailedCount++
			ruleStat.FailedCount++
		}

		nameToRule[rule.Name] = ruleStat
	}

	var policyAverageExecutionTime time.Duration
	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
	for _, ruleStat := range nameToRule {
		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
		if err == nil {
			policyAverageExecutionTime += executionTime
		}
		ruleStats = append(ruleStats, ruleStat)
	}

	sort.Slice(ruleStats, func(i, j int) bool {
		return ruleStats[i].Name < ruleStats[j].Name
	})

	status.AvgExecutionTime = policyAverageExecutionTime.String()
	status.Rules = ruleStats

	return status
}

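Aside: the webhook handlers now report stats by calling ws.statusListener.Send with small value types (generateStats, mutateStats, validateStats) that each expose PolicyName() and UpdateStatus(...). The sketch below shows the shape of the contract these types appear to satisfy; policyStatusUpdater and listenerSketch are illustrative names, not the actual pkg/policystatus API:

```go
// Assumed contract, inferred from the stats types above; the real
// definitions live in pkg/policystatus and may differ in detail.
type policyStatusUpdater interface {
	PolicyName() string
	UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus
}

// Minimal listener sketch: webhook handlers call Send, and a background
// sync loop drains the channel and folds each update into the stored
// status for that policy (only the sending side is shown here).
type listenerSketch chan policyStatusUpdater

func (l listenerSketch) Send(update policyStatusUpdater) {
	l <- update
}
```
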
pkg/webhooks/policyStatus_test.go (new file, 211 lines)
@@ -0,0 +1,211 @@
package webhooks

import (
	"encoding/json"
	"reflect"
	"testing"
	"time"

	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/engine/response"
)

func Test_GenerateStats(t *testing.T) {
	testCase := struct {
		generateStats  []response.EngineResponse
		expectedOutput []byte
	}{
		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule5","averageExecutionTime":"243ns","appliedCount":1},{"ruleName":"rule6","averageExecutionTime":"251ns","failedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule5","averageExecutionTime":"222ns","appliedCount":1},{"ruleName":"rule6","averageExecutionTime":"211ns","failedCount":1}]}}`),
		generateStats: []response.EngineResponse{
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy1",
					Rules: []response.RuleResponse{
						{
							Name:    "rule5",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 243,
							},
						},
						{
							Name:    "rule6",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 251,
							},
						},
					},
				},
			},
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy2",
					Rules: []response.RuleResponse{
						{
							Name:    "rule5",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 222,
							},
						},
						{
							Name:    "rule6",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 211,
							},
						},
					},
				},
			},
		},
	}

	policyNameToStatus := map[string]v1.PolicyStatus{}

	for _, generateStat := range testCase.generateStats {
		receiver := generateStats{
			resp: generateStat,
		}
		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
	}

	output, _ := json.Marshal(policyNameToStatus)
	if !reflect.DeepEqual(output, testCase.expectedOutput) {
		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
	}
}

func Test_MutateStats(t *testing.T) {
	testCase := struct {
		mutateStats    []response.EngineResponse
		expectedOutput []byte
	}{
		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesMutatedCount":1,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"243ns","appliedCount":1,"resourcesMutatedCount":1},{"ruleName":"rule2","averageExecutionTime":"251ns","failedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesMutatedCount":1,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"222ns","appliedCount":1,"resourcesMutatedCount":1},{"ruleName":"rule2","averageExecutionTime":"211ns","failedCount":1}]}}`),
		mutateStats: []response.EngineResponse{
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy1",
					Rules: []response.RuleResponse{
						{
							Name:    "rule1",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 243,
							},
						},
						{
							Name:    "rule2",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 251,
							},
						},
					},
				},
			},
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy2",
					Rules: []response.RuleResponse{
						{
							Name:    "rule1",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 222,
							},
						},
						{
							Name:    "rule2",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 211,
							},
						},
					},
				},
			},
		},
	}

	policyNameToStatus := map[string]v1.PolicyStatus{}
	for _, mutateStat := range testCase.mutateStats {
		receiver := mutateStats{
			resp: mutateStat,
		}
		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
	}

	output, _ := json.Marshal(policyNameToStatus)
	if !reflect.DeepEqual(output, testCase.expectedOutput) {
		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
	}
}

func Test_ValidateStats(t *testing.T) {
	testCase := struct {
		validateStats  []response.EngineResponse
		expectedOutput []byte
	}{
		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesBlockedCount":1,"ruleStatus":[{"ruleName":"rule3","averageExecutionTime":"243ns","appliedCount":1},{"ruleName":"rule4","averageExecutionTime":"251ns","failedCount":1,"resourcesBlockedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule3","averageExecutionTime":"222ns","appliedCount":1},{"ruleName":"rule4","averageExecutionTime":"211ns","failedCount":1}]}}`),
		validateStats: []response.EngineResponse{
			{
				PolicyResponse: response.PolicyResponse{
					Policy:                  "policy1",
					ValidationFailureAction: "enforce",
					Rules: []response.RuleResponse{
						{
							Name:    "rule3",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 243,
							},
						},
						{
							Name:    "rule4",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 251,
							},
						},
					},
				},
			},
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy2",
					Rules: []response.RuleResponse{
						{
							Name:    "rule3",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 222,
							},
						},
						{
							Name:    "rule4",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 211,
							},
						},
					},
				},
			},
		},
	}

	policyNameToStatus := map[string]v1.PolicyStatus{}
	for _, validateStat := range testCase.validateStats {
		receiver := validateStats{
			resp: validateStat,
		}
		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
	}

	output, _ := json.Marshal(policyNameToStatus)
	if !reflect.DeepEqual(output, testCase.expectedOutput) {
		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
	}
}

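Aside: the tests above compare the marshalled JSON byte-for-byte, which also pins the field order and omitempty behaviour of v1.PolicyStatus. If a looser comparison were ever preferred, one option is to decode both sides into the typed map and compare structs. A sketch, where assertStatusEqual is a hypothetical helper and not part of this patch; it relies only on the imports already present in the test file:

```go
// assertStatusEqual compares decoded statuses instead of raw JSON bytes,
// so the check does not depend on JSON key ordering.
func assertStatusEqual(t *testing.T, gotJSON, wantJSON []byte) {
	var got, want map[string]v1.PolicyStatus
	if err := json.Unmarshal(gotJSON, &got); err != nil {
		t.Fatalf("unmarshal got: %v", err)
	}
	if err := json.Unmarshal(wantJSON, &want); err != nil {
		t.Fatalf("unmarshal want: %v", err)
	}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("status mismatch:\nexpected: %+v\ngot: %+v", want, got)
	}
}
```
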
@@ -18,7 +18,7 @@ import (
	"github.com/nirmata/kyverno/pkg/config"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/policy"
	"github.com/nirmata/kyverno/pkg/policystatus"
	"github.com/nirmata/kyverno/pkg/policystore"
	"github.com/nirmata/kyverno/pkg/policyviolation"
	tlsutils "github.com/nirmata/kyverno/pkg/tls"

@@ -55,7 +55,7 @@ type WebhookServer struct {
	// webhook registration client
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
	// API to send policy stats for aggregation
	policyStatus policy.PolicyStatusInterface
	statusListener policystatus.Listener
	// helpers to validate against current loaded configuration
	configHandler config.Interface
	// channel for cleanup notification

@@ -82,7 +82,7 @@ func NewWebhookServer(
	crbInformer rbacinformer.ClusterRoleBindingInformer,
	eventGen event.Interface,
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
	policyStatus policy.PolicyStatusInterface,
	statusSync policystatus.Listener,
	configHandler config.Interface,
	pMetaStore policystore.LookupInterface,
	pvGenerator policyviolation.GeneratorInterface,

@@ -112,7 +112,7 @@ func NewWebhookServer(
	crbSynced: crbInformer.Informer().HasSynced,
	eventGen: eventGen,
	webhookRegistrationClient: webhookRegistrationClient,
	policyStatus: policyStatus,
	statusListener: statusSync,
	configHandler: configHandler,
	cleanUp: cleanUp,
	lastReqTime: resourceWebhookWatcher.LastReqTime,

@@ -2,16 +2,16 @@ package webhooks

import (
	"reflect"
	"sort"
	"time"

	"github.com/golang/glog"
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/engine"
	"github.com/nirmata/kyverno/pkg/engine/context"
	"github.com/nirmata/kyverno/pkg/engine/response"
	policyctr "github.com/nirmata/kyverno/pkg/policy"
	"github.com/nirmata/kyverno/pkg/policyviolation"
	"github.com/nirmata/kyverno/pkg/utils"
	v1beta1 "k8s.io/api/admission/v1beta1"
)

@@ -22,36 +22,7 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
	glog.V(4).Infof("Receive request in validating webhook: Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
		request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)

	var policyStats []policyctr.PolicyStat
	evalTime := time.Now()
	// gather stats from the engine response
	gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
		ps := policyctr.PolicyStat{}
		ps.PolicyName = policyName
		ps.Stats.ValidationExecutionTime = policyResponse.ProcessingTime
		ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
		// capture rule level stats
		for _, rule := range policyResponse.Rules {
			rs := policyctr.RuleStatinfo{}
			rs.RuleName = rule.Name
			rs.ExecutionTime = rule.RuleStats.ProcessingTime
			if rule.Success {
				rs.RuleAppliedCount++
			} else {
				rs.RulesFailedCount++
			}
			ps.Stats.Rules = append(ps.Stats.Rules, rs)
		}
		policyStats = append(policyStats, ps)
	}
	// send stats for aggregation
	sendStat := func(blocked bool) {
		for _, stat := range policyStats {
			stat.Stats.ResourceBlocked = utils.Btoi(blocked)
			//SEND
			ws.policyStatus.SendStat(stat)
		}
	}

	// Get new and old resource
	newR, oldR, err := extractResources(patchedResource, request)
@@ -100,8 +71,9 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
			continue
		}
		engineResponses = append(engineResponses, engineResponse)
		// Gather policy application statistics
		gatherStat(policy.Name, engineResponse.PolicyResponse)
		ws.statusListener.Send(validateStats{
			resp: engineResponse,
		})
		if !engineResponse.IsSuccesful() {
			glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, newR.GetNamespace(), newR.GetName())
			continue
@@ -129,9 +101,6 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
	ws.eventGen.Add(events...)
	if blocked {
		glog.V(4).Infof("resource %s/%s/%s is blocked\n", newR.GetKind(), newR.GetNamespace(), newR.GetName())
		sendStat(true)
		// EVENTS
		// - event on the Policy
		return false, getEnforceFailureErrorMsg(engineResponses)
	}

@@ -139,8 +108,70 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
	// violations are created with resource on "audit"
	pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
	ws.pvGenerator.Add(pvInfos...)
	sendStat(false)
	// report time end
	glog.V(4).Infof("report: %v %s/%s/%s", time.Since(reportTime), request.Kind, request.Namespace, request.Name)
	return true, ""
}

type validateStats struct {
	resp response.EngineResponse
}

func (vs validateStats) PolicyName() string {
	return vs.resp.PolicyResponse.Policy
}

func (vs validateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
	if reflect.DeepEqual(response.EngineResponse{}, vs.resp) {
		return status
	}

	var nameToRule = make(map[string]v1.RuleStats)
	for _, rule := range status.Rules {
		nameToRule[rule.Name] = rule
	}

	for _, rule := range vs.resp.PolicyResponse.Rules {
		ruleStat := nameToRule[rule.Name]
		ruleStat.Name = rule.Name

		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
		ruleStat.ExecutionTime = updateAverageTime(
			rule.ProcessingTime,
			ruleStat.ExecutionTime,
			averageOver).String()

		if rule.Success {
			status.RulesAppliedCount++
			ruleStat.AppliedCount++
		} else {
			status.RulesFailedCount++
			ruleStat.FailedCount++
			if vs.resp.PolicyResponse.ValidationFailureAction == "enforce" {
				status.ResourcesBlockedCount++
				ruleStat.ResourcesBlockedCount++
			}
		}

		nameToRule[rule.Name] = ruleStat
	}

	var policyAverageExecutionTime time.Duration
	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
	for _, ruleStat := range nameToRule {
		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
		if err == nil {
			policyAverageExecutionTime += executionTime
		}
		ruleStats = append(ruleStats, ruleStat)
	}

	sort.Slice(ruleStats, func(i, j int) bool {
		return ruleStats[i].Name < ruleStats[j].Name
	})

	status.AvgExecutionTime = policyAverageExecutionTime.String()
	status.Rules = ruleStats

	return status
}

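Aside: validateStats differs from mutateStats in that a failed rule is additionally counted as a blocked resource, but only when the policy's validationFailureAction is "enforce". A small illustration using the types defined above; the policy and rule names are hypothetical and the function exists only for this sketch:

```go
// exampleBlockedCounting shows that the same failed rule bumps
// ResourcesBlockedCount under "enforce" but not under "audit".
func exampleBlockedCounting() (enforceStatus, auditStatus kyverno.PolicyStatus) {
	failed := response.PolicyResponse{
		Policy:                  "example-policy",
		ValidationFailureAction: "enforce",
		Rules:                   []response.RuleResponse{{Name: "check-ns", Success: false}},
	}
	enforceStatus = validateStats{resp: response.EngineResponse{PolicyResponse: failed}}.UpdateStatus(kyverno.PolicyStatus{})

	failed.ValidationFailureAction = "audit"
	auditStatus = validateStats{resp: response.EngineResponse{PolicyResponse: failed}}.UpdateStatus(kyverno.PolicyStatus{})

	// enforceStatus.ResourcesBlockedCount == 1, auditStatus.ResourcesBlockedCount == 0
	return enforceStatus, auditStatus
}
```
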
@@ -11,6 +11,14 @@ apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: disallow-default-namespace
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
    policies.kyverno.io/category: Workload Isolation
    policies.kyverno.io/description: Kubernetes namespaces are an optional feature
      that provide a way to segment and isolate cluster resources across multiple
      applications and users. As a best practice, workloads should be isolated with
      namespaces. Namespaces should be required and the default (empty) namespace
      should not be used.
spec:
  rules:
  - name: validate-namespace

@@ -33,4 +41,30 @@ spec:
        pattern:
          metadata:
            namespace: "?*"
  - name: validate-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "Using 'default' namespace is not allowed for podcontrollers"
      pattern:
        metadata:
          namespace: "!default"
  - name: require-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "A namespace is required for podcontrollers"
      pattern:
        metadata:
          namespace: "?*"
````

@@ -3,6 +3,7 @@ kind: ClusterPolicy
metadata:
  name: disallow-default-namespace
  annotations:
    pod-policies.kyverno.io/autogen-controllers: none
    policies.kyverno.io/category: Workload Isolation
    policies.kyverno.io/description: Kubernetes namespaces are an optional feature
      that provide a way to segment and isolate cluster resources across multiple

@@ -31,4 +32,30 @@ spec:
        pattern:
          metadata:
            namespace: "?*"
  - name: validate-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "Using 'default' namespace is not allowed for podcontrollers"
      pattern:
        metadata:
          namespace: "!default"
  - name: require-podcontroller-namespace
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
    validate:
      message: "A namespace is required for podcontrollers"
      pattern:
        metadata:
          namespace: "?*"