Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-31 03:45:17 +00:00)

Commit e6e5bbb603: Merge branch 'master' into access_check

43 changed files with 2247 additions and 483 deletions
cmd/cli/kubectl-kyverno/main.go (new file, 9 lines)

@@ -0,0 +1,9 @@
package main

import (
    "github.com/nirmata/kyverno/pkg/kyverno"
)

func main() {
    kyverno.CLI()
}
@@ -7,6 +7,8 @@ import (
 	"os"
 	"time"
 
+	"github.com/nirmata/kyverno/pkg/openapi"
+
 	"github.com/nirmata/kyverno/pkg/checker"
 	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
 	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
@@ -16,6 +18,7 @@ import (
 	"github.com/nirmata/kyverno/pkg/generate"
 	generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup"
 	"github.com/nirmata/kyverno/pkg/policy"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	"github.com/nirmata/kyverno/pkg/policystore"
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 	"github.com/nirmata/kyverno/pkg/signal"
@@ -167,12 +170,18 @@ func main() {
 		pInformer.Kyverno().V1().ClusterPolicies(),
 		log.Log.WithName("EventGenerator"))
 
+	// Policy Status Handler - deals with all logic related to policy status
+	statusSync := policystatus.NewSync(
+		pclient,
+		policyMetaStore)
+
 	// POLICY VIOLATION GENERATOR
 	// -- generate policy violation
 	pvgen := policyviolation.NewPVGenerator(pclient,
 		client,
 		pInformer.Kyverno().V1().ClusterPolicyViolations(),
 		pInformer.Kyverno().V1().PolicyViolations(),
+		statusSync.Listener,
 		log.Log.WithName("PolicyViolationGenerator"),
 	)
 
@@ -211,6 +220,7 @@ func main() {
 		egen,
 		pvgen,
 		kubedynamicInformer,
+		statusSync.Listener,
 		log.Log.WithName("GenerateController"),
 	)
 	// GENERATE REQUEST CLEANUP
@@ -241,6 +251,9 @@ func main() {
 		os.Exit(1)
 	}
 
+	// Sync openAPI definitions of resources
+	openApiSync := openapi.NewCRDSync(client)
+
 	// WEBHOOOK
 	// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
 	// - reports the results based on the response from the policy engine:
@@ -256,7 +269,7 @@ func main() {
 		kubeInformer.Rbac().V1().ClusterRoleBindings(),
 		egen,
 		webhookRegistrationClient,
-		pc.GetPolicyStatusAggregator(),
+		statusSync.Listener,
 		configData,
 		policyMetaStore,
 		pvgen,
@@ -282,6 +295,8 @@ func main() {
 	go grc.Run(1, stopCh)
 	go grcc.Run(1, stopCh)
 	go pvgen.Run(1, stopCh)
+	go statusSync.Run(1, stopCh)
+	go openApiSync.Run(1, stopCh)
 
 	// verifys if the admission control is enabled and active
 	// resync: 60 seconds
@@ -227,21 +227,24 @@ type CloneFrom struct {
 	Name string `json:"name,omitempty"`
 }
 
-//PolicyStatus provides status for violations
+// PolicyStatus mostly contains statistics related to policy
 type PolicyStatus struct {
-	ViolationCount int `json:"violationCount"`
 	// average time required to process the policy rules on a resource
 	AvgExecutionTime string `json:"averageExecutionTime"`
+	// number of violations created by this policy
+	ViolationCount int `json:"violationCount,omitempty"`
+	// Count of rules that failed
+	RulesFailedCount int `json:"rulesFailedCount,omitempty"`
 	// Count of rules that were applied
-	RulesAppliedCount int `json:"rulesAppliedCount"`
-	// Count of resources for whom update/create api requests were blocked as the resoruce did not satisfy the policy rules
-	ResourcesBlockedCount int `json:"resourcesBlockedCount"`
-	// average time required to process the policy Mutation rules on a resource
-	AvgExecutionTimeMutation string `json:"averageMutationRulesExecutionTime"`
-	// average time required to process the policy Validation rules on a resource
-	AvgExecutionTimeValidation string `json:"averageValidationRulesExecutionTime"`
-	// average time required to process the policy Validation rules on a resource
-	AvgExecutionTimeGeneration string `json:"averageGenerationRulesExecutionTime"`
-	// statistics per rule
-	Rules []RuleStats `json:"ruleStatus"`
+	RulesAppliedCount int `json:"rulesAppliedCount,omitempty"`
+	// Count of resources that were blocked for failing a validate, across all rules
+	ResourcesBlockedCount int `json:"resourcesBlockedCount,omitempty"`
+	// Count of resources that were successfully mutated, across all rules
+	ResourcesMutatedCount int `json:"resourcesMutatedCount,omitempty"`
+	// Count of resources that were successfully generated, across all rules
+	ResourcesGeneratedCount int `json:"resourcesGeneratedCount,omitempty"`
+
+	Rules []RuleStats `json:"ruleStatus,omitempty"`
 }
 
 //RuleStats provides status per rule
@@ -249,13 +252,19 @@ type RuleStats struct {
 	// Rule name
 	Name string `json:"ruleName"`
 	// average time require to process the rule
-	ExecutionTime string `json:"averageExecutionTime"`
-	// Count of rules that were applied
-	AppliedCount int `json:"appliedCount"`
+	ExecutionTime string `json:"averageExecutionTime,omitempty"`
 	// number of violations created by this rule
-	ViolationCount int `json:"violationCount"`
-	// Count of mutations
-	MutationCount int `json:"mutationsCount"`
+	ViolationCount int `json:"violationCount,omitempty"`
+	// Count of rules that failed
+	FailedCount int `json:"failedCount,omitempty"`
+	// Count of rules that were applied
+	AppliedCount int `json:"appliedCount,omitempty"`
+	// Count of resources for whom update/create api requests were blocked as the resource did not satisfy the policy rules
+	ResourcesBlockedCount int `json:"resourcesBlockedCount,omitempty"`
+	// Count of resources that were successfully mutated
+	ResourcesMutatedCount int `json:"resourcesMutatedCount,omitempty"`
+	// Count of resources that were successfully generated
+	ResourcesGeneratedCount int `json:"resourcesGeneratedCount,omitempty"`
 }
 
 // PolicyList is a list of Policy resources
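An aside on the new tags (not part of the commit): switching the counters to omitempty means zero-valued statistics drop out of the serialized status, which the policyStatus_test.go expected output later in this diff relies on. A minimal sketch, assuming the v1 types above:

    package main

    import (
        "encoding/json"
        "fmt"

        v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    )

    func main() {
        status := v1.PolicyStatus{
            RulesAppliedCount: 3,
            Rules:             []v1.RuleStats{{Name: "check-labels", AppliedCount: 3}},
        }
        out, _ := json.Marshal(status)
        // averageExecutionTime has no omitempty, so it stays even when empty;
        // every zero-valued counter disappears from the output.
        fmt.Println(string(out))
        // {"averageExecutionTime":"","rulesAppliedCount":3,"ruleStatus":[{"ruleName":"check-labels","appliedCount":3}]}
    }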
@@ -6,6 +6,7 @@ import (
 	"time"
 
 	"github.com/go-logr/logr"
+	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
 	"github.com/nirmata/kyverno/pkg/config"
 	apps "k8s.io/api/apps/v1"
 	certificates "k8s.io/api/certificates/v1beta1"
@@ -217,6 +218,7 @@ func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSign
 type IDiscovery interface {
 	GetGVRFromKind(kind string) schema.GroupVersionResource
 	GetServerVersion() (*version.Info, error)
+	OpenAPISchema() (*openapi_v2.Document, error)
 }
 
 // SetDiscovery sets the discovery client implementation
@@ -250,6 +252,10 @@ func (c ServerPreferredResources) Poll(resync time.Duration, stopCh <-chan struc
 	}
 }
 
+func (c ServerPreferredResources) OpenAPISchema() (*openapi_v2.Document, error) {
+	return c.cachedClient.OpenAPISchema()
+}
+
 //GetGVRFromKind get the Group Version Resource from kind
 // if kind is not found in first attempt we invalidate the cache,
 // the retry will then fetch the new registered resources and check again
@@ -3,6 +3,8 @@ package client
 import (
 	"strings"
 
+	openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
+
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
@@ -74,6 +76,10 @@ func (c *fakeDiscoveryClient) GetGVRFromKind(kind string) schema.GroupVersionRes
 	return c.getGVR(resource)
 }
 
+func (c *fakeDiscoveryClient) OpenAPISchema() (*openapi_v2.Document, error) {
+	return nil, nil
+}
+
 func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured {
 	return &unstructured.Unstructured{
 		Object: map[string]interface{}{
pkg/engine/forceMutate.go (new file, 108 lines)

@@ -0,0 +1,108 @@
package engine

import (
    "encoding/json"
    "fmt"
    "regexp"

    kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    "github.com/nirmata/kyverno/pkg/engine/context"
    "github.com/nirmata/kyverno/pkg/engine/mutate"
    "github.com/nirmata/kyverno/pkg/engine/response"
    "github.com/nirmata/kyverno/pkg/engine/utils"
    "github.com/nirmata/kyverno/pkg/engine/variables"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "sigs.k8s.io/controller-runtime/pkg/log"
)

func mutateResourceWithOverlay(resource unstructured.Unstructured, overlay interface{}) (unstructured.Unstructured, error) {
    patches, err := mutate.MutateResourceWithOverlay(resource.UnstructuredContent(), overlay)
    if err != nil {
        return unstructured.Unstructured{}, err
    }
    if len(patches) == 0 {
        return resource, nil
    }

    // convert to RAW
    resourceRaw, err := resource.MarshalJSON()
    if err != nil {
        return unstructured.Unstructured{}, err
    }

    var patchResource []byte
    patchResource, err = utils.ApplyPatches(resourceRaw, patches)
    if err != nil {
        return unstructured.Unstructured{}, err
    }

    resource = unstructured.Unstructured{}
    err = resource.UnmarshalJSON(patchResource)
    if err != nil {
        return unstructured.Unstructured{}, err
    }

    return resource, nil
}

// ForceMutate does not check any conditions, it simply mutates the given resource
func ForceMutate(ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured) (unstructured.Unstructured, error) {
    var err error
    for _, rule := range policy.Spec.Rules {
        if !rule.HasMutate() {
            continue
        }

        mutation := rule.Mutation.DeepCopy()

        if mutation.Overlay != nil {
            overlay := mutation.Overlay
            if ctx != nil {
                if overlay, err = variables.SubstituteVars(log.Log, ctx, overlay); err != nil {
                    return unstructured.Unstructured{}, err
                }
            } else {
                overlay = replaceSubstituteVariables(overlay)
            }

            resource, err = mutateResourceWithOverlay(resource, overlay)
            if err != nil {
                return unstructured.Unstructured{}, fmt.Errorf("could not mutate resource with overlay on rule %v:%v", rule.Name, err)
            }
        }

        if rule.Mutation.Patches != nil {
            var resp response.RuleResponse
            resp, resource = mutate.ProcessPatches(log.Log, rule, resource)
            if !resp.Success {
                return unstructured.Unstructured{}, fmt.Errorf(resp.Message)
            }
        }
    }

    return resource, nil
}

func replaceSubstituteVariables(overlay interface{}) interface{} {
    overlayRaw, err := json.Marshal(overlay)
    if err != nil {
        return overlay
    }

    regex := regexp.MustCompile(`\{\{([^{}]*)\}\}`)
    for {
        if len(regex.FindAllStringSubmatch(string(overlayRaw), -1)) > 0 {
            overlayRaw = regex.ReplaceAll(overlayRaw, []byte(`placeholderValue`))
        } else {
            break
        }
    }

    var output interface{}
    err = json.Unmarshal(overlayRaw, &output)
    if err != nil {
        return overlay
    }

    return output
}
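An observation on replaceSubstituteVariables above: because the pattern `[^{}]*` cannot cross a brace, each pass of the loop rewrites only the innermost {{...}} references, so nested references collapse outward over repeated passes. A standalone, runnable sketch of the same loop (illustrative only, not part of the commit):

    package main

    import (
        "fmt"
        "regexp"
    )

    func main() {
        // A nested reference, like those exercised by the openapi validation tests below.
        overlay := `{"annotations":{"creator":"{{serviceAccountName-{{something}}}}"}}`
        regex := regexp.MustCompile(`\{\{([^{}]*)\}\}`)
        for regex.MatchString(overlay) {
            // pass 1: {{something}} -> placeholderValue
            // pass 2: {{serviceAccountName-placeholderValue}} -> placeholderValue
            overlay = regex.ReplaceAllString(overlay, "placeholderValue")
        }
        fmt.Println(overlay) // {"annotations":{"creator":"placeholderValue"}}
    }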
@@ -1,7 +1,10 @@
 package engine
 
 import (
+	"time"
+
 	"github.com/go-logr/logr"
+
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	"github.com/nirmata/kyverno/pkg/engine/context"
 	"github.com/nirmata/kyverno/pkg/engine/response"
@@ -27,6 +30,9 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
 	if !rule.HasGenerate() {
 		return nil
 	}
+
+	startTime := time.Now()
+
 	if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil {
 		return nil
 	}
@@ -40,8 +46,12 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
 	}
 	// build rule Response
 	return &response.RuleResponse{
-		Name: rule.Name,
-		Type: "Generation",
+		Name:    rule.Name,
+		Type:    "Generation",
+		Success: true,
+		RuleStats: response.RuleStats{
+			ProcessingTime: time.Since(startTime),
+		},
 	}
 }
 
@@ -118,7 +118,7 @@ func processOverlayPatches(log logr.Logger, resource, overlay interface{}) ([][]
 		}
 	}
 
-	patchBytes, err := mutateResourceWithOverlay(resource, overlay)
+	patchBytes, err := MutateResourceWithOverlay(resource, overlay)
 	if err != nil {
 		return patchBytes, newOverlayError(overlayFailure, err.Error())
 	}
@@ -126,8 +126,8 @@ func processOverlayPatches(log logr.Logger, resource, overlay interface{}) ([][]
 	return patchBytes, overlayError{}
 }
 
-// mutateResourceWithOverlay is a start of overlaying process
-func mutateResourceWithOverlay(resource, pattern interface{}) ([][]byte, error) {
+// MutateResourceWithOverlay is a start of overlaying process
+func MutateResourceWithOverlay(resource, pattern interface{}) ([][]byte, error) {
 	// It assumes that mutation is started from root, so "/" is passed
 	return applyOverlay(resource, pattern, "/")
 }
@@ -10,6 +10,7 @@ import (
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	"github.com/nirmata/kyverno/pkg/event"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	"github.com/nirmata/kyverno/pkg/policyviolation"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -55,8 +56,9 @@ type Controller struct {
 	dynamicInformer dynamicinformer.DynamicSharedInformerFactory
 	//TODO: list of generic informers
 	// only support Namespaces for re-evalutation on resource updates
-	nsInformer informers.GenericInformer
-	log        logr.Logger
+	nsInformer           informers.GenericInformer
+	policyStatusListener policystatus.Listener
+	log                  logr.Logger
 }
 
 //NewController returns an instance of the Generate-Request Controller
@@ -68,6 +70,7 @@ func NewController(
 	eventGen event.Interface,
 	pvGenerator policyviolation.GeneratorInterface,
 	dynamicInformer dynamicinformer.DynamicSharedInformerFactory,
+	policyStatus policystatus.Listener,
 	log logr.Logger,
 ) *Controller {
 	c := Controller{
@@ -77,9 +80,10 @@ func NewController(
 		pvGenerator: pvGenerator,
 		//TODO: do the math for worst case back off and make sure cleanup runs after that
 		// as we dont want a deleted GR to be re-queue
-		queue:           workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
-		dynamicInformer: dynamicInformer,
-		log:             log,
+		queue:                workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
+		dynamicInformer:      dynamicInformer,
+		log:                  log,
+		policyStatusListener: policyStatus,
 	}
 	c.statusControl = StatusControl{client: kyvernoclient}
 
@@ -3,6 +3,7 @@ package generate
 import (
 	"encoding/json"
 	"fmt"
+	"time"
 
 	"github.com/go-logr/logr"
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
@@ -82,7 +83,7 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern
 	}
 
 	// Apply the generate rule on resource
-	return applyGeneratePolicy(logger, c.client, policyContext)
+	return c.applyGeneratePolicy(logger, policyContext, gr)
 }
 
 func updateStatus(statusControl StatusControlInterface, gr kyverno.GenerateRequest, err error, genResources []kyverno.ResourceSpec) error {
@@ -94,7 +95,7 @@ func updateStatus(statusControl StatusControlInterface, gr kyverno.GenerateReque
 	return statusControl.Success(gr, genResources)
 }
 
-func applyGeneratePolicy(log logr.Logger, client *dclient.Client, policyContext engine.PolicyContext) ([]kyverno.ResourceSpec, error) {
+func (c *Controller) applyGeneratePolicy(log logr.Logger, policyContext engine.PolicyContext, gr kyverno.GenerateRequest) ([]kyverno.ResourceSpec, error) {
 	// List of generatedResources
 	var genResources []kyverno.ResourceSpec
 	// Get the response as the actions to be performed on the resource
@@ -109,20 +110,69 @@ func applyGeneratePolicy(log logr.Logger, client *dclient.Client, policyContext
 		return rcreationTime.Before(&pcreationTime)
 	}()
 
+	ruleNameToProcessingTime := make(map[string]time.Duration)
 	for _, rule := range policy.Spec.Rules {
 		if !rule.HasGenerate() {
 			continue
 		}
-		genResource, err := applyRule(log, client, rule, resource, ctx, processExisting)
+		startTime := time.Now()
+		genResource, err := applyRule(log, c.client, rule, resource, ctx, processExisting)
 		if err != nil {
 			return nil, err
 		}
+
+		ruleNameToProcessingTime[rule.Name] = time.Since(startTime)
 		genResources = append(genResources, genResource)
 	}
 
+	if gr.Status.State == "" {
+		c.policyStatusListener.Send(generateSyncStats{
+			policyName:               policy.Name,
+			ruleNameToProcessingTime: ruleNameToProcessingTime,
+		})
+	}
+
 	return genResources, nil
 }
 
+type generateSyncStats struct {
+	policyName               string
+	ruleNameToProcessingTime map[string]time.Duration
+}
+
+func (vc generateSyncStats) PolicyName() string {
+	return vc.policyName
+}
+
+func (vc generateSyncStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
+
+	for i := range status.Rules {
+		if executionTime, exist := vc.ruleNameToProcessingTime[status.Rules[i].Name]; exist {
+			status.ResourcesGeneratedCount += 1
+			status.Rules[i].ResourcesGeneratedCount += 1
+			averageOver := int64(status.Rules[i].AppliedCount + status.Rules[i].FailedCount)
+			status.Rules[i].ExecutionTime = updateGenerateExecutionTime(
+				executionTime,
+				status.Rules[i].ExecutionTime,
+				averageOver,
+			).String()
+		}
+	}
+
+	return status
+}
+
+func updateGenerateExecutionTime(newTime time.Duration, oldAverageTimeString string, averageOver int64) time.Duration {
+	if averageOver == 0 {
+		return newTime
+	}
+	oldAverageExecutionTime, _ := time.ParseDuration(oldAverageTimeString)
+	numerator := (oldAverageExecutionTime.Nanoseconds() * averageOver) + newTime.Nanoseconds()
+	denominator := averageOver
+	newAverageTimeInNanoSeconds := numerator / denominator
+	return time.Duration(newAverageTimeInNanoSeconds) * time.Nanosecond
+}
+
 func applyRule(log logr.Logger, client *dclient.Client, rule kyverno.Rule, resource unstructured.Unstructured, ctx context.EvalInterface, processExisting bool) (kyverno.ResourceSpec, error) {
 	var rdata map[string]interface{}
 	var err error
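A note on the arithmetic in updateGenerateExecutionTime above: it folds a new sample into the stored average by re-weighting the old average with averageOver (the rule's applied-plus-failed count), so no per-sample history is kept. A standalone sketch of the same computation (illustrative only, not part of the commit):

    package main

    import (
        "fmt"
        "time"
    )

    // Mirrors updateGenerateExecutionTime: newAvg = (oldAvg*n + sample) / n,
    // where n is the number of samples the old average is taken to cover.
    func updateAverage(sample time.Duration, oldAvg string, n int64) time.Duration {
        if n == 0 {
            return sample
        }
        old, _ := time.ParseDuration(oldAvg)
        return time.Duration((old.Nanoseconds()*n + sample.Nanoseconds()) / n)
    }

    func main() {
        // Average so far 100ns over 4 samples; one new 200ns sample arrives.
        fmt.Println(updateAverage(200*time.Nanosecond, "100ns", 4)) // 150ns
    }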
pkg/generate/policyStatus_test.go (new file, 53 lines)

@@ -0,0 +1,53 @@
package generate

import (
    "encoding/json"
    "reflect"
    "testing"
    "time"

    v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

func Test_Stats(t *testing.T) {
    testCase := struct {
        generatedSyncStats []generateSyncStats
        expectedOutput     []byte
        existingStatus     map[string]v1.PolicyStatus
    }{
        expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"","resourcesGeneratedCount":2,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"23ns","resourcesGeneratedCount":1},{"ruleName":"rule2","averageExecutionTime":"44ns","resourcesGeneratedCount":1},{"ruleName":"rule3"}]}}`),
        generatedSyncStats: []generateSyncStats{
            {
                policyName: "policy1",
                ruleNameToProcessingTime: map[string]time.Duration{
                    "rule1": time.Nanosecond * 23,
                    "rule2": time.Nanosecond * 44,
                },
            },
        },
        existingStatus: map[string]v1.PolicyStatus{
            "policy1": {
                Rules: []v1.RuleStats{
                    {
                        Name: "rule1",
                    },
                    {
                        Name: "rule2",
                    },
                    {
                        Name: "rule3",
                    },
                },
            },
        },
    }

    for _, generateSyncStat := range testCase.generatedSyncStats {
        testCase.existingStatus[generateSyncStat.PolicyName()] = generateSyncStat.UpdateStatus(testCase.existingStatus[generateSyncStat.PolicyName()])
    }

    output, _ := json.Marshal(testCase.existingStatus)
    if !reflect.DeepEqual(output, testCase.expectedOutput) {
        t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
    }
}
pkg/kyverno/apply/command.go (new file, 380 lines)

@@ -0,0 +1,380 @@
package apply

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/nirmata/kyverno/pkg/kyverno/sanitizedError"

    policy2 "github.com/nirmata/kyverno/pkg/policy"

    "github.com/golang/glog"

    "k8s.io/apimachinery/pkg/runtime/schema"

    "k8s.io/client-go/discovery"

    "k8s.io/apimachinery/pkg/util/yaml"

    "github.com/nirmata/kyverno/pkg/engine"

    engineutils "github.com/nirmata/kyverno/pkg/engine/utils"

    "k8s.io/apimachinery/pkg/runtime"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

    v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    "github.com/spf13/cobra"
    yamlv2 "gopkg.in/yaml.v2"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    "k8s.io/client-go/kubernetes/scheme"
)

func Command() *cobra.Command {
    var cmd *cobra.Command
    var resourcePaths []string
    var cluster bool

    kubernetesConfig := genericclioptions.NewConfigFlags(true)

    cmd = &cobra.Command{
        Use:     "apply",
        Short:   "Applies policies on resources",
        Example: fmt.Sprintf("To apply on a resource:\nkyverno apply /path/to/policy.yaml /path/to/folderOfPolicies --resource=/path/to/resource1 --resource=/path/to/resource2\n\nTo apply on a cluster\nkyverno apply /path/to/policy.yaml /path/to/folderOfPolicies --cluster"),
        RunE: func(cmd *cobra.Command, policyPaths []string) (err error) {
            defer func() {
                if err != nil {
                    if !sanitizedError.IsErrorSanitized(err) {
                        glog.V(4).Info(err)
                        err = fmt.Errorf("Internal error")
                    }
                }
            }()

            if len(resourcePaths) == 0 && !cluster {
                return sanitizedError.New(fmt.Sprintf("Specify path to resource file or cluster name"))
            }

            policies, err := getPolicies(policyPaths)
            if err != nil {
                if !sanitizedError.IsErrorSanitized(err) {
                    return sanitizedError.New("Could not parse policy paths")
                } else {
                    return err
                }
            }

            for _, policy := range policies {
                err := policy2.Validate(*policy, nil, true)
                if err != nil {
                    return sanitizedError.New(fmt.Sprintf("Policy %v is not valid", policy.Name))
                }
            }

            var dClient discovery.CachedDiscoveryInterface
            if cluster {
                dClient, err = kubernetesConfig.ToDiscoveryClient()
                if err != nil {
                    return sanitizedError.New(fmt.Errorf("Issues with kubernetes Config").Error())
                }
            }

            resources, err := getResources(policies, resourcePaths, dClient)
            if err != nil {
                return sanitizedError.New(fmt.Errorf("Issues fetching resources").Error())
            }

            for i, policy := range policies {
                for j, resource := range resources {
                    if !(j == 0 && i == 0) {
                        fmt.Printf("\n\n=======================================================================\n")
                    }

                    err = applyPolicyOnResource(policy, resource)
                    if err != nil {
                        return sanitizedError.New(fmt.Errorf("Issues applying policy %v on resource %v", policy.Name, resource.GetName()).Error())
                    }
                }
            }

            return nil
        },
    }

    cmd.Flags().StringArrayVarP(&resourcePaths, "resource", "r", []string{}, "Path to resource files")
    cmd.Flags().BoolVarP(&cluster, "cluster", "c", false, "Checks if policies should be applied to cluster in the current context")

    return cmd
}

func getResources(policies []*v1.ClusterPolicy, resourcePaths []string, dClient discovery.CachedDiscoveryInterface) ([]*unstructured.Unstructured, error) {
    var resources []*unstructured.Unstructured
    var err error

    if dClient != nil {
        var resourceTypesMap = make(map[string]bool)
        var resourceTypes []string
        for _, policy := range policies {
            for _, rule := range policy.Spec.Rules {
                for _, kind := range rule.MatchResources.Kinds {
                    resourceTypesMap[kind] = true
                }
            }
        }

        for kind := range resourceTypesMap {
            resourceTypes = append(resourceTypes, kind)
        }

        resources, err = getResourcesOfTypeFromCluster(resourceTypes, dClient)
        if err != nil {
            return nil, err
        }
    }

    for _, resourcePath := range resourcePaths {
        resource, err := getResource(resourcePath)
        if err != nil {
            return nil, err
        }

        resources = append(resources, resource)
    }

    return resources, nil
}

func getResourcesOfTypeFromCluster(resourceTypes []string, dClient discovery.CachedDiscoveryInterface) ([]*unstructured.Unstructured, error) {
    var resources []*unstructured.Unstructured

    for _, kind := range resourceTypes {
        endpoint, err := getListEndpointForKind(kind)
        if err != nil {
            return nil, err
        }

        listObjectRaw, err := dClient.RESTClient().Get().RequestURI(endpoint).Do().Raw()
        if err != nil {
            return nil, err
        }

        listObject, err := engineutils.ConvertToUnstructured(listObjectRaw)
        if err != nil {
            return nil, err
        }

        resourceList, err := listObject.ToList()
        if err != nil {
            return nil, err
        }

        version := resourceList.GetAPIVersion()
        for _, resource := range resourceList.Items {
            resource.SetGroupVersionKind(schema.GroupVersionKind{
                Group:   "",
                Version: version,
                Kind:    kind,
            })
            resources = append(resources, resource.DeepCopy())
        }
    }

    return resources, nil
}

func getPoliciesInDir(path string) ([]*v1.ClusterPolicy, error) {
    var policies []*v1.ClusterPolicy

    files, err := ioutil.ReadDir(path)
    if err != nil {
        return nil, err
    }

    for _, file := range files {
        if file.IsDir() {
            policiesFromDir, err := getPoliciesInDir(filepath.Join(path, file.Name()))
            if err != nil {
                return nil, err
            }

            policies = append(policies, policiesFromDir...)
        } else {
            policy, err := getPolicy(filepath.Join(path, file.Name()))
            if err != nil {
                return nil, err
            }

            policies = append(policies, policy)
        }
    }

    return policies, nil
}

func getPolicies(paths []string) ([]*v1.ClusterPolicy, error) {
    var policies = make([]*v1.ClusterPolicy, 0, len(paths))
    for _, path := range paths {
        path = filepath.Clean(path)

        fileDesc, err := os.Stat(path)
        if err != nil {
            return nil, err
        }

        if fileDesc.IsDir() {
            policiesFromDir, err := getPoliciesInDir(path)
            if err != nil {
                return nil, err
            }

            policies = append(policies, policiesFromDir...)
        } else {
            policy, err := getPolicy(path)
            if err != nil {
                return nil, err
            }

            policies = append(policies, policy)
        }
    }

    for i := range policies {
        setFalse := false
        policies[i].Spec.Background = &setFalse
    }

    return policies, nil
}

func getPolicy(path string) (*v1.ClusterPolicy, error) {
    policy := &v1.ClusterPolicy{}

    file, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("failed to load file: %v", err)
    }

    policyBytes, err := yaml.ToJSON(file)
    if err != nil {
        return nil, err
    }

    if err := json.Unmarshal(policyBytes, policy); err != nil {
        return nil, sanitizedError.New(fmt.Sprintf("failed to decode policy in %s", path))
    }

    if policy.TypeMeta.Kind != "ClusterPolicy" {
        return nil, sanitizedError.New(fmt.Sprintf("resource %v is not a cluster policy", policy.Name))
    }

    return policy, nil
}

func getResource(path string) (*unstructured.Unstructured, error) {

    resourceYaml, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, err
    }

    decode := scheme.Codecs.UniversalDeserializer().Decode
    resourceObject, metaData, err := decode(resourceYaml, nil, nil)
    if err != nil {
        return nil, err
    }

    resourceUnstructured, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&resourceObject)
    if err != nil {
        return nil, err
    }

    resourceJSON, err := json.Marshal(resourceUnstructured)
    if err != nil {
        return nil, err
    }

    resource, err := engineutils.ConvertToUnstructured(resourceJSON)
    if err != nil {
        return nil, err
    }

    resource.SetGroupVersionKind(*metaData)

    if resource.GetNamespace() == "" {
        resource.SetNamespace("default")
    }

    return resource, nil
}

func applyPolicyOnResource(policy *v1.ClusterPolicy, resource *unstructured.Unstructured) error {

    fmt.Printf("\n\nApplying Policy %s on Resource %s/%s/%s\n", policy.Name, resource.GetNamespace(), resource.GetKind(), resource.GetName())

    mutateResponse := engine.Mutate(engine.PolicyContext{Policy: *policy, NewResource: *resource})
    if !mutateResponse.IsSuccesful() {
        fmt.Printf("\n\nMutation:")
        fmt.Printf("\nFailed to apply mutation")
        for i, r := range mutateResponse.PolicyResponse.Rules {
            fmt.Printf("\n%d. %s", i+1, r.Message)
        }
        fmt.Printf("\n\n")
    } else {
        if len(mutateResponse.PolicyResponse.Rules) > 0 {
            fmt.Printf("\n\nMutation:")
            fmt.Printf("\nMutation has been applied succesfully")
            yamlEncodedResource, err := yamlv2.Marshal(mutateResponse.PatchedResource.Object)
            if err != nil {
                return err
            }

            fmt.Printf("\n\n" + string(yamlEncodedResource))
            fmt.Printf("\n\n")
        }
    }

    validateResponse := engine.Validate(engine.PolicyContext{Policy: *policy, NewResource: mutateResponse.PatchedResource})
    if !validateResponse.IsSuccesful() {
        fmt.Printf("\n\nValidation:")
        fmt.Printf("\nResource is invalid")
        for i, r := range validateResponse.PolicyResponse.Rules {
            fmt.Printf("\n%d. %s", i+1, r.Message)
        }
        fmt.Printf("\n\n")
    } else {
        if len(validateResponse.PolicyResponse.Rules) > 0 {
            fmt.Printf("\n\nValidation:")
            fmt.Printf("\nResource is valid")
            fmt.Printf("\n\n")
        }
    }

    var policyHasGenerate bool
    for _, rule := range policy.Spec.Rules {
        if rule.HasGenerate() {
            policyHasGenerate = true
        }
    }

    if policyHasGenerate {
        generateResponse := engine.Generate(engine.PolicyContext{Policy: *policy, NewResource: *resource})
        if len(generateResponse.PolicyResponse.Rules) > 0 {
            fmt.Printf("\n\nGenerate:")
            fmt.Printf("\nResource is valid")
            fmt.Printf("\n\n")
        } else {
            fmt.Printf("\n\nGenerate:")
            fmt.Printf("\nResource is invalid")
            for i, r := range generateResponse.PolicyResponse.Rules {
                fmt.Printf("\n%d. %s", i+1, r.Message)
            }
            fmt.Printf("\n\n")
        }
    }

    return nil
}
pkg/kyverno/apply/helper.go (new file, 37 lines)

@@ -0,0 +1,37 @@
package apply

import (
    "fmt"
    "strings"

    "github.com/nirmata/kyverno/pkg/openapi"
)

func getListEndpointForKind(kind string) (string, error) {

    definitionName := openapi.GetDefinitionNameFromKind(kind)
    definitionNameWithoutPrefix := strings.Replace(definitionName, "io.k8s.", "", -1)

    parts := strings.Split(definitionNameWithoutPrefix, ".")
    definitionPrefix := strings.Join(parts[:len(parts)-1], ".")

    defPrefixToApiPrefix := map[string]string{
        "api.core.v1":                  "/api/v1",
        "api.apps.v1":                  "/apis/apps/v1",
        "api.batch.v1":                 "/apis/batch/v1",
        "api.admissionregistration.v1": "/apis/admissionregistration.k8s.io/v1",
        "kube-aggregator.pkg.apis.apiregistration.v1":       "/apis/apiregistration.k8s.io/v1",
        "apiextensions-apiserver.pkg.apis.apiextensions.v1": "/apis/apiextensions.k8s.io/v1",
        "api.autoscaling.v1":  "/apis/autoscaling/v1/",
        "api.storage.v1":      "/apis/storage.k8s.io/v1",
        "api.coordination.v1": "/apis/coordination.k8s.io/v1",
        "api.scheduling.v1":   "/apis/scheduling.k8s.io/v1",
        "api.rbac.v1":         "/apis/rbac.authorization.k8s.io/v1",
    }

    if defPrefixToApiPrefix[definitionPrefix] == "" {
        return "", fmt.Errorf("Unsupported resource type %v", kind)
    }

    return defPrefixToApiPrefix[definitionPrefix] + "/" + strings.ToLower(kind) + "s", nil
}
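A worked trace of getListEndpointForKind (hypothetical values, not part of the commit), assuming the OpenAPI document maps kind "Deployment" to the definition "io.k8s.api.apps.v1.Deployment":

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        definitionName := "io.k8s.api.apps.v1.Deployment" // assumed lookup result
        withoutPrefix := strings.Replace(definitionName, "io.k8s.", "", -1)
        parts := strings.Split(withoutPrefix, ".")
        prefix := strings.Join(parts[:len(parts)-1], ".") // "api.apps.v1" -> "/apis/apps/v1"
        fmt.Println(prefix, "->", "/apis/apps/v1/"+strings.ToLower("Deployment")+"s")
        // api.apps.v1 -> /apis/apps/v1/deployments
    }

Note the naive pluralization (kind lowercased plus "s"): it holds for kinds like Deployment or Pod, but not for every resource name.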
pkg/kyverno/main.go (new file, 50 lines)

@@ -0,0 +1,50 @@
package kyverno

import (
    "flag"
    "os"

    "github.com/nirmata/kyverno/pkg/kyverno/validate"

    "github.com/nirmata/kyverno/pkg/kyverno/apply"

    "github.com/nirmata/kyverno/pkg/kyverno/version"

    "github.com/spf13/cobra"
)

func CLI() {
    cli := &cobra.Command{
        Use:   "kyverno",
        Short: "kyverno manages native policies of Kubernetes",
    }

    configureGlog(cli)

    commands := []*cobra.Command{
        version.Command(),
        apply.Command(),
        validate.Command(),
    }

    cli.AddCommand(commands...)

    cli.SilenceUsage = true

    if err := cli.Execute(); err != nil {
        os.Exit(1)
    }
}

func configureGlog(cli *cobra.Command) {
    flag.Parse()
    _ = flag.Set("logtostderr", "true")

    cli.PersistentFlags().AddGoFlagSet(flag.CommandLine)
    _ = cli.PersistentFlags().MarkHidden("alsologtostderr")
    _ = cli.PersistentFlags().MarkHidden("logtostderr")
    _ = cli.PersistentFlags().MarkHidden("log_dir")
    _ = cli.PersistentFlags().MarkHidden("log_backtrace_at")
    _ = cli.PersistentFlags().MarkHidden("stderrthreshold")
    _ = cli.PersistentFlags().MarkHidden("vmodule")
}
pkg/kyverno/sanitizedError/error.go (new file, 20 lines)

@@ -0,0 +1,20 @@
package sanitizedError

type customError struct {
    message string
}

func (c customError) Error() string {
    return c.message
}

func New(message string) error {
    return customError{message: message}
}

func IsErrorSanitized(err error) bool {
    if _, ok := err.(customError); !ok {
        return false
    }
    return true
}
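Illustrative usage of the sanitizedError contract (not part of the commit): only messages created through sanitizedError.New are treated as safe to surface to the CLI user; anything else is logged and replaced with a generic message, as the apply and validate commands below do:

    package main

    import (
        "fmt"

        "github.com/nirmata/kyverno/pkg/kyverno/sanitizedError"
    )

    func run() error {
        return sanitizedError.New("policy.yaml is not a valid ClusterPolicy")
    }

    func main() {
        if err := run(); err != nil {
            if !sanitizedError.IsErrorSanitized(err) {
                err = fmt.Errorf("Internal error") // hide unexpected details
            }
            fmt.Println(err) // prints the sanitized message as-is
        }
    }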
pkg/kyverno/validate/command.go (new file, 147 lines)

@@ -0,0 +1,147 @@
package validate

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "os"
    "path/filepath"

    "github.com/nirmata/kyverno/pkg/kyverno/sanitizedError"

    "github.com/golang/glog"

    policyvalidate "github.com/nirmata/kyverno/pkg/policy"

    v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    "github.com/spf13/cobra"
    "k8s.io/apimachinery/pkg/util/yaml"
)

func Command() *cobra.Command {
    cmd := &cobra.Command{
        Use:     "validate",
        Short:   "Validates kyverno policies",
        Example: "kyverno validate /path/to/policy.yaml /path/to/folderOfPolicies",
        RunE: func(cmd *cobra.Command, policyPaths []string) (err error) {
            defer func() {
                if err != nil {
                    if !sanitizedError.IsErrorSanitized(err) {
                        glog.V(4).Info(err)
                        err = fmt.Errorf("Internal error")
                    }
                }
            }()

            policies, err := getPolicies(policyPaths)
            if err != nil {
                if !sanitizedError.IsErrorSanitized(err) {
                    return sanitizedError.New("Could not parse policy paths")
                } else {
                    return err
                }
            }

            for _, policy := range policies {
                err = policyvalidate.Validate(*policy, nil, true)
                if err != nil {
                    fmt.Println("Policy " + policy.Name + " is invalid")
                } else {
                    fmt.Println("Policy " + policy.Name + " is valid")
                }
            }

            return nil
        },
    }

    return cmd
}

func getPoliciesInDir(path string) ([]*v1.ClusterPolicy, error) {
    var policies []*v1.ClusterPolicy

    files, err := ioutil.ReadDir(path)
    if err != nil {
        return nil, err
    }

    for _, file := range files {
        if file.IsDir() {
            policiesFromDir, err := getPoliciesInDir(filepath.Join(path, file.Name()))
            if err != nil {
                return nil, err
            }

            policies = append(policies, policiesFromDir...)
        } else {
            policy, err := getPolicy(filepath.Join(path, file.Name()))
            if err != nil {
                return nil, err
            }

            policies = append(policies, policy)
        }
    }

    return policies, nil
}

func getPolicies(paths []string) ([]*v1.ClusterPolicy, error) {
    var policies = make([]*v1.ClusterPolicy, 0, len(paths))
    for _, path := range paths {
        path = filepath.Clean(path)

        fileDesc, err := os.Stat(path)
        if err != nil {
            return nil, err
        }

        if fileDesc.IsDir() {
            policiesFromDir, err := getPoliciesInDir(path)
            if err != nil {
                return nil, err
            }

            policies = append(policies, policiesFromDir...)
        } else {
            policy, err := getPolicy(path)
            if err != nil {
                return nil, err
            }

            policies = append(policies, policy)
        }
    }

    for i := range policies {
        setFalse := false
        policies[i].Spec.Background = &setFalse
    }

    return policies, nil
}

func getPolicy(path string) (*v1.ClusterPolicy, error) {
    policy := &v1.ClusterPolicy{}

    file, err := ioutil.ReadFile(path)
    if err != nil {
        return nil, fmt.Errorf("failed to load file: %v", err)
    }

    policyBytes, err := yaml.ToJSON(file)
    if err != nil {
        return nil, err
    }

    if err := json.Unmarshal(policyBytes, policy); err != nil {
        return nil, sanitizedError.New(fmt.Sprintf("failed to decode policy in %s", path))
    }

    if policy.TypeMeta.Kind != "ClusterPolicy" {
        return nil, sanitizedError.New(fmt.Sprintf("resource %v is not a cluster policy", policy.Name))
    }

    return policy, nil
}
pkg/kyverno/version/command.go (new file, 21 lines)

@@ -0,0 +1,21 @@
package version

import (
    "fmt"

    "github.com/nirmata/kyverno/pkg/version"
    "github.com/spf13/cobra"
)

func Command() *cobra.Command {
    return &cobra.Command{
        Use:   "version",
        Short: "Shows current version of kyverno",
        RunE: func(cmd *cobra.Command, args []string) error {
            fmt.Printf("Version: %s\n", version.BuildVersion)
            fmt.Printf("Time: %s\n", version.BuildTime)
            fmt.Printf("Git commit ID: %s\n", version.BuildHash)
            return nil
        },
    }
}
pkg/openapi/crdSync.go (new file, 112 lines)

@@ -0,0 +1,112 @@
package openapi

import (
    "encoding/json"
    "time"

    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

    "github.com/golang/glog"

    "gopkg.in/yaml.v2"

    "github.com/googleapis/gnostic/compiler"

    openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"

    client "github.com/nirmata/kyverno/pkg/dclient"
    "k8s.io/apimachinery/pkg/util/wait"
)

type crdDefinition struct {
    Spec struct {
        Names struct {
            Kind string `json:"kind"`
        } `json:"names"`
        Versions []struct {
            Schema struct {
                OpenAPIV3Schema interface{} `json:"openAPIV3Schema"`
            } `json:"schema"`
        } `json:"versions"`
    } `json:"spec"`
}

type crdSync struct {
    client *client.Client
}

func NewCRDSync(client *client.Client) *crdSync {
    return &crdSync{
        client: client,
    }
}

func (c *crdSync) Run(workers int, stopCh <-chan struct{}) {
    newDoc, err := c.client.DiscoveryClient.OpenAPISchema()
    if err != nil {
        glog.V(4).Infof("cannot get openapi schema: %v", err)
    }

    err = useOpenApiDocument(newDoc)
    if err != nil {
        glog.V(4).Infof("Could not set custom OpenApi document: %v\n", err)
    }

    for i := 0; i < workers; i++ {
        go wait.Until(c.sync, time.Second*10, stopCh)
    }
    <-stopCh
}

func (c *crdSync) sync() {
    openApiGlobalState.mutex.Lock()
    defer openApiGlobalState.mutex.Unlock()

    crds, err := c.client.ListResource("CustomResourceDefinition", "", nil)
    if err != nil {
        glog.V(4).Infof("could not fetch crd's from server: %v", err)
        return
    }

    deleteCRDFromPreviousSync()

    for _, crd := range crds.Items {
        parseCRD(crd)
    }
}

func deleteCRDFromPreviousSync() {
    for _, crd := range openApiGlobalState.crdList {
        delete(openApiGlobalState.kindToDefinitionName, crd)
        delete(openApiGlobalState.definitions, crd)
    }

    openApiGlobalState.crdList = []string{}
}

func parseCRD(crd unstructured.Unstructured) {
    var crdDefinition crdDefinition
    crdRaw, _ := json.Marshal(crd.Object)
    _ = json.Unmarshal(crdRaw, &crdDefinition)

    crdName := crdDefinition.Spec.Names.Kind
    if len(crdDefinition.Spec.Versions) < 1 {
        glog.V(4).Infof("could not parse crd schema, no versions present")
        return
    }

    var schema yaml.MapSlice
    schemaRaw, _ := json.Marshal(crdDefinition.Spec.Versions[0].Schema.OpenAPIV3Schema)
    _ = yaml.Unmarshal(schemaRaw, &schema)

    parsedSchema, err := openapi_v2.NewSchema(schema, compiler.NewContext("schema", nil))
    if err != nil {
        glog.V(4).Infof("could not parse crd schema:%v", err)
        return
    }

    openApiGlobalState.crdList = append(openApiGlobalState.crdList, crdName)

    openApiGlobalState.kindToDefinitionName[crdName] = crdName
    openApiGlobalState.definitions[crdName] = parsedSchema
}
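A toy illustration (not part of the commit) of the crdDefinition projection used by parseCRD above: unmarshalling keeps only spec.names.kind and each version's openAPIV3Schema, discarding the rest of the CRD:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    type crdDefinition struct {
        Spec struct {
            Names struct {
                Kind string `json:"kind"`
            } `json:"names"`
            Versions []struct {
                Schema struct {
                    OpenAPIV3Schema interface{} `json:"openAPIV3Schema"`
                } `json:"schema"`
            } `json:"versions"`
        } `json:"spec"`
    }

    func main() {
        raw := []byte(`{"spec":{"names":{"kind":"Widget"},"versions":[{"schema":{"openAPIV3Schema":{"type":"object"}}}],"scope":"Namespaced"}}`)
        var def crdDefinition
        _ = json.Unmarshal(raw, &def)
        fmt.Println(def.Spec.Names.Kind, def.Spec.Versions[0].Schema.OpenAPIV3Schema)
        // Widget map[type:object]
    }

Note that parseCRD consults only Versions[0], so CRDs serving multiple versions are registered against the first listed schema only.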
pkg/openapi/validation.go (new file, 246 lines)

@@ -0,0 +1,246 @@
package openapi

import (
    "fmt"
    "strconv"
    "strings"
    "sync"

    "github.com/nirmata/kyverno/data"

    "github.com/golang/glog"

    "github.com/nirmata/kyverno/pkg/engine"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

    v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"

    openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
    "github.com/googleapis/gnostic/compiler"
    "k8s.io/kube-openapi/pkg/util/proto"
    "k8s.io/kube-openapi/pkg/util/proto/validation"

    "gopkg.in/yaml.v2"
)

var openApiGlobalState struct {
    mutex                sync.RWMutex
    document             *openapi_v2.Document
    definitions          map[string]*openapi_v2.Schema
    kindToDefinitionName map[string]string
    crdList              []string
    models               proto.Models
}

func init() {
    defaultDoc, err := getSchemaDocument()
    if err != nil {
        panic(err)
    }

    err = useOpenApiDocument(defaultDoc)
    if err != nil {
        panic(err)
    }
}

func ValidatePolicyMutation(policy v1.ClusterPolicy) error {
    openApiGlobalState.mutex.RLock()
    defer openApiGlobalState.mutex.RUnlock()

    var kindToRules = make(map[string][]v1.Rule)
    for _, rule := range policy.Spec.Rules {
        if rule.HasMutate() {
            rule.MatchResources = v1.MatchResources{
                UserInfo: v1.UserInfo{},
                ResourceDescription: v1.ResourceDescription{
                    Kinds: rule.MatchResources.Kinds,
                },
            }
            rule.ExcludeResources = v1.ExcludeResources{}
            for _, kind := range rule.MatchResources.Kinds {
                kindToRules[kind] = append(kindToRules[kind], rule)
            }
        }
    }

    for kind, rules := range kindToRules {
        newPolicy := *policy.DeepCopy()
        newPolicy.Spec.Rules = rules
        resource, _ := generateEmptyResource(openApiGlobalState.definitions[openApiGlobalState.kindToDefinitionName[kind]]).(map[string]interface{})
        if resource == nil {
            glog.V(4).Infof("Cannot Validate policy: openApi definition now found for %v", kind)
            return nil
        }
        newResource := unstructured.Unstructured{Object: resource}
        newResource.SetKind(kind)

        patchedResource, err := engine.ForceMutate(nil, *newPolicy.DeepCopy(), newResource)
        if err != nil {
            return err
        }
        err = ValidateResource(*patchedResource.DeepCopy(), kind)
        if err != nil {
            return err
        }
    }

    return nil
}

func ValidateResource(patchedResource unstructured.Unstructured, kind string) error {
    openApiGlobalState.mutex.RLock()
    defer openApiGlobalState.mutex.RUnlock()
    var err error

    kind = openApiGlobalState.kindToDefinitionName[kind]
    schema := openApiGlobalState.models.LookupModel(kind)
    if schema == nil {
        schema, err = getSchemaFromDefinitions(kind)
        if err != nil || schema == nil {
            return fmt.Errorf("pre-validation: couldn't find model %s", kind)
        }
        delete(patchedResource.Object, "kind")
    }

    if errs := validation.ValidateModel(patchedResource.UnstructuredContent(), schema, kind); len(errs) > 0 {
        var errorMessages []string
        for i := range errs {
            errorMessages = append(errorMessages, errs[i].Error())
        }

        return fmt.Errorf(strings.Join(errorMessages, "\n\n"))
    }

    return nil
}

func GetDefinitionNameFromKind(kind string) string {
    openApiGlobalState.mutex.RLock()
    defer openApiGlobalState.mutex.RUnlock()
    return openApiGlobalState.kindToDefinitionName[kind]
}

func useOpenApiDocument(customDoc *openapi_v2.Document) error {
    openApiGlobalState.mutex.Lock()
    defer openApiGlobalState.mutex.Unlock()

    openApiGlobalState.document = customDoc

    openApiGlobalState.definitions = make(map[string]*openapi_v2.Schema)
    openApiGlobalState.kindToDefinitionName = make(map[string]string)
    for _, definition := range openApiGlobalState.document.GetDefinitions().AdditionalProperties {
        openApiGlobalState.definitions[definition.GetName()] = definition.GetValue()
        path := strings.Split(definition.GetName(), ".")
        openApiGlobalState.kindToDefinitionName[path[len(path)-1]] = definition.GetName()
    }

    var err error
    openApiGlobalState.models, err = proto.NewOpenAPIData(openApiGlobalState.document)
    if err != nil {
        return err
    }

    return nil
}

func getSchemaDocument() (*openapi_v2.Document, error) {
    var spec yaml.MapSlice
    err := yaml.Unmarshal([]byte(data.SwaggerDoc), &spec)
    if err != nil {
        return nil, err
    }

    return openapi_v2.NewDocument(spec, compiler.NewContext("$root", nil))
}

// For crd, we do not store definition in document
func getSchemaFromDefinitions(kind string) (proto.Schema, error) {
    path := proto.NewPath(kind)
    return (&proto.Definitions{}).ParseSchema(openApiGlobalState.definitions[kind], &path)
}

func generateEmptyResource(kindSchema *openapi_v2.Schema) interface{} {

    types := kindSchema.GetType().GetValue()

    if kindSchema.GetXRef() != "" {
        return generateEmptyResource(openApiGlobalState.definitions[strings.TrimPrefix(kindSchema.GetXRef(), "#/definitions/")])
    }

    if len(types) != 1 {
        if len(kindSchema.GetProperties().GetAdditionalProperties()) > 0 {
            types = []string{"object"}
        } else {
            return nil
        }
    }

    switch types[0] {
    case "object":
        var props = make(map[string]interface{})
        properties := kindSchema.GetProperties().GetAdditionalProperties()
        if len(properties) == 0 {
            return props
        }

        var wg sync.WaitGroup
        var mutex sync.Mutex
        wg.Add(len(properties))
        for _, property := range properties {
            go func(property *openapi_v2.NamedSchema) {
                prop := generateEmptyResource(property.GetValue())
                mutex.Lock()
                props[property.GetName()] = prop
                mutex.Unlock()
                wg.Done()
            }(property)
        }
        wg.Wait()
        return props
    case "array":
        var array []interface{}
        for _, schema := range kindSchema.GetItems().GetSchema() {
            array = append(array, generateEmptyResource(schema))
        }
        return array
    case "string":
        if kindSchema.GetDefault() != nil {
            return string(kindSchema.GetDefault().Value.Value)
        }
        if kindSchema.GetExample() != nil {
            return string(kindSchema.GetExample().GetValue().Value)
        }
        return ""
    case "integer":
        if kindSchema.GetDefault() != nil {
            val, _ := strconv.Atoi(string(kindSchema.GetDefault().Value.Value))
            return int64(val)
        }
        if kindSchema.GetExample() != nil {
            val, _ := strconv.Atoi(string(kindSchema.GetExample().GetValue().Value))
            return int64(val)
        }
        return int64(0)
    case "number":
        if kindSchema.GetDefault() != nil {
            val, _ := strconv.Atoi(string(kindSchema.GetDefault().Value.Value))
            return int64(val)
        }
        if kindSchema.GetExample() != nil {
            val, _ := strconv.Atoi(string(kindSchema.GetExample().GetValue().Value))
            return int64(val)
        }
        return int64(0)
    case "boolean":
        if kindSchema.GetDefault() != nil {
            return string(kindSchema.GetDefault().Value.Value) == "true"
        }
        if kindSchema.GetExample() != nil {
            return string(kindSchema.GetExample().GetValue().Value) == "true"
        }
        return false
    }

    return nil
}
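An illustrative call (a sketch under assumptions, not part of the commit) showing how ValidateResource surfaces a schema violation; it assumes the package's init() has loaded the built-in swagger document and that "Pod" resolves to io.k8s.api.core.v1.Pod:

    package main

    import (
        "fmt"

        "github.com/nirmata/kyverno/pkg/openapi"
        "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    )

    func main() {
        pod := unstructured.Unstructured{Object: map[string]interface{}{
            "apiVersion": "v1",
            "kind":       "Pod",
            "metadata":   map[string]interface{}{"name": "demo"},
            // hostNetwork must be a boolean; a string should fail validation.
            "spec": map[string]interface{}{"hostNetwork": "yes"},
        }}
        if err := openapi.ValidateResource(pod, "Pod"); err != nil {
            fmt.Println(err)
        }
    }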
pkg/openapi/validation_test.go (new file, 65 lines)

@@ -0,0 +1,65 @@
package openapi

import (
    "encoding/json"
    "testing"

    v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

func Test_ValidateMutationPolicy(t *testing.T) {

    tcs := []struct {
        description string
        policy      []byte
        errMessage  string
    }{
        {
            description: "Policy with mutating imagePullPolicy Overlay",
            policy:      []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"set-image-pull-policy-2"},"spec":{"rules":[{"name":"set-image-pull-policy-2","match":{"resources":{"kinds":["Pod"]}},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","imagePullPolicy":"Always"}]}}}}]}}`),
        },
        {
            description: "Policy with mutating imagePullPolicy Overlay, field does not exist",
            policy:      []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"set-image-pull-policy-2"},"spec":{"rules":[{"name":"set-image-pull-policy-2","match":{"resources":{"kinds":["Pod"]}},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","nonExistantField":"Always"}]}}}}]}}`),
            errMessage:  `ValidationError(io.k8s.api.core.v1.Pod.spec.containers[0]): unknown field "nonExistantField" in io.k8s.api.core.v1.Container`,
        },
        {
            description: "Policy with mutating imagePullPolicy Overlay, type of value is different (does not throw error since all numbers are also strings according to swagger)",
            policy:      []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"set-image-pull-policy-2"},"spec":{"rules":[{"name":"set-image-pull-policy-2","match":{"resources":{"kinds":["Pod"]}},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","imagePullPolicy":80}]}}}}]}}`),
        },
        {
            description: "Policy with patches",
            policy:      []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"policy-endpoints"},"spec":{"rules":[{"name":"pEP","match":{"resources":{"kinds":["Endpoints"],"selector":{"matchLabels":{"label":"test"}}}},"mutate":{"patches":[{"path":"/subsets/0/ports/0/port","op":"replace","value":9663},{"path":"/metadata/labels/isMutated","op":"add","value":"true"}]}}]}}`),
        },
        {
            description: "Policy with patches, value converted from number to string",
            policy:      []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"policy-endpoints"},"spec":{"rules":[{"name":"pEP","match":{"resources":{"kinds":["Endpoints"],"selector":{"matchLabels":{"label":"test"}}}},"mutate":{"patches":[{"path":"/subsets/0/ports/0/port","op":"replace","value":"9663"},{"path":"/metadata/labels/isMutated","op":"add","value":"true"}]}}]}}`),
            errMessage:  `ValidationError(io.k8s.api.core.v1.Endpoints.subsets[0].ports[0].port): invalid type for io.k8s.api.core.v1.EndpointPort.port: got "string", expected "integer"`,
        },
        {
            description: "Policy where boolean is been converted to number",
            policy:      []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"mutate-pod-disable-automoutingapicred"},"spec":{"rules":[{"name":"pod-disable-automoutingapicred","match":{"resources":{"kinds":["Pod"]}},"mutate":{"overlay":{"spec":{"(serviceAccountName)":"*","automountServiceAccountToken":80}}}}]}}`),
            errMessage:  `ValidationError(io.k8s.api.core.v1.Pod.spec.automountServiceAccountToken): invalid type for io.k8s.api.core.v1.PodSpec.automountServiceAccountToken: got "integer", expected "boolean"`,
        },
        {
            description: "Dealing with nested variables",
            policy:      []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"add-ns-access-controls","annotations":{"policies.kyverno.io/category":"Workload Isolation","policies.kyverno.io/description":"Create roles and role bindings for a new namespace"}},"spec":{"background":false,"rules":[{"name":"add-sa-annotation","match":{"resources":{"kinds":["Namespace"]}},"mutate":{"overlay":{"metadata":{"annotations":{"nirmata.io/ns-creator":"{{serviceAccountName-{{something}}}}"}}}}},{"name":"generate-owner-role","match":{"resources":{"kinds":["Namespace"]}},"preconditions":[{"key":"{{request.userInfo.username}}","operator":"NotEqual","value":""},{"key":"{{serviceAccountName}}","operator":"NotEqual","value":""},{"key":"{{serviceAccountNamespace}}","operator":"NotEqual","value":""}],"generate":{"kind":"ClusterRole","name":"ns-owner-{{request.object.metadata.name{{something}}}}-{{request.userInfo.username}}","data":{"metadata":{"annotations":{"nirmata.io/ns-creator":"{{serviceAccountName}}"}},"rules":[{"apiGroups":[""],"resources":["namespaces"],"verbs":["delete"],"resourceNames":["{{request.object.metadata.name}}"]}]}}},{"name":"generate-owner-role-binding","match":{"resources":{"kinds":["Namespace"]}},"preconditions":[{"key":"{{request.userInfo.username}}","operator":"NotEqual","value":""},{"key":"{{serviceAccountName}}","operator":"NotEqual","value":""},{"key":"{{serviceAccountNamespace}}","operator":"NotEqual","value":""}],"generate":{"kind":"ClusterRoleBinding","name":"ns-owner-{{request.object.metadata.name}}-{{request.userInfo.username}}-binding","data":{"metadata":{"annotations":{"nirmata.io/ns-creator":"{{serviceAccountName}}"}},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"ns-owner-{{request.object.metadata.name}}-{{request.userInfo.username}}"},"subjects":[{"kind":"ServiceAccount","name":"{{serviceAccountName}}","namespace":"{{serviceAccountNamespace}}"}]}}},{"name":"generate-admin-role-binding","match":{"resources":{"kinds":["Namespace"]}},"preconditions":[{"key":"{{request.userInfo.username}}","operator":"NotEqual","value":""},{"key":"{{serviceAccountName}}","operator":"NotEqual","value":""},{"key":"{{serviceAccountNamespace}}","operator":"NotEqual","value":""}],"generate":{"kind":"RoleBinding","name":"ns-admin-{{request.object.metadata.name}}-{{request.userInfo.username}}-binding","namespace":"{{request.object.metadata.name}}","data":{"metadata":{"annotations":{"nirmata.io/ns-creator":"{{serviceAccountName}}"}},"roleRef":{"apiGroup":"rbac.authorization.k8s.io","kind":"ClusterRole","name":"admin"},"subjects":[{"kind":"ServiceAccount","name":"{{serviceAccountName}}","namespace":"{{serviceAccountNamespace}}"}]}}}]}}`),
        },
    }

    for i, tc := range tcs {
        policy := v1.ClusterPolicy{}
        _ = json.Unmarshal(tc.policy, &policy)

        var errMessage string
        err := ValidatePolicyMutation(policy)
        if err != nil {
            errMessage = err.Error()
        }

        if errMessage != tc.errMessage {
            t.Errorf("\nTestcase [%v] failed:\nExpected Error: %v\nGot Error: %v", i+1, tc.errMessage, errMessage)
        }
    }

}
@@ -20,7 +20,7 @@ type Validation interface {
 // - Mutate
 // - Validation
 // - Generate
-func validateActions(idx int, rule kyverno.Rule, client *dclient.Client) error {
+func validateActions(idx int, rule kyverno.Rule, client *dclient.Client, mock bool) error {
 	var checker Validation

 	// Mutate
@@ -41,9 +41,19 @@ func validateActions(idx int, rule kyverno.Rule, client *dclient.Client) error {

 	// Generate
 	if rule.HasGenerate() {
-		checker = generate.NewGenerateFactory(client, rule.Generation, log.Log)
-		if path, err := checker.Validate(); err != nil {
-			return fmt.Errorf("path: spec.rules[%d].generate.%s.: %v", idx, path, err)
-		}
+		//TODO: this check is there to support offline validations
+		// generate uses selfSubjectReviews to verify actions
+		// this needs to be modified to use different implementations for online and offline mode
+		if mock {
+			checker = generate.NewFakeGenerate(rule.Generation)
+			if path, err := checker.Validate(); err != nil {
+				return fmt.Errorf("path: spec.rules[%d].generate.%s.: %v", idx, path, err)
+			}
+		} else {
+			checker = generate.NewGenerateFactory(client, rule.Generation, log.Log)
+			if path, err := checker.Validate(); err != nil {
+				return fmt.Errorf("path: spec.rules[%d].generate.%s.: %v", idx, path, err)
+			}
+		}
 	}
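As a usage note, this mock switch is what allows policies to be validated without a live cluster. A minimal sketch, assuming a parsed ClusterPolicy is already in hand (the function name validateOffline is invented for illustration; the signature of Validate is the one changed later in this commit):

package main

import (
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/policy"
)

// validateOffline runs policy validation with no live client:
// mock=true routes generate-rule checks through generate.NewFakeGenerate,
// so no selfSubjectReview calls ever reach an API server.
func validateOffline(p kyverno.ClusterPolicy) error {
	return policy.Validate(p, nil, true)
}

The unit tests later in this commit call Validate(policy, nil, true) in exactly this way.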
@@ -19,46 +19,14 @@ import (

 // applyPolicy applies policy on a resource
 //TODO: generation rules
-func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface, log logr.Logger) (responses []response.EngineResponse) {
-	logger := log.WithValues("kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
+func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, logger logr.Logger) (responses []response.EngineResponse) {
 	startTime := time.Now()
-	var policyStats []PolicyStat

 	logger.Info("start applying policy", "startTime", startTime)
 	defer func() {
 		logger.Info("finished applying policy", "processingTime", time.Since(startTime))
 	}()

-	// gather stats from the engine response
-	gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
-		ps := PolicyStat{}
-		ps.PolicyName = policyName
-		ps.Stats.MutationExecutionTime = policyResponse.ProcessingTime
-		ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
-		// capture rule level stats
-		for _, rule := range policyResponse.Rules {
-			rs := RuleStatinfo{}
-			rs.RuleName = rule.Name
-			rs.ExecutionTime = rule.RuleStats.ProcessingTime
-			if rule.Success {
-				rs.RuleAppliedCount++
-			} else {
-				rs.RulesFailedCount++
-			}
-			if rule.Patches != nil {
-				rs.MutationCount++
-			}
-			ps.Stats.Rules = append(ps.Stats.Rules, rs)
-		}
-		policyStats = append(policyStats, ps)
-	}
-	// send stats for aggregation
-	sendStat := func(blocked bool) {
-		for _, stat := range policyStats {
-			stat.Stats.ResourceBlocked = utils.Btoi(blocked)
-			//SEND
-			policyStatus.SendStat(stat)
-		}
-	}
 	var engineResponses []response.EngineResponse
 	var engineResponse response.EngineResponse
 	var err error
@@ -67,27 +35,20 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure
 	ctx.AddResource(transformResource(resource))

 	//MUTATION
-	engineResponse, err = mutation(policy, resource, policyStatus, ctx, logger)
+	engineResponse, err = mutation(policy, resource, ctx, logger)
 	engineResponses = append(engineResponses, engineResponse)
 	if err != nil {
 		logger.Error(err, "failed to process mutation rule")
 	}
-	gatherStat(policy.Name, engineResponse.PolicyResponse)
-	//send stats
-	sendStat(false)

 	//VALIDATION
 	engineResponse = engine.Validate(engine.PolicyContext{Policy: policy, Context: ctx, NewResource: resource})
 	engineResponses = append(engineResponses, engineResponse)
-	// gather stats
-	gatherStat(policy.Name, engineResponse.PolicyResponse)
-	//send stats
-	sendStat(false)

 	//TODO: GENERATION
 	return engineResponses
 }
-func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface, ctx context.EvalInterface, log logr.Logger) (response.EngineResponse, error) {
+func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, ctx context.EvalInterface, log logr.Logger) (response.EngineResponse, error) {

 	engineResponse := engine.Mutate(engine.PolicyContext{Policy: policy, NewResource: resource, Context: ctx})
 	if !engineResponse.IsSuccesful() {
@@ -1,7 +1,6 @@
 package policy

 import (
-	"reflect"
 	"time"

 	"github.com/go-logr/logr"
@@ -66,8 +65,6 @@ type PolicyController struct {
 	rm resourceManager
 	// helpers to validate against current loaded configuration
 	configHandler config.Interface
-	// receives stats and aggregates details
-	statusAggregator *PolicyStatusAggregator
 	// store to hold policy meta data for faster lookup
 	pMetaStore policystore.UpdateInterface
 	// policy violation generator
|
|||
//TODO: pass the time in seconds instead of converting it internally
|
||||
pc.rm = NewResourceManager(30)
|
||||
|
||||
// aggregator
|
||||
// pc.statusAggregator = NewPolicyStatAggregator(kyvernoClient, pInformer)
|
||||
pc.statusAggregator = NewPolicyStatAggregator(pc.log, kyvernoClient)
|
||||
|
||||
return &pc, nil
|
||||
}
|
||||
|
||||
|
@@ -270,9 +263,6 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
 	for i := 0; i < workers; i++ {
 		go wait.Until(pc.worker, time.Second, stopCh)
 	}
-	// policy status aggregator
-	//TODO: workers required for aggergation
-	pc.statusAggregator.Run(1, stopCh)
 	<-stopCh
 }
@@ -336,8 +326,6 @@ func (pc *PolicyController) syncPolicy(key string) error {
 		if err := pc.deleteNamespacedPolicyViolations(key); err != nil {
 			return err
 		}
-		// remove the recorded stats for the policy
-		pc.statusAggregator.RemovePolicyStats(key)

 		// remove webhook configurations if there are no policies
 		if err := pc.removeResourceWebhookConfiguration(); err != nil {
@@ -352,23 +340,12 @@ func (pc *PolicyController) syncPolicy(key string) error {

 	pc.resourceWebhookWatcher.RegisterResourceWebhook()

-	// cluster policy violations
-	cpvList, err := pc.getClusterPolicyViolationForPolicy(policy.Name)
-	if err != nil {
-		return err
-	}
-	// namespaced policy violation
-	nspvList, err := pc.getNamespacedPolicyViolationForPolicy(policy.Name)
-	if err != nil {
-		return err
-	}
-
 	// process policies on existing resources
 	engineResponses := pc.processExistingResources(*policy)
 	// report errors
 	pc.cleanupAndReport(engineResponses)
-	// sync active
-	return pc.syncStatusOnly(policy, cpvList, nspvList)
+
+	return nil
 }

 func (pc *PolicyController) deleteClusterPolicyViolations(policy string) error {
@@ -397,39 +374,6 @@ func (pc *PolicyController) deleteNamespacedPolicyViolations(policy string) erro
 	return nil
 }

-//syncStatusOnly updates the policy status subresource
-func (pc *PolicyController) syncStatusOnly(p *kyverno.ClusterPolicy, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.PolicyViolation) error {
-	newStatus := pc.calculateStatus(p.Name, pvList, nspvList)
-	if reflect.DeepEqual(newStatus, p.Status) {
-		// no update to status
-		return nil
-	}
-	// update status
-	newPolicy := p
-	newPolicy.Status = newStatus
-	_, err := pc.kyvernoClient.KyvernoV1().ClusterPolicies().UpdateStatus(newPolicy)
-	return err
-}
-
-func (pc *PolicyController) calculateStatus(policyName string, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.PolicyViolation) kyverno.PolicyStatus {
-	violationCount := len(pvList) + len(nspvList)
-	status := kyverno.PolicyStatus{
-		ViolationCount: violationCount,
-	}
-	// get stats
-	stats := pc.statusAggregator.GetPolicyStats(policyName)
-	if !reflect.DeepEqual(stats, (PolicyStatInfo{})) {
-		status.RulesAppliedCount = stats.RulesAppliedCount
-		status.ResourcesBlockedCount = stats.ResourceBlocked
-		status.AvgExecutionTimeMutation = stats.MutationExecutionTime.String()
-		status.AvgExecutionTimeValidation = stats.ValidationExecutionTime.String()
-		status.AvgExecutionTimeGeneration = stats.GenerationExecutionTime.String()
-		// update rule stats
-		status.Rules = convertRules(stats.Rules)
-	}
-	return status
-}
-
 func (pc *PolicyController) getNamespacedPolicyViolationForPolicy(policy string) ([]*kyverno.PolicyViolation, error) {
 	policySelector, err := buildPolicyLabel(policy)
 	if err != nil {
@@ -465,19 +409,3 @@ func (r RealPVControl) DeleteClusterPolicyViolation(name string) error {
 func (r RealPVControl) DeleteNamespacedPolicyViolation(ns, name string) error {
 	return r.Client.KyvernoV1().PolicyViolations(ns).Delete(name, &metav1.DeleteOptions{})
 }
-
-// convertRules converts the internal rule stats to one used in policy.stats struct
-func convertRules(rules []RuleStatinfo) []kyverno.RuleStats {
-	var stats []kyverno.RuleStats
-	for _, r := range rules {
-		stat := kyverno.RuleStats{
-			Name:           r.RuleName,
-			ExecutionTime:  r.ExecutionTime.String(),
-			AppliedCount:   r.RuleAppliedCount,
-			ViolationCount: r.RulesFailedCount,
-			MutationCount:  r.MutationCount,
-		}
-		stats = append(stats, stat)
-	}
-	return stats
-}
@@ -6,6 +6,7 @@ import (
 	"time"

 	"github.com/go-logr/logr"
+	"github.com/golang/glog"
 	"github.com/minio/minio/pkg/wildcard"
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	"github.com/nirmata/kyverno/pkg/config"
@@ -39,8 +40,8 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
 		}

 		// apply the policy on each
-		logger.V(4).Info("apply policy on resource", "policyResourceVersion", policy.ResourceVersion, "resourceResourceVersion", resource.GetResourceVersion(), "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
-		engineResponse := applyPolicy(policy, resource, pc.statusAggregator, logger)
+		glog.V(4).Infof("apply policy %s with resource version %s on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion())
+		engineResponse := applyPolicy(policy, resource, logger)
 		// get engine response for mutation & validation independently
 		engineResponses = append(engineResponses, engineResponse...)
 		// post-processing, register the resource as processed
@@ -19,6 +19,10 @@ func (pc *PolicyController) cleanupAndReport(engineResponses []response.EngineRe
 	pc.eventGen.Add(eventInfos...)
 	// create policy violation
 	pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
+	for i := range pvInfos {
+		pvInfos[i].FromSync = true
+	}
+
 	pc.pvGenerator.Add(pvInfos...)
 	// cleanup existing violations if any
 	// if there is any error in clean up, we don't re-queue the resource
@@ -1,217 +0,0 @@
package policy

import (
	"sync"
	"time"

	"github.com/go-logr/logr"
	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
)

//PolicyStatusAggregator stores information abt aggregation
type PolicyStatusAggregator struct {
	// time since we start aggregating the stats
	startTime time.Time
	// channel to receive stats
	ch chan PolicyStat
	//TODO: lock based on key, possibly sync.Map ?
	//sync RW for policyData
	mux sync.RWMutex
	// stores aggregated stats for policy
	policyData map[string]PolicyStatInfo
	// logging implementation
	log logr.Logger
}

//NewPolicyStatAggregator returns a new policy status
func NewPolicyStatAggregator(log logr.Logger, client *kyvernoclient.Clientset) *PolicyStatusAggregator {
	psa := PolicyStatusAggregator{
		startTime:  time.Now(),
		ch:         make(chan PolicyStat),
		policyData: map[string]PolicyStatInfo{},
		log:        log,
	}
	return &psa
}

//Run begins aggregator
func (psa *PolicyStatusAggregator) Run(workers int, stopCh <-chan struct{}) {
	logger := psa.log
	defer utilruntime.HandleCrash()
	logger.Info("Started aggregator for policy status stats")
	defer func() {
		logger.Info("Shutting down aggregator for policy status stats")
	}()
	for i := 0; i < workers; i++ {
		go wait.Until(psa.process, time.Second, stopCh)
	}
	<-stopCh
}

func (psa *PolicyStatusAggregator) process() {
	// As mutation and validation are handled separately
	// ideally we need to combine the execution time from both for a policy
	// but its tricky to detect here the type of rules policy contains
	// so we dont combine the results, but instead compute the execution time for
	// mutation & validation rules separately
	for r := range psa.ch {
		psa.log.V(4).Info("received policy stats", "stats", r)
		psa.aggregate(r)
	}
}

func (psa *PolicyStatusAggregator) aggregate(ps PolicyStat) {
	logger := psa.log.WithValues("policy", ps.PolicyName)
	func() {
		logger.V(4).Info("write lock update policy")
		psa.mux.Lock()
	}()
	defer func() {
		logger.V(4).Info("write unlock update policy")
		psa.mux.Unlock()
	}()

	if len(ps.Stats.Rules) == 0 {
		logger.V(4).Info("ignoring stats, as no rule was applied")
		return
	}

	info, ok := psa.policyData[ps.PolicyName]
	if !ok {
		psa.policyData[ps.PolicyName] = ps.Stats
		logger.V(4).Info("added stats for policy")
		return
	}
	// aggregate policy information
	info.RulesAppliedCount = info.RulesAppliedCount + ps.Stats.RulesAppliedCount
	if ps.Stats.ResourceBlocked == 1 {
		info.ResourceBlocked++
	}
	var zeroDuration time.Duration
	if info.MutationExecutionTime != zeroDuration {
		info.MutationExecutionTime = (info.MutationExecutionTime + ps.Stats.MutationExecutionTime) / 2
		logger.V(4).Info("updated avg mutation time", "updatedTime", info.MutationExecutionTime)
	} else {
		info.MutationExecutionTime = ps.Stats.MutationExecutionTime
	}
	if info.ValidationExecutionTime != zeroDuration {
		info.ValidationExecutionTime = (info.ValidationExecutionTime + ps.Stats.ValidationExecutionTime) / 2
		logger.V(4).Info("updated avg validation time", "updatedTime", info.ValidationExecutionTime)
	} else {
		info.ValidationExecutionTime = ps.Stats.ValidationExecutionTime
	}
	if info.GenerationExecutionTime != zeroDuration {
		info.GenerationExecutionTime = (info.GenerationExecutionTime + ps.Stats.GenerationExecutionTime) / 2
		logger.V(4).Info("updated avg generation time", "updatedTime", info.GenerationExecutionTime)
	} else {
		info.GenerationExecutionTime = ps.Stats.GenerationExecutionTime
	}
	// aggregate rule details
	info.Rules = aggregateRules(info.Rules, ps.Stats.Rules)
	// update
	psa.policyData[ps.PolicyName] = info
	logger.V(4).Info("updated stats for policy")
}

func aggregateRules(old []RuleStatinfo, update []RuleStatinfo) []RuleStatinfo {
	var zeroDuration time.Duration
	searchRule := func(list []RuleStatinfo, key string) *RuleStatinfo {
		for _, v := range list {
			if v.RuleName == key {
				return &v
			}
		}
		return nil
	}
	newRules := []RuleStatinfo{}
	// search for new rules in old rules and update it
	for _, updateR := range update {
		if updateR.ExecutionTime != zeroDuration {
			if rule := searchRule(old, updateR.RuleName); rule != nil {
				rule.ExecutionTime = (rule.ExecutionTime + updateR.ExecutionTime) / 2
				rule.RuleAppliedCount = rule.RuleAppliedCount + updateR.RuleAppliedCount
				rule.RulesFailedCount = rule.RulesFailedCount + updateR.RulesFailedCount
				rule.MutationCount = rule.MutationCount + updateR.MutationCount
				newRules = append(newRules, *rule)
			} else {
				newRules = append(newRules, updateR)
			}
		}
	}
	return newRules
}

//GetPolicyStats returns the policy stats
func (psa *PolicyStatusAggregator) GetPolicyStats(policyName string) PolicyStatInfo {
	logger := psa.log.WithValues("policy", policyName)
	func() {
		logger.V(4).Info("read lock update policy")
		psa.mux.RLock()
	}()
	defer func() {
		logger.V(4).Info("read unlock update policy")
		psa.mux.RUnlock()
	}()
	logger.V(4).Info("read stats for policy")
	return psa.policyData[policyName]
}

//RemovePolicyStats removes policy stats records
func (psa *PolicyStatusAggregator) RemovePolicyStats(policyName string) {
	logger := psa.log.WithValues("policy", policyName)
	func() {
		logger.V(4).Info("write lock update policy")
		psa.mux.Lock()
	}()
	defer func() {
		logger.V(4).Info("write unlock update policy")
		psa.mux.Unlock()
	}()
	logger.V(4).Info("removing stats for policy")
	delete(psa.policyData, policyName)
}

//PolicyStatusInterface provides methods to modify policyStatus
type PolicyStatusInterface interface {
	SendStat(stat PolicyStat)
	// UpdateViolationCount(policyName string, pvList []*kyverno.PolicyViolation) error
}

//PolicyStat stores stats for policy
type PolicyStat struct {
	PolicyName string
	Stats      PolicyStatInfo
}

//PolicyStatInfo provides statistics for policy
type PolicyStatInfo struct {
	MutationExecutionTime   time.Duration
	ValidationExecutionTime time.Duration
	GenerationExecutionTime time.Duration
	RulesAppliedCount       int
	ResourceBlocked         int
	Rules                   []RuleStatinfo
}

//RuleStatinfo provides statistics for rule
type RuleStatinfo struct {
	RuleName         string
	ExecutionTime    time.Duration
	RuleAppliedCount int
	RulesFailedCount int
	MutationCount    int
}

//SendStat sends the stat information for aggregation
func (psa *PolicyStatusAggregator) SendStat(stat PolicyStat) {
	psa.log.V(4).Info("sending policy stats", "stat", stat)
	// Send over channel
	psa.ch <- stat
}

//GetPolicyStatusAggregator returns interface to send policy status stats
func (pc *PolicyController) GetPolicyStatusAggregator() PolicyStatusInterface {
	return pc.statusAggregator
}
@@ -6,6 +6,8 @@ import (
 	"reflect"
 	"strings"

+	"github.com/nirmata/kyverno/pkg/openapi"
+
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	rbacv1 "k8s.io/api/rbac/v1"
@@ -15,7 +17,7 @@ import (
 // Validate does some initial check to verify some conditions
 // - One operation per rule
 // - ResourceDescription mandatory checks
-func Validate(p kyverno.ClusterPolicy, client *dclient.Client) error {
+func Validate(p kyverno.ClusterPolicy, client *dclient.Client, mock bool) error {
 	if path, err := validateUniqueRuleName(p); err != nil {
 		return fmt.Errorf("path: spec.%s: %v", path, err)
 	}
@@ -52,7 +54,7 @@ func Validate(p kyverno.ClusterPolicy, client *dclient.Client) error {
 		// - Mutate
 		// - Validate
 		// - Generate
-		if err := validateActions(i, rule, client); err != nil {
+		if err := validateActions(i, rule, client, mock); err != nil {
 			return err
 		}

@@ -66,6 +68,10 @@ func Validate(p kyverno.ClusterPolicy, client *dclient.Client) error {
 		}
 	}

+	if err := openapi.ValidatePolicyMutation(p); err != nil {
+		return err
+	}
+
 	return nil
 }

@@ -373,7 +373,7 @@ func Test_Validate_Policy(t *testing.T) {
 	err := json.Unmarshal(rawPolicy, &policy)
 	assert.NilError(t, err)

-	err = Validate(policy, nil)
+	err = Validate(policy, nil, true)
 	assert.NilError(t, err)
 }

@@ -519,7 +519,7 @@ func Test_Validate_ErrorFormat(t *testing.T) {
 	err := json.Unmarshal(rawPolicy, &policy)
 	assert.NilError(t, err)

-	err = Validate(policy, nil)
+	err = Validate(policy, nil, true)
 	assert.Assert(t, err != nil)
 }


31
pkg/policystatus/keyToMutex.go
Normal file
@@ -0,0 +1,31 @@
package policystatus

// keyToMutex allows the status of different policies to be updated
// at the same time, while ensuring that updates to the status of the
// same policy are applied one at a time.

import "sync"

type keyToMutex struct {
	mu    sync.RWMutex
	keyMu map[string]*sync.RWMutex
}

func newKeyToMutex() *keyToMutex {
	return &keyToMutex{
		mu:    sync.RWMutex{},
		keyMu: make(map[string]*sync.RWMutex),
	}
}

func (k *keyToMutex) Get(key string) *sync.RWMutex {
	k.mu.Lock()
	defer k.mu.Unlock()
	mutex := k.keyMu[key]
	if mutex == nil {
		mutex = &sync.RWMutex{}
		k.keyMu[key] = mutex
	}

	return mutex
}
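A minimal sketch of the per-key locking pattern this enables (the helper illustrateKeyLocking and its update callback are invented for illustration, not part of the commit): goroutines touching different keys run concurrently, while goroutines sharing a key serialize.

package policystatus

import "sync"

// illustrateKeyLocking is a hypothetical helper showing how keyToMutex
// is meant to be used: Get returns one *sync.RWMutex per key, so only
// updates to the same key contend with each other.
func illustrateKeyLocking(keys []string, update func(key string)) {
	km := newKeyToMutex()
	var wg sync.WaitGroup
	for _, key := range keys {
		wg.Add(1)
		go func(k string) {
			defer wg.Done()
			mu := km.Get(k) // per-key mutex
			mu.Lock()       // blocks only writers of the same key
			defer mu.Unlock()
			update(k)
		}(key)
	}
	wg.Wait()
}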

146
pkg/policystatus/main.go
Normal file
@@ -0,0 +1,146 @@
package policystatus

import (
	"encoding/json"
	"sync"
	"time"

	"github.com/golang/glog"

	"k8s.io/apimachinery/pkg/util/wait"

	"github.com/nirmata/kyverno/pkg/client/clientset/versioned"

	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

// The policy status implementation works in the following way:
// the package maintains a cache of the status of each policy, and
// every x units of time the status of a policy is updated using
// the data from the cache.
// The sync exposes a listener which accepts a statusUpdater
// interface that dictates how the status should be updated.
// The status is updated by a worker that receives the interface
// on a channel.
// The worker then updates the current status using the methods
// exposed by the interface.
// The current implementation is designed to be thread-safe, with
// optimised locking for each policy.

// statusUpdater defines a type that has a method which
// updates the given status
type statusUpdater interface {
	PolicyName() string
	UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus
}

type policyStore interface {
	Get(policyName string) (*v1.ClusterPolicy, error)
}

type Listener chan statusUpdater

func (l Listener) Send(s statusUpdater) {
	l <- s
}

// Sync is the object used to initialize the policyStatus sync.
// It can be considered the parent object, since it has access to
// all the persistent data in this package.
type Sync struct {
	cache       *cache
	Listener    Listener
	client      *versioned.Clientset
	policyStore policyStore
}

type cache struct {
	dataMu     sync.RWMutex
	data       map[string]v1.PolicyStatus
	keyToMutex *keyToMutex
}

func NewSync(c *versioned.Clientset, p policyStore) *Sync {
	return &Sync{
		cache: &cache{
			dataMu:     sync.RWMutex{},
			data:       make(map[string]v1.PolicyStatus),
			keyToMutex: newKeyToMutex(),
		},
		client:      c,
		policyStore: p,
		Listener:    make(chan statusUpdater, 20),
	}
}

func (s *Sync) Run(workers int, stopCh <-chan struct{}) {
	for i := 0; i < workers; i++ {
		go s.updateStatusCache(stopCh)
	}

	wait.Until(s.updatePolicyStatus, 2*time.Second, stopCh)
	<-stopCh
}

// updateStatusCache is a worker which updates the current status
// using the statusUpdater interface
func (s *Sync) updateStatusCache(stopCh <-chan struct{}) {
	for {
		select {
		case statusUpdater := <-s.Listener:
			s.cache.keyToMutex.Get(statusUpdater.PolicyName()).Lock()

			s.cache.dataMu.RLock()
			status, exist := s.cache.data[statusUpdater.PolicyName()]
			s.cache.dataMu.RUnlock()
			if !exist {
				policy, _ := s.policyStore.Get(statusUpdater.PolicyName())
				if policy != nil {
					status = policy.Status
				}
			}

			updatedStatus := statusUpdater.UpdateStatus(status)

			s.cache.dataMu.Lock()
			s.cache.data[statusUpdater.PolicyName()] = updatedStatus
			s.cache.dataMu.Unlock()

			s.cache.keyToMutex.Get(statusUpdater.PolicyName()).Unlock()
			oldStatus, _ := json.Marshal(status)
			newStatus, _ := json.Marshal(updatedStatus)

			glog.V(4).Infof("\nupdated status of policy - %v\noldStatus:\n%v\nnewStatus:\n%v\n", statusUpdater.PolicyName(), string(oldStatus), string(newStatus))
		case <-stopCh:
			return
		}
	}
}

// updatePolicyStatus updates the status in the policy resource definition
// from the status cache, syncing them
func (s *Sync) updatePolicyStatus() {
	s.cache.dataMu.Lock()
	var nameToStatus = make(map[string]v1.PolicyStatus, len(s.cache.data))
	for k, v := range s.cache.data {
		nameToStatus[k] = v
	}
	s.cache.dataMu.Unlock()

	for policyName, status := range nameToStatus {
		policy, err := s.policyStore.Get(policyName)
		if err != nil {
			continue
		}
		policy.Status = status
		_, err = s.client.KyvernoV1().ClusterPolicies().UpdateStatus(policy)
		if err != nil {
			s.cache.dataMu.Lock()
			delete(s.cache.data, policyName)
			s.cache.dataMu.Unlock()
			glog.V(4).Info(err)
		}
	}
}
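To make the listener contract concrete, a hedged sketch of a component feeding this sync loop (the admissionCounter type and the policy name are invented for illustration; the real updaters added in this commit are violationCount, generateStats, and mutateStats):

package policystatus

import v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"

// admissionCounter is a hypothetical statusUpdater: each time a worker
// receives it from the Listener, it bumps RulesAppliedCount for one policy.
type admissionCounter struct {
	policy string
}

func (a admissionCounter) PolicyName() string { return a.policy }

func (a admissionCounter) UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus {
	status.RulesAppliedCount++
	return status
}

// Hypothetical wiring, using only names defined in this file:
//	sync := NewSync(kyvernoClientset, metaStore)
//	go sync.Run(1, stopCh)
//	sync.Listener.Send(admissionCounter{policy: "require-labels"})

Because Send accepts the (unexported) statusUpdater interface, callers in other packages can pass their own concrete types without naming the interface; the test below exercises exactly this flow.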

50
pkg/policystatus/status_test.go
Normal file
@@ -0,0 +1,50 @@
package policystatus

import (
	"encoding/json"
	"testing"
	"time"

	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

type dummyStore struct {
}

func (d dummyStore) Get(policyName string) (*v1.ClusterPolicy, error) {
	return &v1.ClusterPolicy{}, nil
}

type dummyStatusUpdater struct {
}

func (d dummyStatusUpdater) UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus {
	status.RulesAppliedCount++
	return status
}

func (d dummyStatusUpdater) PolicyName() string {
	return "policy1"
}

func TestKeyToMutex(t *testing.T) {
	expectedCache := `{"policy1":{"averageExecutionTime":"","rulesAppliedCount":100}}`

	stopCh := make(chan struct{})
	s := NewSync(nil, dummyStore{})
	for i := 0; i < 100; i++ {
		go s.updateStatusCache(stopCh)
	}

	for i := 0; i < 100; i++ {
		go s.Listener.Send(dummyStatusUpdater{})
	}

	<-time.After(time.Second * 3)
	stopCh <- struct{}{}

	cacheRaw, _ := json.Marshal(s.cache.data)
	if string(cacheRaw) != expectedCache {
		t.Errorf("\nTestcase Failed\nGot:\n%v\nExpected:\n%v\n", string(cacheRaw), expectedCache)
	}
}
@@ -100,6 +100,10 @@ func (ps *PolicyStore) ListAll() ([]kyverno.ClusterPolicy, error) {
 	return policies, nil
 }

+func (ps *PolicyStore) Get(policyName string) (*kyverno.ClusterPolicy, error) {
+	return ps.pLister.Get(policyName)
+}
+
 //UnRegister Remove policy information
 func (ps *PolicyStore) UnRegister(policy kyverno.ClusterPolicy) error {
 	ps.mu.Lock()
@@ -9,6 +9,7 @@ import (
 	kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
 	client "github.com/nirmata/kyverno/pkg/dclient"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -22,17 +23,21 @@ type clusterPV struct {
 	kyvernoInterface kyvernov1.KyvernoV1Interface
 	// logger
 	log logr.Logger
+	// update policy stats with violationCount
+	policyStatusListener policystatus.Listener
 }

 func newClusterPV(log logr.Logger, dclient *client.Client,
 	cpvLister kyvernolister.ClusterPolicyViolationLister,
 	kyvernoInterface kyvernov1.KyvernoV1Interface,
+	policyStatus policystatus.Listener,
 ) *clusterPV {
 	cpv := clusterPV{
-		dclient:          dclient,
-		cpvLister:        cpvLister,
-		kyvernoInterface: kyvernoInterface,
-		log:              log,
+		dclient:              dclient,
+		cpvLister:            cpvLister,
+		kyvernoInterface:     kyvernoInterface,
+		log:                  log,
+		policyStatusListener: policyStatus,
 	}
 	return &cpv
 }
@@ -98,6 +103,11 @@ func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
 		logger.Error(err, "failed to create cluster policy violation")
 		return err
 	}
+
+	if newPv.Annotations["fromSync"] != "true" {
+		cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
+	}
+
 	logger.Info("cluster policy violation created")
 	return nil
 }
@@ -121,5 +131,8 @@ func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) err
 	}
 	logger.Info("cluster policy violation created")

+	if newPv.Annotations["fromSync"] != "true" {
+		cpv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
+	}
 	return nil
 }
@@ -6,6 +6,7 @@ import (

 	backoff "github.com/cenkalti/backoff"
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
+	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	client "github.com/nirmata/kyverno/pkg/dclient"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -70,3 +71,27 @@ func converLabelToSelector(labelMap map[string]string) (labels.Selector, error)

 	return policyViolationSelector, nil
 }
+
+type violationCount struct {
+	policyName    string
+	violatedRules []v1.ViolatedRule
+}
+
+func (vc violationCount) PolicyName() string {
+	return vc.policyName
+}
+
+func (vc violationCount) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
+
+	var ruleNameToViolations = make(map[string]int)
+	for _, rule := range vc.violatedRules {
+		ruleNameToViolations[rule.Name]++
+	}
+
+	for i := range status.Rules {
+		status.ViolationCount += ruleNameToViolations[status.Rules[i].Name]
+		status.Rules[i].ViolationCount += ruleNameToViolations[status.Rules[i].Name]
+	}
+
+	return status
+}
@@ -14,6 +14,7 @@ import (
 	kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
 	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
+	"github.com/nirmata/kyverno/pkg/policystatus"

 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -37,10 +38,11 @@ type Generator struct {
 	// returns true if the cluster policy store has been synced at least once
 	pvSynced cache.InformerSynced
 	// returns true if the namespaced cluster policy store has been synced at least once
-	nspvSynced cache.InformerSynced
-	queue      workqueue.RateLimitingInterface
-	dataStore  *dataStore
-	log        logr.Logger
+	log                  logr.Logger
+	nspvSynced           cache.InformerSynced
+	queue                workqueue.RateLimitingInterface
+	dataStore            *dataStore
+	policyStatusListener policystatus.Listener
 }

 //NewDataStore returns an instance of data store
@@ -80,6 +82,7 @@ type Info struct {
 	PolicyName string
 	Resource   unstructured.Unstructured
 	Rules      []kyverno.ViolatedRule
+	FromSync   bool
 }

 func (i Info) toKey() string {
@@ -105,17 +108,19 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
 	dclient *dclient.Client,
 	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
 	nspvInformer kyvernoinformer.PolicyViolationInformer,
+	policyStatus policystatus.Listener,
 	log logr.Logger) *Generator {
 	gen := Generator{
-		kyvernoInterface: client.KyvernoV1(),
-		dclient:          dclient,
-		cpvLister:        pvInformer.Lister(),
-		pvSynced:         pvInformer.Informer().HasSynced,
-		nspvLister:       nspvInformer.Lister(),
-		nspvSynced:       nspvInformer.Informer().HasSynced,
-		queue:            workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
-		dataStore:        newDataStore(),
-		log:              log,
+		kyvernoInterface:     client.KyvernoV1(),
+		dclient:              dclient,
+		cpvLister:            pvInformer.Lister(),
+		pvSynced:             pvInformer.Informer().HasSynced,
+		nspvLister:           nspvInformer.Lister(),
+		nspvSynced:           nspvInformer.Informer().HasSynced,
+		queue:                workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
+		dataStore:            newDataStore(),
+		log:                  log,
+		policyStatusListener: policyStatus,
 	}
 	return &gen
 }
@@ -222,15 +227,21 @@ func (gen *Generator) syncHandler(info Info) error {
 	builder := newPvBuilder()
 	if info.Resource.GetNamespace() == "" {
 		// cluster scope resource generate a clusterpolicy violation
-		handler = newClusterPV(gen.log.WithName("ClusterPV"), gen.dclient, gen.cpvLister, gen.kyvernoInterface)
+		handler = newClusterPV(gen.log.WithName("ClusterPV"), gen.dclient, gen.cpvLister, gen.kyvernoInterface, gen.policyStatusListener)
 	} else {
 		// namespaced resources generated a namespaced policy violation in the namespace of the resource
-		handler = newNamespacedPV(gen.log.WithName("NamespacedPV"), gen.dclient, gen.nspvLister, gen.kyvernoInterface)
+		handler = newNamespacedPV(gen.log.WithName("NamespacedPV"), gen.dclient, gen.nspvLister, gen.kyvernoInterface, gen.policyStatusListener)
 	}

 	failure := false
 	pv := builder.generate(info)

+	if info.FromSync {
+		pv.Annotations = map[string]string{
+			"fromSync": "true",
+		}
+	}
+
 	// Create Policy Violations
 	logger.V(4).Info("creating policy violation", "key", info.toKey())
 	if err := handler.create(pv); err != nil {
@@ -9,6 +9,7 @@ import (
 	kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
 	client "github.com/nirmata/kyverno/pkg/dclient"
+	"github.com/nirmata/kyverno/pkg/policystatus"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -22,17 +23,21 @@ type namespacedPV struct {
 	kyvernoInterface kyvernov1.KyvernoV1Interface
 	// logger
 	log logr.Logger
+	// update policy status with violationCount
+	policyStatusListener policystatus.Listener
 }

 func newNamespacedPV(log logr.Logger, dclient *client.Client,
 	nspvLister kyvernolister.PolicyViolationLister,
 	kyvernoInterface kyvernov1.KyvernoV1Interface,
+	policyStatus policystatus.Listener,
 ) *namespacedPV {
 	nspv := namespacedPV{
-		dclient:          dclient,
-		nspvLister:       nspvLister,
-		kyvernoInterface: kyvernoInterface,
-		log:              log,
+		dclient:              dclient,
+		nspvLister:           nspvLister,
+		kyvernoInterface:     kyvernoInterface,
+		log:                  log,
+		policyStatusListener: policyStatus,
 	}
 	return &nspv
 }
@@ -97,6 +102,10 @@ func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
 		logger.Error(err, "failed to create namespaced policy violation")
 		return err
 	}
+
+	if newPv.Annotations["fromSync"] != "true" {
+		nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
+	}
 	logger.Info("namespaced policy violation created")
 	return nil
 }
@@ -117,6 +126,10 @@ func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error
 	if err != nil {
 		return fmt.Errorf("failed to update namespaced policy violation: %v", err)
 	}
+
+	if newPv.Annotations["fromSync"] != "true" {
+		nspv.policyStatusListener.Send(violationCount{policyName: newPv.Spec.Policy, violatedRules: newPv.Spec.ViolatedRules})
+	}
 	logger.Info("namespaced policy violation created")
 	return nil
 }

74
pkg/policyviolation/policyStatus_test.go
Normal file
@@ -0,0 +1,74 @@
package policyviolation

import (
	"encoding/json"
	"reflect"
	"testing"

	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

func Test_Stats(t *testing.T) {
	testCase := struct {
		violationCountStats []struct {
			policyName    string
			violatedRules []v1.ViolatedRule
		}
		expectedOutput []byte
		existingCache  map[string]v1.PolicyStatus
	}{
		existingCache: map[string]v1.PolicyStatus{
			"policy1": {
				Rules: []v1.RuleStats{
					{
						Name: "rule4",
					},
				},
			},
			"policy2": {
				Rules: []v1.RuleStats{
					{
						Name: "rule4",
					},
				},
			},
		},
		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"","violationCount":1,"ruleStatus":[{"ruleName":"rule4","violationCount":1}]},"policy2":{"averageExecutionTime":"","violationCount":1,"ruleStatus":[{"ruleName":"rule4","violationCount":1}]}}`),
		violationCountStats: []struct {
			policyName    string
			violatedRules []v1.ViolatedRule
		}{
			{
				policyName: "policy1",
				violatedRules: []v1.ViolatedRule{
					{
						Name: "rule4",
					},
				},
			},
			{
				policyName: "policy2",
				violatedRules: []v1.ViolatedRule{
					{
						Name: "rule4",
					},
				},
			},
		},
	}

	policyNameToStatus := testCase.existingCache

	for _, violationCountStat := range testCase.violationCountStats {
		receiver := &violationCount{
			policyName:    violationCountStat.policyName,
			violatedRules: violationCountStat.violatedRules,
		}
		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
	}

	output, _ := json.Marshal(policyNameToStatus)
	if !reflect.DeepEqual(output, testCase.expectedOutput) {
		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
	}
}
@@ -1,7 +1,12 @@
 package webhooks

 import (
+	"reflect"
+	"sort"
+	"time"
+
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
+	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	"github.com/nirmata/kyverno/pkg/engine"
 	"github.com/nirmata/kyverno/pkg/engine/context"
 	"github.com/nirmata/kyverno/pkg/engine/response"
@@ -60,6 +65,9 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
 		if len(engineResponse.PolicyResponse.Rules) > 0 {
 			// some generate rules do apply to the resource
 			engineResponses = append(engineResponses, engineResponse)
+			ws.statusListener.Send(generateStats{
+				resp: engineResponse,
+			})
 		}
 	}
 	// Adds Generate Request to a channel (queue size 1000) to generators
@@ -101,3 +109,73 @@ func transform(userRequestInfo kyverno.RequestInfo, er response.EngineResponse)
 	}
 	return gr
 }
+
+type generateStats struct {
+	resp response.EngineResponse
+}
+
+func (gs generateStats) PolicyName() string {
+	return gs.resp.PolicyResponse.Policy
+}
+
+func (gs generateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
+	if reflect.DeepEqual(response.EngineResponse{}, gs.resp) {
+		return status
+	}
+
+	var nameToRule = make(map[string]v1.RuleStats)
+	for _, rule := range status.Rules {
+		nameToRule[rule.Name] = rule
+	}
+
+	for _, rule := range gs.resp.PolicyResponse.Rules {
+		ruleStat := nameToRule[rule.Name]
+		ruleStat.Name = rule.Name
+
+		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
+		ruleStat.ExecutionTime = updateAverageTime(
+			rule.ProcessingTime,
+			ruleStat.ExecutionTime,
+			averageOver).String()
+
+		if rule.Success {
+			status.RulesAppliedCount++
+			ruleStat.AppliedCount++
+		} else {
+			status.RulesFailedCount++
+			ruleStat.FailedCount++
+		}
+
+		nameToRule[rule.Name] = ruleStat
+	}
+
+	var policyAverageExecutionTime time.Duration
+	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
+	for _, ruleStat := range nameToRule {
+		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
+		if err == nil {
+			policyAverageExecutionTime += executionTime
+		}
+		ruleStats = append(ruleStats, ruleStat)
+	}
+
+	sort.Slice(ruleStats, func(i, j int) bool {
+		return ruleStats[i].Name < ruleStats[j].Name
+	})
+
+	status.AvgExecutionTime = policyAverageExecutionTime.String()
+	status.Rules = ruleStats
+
+	return status
+}
+
+func updateAverageTime(newTime time.Duration, oldAverageTimeString string, averageOver int64) time.Duration {
+	if averageOver == 0 {
+		return newTime
+	}
+	oldAverageExecutionTime, _ := time.ParseDuration(oldAverageTimeString)
+	numerator := (oldAverageExecutionTime.Nanoseconds() * averageOver) + newTime.Nanoseconds()
+	denominator := averageOver + 1
+	newAverageTimeInNanoSeconds := numerator / denominator
+	return time.Duration(newAverageTimeInNanoSeconds) * time.Nanosecond
+}
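updateAverageTime folds each new sample into the stored mean as (old*n + new) / (n+1). A quick worked check of that arithmetic (values invented for illustration): with a stored average of 200ns over 2 samples, a new 500ns sample yields (200*2 + 500) / 3 = 300ns.

package main

import (
	"fmt"
	"time"
)

// Hypothetical check of the incremental mean used by updateAverageTime.
func main() {
	oldAvg, _ := time.ParseDuration("200ns") // stored mean over 2 earlier samples
	newSample := 500 * time.Nanosecond

	avg := time.Duration((oldAvg.Nanoseconds()*2 + newSample.Nanoseconds()) / 3)
	fmt.Println(avg) // 300ns
}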
@@ -1,14 +1,20 @@
 package webhooks

 import (
+	"reflect"
+	"sort"
+	"time"
+
+	"github.com/nirmata/kyverno/pkg/openapi"
+
+	"github.com/golang/glog"
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
+	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	"github.com/nirmata/kyverno/pkg/engine"
 	"github.com/nirmata/kyverno/pkg/engine/context"
 	"github.com/nirmata/kyverno/pkg/engine/response"
 	engineutils "github.com/nirmata/kyverno/pkg/engine/utils"
-	policyctr "github.com/nirmata/kyverno/pkg/policy"
 	"github.com/nirmata/kyverno/pkg/policyviolation"
-	"github.com/nirmata/kyverno/pkg/utils"
 	v1beta1 "k8s.io/api/admission/v1beta1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )
@@ -20,40 +26,6 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
 	logger.V(4).Info("incoming request")

 	var patches [][]byte
-	var policyStats []policyctr.PolicyStat
-
-	// gather stats from the engine response
-	gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
-		ps := policyctr.PolicyStat{}
-		ps.PolicyName = policyName
-		ps.Stats.MutationExecutionTime = policyResponse.ProcessingTime
-		ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
-		// capture rule level stats
-		for _, rule := range policyResponse.Rules {
-			rs := policyctr.RuleStatinfo{}
-			rs.RuleName = rule.Name
-			rs.ExecutionTime = rule.RuleStats.ProcessingTime
-			if rule.Success {
-				rs.RuleAppliedCount++
-			} else {
-				rs.RulesFailedCount++
-			}
-			if rule.Patches != nil {
-				rs.MutationCount++
-			}
-			ps.Stats.Rules = append(ps.Stats.Rules, rs)
-		}
-		policyStats = append(policyStats, ps)
-	}
-	// send stats for aggregation
-	sendStat := func(blocked bool) {
-		for _, stat := range policyStats {
-			stat.Stats.ResourceBlocked = utils.Btoi(blocked)
-			//SEND
-			ws.policyStatus.SendStat(stat)
-		}
-	}
-
 	var engineResponses []response.EngineResponse

 	userRequestInfo := kyverno.RequestInfo{
@@ -91,12 +63,16 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
 		policyContext.Policy = policy
 		engineResponse := engine.Mutate(policyContext)
 		engineResponses = append(engineResponses, engineResponse)
-		// Gather policy application statistics
-		gatherStat(policy.Name, engineResponse.PolicyResponse)
+		ws.statusListener.Send(mutateStats{resp: engineResponse})
 		if !engineResponse.IsSuccesful() {
 			logger.V(4).Info("failed to apply policy", "policy", policy.Name)
 			continue
 		}
+		err := openapi.ValidateResource(*engineResponse.PatchedResource.DeepCopy(), engineResponse.PatchedResource.GetKind())
+		if err != nil {
+			glog.V(4).Infoln(err)
+			continue
+		}
 		// gather patches
 		patches = append(patches, engineResponse.GetPatches()...)
 		logger.Info("mutation rules from policy applied successfully", "policy", policy.Name)
@@ -124,8 +100,6 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
 	events := generateEvents(engineResponses, false, (request.Operation == v1beta1.Update), logger)
 	ws.eventGen.Add(events...)

-	sendStat(false)
-
 	// debug info
 	func() {
 		if len(patches) != 0 {
@@ -141,3 +115,64 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou
 	// patches holds all the successful patches, if no patch is created, it returns nil
 	return engineutils.JoinPatches(patches)
 }
+
+type mutateStats struct {
+	resp response.EngineResponse
+}
+
+func (ms mutateStats) PolicyName() string {
+	return ms.resp.PolicyResponse.Policy
+}
+
+func (ms mutateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
+	if reflect.DeepEqual(response.EngineResponse{}, ms.resp) {
+		return status
+	}
+
+	var nameToRule = make(map[string]v1.RuleStats)
+	for _, rule := range status.Rules {
+		nameToRule[rule.Name] = rule
+	}
+
+	for _, rule := range ms.resp.PolicyResponse.Rules {
+		ruleStat := nameToRule[rule.Name]
+		ruleStat.Name = rule.Name
+
+		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
+		ruleStat.ExecutionTime = updateAverageTime(
+			rule.ProcessingTime,
+			ruleStat.ExecutionTime,
+			averageOver).String()
+
+		if rule.Success {
+			status.RulesAppliedCount++
+			status.ResourcesMutatedCount++
+			ruleStat.AppliedCount++
+			ruleStat.ResourcesMutatedCount++
+		} else {
+			status.RulesFailedCount++
+			ruleStat.FailedCount++
+		}
+
+		nameToRule[rule.Name] = ruleStat
+	}
+
+	var policyAverageExecutionTime time.Duration
+	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
+	for _, ruleStat := range nameToRule {
+		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
+		if err == nil {
+			policyAverageExecutionTime += executionTime
+		}
+		ruleStats = append(ruleStats, ruleStat)
+	}
+
+	sort.Slice(ruleStats, func(i, j int) bool {
+		return ruleStats[i].Name < ruleStats[j].Name
+	})
+
+	status.AvgExecutionTime = policyAverageExecutionTime.String()
+	status.Rules = ruleStats
+
+	return status
+}

211
pkg/webhooks/policyStatus_test.go
Normal file
@@ -0,0 +1,211 @@
package webhooks

import (
	"encoding/json"
	"reflect"
	"testing"
	"time"

	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/engine/response"
)

func Test_GenerateStats(t *testing.T) {
	testCase := struct {
		generateStats  []response.EngineResponse
		expectedOutput []byte
	}{
		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule5","averageExecutionTime":"243ns","appliedCount":1},{"ruleName":"rule6","averageExecutionTime":"251ns","failedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule5","averageExecutionTime":"222ns","appliedCount":1},{"ruleName":"rule6","averageExecutionTime":"211ns","failedCount":1}]}}`),
		generateStats: []response.EngineResponse{
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy1",
					Rules: []response.RuleResponse{
						{
							Name:    "rule5",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 243,
							},
						},
						{
							Name:    "rule6",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 251,
							},
						},
					},
				},
			},
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy2",
					Rules: []response.RuleResponse{
						{
							Name:    "rule5",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 222,
							},
						},
						{
							Name:    "rule6",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 211,
							},
						},
					},
				},
			},
		},
	}

	policyNameToStatus := map[string]v1.PolicyStatus{}

	for _, generateStat := range testCase.generateStats {
		receiver := generateStats{
			resp: generateStat,
		}
		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
	}

	output, _ := json.Marshal(policyNameToStatus)
	if !reflect.DeepEqual(output, testCase.expectedOutput) {
		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
	}
}

func Test_MutateStats(t *testing.T) {
	testCase := struct {
		mutateStats    []response.EngineResponse
		expectedOutput []byte
	}{
		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesMutatedCount":1,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"243ns","appliedCount":1,"resourcesMutatedCount":1},{"ruleName":"rule2","averageExecutionTime":"251ns","failedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesMutatedCount":1,"ruleStatus":[{"ruleName":"rule1","averageExecutionTime":"222ns","appliedCount":1,"resourcesMutatedCount":1},{"ruleName":"rule2","averageExecutionTime":"211ns","failedCount":1}]}}`),
		mutateStats: []response.EngineResponse{
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy1",
					Rules: []response.RuleResponse{
						{
							Name:    "rule1",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 243,
							},
						},
						{
							Name:    "rule2",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 251,
							},
						},
					},
				},
			},
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy2",
					Rules: []response.RuleResponse{
						{
							Name:    "rule1",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 222,
							},
						},
						{
							Name:    "rule2",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 211,
							},
						},
					},
				},
			},
		},
	}

	policyNameToStatus := map[string]v1.PolicyStatus{}
	for _, mutateStat := range testCase.mutateStats {
		receiver := mutateStats{
			resp: mutateStat,
		}
		policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
	}

	output, _ := json.Marshal(policyNameToStatus)
	if !reflect.DeepEqual(output, testCase.expectedOutput) {
		t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
	}
}

func Test_ValidateStats(t *testing.T) {
	testCase := struct {
		validateStats  []response.EngineResponse
		expectedOutput []byte
	}{
		expectedOutput: []byte(`{"policy1":{"averageExecutionTime":"494ns","rulesFailedCount":1,"rulesAppliedCount":1,"resourcesBlockedCount":1,"ruleStatus":[{"ruleName":"rule3","averageExecutionTime":"243ns","appliedCount":1},{"ruleName":"rule4","averageExecutionTime":"251ns","failedCount":1,"resourcesBlockedCount":1}]},"policy2":{"averageExecutionTime":"433ns","rulesFailedCount":1,"rulesAppliedCount":1,"ruleStatus":[{"ruleName":"rule3","averageExecutionTime":"222ns","appliedCount":1},{"ruleName":"rule4","averageExecutionTime":"211ns","failedCount":1}]}}`),
		validateStats: []response.EngineResponse{
			{
				PolicyResponse: response.PolicyResponse{
					Policy:                  "policy1",
					ValidationFailureAction: "enforce",
					Rules: []response.RuleResponse{
						{
							Name:    "rule3",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 243,
							},
						},
						{
							Name:    "rule4",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 251,
							},
						},
					},
				},
			},
			{
				PolicyResponse: response.PolicyResponse{
					Policy: "policy2",
					Rules: []response.RuleResponse{
						{
							Name:    "rule3",
							Success: true,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 222,
							},
						},
						{
							Name:    "rule4",
							Success: false,
							RuleStats: response.RuleStats{
								ProcessingTime: time.Nanosecond * 211,
							},
						},
					},
				},
			},
		},
	}

	policyNameToStatus := map[string]v1.PolicyStatus{}
	for _, validateStat := range testCase.validateStats {
		receiver := validateStats{
			resp: validateStat,
		}
policyNameToStatus[receiver.PolicyName()] = receiver.UpdateStatus(policyNameToStatus[receiver.PolicyName()])
|
||||
}
|
||||
|
||||
output, _ := json.Marshal(policyNameToStatus)
|
||||
if !reflect.DeepEqual(output, testCase.expectedOutput) {
|
||||
t.Errorf("\n\nTestcase has failed\nExpected:\n%v\nGot:\n%v\n\n", string(testCase.expectedOutput), string(output))
|
||||
}
|
||||
}
|
|
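Note: the three receiver types exercised above (generateStats, mutateStats, validateStats) all follow one pattern: name a policy, then fold an engine response into that policy's accumulated status. A minimal sketch of the interface they appear to satisfy is below; the concrete definition lives in pkg/policystatus, and the name statusUpdater is an assumption for illustration, not taken from this diff.

package policystatus

import v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"

// statusUpdater (hypothetical name): anything sent to the status sync
// must identify its policy and merge its stats into the existing status.
type statusUpdater interface {
	PolicyName() string
	UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus
}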
@ -7,6 +7,7 @@ import (
	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	policyvalidate "github.com/nirmata/kyverno/pkg/policy"
	v1beta1 "k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@ -27,7 +28,7 @@ func (ws *WebhookServer) handlePolicyValidation(request *v1beta1.AdmissionReques
			Message: fmt.Sprintf("Failed to unmarshal policy admission request err %v", err),
		}}
	}
	if err := policyvalidate.Validate(*policy, ws.client); err != nil {
	if err := policyvalidate.Validate(*policy, ws.client, false); err != nil {
		admissionResp = &v1beta1.AdmissionResponse{
			Allowed: false,
			Result: &metav1.Status{
@ -18,7 +18,7 @@ import (
	"github.com/nirmata/kyverno/pkg/config"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/event"
	"github.com/nirmata/kyverno/pkg/policy"
	"github.com/nirmata/kyverno/pkg/policystatus"
	"github.com/nirmata/kyverno/pkg/policystore"
	"github.com/nirmata/kyverno/pkg/policyviolation"
	tlsutils "github.com/nirmata/kyverno/pkg/tls"

@ -55,7 +55,7 @@ type WebhookServer struct {
	// webhook registration client
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
	// API to send policy stats for aggregation
	policyStatus policy.PolicyStatusInterface
	statusListener policystatus.Listener
	// helpers to validate against current loaded configuration
	configHandler config.Interface
	// channel for cleanup notification

@ -83,7 +83,7 @@ func NewWebhookServer(
	crbInformer rbacinformer.ClusterRoleBindingInformer,
	eventGen event.Interface,
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
	policyStatus policy.PolicyStatusInterface,
	statusSync policystatus.Listener,
	configHandler config.Interface,
	pMetaStore policystore.LookupInterface,
	pvGenerator policyviolation.GeneratorInterface,

@ -115,7 +115,7 @@ func NewWebhookServer(
		crbSynced:                 crbInformer.Informer().HasSynced,
		eventGen:                  eventGen,
		webhookRegistrationClient: webhookRegistrationClient,
		policyStatus:              policyStatus,
		statusListener:            statusSync,
		configHandler:             configHandler,
		cleanUp:                   cleanUp,
		lastReqTime:               resourceWebhookWatcher.LastReqTime,
@ -2,14 +2,15 @@ package webhooks

import (
	"reflect"
	"sort"
	"time"

	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/engine"
	"github.com/nirmata/kyverno/pkg/engine/context"
	"github.com/nirmata/kyverno/pkg/engine/response"
	policyctr "github.com/nirmata/kyverno/pkg/policy"
	"github.com/nirmata/kyverno/pkg/policyviolation"
	"github.com/nirmata/kyverno/pkg/utils"
	v1beta1 "k8s.io/api/admission/v1beta1"
)
@ -20,36 +21,6 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
	logger := ws.log.WithValues("action", "validation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
	logger.V(4).Info("incoming request")

	var policyStats []policyctr.PolicyStat
	// gather stats from the engine response
	gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
		ps := policyctr.PolicyStat{}
		ps.PolicyName = policyName
		ps.Stats.ValidationExecutionTime = policyResponse.ProcessingTime
		ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
		// capture rule level stats
		for _, rule := range policyResponse.Rules {
			rs := policyctr.RuleStatinfo{}
			rs.RuleName = rule.Name
			rs.ExecutionTime = rule.RuleStats.ProcessingTime
			if rule.Success {
				rs.RuleAppliedCount++
			} else {
				rs.RulesFailedCount++
			}
			ps.Stats.Rules = append(ps.Stats.Rules, rs)
		}
		policyStats = append(policyStats, ps)
	}
	// send stats for aggregation
	sendStat := func(blocked bool) {
		for _, stat := range policyStats {
			stat.Stats.ResourceBlocked = utils.Btoi(blocked)
			//SEND
			ws.policyStatus.SendStat(stat)
		}
	}

	// Get new and old resource
	newR, oldR, err := extractResources(patchedResource, request)
	if err != nil {
@ -96,8 +67,9 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
			continue
		}
		engineResponses = append(engineResponses, engineResponse)
		// Gather policy application statistics
		gatherStat(policy.Name, engineResponse.PolicyResponse)
		ws.statusListener.Send(validateStats{
			resp: engineResponse,
		})
		if !engineResponse.IsSuccesful() {
			logger.V(4).Info("failed to apply policy", "policy", policy.Name)
			continue
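The ws.statusListener.Send call above hands each engine response to the status sync, decoupling admission handling from status persistence. One simple way such a Listener could be realized (purely illustrative; not necessarily how pkg/policystatus defines it) is a channel of updaters that the sync goroutine started in main, statusSync.Run, drains:

// Illustrative sketch only: a channel-backed listener, assuming the
// statusUpdater shape sketched earlier in this commit's notes.
type Listener chan statusUpdater

// Send queues one status update for the background sync loop; the
// receiver side applies UpdateStatus and writes the policy status back.
func (l Listener) Send(s statusUpdater) {
	l <- s
}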
@ -121,9 +93,6 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
	ws.eventGen.Add(events...)
	if blocked {
		logger.V(4).Info("resource blocked")
		sendStat(true)
		// EVENTS
		// - event on the Policy
		return false, getEnforceFailureErrorMsg(engineResponses)
	}
@ -131,7 +100,69 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
	// violations are created with resource on "audit"
	pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
	ws.pvGenerator.Add(pvInfos...)
	sendStat(false)
	// report time end
	return true, ""
}

type validateStats struct {
	resp response.EngineResponse
}

func (vs validateStats) PolicyName() string {
	return vs.resp.PolicyResponse.Policy
}

func (vs validateStats) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
	if reflect.DeepEqual(response.EngineResponse{}, vs.resp) {
		return status
	}

	var nameToRule = make(map[string]v1.RuleStats)
	for _, rule := range status.Rules {
		nameToRule[rule.Name] = rule
	}

	for _, rule := range vs.resp.PolicyResponse.Rules {
		ruleStat := nameToRule[rule.Name]
		ruleStat.Name = rule.Name

		averageOver := int64(ruleStat.AppliedCount + ruleStat.FailedCount)
		ruleStat.ExecutionTime = updateAverageTime(
			rule.ProcessingTime,
			ruleStat.ExecutionTime,
			averageOver).String()

		if rule.Success {
			status.RulesAppliedCount++
			ruleStat.AppliedCount++
		} else {
			status.RulesFailedCount++
			ruleStat.FailedCount++
			if vs.resp.PolicyResponse.ValidationFailureAction == "enforce" {
				status.ResourcesBlockedCount++
				ruleStat.ResourcesBlockedCount++
			}
		}

		nameToRule[rule.Name] = ruleStat
	}

	var policyAverageExecutionTime time.Duration
	var ruleStats = make([]v1.RuleStats, 0, len(nameToRule))
	for _, ruleStat := range nameToRule {
		executionTime, err := time.ParseDuration(ruleStat.ExecutionTime)
		if err == nil {
			policyAverageExecutionTime += executionTime
		}
		ruleStats = append(ruleStats, ruleStat)
	}

	sort.Slice(ruleStats, func(i, j int) bool {
		return ruleStats[i].Name < ruleStats[j].Name
	})

	status.AvgExecutionTime = policyAverageExecutionTime.String()
	status.Rules = ruleStats

	return status
}
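updateAverageTime is called by UpdateStatus above, but its body is not part of this hunk. A plausible implementation, offered as an assumption rather than the committed code, keeps a running average over the previous sample count; this is consistent with the nanosecond averages asserted in policyStatus_test.go.

// Hypothetical sketch of updateAverageTime: fold one new sample into an
// average previously computed over averageOver samples. The signature
// matches the call site above; the real helper lives elsewhere in this
// package and may differ.
func updateAverageTime(newTime time.Duration, oldAverage string, averageOver int64) time.Duration {
	if averageOver == 0 {
		return newTime
	}
	oldAverageDuration, err := time.ParseDuration(oldAverage)
	if err != nil {
		// no prior average recorded; start fresh with the new sample
		return newTime
	}
	numerator := oldAverageDuration.Nanoseconds()*averageOver + newTime.Nanoseconds()
	return time.Duration(numerator / (averageOver + 1))
}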