1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-06 16:06:56 +00:00

Merge pull request #305 from nirmata/feature_redesign

Feature redesign
This commit is contained in:
Shivkumar Dudhani 2019-08-21 17:00:07 -07:00 committed by GitHub
commit 5672c4d67c
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
114 changed files with 5554 additions and 3874 deletions

1
.gitignore vendored
View file

@ -1,7 +1,6 @@
certs
Gopkg.lock
.vscode
kyverno
gh-pages/public
_output
coverage.txt

View file

@ -54,7 +54,9 @@ spec:
type: string
name:
type: string
namespace:
namespaces:
type: array
items:
type: string
selector:
properties:
@ -92,7 +94,9 @@ spec:
type: string
name:
type: string
namespace:
namespaces:
type: array
items:
type: string
selector:
properties:

View file

@ -54,7 +54,9 @@ spec:
type: string
name:
type: string
namespace:
namespaces:
type: array
items:
type: string
selector:
properties:
@ -92,7 +94,9 @@ spec:
type: string
name:
type: string
namespace:
namespaces:
type: array
items:
type: string
selector:
properties:
@ -170,3 +174,53 @@ spec:
type: string
data:
AnyValue: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policyviolations.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Cluster
names:
kind: PolicyViolation
plural: policyviolations
singular: policyviolation
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
required:
- policy
- resource
- rules
properties:
policy:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
rules:
type: array
items:
type: object
required:
- name
- type
- message
---

View file

@ -0,0 +1,285 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policies.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Cluster
names:
kind: Policy
plural: policies
singular: policy
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
required:
- rules
properties:
# default values to be handled by user
validationFailureAction:
type: string
enum:
- enforce # blocks the resource api-request if a rule fails. Default behavior
- audit # allows resource creation and reports the failed validation rules as violations
rules:
type: array
items:
type: object
required:
- name
- match
properties:
name:
type: string
match:
type: object
required:
- resources
properties:
resources:
type: object
required:
- kinds
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespace:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
exclude:
type: object
required:
- resources
properties:
resources:
type: object
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespace:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
mutate:
type: object
properties:
overlay:
AnyValue: {}
patches:
type: array
items:
type: object
required:
- path
- op
properties:
path:
type: string
op:
type: string
enum:
- add
- replace
- remove
value:
AnyValue: {}
validate:
type: object
required:
- pattern
properties:
message:
type: string
pattern:
AnyValue: {}
generate:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
clone:
type: object
required:
- namespace
- name
properties:
namespace:
type: string
name:
type: string
data:
AnyValue: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policyviolations.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Cluster
names:
kind: PolicyViolation
plural: policyviolations
singular: policyviolation
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
required:
- policyName
- resource
- rules
properties:
policyName:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
rules:
type: array
items:
type: object
required:
- name
- type
- status
- message
---
kind: Namespace
apiVersion: v1
metadata:
name: "kyverno"
---
apiVersion: v1
kind: Service
metadata:
namespace: kyverno
name: kyverno-svc
labels:
app: kyverno
spec:
ports:
- port: 443
targetPort: 443
selector:
app: kyverno
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kyverno-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
namespace: kyverno
name: kyverno
labels:
app: kyverno
spec:
replicas: 1
template:
metadata:
labels:
app: kyverno
spec:
serviceAccountName: kyverno-service-account
containers:
- name: kyverno
image: nirmata/kyverno:latest
args: ["--filterK8Resources","[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"]
ports:
- containerPort: 443
securityContext:
privileged: true

View file

@ -14,7 +14,8 @@ metadata:
spec:
rules:
- name: "Basic config generator for all namespaces"
resource:
match:
resources:
kinds:
- Namespace
selector:
@ -27,7 +28,8 @@ spec:
namespace: default
name: config-template
- name: "Basic config generator for all namespaces"
resource:
match:
resources:
kinds:
- Namespace
selector:
@ -59,7 +61,8 @@ metadata:
spec:
rules:
- name: "deny-all-traffic"
resource:
match:
resources:
kinds:
- Namespace
name: "*"

View file

@ -18,7 +18,8 @@ metadata :
spec :
rules:
- name: "add-init-secrets"
resource:
match:
resources:
kinds:
- Deployment
mutate:
@ -46,7 +47,8 @@ metadata :
spec :
rules:
- name: "Remove unwanted label"
resource:
match:
resources:
kinds:
- Secret
mutate:
@ -71,7 +73,8 @@ metadata :
spec :
rules:
- name: "Set hard memory limit to 2Gi"
resource:
match:
resources:
kinds:
- Pod
selector:
@ -103,7 +106,8 @@ metadata:
spec:
rules:
- name: "Add IP to subsets"
resource:
match:
resources:
kinds :
- Endpoints
mutate:
@ -128,7 +132,8 @@ metadata :
spec :
rules:
- name: "Set port"
resource:
match:
resources:
kinds :
- Endpoints
mutate:
@ -158,7 +163,8 @@ metadata :
spec :
rules:
- name: "Set port"
resource:
match:
resources:
kinds :
- Endpoints
mutate:

View file

@ -44,7 +44,8 @@ metadata :
spec :
rules:
- name: check-label
resource:
match:
resources:
# Kind specifies one or more resource types to match
kinds:
- Deployment
@ -79,7 +80,8 @@ metadata :
spec :
rules:
- name: check-memory_requests_link_in_yaml_relative
resource:
match:
resources:
# Kind specifies one or more resource types to match
kinds:
- Deployment

View file

@ -14,15 +14,29 @@ spec :
rules:
# Rules must have a unique name
- name: "check-pod-controller-labels"
# Each rule matches specific resource described by "resource" field.
resource:
kinds:
# Each rule matches specific resource described by "match" field.
match:
resources:
kinds: # Required, list of kinds
- Deployment
- StatefulSet
- DaemonSet
# A resource name is optional. Name supports wildcards * and ?
name: "mongo*" # Optional, a resource name is optional. Name supports wildcards * and ?
namespaces: # Optional, list of namespaces
- devtest2
- devtest1
selector: # Optional, a resource selector is optional. Selector values support wildcards * and ?
matchLabels:
app: mongodb
matchExpressions:
- {key: tier, operator: In, values: [database]}
# Resources that need to be excluded
exclude: # Optional, resources to be excluded from evaluation
resources:
kinds:
- Daemonsets
name: "*"
# A resource selector is optional. Selector values support wildcards * and ?
namespaces:
- devtest2
selector:
matchLabels:
app: mongodb
@ -35,5 +49,10 @@ spec :
Each rule can validate, mutate, or generate configurations of matching resources. A rule definition can contain only a single **mutate**, **validate**, or **generate** child node. These actions are applied to the resource in described order: mutation, validation and then generation.
**Resource description:**
* ```match``` is a required key that defines the parameters which identify the resources that need to be matched
* ```exclude``` is an optional key to exclude resources from the application of the rule
---
<small>*Read Next >> [Validate](/documentation/writing-policies-validate.md)*</small>

27
examples/cli/p1.yaml Normal file
View file

@ -0,0 +1,27 @@
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-cpu-memory
spec:
rules:
- name: check-pod-resources
match:
resources:
kinds:
- Pod
validate:
message: "CPU and memory resource requests and limits are required"
pattern:
spec:
containers:
# 'name: *' selects all containers in the pod
- name: "*"
resources:
limits:
# '?' requires 1 alphanumeric character and '*' means that there can be 0 or more characters.
# Using them together e.g. '?*' requires at least one character.
memory: "?*"
cpu: "?*"
requests:
memory: "?*"
cpu: "?*"

32
examples/cli/pv1.yaml Normal file
View file

@ -0,0 +1,32 @@
apiVersion: kyverno.io/v1alpha1
kind: PolicyViolation
metadata:
name: pv1
spec:
policy: check-cpu-memory
resource:
kind: Pod
namespace: ""
name: pod1
rules:
- name: r1
type: Mutation
status: Failed
message: test message for rule failure
---
apiVersion: kyverno.io/v1alpha1
kind: PolicyViolation
metadata:
name: pv2
spec:
policy: check-cpu-memory
resource:
kind: Pod
namespace: ""
name: pod1
rules:
- name: r1
type: Mutation
status: Failed
message: test message for rule failure
---

View file

@ -12,6 +12,10 @@ spec:
selector:
matchLabels:
app : nginxlatest
exclude:
resources:
kinds:
- DaemonSet
mutate:
overlay:
spec:

28
examples/test/p1.yaml Normal file
View file

@ -0,0 +1,28 @@
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-resources
spec:
validationFailureAction: "audit"
rules:
- name: check-pod-resources
match:
resources:
kinds:
- Pod
validate:
message: "CPU and memory resource requests and limits are required"
pattern:
spec:
containers:
# 'name: *' selects all containers in the pod
- name: "*"
resources:
requests:
# '?' requires 1 alphanumeric character and '*' means that there can be 0 or more characters.
# Using them together e.g. '?*' requires at least one character.
memory: "?*"
cpu: "?*"
limits:
memory: "?*"
cpu: "?*"

121
main.go
View file

@ -2,18 +2,21 @@ package main
import (
"flag"
"time"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/annotations"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
"github.com/nirmata/kyverno/pkg/config"
controller "github.com/nirmata/kyverno/pkg/controller"
client "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"
gencontroller "github.com/nirmata/kyverno/pkg/gencontroller"
"github.com/nirmata/kyverno/pkg/sharedinformer"
"github.com/nirmata/kyverno/pkg/namespace"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/violation"
"github.com/nirmata/kyverno/pkg/webhookconfig"
"github.com/nirmata/kyverno/pkg/webhooks"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/sample-controller/pkg/signals"
)
@ -26,75 +29,127 @@ var (
webhookTimeout int
)
// TODO: tune resync time differently for each informer
const defaultReSyncTime = 10 * time.Second
func main() {
defer glog.Flush()
printVersionInfo()
// profile cpu and memory consuption
prof = enableProfiling(cpu, memory)
// CLIENT CONFIG
clientConfig, err := createClientConfig(kubeconfig)
if err != nil {
glog.Fatalf("Error building kubeconfig: %v\n", err)
}
// KYVERNO CRD CLIENT
// access CRD resources
// - Policy
// - PolicyViolation
pclient, err := kyvernoclient.NewForConfig(clientConfig)
if err != nil {
glog.Fatalf("Error creating client: %v\n", err)
}
// DYNAMIC CLIENT
// - client for all registered resources
client, err := client.NewClient(clientConfig)
if err != nil {
glog.Fatalf("Error creating client: %v\n", err)
}
policyInformerFactory, err := sharedinformer.NewSharedInformerFactory(clientConfig)
// KUBERNETES CLIENT
kubeClient, err := utils.NewKubeClient(clientConfig)
if err != nil {
glog.Fatalf("Error creating policy sharedinformer: %v\n", err)
glog.Fatalf("Error creating kubernetes client: %v\n", err)
}
webhookRegistrationClient, err := webhooks.NewWebhookRegistrationClient(clientConfig, client, serverIP, int32(webhookTimeout))
// WEBHOOK REGISTRATION CLIENT
webhookRegistrationClient, err := webhookconfig.NewWebhookRegistrationClient(clientConfig, client, serverIP, int32(webhookTimeout))
if err != nil {
glog.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
}
if err = webhookRegistrationClient.Register(); err != nil {
glog.Fatalf("Failed registering Admission Webhooks: %v\n", err)
// KYVERNO CRD INFORMER
// watches CRD resources:
// - Policy
// - PolicyViolation
// - cache resync time: 10 seconds
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, 10*time.Second)
// KUBERNETES RESOURCES INFORMER
// watches namespace resource
// - cache resync time: 10 seconds
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second)
// EVENT GENERATOR
// - generate event with retry mechanism
egen := event.NewEventGenerator(client, pInformer.Kyverno().V1alpha1().Policies())
// POLICY CONTROLLER
// - reconciliation policy and policy violation
// - process policy on existing resources
// - status aggregator: receives stats when a policy is applied
// & updates the policy status
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().Policies(), pInformer.Kyverno().V1alpha1().PolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient)
if err != nil {
glog.Fatalf("error creating policy controller: %v\n", err)
}
kubeInformer := utils.NewKubeInformerFactory(clientConfig)
eventController := event.NewEventController(client, policyInformerFactory)
violationBuilder := violation.NewPolicyViolationBuilder(client, policyInformerFactory, eventController)
annotationsController := annotations.NewAnnotationControler(client)
policyController := controller.NewPolicyController(
client,
policyInformerFactory,
violationBuilder,
eventController,
filterK8Resources)
// POLICY VIOLATION CONTROLLER
// policy violation cleanup if the corresponding resource is deleted
// status: lastUpdateTime
pvc, err := policyviolation.NewPolicyViolationController(client, pclient, pInformer.Kyverno().V1alpha1().Policies(), pInformer.Kyverno().V1alpha1().PolicyViolations())
if err != nil {
glog.Fatalf("error creating policy violation controller: %v\n", err)
}
genControler := gencontroller.NewGenController(client, eventController, policyInformerFactory, violationBuilder, kubeInformer.Core().V1().Namespaces(), annotationsController)
// GENERATE CONTROLLER
// - watches for Namespace resource and generates resource based on the policy generate rule
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1alpha1().Policies(), pInformer.Kyverno().V1alpha1().PolicyViolations(), pc.GetPolicyStatusAggregator(), egen)
// CONFIGURE CERTIFICATES
tlsPair, err := initTLSPemPair(clientConfig, client)
if err != nil {
glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
}
server, err := webhooks.NewWebhookServer(client, tlsPair, policyInformerFactory, eventController, violationBuilder, annotationsController, webhookRegistrationClient, filterK8Resources)
// WEBHOOK REGISTRATION
// - validationwebhookconfiguration (Policy)
// - mutatingwebhookconfiguration (All resources)
// webhook configuration is also generated dynamically in the policy controller
// based on the policy resources created
if err = webhookRegistrationClient.Register(); err != nil {
glog.Fatalf("Failed registering Admission Webhooks: %v\n", err)
}
// WEBHOOK
// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
// - reports the results based on the response from the policy engine:
// -- annotations on resources with update details on mutation JSON patches
// -- generate policy violation resource
// -- generate events on policy and resource
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1alpha1().Policies(), pInformer.Kyverno().V1alpha1().PolicyViolations(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), filterK8Resources)
if err != nil {
glog.Fatalf("Unable to create webhook server: %v\n", err)
}
stopCh := signals.SetupSignalHandler()
policyInformerFactory.Run(stopCh)
// Start the components
pInformer.Start(stopCh)
kubeInformer.Start(stopCh)
eventController.Run(stopCh)
genControler.Run(stopCh)
annotationsController.Run(stopCh)
if err = policyController.Run(stopCh); err != nil {
glog.Fatalf("Error running PolicyController: %v\n", err)
}
go pc.Run(1, stopCh)
go pvc.Run(1, stopCh)
go egen.Run(1, stopCh)
go nsc.Run(1, stopCh)
//TODO add WG for the go routines?
server.RunAsync()
<-stopCh
genControler.Stop()
eventController.Stop()
annotationsController.Stop()
policyController.Stop()
disableProfiling(prof)
server.Stop()
}

View file

@ -1,401 +0,0 @@
package annotations
import (
"encoding/json"
"reflect"
"github.com/golang/glog"
pinfo "github.com/nirmata/kyverno/pkg/info"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
//Policy is the per-policy status record stored in resource annotations.
type Policy struct {
	Status string `json:"status"`
	// Key Type/Name
	MutationRules   map[string]Rule `json:"mutationrules,omitempty"`
	ValidationRules map[string]Rule `json:"validationrules,omitempty"`
	GenerationRules map[string]Rule `json:"generationrules,omitempty"`
}

//Rule is the status record of a single rule inside a policy annotation.
type Rule struct {
	Status  string `json:"status"`
	Changes string `json:"changes,omitempty"` // TODO for mutation changes
	Message string `json:"message,omitempty"`
}

// getOverAllStatus reports "Failure" as soon as any mutation, validation or
// generation rule has failed, and "Success" otherwise.
func (p *Policy) getOverAllStatus() string {
	ruleSets := []map[string]Rule{p.MutationRules, p.ValidationRules, p.GenerationRules}
	for _, rules := range ruleSets {
		for _, r := range rules {
			if r.Status == "Failure" {
				return "Failure"
			}
		}
	}
	return "Success"
}
// getRules converts the rules of the requested type from a PolicyInfo rule
// list into the annotation Rule representation, keyed by rule name.
// Returns nil when the input list is empty; rules of other types are skipped.
func getRules(rules []*pinfo.RuleInfo, ruleType pinfo.RuleType) map[string]Rule {
	if len(rules) == 0 {
		return nil
	}
	annrules := make(map[string]Rule, 0)
	// var annrules map[string]Rule
	for _, r := range rules {
		// only rules of the requested type are recorded
		if r.RuleType != ruleType {
			continue
		}
		rule := Rule{Status: getStatus(r.IsSuccessful())}
		if !r.IsSuccessful() {
			rule.Message = r.GetErrorString()
		} else {
			if ruleType == pinfo.Mutation {
				// If ruleType is Mutation
				// then for successful mutation we store the json patch being applied in the annotation information
				rule.Changes = r.Changes
			}
		}
		annrules[r.Name] = rule
	}
	return annrules
}
// updatePolicy merges the rule maps from obj into p for the given rule type
// and recomputes p.Status from the merged rules. Returns true when anything
// changed, so the caller knows the annotation must be written back.
func (p *Policy) updatePolicy(obj *Policy, ruleType pinfo.RuleType) bool {
	updates := false
	// Check Mutation rules
	switch ruleType {
	case pinfo.All:
		if p.compareMutationRules(obj.MutationRules) {
			updates = true
		}
		if p.compareValidationRules(obj.ValidationRules) {
			updates = true
		}
		if p.compareGenerationRules(obj.GenerationRules) {
			updates = true
		}
	case pinfo.Mutation:
		if p.compareMutationRules(obj.MutationRules) {
			updates = true
		}
	case pinfo.Validation:
		if p.compareValidationRules(obj.ValidationRules) {
			updates = true
		}
	case pinfo.Generation:
		if p.compareGenerationRules(obj.GenerationRules) {
			updates = true
		}
		// NOTE(review): this status comparison lives only inside the
		// Generation case; it looks like it was meant to apply to every
		// rule type — confirm the intent before relying on it.
		if p.Status != obj.Status {
			updates = true
		}
	}
	// check if any rules failed
	p.Status = p.getOverAllStatus()
	// If there are any updates then the annotation can be updated, can skip
	return updates
}

// compareMutationRules replaces p's mutation rules with the given map when
// they differ; returns true when a replacement happened.
func (p *Policy) compareMutationRules(rules map[string]Rule) bool {
	// check if the rules have changed
	if !reflect.DeepEqual(p.MutationRules, rules) {
		p.MutationRules = rules
		return true
	}
	return false
}

// compareValidationRules replaces p's validation rules with the given map
// when they differ; returns true when a replacement happened.
func (p *Policy) compareValidationRules(rules map[string]Rule) bool {
	// check if the rules have changed
	if !reflect.DeepEqual(p.ValidationRules, rules) {
		p.ValidationRules = rules
		return true
	}
	return false
}

// compareGenerationRules replaces p's generation rules with the given map
// when they differ; returns true when a replacement happened.
func (p *Policy) compareGenerationRules(rules map[string]Rule) bool {
	// check if the rules have changed
	if !reflect.DeepEqual(p.GenerationRules, rules) {
		p.GenerationRules = rules
		return true
	}
	return false
}
// newAnnotationForPolicy converts a PolicyInfo into the annotation Policy
// representation, splitting its rules by rule type (mutation, validation,
// generation).
func newAnnotationForPolicy(pi *pinfo.PolicyInfo) *Policy {
	return &Policy{Status: getStatus(pi.IsSuccessful()),
		MutationRules:   getRules(pi.Rules, pinfo.Mutation),
		ValidationRules: getRules(pi.Rules, pinfo.Validation),
		GenerationRules: getRules(pi.Rules, pinfo.Generation),
	}
}
//AddPolicy will add policy annotation if not present or update if present
// modifies obj
// returns true, if there is any update -> caller need to update the obj
// returns false, if there is no change -> caller can skip the update
func AddPolicy(obj *unstructured.Unstructured, pi *pinfo.PolicyInfo, ruleType pinfo.RuleType) bool {
	PolicyObj := newAnnotationForPolicy(pi)
	// get annotation
	ann := obj.GetAnnotations()
	// NOTE(review): if the object carries no annotations this map may be
	// nil and the write below would panic — confirm callers guarantee a
	// non-nil annotation map.
	// check if policy already has annotation
	cPolicy, ok := ann[BuildKey(pi.Name)]
	if !ok {
		PolicyByte, err := json.Marshal(PolicyObj)
		if err != nil {
			glog.Error(err)
			return false
		}
		// insert policy information
		ann[BuildKey(pi.Name)] = string(PolicyByte)
		// set annotation back to unstr
		obj.SetAnnotations(ann)
		return true
	}
	cPolicyObj := Policy{}
	err := json.Unmarshal([]byte(cPolicy), &cPolicyObj)
	if err != nil {
		return false
	}
	// update policy information inside the annotation
	// 1> policy status
	// 2> Mutation, Validation, Generation
	if cPolicyObj.updatePolicy(PolicyObj, ruleType) {
		cPolicyByte, err := json.Marshal(cPolicyObj)
		if err != nil {
			return false
		}
		// update policy information
		ann[BuildKey(pi.Name)] = string(cPolicyByte)
		// set annotation back to unstr
		obj.SetAnnotations(ann)
		return true
	}
	return false
}
//RemovePolicy to remove annotations
// return true -> if there was an entry and we deleted it
// return false -> if there is no entry, caller need not update
func RemovePolicy(obj *unstructured.Unstructured, policy string) bool {
	// get annotations
	ann := obj.GetAnnotations()
	if ann == nil {
		return false
	}
	// nothing to do when the policy has no annotation entry
	if _, ok := ann[BuildKey(policy)]; !ok {
		return false
	}
	delete(ann, BuildKey(policy))
	// set annotation back to unstr
	obj.SetAnnotations(ann)
	return true
}
//ParseAnnotationsFromObject extracts annotations from the JSON obj
// Returns nil when the payload cannot be parsed, has no metadata object,
// or carries no annotations.
func ParseAnnotationsFromObject(bytes []byte) map[string]string {
	var objectJSON map[string]interface{}
	// previously the unmarshal error was silently dropped; log it and bail
	// out explicitly instead of proceeding with a nil map
	if err := json.Unmarshal(bytes, &objectJSON); err != nil {
		glog.Error(err)
		return nil
	}
	meta, ok := objectJSON["metadata"].(map[string]interface{})
	if !ok {
		glog.Error("unable to parse")
		return nil
	}
	ann, ok, err := unstructured.NestedStringMap(meta, "annotations")
	if err != nil || !ok {
		return nil
	}
	return ann
}
// PatchAnnotations builds a JSON patch that records the policy application
// result (from pi) in the resource's annotations. Returns (nil, nil) when
// there is nothing to record or nothing changed; otherwise returns the patch
// bytes to apply.
func PatchAnnotations(ann map[string]string, pi *pinfo.PolicyInfo, ruleType pinfo.RuleType) ([]byte, error) {
	if ruleType != pinfo.All && !pi.ContainsRuleType(ruleType) {
		// the rule was not processed in the current policy application
		return nil, nil
	}
	// transform the PolicyInfo to annotation struct
	policyObj := newAnnotationForPolicy(pi)
	if ann == nil {
		ann = make(map[string]string, 0)
		policyByte, err := json.Marshal(policyObj)
		if err != nil {
			return nil, err
		}
		// create a json patch to add annotation object
		ann[BuildKeyString(pi.Name)] = string(policyByte)
		// patch adds the annotation map with the policy information
		jsonPatch, err := createAddJSONPatchMap(ann)
		return jsonPatch, err
	}
	// if the annotations map already exists then we need to update it by adding a patch to the field inside the annotation
	cPolicy, ok := ann[BuildKey(pi.Name)]
	if !ok {
		// annotations does not contain the policy
		policyByte, err := json.Marshal(policyObj)
		if err != nil {
			return nil, err
		}
		jsonPatch, err := createAddJSONPatch(BuildKey(pi.Name), string(policyByte))
		return jsonPatch, err
	}
	// an annotation exists for the policy, we need to update the information if anything has changed
	cPolicyObj := Policy{}
	err := json.Unmarshal([]byte(cPolicy), &cPolicyObj)
	if err != nil {
		// error while unmarshalling the content
		return nil, err
	}
	// update policy information inside the annotation
	// 1> policy status
	// 2> rule (name, status,changes,type)
	update := cPolicyObj.updatePolicy(policyObj, ruleType)
	if !update {
		// there is no update, so we skip patching
		return nil, nil
	}
	policyByte, err := json.Marshal(cPolicyObj)
	if err != nil {
		return nil, err
	}
	jsonPatch, err := createAddJSONPatch(BuildKey(pi.Name), string(policyByte))
	return jsonPatch, err
}
//AddPolicyJSONPatch generate JSON Patch to add policy information JSON patch
// Returns the (possibly updated) annotation map, the patch bytes, and any
// marshalling error. Returns (nil, nil, nil) when pi carries no rule of the
// requested type.
func AddPolicyJSONPatch(ann map[string]string, pi *pinfo.PolicyInfo, ruleType pinfo.RuleType) (map[string]string, []byte, error) {
	if !pi.ContainsRuleType(ruleType) {
		return nil, nil, nil
	}
	PolicyObj := newAnnotationForPolicy(pi)
	if ann == nil {
		ann = make(map[string]string, 0)
		PolicyByte, err := json.Marshal(PolicyObj)
		if err != nil {
			return nil, nil, err
		}
		// create a json patch to add annotation object
		ann[BuildKey(pi.Name)] = string(PolicyByte)
		// create add JSON patch
		jsonPatch, err := createAddJSONPatch(BuildKey(pi.Name), string(PolicyByte))
		return ann, jsonPatch, err
	}
	// if the annotations map is present then we
	cPolicy, ok := ann[BuildKey(pi.Name)]
	if !ok {
		PolicyByte, err := json.Marshal(PolicyObj)
		if err != nil {
			return nil, nil, err
		}
		// insert policy information
		ann[BuildKey(pi.Name)] = string(PolicyByte)
		// create add JSON patch
		jsonPatch, err := createAddJSONPatch(BuildKey(pi.Name), string(PolicyByte))
		return ann, jsonPatch, err
	}
	cPolicyObj := Policy{}
	// NOTE(review): this unmarshal error is not checked before use; it is
	// only surfaced on the !update path below — confirm whether a decode
	// failure should abort instead.
	err := json.Unmarshal([]byte(cPolicy), &cPolicyObj)
	// update policy information inside the annotation
	// 1> policy status
	// 2> rule (name, status,changes,type)
	update := cPolicyObj.updatePolicy(PolicyObj, ruleType)
	if !update {
		return nil, nil, err
	}
	cPolicyByte, err := json.Marshal(cPolicyObj)
	if err != nil {
		return nil, nil, err
	}
	// update policy information
	ann[BuildKey(pi.Name)] = string(cPolicyByte)
	// create update JSON patch
	jsonPatch, err := createReplaceJSONPatch(BuildKey(pi.Name), string(cPolicyByte))
	return ann, jsonPatch, err
}
//RemovePolicyJSONPatch builds a JSON patch that removes the annotation entry
// for the given policy. A nil annotation map means there is nothing to remove.
func RemovePolicyJSONPatch(ann map[string]string, policy string) (map[string]string, []byte, error) {
	if ann == nil {
		// no annotations recorded, nothing to patch
		return nil, nil, nil
	}
	removePatch, err := createRemoveJSONPatchKey(policy)
	return ann, removePatch, err
}
// patchMapValue is one JSON-patch operation whose value is a string map
// (used when patching the whole /metadata/annotations object).
type patchMapValue struct {
	Op    string            `json:"op"`
	Path  string            `json:"path"`
	Value map[string]string `json:"value"`
}

// patchStringValue is one JSON-patch operation whose value is a string
// (used when patching a single annotation entry).
type patchStringValue struct {
	Op    string `json:"op"`
	Path  string `json:"path"`
	Value string `json:"value"`
}

// createRemoveJSONPatchMap builds a patch that drops the entire annotations map.
func createRemoveJSONPatchMap() ([]byte, error) {
	op := patchMapValue{Op: "remove", Path: "/metadata/annotations"}
	return json.Marshal([]patchMapValue{op})
}

// createAddJSONPatchMap builds a patch that adds the whole annotations map.
func createAddJSONPatchMap(ann map[string]string) ([]byte, error) {
	op := patchMapValue{Op: "add", Path: "/metadata/annotations", Value: ann}
	return json.Marshal([]patchMapValue{op})
}

// createAddJSONPatch builds a patch that adds a single annotation entry.
// key must already be JSON-Pointer escaped (see BuildKey).
func createAddJSONPatch(key, value string) ([]byte, error) {
	op := patchStringValue{Op: "add", Path: "/metadata/annotations/" + key, Value: value}
	return json.Marshal([]patchStringValue{op})
}

// createReplaceJSONPatch builds a patch that replaces a single annotation entry.
func createReplaceJSONPatch(key, value string) ([]byte, error) {
	// if ann == nil {
	// 	ann = make(map[string]string, 0)
	// }
	op := patchStringValue{Op: "replace", Path: "/metadata/annotations/" + key, Value: value}
	return json.Marshal([]patchStringValue{op})
}

// createRemoveJSONPatchKey builds a patch that removes a single annotation entry.
func createRemoveJSONPatchKey(key string) ([]byte, error) {
	op := patchStringValue{Op: "remove", Path: "/metadata/annotations/" + key}
	return json.Marshal([]patchStringValue{op})
}

View file

@ -1,36 +0,0 @@
package annotations
import (
"encoding/json"
"testing"
pinfo "github.com/nirmata/kyverno/pkg/info"
)
// TestAddPatch exercises AddPolicyJSONPatch twice: first to create the
// annotation entry for a policy, then to update it with a changed rule
// message. It only checks that no error occurs, not the patch contents.
func TestAddPatch(t *testing.T) {
	// Create
	objRaw := []byte(`{"kind":"Deployment","apiVersion":"apps/v1","metadata":{"name":"nginx-deployment","namespace":"default","creationTimestamp":null,"labels":{"app":"nginx"}},"spec":{"replicas":1,"selector":{"matchLabels":{"app":"nginx"}},"template":{"metadata":{"creationTimestamp":null,"labels":{"app":"nginx"}},"spec":{"containers":[{"name":"nginx","image":"nginx:latest","ports":[{"containerPort":80,"protocol":"TCP"}],"resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"},{"name":"ghost","image":"ghost:latest","resources":{},"terminationMessagePath":"/dev/termination-log","terminationMessagePolicy":"File","imagePullPolicy":"Always"}],"restartPolicy":"Always","terminationGracePeriodSeconds":30,"dnsPolicy":"ClusterFirst","securityContext":{},"schedulerName":"default-scheduler"}},"strategy":{"type":"RollingUpdate","rollingUpdate":{"maxUnavailable":"25%","maxSurge":"25%"}},"revisionHistoryLimit":10,"progressDeadlineSeconds":600},"status":{}}`)
	piRaw := []byte(`{"Name":"set-image-pull-policy","RKind":"Deployment","RName":"nginx-deployment","RNamespace":"default","ValidationFailureAction":"","Rules":[{"Name":"nginx-deployment","Msgs":["Rule nginx-deployment: Overlay succesfully applied."],"RuleType":0}]}`)
	ann := ParseAnnotationsFromObject(objRaw)
	pi := pinfo.PolicyInfo{}
	err := json.Unmarshal(piRaw, &pi)
	if err != nil {
		panic(err)
	}
	ann, _, err = AddPolicyJSONPatch(ann, &pi, pinfo.Mutation)
	if err != nil {
		panic(err)
	}
	// Update: same policy, different rule message, should produce a replace patch
	piRaw = []byte(`{"Name":"set-image-pull-policy","RKind":"Deployment","RName":"nginx-deployment","RNamespace":"default","ValidationFailureAction":"","Rules":[{"Name":"nginx-deployment","Msgs":["Rule nginx-deployment1: Overlay succesfully applied."],"RuleType":0}]}`)
	// ann = ParseAnnotationsFromObject(objRaw)
	pi = pinfo.PolicyInfo{}
	err = json.Unmarshal(piRaw, &pi)
	if err != nil {
		panic(err)
	}
	ann, _, err = AddPolicyJSONPatch(ann, &pi, pinfo.Mutation)
	if err != nil {
		panic(err)
	}
}

View file

@ -1,108 +0,0 @@
package annotations
import (
"fmt"
"time"
"github.com/golang/glog"
client "github.com/nirmata/kyverno/pkg/dclient"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
)
// controller applies annotation patches to resources from a rate-limited
// work queue.
type controller struct {
	client *client.Client
	queue  workqueue.RateLimitingInterface
}

// Interface exposes enqueueing of an annotation patch for a resource.
type Interface interface {
	Add(rkind, rns, rname string, patch []byte)
}

// Controller combines enqueueing with lifecycle management.
type Controller interface {
	Interface
	Run(stopCh <-chan struct{})
	Stop()
}

// NewAnnotationControler creates an annotation controller backed by a named
// rate-limiting work queue.
// NOTE(review): the name misspells "Controller"; renaming would break callers.
func NewAnnotationControler(client *client.Client) Controller {
	return &controller{
		client: client,
		queue:  workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), annotationQueueName),
	}
}

// Add enqueues an annotation patch for the resource identified by
// kind/namespace/name.
func (c *controller) Add(rkind, rns, rname string, patch []byte) {
	c.queue.Add(newInfo(rkind, rns, rname, &patch))
}
// Run starts workerThreadCount workers that drain the queue until stopCh
// closes. It does not block.
func (c *controller) Run(stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	for i := 0; i < workerThreadCount; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	glog.Info("Started annotation controller workers")
}

// Stop shuts the queue down, which causes the workers to exit.
func (c *controller) Stop() {
	c.queue.ShutDown()
	glog.Info("Shutting down annotation controller workers")
}

// runWorker processes queue items until the queue is shut down.
func (c *controller) runWorker() {
	for c.processNextWorkItem() {
	}
}
func (pc *controller) processNextWorkItem() bool {
obj, shutdown := pc.queue.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer pc.queue.Done(obj)
err := pc.syncHandler(obj)
pc.handleErr(err, obj)
return nil
}(obj)
if err != nil {
glog.Error(err)
return true
}
return true
}
// handleErr inspects the sync result for key: on success it clears the
// item's retry history; on failure it requeues with rate limiting up to
// workQueueRetryLimit attempts, after which the item is dropped.
func (pc *controller) handleErr(err error, key interface{}) {
	if err == nil {
		pc.queue.Forget(key)
		return
	}
	// This controller retries if something goes wrong. After that, it stops trying.
	if pc.queue.NumRequeues(key) < workQueueRetryLimit {
		glog.Warningf("Error syncing events %v: %v", key, err)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		pc.queue.AddRateLimited(key)
		return
	}
	pc.queue.Forget(key)
	glog.Error(err)
	glog.Warningf("Dropping the key out of the queue: %v", err)
}
// syncHandler applies the queued JSON patch to the resource identified
// by the info item. A non-info item indicates a programming error (the
// queue is only fed by Add, which enqueues info values).
func (c *controller) syncHandler(obj interface{}) error {
	// Fixed error message: the queue carries info values, not strings.
	key, ok := obj.(info)
	if !ok {
		return fmt.Errorf("expected info in workqueue but got %#v", obj)
	}
	// Fixed log message: the failing operation is a patch, not a get.
	if _, err := c.client.PatchResource(key.RKind, key.RNs, key.RName, *key.patch); err != nil {
		glog.Errorf("Error creating annotation: unable to patch resource %s/%s/%s, will retry: %s", key.RKind, key.RNs, key.RName, err)
		return err
	}
	return nil
}

View file

@ -1,18 +0,0 @@
package annotations
// info is the work-queue item describing one pending annotation patch.
// RKind/RNs/RName identify the target resource.
type info struct {
	RKind string
	RNs   string
	RName string
	// The patch is held by pointer: a slice field would make the struct
	// unhashable, and work-queue items must be hashable.
	patch *[]byte
}

// newInfo bundles a resource identity and its patch into a queue item.
func newInfo(rkind, rns, rname string, patch *[]byte) info {
	item := info{
		RKind: rkind,
		RNs:   rns,
		RName: rname,
	}
	item.patch = patch
	return item
}

View file

@ -1,21 +0,0 @@
package annotations
// annotationQueueName names the rate-limited annotation work queue.
const annotationQueueName = "annotation-queue"

// workerThreadCount is the number of concurrent queue workers started by Run.
const workerThreadCount = 1

// workQueueRetryLimit caps how many times a failed item is requeued
// before it is dropped.
const workQueueRetryLimit = 5
// getStatus maps a boolean outcome to its display string:
// "Success" for true, "Failure" for false.
func getStatus(status bool) string {
	if !status {
		return "Failure"
	}
	return "Success"
}
// BuildKey returns the annotation key for a policy with the "/" escaped
// as "~1", as required for JSON Pointer paths (RFC 6901).
func BuildKey(policyName string) string {
	const escapedPrefix = "policies.kyverno.io~1"
	return escapedPrefix + policyName
}
// BuildKeyString returns the unescaped annotation key for a policy
// (plain "/" separator, unlike the JSON-Pointer form from BuildKey).
func BuildKeyString(policyName string) string {
	const prefix = "policies.kyverno.io/"
	return prefix + policyName
}

View file

@ -1,4 +1,4 @@
package policy
package kyverno
const (
// GroupName must be the same as specified in Policy CRD

View file

@ -5,11 +5,11 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/nirmata/kyverno/pkg/apis/policy"
"github.com/nirmata/kyverno/pkg/api/kyverno"
)
// SchemeGroupVersion is group version used to register these objects
var SchemeGroupVersion = schema.GroupVersion{Group: policy.GroupName, Version: "v1alpha1"}
var SchemeGroupVersion = schema.GroupVersion{Group: kyverno.GroupName, Version: "v1alpha1"}
// Kind takes an unqualified kind and returns back a Group qualified GroupKind
func Kind(kind string) schema.GroupKind {
@ -31,6 +31,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&Policy{},
&PolicyList{},
&PolicyViolation{},
&PolicyViolationList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil

View file

@ -0,0 +1,162 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Policy contains rules to be applied to created resources
type Policy struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	// Spec declares the rules to evaluate against admitted resources.
	Spec Spec `json:"spec"`
	// Status carries aggregate execution statistics for the policy.
	Status PolicyStatus `json:"status"`
}

// Spec describes policy behavior by its rules
type Spec struct {
	// Rules are evaluated in order for each resource.
	Rules []Rule `json:"rules"`
	// ValidationFailureAction controls how a validation failure is handled
	// (blocked vs. reported) — exact values are defined by the webhook.
	ValidationFailureAction string `json:"validationFailureAction"`
}
// Rule is set of mutation, validation and generation actions
// for the single resource description
type Rule struct {
	// Name identifies the rule in status reports and violations.
	Name string `json:"name"`
	// MatchResources selects the resources the rule applies to.
	MatchResources MatchResources `json:"match"`
	// ExcludeResources carves exceptions out of the match set.
	ExcludeResources ExcludeResources `json:"exclude,omitempty"`
	Mutation         Mutation         `json:"mutate"`
	Validation       Validation       `json:"validate"`
	Generation       Generation       `json:"generate"`
}

//MatchResources contains resource description of the resources that the rule is to apply on
type MatchResources struct {
	ResourceDescription `json:"resources"`
}

//ExcludeResources contains the resource description of the resources that are to be excluded from the applying the policy rule
type ExcludeResources struct {
	ResourceDescription `json:"resources"`
}

// ResourceDescription describes the resource to which the PolicyRule will be applied.
type ResourceDescription struct {
	// Kinds lists the resource kinds (e.g. "Deployment") to select.
	Kinds []string `json:"kinds"`
	// Name optionally restricts selection to a single resource name.
	Name string `json:"name"`
	// Namespaces optionally restricts selection to these namespaces.
	Namespaces []string `json:"namespaces,omitempty"`
	// Selector optionally restricts selection by labels.
	Selector *metav1.LabelSelector `json:"selector"`
}
// Mutation describes the way how Mutating Webhook will react on resource creation
type Mutation struct {
	// Overlay is merged into the resource.
	Overlay interface{} `json:"overlay"`
	// Patches are RFC 6902 JSON patch operations, applied in order.
	Patches []Patch `json:"patches"`
}

// +k8s:deepcopy-gen=false
// Patch declares patch operation for created object according to RFC 6902
type Patch struct {
	Path      string      `json:"path"`
	Operation string      `json:"op"`
	Value     interface{} `json:"value"`
}

// Validation describes the way how Validating Webhook will check the resource on creation
type Validation struct {
	// Message is reported when the pattern does not match.
	Message string `json:"message"`
	// Pattern is the structural pattern the resource must satisfy.
	Pattern interface{} `json:"pattern"`
}

// Generation describes which resources will be created when other resource is created
type Generation struct {
	Kind string `json:"kind"`
	Name string `json:"name"`
	// Data is the inline content of the generated resource; Validate()
	// requires exactly one of Data or Clone to be set.
	Data interface{} `json:"data"`
	// Clone copies an existing resource instead of using inline Data.
	Clone CloneFrom `json:"clone"`
}

// CloneFrom - location of a Secret or a ConfigMap
// which will be used as source when applying 'generate'
type CloneFrom struct {
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
}
//PolicyStatus provides status for violations
type PolicyStatus struct {
	// Count of currently open violations for this policy
	ViolationCount int `json:"violationCount"`
	// Count of rules that were applied
	RulesAppliedCount int `json:"rulesAppliedCount"`
	// Count of resources for whom update/create api requests were blocked as the resource did not satisfy the policy rules
	ResourcesBlockedCount int `json:"resourcesBlockedCount"`
	// average time required to process the policy Mutation rules on a resource
	AvgExecutionTimeMutation string `json:"averageMutationRulesExecutionTime"`
	// average time required to process the policy Validation rules on a resource
	AvgExecutionTimeValidation string `json:"averageValidationRulesExecutionTime"`
	// average time required to process the policy Generation rules on a resource
	AvgExecutionTimeGeneration string `json:"averageGenerationRulesExecutionTime"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PolicyList is a list of Policy resources
type PolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []Policy `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PolicyViolation stores the information regarding the resources for which a policy failed to apply
type PolicyViolation struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              PolicyViolationSpec   `json:"spec"`
	Status            PolicyViolationStatus `json:"status"`
}

// PolicyViolationSpec describes policy behavior by its rules
type PolicyViolationSpec struct {
	// Policy is the name of the violated policy.
	Policy string `json:"policy"`
	// ResourceSpec identifies the offending resource.
	ResourceSpec `json:"resource"`
	// ViolatedRules lists the rules the resource failed.
	ViolatedRules []ViolatedRule `json:"rules"`
}

// ResourceSpec information to identify the resource
type ResourceSpec struct {
	Kind string `json:"kind"`
	// Namespace is empty for cluster-scoped resources.
	Namespace string `json:"namespace,omitempty"`
	Name      string `json:"name"`
}

// ViolatedRule stores the information regarding the rule
type ViolatedRule struct {
	Name    string `json:"name"`
	Type    string `json:"type"`
	Message string `json:"message"`
}

//PolicyViolationStatus provides information regarding policyviolation status
// status:
// LastUpdateTime : the time the policy violation was updated
type PolicyViolationStatus struct {
	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
	//TODO: having user information regarding the owner of resource can be helpful
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PolicyViolationList is a list of Policy Violation
type PolicyViolationList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []PolicyViolation `json:"items"`
}

View file

@ -15,10 +15,6 @@ func (r *Rule) Validate() error {
return err
}
if r.Mutation == nil && r.Validation == nil && r.Generation == nil {
return errors.New("The rule is empty")
}
return nil
}
@ -64,12 +60,12 @@ func (pp *Patch) Validate() error {
}
// Validate returns error if generator is configured incompletely
func (pcg *Generation) Validate() error {
if pcg.Data == nil && pcg.Clone == nil {
return fmt.Errorf("Neither data nor clone (source) of %s is specified", pcg.Kind)
func (gen *Generation) Validate() error {
if gen.Data == nil && gen.Clone == (CloneFrom{}) {
return fmt.Errorf("Neither data nor clone (source) of %s is specified", gen.Kind)
}
if pcg.Data != nil && pcg.Clone != nil {
return fmt.Errorf("Both data nor clone (source) of %s are specified", pcg.Kind)
if gen.Data != nil && gen.Clone != (CloneFrom{}) {
return fmt.Errorf("Both data nor clone (source) of %s are specified", gen.Kind)
}
return nil
}
@ -100,49 +96,16 @@ func (in *Validation) DeepCopyInto(out *Validation) {
// DeepCopyInto is declared because k8s:deepcopy-gen is
// not able to generate this method for interface{} member
func (in *Generation) DeepCopyInto(out *Generation) {
func (gen *Generation) DeepCopyInto(out *Generation) {
if out != nil {
*out = *in
*out = *gen
}
}
// return true -> if there were any removals
// return false -> if it looks the same
func (v *Violation) RemoveRulesOfType(ruleType string) bool {
removed := false
updatedRules := []FailedRule{}
for _, r := range v.Rules {
if r.Type == ruleType {
removed = true
continue
//ToKey generates the key string used for adding label to polivy violation
func (rs ResourceSpec) ToKey() string {
if rs.Namespace == "" {
return rs.Kind + "." + rs.Name
}
updatedRules = append(updatedRules, r)
}
if removed {
v.Rules = updatedRules
return true
}
return false
}
//IsEqual Check if violatiosn are equal
func (v *Violation) IsEqual(nv Violation) bool {
// We do not need to compare resource info as it will be same
// Reason
if v.Reason != nv.Reason {
return false
}
// Rule
if len(v.Rules) != len(nv.Rules) {
return false
}
// assumes the rules will be in order, as the rule are proceeed in order
// if the rule order changes, it means the policy has changed.. as it will afffect the order in which mutation rules are applied
for i, r := range v.Rules {
if r != nv.Rules[i] {
return false
}
}
return true
return rs.Kind + "." + rs.Namespace + "." + rs.Name
}

View file

@ -58,22 +58,6 @@ func (in *ExcludeResources) DeepCopy() *ExcludeResources {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *FailedRule) DeepCopyInto(out *FailedRule) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new FailedRule.
func (in *FailedRule) DeepCopy() *FailedRule {
if in == nil {
return nil
}
out := new(FailedRule)
in.DeepCopyInto(out)
return out
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generation.
func (in *Generation) DeepCopy() *Generation {
if in == nil {
@ -117,7 +101,7 @@ func (in *Policy) DeepCopyInto(out *Policy) {
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
out.Status = in.Status
return
}
@ -172,6 +156,122 @@ func (in *PolicyList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus.
func (in *PolicyStatus) DeepCopy() *PolicyStatus {
if in == nil {
return nil
}
out := new(PolicyStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyViolation) DeepCopyInto(out *PolicyViolation) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
in.Status.DeepCopyInto(&out.Status)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyViolation.
func (in *PolicyViolation) DeepCopy() *PolicyViolation {
if in == nil {
return nil
}
out := new(PolicyViolation)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PolicyViolation) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyViolationList) DeepCopyInto(out *PolicyViolationList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]PolicyViolation, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyViolationList.
func (in *PolicyViolationList) DeepCopy() *PolicyViolationList {
if in == nil {
return nil
}
out := new(PolicyViolationList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *PolicyViolationList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyViolationSpec) DeepCopyInto(out *PolicyViolationSpec) {
*out = *in
out.ResourceSpec = in.ResourceSpec
if in.ViolatedRules != nil {
in, out := &in.ViolatedRules, &out.ViolatedRules
*out = make([]ViolatedRule, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyViolationSpec.
func (in *PolicyViolationSpec) DeepCopy() *PolicyViolationSpec {
if in == nil {
return nil
}
out := new(PolicyViolationSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyViolationStatus) DeepCopyInto(out *PolicyViolationStatus) {
*out = *in
in.LastUpdateTime.DeepCopyInto(&out.LastUpdateTime)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyViolationStatus.
func (in *PolicyViolationStatus) DeepCopy() *PolicyViolationStatus {
if in == nil {
return nil
}
out := new(PolicyViolationStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
*out = *in
@ -180,15 +280,10 @@ func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Name != nil {
in, out := &in.Name, &out.Name
*out = new(string)
**out = **in
}
if in.Namespace != nil {
in, out := &in.Namespace, &out.Namespace
*out = new(string)
**out = **in
if in.Namespaces != nil {
in, out := &in.Namespaces, &out.Namespaces
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
@ -208,23 +303,30 @@ func (in *ResourceDescription) DeepCopy() *ResourceDescription {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec.
func (in *ResourceSpec) DeepCopy() *ResourceSpec {
if in == nil {
return nil
}
out := new(ResourceSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Rule) DeepCopyInto(out *Rule) {
*out = *in
in.MatchResources.DeepCopyInto(&out.MatchResources)
in.ExcludeResources.DeepCopyInto(&out.ExcludeResources)
if in.Mutation != nil {
in, out := &in.Mutation, &out.Mutation
*out = (*in).DeepCopy()
}
if in.Validation != nil {
in, out := &in.Validation, &out.Validation
*out = (*in).DeepCopy()
}
if in.Generation != nil {
in, out := &in.Generation, &out.Generation
*out = (*in).DeepCopy()
}
in.Mutation.DeepCopyInto(&out.Mutation)
in.Validation.DeepCopyInto(&out.Validation)
in.Generation.DeepCopyInto(&out.Generation)
return
}
@ -261,29 +363,6 @@ func (in *Spec) DeepCopy() *Spec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Status) DeepCopyInto(out *Status) {
*out = *in
if in.Violations != nil {
in, out := &in.Violations, &out.Violations
*out = make(map[string]Violation, len(*in))
for key, val := range *in {
(*out)[key] = *val.DeepCopy()
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Status.
func (in *Status) DeepCopy() *Status {
if in == nil {
return nil
}
out := new(Status)
in.DeepCopyInto(out)
return out
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation.
func (in *Validation) DeepCopy() *Validation {
if in == nil {
@ -295,22 +374,17 @@ func (in *Validation) DeepCopy() *Validation {
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Violation) DeepCopyInto(out *Violation) {
func (in *ViolatedRule) DeepCopyInto(out *ViolatedRule) {
*out = *in
if in.Rules != nil {
in, out := &in.Rules, &out.Rules
*out = make([]FailedRule, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Violation.
func (in *Violation) DeepCopy() *Violation {
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ViolatedRule.
func (in *ViolatedRule) DeepCopy() *ViolatedRule {
if in == nil {
return nil
}
out := new(Violation)
out := new(ViolatedRule)
in.DeepCopyInto(out)
return out
}

View file

@ -1,119 +0,0 @@
package v1alpha1
import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// Policy contains rules to be applied to created resources
type Policy struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`
	Spec              Spec `json:"spec"`
	// Status records violations observed for this policy.
	Status Status `json:"status"`
}

// Spec describes policy behavior by its rules
type Spec struct {
	Rules                   []Rule `json:"rules"`
	ValidationFailureAction string `json:"validationFailureAction"`
}

// Rule is set of mutation, validation and generation actions
// for the single resource description
type Rule struct {
	Name             string           `json:"name"`
	MatchResources   MatchResources   `json:"match"`
	ExcludeResources ExcludeResources `json:"exclude,omitempty"`
	// Mutation, Validation and Generation are optional; nil means the
	// action is not configured for this rule.
	Mutation   *Mutation   `json:"mutate"`
	Validation *Validation `json:"validate"`
	Generation *Generation `json:"generate"`
}

//MatchResources contains resource description of the resources that the rule is to apply on
type MatchResources struct {
	ResourceDescription `json:"resources"`
}

//ExcludeResources contains the resource description of the resources that are to be excluded from the applying the policy rule
type ExcludeResources struct {
	ResourceDescription `json:"resources"`
}

// ResourceDescription describes the resource to which the PolicyRule will be applied.
type ResourceDescription struct {
	Kinds []string `json:"kinds"`
	// Name and Namespace are optional; nil means unrestricted.
	Name      *string               `json:"name"`
	Namespace *string               `json:"namespace,omitempty"`
	Selector  *metav1.LabelSelector `json:"selector"`
}
// Mutation describes the way how Mutating Webhook will react on resource creation
type Mutation struct {
	// Overlay is optional; nil means no overlay is applied.
	Overlay *interface{} `json:"overlay"`
	// Patches are RFC 6902 JSON patch operations, applied in order.
	Patches []Patch `json:"patches"`
}

// +k8s:deepcopy-gen=false
// Patch declares patch operation for created object according to RFC 6902
type Patch struct {
	Path      string      `json:"path"`
	Operation string      `json:"op"`
	Value     interface{} `json:"value"`
}

// Validation describes the way how Validating Webhook will check the resource on creation
type Validation struct {
	// Message is optional; nil means no custom failure message.
	Message *string     `json:"message"`
	Pattern interface{} `json:"pattern"`
}

// Generation describes which resources will be created when other resource is created
type Generation struct {
	Kind string `json:"kind"`
	Name string `json:"name"`
	Data interface{} `json:"data"`
	// Clone is optional; nil means inline Data is used instead.
	Clone *CloneFrom `json:"clone"`
}

// CloneFrom - location of a Secret or a ConfigMap
// which will be used as source when applying 'generate'
type CloneFrom struct {
	Namespace string `json:"namespace"`
	Name      string `json:"name"`
}

// Status contains violations for existing resources
type Status struct {
	// Violations map[kind/namespace/resource]Violation
	Violations map[string]Violation `json:"violations,omitempty"`
}

// Violation for the policy
type Violation struct {
	Kind      string       `json:"kind,omitempty"`
	Name      string       `json:"name,omitempty"`
	Namespace string       `json:"namespace,omitempty"`
	Rules     []FailedRule `json:"rules"`
	Reason    string       `json:"reason,omitempty"`
}

// FailedRule stored info and type of failed rules
type FailedRule struct {
	Name  string `json:"name"`
	Type  string `json:"type"` //Mutation, Validation, Generation
	Error string `json:"error"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// PolicyList is a list of Policy resources
type PolicyList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata"`
	Items           []Policy `json:"items"`
}

View file

@ -1,88 +0,0 @@
package v1alpha1
import (
"testing"
"gotest.tools/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// defaultResourceDescriptionName is the resource name shared by the fixtures below.
var defaultResourceDescriptionName = "defaultResourceDescription"

// defaultResourceDescription is a valid fixture: it has a kind, a name and a selector.
var defaultResourceDescription = ResourceDescription{
	Kinds: []string{"Deployment"},
	Name:  &defaultResourceDescriptionName,
	Selector: &metav1.LabelSelector{
		MatchLabels: map[string]string{"LabelForSelector": "defaultResourceDescription"},
	},
}

// A rule with no mutate/validate/generate action must fail validation.
func Test_EmptyRule(t *testing.T) {
	emptyRule := Rule{
		Name:           "defaultRule",
		MatchResources: MatchResources{ResourceDescription: defaultResourceDescription},
	}
	err := emptyRule.Validate()
	assert.Assert(t, err != nil)
}

// The default fixture is expected to validate cleanly.
func Test_ResourceDescription(t *testing.T) {
	err := defaultResourceDescription.Validate()
	assert.NilError(t, err)
}

// A description without any kinds must fail validation.
func Test_ResourceDescription_EmptyKind(t *testing.T) {
	resourceDescription := ResourceDescription{
		Name: &defaultResourceDescriptionName,
		Selector: &metav1.LabelSelector{
			MatchLabels: map[string]string{"LabelForSelector": "defaultResourceDescription"},
		},
	}
	err := resourceDescription.Validate()
	assert.Assert(t, err != nil)
}

// Name and selector are optional: kinds alone are sufficient.
func Test_ResourceDescription_EmptyNameAndSelector(t *testing.T) {
	resourceDescription := ResourceDescription{
		Kinds: []string{"Deployment"},
	}
	err := resourceDescription.Validate()
	assert.NilError(t, err)
}

// A patch without a path must fail validation.
func Test_Patch_EmptyPath(t *testing.T) {
	patch := Patch{
		Operation: "add",
		Value:     "true",
	}
	err := patch.Validate()
	assert.Assert(t, err != nil)
}

// An "add" operation requires a value.
func Test_Patch_EmptyValueWithAdd(t *testing.T) {
	patch := Patch{
		Path:      "/metadata/labels/is-mutated",
		Operation: "add",
	}
	err := patch.Validate()
	assert.Assert(t, err != nil)
}

// "overwrite" is not a supported patch operation and must be rejected.
func Test_Patch_UnsupportedOperation(t *testing.T) {
	patch := Patch{
		Path:      "/metadata/labels/is-mutated",
		Operation: "overwrite",
		Value:     "true",
	}
	err := patch.Validate()
	assert.Assert(t, err != nil)
}

// A generator with neither data nor clone source must fail validation.
func Test_Generation_EmptyCopyFrom(t *testing.T) {
	generation := Generation{
		Kind: "ConfigMap",
		Name: "comfigmapGenerator",
	}
	err := generation.Validate()
	assert.Assert(t, err != nil)
}

View file

@ -19,7 +19,7 @@ limitations under the License.
package versioned
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policy/v1alpha1"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"

View file

@ -20,8 +20,8 @@ package fake
import (
clientset "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policy/v1alpha1"
fakekyvernov1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policy/v1alpha1/fake"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1alpha1"
fakekyvernov1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"

View file

@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"

View file

@ -19,7 +19,7 @@ limitations under the License.
package scheme
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"

View file

@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
v1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1alpha1"
rest "k8s.io/client-go/rest"
testing "k8s.io/client-go/testing"
)
@ -32,6 +32,10 @@ func (c *FakeKyvernoV1alpha1) Policies() v1alpha1.PolicyInterface {
return &FakePolicies{c}
}
func (c *FakeKyvernoV1alpha1) PolicyViolations() v1alpha1.PolicyViolationInterface {
return &FakePolicyViolations{c}
}
// RESTClient returns a RESTClient that is used to communicate
// with API server by this client implementation.
func (c *FakeKyvernoV1alpha1) RESTClient() rest.Interface {

View file

@ -19,7 +19,7 @@ limitations under the License.
package fake
import (
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"

View file

@ -0,0 +1,131 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// NOTE(review): this file is produced by client-gen (see the "DO NOT EDIT"
// header); prefer regenerating over hand-editing.

// FakePolicyViolations implements PolicyViolationInterface
// by recording actions on the embedded fake clientset; a nil obj returned
// from Invokes means only an error was produced.
type FakePolicyViolations struct {
	Fake *FakeKyvernoV1alpha1
}

// Resource and kind identifiers used to register fake actions for
// cluster-scoped policyviolations.
var policyviolationsResource = schema.GroupVersionResource{Group: "kyverno.io", Version: "v1alpha1", Resource: "policyviolations"}

var policyviolationsKind = schema.GroupVersionKind{Group: "kyverno.io", Version: "v1alpha1", Kind: "PolicyViolation"}

// Get takes name of the policyViolation, and returns the corresponding policyViolation object, and an error if there is any.
func (c *FakePolicyViolations) Get(name string, options v1.GetOptions) (result *v1alpha1.PolicyViolation, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootGetAction(policyviolationsResource, name), &v1alpha1.PolicyViolation{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.PolicyViolation), err
}

// List takes label and field selectors, and returns the list of PolicyViolations that match those selectors.
func (c *FakePolicyViolations) List(opts v1.ListOptions) (result *v1alpha1.PolicyViolationList, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootListAction(policyviolationsResource, policyviolationsKind, opts), &v1alpha1.PolicyViolationList{})
	if obj == nil {
		return nil, err
	}
	// Label filtering is applied client-side here, mirroring server behavior.
	label, _, _ := testing.ExtractFromListOptions(opts)
	if label == nil {
		label = labels.Everything()
	}
	list := &v1alpha1.PolicyViolationList{ListMeta: obj.(*v1alpha1.PolicyViolationList).ListMeta}
	for _, item := range obj.(*v1alpha1.PolicyViolationList).Items {
		if label.Matches(labels.Set(item.Labels)) {
			list.Items = append(list.Items, item)
		}
	}
	return list, err
}

// Watch returns a watch.Interface that watches the requested policyViolations.
func (c *FakePolicyViolations) Watch(opts v1.ListOptions) (watch.Interface, error) {
	return c.Fake.
		InvokesWatch(testing.NewRootWatchAction(policyviolationsResource, opts))
}

// Create takes the representation of a policyViolation and creates it. Returns the server's representation of the policyViolation, and an error, if there is any.
func (c *FakePolicyViolations) Create(policyViolation *v1alpha1.PolicyViolation) (result *v1alpha1.PolicyViolation, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootCreateAction(policyviolationsResource, policyViolation), &v1alpha1.PolicyViolation{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.PolicyViolation), err
}

// Update takes the representation of a policyViolation and updates it. Returns the server's representation of the policyViolation, and an error, if there is any.
func (c *FakePolicyViolations) Update(policyViolation *v1alpha1.PolicyViolation) (result *v1alpha1.PolicyViolation, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootUpdateAction(policyviolationsResource, policyViolation), &v1alpha1.PolicyViolation{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.PolicyViolation), err
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakePolicyViolations) UpdateStatus(policyViolation *v1alpha1.PolicyViolation) (*v1alpha1.PolicyViolation, error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootUpdateSubresourceAction(policyviolationsResource, "status", policyViolation), &v1alpha1.PolicyViolation{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.PolicyViolation), err
}

// Delete takes name of the policyViolation and deletes it. Returns an error if one occurs.
func (c *FakePolicyViolations) Delete(name string, options *v1.DeleteOptions) error {
	_, err := c.Fake.
		Invokes(testing.NewRootDeleteAction(policyviolationsResource, name), &v1alpha1.PolicyViolation{})
	return err
}

// DeleteCollection deletes a collection of objects.
func (c *FakePolicyViolations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	action := testing.NewRootDeleteCollectionAction(policyviolationsResource, listOptions)
	_, err := c.Fake.Invokes(action, &v1alpha1.PolicyViolationList{})
	return err
}

// Patch applies the patch and returns the patched policyViolation.
func (c *FakePolicyViolations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PolicyViolation, err error) {
	obj, err := c.Fake.
		Invokes(testing.NewRootPatchSubresourceAction(policyviolationsResource, name, pt, data, subresources...), &v1alpha1.PolicyViolation{})
	if obj == nil {
		return nil, err
	}
	return obj.(*v1alpha1.PolicyViolation), err
}

View file

@ -19,3 +19,5 @@ limitations under the License.
package v1alpha1
type PolicyExpansion interface{}
type PolicyViolationExpansion interface{}

View file

@ -19,7 +19,7 @@ limitations under the License.
package v1alpha1
import (
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
rest "k8s.io/client-go/rest"
@ -28,6 +28,7 @@ import (
type KyvernoV1alpha1Interface interface {
RESTClient() rest.Interface
PoliciesGetter
PolicyViolationsGetter
}
// KyvernoV1alpha1Client is used to interact with features provided by the kyverno.io group.
@ -39,6 +40,10 @@ func (c *KyvernoV1alpha1Client) Policies() PolicyInterface {
return newPolicies(c)
}
func (c *KyvernoV1alpha1Client) PolicyViolations() PolicyViolationInterface {
return newPolicyViolations(c)
}
// NewForConfig creates a new KyvernoV1alpha1Client for the given config.
func NewForConfig(c *rest.Config) (*KyvernoV1alpha1Client, error) {
config := *c

View file

@ -21,7 +21,7 @@ package v1alpha1
import (
"time"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
scheme "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"

View file

@ -0,0 +1,180 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1alpha1
import (
"time"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
scheme "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// NOTE(review): code generated by client-gen; regenerate rather than
// hand-edit. PolicyViolation is cluster-scoped (see the CRD manifest), so
// the getter and all requests carry no namespace.

// PolicyViolationsGetter has a method to return a PolicyViolationInterface.
// A group's client should implement this interface.
type PolicyViolationsGetter interface {
	PolicyViolations() PolicyViolationInterface
}

// PolicyViolationInterface has methods to work with PolicyViolation resources.
type PolicyViolationInterface interface {
	Create(*v1alpha1.PolicyViolation) (*v1alpha1.PolicyViolation, error)
	Update(*v1alpha1.PolicyViolation) (*v1alpha1.PolicyViolation, error)
	UpdateStatus(*v1alpha1.PolicyViolation) (*v1alpha1.PolicyViolation, error)
	Delete(name string, options *v1.DeleteOptions) error
	DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error
	Get(name string, options v1.GetOptions) (*v1alpha1.PolicyViolation, error)
	List(opts v1.ListOptions) (*v1alpha1.PolicyViolationList, error)
	Watch(opts v1.ListOptions) (watch.Interface, error)
	Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PolicyViolation, err error)
	PolicyViolationExpansion
}

// policyViolations implements PolicyViolationInterface
type policyViolations struct {
	client rest.Interface
}

// newPolicyViolations returns a PolicyViolations
func newPolicyViolations(c *KyvernoV1alpha1Client) *policyViolations {
	return &policyViolations{
		client: c.RESTClient(),
	}
}
// NOTE(review): code generated by client-gen; regenerate rather than
// hand-edit. All methods target the cluster-scoped "policyviolations"
// resource.

// Get takes name of the policyViolation, and returns the corresponding policyViolation object, and an error if there is any.
func (c *policyViolations) Get(name string, options v1.GetOptions) (result *v1alpha1.PolicyViolation, err error) {
	result = &v1alpha1.PolicyViolation{}
	err = c.client.Get().
		Resource("policyviolations").
		Name(name).
		VersionedParams(&options, scheme.ParameterCodec).
		Do().
		Into(result)
	return
}

// List takes label and field selectors, and returns the list of PolicyViolations that match those selectors.
func (c *policyViolations) List(opts v1.ListOptions) (result *v1alpha1.PolicyViolationList, err error) {
	// Honor the caller-supplied server-side timeout, if any.
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	result = &v1alpha1.PolicyViolationList{}
	err = c.client.Get().
		Resource("policyviolations").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Do().
		Into(result)
	return
}

// Watch returns a watch.Interface that watches the requested policyViolations.
func (c *policyViolations) Watch(opts v1.ListOptions) (watch.Interface, error) {
	var timeout time.Duration
	if opts.TimeoutSeconds != nil {
		timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
	}
	opts.Watch = true
	return c.client.Get().
		Resource("policyviolations").
		VersionedParams(&opts, scheme.ParameterCodec).
		Timeout(timeout).
		Watch()
}

// Create takes the representation of a policyViolation and creates it. Returns the server's representation of the policyViolation, and an error, if there is any.
func (c *policyViolations) Create(policyViolation *v1alpha1.PolicyViolation) (result *v1alpha1.PolicyViolation, err error) {
	result = &v1alpha1.PolicyViolation{}
	err = c.client.Post().
		Resource("policyviolations").
		Body(policyViolation).
		Do().
		Into(result)
	return
}

// Update takes the representation of a policyViolation and updates it. Returns the server's representation of the policyViolation, and an error, if there is any.
func (c *policyViolations) Update(policyViolation *v1alpha1.PolicyViolation) (result *v1alpha1.PolicyViolation, err error) {
	result = &v1alpha1.PolicyViolation{}
	err = c.client.Put().
		Resource("policyviolations").
		Name(policyViolation.Name).
		Body(policyViolation).
		Do().
		Into(result)
	return
}

// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *policyViolations) UpdateStatus(policyViolation *v1alpha1.PolicyViolation) (result *v1alpha1.PolicyViolation, err error) {
	result = &v1alpha1.PolicyViolation{}
	err = c.client.Put().
		Resource("policyviolations").
		Name(policyViolation.Name).
		SubResource("status").
		Body(policyViolation).
		Do().
		Into(result)
	return
}

// Delete takes name of the policyViolation and deletes it. Returns an error if one occurs.
func (c *policyViolations) Delete(name string, options *v1.DeleteOptions) error {
	return c.client.Delete().
		Resource("policyviolations").
		Name(name).
		Body(options).
		Do().
		Error()
}

// DeleteCollection deletes a collection of objects.
func (c *policyViolations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
	var timeout time.Duration
	if listOptions.TimeoutSeconds != nil {
		timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
	}
	return c.client.Delete().
		Resource("policyviolations").
		VersionedParams(&listOptions, scheme.ParameterCodec).
		Timeout(timeout).
		Body(options).
		Do().
		Error()
}

// Patch applies the patch and returns the patched policyViolation.
func (c *policyViolations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1alpha1.PolicyViolation, err error) {
	result = &v1alpha1.PolicyViolation{}
	err = c.client.Patch(pt).
		Resource("policyviolations").
		SubResource(subresources...).
		Name(name).
		Body(data).
		Do().
		Into(result)
	return
}

View file

@ -25,7 +25,7 @@ import (
versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
internalinterfaces "github.com/nirmata/kyverno/pkg/client/informers/externalversions/internalinterfaces"
policy "github.com/nirmata/kyverno/pkg/client/informers/externalversions/policy"
kyverno "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
@ -172,9 +172,9 @@ type SharedInformerFactory interface {
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Kyverno() policy.Interface
Kyverno() kyverno.Interface
}
func (f *sharedInformerFactory) Kyverno() policy.Interface {
return policy.New(f, f.namespace, f.tweakListOptions)
func (f *sharedInformerFactory) Kyverno() kyverno.Interface {
return kyverno.New(f, f.namespace, f.tweakListOptions)
}

View file

@ -21,7 +21,7 @@ package externalversions
import (
"fmt"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
@ -55,6 +55,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
// Group=kyverno.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("policies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1alpha1().Policies().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("policyviolations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1alpha1().PolicyViolations().Informer()}, nil
}

View file

@ -20,7 +20,7 @@ package kyverno
import (
internalinterfaces "github.com/nirmata/kyverno/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/informers/externalversions/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
)
// Interface provides access to each of this group's versions.

View file

@ -26,6 +26,8 @@ import (
type Interface interface {
// Policies returns a PolicyInformer.
Policies() PolicyInformer
// PolicyViolations returns a PolicyViolationInformer.
PolicyViolations() PolicyViolationInformer
}
type version struct {
@ -43,3 +45,8 @@ func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakList
func (v *version) Policies() PolicyInformer {
return &policyInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
// PolicyViolations returns a PolicyViolationInformer.
func (v *version) PolicyViolations() PolicyViolationInformer {
return &policyViolationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}

View file

@ -21,10 +21,10 @@ package v1alpha1
import (
time "time"
policyv1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
internalinterfaces "github.com/nirmata/kyverno/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
@ -69,7 +69,7 @@ func NewFilteredPolicyInformer(client versioned.Interface, resyncPeriod time.Dur
return client.KyvernoV1alpha1().Policies().Watch(options)
},
},
&policyv1alpha1.Policy{},
&kyvernov1alpha1.Policy{},
resyncPeriod,
indexers,
)
@ -80,7 +80,7 @@ func (f *policyInformer) defaultInformer(client versioned.Interface, resyncPerio
}
func (f *policyInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&policyv1alpha1.Policy{}, f.defaultInformer)
return f.factory.InformerFor(&kyvernov1alpha1.Policy{}, f.defaultInformer)
}
func (f *policyInformer) Lister() v1alpha1.PolicyLister {

View file

@ -0,0 +1,88 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1alpha1
import (
time "time"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
internalinterfaces "github.com/nirmata/kyverno/pkg/client/informers/externalversions/internalinterfaces"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// PolicyViolationInformer provides access to a shared informer and lister for
// PolicyViolations.
type PolicyViolationInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1alpha1.PolicyViolationLister
}
type policyViolationInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
}
// NewPolicyViolationInformer constructs a new informer for PolicyViolation type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewPolicyViolationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredPolicyViolationInformer(client, resyncPeriod, indexers, nil)
}
// NewFilteredPolicyViolationInformer constructs a new informer for PolicyViolation type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredPolicyViolationInformer(client versioned.Interface, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options v1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.KyvernoV1alpha1().PolicyViolations().List(options)
},
WatchFunc: func(options v1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.KyvernoV1alpha1().PolicyViolations().Watch(options)
},
},
&kyvernov1alpha1.PolicyViolation{},
resyncPeriod,
indexers,
)
}
func (f *policyViolationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredPolicyViolationInformer(client, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *policyViolationInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&kyvernov1alpha1.PolicyViolation{}, f.defaultInformer)
}
func (f *policyViolationInformer) Lister() v1alpha1.PolicyViolationLister {
return v1alpha1.NewPolicyViolationLister(f.Informer().GetIndexer())
}

View file

@ -0,0 +1,101 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
"fmt"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
// PolicyListerExpansion allows custom methods to be added to
// PolicyLister.
type PolicyListerExpansion interface {
	// GetPolicyForPolicyViolation resolves the policies that own the given
	// violation via the violation's labels.
	GetPolicyForPolicyViolation(pv *kyverno.PolicyViolation) ([]*kyverno.Policy, error)
	// ListResources lists policies and stamps each with its GroupVersionKind.
	ListResources(selector labels.Selector) (ret []*v1alpha1.Policy, err error)
}
// PolicyViolationListerExpansion allows custom methods to be added to
// PolicyViolationLister.
type PolicyViolationListerExpansion interface {
	// ListResources lists all PolicyViolations in the indexer and stamps
	// each returned object with its GroupVersionKind.
	ListResources(selector labels.Selector) (ret []*v1alpha1.PolicyViolation, err error)
}
// ListResources is a wrapper around List that stamps each returned object
// with its GroupVersionKind. As the lister is specific to a single GVK we
// can hardcode the kind here.
func (pvl *policyViolationLister) ListResources(selector labels.Selector) (ret []*v1alpha1.PolicyViolation, err error) {
	policyviolations, err := pvl.List(selector)
	if err != nil {
		// Bug fix: the original discarded this error and always returned nil,
		// masking lister failures from callers.
		return nil, err
	}
	for index := range policyviolations {
		policyviolations[index].SetGroupVersionKind(kyverno.SchemeGroupVersion.WithKind("PolicyViolation"))
	}
	return policyviolations, nil
}
// ListResources is a wrapper around List that stamps each returned Policy
// with its GroupVersionKind. As the lister is specific to a single GVK we
// can hardcode the kind here.
func (pl *policyLister) ListResources(selector labels.Selector) (ret []*v1alpha1.Policy, err error) {
	policies, err := pl.List(selector)
	if err != nil {
		// Guard clause: return immediately instead of iterating a nil result
		// and returning a partially-meaningful (nil, err) pair at the bottom.
		return nil, err
	}
	for index := range policies {
		policies[index].SetGroupVersionKind(kyverno.SchemeGroupVersion.WithKind("Policy"))
	}
	return policies, nil
}
// GetPolicyForPolicyViolation returns the policies responsible for the given
// PolicyViolation, matched through the violation's "policy" label. It returns
// an error when the violation carries no labels or when no policy matches.
func (pl *policyLister) GetPolicyForPolicyViolation(pv *kyverno.PolicyViolation) ([]*kyverno.Policy, error) {
	if len(pv.Labels) == 0 {
		return nil, fmt.Errorf("no Policy found for PolicyViolation %v because it has no labels", pv.Name)
	}

	pList, err := pl.List(labels.Everything())
	if err != nil {
		return nil, err
	}

	var policies []*kyverno.Policy
	for _, p := range pList {
		// Each policy labels its violations with "policy=<name>"; build that
		// selector directly instead of round-tripping through the map-to-
		// LabelSelector conversion helper.
		ls := &metav1.LabelSelector{MatchLabels: map[string]string{"policy": p.Name}}
		selector, err := metav1.LabelSelectorAsSelector(ls)
		if err != nil {
			// Typo fix: original message said "label sector".
			return nil, fmt.Errorf("failed to generate label selector for Policy %s: %v", p.Name, err)
		}
		// If a policy with a nil or empty selector creeps in, it should match nothing, not everything.
		if selector.Empty() || !selector.Matches(labels.Set(pv.Labels)) {
			continue
		}
		policies = append(policies, p)
	}

	if len(policies) == 0 {
		return nil, fmt.Errorf("could not find Policy set for PolicyViolation %s with labels: %v", pv.Name, pv.Labels)
	}
	return policies, nil
}

View file

@ -19,7 +19,7 @@ limitations under the License.
package v1alpha1
import (
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"

View file

@ -0,0 +1,65 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
import (
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// NOTE(review): code generated by lister-gen; regenerate rather than hand-edit.

// PolicyViolationLister helps list PolicyViolations.
type PolicyViolationLister interface {
	// List lists all PolicyViolations in the indexer.
	List(selector labels.Selector) (ret []*v1alpha1.PolicyViolation, err error)
	// Get retrieves the PolicyViolation from the index for a given name.
	Get(name string) (*v1alpha1.PolicyViolation, error)
	PolicyViolationListerExpansion
}

// policyViolationLister implements the PolicyViolationLister interface.
type policyViolationLister struct {
	indexer cache.Indexer
}

// NewPolicyViolationLister returns a new PolicyViolationLister.
func NewPolicyViolationLister(indexer cache.Indexer) PolicyViolationLister {
	return &policyViolationLister{indexer: indexer}
}

// List lists all PolicyViolations in the indexer.
func (s *policyViolationLister) List(selector labels.Selector) (ret []*v1alpha1.PolicyViolation, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*v1alpha1.PolicyViolation))
	})
	return ret, err
}

// Get retrieves the PolicyViolation from the index for a given name.
func (s *policyViolationLister) Get(name string) (*v1alpha1.PolicyViolation, error) {
	obj, exists, err := s.indexer.GetByKey(name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(v1alpha1.Resource("policyviolation"), name)
	}
	return obj.(*v1alpha1.PolicyViolation), nil
}

View file

@ -1,23 +0,0 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1alpha1
// PolicyListerExpansion allows custom methods to be added to
// PolicyLister.
type PolicyListerExpansion interface{}

View file

@ -1,45 +0,0 @@
package controller
import (
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/annotations"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/utils"
"k8s.io/apimachinery/pkg/runtime"
)
// cleanAnnotations removes this policy's JSON-patch annotation from every
// resource the policy applies to. Errors on individual resources are logged
// and skipped so the remaining resources are still cleaned.
func cleanAnnotations(client *client.Client, obj interface{}, filterK8Resources []utils.K8Resource) {
	// get the policy struct from interface
	unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
	if err != nil {
		glog.Error(err)
		return
	}
	policy := v1alpha1.Policy{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(unstr, &policy); err != nil {
		glog.Error(err)
		return
	}
	// Get the resources that apply to the policy
	resourceMap := engine.ListResourcesThatApplyToPolicy(client, &policy, filterK8Resources)
	// remove annotations for the resources
	for _, obj := range resourceMap {
		// get annotations
		ann := obj.Resource.GetAnnotations()
		// BuildKey derives the annotation key from the policy name.
		_, patch, err := annotations.RemovePolicyJSONPatch(ann, annotations.BuildKey(policy.Name))
		if err != nil {
			glog.Error(err)
			continue
		}
		// patch the resource
		_, err = client.PatchResource(obj.Resource.GetKind(), obj.Resource.GetNamespace(), obj.Resource.GetName(), patch)
		if err != nil {
			glog.Error(err)
			continue
		}
	}
}

View file

@ -1,281 +0,0 @@
package controller
import (
"fmt"
"reflect"
"time"
"github.com/nirmata/kyverno/pkg/annotations"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/golang/glog"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
lister "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/sharedinformer"
violation "github.com/nirmata/kyverno/pkg/violation"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
//PolicyController to manage Policy CRD
type PolicyController struct {
	client       *client.Client
	policyLister lister.PolicyLister
	// policySynced reports whether the policy informer cache has synced.
	policySynced     cache.InformerSynced
	violationBuilder violation.Generator
	eventController  event.Generator
	// queue holds policy cache keys awaiting processing by the workers.
	queue workqueue.RateLimitingInterface
	// filterK8Resources holds the resource kinds to exclude, parsed via
	// utils.ParseKinds from the command-line string.
	filterK8Resources []utils.K8Resource
}

// NewPolicyController from cmd args
func NewPolicyController(client *client.Client,
	policyInformer sharedinformer.PolicyInformer,
	violationBuilder violation.Generator,
	eventController event.Generator,
	filterK8Resources string) *PolicyController {
	controller := &PolicyController{
		client:            client,
		policyLister:      policyInformer.GetLister(),
		policySynced:      policyInformer.GetInfomer().HasSynced,
		violationBuilder:  violationBuilder,
		eventController:   eventController,
		filterK8Resources: utils.ParseKinds(filterK8Resources),
		queue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), policyWorkQueueName),
	}
	// React to add/update/delete events from the shared policy informer.
	policyInformer.GetInfomer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    controller.createPolicyHandler,
		UpdateFunc: controller.updatePolicyHandler,
		DeleteFunc: controller.deletePolicyHandler,
	})
	return controller
}
// createPolicyHandler enqueues a newly added policy for processing.
func (pc *PolicyController) createPolicyHandler(resource interface{}) {
	pc.enqueuePolicy(resource)
}
// updatePolicyHandler enqueues the policy when its spec changed, ignoring
// updates that only touch Status or ResourceVersion (e.g. periodic resyncs).
func (pc *PolicyController) updatePolicyHandler(oldResource, newResource interface{}) {
	// Bug fix: operate on deep copies. The original mutated the objects it
	// received from the shared informer cache in place (clearing Status and
	// ResourceVersion), corrupting state shared with every other consumer.
	newPolicy := newResource.(*v1alpha1.Policy).DeepCopy()
	oldPolicy := oldResource.(*v1alpha1.Policy).DeepCopy()
	// Blank out fields that change on every sync so the comparison reflects
	// only meaningful differences.
	newPolicy.Status = v1alpha1.Status{}
	oldPolicy.Status = v1alpha1.Status{}
	newPolicy.ResourceVersion = ""
	oldPolicy.ResourceVersion = ""
	if reflect.DeepEqual(newPolicy, oldPolicy) {
		return
	}
	pc.enqueuePolicy(newResource)
}
// deletePolicyHandler cleans up the annotations the deleted policy left on
// resources and logs the deletion.
func (pc *PolicyController) deletePolicyHandler(resource interface{}) {
	var object metav1.Object
	var ok bool
	if object, ok = resource.(metav1.Object); !ok {
		glog.Error("error decoding object, invalid type")
		return
	}
	cleanAnnotations(pc.client, resource, pc.filterK8Resources)
	glog.Infof("policy deleted: %s", object.GetName())
}

// enqueuePolicy adds the policy's cache key to the work queue.
func (pc *PolicyController) enqueuePolicy(obj interface{}) {
	var key string
	var err error
	if key, err = cache.MetaNamespaceKeyFunc(obj); err != nil {
		glog.Error(err)
		return
	}
	pc.queue.Add(key)
}
// Run is main controller thread. It waits for the policy informer cache to
// sync, then starts the worker goroutines. It does not block; workers run
// until stopCh is closed.
func (pc *PolicyController) Run(stopCh <-chan struct{}) error {
	defer utilruntime.HandleCrash()
	if ok := cache.WaitForCacheSync(stopCh, pc.policySynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}
	for i := 0; i < policyControllerWorkerCount; i++ {
		go wait.Until(pc.runWorker, time.Second, stopCh)
	}
	glog.Info("started policy controller workers")
	return nil
}

//Stop to perform actions when controller is stopped
func (pc *PolicyController) Stop() {
	// Shutting down the queue causes the workers' Get() to report shutdown.
	pc.queue.ShutDown()
	glog.Info("shutting down policy controller workers")
}

// runWorker processes queue items until the queue shuts down.
func (pc *PolicyController) runWorker() {
	for pc.processNextWorkItem() {
	}
}
// processNextWorkItem pulls one key off the queue, syncs it, and reports
// whether the worker loop should keep running.
func (pc *PolicyController) processNextWorkItem() bool {
	obj, shutdown := pc.queue.Get()
	if shutdown {
		return false
	}
	err := func(obj interface{}) error {
		// Done must be called so the queue knows processing has finished.
		defer pc.queue.Done(obj)
		err := pc.syncHandler(obj)
		pc.handleErr(err, obj)
		return nil
	}(obj)
	// NOTE(review): the closure above always returns nil, so this branch is
	// effectively dead; sync errors are routed through handleErr instead.
	if err != nil {
		glog.Error(err)
		return true
	}
	return true
}

// handleErr implements the retry policy: requeue with rate limiting up to
// policyWorkQueueRetryLimit attempts, then drop the key.
func (pc *PolicyController) handleErr(err error, key interface{}) {
	if err == nil {
		pc.queue.Forget(key)
		return
	}
	// This controller retries if something goes wrong. After that, it stops trying.
	if pc.queue.NumRequeues(key) < policyWorkQueueRetryLimit {
		glog.Warningf("Error syncing events %v: %v", key, err)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		pc.queue.AddRateLimited(key)
		return
	}
	pc.queue.Forget(key)
	glog.Error(err)
	glog.Warningf("Dropping the key out of the queue: %v", err)
}
// syncHandler processes one work-queue key: it loads the named policy from
// the lister, applies it to existing cluster resources, and emits the
// resulting events, violations and annotations.
func (pc *PolicyController) syncHandler(obj interface{}) error {
	var key string
	var ok bool
	if key, ok = obj.(string); !ok {
		return fmt.Errorf("expected string in workqueue but got %#v", obj)
	}
	// Policies are cluster-scoped, so only the name component of the key is used.
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		glog.Errorf("invalid policy key: %s", key)
		return nil
	}
	// Get Policy
	policy, err := pc.policyLister.Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			// Deleted between enqueue and processing; nothing left to do.
			glog.Errorf("policy '%s' in work queue no longer exists", key)
			return nil
		}
		return err
	}
	glog.Infof("process policy %s on existing resources", policy.GetName())
	// Process policy on existing resources
	policyInfos := engine.ProcessExisting(pc.client, policy, pc.filterK8Resources)
	events, violations := pc.createEventsAndViolations(policyInfos)
	// Events, Violations
	pc.eventController.Add(events...)
	err = pc.violationBuilder.Add(violations...)
	if err != nil {
		// Best-effort: violation write failures are logged, not retried.
		glog.Error(err)
	}
	// Annotations
	pc.createAnnotations(policyInfos)
	return nil
}
// createAnnotations patches each resource covered by the policy-application
// results with an annotation recording the outcome. Failures on individual
// resources are logged and skipped so the rest are still annotated.
func (pc *PolicyController) createAnnotations(policyInfos []*info.PolicyInfo) {
	for _, pi := range policyInfos {
		// Fetch the live resource the policy was applied to.
		obj, err := pc.client.GetResource(pi.RKind, pi.RNamespace, pi.RName)
		if err != nil {
			glog.Error(err)
			continue
		}
		// Build the JSON patch for the policy-application annotation.
		ann := obj.GetAnnotations()
		patch, err := annotations.PatchAnnotations(ann, pi, info.All)
		if err != nil {
			// Bug fix: the original never checked this error.
			glog.Error(err)
			continue
		}
		if patch == nil {
			// Nothing to patch for this resource. Bug fix: use continue, not
			// return — a nil patch for one resource must not abort the loop
			// and skip all remaining resources.
			continue
		}
		if _, err = pc.client.PatchResource(pi.RKind, pi.RNamespace, pi.RName, patch); err != nil {
			glog.Error(err)
			continue
		}
	}
}
// createEventsAndViolations translates per-rule policy results into events
// and violations: one event per failed rule, plus a policy-level event and a
// violation when the policy as a whole failed. Successful policies get their
// stale mutation/validation violations removed instead.
func (pc *PolicyController) createEventsAndViolations(policyInfos []*info.PolicyInfo) ([]*event.Info, []*violation.Info) {
	events := []*event.Info{}
	violations := []*violation.Info{}
	// Create events from the policyInfo
	for _, policyInfo := range policyInfos {
		frules := []v1alpha1.FailedRule{}
		sruleNames := []string{}
		for _, rule := range policyInfo.Rules {
			if rule.IsSuccessful() {
				sruleNames = append(sruleNames, rule.Name)
				continue
			}
			e := &event.Info{}
			frule := v1alpha1.FailedRule{Name: rule.Name}
			switch rule.RuleType {
			case info.Mutation, info.Validation, info.Generation:
				// Event for the failed rule on the target resource.
				e = event.NewEvent(policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName, event.PolicyViolation, event.FProcessRule, rule.Name, policyInfo.Name)
				// Cleanup: the original nested a second switch over the same
				// three cases just to call String() on each; one call suffices.
				frule.Type = rule.RuleType.String()
			default:
				glog.Info("Unsupported Rule type")
			}
			// Cleanup: the original assigned frule.Error twice (inside the
			// known-type case and again here); once is enough.
			frule.Error = rule.GetErrorString()
			frules = append(frules, frule)
			events = append(events, e)
		}
		if !policyInfo.IsSuccessful() {
			e := event.NewEvent("Policy", "", policyInfo.Name, event.PolicyViolation, event.FResourcePolcy, policyInfo.RNamespace+"/"+policyInfo.RName, concatFailedRules(frules))
			events = append(events, e)
			// Violation
			v := violation.BuldNewViolation(policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName, event.PolicyViolation.String(), policyInfo.GetFailedRules())
			violations = append(violations, v)
		} else {
			// clean up violations that are no longer active
			pc.violationBuilder.RemoveInactiveViolation(policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName, info.Mutation)
			pc.violationBuilder.RemoveInactiveViolation(policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName, info.Validation)
		}
	}
	return events, violations
}

View file

@ -1,147 +0,0 @@
package controller
import (
"testing"
"github.com/golang/glog"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/sharedinformer"
violation "github.com/nirmata/kyverno/pkg/violation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/sample-controller/pkg/signals"
)
func TestCreatePolicy(t *testing.T) {
f := newFixture(t)
// new policy is added to policy lister and explictly passed to sync-handler
// to process the existing
policy := newPolicy("test-policy")
f.policyLister = append(f.policyLister, policy)
f.objects = append(f.objects, policy)
// run controller
f.runControler("test-policy")
}
func (f *fixture) runControler(policyName string) {
policyInformerFactory, err := sharedinformer.NewFakeSharedInformerFactory()
if err != nil {
f.t.Fatal(err)
}
eventController := event.NewEventController(f.Client, policyInformerFactory)
violationBuilder := violation.NewPolicyViolationBuilder(f.Client, policyInformerFactory, eventController)
// new controller
policyController := NewPolicyController(
f.Client,
policyInformerFactory,
violationBuilder,
eventController,
"")
stopCh := signals.SetupSignalHandler()
// start informer & controller
policyInformerFactory.Run(stopCh)
if err = policyController.Run(stopCh); err != nil {
glog.Fatalf("Error running PolicyController: %v\n", err)
}
// add policy to the informer
for _, p := range f.policyLister {
policyInformerFactory.GetInfomer().GetIndexer().Add(p)
}
// sync handler
// reads the policy from the policy lister and processes them
err = policyController.syncHandler(policyName)
if err != nil {
f.t.Fatal(err)
}
policyController.Stop()
}
type fixture struct {
t *testing.T
Client *client.Client
policyLister []*types.Policy
objects []runtime.Object
}
func newFixture(t *testing.T) *fixture {
// init groupversion
regResource := []schema.GroupVersionResource{
schema.GroupVersionResource{Group: "group", Version: "version", Resource: "thekinds"},
schema.GroupVersionResource{Group: "group2", Version: "version", Resource: "thekinds"},
schema.GroupVersionResource{Group: "", Version: "v1", Resource: "namespaces"},
schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"},
}
objects := []runtime.Object{newUnstructured("group/version", "TheKind", "ns-foo", "name-foo"),
newUnstructured("group2/version", "TheKind", "ns-foo", "name2-foo"),
newUnstructured("group/version", "TheKind", "ns-foo", "name-bar"),
newUnstructured("group/version", "TheKind", "ns-foo", "name-baz"),
newUnstructured("group2/version", "TheKind", "ns-foo", "name2-baz"),
newUnstructured("apps/v1", "Deployment", "kyverno", "kyverno"),
}
scheme := runtime.NewScheme()
// Create mock client
fclient, err := client.NewMockClient(scheme, objects...)
if err != nil {
t.Fatal(err)
}
// set discovery Client
fclient.SetDiscovery(client.NewFakeDiscoveryClient(regResource))
f := &fixture{
t: t,
Client: fclient,
}
return f
}
// create mock client with initial resouces
// set registered resources for gvr
func (f *fixture) setupFixture() {
scheme := runtime.NewScheme()
fclient, err := client.NewMockClient(scheme, f.objects...)
if err != nil {
f.t.Fatal(err)
}
regresource := []schema.GroupVersionResource{
schema.GroupVersionResource{Group: "kyverno.io",
Version: "v1alpha1",
Resource: "policys"}}
fclient.SetDiscovery(client.NewFakeDiscoveryClient(regresource))
}
func newPolicy(name string) *types.Policy {
return &types.Policy{
TypeMeta: metav1.TypeMeta{APIVersion: types.SchemeGroupVersion.String()},
ObjectMeta: metav1.ObjectMeta{
Name: name,
},
}
}
func newUnstructured(apiVersion, kind, namespace, name string) *unstructured.Unstructured {
return &unstructured.Unstructured{
Object: map[string]interface{}{
"apiVersion": apiVersion,
"kind": kind,
"metadata": map[string]interface{}{
"namespace": namespace,
"name": name,
},
},
}
}

View file

@ -1,21 +0,0 @@
package controller
import (
"bytes"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
)
const policyWorkQueueName = "policyworkqueue"
const policyWorkQueueRetryLimit = 3
const policyControllerWorkerCount = 2
func concatFailedRules(frules []v1alpha1.FailedRule) string {
var buffer bytes.Buffer
for _, frule := range frules {
buffer.WriteString(frule.Name + ";")
}
return buffer.String()
}

View file

@ -6,7 +6,7 @@ import (
"time"
"github.com/golang/glog"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
apps "k8s.io/api/apps/v1"
certificates "k8s.io/api/certificates/v1beta1"
@ -184,7 +184,7 @@ func convertToUnstructured(obj interface{}) *unstructured.Unstructured {
}
// GenerateResource creates resource of the specified kind(supports 'clone' & 'data')
func (c *Client) GenerateResource(generator types.Generation, namespace string, processExistingResources bool) error {
func (c *Client) GenerateResource(generator kyverno.Generation, namespace string, processExistingResources bool) error {
var err error
resource := &unstructured.Unstructured{}
@ -198,7 +198,7 @@ func (c *Client) GenerateResource(generator types.Generation, namespace string,
}
}
// clone -> copy from existing resource
if generator.Clone != nil {
if generator.Clone != (kyverno.CloneFrom{}) {
resource, err = c.GetResource(generator.Kind, generator.Clone.Namespace, generator.Clone.Name)
if err != nil {
return err

View file

@ -3,7 +3,7 @@ package client
import (
"testing"
policytypes "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
policytypes "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
@ -130,7 +130,7 @@ func TestGenerateResource(t *testing.T) {
}
gen := policytypes.Generation{Kind: "TheKind",
Name: "gen-kind",
Clone: &policytypes.CloneFrom{Namespace: "ns-foo", Name: "name-foo"}}
Clone: policytypes.CloneFrom{Namespace: "ns-foo", Name: "name-foo"}}
err = f.client.GenerateResource(gen, ns.GetName(), false)
if err != nil {
t.Errorf("GenerateResource not working: %s", err)

11
pkg/dclient/violation.go Normal file
View file

@ -0,0 +1,11 @@
package client
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)
//CreatePolicyViolation create a Policy Violation resource
func (c *Client) CreatePolicyViolation(pv kyvernov1alpha1.PolicyViolation) error {
_, err := c.CreateResource("PolicyViolation", ",", pv, false)
return err
}

View file

@ -1,104 +0,0 @@
package engine
import (
jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ProcessExisting checks for mutation and validation violations of existing resources
func ProcessExisting(client *client.Client, policy *types.Policy, filterK8Resources []utils.K8Resource) []*info.PolicyInfo {
glog.Infof("Applying policy %s on existing resources", policy.Name)
// key uid
resourceMap := ListResourcesThatApplyToPolicy(client, policy, filterK8Resources)
policyInfos := []*info.PolicyInfo{}
// for the filtered resource apply policy
for _, v := range resourceMap {
policyInfo, err := applyPolicy(client, policy, v)
if err != nil {
glog.Errorf("unable to apply policy %s on resource %s/%s", policy.Name, v.Resource.GetName(), v.Resource.GetNamespace())
glog.Error(err)
continue
}
policyInfos = append(policyInfos, policyInfo)
}
return policyInfos
}
func applyPolicy(client *client.Client, policy *types.Policy, res resourceInfo) (*info.PolicyInfo, error) {
policyInfo := info.NewPolicyInfo(policy.Name, res.Gvk.Kind, res.Resource.GetName(), res.Resource.GetNamespace(), policy.Spec.ValidationFailureAction)
glog.Infof("Applying policy %s with %d rules\n", policy.ObjectMeta.Name, len(policy.Spec.Rules))
rawResource, err := res.Resource.MarshalJSON()
if err != nil {
return nil, err
}
// Mutate
mruleInfos, err := mutation(policy, rawResource, res.Gvk)
policyInfo.AddRuleInfos(mruleInfos)
if err != nil {
return nil, err
}
// Validation
vruleInfos, err := Validate(*policy, rawResource, *res.Gvk)
policyInfo.AddRuleInfos(vruleInfos)
if err != nil {
return nil, err
}
if res.Gvk.Kind == "Namespace" {
// Generation
gruleInfos := Generate(client, policy, res.Resource)
policyInfo.AddRuleInfos(gruleInfos)
}
return policyInfo, nil
}
func mutation(p *types.Policy, rawResource []byte, gvk *metav1.GroupVersionKind) ([]*info.RuleInfo, error) {
patches, ruleInfos := Mutate(*p, rawResource, *gvk)
if len(ruleInfos) == 0 {
// no rules were processed
return nil, nil
}
// if there are any errors return
for _, r := range ruleInfos {
if !r.IsSuccessful() {
return ruleInfos, nil
}
}
// if there are no patches // for overlay
if len(patches) == 0 {
return ruleInfos, nil
}
// option 2: (original Resource + patch) compare with (original resource)
mergePatches := JoinPatches(patches)
// merge the patches
patch, err := jsonpatch.DecodePatch(mergePatches)
if err != nil {
return nil, err
}
// apply the patches returned by mutate to the original resource
patchedResource, err := patch.Apply(rawResource)
if err != nil {
return nil, err
}
// compare (original Resource + patch) vs (original resource)
// to verify if they are equal
ruleInfo := info.NewRuleInfo("over-all mutation", info.Mutation)
if !jsonpatch.Equal(patchedResource, rawResource) {
//resource does not match so there was a mutation rule violated
// TODO : check the rule name "mutation rules"
ruleInfo.Fail()
ruleInfo.Add("resource does not satisfy mutation rules")
} else {
ruleInfo.Add("resource satisfys the mutation rule")
}
ruleInfos = append(ruleInfos, ruleInfo)
return ruleInfos, nil
}

View file

@ -2,10 +2,13 @@ package engine
import (
"encoding/json"
"errors"
"time"
"fmt"
"github.com/golang/glog"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/utils"
@ -15,27 +18,43 @@ import (
)
//Generate apply generation rules on a resource
func Generate(client *client.Client, policy *v1alpha1.Policy, ns unstructured.Unstructured) []*info.RuleInfo {
ris := []*info.RuleInfo{}
func Generate(client *client.Client, policy kyverno.Policy, ns unstructured.Unstructured) (response EngineResponse) {
startTime := time.Now()
glog.V(4).Infof("started applying generation rules of policy %q (%v)", policy.Name, startTime)
defer func() {
response.ExecutionTime = time.Since(startTime)
glog.V(4).Infof("Finished applying generation rules policy %q (%v)", policy.Name, response.ExecutionTime)
glog.V(4).Infof("Generation Rules appplied count %q for policy %q", response.RulesAppliedCount, policy.Name)
}()
incrementAppliedRuleCount := func() {
// rules applied succesfully count
response.RulesAppliedCount++
}
ris := []info.RuleInfo{}
for _, rule := range policy.Spec.Rules {
if rule.Generation == nil {
if rule.Generation == (kyverno.Generation{}) {
continue
}
glog.V(4).Infof("applying policy %s generate rule %s on resource %s/%s/%s", policy.Name, rule.Name, ns.GetKind(), ns.GetNamespace(), ns.GetName())
ri := info.NewRuleInfo(rule.Name, info.Generation)
err := applyRuleGenerator(client, ns, rule.Generation, policy.GetCreationTimestamp())
if err != nil {
ri.Fail()
ri.Addf("Rule %s: Failed to apply rule generator, err %v.", rule.Name, err)
ri.Addf("Failed to apply rule %s generator, err %v.", rule.Name, err)
glog.Infof("failed to apply policy %s rule %s on resource %s/%s/%s: %v", policy.Name, rule.Name, ns.GetKind(), ns.GetNamespace(), ns.GetName(), err)
} else {
ri.Addf("Rule %s: Generation succesfully.", rule.Name)
ri.Addf("Generation succesfully for rule %s", rule.Name)
glog.Infof("succesfully applied policy %s rule %s on resource %s/%s/%s", policy.Name, rule.Name, ns.GetKind(), ns.GetNamespace(), ns.GetName())
}
ris = append(ris, ri)
incrementAppliedRuleCount()
}
return ris
response.RuleInfos = ris
return response
}
func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen *v1alpha1.Generation, policyCreationTime metav1.Time) error {
func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen kyverno.Generation, policyCreationTime metav1.Time) error {
var err error
resource := &unstructured.Unstructured{}
var rdata map[string]interface{}
@ -45,19 +64,24 @@ func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen
return nsCreationTime.Before(&policyCreationTime)
}()
if gen.Data != nil {
glog.V(4).Info("generate rule: creates new resource")
// 1> Check if resource exists
obj, err := client.GetResource(gen.Kind, ns.GetName(), gen.Name)
if err == nil {
glog.V(4).Infof("generate rule: resource %s/%s/%s already present. checking if it contains the required configuration", gen.Kind, ns.GetName(), gen.Name)
// 2> If already exsists, then verify the content is contained
// found the resource
// check if the rule is create, if yes, then verify if the specified configuration is present in the resource
ok, err := checkResource(gen.Data, obj)
if err != nil {
glog.V(4).Infof("generate rule:: unable to check if configuration %v, is present in resource %s/%s/%s", gen.Data, gen.Kind, ns.GetName(), gen.Name)
return err
}
if !ok {
return fmt.Errorf("rule configuration not present in resource %s/%s", ns.GetName(), gen.Name)
glog.V(4).Infof("generate rule:: configuration %v not present in resource %s/%s/%s", gen.Data, gen.Kind, ns.GetName(), gen.Name)
return errors.New("rule configuration not present in resource")
}
glog.V(4).Infof("generate rule: required configuration %v is present in resource %s/%s/%s", gen.Data, gen.Kind, ns.GetName(), gen.Name)
return nil
}
rdata, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&gen.Data)
@ -66,17 +90,21 @@ func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen
return err
}
}
if gen.Clone != nil {
if gen.Clone != (kyverno.CloneFrom{}) {
glog.V(4).Info("generate rule: clone resource")
// 1> Check if resource exists
_, err := client.GetResource(gen.Kind, ns.GetName(), gen.Name)
if err == nil {
glog.V(4).Infof("generate rule: resource %s/%s/%s already present", gen.Kind, ns.GetName(), gen.Name)
return nil
}
// 2> If already exists return
// 2> If clone already exists return
resource, err = client.GetResource(gen.Kind, gen.Clone.Namespace, gen.Clone.Name)
if err != nil {
glog.V(4).Infof("generate rule: clone reference resource %s/%s/%s not present: %v", gen.Kind, gen.Clone.Namespace, gen.Clone.Name, err)
return err
}
glog.V(4).Infof("generate rule: clone reference resource %s/%s/%s present", gen.Kind, gen.Clone.Namespace, gen.Clone.Name)
rdata = resource.UnstructuredContent()
}
if processExisting {
@ -90,11 +118,14 @@ func applyRuleGenerator(client *client.Client, ns unstructured.Unstructured, gen
resource.SetResourceVersion("")
_, err = client.CreateResource(gen.Kind, ns.GetName(), resource, false)
if err != nil {
glog.V(4).Infof("generate rule: unable to create resource %s/%s/%s: %v", gen.Kind, resource.GetNamespace(), resource.GetName(), err)
return err
}
glog.V(4).Infof("generate rule: created resource %s/%s/%s", gen.Kind, resource.GetNamespace(), resource.GetName())
return nil
}
//checkResource checks if the config is present in th eresource
func checkResource(config interface{}, resource *unstructured.Unstructured) (bool, error) {
var err error
@ -119,7 +150,6 @@ func checkResource(config interface{}, resource *unstructured.Unstructured) (boo
if err != nil {
// unable to unmarshall
return false, err
}
var objData interface{}

View file

@ -1,66 +1,125 @@
package engine
import (
"reflect"
"time"
"github.com/golang/glog"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/info"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// Mutate performs mutation. Overlay first and then mutation patches
func Mutate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) ([][]byte, []*info.RuleInfo) {
var allPatches [][]byte
patchedDocument := rawResource
ris := []*info.RuleInfo{}
func Mutate(policy kyverno.Policy, resource unstructured.Unstructured) (response EngineResponse) {
// var response EngineResponse
var allPatches, rulePatches [][]byte
var err error
var errs []error
ris := []info.RuleInfo{}
startTime := time.Now()
glog.V(4).Infof("started applying mutation rules of policy %q (%v)", policy.Name, startTime)
defer func() {
response.ExecutionTime = time.Since(startTime)
glog.V(4).Infof("finished applying mutation rules policy %v (%v)", policy.Name, response.ExecutionTime)
glog.V(4).Infof("Mutation Rules appplied succesfully count %v for policy %q", response.RulesAppliedCount, policy.Name)
}()
incrementAppliedRuleCount := func() {
// rules applied succesfully count
response.RulesAppliedCount++
}
patchedDocument, err := resource.MarshalJSON()
if err != nil {
glog.Errorf("unable to marshal resource : %v\n", err)
}
if err != nil {
glog.V(4).Infof("unable to marshal resource : %v", err)
response.PatchedResource = resource
return response
}
for _, rule := range policy.Spec.Rules {
if rule.Mutation == nil {
if reflect.DeepEqual(rule.Mutation, kyverno.Mutation{}) {
continue
}
ri := info.NewRuleInfo(rule.Name, info.Mutation)
ok := ResourceMeetsDescription(rawResource, rule.MatchResources.ResourceDescription, rule.ExcludeResources.ResourceDescription, gvk)
// check if the resource satisfies the filter conditions defined in the rule
//TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
// dont statisfy a policy rule resource description
ok := MatchesResourceDescription(resource, rule)
if !ok {
glog.V(3).Infof("Not applicable on specified resource kind%s", gvk.Kind)
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
continue
}
ruleInfo := info.NewRuleInfo(rule.Name, info.Mutation)
// Process Overlay
if rule.Mutation.Overlay != nil {
overlayPatches, err := ProcessOverlay(rule, rawResource, gvk)
rulePatches, err = processOverlay(rule, patchedDocument)
if err == nil {
if len(overlayPatches) == 0 {
if len(rulePatches) == 0 {
// if array elements dont match then we skip(nil patch, no error)
// or if acnohor is defined and doenst match
// policy is not applicable
glog.V(4).Info("overlay does not match, so skipping applying rule")
continue
}
ri.Addf("Rule %s: Overlay succesfully applied.", rule.Name)
// merge the json patches
patch := JoinPatches(overlayPatches)
ruleInfo.Addf("Rule %s: Overlay succesfully applied.", rule.Name)
// strip slashes from string
ri.Changes = string(patch)
allPatches = append(allPatches, overlayPatches...)
ruleInfo.Patches = rulePatches
allPatches = append(allPatches, rulePatches...)
glog.V(4).Infof("overlay applied succesfully on resource %s/%s", resource.GetNamespace(), resource.GetName())
} else {
ri.Fail()
ri.Addf("overlay application has failed, err %v.", err)
glog.V(4).Infof("failed to apply overlay: %v", err)
ruleInfo.Fail()
ruleInfo.Addf("failed to apply overlay: %v", err)
}
incrementAppliedRuleCount()
}
// Process Patches
if len(rule.Mutation.Patches) != 0 {
rulePatches, errs := ProcessPatches(rule, patchedDocument)
rulePatches, errs = processPatches(rule, patchedDocument)
if len(errs) > 0 {
ri.Fail()
ruleInfo.Fail()
for _, err := range errs {
ri.Addf("patches application has failed, err %v.", err)
glog.V(4).Infof("failed to apply patches: %v", err)
ruleInfo.Addf("patches application has failed, err %v.", err)
}
} else {
ri.Addf("Rule %s: Patches succesfully applied.", rule.Name)
glog.V(4).Infof("patches applied succesfully on resource %s/%s", resource.GetNamespace(), resource.GetName())
ruleInfo.Addf("Patches succesfully applied.")
ruleInfo.Patches = rulePatches
allPatches = append(allPatches, rulePatches...)
}
}
ris = append(ris, ri)
incrementAppliedRuleCount()
}
return allPatches, ris
patchedDocument, err = ApplyPatches(patchedDocument, rulePatches)
if err != nil {
glog.Errorf("Failed to apply patches on ruleName=%s, err%v\n:", rule.Name, err)
}
ris = append(ris, ruleInfo)
}
patchedResource, err := ConvertToUnstructured(patchedDocument)
if err != nil {
glog.Errorf("Failed to convert patched resource to unstructuredtype, err%v\n:", err)
response.PatchedResource = resource
return response
}
response.Patches = allPatches
response.PatchedResource = *patchedResource
response.RuleInfos = ris
return response
}

View file

@ -11,24 +11,28 @@ import (
"github.com/golang/glog"
jsonpatch "github.com/evanphx/json-patch"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)
// ProcessOverlay handles validating admission request
// rawResource handles validating admission request
// Checks the target resources for rules defined in the policy
func ProcessOverlay(rule kubepolicy.Rule, rawResource []byte, gvk metav1.GroupVersionKind) ([][]byte, error) {
// TODO: pass in the unstructured object in stead of raw byte?
func processOverlay(rule kyverno.Rule, rawResource []byte) ([][]byte, error) {
var resource interface{}
if err := json.Unmarshal(rawResource, &resource); err != nil {
glog.V(4).Infof("unable to unmarshal resource : %v", err)
return nil, err
}
resourceInfo := ParseResourceInfoFromObject(rawResource)
patches, err := processOverlayPatches(resource, *rule.Mutation.Overlay)
patches, err := processOverlayPatches(resource, rule.Mutation.Overlay)
if err != nil && strings.Contains(err.Error(), "Conditions are not met") {
// glog.V(4).Infof("overlay pattern %s does not match resource %s/%s", rule.Mutation.Overlay, resourceUnstr.GetNamespace(), resourceUnstr.GetName())
glog.Infof("Resource does not meet conditions in overlay pattern, resource=%s, rule=%s\n", resourceInfo, rule.Name)
return nil, nil
// patches, err := processOverlayPatches(resource, rule.Mutation.Overlay)
// if err != nil && strings.Contains(err.Error(), "Conditions are not met") {
// glog.V(4).Infof("overlay pattern %s does not match resource %s/%s", rule.Mutation.Overlay, resourceUnstr.GetNamespace(), resourceUnstr.GetName())
// return nil, nil
}
return patches, err

View file

@ -5,17 +5,17 @@ import (
"strings"
"github.com/golang/glog"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)
func patchOverlay(rule kubepolicy.Rule, rawResource []byte) ([][]byte, error) {
func patchOverlay(rule kyverno.Rule, rawResource []byte) ([][]byte, error) {
var resource interface{}
if err := json.Unmarshal(rawResource, &resource); err != nil {
return nil, err
}
//TODO: evaluate, Unmarshall called thrice
resourceInfo := ParseResourceInfoFromObject(rawResource)
patches, err := processOverlayPatches(resource, *rule.Mutation.Overlay)
patches, err := processOverlayPatches(resource, rule.Mutation.Overlay)
if err != nil && strings.Contains(err.Error(), "Conditions are not met") {
glog.Infof("Resource does not meet conditions in overlay pattern, resource=%s, rule=%s\n", resourceInfo, rule.Name)
return nil, nil

View file

@ -3,21 +3,23 @@ package engine
import (
"encoding/json"
"errors"
"reflect"
"github.com/golang/glog"
jsonpatch "github.com/evanphx/json-patch"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)
// ProcessPatches Returns array from separate patches that can be applied to the document
// Returns error ONLY in case when creation of resource should be denied.
func ProcessPatches(rule kubepolicy.Rule, resource []byte) (allPatches [][]byte, errs []error) {
// TODO: pass in the unstructured object in stead of raw byte?
func processPatches(rule kyverno.Rule, resource []byte) (allPatches [][]byte, errs []error) {
if len(resource) == 0 {
errs = append(errs, errors.New("Source document for patching is empty"))
return nil, errs
}
if rule.Mutation == nil {
if reflect.DeepEqual(rule.Mutation, kyverno.Mutation{}) {
errs = append(errs, errors.New("No Mutation rules defined"))
return nil, errs
}

View file

@ -5,7 +5,7 @@ import (
"gotest.tools/assert"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
types "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)
const endpointsDocument string = `{
@ -35,7 +35,7 @@ const endpointsDocument string = `{
func TestProcessPatches_EmptyPatches(t *testing.T) {
var emptyRule = types.Rule{}
patches, err := ProcessPatches(emptyRule, []byte(endpointsDocument))
patches, err := processPatches(emptyRule, []byte(endpointsDocument))
assert.Check(t, len(err) == 1)
assert.Assert(t, len(patches) == 0)
}
@ -58,20 +58,20 @@ func makeRuleWithPatches(patches []types.Patch) types.Rule {
Patches: patches,
}
return types.Rule{
Mutation: &mutation,
Mutation: mutation,
}
}
func TestProcessPatches_EmptyDocument(t *testing.T) {
rule := makeRuleWithPatch(makeAddIsMutatedLabelPatch())
patchesBytes, err := ProcessPatches(rule, nil)
patchesBytes, err := processPatches(rule, nil)
assert.Assert(t, err != nil)
assert.Assert(t, len(patchesBytes) == 0)
}
func TestProcessPatches_AllEmpty(t *testing.T) {
emptyRule := types.Rule{}
patchesBytes, err := ProcessPatches(emptyRule, nil)
patchesBytes, err := processPatches(emptyRule, nil)
assert.Check(t, len(err) == 1)
assert.Assert(t, len(patchesBytes) == 0)
}
@ -80,7 +80,7 @@ func TestProcessPatches_AddPathDoesntExist(t *testing.T) {
patch := makeAddIsMutatedLabelPatch()
patch.Path = "/metadata/additional/is-mutated"
rule := makeRuleWithPatch(patch)
patchesBytes, err := ProcessPatches(rule, []byte(endpointsDocument))
patchesBytes, err := processPatches(rule, []byte(endpointsDocument))
assert.Check(t, len(err) == 1)
assert.Assert(t, len(patchesBytes) == 0)
}
@ -88,7 +88,7 @@ func TestProcessPatches_AddPathDoesntExist(t *testing.T) {
func TestProcessPatches_RemovePathDoesntExist(t *testing.T) {
patch := types.Patch{Path: "/metadata/labels/is-mutated", Operation: "remove"}
rule := makeRuleWithPatch(patch)
patchesBytes, err := ProcessPatches(rule, []byte(endpointsDocument))
patchesBytes, err := processPatches(rule, []byte(endpointsDocument))
assert.Check(t, len(err) == 0)
assert.Assert(t, len(patchesBytes) == 0)
}
@ -97,7 +97,7 @@ func TestProcessPatches_AddAndRemovePathsDontExist_EmptyResult(t *testing.T) {
patch1 := types.Patch{Path: "/metadata/labels/is-mutated", Operation: "remove"}
patch2 := types.Patch{Path: "/spec/labels/label3", Operation: "add", Value: "label3Value"}
rule := makeRuleWithPatches([]types.Patch{patch1, patch2})
patchesBytes, err := ProcessPatches(rule, []byte(endpointsDocument))
patchesBytes, err := processPatches(rule, []byte(endpointsDocument))
assert.Check(t, len(err) == 1)
assert.Assert(t, len(patchesBytes) == 0)
}
@ -107,7 +107,7 @@ func TestProcessPatches_AddAndRemovePathsDontExist_ContinueOnError_NotEmptyResul
patch2 := types.Patch{Path: "/spec/labels/label2", Operation: "remove", Value: "label2Value"}
patch3 := types.Patch{Path: "/metadata/labels/label3", Operation: "add", Value: "label3Value"}
rule := makeRuleWithPatches([]types.Patch{patch1, patch2, patch3})
patchesBytes, err := ProcessPatches(rule, []byte(endpointsDocument))
patchesBytes, err := processPatches(rule, []byte(endpointsDocument))
assert.Check(t, len(err) == 0)
assert.Assert(t, len(patchesBytes) != 0)
assertEqStringAndData(t, `{"path":"/metadata/labels/label3","op":"add","value":"label3Value"}`, patchesBytes[0])
@ -116,7 +116,7 @@ func TestProcessPatches_AddAndRemovePathsDontExist_ContinueOnError_NotEmptyResul
func TestProcessPatches_RemovePathDoesntExist_EmptyResult(t *testing.T) {
patch := types.Patch{Path: "/metadata/labels/is-mutated", Operation: "remove"}
rule := makeRuleWithPatch(patch)
patchesBytes, err := ProcessPatches(rule, []byte(endpointsDocument))
patchesBytes, err := processPatches(rule, []byte(endpointsDocument))
assert.Check(t, len(err) == 0)
assert.Assert(t, len(patchesBytes) == 0)
}
@ -125,7 +125,7 @@ func TestProcessPatches_RemovePathDoesntExist_NotEmptyResult(t *testing.T) {
patch1 := types.Patch{Path: "/metadata/labels/is-mutated", Operation: "remove"}
patch2 := types.Patch{Path: "/metadata/labels/label2", Operation: "add", Value: "label2Value"}
rule := makeRuleWithPatches([]types.Patch{patch1, patch2})
patchesBytes, err := ProcessPatches(rule, []byte(endpointsDocument))
patchesBytes, err := processPatches(rule, []byte(endpointsDocument))
assert.Check(t, len(err) == 0)
assert.Assert(t, len(patchesBytes) == 1)
assertEqStringAndData(t, `{"path":"/metadata/labels/label2","op":"add","value":"label2Value"}`, patchesBytes[0])

View file

@ -5,183 +5,204 @@ import (
"fmt"
"strconv"
"strings"
"time"
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/utils"
v1helper "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
//ListResourcesThatApplyToPolicy returns list of resources that are filtered by policy rules
func ListResourcesThatApplyToPolicy(client *client.Client, policy *types.Policy, filterK8Resources []utils.K8Resource) map[string]resourceInfo {
// key uid
resourceMap := map[string]resourceInfo{}
for _, rule := range policy.Spec.Rules {
// Match
for _, k := range rule.MatchResources.Kinds {
namespaces := []string{}
if k == "Namespace" {
namespaces = []string{""}
} else {
if rule.MatchResources.Namespace != nil {
// if namespace is specified then we add the namespace
namespaces = append(namespaces, *rule.MatchResources.Namespace)
} else {
// no namespace specified, refer to all namespaces
namespaces = getAllNamespaces(client)
//EngineResponse provides the response to the application of a policy rule set on a resource
type EngineResponse struct {
Patches [][]byte
PatchedResource unstructured.Unstructured
RuleInfos []info.RuleInfo
EngineStats
}
// Check if exclude namespace is not clashing
namespaces = excludeNamespaces(namespaces, rule.ExcludeResources.Namespace)
//EngineStats stores in the statistics for a single application of resource
type EngineStats struct {
// average time required to process the policy rules on a resource
ExecutionTime time.Duration
// Count of rules that were applied succesfully
RulesAppliedCount int
}
// If kind is namespace then namespace is "", override
// Get resources in the namespace
for _, ns := range namespaces {
rMap := getResourcesPerNamespace(k, client, ns, rule, filterK8Resources)
mergeresources(resourceMap, rMap)
}
}
}
return resourceMap
}
// //ListResourcesThatApplyToPolicy returns list of resources that are filtered by policy rules
// func ListResourcesThatApplyToPolicy(client *client.Client, policy *kyverno.Policy, filterK8Resources []utils.K8Resource) map[string]resourceInfo {
// // key uid
// resourceMap := map[string]resourceInfo{}
// for _, rule := range policy.Spec.Rules {
// // Match
// for _, k := range rule.MatchResources.Kinds {
// namespaces := []string{}
// if k == "Namespace" {
// namespaces = []string{""}
// } else {
// if rule.MatchResources.Namespace != "" {
// // if namespace is specified then we add the namespace
// namespaces = append(namespaces, rule.MatchResources.Namespace)
// } else {
// // no namespace specified, refer to all namespaces
// namespaces = getAllNamespaces(client)
// }
// getResourcesPerNamespace lists all resources of the given kind in the given
// namespace that satisfy the rule's resource description, keyed by resource UID.
// A resource is kept only if it:
//   - does NOT match the rule's exclude label selector,
//   - matches the rule's match name (wildcard pattern), when one is set,
//   - does NOT match the rule's exclude name (wildcard pattern), when one is set,
//   - is not in the cluster-wide filtered-resources list (filterK8Resources).
// Returns nil when the initial list call fails.
func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule types.Rule, filterK8Resources []utils.K8Resource) map[string]resourceInfo {
	resourceMap := map[string]resourceInfo{}
	// List resources of the requested kind, pre-filtered by the match label selector.
	list, err := client.ListResource(kind, namespace, rule.MatchResources.Selector)
	if err != nil {
		glog.Errorf("unable to list resource for %s with label selector %s: %v", kind, rule.MatchResources.Selector.String(), err)
		return nil
	}
	// Build the exclude label selector once, outside the item loop.
	var selector labels.Selector
	if rule.ExcludeResources.Selector != nil {
		selector, err = v1helper.LabelSelectorAsSelector(rule.ExcludeResources.Selector)
		if err != nil {
			// an invalid exclude selector is logged and ignored (nothing is excluded by labels)
			glog.Error(err)
		}
	}
	for _, res := range list.Items {
		// Skip resources matched by the exclude label selector.
		if selector != nil && selector.Matches(labels.Set(res.GetLabels())) {
			continue
		}
		// Match name supports wildcard patterns; a non-match skips the resource.
		if name := rule.MatchResources.Name; name != nil {
			if !wildcard.Match(*name, res.GetName()) {
				continue
			}
		}
		// Exclude name also supports wildcards; a match skips the resource.
		if name := rule.ExcludeResources.Name; name != nil {
			if wildcard.Match(*name, res.GetName()) {
				continue
			}
		}
		gvk := res.GroupVersionKind()
		// Skip resources configured to be filtered out cluster-wide.
		if utils.SkipFilteredResources(gvk.Kind, res.GetNamespace(), res.GetName(), filterK8Resources) {
			continue
		}
		resourceMap[string(res.GetUID())] = resourceInfo{
			Resource: res,
			Gvk: &metav1.GroupVersionKind{
				Group:   gvk.Group,
				Version: gvk.Version,
				Kind:    gvk.Kind,
			},
		}
	}
	return resourceMap
}
// func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, filterK8Resources []utils.K8Resource) map[string]resourceInfo {
// resourceMap := map[string]resourceInfo{}
// // List resources
// list, err := client.ListResource(kind, namespace, rule.MatchResources.Selector)
// if err != nil {
// glog.Errorf("unable to list resource for %s with label selector %s", kind, rule.MatchResources.Selector.String())
// return nil
// }
// var selector labels.Selector
// // exclude label selector
// if rule.ExcludeResources.Selector != nil {
// selector, err = v1helper.LabelSelectorAsSelector(rule.ExcludeResources.Selector)
// if err != nil {
// glog.Error(err)
// }
// }
// for _, res := range list.Items {
// // exclude label selectors
// if selector != nil {
// set := labels.Set(res.GetLabels())
// if selector.Matches(set) {
// // if matches
// continue
// }
// }
// var name string
// // match
// // name
// // wild card matching
// name = rule.MatchResources.Name
// if name != "" {
// // if does not match then we skip
// if !wildcard.Match(name, res.GetName()) {
// continue
// }
// }
// // exclude
// // name
// // wild card matching
// name = rule.ExcludeResources.Name
// if name != "nil" {
// // if matches then we skip
// if wildcard.Match(name, res.GetName()) {
// continue
// }
// }
// gvk := res.GroupVersionKind()
// mergeresources merges every entry of b into a; keys present in
// both maps take the value from b.
func mergeresources(a, b map[string]resourceInfo) {
	for key, value := range b {
		a[key] = value
	}
}
// ri := resourceInfo{Resource: res, Gvk: &metav1.GroupVersionKind{Group: gvk.Group,
// Version: gvk.Version,
// Kind: gvk.Kind}}
// // Skip the filtered resources
// if utils.SkipFilteredResources(gvk.Kind, res.GetNamespace(), res.GetName(), filterK8Resources) {
// continue
// }
// getAllNamespaces returns the names of every namespace in the cluster.
// On a list failure the error is logged and an empty slice is returned,
// so callers degrade to "no namespaces" instead of failing.
func getAllNamespaces(client *client.Client) []string {
	namespaces := []string{}
	// get all namespaces ("" namespace => cluster scope, nil => no label selector)
	nsList, err := client.ListResource("Namespace", "", nil)
	if err != nil {
		glog.Error(err)
		return namespaces
	}
	for _, ns := range nsList.Items {
		namespaces = append(namespaces, ns.GetName())
	}
	return namespaces
}
// resourceMap[string(res.GetUID())] = ri
// }
// return resourceMap
// }
// excludeNamespaces returns a copy of namespaces with every occurrence
// of *excludeNs removed. When excludeNs is nil, the input slice is
// returned unchanged.
func excludeNamespaces(namespaces []string, excludeNs *string) []string {
	if excludeNs == nil {
		return namespaces
	}
	kept := []string{}
	for _, ns := range namespaces {
		if ns != *excludeNs {
			kept = append(kept, ns)
		}
	}
	return kept
}
// // merge b into a map
// func mergeresources(a, b map[string]resourceInfo) {
// for k, v := range b {
// a[k] = v
// }
// }
// ResourceMeetsDescription checks requests kind, name and labels to fit the policy rule
func ResourceMeetsDescription(resourceRaw []byte, matches v1alpha1.ResourceDescription, exclude v1alpha1.ResourceDescription, gvk metav1.GroupVersionKind) bool {
if !findKind(matches.Kinds, gvk.Kind) {
// func getAllNamespaces(client *client.Client) []string {
// namespaces := []string{}
// // get all namespaces
// nsList, err := client.ListResource("Namespace", "", nil)
// if err != nil {
// glog.Error(err)
// return namespaces
// }
// for _, ns := range nsList.Items {
// namespaces = append(namespaces, ns.GetName())
// }
// return namespaces
// }
// func excludeNamespaces(namespaces []string, excludeNs string) []string {
// if excludeNs == "" {
// return namespaces
// }
// filteredNamespaces := []string{}
// for _, n := range namespaces {
// if n == excludeNs {
// continue
// }
// filteredNamespaces = append(filteredNamespaces, n)
// }
// return filteredNamespaces
// }
//MatchesResourceDescription checks if the resource matches resource desription of the rule or not
func MatchesResourceDescription(resource unstructured.Unstructured, rule kyverno.Rule) bool {
matches := rule.MatchResources.ResourceDescription
exclude := rule.ExcludeResources.ResourceDescription
if !findKind(matches.Kinds, resource.GetKind()) {
return false
}
if resourceRaw != nil {
meta := parseMetadataFromObject(resourceRaw)
name := ParseNameFromObject(resourceRaw)
namespace := ParseNamespaceFromObject(resourceRaw)
name := resource.GetName()
if matches.Name != nil {
namespace := resource.GetNamespace()
if matches.Name != "" {
// Matches
if !wildcard.Match(*matches.Name, name) {
if !wildcard.Match(matches.Name, name) {
return false
}
}
// Exclude
// the resource name matches the exclude resource name then reject
if exclude.Name != nil {
if wildcard.Match(*exclude.Name, name) {
if exclude.Name != "" {
if wildcard.Match(exclude.Name, name) {
return false
}
}
// Matches
if matches.Namespace != nil && *matches.Namespace != namespace {
// check if the resource namespace is defined in the list of namespaces for inclusion
if len(matches.Namespaces) > 0 && !utils.Contains(matches.Namespaces, namespace) {
return false
}
// Exclude
if exclude.Namespace != nil && *exclude.Namespace == namespace {
// check if the resource namespace is defined in the list of namespace for exclusion
if len(exclude.Namespaces) > 0 && utils.Contains(exclude.Namespaces, namespace) {
return false
}
// Matches
if matches.Selector != nil {
selector, err := metav1.LabelSelectorAsSelector(matches.Selector)
@ -189,13 +210,10 @@ func ResourceMeetsDescription(resourceRaw []byte, matches v1alpha1.ResourceDescr
glog.Error(err)
return false
}
if meta != nil {
labelMap := parseLabelsFromMetadata(meta)
if !selector.Matches(labelMap) {
if !selector.Matches(labels.Set(resource.GetLabels())) {
return false
}
}
}
// Exclude
if exclude.Selector != nil {
selector, err := metav1.LabelSelectorAsSelector(exclude.Selector)
@ -204,28 +222,81 @@ func ResourceMeetsDescription(resourceRaw []byte, matches v1alpha1.ResourceDescr
glog.Error(err)
return false
}
if meta != nil {
labelMap := parseLabelsFromMetadata(meta)
if selector.Matches(labelMap) {
if selector.Matches(labels.Set(resource.GetLabels())) {
return false
}
}
}
}
return true
}
// parseMetadataFromObject extracts the "metadata" sub-object from a raw
// JSON resource. Returns nil when the bytes are not valid JSON or when
// no metadata object is present.
func parseMetadataFromObject(bytes []byte) map[string]interface{} {
	var objectJSON map[string]interface{}
	// an unmarshal failure leaves objectJSON empty; report it as "no metadata"
	if err := json.Unmarshal(bytes, &objectJSON); err != nil {
		return nil
	}
	meta, ok := objectJSON["metadata"].(map[string]interface{})
	if !ok {
		return nil
	}
	return meta
}
// // ResourceMeetsDescription checks requests kind, name and labels to fit the policy rule
// func ResourceMeetsDescription(resourceRaw []byte, matches kyverno.ResourceDescription, exclude kyverno.ResourceDescription, gvk metav1.GroupVersionKind) bool {
// if !findKind(matches.Kinds, gvk.Kind) {
// return false
// }
// if resourceRaw != nil {
// meta := parseMetadataFromObject(resourceRaw)
// name := ParseNameFromObject(resourceRaw)
// namespace := ParseNamespaceFromObject(resourceRaw)
// if matches.Name != "" {
// // Matches
// if !wildcard.Match(matches.Name, name) {
// return false
// }
// }
// // Exclude
// // the resource name matches the exclude resource name then reject
// if exclude.Name != "" {
// if wildcard.Match(exclude.Name, name) {
// return false
// }
// }
// // Matches
// // check if the resource namespace is defined in the list of namespaces for inclusion
// if len(matches.Namespaces) > 0 && !utils.Contains(matches.Namespaces, namespace) {
// return false
// }
// // Exclude
// // check if the resource namespace is defined in the list of namespace for exclusion
// if len(exclude.Namespaces) > 0 && utils.Contains(exclude.Namespaces, namespace) {
// return false
// }
// // Matches
// if matches.Selector != nil {
// selector, err := metav1.LabelSelectorAsSelector(matches.Selector)
// if err != nil {
// glog.Error(err)
// return false
// }
// if meta != nil {
// labelMap := parseLabelsFromMetadata(meta)
// if !selector.Matches(labelMap) {
// return false
// }
// }
// }
// // Exclude
// if exclude.Selector != nil {
// selector, err := metav1.LabelSelectorAsSelector(exclude.Selector)
// // if the label selector is incorrect, should be fail or
// if err != nil {
// glog.Error(err)
// return false
// }
// if meta != nil {
// labelMap := parseLabelsFromMetadata(meta)
// if selector.Matches(labelMap) {
// return false
// }
// }
// }
// }
// return true
// }
// ParseResourceInfoFromObject get kind/namepace/name from resource
func ParseResourceInfoFromObject(rawResource []byte) string {
@ -244,18 +315,6 @@ func ParseKindFromObject(bytes []byte) string {
return objectJSON["kind"].(string)
}
func parseLabelsFromMetadata(meta map[string]interface{}) labels.Set {
if interfaceMap, ok := meta["labels"].(map[string]interface{}); ok {
labelMap := make(labels.Set, len(interfaceMap))
for key, value := range interfaceMap {
labelMap[key] = value.(string)
}
return labelMap
}
return nil
}
//ParseNameFromObject extracts resource name from JSON obj
func ParseNameFromObject(bytes []byte) string {
var objectJSON map[string]interface{}
@ -295,15 +354,6 @@ func ParseNamespaceFromObject(bytes []byte) string {
return ""
}
// ParseRegexPolicyResourceName returns true if policyResourceName is a regexp
func ParseRegexPolicyResourceName(policyResourceName string) (string, bool) {
regex := strings.Split(policyResourceName, "regex:")
if len(regex) == 1 {
return regex[0], false
}
return strings.Trim(regex[1], " "), true
}
func getAnchorsFromMap(anchorsMap map[string]interface{}) map[string]interface{} {
result := make(map[string]interface{})
@ -458,3 +508,13 @@ type resourceInfo struct {
Resource unstructured.Unstructured
Gvk *metav1.GroupVersionKind
}
//ConvertToUnstructured unmarshals raw JSON bytes into an Unstructured resource.
// A decode failure is logged at verbosity 4 and returned to the caller.
func ConvertToUnstructured(data []byte) (*unstructured.Unstructured, error) {
	resource := &unstructured.Unstructured{}
	if err := resource.UnmarshalJSON(data); err != nil {
		glog.V(4).Infof("failed to unmarshall resource: %v", err)
		return nil, err
	}
	return resource, nil
}

View file

@ -3,106 +3,232 @@ package engine
import (
"testing"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"gotest.tools/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestResourceMeetsDescription_Kind(t *testing.T) {
resourceName := "test-config-map"
resourceDescription := types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
// Match multiple kinds
func TestResourceDescriptionMatch_MultipleKind(t *testing.T) {
rawResource := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name": "nginx-deployment",
"labels": {
"app": "nginx"
}
},
"spec": {
"replicas": 3,
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx:1.7.9",
"ports": [
{
"containerPort": 80
}
]
}
]
}
}
}
}`)
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
t.Errorf("unable to convert raw resource to unstructured: %v", err)
}
resourceDescription := kyverno.ResourceDescription{
Kinds: []string{"Deployment", "Pods"},
Selector: &metav1.LabelSelector{
MatchLabels: nil,
MatchExpressions: nil,
},
}
excludeResourcesResourceDesc := types.ResourceDescription{}
groupVersionKind := metav1.GroupVersionKind{Kind: "ConfigMap"}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
}
// Match resource name
func TestResourceDescriptionMatch_Name(t *testing.T) {
rawResource := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"name": "nginx-deployment",
"labels": {
"label1":"test1",
"label2":"test2"
"app": "nginx"
}
},
"spec": {
"replicas": 3,
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx:1.7.9",
"ports": [
{
"containerPort": 80
}
]
}
]
}
}
}
}`)
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
t.Errorf("unable to convert raw resource to unstructured: %v", err)
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resourceDescription.Kinds[0] = "Deployment"
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resourceDescription.Kinds[0] = "ConfigMap"
groupVersionKind.Kind = "Deployment"
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
}
func TestResourceMeetsDescription_Name(t *testing.T) {
resourceName := "test-config-map"
resourceDescription := types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
resourceDescription := kyverno.ResourceDescription{
Kinds: []string{"Deployment"},
Name: "nginx-deployment",
Selector: &metav1.LabelSelector{
MatchLabels: nil,
MatchExpressions: nil,
},
}
excludeResourcesResourceDesc := types.ResourceDescription{}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{resourceDescription}}
groupVersionKind := metav1.GroupVersionKind{Kind: "ConfigMap"}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
}
// Match resource regex
func TestResourceDescriptionMatch_Name_Regex(t *testing.T) {
rawResource := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"name": "nginx-deployment",
"labels": {
"label1":"test1",
"label2":"test2"
"app": "nginx"
}
},
"spec": {
"replicas": 3,
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx:1.7.9",
"ports": [
{
"containerPort": 80
}
]
}
]
}
}
}
}`)
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
t.Errorf("unable to convert raw resource to unstructured: %v", err)
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resourceName = "test-config-map-new"
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
}
resourceDescription := kyverno.ResourceDescription{
Kinds: []string{"Deployment"},
Name: "nginx-*",
Selector: &metav1.LabelSelector{
MatchLabels: nil,
MatchExpressions: nil,
},
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{resourceDescription}}
rawResource = []byte(`{
assert.Assert(t, MatchesResourceDescription(*resource, rule))
}
// Match expressions for labels to not match
func TestResourceDescriptionMatch_Label_Expression_NotMatch(t *testing.T) {
rawResource := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name":"test-config-map-new",
"namespace":"default",
"creationTimestamp":null,
"name": "nginx-deployment",
"labels": {
"label1":"test1",
"label2":"test2"
"app": "nginx"
}
},
"spec": {
"replicas": 3,
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx:1.7.9",
"ports": [
{
"containerPort": 80
}
]
}
]
}
}
}
}`)
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
t.Errorf("unable to convert raw resource to unstructured: %v", err)
rawResource = []byte(`{
"metadata":{
"name":"",
"namespace":"default",
"creationTimestamp":null,
"labels":{
"label1":"test1",
"label2":"test2"
}
}
}`)
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
}
func TestResourceMeetsDescription_MatchExpressions(t *testing.T) {
resourceName := "test-config-map"
resourceDescription := types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
resourceDescription := kyverno.ResourceDescription{
Kinds: []string{"Deployment"},
Name: "nginx-*",
Selector: &metav1.LabelSelector{
MatchLabels: nil,
MatchExpressions: []metav1.LabelSelectorRequirement{
@ -113,230 +239,157 @@ func TestResourceMeetsDescription_MatchExpressions(t *testing.T) {
"sometest1",
},
},
metav1.LabelSelectorRequirement{
Key: "label1",
Operator: "In",
Values: []string{
"test1",
"test8",
"test201",
},
},
metav1.LabelSelectorRequirement{
Key: "label3",
Operator: "DoesNotExist",
Values: nil,
},
metav1.LabelSelectorRequirement{
Key: "label2",
Operator: "In",
Values: []string{
"test2",
},
},
},
},
}
excludeResourcesResourceDesc := types.ResourceDescription{}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{resourceDescription}}
groupVersionKind := metav1.GroupVersionKind{Kind: "ConfigMap"}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
}
// Match label expression in matching set
func TestResourceDescriptionMatch_Label_Expression_Match(t *testing.T) {
rawResource := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"name": "nginx-deployment",
"labels": {
"label1":"test1",
"label2":"test2"
"app": "nginx"
}
},
"spec": {
"replicas": 3,
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx:1.7.9",
"ports": [
{
"containerPort": 80
}
]
}
]
}
}
}
}`)
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
t.Errorf("unable to convert raw resource to unstructured: %v", err)
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
rawResource = []byte(`{
"metadata":{
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"labels":{
"label1":"test1234567890",
"label2":"test2"
}
}
}`)
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
}
func TestResourceMeetsDescription_MatchLabels(t *testing.T) {
resourceName := "test-config-map"
resourceDescription := types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
resourceDescription := kyverno.ResourceDescription{
Kinds: []string{"Deployment"},
Name: "nginx-*",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"label1": "test1",
"label2": "test2",
},
MatchExpressions: nil,
},
}
groupVersionKind := metav1.GroupVersionKind{Kind: "ConfigMap"}
excludeResourcesResourceDesc := types.ResourceDescription{}
rawResource := []byte(`{
"metadata":{
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"labels":{
"label1":"test1",
"label2":"test2"
}
}
}`)
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
rawResource = []byte(`{
"metadata":{
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"labels":{
"label3":"test1",
"label2":"test2"
}
}
}`)
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resourceDescription = types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"label3": "test1",
"label2": "test2",
},
MatchExpressions: nil,
},
}
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
}
func TestResourceMeetsDescription_MatchLabelsAndMatchExpressions(t *testing.T) {
resourceName := "test-config-map"
resourceDescription := types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"label1": "test1",
},
MatchLabels: nil,
MatchExpressions: []metav1.LabelSelectorRequirement{
metav1.LabelSelectorRequirement{
Key: "label2",
Operator: "In",
Values: []string{
"test2",
},
},
},
},
}
groupVersionKind := metav1.GroupVersionKind{Kind: "ConfigMap"}
excludeResourcesResourceDesc := types.ResourceDescription{}
rawResource := []byte(`{
"metadata":{
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"labels":{
"label1":"test1",
"label2":"test2"
}
}
}`)
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resourceDescription = types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"label1": "test1",
},
MatchExpressions: []metav1.LabelSelectorRequirement{
metav1.LabelSelectorRequirement{
Key: "label2",
Key: "app",
Operator: "NotIn",
Values: []string{
"sometest1",
"nginx1",
"nginx2",
},
},
},
},
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{resourceDescription}}
rawResource = []byte(`{
assert.Assert(t, MatchesResourceDescription(*resource, rule))
}
// check for exclude conditions
func TestResourceDescriptionExclude_Label_Expression_Match(t *testing.T) {
rawResource := []byte(`{
"apiVersion": "apps/v1",
"kind": "Deployment",
"metadata": {
"name":"test-config-map",
"namespace":"default",
"creationTimestamp":null,
"name": "nginx-deployment",
"labels": {
"label1":"test1",
"label2":"test2"
"app": "nginx",
"block": "true"
}
},
"spec": {
"replicas": 3,
"selector": {
"matchLabels": {
"app": "nginx"
}
},
"template": {
"metadata": {
"labels": {
"app": "nginx"
}
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx:1.7.9",
"ports": [
{
"containerPort": 80
}
]
}
]
}
}
}
}`)
assert.Assert(t, ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
t.Errorf("unable to convert raw resource to unstructured: %v", err)
resourceDescription = types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
}
resourceDescription := kyverno.ResourceDescription{
Kinds: []string{"Deployment"},
Name: "nginx-*",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"label1": "test1",
},
MatchLabels: nil,
MatchExpressions: []metav1.LabelSelectorRequirement{
metav1.LabelSelectorRequirement{
Key: "label2",
Operator: "In",
Key: "app",
Operator: "NotIn",
Values: []string{
"sometest1",
"nginx1",
"nginx2",
},
},
},
},
}
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
resourceDescription = types.ResourceDescription{
Kinds: []string{"ConfigMap"},
Name: &resourceName,
resourceDescriptionExclude := kyverno.ResourceDescription{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{
"label1": "test1",
"label3": "test3",
},
MatchExpressions: []metav1.LabelSelectorRequirement{
metav1.LabelSelectorRequirement{
Key: "label2",
Operator: "In",
Values: []string{
"test2",
},
},
"block": "true",
},
},
}
assert.Assert(t, false == ResourceMeetsDescription(rawResource, resourceDescription, excludeResourcesResourceDesc, groupVersionKind))
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{resourceDescription},
ExcludeResources: kyverno.ExcludeResources{resourceDescriptionExclude}}
assert.Assert(t, !MatchesResourceDescription(*resource, rule))
}
func TestWrappedWithParentheses_StringIsWrappedWithParentheses(t *testing.T) {

View file

@ -8,48 +8,74 @@ import (
"reflect"
"strconv"
"strings"
"time"
"github.com/golang/glog"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/info"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// Validate handles validating admission request
// Checks the target resources for rules defined in the policy
func Validate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) ([]*info.RuleInfo, error) {
var resource interface{}
ris := []*info.RuleInfo{}
err := json.Unmarshal(rawResource, &resource)
if err != nil {
return nil, err
func Validate(policy kyverno.Policy, resource unstructured.Unstructured) (response EngineResponse) {
// var response EngineResponse
startTime := time.Now()
glog.V(4).Infof("started applying validation rules of policy %q (%v)", policy.Name, startTime)
defer func() {
response.ExecutionTime = time.Since(startTime)
glog.V(4).Infof("Finished applying validation rules policy %v (%v)", policy.Name, response.ExecutionTime)
glog.V(4).Infof("Validation Rules appplied succesfully count %v for policy %q", response.RulesAppliedCount, policy.Name)
}()
incrementAppliedRuleCount := func() {
// rules applied succesfully count
response.RulesAppliedCount++
}
resourceRaw, err := resource.MarshalJSON()
if err != nil {
glog.V(4).Infof("Skip processing validating rule, unable to marshal resource : %v\n", err)
response.PatchedResource = resource
return response
}
var resourceInt interface{}
if err := json.Unmarshal(resourceRaw, &resourceInt); err != nil {
glog.V(4).Infof("unable to unmarshal resource : %v\n", err)
response.PatchedResource = resource
return response
}
var ruleInfos []info.RuleInfo
for _, rule := range policy.Spec.Rules {
if rule.Validation == nil {
if reflect.DeepEqual(rule.Validation, kyverno.Validation{}) {
continue
}
ri := info.NewRuleInfo(rule.Name, info.Validation)
ok := ResourceMeetsDescription(rawResource, rule.MatchResources.ResourceDescription, rule.ExcludeResources.ResourceDescription, gvk)
// check if the resource satisfies the filter conditions defined in the rule
// TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
// dont statisfy a policy rule resource description
ok := MatchesResourceDescription(resource, rule)
if !ok {
glog.V(3).Infof("Not applicable on specified resource kind%s", gvk.Kind)
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
continue
}
err := validateResourceWithPattern(resource, rule.Validation.Pattern)
ruleInfo := info.NewRuleInfo(rule.Name, info.Validation)
err := validateResourceWithPattern(resourceInt, rule.Validation.Pattern)
if err != nil {
ri.Fail()
ri.Addf("validation has failed, err %v.", err)
ruleInfo.Fail()
ruleInfo.Addf("Failed to apply pattern: %v.", err)
} else {
ri.Addf("Rule %s: Validation succesfully.", rule.Name)
ruleInfo.Add("Pattern succesfully validated")
glog.V(4).Infof("pattern validated succesfully on resource %s/%s", resource.GetNamespace(), resource.GetName())
}
ris = append(ris, ri)
incrementAppliedRuleCount()
ruleInfos = append(ruleInfos, ruleInfo)
}
return ris, nil
response.RuleInfos = ruleInfos
return response
}
// validateResourceWithPattern is a start of element-by-element validation process

View file

@ -4,9 +4,8 @@ import (
"encoding/json"
"testing"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kubepolicy "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"gotest.tools/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestValidateString_AsteriskTest(t *testing.T) {
@ -1570,11 +1569,10 @@ func TestValidate_ServiceTest(t *testing.T) {
var policy kubepolicy.Policy
json.Unmarshal(rawPolicy, &policy)
gvk := metav1.GroupVersionKind{
Kind: "Service",
}
_, err := Validate(policy, rawResource, gvk)
assert.Assert(t, err == nil)
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
res := Validate(policy, *resourceUnstructured)
assert.Assert(t, len(res.RuleInfos) == 0)
}
func TestValidate_MapHasFloats(t *testing.T) {
@ -1668,10 +1666,8 @@ func TestValidate_MapHasFloats(t *testing.T) {
var policy kubepolicy.Policy
json.Unmarshal(rawPolicy, &policy)
gvk := metav1.GroupVersionKind{
Kind: "Deployment",
}
_, err := Validate(policy, rawResource, gvk)
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
res := Validate(policy, *resourceUnstructured)
assert.Assert(t, len(res.RuleInfos) == 0)
}

View file

@ -4,11 +4,11 @@ import (
"time"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
policyscheme "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/sharedinformer"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@ -18,40 +18,36 @@ import (
"k8s.io/client-go/util/workqueue"
)
type controller struct {
//Generator generate events
type Generator struct {
client *client.Client
policyLister v1alpha1.PolicyLister
pLister kyvernolister.PolicyLister
queue workqueue.RateLimitingInterface
recorder record.EventRecorder
}
//Generator to generate event
type Generator interface {
//Interface to generate event
type Interface interface {
Add(infoList ...*Info)
}
//Controller api
type Controller interface {
Generator
Run(stopCh <-chan struct{})
Stop()
}
//NewEventGenerator to generate a new event controller
func NewEventGenerator(client *client.Client,
pInformer kyvernoinformer.PolicyInformer) *Generator {
//NewEventController to generate a new event controller
func NewEventController(client *client.Client,
shareInformer sharedinformer.PolicyInformer) Controller {
return &controller{
gen := Generator{
client: client,
policyLister: shareInformer.GetLister(),
pLister: pInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName),
recorder: initRecorder(client),
}
return &gen
}
func initRecorder(client *client.Client) record.EventRecorder {
// Initliaze Event Broadcaster
err := policyscheme.AddToScheme(scheme.Scheme)
err := scheme.AddToScheme(scheme.Scheme)
if err != nil {
glog.Error(err)
return nil
@ -72,67 +68,66 @@ func initRecorder(client *client.Client) record.EventRecorder {
return recorder
}
func (c *controller) Add(infos ...*Info) {
//Add queues an event for generation
func (gen *Generator) Add(infos ...*Info) {
for _, info := range infos {
c.queue.Add(*info)
gen.queue.Add(*info)
}
}
func (c *controller) Run(stopCh <-chan struct{}) {
// Run begins generator
func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.Info("Starting event generator")
defer glog.Info("Shutting down event generator")
for i := 0; i < eventWorkerThreadCount; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
for i := 0; i < workers; i++ {
go wait.Until(gen.runWorker, time.Second, stopCh)
}
glog.Info("Started eventbuilder controller workers")
<-stopCh
}
func (c *controller) Stop() {
c.queue.ShutDown()
glog.Info("Shutting down eventbuilder controller workers")
}
func (c *controller) runWorker() {
for c.processNextWorkItem() {
func (gen *Generator) runWorker() {
for gen.processNextWorkItem() {
}
}
func (c *controller) handleErr(err error, key interface{}) {
func (gen *Generator) handleErr(err error, key interface{}) {
if err == nil {
c.queue.Forget(key)
gen.queue.Forget(key)
return
}
// This controller retries if something goes wrong. After that, it stops trying.
if c.queue.NumRequeues(key) < workQueueRetryLimit {
if gen.queue.NumRequeues(key) < workQueueRetryLimit {
glog.Warningf("Error syncing events %v: %v", key, err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
c.queue.AddRateLimited(key)
gen.queue.AddRateLimited(key)
return
}
c.queue.Forget(key)
gen.queue.Forget(key)
glog.Error(err)
glog.Warningf("Dropping the key out of the queue: %v", err)
}
func (c *controller) processNextWorkItem() bool {
obj, shutdown := c.queue.Get()
func (gen *Generator) processNextWorkItem() bool {
obj, shutdown := gen.queue.Get()
if shutdown {
return false
}
err := func(obj interface{}) error {
defer c.queue.Done(obj)
defer gen.queue.Done(obj)
var key Info
var ok bool
if key, ok = obj.(Info); !ok {
c.queue.Forget(obj)
gen.queue.Forget(obj)
glog.Warningf("Expecting type info by got %v\n", obj)
return nil
}
err := c.syncHandler(key)
c.handleErr(err, obj)
err := gen.syncHandler(key)
gen.handleErr(err, obj)
return nil
}(obj)
if err != nil {
@ -142,20 +137,20 @@ func (c *controller) processNextWorkItem() bool {
return true
}
func (c *controller) syncHandler(key Info) error {
func (gen *Generator) syncHandler(key Info) error {
var robj runtime.Object
var err error
switch key.Kind {
case "Policy":
//TODO: policy is clustered resource so wont need namespace
robj, err = c.policyLister.Get(key.Name)
robj, err = gen.pLister.Get(key.Name)
if err != nil {
glog.Errorf("Error creating event: unable to get policy %s, will retry ", key.Name)
return err
}
default:
robj, err = c.client.GetResource(key.Kind, key.Namespace, key.Name)
robj, err = gen.client.GetResource(key.Kind, key.Namespace, key.Name)
if err != nil {
glog.Errorf("Error creating event: unable to get resource %s, %s, will retry ", key.Kind, key.Namespace+"/"+key.Name)
return err
@ -163,13 +158,14 @@ func (c *controller) syncHandler(key Info) error {
}
if key.Reason == PolicyApplied.String() {
c.recorder.Event(robj, v1.EventTypeNormal, key.Reason, key.Message)
gen.recorder.Event(robj, v1.EventTypeNormal, key.Reason, key.Message)
} else {
c.recorder.Event(robj, v1.EventTypeWarning, key.Reason, key.Message)
gen.recorder.Event(robj, v1.EventTypeWarning, key.Reason, key.Message)
}
return nil
}
//TODO: check if we need this ?
//NewEvent returns a new event
func NewEvent(rkind string, rnamespace string, rname string, reason Reason, message MsgKey, args ...interface{}) *Info {
msgText, err := getEventMsg(message, args...)

View file

@ -1,164 +0,0 @@
package gencontroller
import (
"fmt"
"time"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/annotations"
policyLister "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
policySharedInformer "github.com/nirmata/kyverno/pkg/sharedinformer"
"github.com/nirmata/kyverno/pkg/violation"
"k8s.io/apimachinery/pkg/api/errors"
v1Informer "k8s.io/client-go/informers/core/v1"
v1CoreLister "k8s.io/client-go/listers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
//Controller watches the 'Namespace' resource creation/update and applies the
//generation rules on them
type Controller struct {
	// dynamic client used to read and patch resources
	client *client.Client
	// namespaceLister serves namespaces from the shared informer cache
	namespaceLister v1CoreLister.NamespaceLister
	// namespaceSynced reports whether the namespace cache has synced at least once
	namespaceSynced cache.InformerSynced
	// policyLister serves policies from the shared policy informer
	policyLister policyLister.PolicyLister
	// eventController queues Kubernetes events for generation
	eventController event.Generator
	// violationBuilder queues policy violations
	violationBuilder violation.Generator
	// annotationsController patches policy-application annotations onto resources
	annotationsController annotations.Controller
	// workqueue holds namespace keys awaiting processing
	workqueue workqueue.RateLimitingInterface
}
//NewGenController returns a new Controller to manage generation rules
func NewGenController(client *client.Client,
	eventController event.Generator,
	policyInformer policySharedInformer.PolicyInformer,
	violationBuilder violation.Generator,
	namespaceInformer v1Informer.NamespaceInformer,
	annotationsController annotations.Controller) *Controller {
	// create the controller
	controller := &Controller{
		client:                client,
		namespaceLister:       namespaceInformer.Lister(),
		namespaceSynced:       namespaceInformer.Informer().HasSynced,
		policyLister:          policyInformer.GetLister(),
		eventController:       eventController,
		violationBuilder:      violationBuilder,
		annotationsController: annotationsController,
		workqueue:             workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), wqNamespace),
	}
	// only creation is acted on; updates are currently a no-op (see
	// updateNamespaceHandler)
	namespaceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    controller.createNamespaceHandler,
		UpdateFunc: controller.updateNamespaceHandler,
	})
	return controller
}
// createNamespaceHandler enqueues a newly created namespace for processing.
func (c *Controller) createNamespaceHandler(resource interface{}) {
	c.enqueueNamespace(resource)
}
// updateNamespaceHandler handles namespace updates; intentionally a no-op for now.
func (c *Controller) updateNamespaceHandler(oldResoruce, newResource interface{}) {
	// Do we need to do anything if the namespace is modified?
}
// enqueueNamespace derives the cache key for the object and adds it to the
// work queue; malformed objects are logged and dropped.
func (c *Controller) enqueueNamespace(obj interface{}) {
	if key, err := cache.MetaNamespaceKeyFunc(obj); err != nil {
		glog.Error(err)
	} else {
		c.workqueue.Add(key)
	}
}
//Run waits for the namespace cache to sync and then starts the worker
//goroutines; it returns immediately after launching them.
func (c *Controller) Run(stopCh <-chan struct{}) error {
	if ok := cache.WaitForCacheSync(stopCh, c.namespaceSynced); !ok {
		return fmt.Errorf("failed to wait for caches to sync")
	}
	// workerCount workers drain the shared work queue concurrently
	for i := 0; i < workerCount; i++ {
		go wait.Until(c.runWorker, time.Second, stopCh)
	}
	glog.Info("started namespace controller workers")
	return nil
}
//Stop shuts down the work queue, which causes the workers to exit.
func (c *Controller) Stop() {
	c.workqueue.ShutDown()
	glog.Info("shutting down namespace controller workers")
}
// runWorker processes queue items until the queue is shut down.
func (c *Controller) runWorker() {
	for c.processNextWorkItem() {
	}
}
// processNextWorkItem dequeues a single item, syncs it, and reports whether
// the worker loop should continue (false only on queue shutdown).
func (c *Controller) processNextWorkItem() bool {
	obj, shutdown := c.workqueue.Get()
	if shutdown {
		return false
	}
	defer c.workqueue.Done(obj)
	// handleErr decides between retry and drop; errors never escape here
	c.handleErr(c.syncHandler(obj), obj)
	return true
}
// handleErr requeues a failed key with rate limiting up to wqRetryLimit
// attempts, then permanently drops it.
func (c *Controller) handleErr(err error, key interface{}) {
	if err == nil {
		c.workqueue.Forget(key)
		return
	}
	if c.workqueue.NumRequeues(key) >= wqRetryLimit {
		// retry budget exhausted: give up on this key
		c.workqueue.Forget(key)
		glog.Error(err)
		glog.Warningf("Dropping the key %q out of the queue: %v", key, err)
		return
	}
	glog.Warningf("Error syncing events %v: %v", key, err)
	c.workqueue.AddRateLimited(key)
}
// syncHandler resolves a queued key to its Namespace and applies the
// generation rules to it. Returns an error to trigger a retry.
func (c *Controller) syncHandler(obj interface{}) error {
	key, ok := obj.(string)
	if !ok {
		return fmt.Errorf("expected string in workqueue but got %v", obj)
	}
	// Namespace is a cluster-wide resource; only the name part of the key is used
	_, name, err := cache.SplitMetaNamespaceKey(key)
	if err != nil {
		glog.Errorf("invalid namespace key: %s", key)
		return err
	}
	// Get Namespace
	ns, err := c.namespaceLister.Get(name)
	if err != nil {
		if errors.IsNotFound(err) {
			// deleted between enqueue and processing; nothing left to do
			glog.Errorf("namespace '%s' in work queue no longer exists", key)
			return nil
		}
		// BUG FIX: previously execution fell through on transient lister
		// errors and called processNamespace with a nil namespace (panic);
		// return the error so the key is retried instead.
		return err
	}
	//TODO: need to find a way to store the policy such that we can directly query the
	// policies with generation rules
	// PolicyListerExpansion
	// BUG FIX: the error from processNamespace was silently discarded before;
	// propagate it so failures are retried via handleErr.
	return c.processNamespace(ns)
}

View file

@ -1,158 +0,0 @@
package gencontroller
import (
"encoding/json"
"fmt"
"strings"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/annotations"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
"github.com/nirmata/kyverno/pkg/engine"
event "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/info"
violation "github.com/nirmata/kyverno/pkg/violation"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
// processNamespace applies every policy whose generation rules match the
// given namespace.
func (c *Controller) processNamespace(ns *corev1.Namespace) error {
	// collect the policies whose resource description matches this namespace
	policies, err := c.listPolicies(ns)
	if err != nil {
		return err
	}
	for _, policy := range policies {
		c.processPolicy(ns, policy)
	}
	return nil
}
// listPolicies returns the policies that contain at least one generation rule
// whose resource description matches the given namespace.
func (c *Controller) listPolicies(ns *corev1.Namespace) ([]*v1alpha1.Policy, error) {
	var fpolicies []*v1alpha1.Policy
	// list all policies; filtering happens client-side below
	policies, err := c.policyLister.List(labels.NewSelector())
	if err != nil {
		glog.Error("Unable to connect to policy controller. Unable to access policies not applying GENERATION rules")
		return nil, err
	}
	for _, p := range policies {
		// Check if the policy contains a generation rule
		for _, r := range p.Spec.Rules {
			if r.Generation != nil {
				// Check if the namespace meets the rule's resource description;
				// the engine matcher works on the raw JSON of the resource
				data, err := json.Marshal(ns)
				if err != nil {
					glog.Error(err)
					continue
				}
				// Hardcode the GVK since this controller only informs on Namespaces
				nsGvk := schema.FromAPIVersionAndKind("v1", "Namespace")
				gvk := metav1.GroupVersionKind{Group: nsGvk.Group, Kind: nsGvk.Kind, Version: nsGvk.Version}
				if engine.ResourceMeetsDescription(data, r.MatchResources.ResourceDescription, r.ExcludeResources.ResourceDescription, gvk) {
					// one matching generation rule is enough to select the policy
					fpolicies = append(fpolicies, p)
					break
				}
			}
		}
	}
	return fpolicies, nil
}
// processPolicy applies policy p's generation rules to the namespace, records
// the outcome as annotations, and emits either a policy violation (when the
// generate rule configuration is missing from the resource) or events.
func (c *Controller) processPolicy(ns *corev1.Namespace, p *v1alpha1.Policy) {
	var eventInfo *event.Info
	var onViolation bool
	var msg string
	policyInfo := info.NewPolicyInfo(p.Name,
		"Namespace",
		ns.Name,
		"", // Namespace is cluster-scoped, so it has no namespace of its own
		p.Spec.ValidationFailureAction)
	// convert to unstructured for the engine
	unstrMap, err := runtime.DefaultUnstructuredConverter.ToUnstructured(ns)
	if err != nil {
		glog.Error(err)
		return
	}
	unstObj := unstructured.Unstructured{Object: unstrMap}
	ruleInfos := engine.Generate(c.client, p, unstObj)
	policyInfo.AddRuleInfos(ruleInfos)
	// generate annotations on namespace
	c.createAnnotations(policyInfo)
	//TODO generate namespace on created resources
	if !policyInfo.IsSuccessful() {
		glog.Infof("Failed to apply policy %s on resource %s %s", p.Name, ns.Kind, ns.Name)
		for _, r := range ruleInfos {
			glog.Warning(r.Msgs)
			// a "rule configuration not present" message marks a violation
			// rather than a blocked request
			if msg = strings.Join(r.Msgs, " "); strings.Contains(msg, "rule configuration not present in resource") {
				onViolation = true
				msg = fmt.Sprintf(`Resource creation violates generate rule '%s' of policy '%s'`, r.Name, policyInfo.Name)
			}
		}
		if onViolation {
			glog.Infof("Adding violation for generation rule of policy %s\n", policyInfo.Name)
			// Policy Violation
			v := violation.BuldNewViolation(policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName, event.PolicyViolation.String(), policyInfo.GetFailedRules())
			c.violationBuilder.Add(v)
		} else {
			// Event: the request was blocked outright
			eventInfo = event.NewEvent(policyKind, "", policyInfo.Name, event.RequestBlocked,
				event.FPolicyApplyBlockCreate, policyInfo.RNamespace+"/"+policyInfo.RName, policyInfo.GetRuleNames(false))
			glog.V(2).Infof("Request blocked event info has prepared for %s/%s\n", policyKind, policyInfo.Name)
			c.eventController.Add(eventInfo)
		}
		return
	}
	// success path: emit a PolicyApplied event on the resource
	glog.Infof("Generation from policy %s has succesfully applied to %s/%s", p.Name, policyInfo.RKind, policyInfo.RName)
	eventInfo = event.NewEvent(policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName,
		event.PolicyApplied, event.SRulesApply, policyInfo.GetRuleNames(true), policyInfo.Name)
	glog.V(2).Infof("Success event info has prepared for %s/%s\n", policyInfo.RKind, policyInfo.RName)
	c.eventController.Add(eventInfo)
}
// createAnnotations patches the resource referenced by the policy info with
// annotations that record which Generation rules were applied.
func (c *Controller) createAnnotations(pi *info.PolicyInfo) {
	//get resource
	obj, err := c.client.GetResource(pi.RKind, pi.RNamespace, pi.RName)
	if err != nil {
		glog.Error(err)
		return
	}
	// build the annotation patch for the Generation rules from the resource's
	// current annotations
	ann := obj.GetAnnotations()
	gpatch, err := annotations.PatchAnnotations(ann, pi, info.Generation)
	if err != nil {
		glog.Error(err)
		return
	}
	if gpatch == nil {
		// nothing to patch
		return
	}
	// apply the annotation patch to the resource
	_, err = c.client.PatchResource(pi.RKind, pi.RNamespace, pi.RName, gpatch)
	if err != nil {
		glog.Error(err)
		return
	}
}

View file

@ -1,63 +0,0 @@
package gencontroller
import (
"github.com/minio/minio/pkg/wildcard"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
const (
wqNamespace string = "namespace"
workerCount int = 1
wqRetryLimit int = 5
policyKind string = "Policy"
)
// namespaceMeetsRuleDescription reports whether the namespace satisfies the
// resource description: the kinds list must contain "Namespace", the optional
// name pattern (wildcards allowed) must match, and the optional label
// selector must match the namespace labels.
func namespaceMeetsRuleDescription(ns *corev1.Namespace, resourceDescription v1alpha1.ResourceDescription) bool {
	// 'Namespace' must be listed among the supported kinds
	if !findKind(resourceDescription.Kinds, "Namespace") {
		return false
	}
	// optional name pattern with wildcard support
	if pattern := resourceDescription.Name; pattern != nil && !wildcard.Match(*pattern, ns.Name) {
		return false
	}
	// optional label selector
	if resourceDescription.Selector == nil {
		return true
	}
	selector, err := metav1.LabelSelectorAsSelector(resourceDescription.Selector)
	if err != nil {
		// an invalid selector never matches
		return false
	}
	return selector.Matches(convertLabelsToLabelSet(ns.Labels))
}
// convertLabelsToLabelSet copies a plain label map into a labels.Set so it
// can be matched against a label selector.
func convertLabelsToLabelSet(labelMap map[string]string) labels.Set {
	set := make(labels.Set, len(labelMap))
	for key, value := range labelMap {
		set[key] = value
	}
	return set
}
// findKind reports whether kindGVK appears in the kinds list.
func findKind(kinds []string, kindGVK string) bool {
	for i := range kinds {
		if kinds[i] == kindGVK {
			return true
		}
	}
	return false
}

View file

@ -3,8 +3,6 @@ package info
import (
"fmt"
"strings"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
)
//PolicyInfo defines policy information
@ -20,13 +18,13 @@ type PolicyInfo struct {
RNamespace string
//TODO: add check/enum for types
ValidationFailureAction string // BlockChanges, ReportViolation
Rules []*RuleInfo
Rules []RuleInfo
success bool
}
//NewPolicyInfo returns a new policy info
func NewPolicyInfo(policyName, rKind, rName, rNamespace, validationFailureAction string) *PolicyInfo {
return &PolicyInfo{
func NewPolicyInfo(policyName, rKind, rName, rNamespace, validationFailureAction string) PolicyInfo {
pi := PolicyInfo{
Name: policyName,
RKind: rKind,
RName: rName,
@ -34,6 +32,7 @@ func NewPolicyInfo(policyName, rKind, rName, rNamespace, validationFailureAction
success: true, // fail to be set explicity
ValidationFailureAction: validationFailureAction,
}
return pi
}
//IsSuccessful checks if policy is succesful
@ -71,17 +70,6 @@ func (pi *PolicyInfo) FailedRules() []string {
return rules
}
//GetFailedRules returns the failed rules with rule type
func (pi *PolicyInfo) GetFailedRules() []v1alpha1.FailedRule {
var rules []v1alpha1.FailedRule
for _, r := range pi.Rules {
if !r.IsSuccessful() {
rules = append(rules, v1alpha1.FailedRule{Name: r.Name, Type: r.RuleType.String(), Error: r.GetErrorString()})
}
}
return rules
}
//ErrorRules returns error msgs from all rule
func (pi *PolicyInfo) ErrorRules() string {
errorMsgs := []string{}
@ -114,9 +102,9 @@ func (ri RuleType) String() string {
//RuleInfo defines rule struct
type RuleInfo struct {
Name string
Msgs []string
Changes string // this will store the mutation patch being applied by the rule
RuleType RuleType
Msgs []string
Patches [][]byte // this will store the mutation patch being applied by the rule
success bool
}
@ -134,8 +122,8 @@ func (ri *RuleInfo) GetErrorString() string {
}
//NewRuleInfo creates a new RuleInfo
func NewRuleInfo(ruleName string, ruleType RuleType) *RuleInfo {
return &RuleInfo{
func NewRuleInfo(ruleName string, ruleType RuleType) RuleInfo {
return RuleInfo{
Name: ruleName,
Msgs: []string{},
RuleType: ruleType,
@ -164,7 +152,7 @@ func (ri *RuleInfo) Addf(msg string, args ...interface{}) {
}
//RulesSuccesfuly check if the any rule has failed or not
func RulesSuccesfuly(rules []*RuleInfo) bool {
func rulesSuccesfuly(rules []RuleInfo) bool {
for _, r := range rules {
if !r.success {
return false
@ -174,11 +162,11 @@ func RulesSuccesfuly(rules []*RuleInfo) bool {
}
//AddRuleInfos sets the rule information
func (pi *PolicyInfo) AddRuleInfos(rules []*RuleInfo) {
func (pi *PolicyInfo) AddRuleInfos(rules []RuleInfo) {
if rules == nil {
return
}
if !RulesSuccesfuly(rules) {
if !rulesSuccesfuly(rules) {
pi.success = false
}
@ -202,13 +190,3 @@ func (pi *PolicyInfo) GetRuleNames(onSuccess bool) string {
return strings.Join(ruleNames, ",")
}
//ContainsRuleType checks if a policy info contains a rule type
func (pi *PolicyInfo) ContainsRuleType(ruleType RuleType) bool {
for _, r := range pi.Rules {
if r.RuleType == ruleType {
return true
}
}
return false
}

View file

@ -8,7 +8,7 @@ import (
"os"
"github.com/golang/glog"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/info"
"github.com/spf13/cobra"
@ -51,7 +51,7 @@ func NewCmdApply(in io.Reader, out, errout io.Writer) *cobra.Command {
return cmd
}
func complete(kubeconfig string, args []string) (*kubepolicy.Policy, []*resourceInfo) {
func complete(kubeconfig string, args []string) (*kyverno.Policy, []*resourceInfo) {
policyDir, resourceDir, err := validateDir(args)
if err != nil {
glog.Errorf("Failed to parse file path, err: %v\n", err)
@ -75,7 +75,7 @@ func complete(kubeconfig string, args []string) (*kubepolicy.Policy, []*resource
return policy, resources
}
func applyPolicy(policy *kubepolicy.Policy, resources []*resourceInfo) (output string) {
func applyPolicy(policy *kyverno.Policy, resources []*resourceInfo) (output string) {
for _, resource := range resources {
patchedDocument, err := applyPolicyOnRaw(policy, resource.rawResource, resource.gvk)
if err != nil {
@ -94,7 +94,7 @@ func applyPolicy(policy *kubepolicy.Policy, resources []*resourceInfo) (output s
return
}
func applyPolicyOnRaw(policy *kubepolicy.Policy, rawResource []byte, gvk *metav1.GroupVersionKind) ([]byte, error) {
func applyPolicyOnRaw(policy *kyverno.Policy, rawResource []byte, gvk *metav1.GroupVersionKind) ([]byte, error) {
patchedResource := rawResource
var err error
@ -106,45 +106,44 @@ func applyPolicyOnRaw(policy *kubepolicy.Policy, rawResource []byte, gvk *metav1
rns,
policy.Spec.ValidationFailureAction)
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
return nil, err
}
//TODO check if the kind information is present resource
// Process Mutation
patches, ruleInfos := engine.Mutate(*policy, rawResource, *gvk)
policyInfo.AddRuleInfos(ruleInfos)
engineResponse := engine.Mutate(*policy, *resource)
policyInfo.AddRuleInfos(engineResponse.RuleInfos)
if !policyInfo.IsSuccessful() {
glog.Infof("Failed to apply policy %s on resource %s/%s", policy.Name, rname, rns)
for _, r := range ruleInfos {
for _, r := range engineResponse.RuleInfos {
glog.Warning(r.Msgs)
}
} else if len(patches) > 0 {
} else if len(engineResponse.Patches) > 0 {
glog.Infof("Mutation from policy %s has applied succesfully to %s %s/%s", policy.Name, gvk.Kind, rname, rns)
patchedResource, err = engine.ApplyPatches(rawResource, patches)
patchedResource, err = engine.ApplyPatches(rawResource, engineResponse.Patches)
if err != nil {
return nil, fmt.Errorf("Unable to apply mutation patches:\n%v", err)
}
// Process Validation
ruleInfos, err := engine.Validate(*policy, patchedResource, *gvk)
if err != nil {
// This is not policy error
// but if unable to parse request raw resource
// TODO : create event ? dont think so
glog.Error(err)
return patchedResource, err
}
policyInfo.AddRuleInfos(ruleInfos)
engineResponse := engine.Validate(*policy, *resource)
policyInfo.AddRuleInfos(engineResponse.RuleInfos)
if !policyInfo.IsSuccessful() {
glog.Infof("Failed to apply policy %s on resource %s/%s", policy.Name, rname, rns)
for _, r := range ruleInfos {
for _, r := range engineResponse.RuleInfos {
glog.Warning(r.Msgs)
}
return patchedResource, fmt.Errorf("Failed to apply policy %s on resource %s/%s", policy.Name, rname, rns)
} else if len(ruleInfos) > 0 {
} else if len(engineResponse.RuleInfos) > 0 {
glog.Infof("Validation from policy %s has applied succesfully to %s %s/%s", policy.Name, gvk.Kind, rname, rns)
}
}
return patchedResource, nil
}
func extractPolicy(fileDir string) (*kubepolicy.Policy, error) {
policy := &kubepolicy.Policy{}
func extractPolicy(fileDir string) (*kyverno.Policy, error) {
policy := &kyverno.Policy{}
file, err := loadFile(fileDir)
if err != nil {

View file

@ -9,6 +9,7 @@ import (
"github.com/golang/glog"
yamlv2 "gopkg.in/yaml.v2"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
)
@ -93,3 +94,12 @@ func scanDir(dir string) ([]string, error) {
return res[1:], nil
}
// ConvertToUnstructured unmarshals raw JSON bytes into an
// unstructured.Unstructured resource.
func ConvertToUnstructured(data []byte) (*unstructured.Unstructured, error) {
	resource := &unstructured.Unstructured{}
	if err := resource.UnmarshalJSON(data); err != nil {
		glog.V(4).Infof("failed to unmarshall resource: %v", err)
		return nil, err
	}
	return resource, nil
}

209
pkg/namespace/controller.go Normal file
View file

@ -0,0 +1,209 @@
package namespace
import (
"time"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/golang/glog"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy"
"k8s.io/apimachinery/pkg/api/errors"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
v1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
v1Informer "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
// maxRetries is the number of times a Namespace will be processed for a policy before its dropped from the queue
maxRetries = 15
)
//NamespaceController watches the 'Namespace' resource creation/update and
//applies the generation rules on them
type NamespaceController struct {
	// dynamic client used to create the generated resources
	client *client.Client
	// typed client for the kyverno CRDs
	kyvernoClient *kyvernoclient.Clientset
	// syncHandler processes one namespace key; held as a field so it can be
	// replaced in tests
	syncHandler func(nsKey string) error
	// enqueueNs adds a namespace to the work queue; held as a field so it can
	// be replaced in tests
	enqueueNs func(ns *v1.Namespace)
	//nsLister provides expansion to the namespace lister to inject GVK for the resource
	nsLister NamespaceListerExpansion
	// nsListerSynced returns true if the Namespace store has been synced at least once
	nsListerSynced cache.InformerSynced
	// pLister can list/get policies from the shared informer's store
	pLister kyvernolister.PolicyLister
	// pvListerSynced returns true once its backing store has synced.
	// NOTE(review): the constructor wires this to the *policy* informer, not
	// the policy-violation informer the name suggests — confirm intent.
	pvListerSynced cache.InformerSynced
	// pvLister can list/get policy violations from the shared informer's store
	pvLister kyvernolister.PolicyViolationLister
	// API to send policy stats for aggregation
	policyStatus policy.PolicyStatusInterface
	// eventGen provides interface to generate events
	eventGen event.Interface
	// Namespaces that need to be synced
	queue workqueue.RateLimitingInterface
	// Resource manager, manages the mapping for already processed resource
	rm resourceManager
}
//NewNamespaceController returns a new Controller to manage generation rules
//applied on namespace creation
func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
	client *client.Client,
	nsInformer v1Informer.NamespaceInformer,
	pInformer kyvernoinformer.PolicyInformer,
	pvInformer kyvernoinformer.PolicyViolationInformer,
	policyStatus policy.PolicyStatusInterface,
	eventGen event.Interface) *NamespaceController {
	//TODO: do we need to event recorder for this controller?
	// create the controller
	nsc := &NamespaceController{
		client:        client,
		kyvernoClient: kyvernoClient,
		eventGen:      eventGen,
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
	}
	// react to namespace lifecycle events
	nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    nsc.addNamespace,
		UpdateFunc: nsc.updateNamespace,
		DeleteFunc: nsc.deleteNamespace,
	})
	nsc.enqueueNs = nsc.enqueue
	nsc.syncHandler = nsc.syncNamespace
	nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
	nsc.nsListerSynced = nsInformer.Informer().HasSynced
	nsc.pLister = pInformer.Lister()
	// NOTE(review): pvListerSynced is wired to the policy informer here even
	// though its name suggests the policy-violation informer — confirm which
	// is intended.
	nsc.pvListerSynced = pInformer.Informer().HasSynced
	nsc.pvLister = pvInformer.Lister()
	nsc.policyStatus = policyStatus
	// resource manager
	// rebuild after 300 seconds/ 5 mins
	nsc.rm = NewResourceManager(300)
	return nsc
}
// addNamespace enqueues a newly observed Namespace for processing.
func (nsc *NamespaceController) addNamespace(obj interface{}) {
	namespace := obj.(*v1.Namespace)
	glog.V(4).Infof("Adding Namespace %s", namespace.Name)
	nsc.enqueueNs(namespace)
}
// updateNamespace handles Namespace update events; currently a no-op apart
// from filtering out resync notifications.
func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
	oldNs := old.(*v1.Namespace)
	curNs := cur.(*v1.Namespace)
	if curNs.ResourceVersion == oldNs.ResourceVersion {
		// Periodic resync will send update events for all known Namespace.
		// Two different versions of the same replica set will always have different RVs.
		return
	}
	glog.V(4).Infof("Updating Namesapce %s", curNs.Name)
	//TODO: anything to be done here?
}
// deleteNamespace handles Namespace delete events. Delete notifications may
// carry a cache.DeletedFinalStateUnknown tombstone instead of the object
// itself, so the type assertion is checked before the name is read.
func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
	ns, ok := obj.(*v1.Namespace)
	if !ok {
		// BUG FIX: the previous unchecked assertion left ns nil for tombstone
		// objects, panicking on ns.Name below.
		glog.V(4).Infof("expected Namespace but got %v", obj)
		return
	}
	glog.V(4).Infof("Deleting Namespace %s", ns.Name)
	//TODO: anything to be done here?
}
// enqueue derives the cache key for the namespace and adds it to the work
// queue; key-building failures are logged and dropped.
func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
	if key, err := cache.MetaNamespaceKeyFunc(ns); err != nil {
		glog.Error(err)
	} else {
		nsc.queue.Add(key)
	}
}
//Run starts the namespace controller: it waits for the namespace cache to
//sync, launches the requested number of workers, and blocks until stopCh is
//closed.
func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer nsc.queue.ShutDown()
	glog.Info("Starting namespace controller")
	defer glog.Info("Shutting down namespace controller")
	if ok := cache.WaitForCacheSync(stopCh, nsc.nsListerSynced); !ok {
		return
	}
	// BUG FIX: the 'workers' parameter was previously ignored in favour of a
	// package-level workerCount constant; honour the caller's request.
	for i := 0; i < workers; i++ {
		go wait.Until(nsc.worker, time.Second, stopCh)
	}
	<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (nsc *NamespaceController) worker() {
	for nsc.processNextWorkItem() {
	}
}
// processNextWorkItem pulls one key off the queue, syncs it, and reports
// whether the worker loop should keep going (false only on queue shutdown).
func (nsc *NamespaceController) processNextWorkItem() bool {
	key, shutdown := nsc.queue.Get()
	if shutdown {
		return false
	}
	defer nsc.queue.Done(key)
	// handleErr decides between rate-limited retry and dropping the key
	nsc.handleErr(nsc.syncHandler(key.(string)), key)
	return true
}
// handleErr requeues keys that have failed fewer than maxRetries times and
// permanently drops them afterwards.
func (nsc *NamespaceController) handleErr(err error, key interface{}) {
	if err == nil {
		nsc.queue.Forget(key)
		return
	}
	if nsc.queue.NumRequeues(key) >= maxRetries {
		// retry budget exhausted: report and drop
		utilruntime.HandleError(err)
		glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
		nsc.queue.Forget(key)
		return
	}
	glog.V(2).Infof("Error syncing namespace %v: %v", key, err)
	nsc.queue.AddRateLimited(key)
}
// syncNamespace resolves a queued key to its Namespace, applies the generate
// rules of all matching policies, and reports the results.
func (nsc *NamespaceController) syncNamespace(key string) error {
	startTime := time.Now()
	glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
	defer func() {
		glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
	}()
	namespace, err := nsc.nsLister.GetResource(key)
	if errors.IsNotFound(err) {
		// deleted between enqueue and processing; nothing left to do
		glog.V(2).Infof("namespace %v has been deleted", key)
		return nil
	}
	if err != nil {
		return err
	}
	// Deep-copy otherwise we are mutating our cache.
	// TODO: Deep-copy only when needed.
	n := namespace.DeepCopy()
	// process generate rules
	policyInfos := nsc.processNamespace(*n)
	// report errors
	nsc.report(policyInfos)
	return nil
}

View file

@ -0,0 +1,46 @@
package namespace
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
v1CoreLister "k8s.io/client-go/listers/core/v1"
)
//NamespaceListerExpansion extends the core namespace lister with variants
//that stamp the v1/Namespace GroupVersionKind on the returned objects.
type NamespaceListerExpansion interface {
	v1CoreLister.NamespaceLister
	// ListResources lists all Namespaces in the indexer and injects the GVK
	ListResources(selector labels.Selector) (ret []*v1.Namespace, err error)
	// GetResource gets a Namespace by name and injects the GVK
	GetResource(name string) (*v1.Namespace, error)
}

//NamespaceLister wraps the core namespace lister to implement
//NamespaceListerExpansion.
type NamespaceLister struct {
	v1CoreLister.NamespaceLister
}

//NewNamespaceLister returns a new NamespaceLister wrapping the given core
//lister.
func NewNamespaceLister(nsLister v1CoreLister.NamespaceLister) NamespaceListerExpansion {
	nsl := NamespaceLister{
		nsLister,
	}
	return &nsl
}
//ListResources lists namespaces via the embedded lister and stamps each
//result with the v1/Namespace GroupVersionKind (the lister is specific to
//one GVK, so the values are hardcoded).
func (nsl *NamespaceLister) ListResources(selector labels.Selector) (ret []*v1.Namespace, err error) {
	namespaces, err := nsl.List(selector)
	gvk := v1.SchemeGroupVersion.WithKind("Namespace")
	for _, namespace := range namespaces {
		namespace.SetGroupVersionKind(gvk)
	}
	return namespaces, err
}
//GetResource fetches the named namespace and injects the v1/Namespace
//GroupVersionKind on success.
func (nsl *NamespaceLister) GetResource(name string) (*v1.Namespace, error) {
	namespace, err := nsl.Get(name)
	if err != nil {
		// BUG FIX: the GVK was previously set before the error check, which
		// dereferences a nil pointer when the namespace does not exist.
		return nil, err
	}
	namespace.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Namespace"))
	return namespace, nil
}

169
pkg/namespace/generation.go Normal file
View file

@ -0,0 +1,169 @@
package namespace
import (
"sync"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/policy"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
// resourceManager caches which (policy, resource-version) pairs have already
// been processed, so unchanged resources are not re-evaluated.
type resourceManager interface {
	// ProcessResource returns true if the pair has NOT been processed yet
	ProcessResource(policy, pv, kind, ns, name, rv string) bool
	//TODO removeResource(kind, ns, name string) error
	// RegisterResource marks the pair as processed
	RegisterResource(policy, pv, kind, ns, name, rv string)
	// Drop clears the cache once the rebuild interval has elapsed
	Drop()
}
// ResourceManager stores the details on already processed resources for caching
type ResourceManager struct {
	// data holds the processed keys; we drop and re-build the cache
	// based on the memory consumed by the map
	data map[string]interface{}
	// mux guards concurrent access to data
	mux sync.RWMutex
	// time of the last cache (re)build
	time time.Time
	rebuildTime int64 // after how many seconds should we rebuild the cache
}
//NewResourceManager returns a ResourceManager whose cache is rebuilt every
//rebuildTime seconds; the build time is recorded at construction.
func NewResourceManager(rebuildTime int64) *ResourceManager {
	return &ResourceManager{
		data:        make(map[string]interface{}),
		time:        time.Now(),
		rebuildTime: rebuildTime,
	}
}
var empty struct{}
//RegisterResource records that the policy at the given resource version has
//been applied to this resource version.
func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
	key := buildKey(policy, pv, kind, ns, name, rv)
	rm.mux.Lock()
	rm.data[key] = empty
	rm.mux.Unlock()
}
//ProcessResource returns true if the policy was not yet applied on this
//resource version, i.e. the pair is absent from the cache.
func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
	rm.mux.RLock()
	defer rm.mux.RUnlock()
	key := buildKey(policy, pv, kind, ns, name, rv)
	_, ok := rm.data[key]
	// idiomatic negation instead of `ok == false`
	return !ok
}
//Drop clears the cache if more than rebuildTime seconds have elapsed since
//the last rebuild; otherwise it is a no-op.
//TODO: or drop based on the size
func (rm *ResourceManager) Drop() {
	timeSince := time.Since(rm.time)
	glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince)
	glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second)
	if timeSince > time.Duration(rm.rebuildTime)*time.Second {
		rm.mux.Lock()
		defer rm.mux.Unlock()
		// reset the map and the rebuild timestamp together under the lock
		rm.data = map[string]interface{}{}
		rm.time = time.Now()
		glog.V(4).Infof("dropping cache at time %v", rm.time)
	}
}
// buildKey composes the cache key "policy/pv/kind/ns/name/rv".
func buildKey(policy, pv, kind, ns, name, rv string) string {
	key := policy
	key += "/" + pv
	key += "/" + kind
	key += "/" + ns
	key += "/" + name
	key += "/" + rv
	return key
}
// processNamespace applies every matching generate policy to the namespace
// and returns one PolicyInfo per applied policy.
func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []info.PolicyInfo {
	var results []info.PolicyInfo
	// convert to unstructured so the generic engine can operate on it
	unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&namespace)
	if err != nil {
		glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
		return results
	}
	// rebuild the processed-resource cache if its interval has expired
	nsc.rm.Drop()
	ns := unstructured.Unstructured{Object: unstr}
	// get all the policies that have a generate rule and whose resource
	// description satisfies the namespace, then apply each
	for _, policy := range listpolicies(ns, nsc.pLister) {
		// pre-processing: skip if this policy/resource version pair was already handled
		if !nsc.rm.ProcessResource(policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion()) {
			glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
			continue
		}
		results = append(results, applyPolicy(nsc.client, ns, *policy, nsc.policyStatus))
		// post-processing: register the pair as processed
		nsc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
	}
	return results
}
// listpolicies returns the policies containing at least one generate rule
// whose resource description matches the given namespace. A policy is
// appended once per matching rule, mirroring the engine's per-rule handling.
func listpolicies(ns unstructured.Unstructured, pLister kyvernolister.PolicyLister) []*kyverno.Policy {
	glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
	policies, err := pLister.List(labels.NewSelector())
	if err != nil {
		glog.Errorf("failed to get list policies: %v", err)
		return nil
	}
	var matched []*kyverno.Policy
	for _, policy := range policies {
		for _, rule := range policy.Spec.Rules {
			// only generate rules are relevant for namespace processing
			if rule.Generation == (kyverno.Generation{}) {
				continue
			}
			if !engine.MatchesResourceDescription(ns, rule) {
				glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
				continue
			}
			glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
			matched = append(matched, policy)
		}
	}
	return matched
}
// applyPolicy runs the generate engine for one policy on the resource,
// publishes execution stats, and returns the application result.
func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.Policy, policyStatus policy.PolicyStatusInterface) info.PolicyInfo {
	startTime := time.Now()
	glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
	defer func() {
		glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
	}()
	policyInfo := info.NewPolicyInfo(p.Name, resource.GetKind(), resource.GetName(), resource.GetNamespace(), p.Spec.ValidationFailureAction)
	engineResponse := engine.Generate(client, p, resource)
	policyInfo.AddRuleInfos(engineResponse.RuleInfos)
	// gather generation stats and send them for aggregation
	var ps policy.PolicyStat
	ps.PolicyName = p.Name
	ps.Stats.GenerationExecutionTime = engineResponse.ExecutionTime
	ps.Stats.RulesAppliedCount = engineResponse.RulesAppliedCount
	policyStatus.SendStat(ps)
	return policyInfo
}

59
pkg/namespace/report.go Normal file
View file

@ -0,0 +1,59 @@
package namespace
import (
"fmt"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/policyviolation"
)
// report emits events and policy violations for the given policy results.
func (nsc *NamespaceController) report(policyInfos []info.PolicyInfo) {
	// events:
	//   success - policy applied on resource
	//   failure - policy/rule failed to apply on the resource
	for _, pi := range policyInfos {
		reportEvents(pi, nsc.eventGen)
	}
	// policy violations:
	//   failure - policy/rule failed to apply on the resource
	policyviolation.GeneratePolicyViolations(nsc.pvListerSynced, nsc.pvLister, nsc.kyvernoClient, policyInfos)
}
// reportEvents generates events for the failed resources
func reportEvents(policyInfo info.PolicyInfo, eventGen event.Interface) {
	if policyInfo.IsSuccessful() {
		// nothing to report on success
		return
	}
	glog.V(4).Infof("reporting results for policy %s application on resource %s/%s/%s", policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName)
	for _, rule := range policyInfo.Rules {
		if rule.IsSuccessful() {
			continue
		}
		// one event on the resource per failed rule
		failure := event.Info{
			Kind:      policyInfo.RKind,
			Namespace: policyInfo.RNamespace,
			Name:      policyInfo.RName,
			Reason:    "Failure",
			Message:   fmt.Sprintf("policy %s (%s) rule %s failed to apply. %v", policyInfo.Name, rule.RuleType.String(), rule.Name, rule.GetErrorString()),
		}
		eventGen.Add(&failure)
	}
	// one event on the policy summarizing all failed rules
	policyEvent := event.Info{
		Kind:      "Policy",
		Namespace: "",
		Name:      policyInfo.Name,
		Reason:    "Failure",
		Message:   fmt.Sprintf("failed to apply rules %s on resource %s/%s/%s", policyInfo.FailedRules(), policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName),
	}
	eventGen.Add(&policyEvent)
}

55
pkg/namespace/utils.go Normal file
View file

@ -0,0 +1,55 @@
package namespace
const (
	// wqNamespace is the name of the namespace controller work queue.
	wqNamespace string = "namespace"
	// workerCount is the number of concurrent queue workers.
	workerCount int = 1
	// wqRetryLimit is the maximum number of requeues for a failed item.
	wqRetryLimit int = 5
	// policyKind is the kind name of the Policy custom resource.
	policyKind string = "Policy"
)
// func namespaceMeetsRuleDescription(ns *corev1.Namespace, resourceDescription v1alpha1.ResourceDescription) bool {
// //REWORK Not needed but verify the 'Namespace' is defined in the list of supported kinds
// if !findKind(resourceDescription.Kinds, "Namespace") {
// return false
// }
// if resourceDescription.Name != nil {
// if !wildcard.Match(*resourceDescription.Name, ns.Name) {
// return false
// }
// }
// if resourceDescription.Selector != nil {
// selector, err := metav1.LabelSelectorAsSelector(resourceDescription.Selector)
// if err != nil {
// return false
// }
// labelSet := convertLabelsToLabelSet(ns.Labels)
// // labels
// if !selector.Matches(labelSet) {
// return false
// }
// }
// return true
// }
// func convertLabelsToLabelSet(labelMap map[string]string) labels.Set {
// labelSet := make(labels.Set, len(labelMap))
// // REWORK: check if the below works
// // if x, ok := labelMap.(labels.Set); !ok {
// // }
// for k, v := range labelMap {
// labelSet[k] = v
// }
// return labelSet
// }
// func findKind(kinds []string, kindGVK string) bool {
// for _, kind := range kinds {
// if kind == kindGVK {
// return true
// }
// }
// return false
// }

131
pkg/policy/apply.go Normal file
View file

@ -0,0 +1,131 @@
package policy
import (
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/utils"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// applyPolicy applies policy on a resource
// TODO: generation rules
func applyPolicy(policy kyverno.Policy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) (info.PolicyInfo, error) {
	startTime := time.Now()
	glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
	defer func() {
		glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
	}()
	policyInfo := info.NewPolicyInfo(policy.Name, resource.GetKind(), resource.GetName(), resource.GetNamespace(), policy.Spec.ValidationFailureAction)
	// MUTATION
	mruleInfos, err := mutation(policy, resource, policyStatus)
	policyInfo.AddRuleInfos(mruleInfos)
	if err != nil {
		return policyInfo, err
	}
	// VALIDATION
	engineResponse := engine.Validate(policy, resource)
	if len(engineResponse.RuleInfos) != 0 {
		policyInfo.AddRuleInfos(engineResponse.RuleInfos)
	}
	// gather validation stats and send them for aggregation
	var ps PolicyStat
	ps.PolicyName = policy.Name
	ps.Stats.ValidationExecutionTime = engineResponse.ExecutionTime
	ps.Stats.RulesAppliedCount = engineResponse.RulesAppliedCount
	policyStatus.SendStat(ps)
	// TODO: GENERATION
	return policyInfo, nil
}
// mutation runs the mutation engine rules of the policy on the resource,
// reports execution stats, and then verifies that the resource already
// satisfies the mutation rules by checking that applying the produced
// patches leaves the resource unchanged.
func mutation(policy kyverno.Policy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) ([]info.RuleInfo, error) {
	var ps PolicyStat
	// gather stats from the engine response
	gatherStat := func(policyName string, er engine.EngineResponse) {
		ps.PolicyName = policyName
		ps.Stats.MutationExecutionTime = er.ExecutionTime
		ps.Stats.RulesAppliedCount = er.RulesAppliedCount
	}
	// send stats for aggregation
	sendStat := func(blocked bool) {
		policyStatus.SendStat(ps)
	}
	engineResponse := engine.Mutate(policy, resource)
	// gather stats
	gatherStat(policy.Name, engineResponse)
	// send stats
	sendStat(false)
	patches := engineResponse.Patches
	ruleInfos := engineResponse.RuleInfos
	if len(ruleInfos) == 0 {
		// no mutation rules were processed
		return nil, nil
	}
	for _, r := range ruleInfos {
		if !r.IsSuccessful() {
			// at least one rule failed while processing; return the results as-is
			return ruleInfos, nil
		}
	}
	if len(patches) == 0 {
		// no patches for the resources
		// either there were failures or the overlay already was satisfied
		return ruleInfos, nil
	}
	// verify (original resource + patch) == (original resource)
	mergePatches := utils.JoinPatches(patches)
	patch, err := jsonpatch.DecodePatch(mergePatches)
	if err != nil {
		return nil, err
	}
	rawResource, err := resource.MarshalJSON()
	if err != nil {
		glog.V(4).Infof("unable to marshal resource : %v", err)
		return nil, err
	}
	// apply the patches returned by mutate to the original resource
	patchedResource, err := patch.Apply(rawResource)
	if err != nil {
		return nil, err
	}
	// TODO: this will be removed after the support for patching for each rule
	ruleInfo := info.NewRuleInfo("over-all mutation", info.Mutation)
	if !jsonpatch.Equal(patchedResource, rawResource) {
		// the patches changed the resource, so a mutation rule is violated
		// TODO : check the rule name "mutation rules"
		ruleInfo.Fail()
		ruleInfo.Add("resource does not satisfy mutation rules")
	} else {
		ruleInfo.Add("resource satisfys the mutation rule")
	}
	ruleInfos = append(ruleInfos, ruleInfo)
	return ruleInfos, nil
}

951
pkg/policy/controller.go Normal file
View file

@ -0,0 +1,951 @@
package policy
import (
"encoding/json"
"fmt"
"reflect"
"sync"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/webhookconfig"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
webhookinformer "k8s.io/client-go/informers/admissionregistration/v1beta1"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
webhooklister "k8s.io/client-go/listers/admissionregistration/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
const (
	// maxRetries is the number of times a Policy will be retried before it is dropped out of the queue.
	// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
	// a policy is going to be requeued:
	//
	// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
	maxRetries = 15
)

// controllerKind is the GroupVersionKind recorded in owner references that
// point policy violations at their owning Policy.
var controllerKind = kyverno.SchemeGroupVersion.WithKind("Policy")
// PolicyController is responsible for synchronizing Policy objects stored
// in the system with the corresponding policy violations
type PolicyController struct {
	client        *client.Client
	kyvernoClient *kyvernoclient.Clientset
	eventGen      event.Interface
	eventRecorder record.EventRecorder
	// syncHandler reconciles one policy by its cache key (overridable for tests)
	syncHandler func(pKey string) error
	// enqueuePolicy adds a policy to the work queue (overridable for tests)
	enqueuePolicy func(policy *kyverno.Policy)
	// pvControl is used for adopting/releasing policy violations
	pvControl PVControlInterface
	// queue holds the Policies that need to be synced
	queue workqueue.RateLimitingInterface
	// pLister can list/get policy from the shared informer's store
	pLister kyvernolister.PolicyLister
	// pvLister can list/get policy violation from the shared informer's store
	pvLister kyvernolister.PolicyViolationLister
	// pListerSynced returns true if the Policy store has been synced at least once
	pListerSynced cache.InformerSynced
	// pvListerSynced returns true if the PolicyViolation store has been synced at least once
	pvListerSynced cache.InformerSynced
	// mutationwebhookLister can list/get mutatingwebhookconfigurations
	mutationwebhookLister webhooklister.MutatingWebhookConfigurationLister
	// webhookRegistrationClient registers/deregisters the admission webhooks
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
	// rm manages the mapping for already processed resources
	rm resourceManager
	// filterK8Resources filters the resources defined in the list
	filterK8Resources []utils.K8Resource
	// statusAggregator receives stats and aggregates details
	statusAggregator *PolicyStatusAggregator
}
// NewPolicyController create a new PolicyController wired to the policy and
// policy violation informers, the mutating webhook informer, and an event
// broadcaster recording to the cluster.
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.PolicyInformer, pvInformer kyvernoinformer.PolicyViolationInformer,
	eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer, webhookRegistrationClient *webhookconfig.WebhookRegistrationClient) (*PolicyController, error) {
	// Event broadcaster
	eventBroadcaster := record.NewBroadcaster()
	eventBroadcaster.StartLogging(glog.Infof)
	eventInterface, err := client.GetEventsInterface()
	if err != nil {
		return nil, err
	}
	eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
	pc := PolicyController{
		client:                    client,
		kyvernoClient:             kyvernoClient,
		eventGen:                  eventGen,
		eventRecorder:             eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
		queue:                     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
		webhookRegistrationClient: webhookRegistrationClient,
	}
	pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}
	pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    pc.addPolicy,
		UpdateFunc: pc.updatePolicy,
		DeleteFunc: pc.deletePolicy,
	})
	pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
		AddFunc:    pc.addPolicyViolation,
		UpdateFunc: pc.updatePolicyViolation,
		DeleteFunc: pc.deletePolicyViolation,
	})
	pc.enqueuePolicy = pc.enqueue
	pc.syncHandler = pc.syncPolicy
	pc.pLister = pInformer.Lister()
	pc.pvLister = pvInformer.Lister()
	pc.pListerSynced = pInformer.Informer().HasSynced
	// BUGFIX: track the policy violation informer's sync state here; the
	// original assigned pInformer's HasSynced, so pvListerSynced never
	// reflected the PolicyViolation cache.
	pc.pvListerSynced = pvInformer.Informer().HasSynced
	pc.mutationwebhookLister = webhookInformer.Lister()
	// resource manager: rebuild the processed-resource cache every 30 seconds
	// TODO: pass the time in seconds instead of converting it internally
	pc.rm = NewResourceManager(30)
	// aggregator
	// pc.statusAggregator = NewPolicyStatAggregator(kyvernoClient, pInformer)
	pc.statusAggregator = NewPolicyStatAggregator(kyvernoClient)
	return &pc, nil
}
// addPolicy enqueues a newly added Policy for sync.
func (pc *PolicyController) addPolicy(obj interface{}) {
	policy := obj.(*kyverno.Policy)
	glog.V(4).Infof("Adding Policy %s", policy.Name)
	pc.enqueuePolicy(policy)
}
// updatePolicy enqueues the current version of an updated Policy for sync.
func (pc *PolicyController) updatePolicy(old, cur interface{}) {
	previous := old.(*kyverno.Policy)
	current := cur.(*kyverno.Policy)
	glog.V(4).Infof("Updating Policy %s", previous.Name)
	pc.enqueuePolicy(current)
}
// deletePolicy enqueues a deleted Policy for a final sync; obj may be a
// DeletedFinalStateUnknown tombstone wrapping the Policy.
func (pc *PolicyController) deletePolicy(obj interface{}) {
	policy, ok := obj.(*kyverno.Policy)
	if !ok {
		tombstone, isTombstone := obj.(cache.DeletedFinalStateUnknown)
		if !isTombstone {
			glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		policy, ok = tombstone.Obj.(*kyverno.Policy)
		if !ok {
			glog.Info(fmt.Errorf("Tombstone contained object that is not a Policy %#v", obj))
			return
		}
	}
	glog.V(4).Infof("Deleting Policy %s", policy.Name)
	pc.enqueuePolicy(policy)
}
// addPolicyViolation handles a new PolicyViolation: it ensures the policy
// label is present, then enqueues the owning policy (via ControllerRef) or,
// for orphans, every policy whose labels match.
func (pc *PolicyController) addPolicyViolation(obj interface{}) {
	pv := obj.(*kyverno.PolicyViolation)
	if pv.DeletionTimestamp != nil {
		// On a restart of the controller manager, it's possible for an object to
		// show up in a state that is already pending deletion.
		pc.deletePolicyViolation(pv)
		return
	}
	// generate labels to match the policy from the spec, if not present
	if updatePolicyLabelIfNotDefined(pc.pvControl, pv) {
		return
	}
	// If it has a ControllerRef, that's all that matters.
	if controllerRef := metav1.GetControllerOf(pv); controllerRef != nil {
		if p := pc.resolveControllerRef(controllerRef); p != nil {
			glog.V(4).Infof("PolicyViolation %s added.", pv.Name)
			pc.enqueuePolicy(p)
		}
		return
	}
	// Otherwise, it's an orphan. Get a list of all matching Policies and sync
	// them to see if anyone wants to adopt it.
	matching := pc.getPolicyForPolicyViolation(pv)
	if len(matching) == 0 {
		return
	}
	glog.V(4).Infof("Orphan Policy Violation %s added.", pv.Name)
	for _, p := range matching {
		pc.enqueuePolicy(p)
	}
}
// updatePolicyViolation enqueues the policies affected by a PolicyViolation
// update: the old controller when the ControllerRef changed, the current
// controller when one exists, or all matching policies for a changed orphan.
func (pc *PolicyController) updatePolicyViolation(old, cur interface{}) {
	curPV := cur.(*kyverno.PolicyViolation)
	oldPV := old.(*kyverno.PolicyViolation)
	if curPV.ResourceVersion == oldPV.ResourceVersion {
		// Periodic resync will send update events for all known Policy Violation.
		// Two different versions of the same policy violation will always have different RVs.
		return
	}
	// generate labels to match the policy from the spec, if not present
	if updatePolicyLabelIfNotDefined(pc.pvControl, curPV) {
		return
	}
	curControllerRef := metav1.GetControllerOf(curPV)
	oldControllerRef := metav1.GetControllerOf(oldPV)
	controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
	if controllerRefChanged && oldControllerRef != nil {
		// The ControllerRef was changed. Sync the old controller, if any.
		if p := pc.resolveControllerRef(oldControllerRef); p != nil {
			pc.enqueuePolicy(p)
		}
	}
	// If it has a ControllerRef, that's all that matters.
	if curControllerRef != nil {
		p := pc.resolveControllerRef(curControllerRef)
		if p == nil {
			return
		}
		glog.V(4).Infof("PolicyViolation %s updated.", curPV.Name)
		pc.enqueuePolicy(p)
		return
	}
	// Otherwise, it's an orphan. If anything changed, sync matching controllers
	// to see if anyone wants to adopt it now.
	labelChanged := !reflect.DeepEqual(curPV.Labels, oldPV.Labels)
	if labelChanged || controllerRefChanged {
		ps := pc.getPolicyForPolicyViolation(curPV)
		if len(ps) == 0 {
			return
		}
		glog.V(4).Infof("Orphan PolicyViolation %s updated", curPV.Name)
		for _, p := range ps {
			pc.enqueuePolicy(p)
		}
	}
}
// deletePolicyViolation enqueues the Policy that manages a PolicyViolation when
// the PolicyViolation is deleted. obj could be an *kyverno.PolicyViolation, or
// a DeletionFinalStateUnknown marker item.
func (pc *PolicyController) deletePolicyViolation(obj interface{}) {
	// When a delete is dropped, the relist will notice a PolicyViolation in the store not
	// in the list, leading to the insertion of a tombstone object which contains
	// the deleted key/value. Note that this value might be stale. If the PolicyViolation
	// changed labels the new Policy will not be woken up till the periodic resync.
	pv, ok := obj.(*kyverno.PolicyViolation)
	if !ok {
		tombstone, isTombstone := obj.(cache.DeletedFinalStateUnknown)
		if !isTombstone {
			glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
		pv, ok = tombstone.Obj.(*kyverno.PolicyViolation)
		if !ok {
			glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
			return
		}
	}
	controllerRef := metav1.GetControllerOf(pv)
	if controllerRef == nil {
		// No controller should care about orphans being deleted.
		return
	}
	p := pc.resolveControllerRef(controllerRef)
	if p == nil {
		return
	}
	glog.V(4).Infof("PolicyViolation %s deleted", pv.Name)
	pc.enqueuePolicy(p)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (pc *PolicyController) resolveControllerRef(controllerRef *metav1.OwnerReference) *kyverno.Policy {
	// We can't look up by UID, so look up by Name and then verify UID.
	// Don't even try to look up by Name if it's the wrong Kind.
	// BUGFIX: compare against the expected controller kind; the original
	// compared controllerRef.Kind with itself, which is never unequal, so
	// references of the wrong Kind were never filtered out.
	if controllerRef.Kind != controllerKind.Kind {
		return nil
	}
	p, err := pc.pLister.Get(controllerRef.Name)
	if err != nil {
		return nil
	}
	if p.UID != controllerRef.UID {
		// The controller we found with this Name is not the same one that the
		// ControllerRef points to.
		return nil
	}
	return p
}
// getPolicyForPolicyViolation returns the policies whose labels select the
// given policy violation; more than one match is a user error and is logged.
func (pc *PolicyController) getPolicyForPolicyViolation(pv *kyverno.PolicyViolation) []*kyverno.Policy {
	policies, err := pc.pLister.GetPolicyForPolicyViolation(pv)
	if err != nil || len(policies) == 0 {
		return nil
	}
	// Because all PolicyViolations belonging to a policy should have a unique label key,
	// there should never be more than one policy returned by the above method.
	// If that happens we should probably dynamically repair the situation by ultimately
	// trying to clean up one of the controllers, for now we just return the older one
	if len(policies) > 1 {
		// ControllerRef will ensure we don't do anything crazy, but more than one
		// item in this list nevertheless constitutes user error.
		glog.V(4).Infof("user error! more than one policy is selecting policy violation %s with labels: %#v, returning %s",
			pv.Name, pv.Labels, policies[0].Name)
	}
	return policies
}
// enqueue adds the policy's namespace/name cache key to the work queue.
func (pc *PolicyController) enqueue(policy *kyverno.Policy) {
	pKey, err := cache.MetaNamespaceKeyFunc(policy)
	if err != nil {
		// cannot derive a key; log and skip
		glog.Error(err)
		return
	}
	pc.queue.Add(pKey)
}
// Run begins watching and syncing until stopCh is closed.
func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
	defer utilruntime.HandleCrash()
	defer pc.queue.ShutDown()
	glog.Info("Starting policy controller")
	defer glog.Info("Shutting down policy controller")
	// wait for the informer caches before processing any work
	if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.pvListerSynced) {
		return
	}
	for w := 0; w < workers; w++ {
		go wait.Until(pc.worker, time.Second, stopCh)
	}
	// policy status aggregator
	// TODO: workers required for aggregation
	pc.statusAggregator.Run(1, stopCh)
	<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (pc *PolicyController) worker() {
	// loop until processNextWorkItem reports the queue is shut down
	for pc.processNextWorkItem() {
	}
}
// processNextWorkItem pulls one key off the queue, syncs it, and records the
// outcome; it returns false only once the queue has been shut down.
func (pc *PolicyController) processNextWorkItem() bool {
	key, shutdown := pc.queue.Get()
	if shutdown {
		return false
	}
	defer pc.queue.Done(key)
	pc.handleErr(pc.syncHandler(key.(string)), key)
	return true
}
// handleErr requeues a failed key with rate limiting until maxRetries is
// reached, after which the key is dropped from the queue.
func (pc *PolicyController) handleErr(err error, key interface{}) {
	if err == nil {
		pc.queue.Forget(key)
		return
	}
	if pc.queue.NumRequeues(key) >= maxRetries {
		// give up on this key
		utilruntime.HandleError(err)
		glog.V(2).Infof("Dropping policy %q out of the queue: %v", key, err)
		pc.queue.Forget(key)
		return
	}
	glog.V(2).Infof("Error syncing Policy %v: %v", key, err)
	pc.queue.AddRateLimited(key)
}
// syncPolicy reconciles a single policy by its cache key: it handles deletion
// (stats cleanup and webhook deregistration), ensures webhook registration,
// applies the policy to existing resources, reports results, and updates the
// policy status subresource with the current violation count.
func (pc *PolicyController) syncPolicy(key string) error {
	startTime := time.Now()
	glog.V(4).Infof("Started syncing policy %q (%v)", key, startTime)
	defer func() {
		glog.V(4).Infof("Finished syncing policy %q (%v)", key, time.Since(startTime))
	}()
	policy, err := pc.pLister.Get(key)
	if errors.IsNotFound(err) {
		glog.V(2).Infof("Policy %v has been deleted", key)
		// remove the recorded stats for the policy
		pc.statusAggregator.RemovePolicyStats(key)
		// remove webhook configurations if there are no policies left
		if err := pc.handleWebhookRegistration(true, nil); err != nil {
			glog.Errorln(err)
		}
		return nil
	}
	if err != nil {
		return err
	}
	if err := pc.handleWebhookRegistration(false, policy); err != nil {
		glog.Errorln(err)
	}
	// Deep-copy otherwise we are mutating our cache.
	// TODO: Deep-copy only when needed.
	p := policy.DeepCopy()
	pvList, err := pc.getPolicyViolationsForPolicy(p)
	if err != nil {
		return err
	}
	// process policies on existing resources
	policyInfos := pc.processExistingResources(*p)
	// report errors
	pc.report(policyInfos)
	// fetch the policy again via the aggregator to remain consistent
	// return pc.statusAggregator.UpdateViolationCount(p.Name, pvList)
	return pc.syncStatusOnly(p, pvList)
}
// handleWebhookRegistration registers or deregisters the mutating webhook
// configuration based on the current set of policies: on delete it removes the
// webhook when no mutate/validate policies remain; otherwise it registers the
// webhook when one is missing and the policy needs it.
// TODO: here checks mutatingwebhook only
// as 'kubectl scale' is not functional with validatingwebhook
// refer to https://github.com/nirmata/kyverno/issues/250
func (pc *PolicyController) handleWebhookRegistration(delete bool, policy *kyverno.Policy) error {
	policies, _ := pc.pLister.List(labels.NewSelector())
	selector := &metav1.LabelSelector{MatchLabels: config.KubePolicyAppLabels}
	webhookSelector, err := metav1.LabelSelectorAsSelector(selector)
	if err != nil {
		return fmt.Errorf("invalid label selector: %v", err)
	}
	webhookList, err := pc.mutationwebhookLister.List(webhookSelector)
	if err != nil {
		return fmt.Errorf("failed to list mutatingwebhookconfigurations, err %v", err)
	}
	if delete {
		if webhookList == nil {
			return nil
		}
		// webhook exists; deregister webhookconfigurations conditionally:
		// check empty policy list first, then rule type, in terms of O(time)
		if policies == nil {
			glog.V(3).Infoln("No policy found in the cluster, deregistering webhook")
			pc.webhookRegistrationClient.DeregisterMutatingWebhook()
		} else if !HasMutateOrValidatePolicies(policies) {
			glog.V(3).Infoln("No muatate/validate policy found in the cluster, deregistering webhook")
			pc.webhookRegistrationClient.DeregisterMutatingWebhook()
		}
		return nil
	}
	if webhookList == nil && HasMutateOrValidate(*policy) {
		glog.V(3).Infoln("Found policy without mutatingwebhook, registering webhook")
		pc.webhookRegistrationClient.RegisterMutatingWebhook()
	}
	return nil
}
// syncStatusOnly updates the policy status subresource
// status:
//   - violations: count of the resources that violate this policy
func (pc *PolicyController) syncStatusOnly(p *kyverno.Policy, pvList []*kyverno.PolicyViolation) error {
	newStatus := pc.calculateStatus(p.Name, pvList)
	if reflect.DeepEqual(newStatus, p.Status) {
		// nothing changed; skip the API call
		return nil
	}
	updated := p
	updated.Status = newStatus
	_, err := pc.kyvernoClient.KyvernoV1alpha1().Policies().UpdateStatus(updated)
	return err
}
// calculateStatus builds a PolicyStatus from the violation list and, when
// available, the aggregated execution stats for the policy.
func (pc *PolicyController) calculateStatus(policyName string, pvList []*kyverno.PolicyViolation) kyverno.PolicyStatus {
	status := kyverno.PolicyStatus{
		ViolationCount: len(pvList),
	}
	// merge in aggregated execution stats when any have been recorded
	if stats := pc.statusAggregator.GetPolicyStats(policyName); stats != (PolicyStatInfo{}) {
		status.RulesAppliedCount = stats.RulesAppliedCount
		status.ResourcesBlockedCount = stats.ResourceBlocked
		status.AvgExecutionTimeMutation = stats.MutationExecutionTime.String()
		status.AvgExecutionTimeValidation = stats.ValidationExecutionTime.String()
		status.AvgExecutionTimeGeneration = stats.GenerationExecutionTime.String()
	}
	return status
}
// getPolicyViolationsForPolicy lists all policy violations and claims those
// labeled for this policy, adopting/releasing through the controller-ref
// manager so ownership stays consistent.
func (pc *PolicyController) getPolicyViolationsForPolicy(p *kyverno.Policy) ([]*kyverno.PolicyViolation, error) {
	// List all PolicyViolation to find those we own but that no longer match our
	// selector. They will be orphaned by ClaimPolicyViolation().
	pvList, err := pc.pvLister.List(labels.Everything())
	if err != nil {
		return nil, err
	}
	policyLabelmap := map[string]string{"policy": p.Name}
	// Not using a field selector: the match function would have to cast the
	// runtime.Object to read the field, while it can read labels directly,
	// saving the cast effort.
	//spec.policyName!=default
	// fs := fields.Set{"spec.name": name}.AsSelector().String()
	ls := &metav1.LabelSelector{}
	err = metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&policyLabelmap, ls, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to generate label sector of Policy name %s: %v", p.Name, err)
	}
	policySelector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		return nil, fmt.Errorf("Policy %s has invalid label selector: %v", p.Name, err)
	}
	// re-fetch the policy before adopting to confirm it still exists and is
	// the same object (UID match)
	canAdoptFunc := RecheckDeletionTimestamp(func() (metav1.Object, error) {
		fresh, err := pc.kyvernoClient.KyvernoV1alpha1().Policies().Get(p.Name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		if fresh.UID != p.UID {
			return nil, fmt.Errorf("original Policy %v is gone: got uid %v, wanted %v", p.Name, fresh.UID, p.UID)
		}
		return fresh, nil
	})
	cm := NewPolicyViolationControllerRefManager(pc.pvControl, p, policySelector, controllerKind, canAdoptFunc)
	return cm.claimPolicyViolations(pvList)
}
// claimPolicyViolations attempts to take ownership of every policy violation
// matching the manager's selector (releasing those that no longer match) and
// returns the violations actually claimed plus any aggregated errors.
func (m *PolicyViolationControllerRefManager) claimPolicyViolations(sets []*kyverno.PolicyViolation) ([]*kyverno.PolicyViolation, error) {
	var claimed []*kyverno.PolicyViolation
	var errs []error
	matchFn := func(obj metav1.Object) bool {
		return m.Selector.Matches(labels.Set(obj.GetLabels()))
	}
	adoptFn := func(obj metav1.Object) error {
		return m.adoptPolicyViolation(obj.(*kyverno.PolicyViolation))
	}
	releaseFn := func(obj metav1.Object) error {
		return m.releasePolicyViolation(obj.(*kyverno.PolicyViolation))
	}
	for _, pv := range sets {
		ok, err := m.ClaimObject(pv, matchFn, adoptFn, releaseFn)
		if err != nil {
			errs = append(errs, err)
			continue
		}
		if ok {
			claimed = append(claimed, pv)
		}
	}
	return claimed, utilerrors.NewAggregate(errs)
}
// adoptPolicyViolation patches the policy violation to add the managing
// policy as its controlling owner reference.
func (m *PolicyViolationControllerRefManager) adoptPolicyViolation(pv *kyverno.PolicyViolation) error {
	if err := m.CanAdopt(); err != nil {
		return fmt.Errorf("can't adopt PolicyViolation %v (%v): %v", pv.Name, pv.UID, err)
	}
	// Note that ValidateOwnerReferences() will reject this patch if another
	// OwnerReference exists with controller=true.
	//TODO Add JSON Patch Owner reference for resource
	//TODO Update owner refence for resource
	isController := true
	blockOwnerDeletion := true
	ownerRef := metav1.OwnerReference{
		APIVersion:         m.controllerKind.GroupVersion().String(),
		Kind:               m.controllerKind.Kind,
		Name:               m.Controller.GetName(),
		UID:                m.Controller.GetUID(),
		Controller:         &isController,
		BlockOwnerDeletion: &blockOwnerDeletion,
	}
	addControllerPatch, err := createOwnerReferencePatch(ownerRef)
	if err != nil {
		glog.Errorf("failed to add owner reference %v for PolicyViolation %s: %v", ownerRef, pv.Name, err)
		return err
	}
	return m.pvControl.PatchPolicyViolation(pv.Name, addControllerPatch)
}
// patchOwnerReferenceValue is one JSON-patch operation targeting an object's
// metadata.ownerReferences list.
type patchOwnerReferenceValue struct {
	Op    string                  `json:"op"`    // JSON-patch operation, e.g. "add" or "remove"
	Path  string                  `json:"path"`  // always "/metadata/ownerReferences" here
	Value []metav1.OwnerReference `json:"value"` // owner references the operation applies
}
// createOwnerReferencePatch returns a JSON patch that adds the given owner
// reference as the object's metadata.ownerReferences value.
func createOwnerReferencePatch(ownerRef metav1.OwnerReference) ([]byte, error) {
	return json.Marshal([]patchOwnerReferenceValue{{
		Op:    "add",
		Path:  "/metadata/ownerReferences",
		Value: []metav1.OwnerReference{ownerRef},
	}})
}
// removeOwnerReferencePatch returns a JSON patch that removes the object's
// metadata.ownerReferences entry for the given owner reference.
func removeOwnerReferencePatch(ownerRef metav1.OwnerReference) ([]byte, error) {
	return json.Marshal([]patchOwnerReferenceValue{{
		Op:    "remove",
		Path:  "/metadata/ownerReferences",
		Value: []metav1.OwnerReference{ownerRef},
	}})
}
// releasePolicyViolation patches the policy violation to remove its
// controlling owner reference to the managing policy; NotFound/Invalid
// errors are ignored because they mean there is nothing left to release.
func (m *PolicyViolationControllerRefManager) releasePolicyViolation(pv *kyverno.PolicyViolation) error {
	glog.V(2).Infof("patching PolicyViolation %s to remove its controllerRef to %s/%s:%s",
		pv.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
	//TODO JSON patch for owner reference for resources
	controllerFlag := true
	blockOwnerDeletionFlag := true
	pOwnerRef := metav1.OwnerReference{APIVersion: m.controllerKind.GroupVersion().String(),
		Kind:               m.controllerKind.Kind,
		Name:               m.Controller.GetName(),
		UID:                m.Controller.GetUID(),
		Controller:         &controllerFlag,
		BlockOwnerDeletion: &blockOwnerDeletionFlag,
	}
	removeControllerPatch, err := removeOwnerReferencePatch(pOwnerRef)
	if err != nil {
		// BUGFIX: the message previously said "failed to add owner reference",
		// a copy-paste from the adopt path; this is the remove path.
		glog.Errorf("failed to remove owner reference %v for PolicyViolation %s: %v", pOwnerRef, pv.Name, err)
		return err
	}
	err = m.pvControl.PatchPolicyViolation(pv.Name, removeControllerPatch)
	if err != nil {
		if errors.IsNotFound(err) {
			// If the PolicyViolation no longer exists, ignore it.
			return nil
		}
		if errors.IsInvalid(err) {
			// Invalid error will be returned in two cases: 1. the PolicyViolation
			// has no owner reference, 2. the uid of the PolicyViolation doesn't
			// match, which means it was deleted and then recreated.
			// In both cases, the error can be ignored.
			return nil
		}
	}
	return err
}
//PolicyViolationControllerRefManager manages adoption of policy violation by a policy
type PolicyViolationControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
pvControl PVControlInterface
}
//NewPolicyViolationControllerRefManager returns a PolicyViolationControllerRefManager
// bound to the given controller object, label selector, controller kind, and
// adoption pre-check function.
func NewPolicyViolationControllerRefManager(
	pvControl PVControlInterface,
	controller metav1.Object,
	selector labels.Selector,
	controllerKind schema.GroupVersionKind,
	canAdopt func() error,
) *PolicyViolationControllerRefManager {
	return &PolicyViolationControllerRefManager{
		BaseControllerRefManager: BaseControllerRefManager{
			Controller:   controller,
			Selector:     selector,
			CanAdoptFunc: canAdopt,
		},
		controllerKind: controllerKind,
		pvControl:      pvControl,
	}
}
//BaseControllerRefManager ...
type BaseControllerRefManager struct {
Controller metav1.Object
Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
CanAdoptFunc func() error
}
//CanAdopt ...
func (m *BaseControllerRefManager) CanAdopt() error {
m.canAdoptOnce.Do(func() {
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc()
}
})
return m.canAdoptErr
}
// ClaimObject attempts to take or keep ownership of obj for this controller.
// match reports whether obj matches the controller's selector; adopt and
// release perform the actual ownership patches.
// It returns (true, nil) when obj is owned by this controller and matches,
// (false, nil) when obj is owned by someone else or was released/ignored, and
// (false, err) only on a transient failure that should be retried.
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
	controllerRef := metav1.GetControllerOf(obj)
	if controllerRef != nil {
		if controllerRef.UID != m.Controller.GetUID() {
			// Owned by someone else. Ignore
			return false, nil
		}
		if match(obj) {
			// We already own it and the selector matches.
			// Return true (successfully claimed) before checking deletion timestamp.
			// We're still allowed to claim things we already own while being deleted
			// because doing so requires taking no actions.
			return true, nil
		}
		// Owned by us but selector doesn't match.
		// Try to release, unless we're being deleted.
		if m.Controller.GetDeletionTimestamp() != nil {
			return false, nil
		}
		if err := release(obj); err != nil {
			// If the PolicyViolation no longer exists, ignore the error.
			if errors.IsNotFound(err) {
				return false, nil
			}
			// Either someone else released it, or there was a transient error.
			// The controller should requeue and try again if it's still stale.
			return false, err
		}
		// Successfully released.
		return false, nil
	}
	// It's an orphan.
	if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
		// Ignore if we're being deleted or selector doesn't match.
		return false, nil
	}
	if obj.GetDeletionTimestamp() != nil {
		// Ignore if the object is being deleted
		return false, nil
	}
	// Selector matches. Try to adopt.
	if err := adopt(obj); err != nil {
		// If the PolicyViolation no longer exists, ignore the error
		if errors.IsNotFound(err) {
			return false, nil
		}
		// Either someone else claimed it first, or there was a transient error.
		// The controller should requeue and try again if it's still orphaned.
		return false, err
	}
	// Successfully adopted.
	return true, nil
}
//PVControlInterface provides interface to operate on policy violation resource
type PVControlInterface interface {
PatchPolicyViolation(name string, data []byte) error
}
// RealPVControl is the default implementation of PVControlInterface.
type RealPVControl struct {
Client kyvernoclient.Interface
Recorder record.EventRecorder
}
//PatchPolicyViolation patches the policy violation with the provided JSON Patch
func (r RealPVControl) PatchPolicyViolation(name string, data []byte) error {
_, err := r.Client.KyvernoV1alpha1().PolicyViolations().Patch(name, types.JSONPatchType, data)
return err
}
// RecheckDeletionTimestamp returns a CanAdopt() function that re-fetches the
// controller object via getObject and refuses adoption once the object carries
// a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
	return func() error {
		latest, err := getObject()
		if err != nil {
			return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
		}
		if ts := latest.GetDeletionTimestamp(); ts != nil {
			return fmt.Errorf("%v/%v has just been deleted at %v", latest.GetNamespace(), latest.GetName(), ts)
		}
		return nil
	}
}
type patchLabelValue struct {
Op string `json:"op"`
Path string `json:"path"`
Value string `json:"value"`
}
type patchLabelMapValue struct {
Op string `json:"op"`
Path string `json:"path"`
Value map[string]string `json:"value"`
}
// createPolicyLabelPatch returns a JSON patch that adds the 'policy' label
// with the given policy name.
func createPolicyLabelPatch(policy string) ([]byte, error) {
	op := patchLabelValue{
		Op:    "add",
		Path:  "/metadata/labels/policy",
		Value: policy,
	}
	return json.Marshal([]patchLabelValue{op})
}
// createResourceLabelPatch returns a JSON patch that adds the 'resource' label
// with the given resource key.
func createResourceLabelPatch(resource string) ([]byte, error) {
	op := patchLabelValue{
		Op:    "add",
		Path:  "/metadata/labels/resource",
		Value: resource,
	}
	return json.Marshal([]patchLabelValue{op})
}
// createLabelMapPatch returns a JSON patch that creates the whole labels map
// containing both the 'policy' and 'resource' labels (used when the object has
// no labels yet).
func createLabelMapPatch(policy string, resource string) ([]byte, error) {
	op := patchLabelMapValue{
		Op:   "add",
		Path: "/metadata/labels",
		Value: map[string]string{
			"policy":   policy,
			"resource": resource,
		},
	}
	return json.Marshal([]patchLabelMapValue{op})
}
//updatePolicyLabelIfNotDefined adds the 'policy' and 'resource' labels to the
// PolicyViolation when missing; these labels are used to look up a
// PolicyViolation and its corresponding Policy.
// Returns true if a label patch was successfully applied, false otherwise.
func updatePolicyLabelIfNotDefined(pvControl PVControlInterface, pv *kyverno.PolicyViolation) bool {
	// updateLabel patches the violation with the policy/resource labels,
	// creating the labels map first when the object has none
	updateLabel := func() bool {
		glog.V(4).Infof("adding label 'policy:%s' to PolicyViolation %s", pv.Spec.Policy, pv.Name)
		glog.V(4).Infof("adding label 'resource:%s' to PolicyViolation %s", pv.Spec.ResourceSpec.ToKey(), pv.Name)
		// add label based on the policy spec
		labels := pv.GetLabels()
		if pv.Spec.Policy == "" {
			glog.Error("policy not defined for violation")
			// should be cleaned up
			return false
		}
		if labels == nil {
			// create a patch to generate the labels map with policy label
			patch, err := createLabelMapPatch(pv.Spec.Policy, pv.Spec.ResourceSpec.ToKey())
			if err != nil {
				glog.Errorf("unable to init label map. %v", err)
				return false
			}
			if err := pvControl.PatchPolicyViolation(pv.Name, patch); err != nil {
				glog.Errorf("Unable to add 'policy' label to PolicyViolation %s: %v", pv.Name, err)
				return false
			}
			// update successful
			return true
		}
		// JSON Patch to add exact label
		policyLabelPatch, err := createPolicyLabelPatch(pv.Spec.Policy)
		if err != nil {
			glog.Errorf("failed to generate patch to add label 'policy': %v", err)
			return false
		}
		resourceLabelPatch, err := createResourceLabelPatch(pv.Spec.ResourceSpec.ToKey())
		if err != nil {
			glog.Errorf("failed to generate patch to add label 'resource': %v", err)
			return false
		}
		//join patches into a single JSON patch document
		labelPatch := joinPatches(policyLabelPatch, resourceLabelPatch)
		if labelPatch == nil {
			glog.Errorf("failed to join patches : %v", err)
			return false
		}
		glog.V(4).Infof("patching policy violation %s with patch %s", pv.Name, string(labelPatch))
		if err := pvControl.PatchPolicyViolation(pv.Name, labelPatch); err != nil {
			glog.Errorf("Unable to add 'policy' label to PolicyViolation %s: %v", pv.Name, err)
			return false
		}
		// update successful
		return true
	}
	var policy string
	var ok bool
	// operate on a copy of the resource's labels
	curLabels := pv.GetLabels()
	if policy, ok = curLabels["policy"]; !ok {
		return updateLabel()
	}
	// TODO: would be beneficial to add a check to verify if the policy in name and resource spec match
	if policy != pv.Spec.Policy {
		glog.Errorf("label 'policy:%s' and spec.policy %s dont match ", policy, pv.Spec.Policy)
		//TODO handle this case
		return updateLabel()
	}
	return false
}
// joinPatches combines individual JSON patch documents into one JSON array.
// Returns nil when no patches are provided so callers can detect "nothing to
// apply". (Previously an empty non-nil patch list produced the malformed
// document "[\n\n]" because only a nil slice was checked.)
func joinPatches(patches ...[]byte) []byte {
	if len(patches) == 0 {
		// nothing to join
		return nil
	}
	var result []byte
	result = append(result, []byte("[\n")...)
	for index, patch := range patches {
		result = append(result, patch...)
		// separate entries with a comma, except after the last one
		if index != len(patches)-1 {
			result = append(result, []byte(",\n")...)
		}
	}
	result = append(result, []byte("\n]")...)
	return result
}
// HasMutateOrValidatePolicies reports whether at least one policy in the list
// defines a mutation or validation rule.
func HasMutateOrValidatePolicies(policies []*kyverno.Policy) bool {
	for i := range policies {
		if HasMutateOrValidate(*policies[i]) {
			return true
		}
	}
	return false
}
// HasMutateOrValidate reports whether the policy contains at least one rule
// with a non-empty mutation or validation definition.
func HasMutateOrValidate(policy kyverno.Policy) bool {
	for _, rule := range policy.Spec.Rules {
		if !reflect.DeepEqual(rule.Mutation, kyverno.Mutation{}) || !reflect.DeepEqual(rule.Validation, kyverno.Validation{}) {
			// demoted from an unconditional Infoln: the matched rule name is
			// debug detail, not operationally relevant output
			glog.V(4).Infoln(rule.Name)
			return true
		}
	}
	return false
}

256
pkg/policy/existing.go Normal file
View file

@ -0,0 +1,256 @@
package policy
import (
"sync"
"time"
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// processExistingResources applies the given policy to every pre-existing
// cluster resource that matches its rules, skipping policy/resource-version
// pairs already recorded in the ResourceManager cache.
// Returns the PolicyInfo results for the resources that were processed.
func (pc *PolicyController) processExistingResources(policy kyverno.Policy) []info.PolicyInfo {
	// Parse through all the resources
	// drops the cache after configured rebuild time
	pc.rm.Drop()
	var policyInfos []info.PolicyInfo
	// get resources that satisfy the resource description defined in the rules
	resourceMap := listResources(pc.client, policy, pc.filterK8Resources)
	for _, resource := range resourceMap {
		// pre-processing, check if the policy and resource version has been processed before
		if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {
			glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion())
			continue
		}
		// apply the policy on each
		glog.V(4).Infof("apply policy %s with resource version %s on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion())
		policyInfo := applyPolicyOnResource(policy, resource, pc.statusAggregator)
		if policyInfo == nil {
			// applyPolicyOnResource returns nil on failure (and has already
			// logged it); skip instead of dereferencing a nil pointer, which
			// previously caused a panic
			continue
		}
		policyInfos = append(policyInfos, *policyInfo)
		// post-processing, register the resource as processed
		pc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion())
	}
	return policyInfos
}
// applyPolicyOnResource applies a single policy to a single resource and
// returns the resulting PolicyInfo.
// Returns nil when the policy fails to process — callers must nil-check before
// dereferencing the result.
func applyPolicyOnResource(policy kyverno.Policy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface) *info.PolicyInfo {
	policyInfo, err := applyPolicy(policy, resource, policyStatus)
	if err != nil {
		glog.V(4).Infof("failed to process policy %s on resource %s/%s/%s: %v", policy.GetName(), resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
		return nil
	}
	return &policyInfo
}
// listResources walks every rule of the policy and returns, keyed by resource
// UID, all cluster resources that satisfy the rules' match/exclude
// descriptions. Namespace-kind resources are skipped here (handled by the
// generator controller).
func listResources(client *client.Client, policy kyverno.Policy, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured {
	// key uid
	resourceMap := map[string]unstructured.Unstructured{}
	for _, rule := range policy.Spec.Rules {
		// resources that match
		for _, k := range rule.MatchResources.Kinds {
			if kindIsExcluded(k, rule.ExcludeResources.Kinds) {
				glog.V(4).Infof("processing policy %s rule %s: kind %s is exluded", policy.Name, rule.Name, k)
				continue
			}
			var namespaces []string
			if k == "Namespace" {
				// TODO
				// this is handled by generator controller
				glog.V(4).Infof("skipping processing policy %s rule %s for kind Namespace", policy.Name, rule.Name)
				continue
			}
			if len(rule.MatchResources.Namespaces) > 0 {
				namespaces = append(namespaces, rule.MatchResources.Namespaces...)
				glog.V(4).Infof("namespaces specified for inclusion: %v", rule.MatchResources.Namespaces)
			} else {
				glog.V(4).Infof("processing policy %s rule %s, namespace not defined, getting all namespaces ", policy.Name, rule.Name)
				// get all namespaces
				namespaces = getAllNamespaces(client)
			}
			// check if exclude namespace is not clashing
			namespaces = excludeNamespaces(namespaces, rule.ExcludeResources.Namespaces)
			// get resources in the namespaces
			for _, ns := range namespaces {
				rMap := getResourcesPerNamespace(k, client, ns, rule, filterK8Resources)
				mergeresources(resourceMap, rMap)
			}
		}
	}
	return resourceMap
}
// getResourcesPerNamespace lists resources of the given kind in one namespace,
// applying the rule's include/exclude name patterns (wildcards supported) and
// the configured resource filters. Results are keyed by resource UID.
// Returns nil when the list call fails.
func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, filterK8Resources []utils.K8Resource) map[string]unstructured.Unstructured {
	resourceMap := map[string]unstructured.Unstructured{}
	// merge include and exclude label selector values
	ls := mergeLabelSectors(rule.MatchResources.Selector, rule.ExcludeResources.Selector)
	// list resources
	glog.V(4).Infof("get resources for kind %s, namespace %s, selector %v", kind, namespace, rule.MatchResources.Selector)
	list, err := client.ListResource(kind, namespace, ls)
	if err != nil {
		glog.Infof("unable to get resources: err %v", err)
		return nil
	}
	// filter based on name
	for _, r := range list.Items {
		// match name
		if rule.MatchResources.Name != "" {
			if !wildcard.Match(rule.MatchResources.Name, r.GetName()) {
				glog.V(4).Infof("skipping resource %s/%s due to include condition name=%s mismatch", r.GetNamespace(), r.GetName(), rule.MatchResources.Name)
				continue
			}
		}
		// exclude name
		if rule.ExcludeResources.Name != "" {
			if wildcard.Match(rule.ExcludeResources.Name, r.GetName()) {
				// fixed: log the exclude pattern (was logging the include pattern)
				glog.V(4).Infof("skipping resource %s/%s due to exclude condition name=%s match", r.GetNamespace(), r.GetName(), rule.ExcludeResources.Name)
				continue
			}
		}
		// Skip the filtered resources
		if utils.SkipFilteredResources(r.GetKind(), r.GetNamespace(), r.GetName(), filterK8Resources) {
			continue
		}
		//TODO check if the group version kind is present or not
		resourceMap[string(r.GetUID())] = r
	}
	return resourceMap
}
// merge b into a map
func mergeresources(a, b map[string]unstructured.Unstructured) {
for k, v := range b {
a[k] = v
}
}
// mergeLabelSectors combines an include label selector with a negated form of
// the exclude selector's MatchLabels (each exclude label k=v becomes a
// "k NotIn [v]" expression on the result).
//TODO: support exclude expressions in exclude
func mergeLabelSectors(include, exclude *metav1.LabelSelector) *metav1.LabelSelector {
	if exclude == nil {
		return include
	}
	// negate the exclude information
	// copy the label selector; DeepCopy on a nil selector yields nil, so fall
	// back to an empty selector before appending the negated exclude terms
	// (previously a nil include caused a nil-pointer dereference below)
	ls := include.DeepCopy()
	if ls == nil {
		ls = &metav1.LabelSelector{}
	}
	for k, v := range exclude.MatchLabels {
		lsreq := metav1.LabelSelectorRequirement{
			Key:      k,
			Operator: metav1.LabelSelectorOpNotIn,
			Values:   []string{v},
		}
		ls.MatchExpressions = append(ls.MatchExpressions, lsreq)
	}
	return ls
}
// kindIsExcluded reports whether kind appears in the exclusion list.
func kindIsExcluded(kind string, list []string) bool {
	for i := range list {
		if list[i] == kind {
			return true
		}
	}
	return false
}
// excludeNamespaces returns namespaces with every entry listed in excludeNs
// removed; when excludeNs is empty the input slice is returned unchanged.
func excludeNamespaces(namespaces, excludeNs []string) []string {
	if len(excludeNs) == 0 {
		return namespaces
	}
	filtered := []string{}
	for _, ns := range namespaces {
		if !utils.Contains(excludeNs, ns) {
			filtered = append(filtered, ns)
		}
	}
	return filtered
}
// getAllNamespaces returns the names of all namespaces in the cluster; on a
// list error it logs the error and returns an empty result.
func getAllNamespaces(client *client.Client) []string {
	var namespaces []string
	nsList, err := client.ListResource("Namespace", "", nil)
	if err != nil {
		glog.Error(err)
		return namespaces
	}
	for i := range nsList.Items {
		namespaces = append(namespaces, nsList.Items[i].GetName())
	}
	return namespaces
}
//NewResourceManager returns a ResourceManager whose cache is dropped every
// rebuildTime seconds.
func NewResourceManager(rebuildTime int64) *ResourceManager {
	// record the construction time so Drop knows when to rebuild
	return &ResourceManager{
		data:        make(map[string]interface{}),
		time:        time.Now(),
		rebuildTime: rebuildTime,
	}
}
// ResourceManager stores the details on already processed resources for caching
type ResourceManager struct {
// we drop and re-build the cache
// based on the memory consumer of by the map
data map[string]interface{}
mux sync.RWMutex
time time.Time
rebuildTime int64 // after how many seconds should we rebuild the cache
}
type resourceManager interface {
ProcessResource(policy, pv, kind, ns, name, rv string) bool
//TODO removeResource(kind, ns, name string) error
RegisterResource(policy, pv, kind, ns, name, rv string)
// reload
Drop()
}
//Drop clears the processed-resource cache once the configured rebuild
// interval (rebuildTime, in seconds) has elapsed since the last reset.
//TODO: or drop based on the size
func (rm *ResourceManager) Drop() {
	timeSince := time.Since(rm.time)
	glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince)
	glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second)
	if timeSince > time.Duration(rm.rebuildTime)*time.Second {
		rm.mux.Lock()
		defer rm.mux.Unlock()
		// replace the map wholesale so previously processed entries are forgotten
		rm.data = map[string]interface{}{}
		rm.time = time.Now()
		glog.V(4).Infof("dropping cache at time %v", rm.time)
	}
}
var empty struct{}
//RegisterResource stores if the policy is processed on this resource version
func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
rm.mux.Lock()
defer rm.mux.Unlock()
// add the resource
key := buildKey(policy, pv, kind, ns, name, rv)
rm.data[key] = empty
}
//ProcessResource returns true if the policy (at this resource version) has not
// yet been applied to the given resource version, i.e. the key is absent from
// the cache.
func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
	rm.mux.RLock()
	defer rm.mux.RUnlock()
	key := buildKey(policy, pv, kind, ns, name, rv)
	// idiomatic presence check (was `ok == false`)
	_, ok := rm.data[key]
	return !ok
}
// buildKey builds the cache key "policy/pv/kind/ns/name/rv" for a processed
// policy/resource-version pair.
func buildKey(policy, pv, kind, ns, name, rv string) string {
	key := policy
	for _, part := range []string{pv, kind, ns, name, rv} {
		key += "/" + part
	}
	return key
}

58
pkg/policy/report.go Normal file
View file

@ -0,0 +1,58 @@
package policy
import (
"fmt"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/policyviolation"
)
func (pc *PolicyController) report(policyInfos []info.PolicyInfo) {
// generate events
// generate policy violations
for _, policyInfo := range policyInfos {
// events
// success - policy applied on resource
// failure - policy/rule failed to apply on the resource
reportEvents(policyInfo, pc.eventGen)
// policy violations
// failure - policy/rule failed to apply on the resource
}
// generate policy violation
policyviolation.GeneratePolicyViolations(pc.pvListerSynced, pc.pvLister, pc.kyvernoClient, policyInfos)
}
//reportEvents generates failure events for a policy application: one event on
// the resource per failed rule, plus one summary event on the policy itself.
// Nothing is emitted when the policy applied successfully.
func reportEvents(policyInfo info.PolicyInfo, eventGen event.Interface) {
	if policyInfo.IsSuccessful() {
		return
	}
	glog.V(4).Infof("reporting results for policy %s application on resource %s/%s/%s", policyInfo.Name, policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName)
	for _, rule := range policyInfo.Rules {
		if rule.IsSuccessful() {
			continue
		}
		// event on the resource for this failed rule
		eventGen.Add(&event.Info{
			Kind:      policyInfo.RKind,
			Namespace: policyInfo.RNamespace,
			Name:      policyInfo.RName,
			Reason:    "Failure",
			Message:   fmt.Sprintf("policy %s (%s) rule %s failed to apply. %v", policyInfo.Name, rule.RuleType.String(), rule.Name, rule.GetErrorString()),
		})
	}
	// summary event on the policy listing all failed rules
	eventGen.Add(&event.Info{
		Kind:      "Policy",
		Namespace: "",
		Name:      policyInfo.Name,
		Reason:    "Failure",
		Message:   fmt.Sprintf("failed to apply rules %s on resource %s/%s/%s", policyInfo.FailedRules(), policyInfo.RKind, policyInfo.RNamespace, policyInfo.RName),
	})
}

165
pkg/policy/status.go Normal file
View file

@ -0,0 +1,165 @@
package policy
import (
"sync"
"time"
"github.com/golang/glog"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
)
//PolicyStatusAggregator stores information abt aggregation
type PolicyStatusAggregator struct {
// time since we start aggregating the stats
startTime time.Time
// channel to recieve stats
ch chan PolicyStat
//TODO: lock based on key, possibly sync.Map ?
//sync RW for policyData
mux sync.RWMutex
// stores aggregated stats for policy
policyData map[string]PolicyStatInfo
}
//NewPolicyStatAggregator returns a new policy status
func NewPolicyStatAggregator(client *kyvernoclient.Clientset,
// pInformer kyvernoinformer.PolicyInformer
) *PolicyStatusAggregator {
psa := PolicyStatusAggregator{
startTime: time.Now(),
ch: make(chan PolicyStat),
policyData: map[string]PolicyStatInfo{},
}
return &psa
}
//Run begins aggregator
func (psa *PolicyStatusAggregator) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
glog.V(4).Info("Started aggregator for policy status stats")
defer func() {
glog.V(4).Info("Shutting down aggregator for policy status stats")
}()
for i := 0; i < workers; i++ {
go wait.Until(psa.process, time.Second, stopCh)
}
}
func (psa *PolicyStatusAggregator) process() {
// As mutation and validation are handled seperately
// ideally we need to combine the exection time from both for a policy
// but its tricky to detect here the type of rules policy contains
// so we dont combine the results, but instead compute the execution time for
// mutation & validation rules seperately
for r := range psa.ch {
glog.V(4).Infof("recieved policy stats %v", r)
psa.aggregate(r)
}
}
// aggregate folds one PolicyStat sample into the stored totals for that policy
// under the write lock. Rule counts are summed; execution times are kept as a
// running two-point average (new = (old+sample)/2), which weights recent
// samples more heavily than a true mean — presumably intentional; confirm.
func (psa *PolicyStatusAggregator) aggregate(ps PolicyStat) {
	// the immediately-invoked func takes the lock inline; pairing it with the
	// deferred func below gives symmetric lock/unlock logging
	func() {
		glog.V(4).Infof("write lock update policy %s", ps.PolicyName)
		psa.mux.Lock()
	}()
	defer func() {
		glog.V(4).Infof("write Unlock update policy %s", ps.PolicyName)
		psa.mux.Unlock()
	}()
	info, ok := psa.policyData[ps.PolicyName]
	if !ok {
		// first sample for this policy: store it as-is
		psa.policyData[ps.PolicyName] = ps.Stats
		glog.V(4).Infof("added stats for policy %s", ps.PolicyName)
		return
	}
	// aggregate
	info.RulesAppliedCount = info.RulesAppliedCount + ps.Stats.RulesAppliedCount
	if ps.Stats.ResourceBlocked == 1 {
		info.ResourceBlocked++
	}
	var zeroDuration time.Duration
	if info.MutationExecutionTime != zeroDuration {
		info.MutationExecutionTime = (info.MutationExecutionTime + ps.Stats.MutationExecutionTime) / 2
		glog.V(4).Infof("updated avg mutation time %v", info.MutationExecutionTime)
	} else {
		info.MutationExecutionTime = ps.Stats.MutationExecutionTime
	}
	if info.ValidationExecutionTime != zeroDuration {
		info.ValidationExecutionTime = (info.ValidationExecutionTime + ps.Stats.ValidationExecutionTime) / 2
		glog.V(4).Infof("updated avg validation time %v", info.ValidationExecutionTime)
	} else {
		info.ValidationExecutionTime = ps.Stats.ValidationExecutionTime
	}
	if info.GenerationExecutionTime != zeroDuration {
		info.GenerationExecutionTime = (info.GenerationExecutionTime + ps.Stats.GenerationExecutionTime) / 2
		glog.V(4).Infof("updated avg generation time %v", info.GenerationExecutionTime)
	} else {
		info.GenerationExecutionTime = ps.Stats.GenerationExecutionTime
	}
	// write the updated aggregate back to the map (info is a copy)
	psa.policyData[ps.PolicyName] = info
	glog.V(4).Infof("updated stats for policy %s", ps.PolicyName)
}
//GetPolicyStats returns the policy stats
func (psa *PolicyStatusAggregator) GetPolicyStats(policyName string) PolicyStatInfo {
func() {
glog.V(4).Infof("read lock update policy %s", policyName)
psa.mux.RLock()
}()
defer func() {
glog.V(4).Infof("read Unlock update policy %s", policyName)
psa.mux.RUnlock()
}()
glog.V(4).Infof("read stats for policy %s", policyName)
return psa.policyData[policyName]
}
//RemovePolicyStats rmves policy stats records
func (psa *PolicyStatusAggregator) RemovePolicyStats(policyName string) {
func() {
glog.V(4).Infof("write lock update policy %s", policyName)
psa.mux.Lock()
}()
defer func() {
glog.V(4).Infof("write Unlock update policy %s", policyName)
psa.mux.Unlock()
}()
glog.V(4).Infof("removing stats for policy %s", policyName)
delete(psa.policyData, policyName)
}
//PolicyStatusInterface provides methods to modify policyStatus
type PolicyStatusInterface interface {
SendStat(stat PolicyStat)
// UpdateViolationCount(policyName string, pvList []*kyverno.PolicyViolation) error
}
//PolicyStat stored stats for policy
type PolicyStat struct {
PolicyName string
Stats PolicyStatInfo
}
type PolicyStatInfo struct {
MutationExecutionTime time.Duration
ValidationExecutionTime time.Duration
GenerationExecutionTime time.Duration
RulesAppliedCount int
ResourceBlocked int
}
//SendStat sends the stat information for aggregation
func (psa *PolicyStatusAggregator) SendStat(stat PolicyStat) {
glog.V(4).Infof("sending policy stats: %v", stat)
// Send over channel
psa.ch <- stat
}
//GetPolicyStatusAggregator returns interface to send policy status stats
func (pc *PolicyController) GetPolicyStatusAggregator() PolicyStatusInterface {
return pc.statusAggregator
}

View file

@ -0,0 +1,63 @@
package policystore
import (
"sync"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type Interface interface {
Register(policy *kyverno.Policy) error
UnRegister(policy *kyverno.Policy) error // check if the controller can see the policy spec for details?
LookUp(kind, namespace, name string, ls *metav1.LabelSelector) // returns a list of policies and rules that apply
}
type Store struct {
data map[string]string
mux sync.RWMutex
}
func NewStore() *Store {
s := Store{
data: make(map[string]string), //key: kind, value is the name of the policy
}
return &s
}
var empty struct{}
// Register records, for every resource kind the policy's rules match (minus
// the excluded kinds), that this policy applies to the kind.
// NOTE(review): s.data maps kind -> policy name, so a later policy matching
// the same kind overwrites the earlier entry — confirm whether multiple
// policies per kind should be retained.
func (s *Store) Register(policy *kyverno.Policy) error {
	// check if this policy is already registered for this resource kind
	kinds := map[string]string{}
	// get kinds from the rules
	for _, r := range policy.Spec.Rules {
		rkinds := map[string]string{}
		// matching resources
		for _, k := range r.MatchResources.Kinds {
			rkinds[k] = policy.Name
		}
		for _, k := range r.ExcludeResources.Kinds {
			delete(rkinds, k)
		}
		// merge the result
		mergeMap(kinds, rkinds)
	}
	// have all the kinds that the policy has rule on
	s.mux.Lock()
	defer s.mux.Unlock()
	// merge kinds
	mergeMap(s.data, kinds)
	return nil
}
// mergeMap copies every key/value pair of src into dst, overwriting any
// existing keys.
func mergeMap(dst, src map[string]string) {
	for key, value := range src {
		dst[key] = value
	}
}

View file

@ -0,0 +1,298 @@
package policyviolation
import (
"fmt"
"reflect"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
const (
// maxRetries is the number of times a PolicyViolation will be retried before it is dropped out of the queue.
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
// a deployment is going to be requeued:
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
)
var controllerKind = kyverno.SchemeGroupVersion.WithKind("PolicyViolation")
// PolicyViolationController manages the policy violation resource
// - sync the lastupdate time
// - check if the resource is active
type PolicyViolationController struct {
client *client.Client
kyvernoClient *kyvernoclient.Clientset
eventRecorder record.EventRecorder
syncHandler func(pKey string) error
enqueuePolicyViolation func(policy *kyverno.PolicyViolation)
// Policys that need to be synced
queue workqueue.RateLimitingInterface
// pvLister can list/get policy violation from the shared informer's store
pvLister kyvernolister.PolicyViolationLister
// pLister can list/get policy from the shared informer's store
pLister kyvernolister.PolicyLister
// pListerSynced returns true if the Policy store has been synced at least once
pListerSynced cache.InformerSynced
// pvListerSynced retrns true if the Policy store has been synced at least once
pvListerSynced cache.InformerSynced
//pvControl is used for updating status/cleanup policy violation
pvControl PVControlInterface
}
//NewPolicyViolationController creates a new NewPolicyViolationController
func NewPolicyViolationController(client *client.Client, kyvernoClient *kyvernoclient.Clientset, pInformer kyvernoinformer.PolicyInformer, pvInformer kyvernoinformer.PolicyViolationInformer) (*PolicyViolationController, error) {
// Event broad caster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventInterface, err := client.GetEventsInterface()
if err != nil {
return nil, err
}
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
pvc := PolicyViolationController{
kyvernoClient: kyvernoClient,
client: client,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policyviolation_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policyviolation"),
}
pvc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pvc.eventRecorder}
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pvc.addPolicyViolation,
UpdateFunc: pvc.updatePolicyViolation,
DeleteFunc: pvc.deletePolicyViolation,
})
pvc.enqueuePolicyViolation = pvc.enqueue
pvc.syncHandler = pvc.syncPolicyViolation
pvc.pLister = pInformer.Lister()
pvc.pvLister = pvInformer.Lister()
pvc.pListerSynced = pInformer.Informer().HasSynced
pvc.pvListerSynced = pvInformer.Informer().HasSynced
return &pvc, nil
}
func (pvc *PolicyViolationController) addPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.PolicyViolation)
glog.V(4).Infof("Adding PolicyViolation %s", pv.Name)
pvc.enqueuePolicyViolation(pv)
}
func (pvc *PolicyViolationController) updatePolicyViolation(old, cur interface{}) {
oldPv := old.(*kyverno.PolicyViolation)
curPv := cur.(*kyverno.PolicyViolation)
glog.V(4).Infof("Updating Policy Violation %s", oldPv.Name)
if err := pvc.syncLastUpdateTimeStatus(curPv, oldPv); err != nil {
glog.Errorf("Failed to update lastUpdateTime in PolicyViolation %s status: %v", curPv.Name, err)
}
pvc.enqueuePolicyViolation(curPv)
}
// deletePolicyViolation enqueues a deleted PolicyViolation, unwrapping the
// DeletedFinalStateUnknown tombstone the informer delivers when a delete was
// missed (e.g. during a relist).
func (pvc *PolicyViolationController) deletePolicyViolation(obj interface{}) {
	pv, ok := obj.(*kyverno.PolicyViolation)
	if !ok {
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			// log directly instead of constructing a throwaway error value
			glog.Infof("Couldn't get object from tombstone %#v", obj)
			return
		}
		pv, ok = tombstone.Obj.(*kyverno.PolicyViolation)
		if !ok {
			glog.Infof("Tombstone contained object that is not a PolicyViolation %#v", obj)
			return
		}
	}
	glog.V(4).Infof("Deleting PolicyViolation %s", pv.Name)
	pvc.enqueuePolicyViolation(pv)
}
func (pvc *PolicyViolationController) enqueue(policyViolation *kyverno.PolicyViolation) {
key, err := cache.MetaNamespaceKeyFunc(policyViolation)
if err != nil {
glog.Error(err)
return
}
pvc.queue.Add(key)
}
// Run begins watching and syncing.
func (pvc *PolicyViolationController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer pvc.queue.ShutDown()
glog.Info("Starting policyviolation controller")
defer glog.Info("Shutting down policyviolation controller")
if !cache.WaitForCacheSync(stopCh, pvc.pListerSynced, pvc.pvListerSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(pvc.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
// The loop terminates when processNextWorkItem reports the queue has shut down.
func (pvc *PolicyViolationController) worker() {
	for pvc.processNextWorkItem() {
	}
}
// processNextWorkItem pulls one key off the queue, syncs it, and reports the
// outcome to the retry logic. It returns false only when the queue has been
// shut down.
func (pvc *PolicyViolationController) processNextWorkItem() bool {
	key, shutdown := pvc.queue.Get()
	if shutdown {
		return false
	}
	defer pvc.queue.Done(key)
	pvc.handleErr(pvc.syncHandler(key.(string)), key)
	return true
}
// handleErr implements retry-with-backoff for sync failures: transient errors
// are requeued with rate limiting until maxRetries is exhausted, after which
// the key is dropped and the error surfaced via HandleError.
func (pvc *PolicyViolationController) handleErr(err error, key interface{}) {
	if err == nil {
		pvc.queue.Forget(key)
		return
	}
	if pvc.queue.NumRequeues(key) >= maxRetries {
		// Give up on this key; it keeps failing.
		utilruntime.HandleError(err)
		glog.V(2).Infof("Dropping policyviolation %q out of the queue: %v", key, err)
		pvc.queue.Forget(key)
		return
	}
	glog.V(2).Infof("Error syncing PolicyViolation %v: %v", key, err)
	pvc.queue.AddRateLimited(key)
}
// syncPolicyViolation is the work-queue sync handler. It loads the violation
// by key from the lister (NotFound means it was already deleted and is a
// no-op), prunes the violation if its target resource no longer exists, and
// then refreshes the status subresource.
func (pvc *PolicyViolationController) syncPolicyViolation(key string) error {
	startTime := time.Now()
	glog.V(4).Infof("Started syncing policy violation %q (%v)", key, startTime)
	defer func() {
		glog.V(4).Infof("Finished syncing policy violation %q (%v)", key, time.Since(startTime))
	}()
	policyViolation, err := pvc.pvLister.Get(key)
	if errors.IsNotFound(err) {
		glog.V(2).Infof("PolicyViolation %v has been deleted", key)
		return nil
	}
	if err != nil {
		return err
	}
	// Deep-copy otherwise we are mutating our cache.
	// TODO: Deep-copy only when needed.
	pv := policyViolation.DeepCopy()
	// TODO: Update Status to update ObserverdGeneration
	// TODO: check if the policy violation refers to a resource thats active ? // done by policy controller
	// TODO: remove the PV, if the corresponding policy is not present
	// TODO: additional check on deleted webhook for a resource, to delete a policy violation it has a policy violation
	// list the resource with label selectors, but this can be expensive for each delete request of a resource
	if err := pvc.syncActiveResource(pv); err != nil {
		glog.V(4).Infof("not syncing policy violation status")
		return err
	}
	return pvc.syncStatusOnly(pv)
}
// syncActiveResource checks whether the resource referenced by the violation
// still exists; if it was deleted, the violation is stale and is removed too.
func (pvc *PolicyViolationController) syncActiveResource(curPv *kyverno.PolicyViolation) error {
	rspec := curPv.Spec.ResourceSpec
	_, err := pvc.client.GetResource(rspec.Kind, rspec.Namespace, rspec.Name)
	switch {
	case errors.IsNotFound(err):
		// TODO: does it help to retry?
		// The referenced resource is gone — delete the violation.
		if err := pvc.pvControl.RemovePolicyViolation(curPv.Name); err != nil {
			glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
			return err
		}
		glog.V(4).Infof("removing policy violation %s as the corresponding resource %s/%s/%s does not exist anymore", curPv.Name, rspec.Kind, rspec.Namespace, rspec.Name)
		return nil
	case err != nil:
		glog.V(4).Infof("error while retrieved resource %s/%s/%s: %v", rspec.Kind, rspec.Namespace, rspec.Name, err)
		return err
	}
	//TODO- if the policy is not present, remove the policy violation
	return nil
}
//syncStatusOnly updates the policyviolation status subresource
// status:
// NOTE(review): currently a stub — the status calculation is not implemented
// yet (see the commented-out calculateStatus call).
func (pvc *PolicyViolationController) syncStatusOnly(curPv *kyverno.PolicyViolation) error {
	// newStatus := calculateStatus(pv)
	return nil
}
//TODO: think this through again
//syncLastUpdateTimeStatus updates the policyviolation lastUpdateTime if anything in ViolationSpec changed
// - lastUpdateTime : (time stamp when the policy violation changed)
func (pvc *PolicyViolationController) syncLastUpdateTimeStatus(curPv *kyverno.PolicyViolation, oldPv *kyverno.PolicyViolation) error {
	// check if there is any change in policy violation information
	if !updated(curPv, oldPv) {
		return nil
	}
	// Deep-copy before mutating: curPv comes from the shared informer cache and
	// assigning to its Status in place would corrupt the cached object.
	newPolicyViolation := curPv.DeepCopy()
	newPolicyViolation.Status = kyverno.PolicyViolationStatus{LastUpdateTime: metav1.Now()}
	return pvc.pvControl.UpdateStatusPolicyViolation(newPolicyViolation)
}
// updated reports whether the violation spec changed between the old and
// current objects; status-only changes do not count.
func updated(curPv *kyverno.PolicyViolation, oldPv *kyverno.PolicyViolation) bool {
	//TODO check if owner reference changed, then should we update the lastUpdateTime as well ?
	return !reflect.DeepEqual(curPv.Spec, oldPv.Spec)
}
// PVControlInterface abstracts the writes this controller performs on
// PolicyViolation resources, so tests can substitute a fake implementation.
type PVControlInterface interface {
	UpdateStatusPolicyViolation(newPv *kyverno.PolicyViolation) error
	RemovePolicyViolation(name string) error
}

// RealPVControl is the default implementation of PVControlInterface.
type RealPVControl struct {
	Client   kyvernoclient.Interface
	Recorder record.EventRecorder
}

//UpdateStatusPolicyViolation updates the status for policy violation
func (r RealPVControl) UpdateStatusPolicyViolation(newPv *kyverno.PolicyViolation) error {
	_, err := r.Client.KyvernoV1alpha1().PolicyViolations().UpdateStatus(newPv)
	return err
}

//RemovePolicyViolation removes the policy violation
func (r RealPVControl) RemovePolicyViolation(name string) error {
	return r.Client.KyvernoV1alpha1().PolicyViolations().Delete(name, &metav1.DeleteOptions{})
}

View file

@ -0,0 +1,142 @@
package policyviolation
import (
"fmt"
"reflect"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
"github.com/nirmata/kyverno/pkg/info"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/tools/cache"
)
//BuildPolicyViolation returns a PolicyViolation value for the given policy,
//resource and failed rules. The object carries only a generateName ("pv-");
//the API server assigns the final name on create.
func BuildPolicyViolation(policy string, resource kyverno.ResourceSpec, fRules []kyverno.ViolatedRule) kyverno.PolicyViolation {
	spec := kyverno.PolicyViolationSpec{
		Policy:        policy,
		ResourceSpec:  resource,
		ViolatedRules: fRules,
	}
	pv := kyverno.PolicyViolation{Spec: spec}
	//TODO: check if this can be removed or use unstructured?
	// pv.Kind = "PolicyViolation"
	pv.SetGenerateName("pv-")
	return pv
}
// buildPolicyViolationsForAPolicy returns a policy violation object if there are any rules that fail
// (the zero PolicyViolation is returned when every rule succeeded).
func buildPolicyViolationsForAPolicy(pi info.PolicyInfo) kyverno.PolicyViolation {
	// Collect every rule that did not apply successfully.
	var failed []kyverno.ViolatedRule
	for _, rule := range pi.Rules {
		if rule.IsSuccessful() {
			continue
		}
		failed = append(failed, kyverno.ViolatedRule{
			Name:    rule.Name,
			Message: rule.GetErrorString(),
			Type:    rule.RuleType.String(),
		})
	}
	var pv kyverno.PolicyViolation
	if len(failed) > 0 {
		glog.V(4).Infof("building policy violation for policy %s on resource %s/%s/%s", pi.Name, pi.RKind, pi.RNamespace, pi.RName)
		pv = BuildPolicyViolation(pi.Name, kyverno.ResourceSpec{
			Kind:      pi.RKind,
			Namespace: pi.RNamespace,
			Name:      pi.RName,
		}, failed)
	}
	return pv
}
//GeneratePolicyViolations generates PolicyViolation resources for the rules that failed.
//For each failed policy it either creates a new violation, leaves an unchanged
//existing one alone, or updates the existing violation's spec.
//TODO: check if pvListerSynced is needed
func GeneratePolicyViolations(pvListerSynced cache.InformerSynced, pvLister kyvernolister.PolicyViolationLister, client *kyvernoclient.Clientset, policyInfos []info.PolicyInfo) {
	// Build the desired violations for every policy that was not successful.
	var pvs []kyverno.PolicyViolation
	for _, policyInfo := range policyInfos {
		if !policyInfo.IsSuccessful() {
			if pv := buildPolicyViolationsForAPolicy(policyInfo); !reflect.DeepEqual(pv, kyverno.PolicyViolation{}) {
				pvs = append(pvs, pv)
			}
		}
	}
	for _, newPv := range pvs {
		glog.V(4).Infof("creating policyViolation resource for policy %s and resource %s/%s/%s", newPv.Spec.Policy, newPv.Spec.Kind, newPv.Spec.Namespace, newPv.Spec.Name)
		// check if there was a previous violation for policy & resource combination
		curPv, err := getExistingPolicyViolationIfAny(pvListerSynced, pvLister, newPv)
		if err != nil {
			continue
		}
		if curPv == nil {
			// no existing policy violation, create a new one
			if _, err := client.KyvernoV1alpha1().PolicyViolations().Create(&newPv); err != nil {
				glog.Error(err)
			}
			continue
		}
		if reflect.DeepEqual(curPv.Spec, newPv.Spec) {
			// if they are equal there has been no change so dont update the polivy violation
			glog.Infof("policy violation spec %v did not change so not updating it", newPv.Spec)
			continue
		}
		// Spec changed: update the EXISTING stored object. newPv carries only a
		// generateName (no Name), so calling Update with it would fail — copy
		// the new spec onto a deep copy of the current object instead.
		updatePv := curPv.DeepCopy()
		updatePv.Spec = newPv.Spec
		if _, err := client.KyvernoV1alpha1().PolicyViolations().Update(updatePv); err != nil {
			glog.Error(err)
			continue
		}
	}
}
//TODO: change the name
//getExistingPolicyViolationIfAny looks up a violation for the same policy and
//resource combination via label selectors. It returns nil (and no error) when
//none exists, and an error when the lookup fails or more than one matches.
func getExistingPolicyViolationIfAny(pvListerSynced cache.InformerSynced, pvLister kyvernolister.PolicyViolationLister, newPv kyverno.PolicyViolation) (*kyverno.PolicyViolation, error) {
	// TODO: check for existing ov using label selectors on resource and policy
	labelMap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
	ls := &metav1.LabelSelector{}
	err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
	if err != nil {
		glog.Errorf("failed to generate label selector of Policy name %s: %v", newPv.Spec.Policy, err)
		return nil, err
	}
	policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		glog.Errorf("invalid label selector: %v", err)
		return nil, err
	}
	//TODO: sync the cache before reading from it ?
	// (previously a commented-out WaitForCacheSync block lived here; the lister
	// is read as-is and may be marginally stale)
	pvs, err := pvLister.List(policyViolationSelector)
	if err != nil {
		glog.Errorf("unable to list policy violations with label selector %v: %v", policyViolationSelector, err)
		return nil, err
	}
	//TODO: ideally there should be only one policy violation returned
	if len(pvs) > 1 {
		glog.Errorf("more than one policy violation exists with labels %v", labelMap)
		return nil, fmt.Errorf("more than one policy violation exists with labels %v", labelMap)
	}
	if len(pvs) == 0 {
		glog.Infof("policy violation does not exist with labels %v", labelMap)
		return nil, nil
	}
	return pvs[0], nil
}

View file

@ -1,21 +0,0 @@
package result
//Reason types of Result Reasons
type Reason int

const (
	//Success policy applied
	Success Reason = iota
	//Violation there is a violation of policy
	Violation
	//Failed the request to create/update the resource was blocked(generated from admission-controller)
	Failed
)

// String returns the display name of the reason. Values outside the declared
// range panic (index out of range), matching the original table lookup.
func (r Reason) String() string {
	names := [...]string{
		"Success",
		"Violation",
		"Failed",
	}
	return names[r]
}

View file

@ -1,182 +0,0 @@
package result
import (
"fmt"
)
// Indent acts for indenting in result hierarchy
type Indent string

const (
	// SpaceIndent means 4 spaces
	// NOTE(review): the rendered source shows a single space here; the comment
	// says 4 spaces — the original literal may have been collapsed. Verify.
	SpaceIndent Indent = " "
	// TabIndent is a tab symbol
	TabIndent Indent = "\t"
)

// Result is an interface that is used for result polymorphic behavior
// (implemented by RuleApplicationResult and CompositeResult).
type Result interface {
	String() string
	StringWithIndent(indent string) string
	GetReason() Reason
	ToError() error
}

// CompositeResult is used for result hierarchy
// It aggregates child Results under a single message and reason.
type CompositeResult struct {
	Message  string
	Reason   Reason
	Children []Result
}

// RuleApplicationResult represents elementary result that is produced by PolicyEngine
// TODO: It can be used to create Kubernetes Results, so make method for this
type RuleApplicationResult struct {
	PolicyRule string
	Reason     Reason
	Messages   []string
}
//NewRuleApplicationResult creates a new rule application result for the named
//rule, starting in the Success state with no messages.
func NewRuleApplicationResult(ruleName string) RuleApplicationResult {
	return RuleApplicationResult{
		PolicyRule: ruleName,
		Reason:     Success,
		Messages:   []string{},
	}
}
// StringWithIndent makes result string where each
// line is prepended with specified indent; messages are numbered one level
// deeper, and the trailing newline is trimmed.
func (e *RuleApplicationResult) StringWithIndent(indent string) string {
	childIndent := indent + string(SpaceIndent)
	message := fmt.Sprintf("%s* %s: policy rule - %s:\n", indent, e.Reason.String(), e.PolicyRule)
	for i, m := range e.Messages {
		message += fmt.Sprintf("%s%d. %s\n", childIndent, i+1, m)
	}
	// drop the final line feed
	if len(message) != 0 {
		message = message[:len(message)-1]
	}
	return message
}
// String makes result string
// for writing it to logs (equivalent to StringWithIndent with no indent).
func (e *RuleApplicationResult) String() string {
	return e.StringWithIndent("")
}
// ToError returns the error if reason is not success; nil otherwise.
func (e *RuleApplicationResult) ToError() error {
	if e.Reason != Success {
		// Use an explicit %s verb: e.String() may contain '%' characters,
		// which fmt.Errorf(e.String()) would misinterpret as format verbs.
		return fmt.Errorf("%s", e.String())
	}
	return nil
}
//GetReason returns reason
func (e *RuleApplicationResult) GetReason() Reason {
	return e.Reason
}

//AddMessagef Adds formatted message to this result
// (arguments follow fmt.Sprintf conventions).
func (e *RuleApplicationResult) AddMessagef(message string, a ...interface{}) {
	e.Messages = append(e.Messages, fmt.Sprintf(message, a...))
}
//FailWithMessagef Sets the Reason Failed and adds formatted message to this result
func (e *RuleApplicationResult) FailWithMessagef(message string, a ...interface{}) {
	e.Reason = Failed
	e.AddMessagef(message, a...)
}
//MergeWith Takes messages and higher reason from another RuleApplicationResult.
//A nil argument is a no-op.
func (e *RuleApplicationResult) MergeWith(other *RuleApplicationResult) {
	// Guard the whole merge: the previous code only nil-checked the message
	// append and then dereferenced other.Reason unconditionally, panicking on
	// a nil argument.
	if other == nil {
		return
	}
	e.Messages = append(e.Messages, other.Messages...)
	if other.Reason > e.Reason {
		e.Reason = other.Reason
	}
}
// StringWithIndent makes result string where each
// line is prepended with specified indent; children render one level deeper
// and the trailing newline is trimmed.
func (e *CompositeResult) StringWithIndent(indent string) string {
	childIndent := indent + string(SpaceIndent)
	message := fmt.Sprintf("%s- %s: %s\n", indent, e.Reason, e.Message)
	for _, child := range e.Children {
		message += child.StringWithIndent(childIndent) + "\n"
	}
	// drop the final line feed
	if len(message) != 0 {
		message = message[:len(message)-1]
	}
	return message
}
// String makes result string
// for writing it to logs (equivalent to StringWithIndent with no indent).
func (e *CompositeResult) String() string {
	return e.StringWithIndent("")
}
//ToError returns error if reason is not success; nil otherwise.
func (e *CompositeResult) ToError() error {
	if e.Reason != Success {
		// Use an explicit %s verb: e.String() may contain '%' characters,
		// which fmt.Errorf(e.String()) would misinterpret as format verbs.
		return fmt.Errorf("%s", e.String())
	}
	return nil
}
//GetReason returns reason
func (e *CompositeResult) GetReason() Reason {
	return e.Reason
}
//NewPolicyApplicationResult creates a new policy application result rooted at
//the given policy name, starting in the Success state.
func NewPolicyApplicationResult(policyName string) Result {
	result := CompositeResult{
		Message: fmt.Sprintf("policy - %s:", policyName),
		Reason:  Success,
	}
	return &result
}
//NewAdmissionResult creates a new admission result keyed by the admission
//request UID, starting in the Success state.
func NewAdmissionResult(requestUID string) Result {
	result := CompositeResult{
		Message: fmt.Sprintf("For resource with UID - %s:", requestUID),
		Reason:  Success,
	}
	return &result
}
// Append returns CompositeResult with target and source
// Or appends source to target if it is composite result
// If the source reason is more important than target reason,
// target takes the reason of the source.
func Append(target Result, source Result) Result {
	reason := target.GetReason()
	if srcReason := source.GetReason(); srcReason > reason {
		reason = srcReason
	}
	// If target is already a composite, mutate it in place.
	if composite, ok := target.(*CompositeResult); ok {
		composite.Children = append(composite.Children, source)
		composite.Reason = reason
		return composite
	}
	// Otherwise wrap both results in a fresh composite.
	return &CompositeResult{
		Children: []Result{target, source},
		Reason:   reason,
	}
}

View file

@ -1,54 +0,0 @@
package result
import (
"testing"
"gotest.tools/assert"
)
// TestAppend_TwoResultObjects verifies that appending two non-composite
// results wraps them both in a new CompositeResult, preserving order and
// message content.
func TestAppend_TwoResultObjects(t *testing.T) {
	firstRuleApplicationResult := RuleApplicationResult{
		Reason: Failed,
		Messages: []string{
			"1. Test",
			"2. Toast",
		},
	}
	secondRuleApplicationResult := RuleApplicationResult{
		Reason: Success,
		Messages: []string{
			"1. Kyverno",
			"2. KubePolicy",
		},
	}
	result := Append(&firstRuleApplicationResult, &secondRuleApplicationResult)
	composite, ok := result.(*CompositeResult)
	assert.Assert(t, ok)
	assert.Equal(t, len(composite.Children), 2)
	RuleApplicationResult, ok := composite.Children[0].(*RuleApplicationResult)
	assert.Assert(t, ok)
	assert.Equal(t, RuleApplicationResult.Messages[1], "2. Toast")
}
// TestAppend_FirstObjectIsComposite verifies that appending to an existing
// CompositeResult adds the new result as a child of that same composite.
func TestAppend_FirstObjectIsComposite(t *testing.T) {
	composite := &CompositeResult{}
	firstRuleApplicationResult := RuleApplicationResult{
		Reason: Failed,
		Messages: []string{
			"1. Test",
			"2. Toast",
		},
	}
	result := Append(composite, &firstRuleApplicationResult)
	composite, ok := result.(*CompositeResult)
	// The type-assertion result was previously never checked.
	assert.Assert(t, ok)
	assert.Equal(t, len(composite.Children), 1)
	RuleApplicationResult, ok := composite.Children[0].(*RuleApplicationResult)
	assert.Assert(t, ok)
	assert.Equal(t, RuleApplicationResult.Messages[1], "2. Toast")
}

View file

@ -1,57 +0,0 @@
package sharedinformer
import (
"fmt"
policyclientset "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
informers "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
infomertypes "github.com/nirmata/kyverno/pkg/client/informers/externalversions/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
)
//PolicyInformer access policy informers
type PolicyInformer interface {
	GetLister() v1alpha1.PolicyLister
	GetInfomer() cache.SharedIndexInformer
}

// SharedInfomer access shared informers
// NOTE(review): the name carries a typo ("Infomer") but is part of the public
// API surface, so it cannot be renamed here.
type SharedInfomer interface {
	PolicyInformer
	Run(stopCh <-chan struct{})
}

// sharedInfomer wraps the generated SharedInformerFactory for kyverno policies.
type sharedInfomer struct {
	policyInformerFactory informers.SharedInformerFactory
}
//NewSharedInformerFactory returns shared informer wired to a policy clientset
//built from the given rest config.
func NewSharedInformerFactory(clientConfig *rest.Config) (SharedInfomer, error) {
	// create policy client
	policyClientset, err := policyclientset.NewForConfig(clientConfig)
	if err != nil {
		// Error strings are lowercase and carry no trailing newline so they
		// compose cleanly when wrapped by callers.
		return nil, fmt.Errorf("error creating policyClient: %v", err)
	}
	//TODO: replace with NewSharedInformerFactoryWithOptions
	policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, 0)
	return &sharedInfomer{
		policyInformerFactory: policyInformerFactory,
	}, nil
}
// Run starts all informers registered with the underlying factory; they stop
// when stopCh closes.
func (si *sharedInfomer) Run(stopCh <-chan struct{}) {
	si.policyInformerFactory.Start(stopCh)
}

// getInfomer returns the typed Policy informer from the factory.
func (si *sharedInfomer) getInfomer() infomertypes.PolicyInformer {
	return si.policyInformerFactory.Kyverno().V1alpha1().Policies()
}

// GetInfomer exposes the shared index informer for Policies.
func (si *sharedInfomer) GetInfomer() cache.SharedIndexInformer {
	return si.getInfomer().Informer()
}

// GetLister exposes the Policy lister backed by the informer cache.
func (si *sharedInfomer) GetLister() v1alpha1.PolicyLister {
	return si.getInfomer().Lister()
}

View file

@ -1,15 +0,0 @@
package sharedinformer
import (
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/fake"
informers "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
"k8s.io/apimachinery/pkg/runtime"
)
// NewFakeSharedInformerFactory builds a SharedInfomer backed by a fake policy
// clientset pre-loaded with the given objects, for use in tests.
func NewFakeSharedInformerFactory(objects ...runtime.Object) (SharedInfomer, error) {
	factory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(objects...), 0)
	return &sharedInfomer{policyInformerFactory: factory}, nil
}

View file

@ -2,15 +2,14 @@ package testrunner
import (
"fmt"
"reflect"
"strconv"
"testing"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
ospath "path"
"github.com/golang/glog"
pt "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/info"
@ -22,7 +21,7 @@ type test struct {
t *testing.T
testCase *testCase
// input
policy *pt.Policy
policy *kyverno.Policy
tResource *resourceInfo
loadResources []*resourceInfo
// expected
@ -64,7 +63,7 @@ func (t *test) run() {
t.checkGenerationResult(client, policyInfo)
}
func (t *test) checkMutationResult(pr *resourceInfo, policyInfo *info.PolicyInfo) {
func (t *test) checkMutationResult(pr *resourceInfo, policyInfo info.PolicyInfo) {
if t.testCase.Expected.Mutation == nil {
glog.Info("No Mutation check defined")
return
@ -91,12 +90,12 @@ func (t *test) overAllPass(result bool, expected string) {
}
}
func (t *test) compareRules(ruleInfos []*info.RuleInfo, rules []tRules) {
func (t *test) compareRules(ruleInfos []info.RuleInfo, rules []tRules) {
// Compare the rules specified in the expected against the actual rule info returned by the apply policy
for _, eRule := range rules {
// Look-up the rule from the policy info
rule := lookUpRule(eRule.Name, ruleInfos)
if rule == nil {
if reflect.DeepEqual(rule, info.RuleInfo{}) {
t.t.Errorf("Rule with name %s not found", eRule.Name)
continue
}
@ -118,16 +117,17 @@ func (t *test) compareRules(ruleInfos []*info.RuleInfo, rules []tRules) {
}
}
func lookUpRule(name string, ruleInfos []*info.RuleInfo) *info.RuleInfo {
func lookUpRule(name string, ruleInfos []info.RuleInfo) info.RuleInfo {
for _, r := range ruleInfos {
if r.Name == name {
return r
}
}
return nil
return info.RuleInfo{}
}
func (t *test) checkValidationResult(policyInfo *info.PolicyInfo) {
func (t *test) checkValidationResult(policyInfo info.PolicyInfo) {
if t.testCase.Expected.Validation == nil {
glog.Info("No Validation check defined")
return
@ -137,7 +137,7 @@ func (t *test) checkValidationResult(policyInfo *info.PolicyInfo) {
t.compareRules(policyInfo.Rules, t.testCase.Expected.Validation.Rules)
}
func (t *test) checkGenerationResult(client *client.Client, policyInfo *info.PolicyInfo) {
func (t *test) checkGenerationResult(client *client.Client, policyInfo info.PolicyInfo) {
if t.testCase.Expected.Generation == nil {
glog.Info("No Generate check defined")
return
@ -162,11 +162,12 @@ func (t *test) checkGenerationResult(client *client.Client, policyInfo *info.Pol
}
}
func (t *test) applyPolicy(policy *pt.Policy,
func (t *test) applyPolicy(policy *kyverno.Policy,
tresource *resourceInfo,
client *client.Client) (*resourceInfo, *info.PolicyInfo, error) {
client *client.Client) (*resourceInfo, info.PolicyInfo, error) {
// apply policy on the trigger resource
// Mutate
var zeroPolicyInfo info.PolicyInfo
var err error
rawResource := tresource.rawResource
rname := engine.ParseNameFromObject(rawResource)
@ -177,42 +178,43 @@ func (t *test) applyPolicy(policy *pt.Policy,
rname,
rns,
policy.Spec.ValidationFailureAction)
resource, err := ConvertToUnstructured(rawResource)
if err != nil {
return nil, zeroPolicyInfo, err
}
// Apply Mutation Rules
patches, ruleInfos := engine.Mutate(*policy, rawResource, *tresource.gvk)
policyInfo.AddRuleInfos(ruleInfos)
engineResponse := engine.Mutate(*policy, *resource)
// patches, ruleInfos := engine.Mutate(*policy, rawResource, *tresource.gvk)
policyInfo.AddRuleInfos(engineResponse.RuleInfos)
// TODO: only validate if there are no errors in mutate, why?
if policyInfo.IsSuccessful() {
if len(patches) != 0 {
rawResource, err = engine.ApplyPatches(rawResource, patches)
if len(engineResponse.Patches) != 0 {
rawResource, err = engine.ApplyPatches(rawResource, engineResponse.Patches)
if err != nil {
return nil, nil, err
return nil, zeroPolicyInfo, err
}
}
}
// Validate
ruleInfos, err = engine.Validate(*policy, rawResource, *tresource.gvk)
policyInfo.AddRuleInfos(ruleInfos)
engineResponse = engine.Validate(*policy, *resource)
policyInfo.AddRuleInfos(engineResponse.RuleInfos)
if err != nil {
return nil, nil, err
return nil, zeroPolicyInfo, err
}
if rkind == "Namespace" {
if client != nil {
// convert []byte to unstructured
unstr := unstructured.Unstructured{}
err := unstr.UnmarshalJSON(rawResource)
if err != nil {
glog.Error(err)
}
ruleInfos := engine.Generate(client, policy, unstr)
policyInfo.AddRuleInfos(ruleInfos)
engineResponse := engine.Generate(client, *policy, *resource)
policyInfo.AddRuleInfos(engineResponse.RuleInfos)
}
}
// Generate
// transform the patched Resource into resource Info
ri, err := extractResourceRaw(rawResource)
if err != nil {
return nil, nil, err
return nil, zeroPolicyInfo, err
}
// return the results
return ri, policyInfo, nil

View file

@ -7,7 +7,7 @@ import (
ospath "path"
"github.com/golang/glog"
pt "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
yaml "k8s.io/apimachinery/pkg/util/yaml"
@ -117,8 +117,8 @@ func (tc *testCase) loadTriggerResource(ap string) (*resourceInfo, error) {
}
// Loads a single policy
func (tc *testCase) loadPolicy(file string) (*pt.Policy, error) {
p := &pt.Policy{}
func (tc *testCase) loadPolicy(file string) (*kyverno.Policy, error) {
p := &kyverno.Policy{}
data, err := LoadFile(file)
if err != nil {
return nil, err

View file

@ -3,5 +3,7 @@ package testrunner
import "testing"
// TestCLI runs the CLI scenario suite; currently skipped pending a
// test-runner redesign.
func TestCLI(t *testing.T) {
	//https://github.com/nirmata/kyverno/issues/301
	t.Skip("skipping testrunner as this needs a re-design")
	runner(t, "/test/scenarios/cli")
}

View file

@ -121,3 +121,13 @@ func ParseNamespaceFromObject(bytes []byte) string {
}
return ""
}
// ConvertToUnstructured parses raw JSON bytes into an unstructured Kubernetes
// resource; parse failures are logged at V(4) and returned.
func ConvertToUnstructured(data []byte) (*unstructured.Unstructured, error) {
	resource := &unstructured.Unstructured{}
	if err := resource.UnmarshalJSON(data); err != nil {
		glog.V(4).Infof("failed to unmarshall resource: %v", err)
		return nil, err
	}
	return resource, nil
}

View file

@ -187,3 +187,21 @@ func subsetSlice(a, b []interface{}) bool {
}
return true
}
// JoinPatches joins array of serialized JSON patches to the single JSONPatch array.
// An empty input yields a nil slice.
func JoinPatches(patches [][]byte) []byte {
	if len(patches) == 0 {
		return nil
	}
	joined := []byte("[\n")
	for i, patch := range patches {
		if i > 0 {
			joined = append(joined, []byte(",\n")...)
		}
		joined = append(joined, patch...)
	}
	return append(joined, []byte("\n]")...)
}

View file

@ -6,8 +6,17 @@ import (
"github.com/minio/minio/pkg/wildcard"
"k8s.io/api/admission/v1beta1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
// K8Resource identifies a kubernetes resource by kind, namespace and name.
type K8Resource struct {
	Kind      string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVK, then GV need to be added
	Namespace string
	Name      string
}
//Contains Check if strint is contained in a list of string
func Contains(list []string, element string) bool {
for _, e := range list {
if e == element {
@ -17,12 +26,6 @@ func Contains(list []string, element string) bool {
return false
}
type K8Resource struct {
Kind string //TODO: as we currently only support one GVK version, we use the kind only. But if we support multiple GVK, then GV need to be added
Namespace string
Name string
}
//SkipFilteredResourcesReq checks if request is to be skipped based on filtered kinds
func SkipFilteredResourcesReq(request *v1beta1.AdmissionRequest, filterK8Resources []K8Resource) bool {
kind := request.Kind.Kind
@ -74,3 +77,20 @@ func ParseKinds(list string) []K8Resource {
}
return resources
}
//NewKubeClient returns a new kubernetes client built from the given rest
//config.
func NewKubeClient(config *rest.Config) (kubernetes.Interface, error) {
	client, err := kubernetes.NewForConfig(config)
	if err != nil {
		// Return a literal nil interface (not the typed nil *Clientset) so
		// callers comparing against nil behave as expected.
		return nil, err
	}
	return client, nil
}
//Btoi converts boolean to int (true -> 1, false -> 0)
func Btoi(b bool) int {
	result := 0
	if b {
		result = 1
	}
	return result
}

View file

@ -1,276 +0,0 @@
package violation
import (
"errors"
"github.com/golang/glog"
v1alpha1 "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
lister "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/info"
"github.com/nirmata/kyverno/pkg/sharedinformer"
"k8s.io/apimachinery/pkg/runtime"
)
//Generator to generate policy violation
type Generator interface {
	Add(infos ...*Info) error
	RemoveInactiveViolation(policy, rKind, rNs, rName string, ruleType info.RuleType) error
	ResourceRemoval(policy, rKind, rNs, rName string) error
}

// builder is the concrete Generator/Builder implementation; it records
// violations on the Policy "status" subresource through the dynamic client.
type builder struct {
	client       *client.Client
	policyLister lister.PolicyLister
	eventBuilder event.Generator
}

//Builder is to build policy violations
type Builder interface {
	Generator
	processViolation(info *Info) error
}
//NewPolicyViolationBuilder returns new violation builder wired to the given
//client, shared policy informer and event generator.
func NewPolicyViolationBuilder(client *client.Client,
	sharedInfomer sharedinformer.PolicyInformer,
	eventController event.Generator) Builder {
	return &builder{
		client:       client,
		policyLister: sharedInfomer.GetLister(),
		eventBuilder: eventController,
	}
}
//BuldNewViolation returns a new violation
// NOTE(review): the name carries a typo ("Buld") but is exported API, so it is
// kept as-is.
func BuldNewViolation(pName string, rKind string, rNs string, rName string, reason string, frules []v1alpha1.FailedRule) *Info {
	violation := v1alpha1.Violation{
		Kind:      rKind,
		Namespace: rNs,
		Name:      rName,
		Reason:    reason,
		Rules:     frules,
	}
	return &Info{
		Policy:    pName,
		Violation: violation,
	}
}
// Add processes each violation in turn; individual failures are logged and do
// not stop processing of the remaining violations (best-effort, always nil).
func (b *builder) Add(infos ...*Info) error {
	if infos == nil {
		return nil
	}
	for _, violationInfo := range infos {
		if err := b.processViolation(violationInfo); err != nil {
			glog.Error(err)
		}
	}
	return nil
}
// processViolation records one violation on the "status" subresource of the
// owning Policy: it fetches the Policy as unstructured content, inserts or
// replaces the entry under status.violations keyed by the violating resource,
// and writes the subresource back. No API update is issued when the stored
// violation already equals the new one.
func (b *builder) processViolation(info *Info) error {
	statusMap := map[string]interface{}{}
	violationsMap := map[string]interface{}{}
	violationMap := map[string]interface{}{}
	var violations interface{}
	var violation interface{}
	// Get Policy
	obj, err := b.client.GetResource("Policy", "", info.Policy, "status")
	if err != nil {
		return err
	}
	unstr := obj.UnstructuredContent()
	// get "status" subresource
	status, ok := unstr["status"]
	if ok {
		// status exists
		// status is already present then we append violations
		if statusMap, ok = status.(map[string]interface{}); !ok {
			return errors.New("Unable to parse status subresource")
		}
		// get policy violations
		violations, ok = statusMap["violations"]
		if !ok {
			return nil
		}
		violationsMap, ok = violations.(map[string]interface{})
		if !ok {
			return errors.New("Unable to get status.violations subresource")
		}
		// check if the resource has a violation
		violation, ok = violationsMap[info.getKey()]
		if !ok {
			// add resource violation
			violationsMap[info.getKey()] = info.Violation
			statusMap["violations"] = violationsMap
			unstr["status"] = statusMap
		} else {
			violationMap, ok = violation.(map[string]interface{})
			if !ok {
				return errors.New("Unable to get status.violations.violation subresource")
			}
			// we check if the new violation updates are different from stored violation info
			v := v1alpha1.Violation{}
			err := runtime.DefaultUnstructuredConverter.FromUnstructured(violationMap, &v)
			if err != nil {
				return err
			}
			// compare v & info.Violation
			if v.IsEqual(info.Violation) {
				// no updates to violation
				// do nothing
				return nil
			}
			// update the violation
			violationsMap[info.getKey()] = info.Violation
			statusMap["violations"] = violationsMap
			unstr["status"] = statusMap
		}
	} else {
		// no status present yet: create it with this violation as the first entry
		violationsMap[info.getKey()] = info.Violation
		statusMap["violations"] = violationsMap
		unstr["status"] = statusMap
	}
	obj.SetUnstructuredContent(unstr)
	// update the status sub-resource for policy
	_, err = b.client.UpdateStatusResource("Policy", "", obj, false)
	if err != nil {
		return err
	}
	return nil
}
//RemoveInactiveViolation removes, from the Policy status subresource, the
// failed rules of the given type for the given resource. It is called after a
// policy applied successfully, so earlier failures of that rule type are
// cleared; when no rules remain the whole violation entry is deleted.
func (b *builder) RemoveInactiveViolation(policy, rKind, rNs, rName string, ruleType info.RuleType) error {
	statusMap := map[string]interface{}{}
	violationsMap := map[string]interface{}{}
	violationMap := map[string]interface{}{}
	var violations interface{}
	var violation interface{}
	// Get Policy
	obj, err := b.client.GetResource("Policy", "", policy, "status")
	if err != nil {
		return err
	}
	unstr := obj.UnstructuredContent()
	// get "status" subresource; absence means there is nothing to remove
	status, ok := unstr["status"]
	if !ok {
		return nil
	}
	// status exists
	// status is already present then we append violations
	if statusMap, ok = status.(map[string]interface{}); !ok {
		return errors.New("Unable to parse status subresource")
	}
	// get policy violations
	violations, ok = statusMap["violations"]
	if !ok {
		return nil
	}
	violationsMap, ok = violations.(map[string]interface{})
	if !ok {
		return errors.New("Unable to get status.violations subresource")
	}
	// check if the resource has a violation
	violation, ok = violationsMap[BuildKey(rKind, rNs, rName)]
	if !ok {
		// no violation for this resource
		return nil
	}
	violationMap, ok = violation.(map[string]interface{})
	if !ok {
		return errors.New("Unable to get status.violations.violation subresource")
	}
	// check remove the rules of the given type
	// this is called when the policy is applied succesfully, so we can remove the previous failed rules
	// if all rules are to be removed, the deleted the violation
	v := v1alpha1.Violation{}
	err = runtime.DefaultUnstructuredConverter.FromUnstructured(violationMap, &v)
	if err != nil {
		return err
	}
	if !v.RemoveRulesOfType(ruleType.String()) {
		// no rule of given type found,
		// no need to remove rule
		return nil
	}
	// if there are no failed rules remaining, remove the violation entirely
	if len(v.Rules) == 0 {
		delete(violationsMap, BuildKey(rKind, rNs, rName))
	} else {
		// update the rules
		violationsMap[BuildKey(rKind, rNs, rName)] = v
	}
	statusMap["violations"] = violationsMap
	unstr["status"] = statusMap
	obj.SetUnstructuredContent(unstr)
	// update the status sub-resource for policy
	_, err = b.client.UpdateStatusResource("Policy", "", obj, false)
	if err != nil {
		return err
	}
	return nil
}
// ResourceRemoval removes a resource's policy violation entry from the policy's
// status subresource when the resource itself has been removed. The "violations"
// map is dropped entirely when the last entry disappears.
func (b *builder) ResourceRemoval(policy, rKind, rNs, rName string) error {
	// fetch the policy with its "status" subresource
	obj, err := b.client.GetResource("Policy", "", policy, "status")
	if err != nil {
		return err
	}
	content := obj.UnstructuredContent()
	// nothing recorded yet, so nothing to remove
	status, found := content["status"]
	if !found {
		return nil
	}
	statusMap, ok := status.(map[string]interface{})
	if !ok {
		return errors.New("Unable to parse status subresource")
	}
	violations, found := statusMap["violations"]
	if !found {
		return nil
	}
	violationsMap, ok := violations.(map[string]interface{})
	if !ok {
		return errors.New("Unable to get status.violations subresource")
	}
	key := BuildKey(rKind, rNs, rName)
	if _, found = violationsMap[key]; !found {
		// this resource has no recorded violation
		return nil
	}
	// drop the entry; remove the map itself if that was the last one
	delete(violationsMap, key)
	if len(violationsMap) == 0 {
		delete(statusMap, "violations")
	} else {
		statusMap["violations"] = violationsMap
	}
	content["status"] = statusMap
	obj.SetUnstructuredContent(content)
	// push the updated status subresource back to the policy
	if _, err = b.client.UpdateStatusResource("Policy", "", obj, false); err != nil {
		return err
	}
	return nil
}

View file

@ -1,27 +0,0 @@
package violation
import policytype "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
const (
	// violationEventSource is the source used by the events recorder.
	violationEventSource = "policy-controller"
	// workqueueViolationName is the name of the workqueue that stores the events.
	workqueueViolationName = "Policy-Violations"
	// violationEventResrouce is the event reason.
	violationEventResrouce = "Violation"
)
//Info describes the policyviolation details: the violated policy's name plus
// the embedded violation data (resource identity and failed rules).
type Info struct {
	// Policy is the name of the policy that was violated.
	Policy string
	// Embedded violation details; its fields (Kind, Namespace, Name, ...) are
	// promoted onto Info — see getKey, which reads them directly.
	policytype.Violation
}
// getKey returns the lookup key for the violated resource, in the form
// "<kind>/<namespace>/<name>" (fields promoted from the embedded Violation).
func (i Info) getKey() string {
	key := i.Kind
	key += "/" + i.Namespace
	key += "/" + i.Name
	return key
}
//BuildKey returns the lookup key for a resource, in the form
// "<kind>/<namespace>/<name>".
func BuildKey(rKind, rNs, rName string) string {
	key := rKind
	key += "/" + rNs
	key += "/" + rName
	return key
}

Some files were not shown because too many files have changed in this diff Show more