* initial commit

* background policy validation

* correct message

* skip non-background policy process for add/update

* add Generate Request CR

* generate Request Generator Initial

* test generate request CR generation

* initial commit gr generator

* generate controller initial framework

* add crd for generate request

* gr cleanup controller initial commit

* cleanup controller initial

* generate mid-commit

* generate rule processing

* create PV on generate error

* embed resource type

* testing phase 1- generate resources with variable substitution

* fix tests

* comment broken test #586

* add printer column for state

* return if existing resource for clone

* set resync time to 2 mins & remove resource version check in update handler for gr

* generate events for reporting

* fix logs

* cleanup

* CR fixes

* fix logs
This commit is contained in:
Shivkumar Dudhani 2020-01-07 10:33:28 -08:00 committed by GitHub
parent 622d007e18
commit ffd2179b03
56 changed files with 2627 additions and 414 deletions

View file

@ -12,7 +12,8 @@ import (
"github.com/nirmata/kyverno/pkg/config"
dclient "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/namespace"
"github.com/nirmata/kyverno/pkg/generate"
generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
@ -21,6 +22,7 @@ import (
"github.com/nirmata/kyverno/pkg/version"
"github.com/nirmata/kyverno/pkg/webhookconfig"
"github.com/nirmata/kyverno/pkg/webhooks"
webhookgenerate "github.com/nirmata/kyverno/pkg/webhooks/generate"
kubeinformers "k8s.io/client-go/informers"
)
@ -154,18 +156,27 @@ func main() {
glog.Fatalf("error creating policy controller: %v\n", err)
}
// GENERATE REQUEST GENERATOR
grgen := webhookgenerate.NewGenerator(pclient, stopCh)
// GENERATE CONTROLLER
// - watches for Namespace resource and generates resource based on the policy generate rule
nsc := namespace.NewNamespaceController(
// - applies generate rules on resources based on generate requests created by webhook
grc := generate.NewController(
pclient,
client,
kubeInformer.Core().V1().Namespaces(),
pInformer.Kyverno().V1().ClusterPolicies(),
pc.GetPolicyStatusAggregator(),
pInformer.Kyverno().V1().GenerateRequests(),
egen,
configData,
pvgen,
policyMetaStore)
)
// GENERATE REQUEST CLEANUP
// -- cleans up the generate requests that have not been processed (i.e. state = [Pending, Failed]) for more than the defined timeout
grcc := generatecleanup.NewController(
pclient,
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().GenerateRequests(),
)
// CONFIGURE CERTIFICATES
tlsPair, err := client.InitTLSPemPair(clientConfig)
@ -201,6 +212,7 @@ func main() {
configData,
policyMetaStore,
pvgen,
grgen,
rWebhookWatcher,
cleanUp)
if err != nil {
@ -209,13 +221,14 @@ func main() {
// Start the components
pInformer.Start(stopCh)
kubeInformer.Start(stopCh)
go grgen.Run(1)
go rWebhookWatcher.Run(stopCh)
go configData.Run(stopCh)
go policyMetaStore.Run(stopCh)
go pc.Run(1, stopCh)
go egen.Run(1, stopCh)
go nsc.Run(1, stopCh)
go grc.Run(1, stopCh)
go grcc.Run(1, stopCh)
go pvgen.Run(1, stopCh)
// verifies if the admission control is enabled and active
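
The cleanup controller wired above is described only by its comment. A minimal sketch of that staleness check, assuming a helper name and timeout value that are not part of this commit:

package main

import (
    "fmt"
    "time"

    kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

// staleGenerateRequest reports whether a generate request qualifies for
// cleanup: it is still Pending or Failed and is older than the timeout.
// The real controller drives this through informers and a workqueue.
func staleGenerateRequest(gr *kyvernov1.GenerateRequest, timeout time.Duration) bool {
    state := gr.Status.State
    if state != kyvernov1.Pending && state != kyvernov1.Failed {
        return false
    }
    return time.Since(gr.CreationTimestamp.Time) > timeout
}

func main() {
    gr := &kyvernov1.GenerateRequest{}
    gr.Status.State = kyvernov1.Pending
    fmt.Println(staleGenerateRequest(gr, 10*time.Minute)) // true: the zero creation time is long past
}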

View file

@ -383,6 +383,72 @@ spec:
creationBlocked:
type: boolean
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: generaterequests.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1
served: true
storage: true
scope: Namespaced
names:
kind: GenerateRequest
plural: generaterequests
singular: generaterequest
shortNames:
- gr
subresources:
status: {}
additionalPrinterColumns:
- name: Policy
type: string
description: The policy that resulted in the violation
JSONPath: .spec.policy
- name: ResourceKind
type: string
description: The resource kind that caused the violation
JSONPath: .spec.resource.kind
- name: ResourceName
type: string
description: The resource name that caused the violation
JSONPath: .spec.resource.name
- name: ResourceNamespace
type: string
description: The resource namespace that caused the violation
JSONPath: .spec.resource.namespace
- name: status
type: string
description: Current state of generate request
JSONPath: .status.state
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
validation:
openAPIV3Schema:
properties:
spec:
required:
- policy
- resource
properties:
policy:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
---
kind: Namespace
apiVersion: v1
metadata:

View file

@ -383,6 +383,72 @@ spec:
creationBlocked:
type: boolean
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: generaterequests.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1
served: true
storage: true
scope: Namespaced
names:
kind: GenerateRequest
plural: generaterequests
singular: generaterequest
shortNames:
- gr
subresources:
status: {}
additionalPrinterColumns:
- name: Policy
type: string
description: The policy that resulted in the violation
JSONPath: .spec.policy
- name: ResourceKind
type: string
description: The resource kind that caused the violation
JSONPath: .spec.resource.kind
- name: ResourceName
type: string
description: The resource name that caused the violation
JSONPath: .spec.resource.name
- name: ResourceNamespace
type: string
description: The resource namespace that caused the violation
JSONPath: .spec.resource.namespace
- name: status
type: string
description: Current state of generate request
JSONPath: .status.state
- name: Age
type: date
JSONPath: .metadata.creationTimestamp
validation:
openAPIV3Schema:
properties:
spec:
required:
- policy
- resource
properties:
policy:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
---
apiVersion: v1
kind: ConfigMap
metadata:

View file

@ -35,6 +35,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&ClusterPolicyViolationList{},
&PolicyViolation{},
&PolicyViolationList{},
&GenerateRequest{},
&GenerateRequestList{},
)
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
return nil

View file

@ -1,10 +1,71 @@
package v1
import (
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//GenerateRequest is a request to process a generate rule
type GenerateRequest struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec GenerateRequestSpec `json:"spec"`
Status GenerateRequestStatus `json:"status"`
}
//GenerateRequestSpec stores the request specification
type GenerateRequestSpec struct {
Policy string `json:"policy"`
Resource ResourceSpec `json:"resource"`
Context GenerateRequestContext `json:"context"`
}
//GenerateRequestContext stores the context to be shared
type GenerateRequestContext struct {
UserRequestInfo RequestInfo `json:"userInfo,omitempty"`
}
// RequestInfo contains permission info carried in an admission request
type RequestInfo struct {
// Roles is a list of possible roles of the user that sent the request
Roles []string `json:"roles"`
// ClusterRoles is a list of possible clusterRoles of the user that sent the request
ClusterRoles []string `json:"clusterRoles"`
// UserInfo is the userInfo carried in the admission request
AdmissionUserInfo authenticationv1.UserInfo `json:"userInfo"`
}
//GenerateRequestStatus stores the status of the generate request
type GenerateRequestStatus struct {
State GenerateRequestState `json:"state"`
Message string `json:"message,omitempty"`
}
//GenerateRequestState defines the state of the generate request
type GenerateRequestState string
const (
//Pending - the Request is yet to be processed or resource has not been created
Pending GenerateRequestState = "Pending"
//Failed - the Generate Request Controller failed to process the rules
Failed GenerateRequestState = "Failed"
//Completed - the Generate Request Controller created resources defined in the policy
Completed GenerateRequestState = "Completed"
)
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
//GenerateRequestList stores the list of generate requests
type GenerateRequestList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata"`
Items []GenerateRequest `json:"items"`
}
// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
@ -129,13 +190,12 @@ type Validation struct {
// Generation describes which resources will be created when other resource is created
type Generation struct {
Kind string `json:"kind,omitempty"`
Name string `json:"name,omitempty"`
Data interface{} `json:"data,omitempty"`
Clone CloneFrom `json:"clone,omitempty"`
ResourceSpec
Data interface{} `json:"data"`
Clone CloneFrom `json:"clone"`
}
// CloneFrom - location of a Secret or a ConfigMap
// CloneFrom - location of the resource
// which will be used as source when applying 'generate'
type CloneFrom struct {
Namespace string `json:"namespace,omitempty"`
@ -192,8 +252,7 @@ type PolicyViolationSpec struct {
// ResourceSpec information to identify the resource
type ResourceSpec struct {
Kind string `json:"kind"`
// Is not used in processing, but is present for backward compatibility
Kind string `json:"kind"`
Namespace string `json:"namespace,omitempty"`
Name string `json:"name"`
}
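
A minimal sketch, not part of this commit, of how the new GenerateRequest types compose into a complete object; the policy, namespace, and user names below are illustrative:

package main

import (
    "fmt"

    kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    authenticationv1 "k8s.io/api/authentication/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
    // Build a generate request in memory; all names and values are illustrative.
    gr := kyvernov1.GenerateRequest{
        ObjectMeta: metav1.ObjectMeta{GenerateName: "gr-", Namespace: "kyverno"},
        Spec: kyvernov1.GenerateRequestSpec{
            Policy: "add-default-networkpolicy", // hypothetical ClusterPolicy name
            Resource: kyvernov1.ResourceSpec{
                Kind: "Namespace",
                Name: "team-a",
            },
            Context: kyvernov1.GenerateRequestContext{
                UserRequestInfo: kyvernov1.RequestInfo{
                    AdmissionUserInfo: authenticationv1.UserInfo{Username: "kubernetes-admin"},
                },
            },
        },
        // New requests start as Pending; the generate controller later moves
        // them to Completed or Failed (see the state constants above).
        Status: kyvernov1.GenerateRequestStatus{State: kyvernov1.Pending},
    }
    fmt.Println(gr.Spec.Policy, gr.Status.State)
}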

View file

@ -2,9 +2,9 @@ package v1
import "reflect"
func (p ClusterPolicy) HasMutateOrValidate() bool {
func (p ClusterPolicy) HasMutateOrValidateOrGenerate() bool {
for _, rule := range p.Spec.Rules {
if rule.HasMutate() || rule.HasValidate() {
if rule.HasMutate() || rule.HasValidate() || rule.HasGenerate() {
return true
}
}

View file

@ -167,21 +167,7 @@ func (in *ClusterPolicyViolationList) DeepCopyObject() runtime.Object {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExcludeResources) DeepCopyInto(out *ExcludeResources) {
*out = *in
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ClusterRoles != nil {
in, out := &in.ClusterRoles, &out.ClusterRoles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]rbacv1.Subject, len(*in))
copy(*out, *in)
}
in.UserInfo.DeepCopyInto(&out.UserInfo)
in.ResourceDescription.DeepCopyInto(&out.ResourceDescription)
return
}
@ -196,6 +182,118 @@ func (in *ExcludeResources) DeepCopy() *ExcludeResources {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenerateRequest) DeepCopyInto(out *GenerateRequest) {
*out = *in
out.TypeMeta = in.TypeMeta
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
in.Spec.DeepCopyInto(&out.Spec)
out.Status = in.Status
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequest.
func (in *GenerateRequest) DeepCopy() *GenerateRequest {
if in == nil {
return nil
}
out := new(GenerateRequest)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GenerateRequest) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenerateRequestContext) DeepCopyInto(out *GenerateRequestContext) {
*out = *in
in.UserRequestInfo.DeepCopyInto(&out.UserRequestInfo)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestContext.
func (in *GenerateRequestContext) DeepCopy() *GenerateRequestContext {
if in == nil {
return nil
}
out := new(GenerateRequestContext)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenerateRequestList) DeepCopyInto(out *GenerateRequestList) {
*out = *in
out.TypeMeta = in.TypeMeta
out.ListMeta = in.ListMeta
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]GenerateRequest, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestList.
func (in *GenerateRequestList) DeepCopy() *GenerateRequestList {
if in == nil {
return nil
}
out := new(GenerateRequestList)
in.DeepCopyInto(out)
return out
}
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *GenerateRequestList) DeepCopyObject() runtime.Object {
if c := in.DeepCopy(); c != nil {
return c
}
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenerateRequestSpec) DeepCopyInto(out *GenerateRequestSpec) {
*out = *in
out.Resource = in.Resource
in.Context.DeepCopyInto(&out.Context)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestSpec.
func (in *GenerateRequestSpec) DeepCopy() *GenerateRequestSpec {
if in == nil {
return nil
}
out := new(GenerateRequestSpec)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *GenerateRequestStatus) DeepCopyInto(out *GenerateRequestStatus) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestStatus.
func (in *GenerateRequestStatus) DeepCopy() *GenerateRequestStatus {
if in == nil {
return nil
}
out := new(GenerateRequestStatus)
in.DeepCopyInto(out)
return out
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Generation.
func (in *Generation) DeepCopy() *Generation {
if in == nil {
@ -225,21 +323,7 @@ func (in *ManagedResourceSpec) DeepCopy() *ManagedResourceSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MatchResources) DeepCopyInto(out *MatchResources) {
*out = *in
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ClusterRoles != nil {
in, out := &in.ClusterRoles, &out.ClusterRoles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]rbacv1.Subject, len(*in))
copy(*out, *in)
}
in.UserInfo.DeepCopyInto(&out.UserInfo)
in.ResourceDescription.DeepCopyInto(&out.ResourceDescription)
return
}
@ -425,6 +509,33 @@ func (in *PolicyViolationTemplate) DeepCopy() *PolicyViolationTemplate {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *RequestInfo) DeepCopyInto(out *RequestInfo) {
*out = *in
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ClusterRoles != nil {
in, out := &in.ClusterRoles, &out.ClusterRoles
*out = make([]string, len(*in))
copy(*out, *in)
}
in.AdmissionUserInfo.DeepCopyInto(&out.AdmissionUserInfo)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestInfo.
func (in *RequestInfo) DeepCopy() *RequestInfo {
if in == nil {
return nil
}
out := new(RequestInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
*out = *in
@ -532,6 +643,37 @@ func (in *Spec) DeepCopy() *Spec {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *UserInfo) DeepCopyInto(out *UserInfo) {
*out = *in
if in.Roles != nil {
in, out := &in.Roles, &out.Roles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.ClusterRoles != nil {
in, out := &in.ClusterRoles, &out.ClusterRoles
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Subjects != nil {
in, out := &in.Subjects, &out.Subjects
*out = make([]rbacv1.Subject, len(*in))
copy(*out, *in)
}
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo.
func (in *UserInfo) DeepCopy() *UserInfo {
if in == nil {
return nil
}
out := new(UserInfo)
in.DeepCopyInto(out)
return out
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Validation.
func (in *Validation) DeepCopy() *Validation {
if in == nil {

View file

@ -48,7 +48,7 @@ func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolic
glog.Error()
}
for _, policy := range policies {
if policy.HasMutateOrValidate() {
if policy.HasMutateOrValidateOrGenerate() {
// as there exists at least one policy with a mutate, validate, or generate rule
// so there must be a webhook configuration on the resource
return true

View file

@ -0,0 +1,140 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
labels "k8s.io/apimachinery/pkg/labels"
schema "k8s.io/apimachinery/pkg/runtime/schema"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
testing "k8s.io/client-go/testing"
)
// FakeGenerateRequests implements GenerateRequestInterface
type FakeGenerateRequests struct {
Fake *FakeKyvernoV1
ns string
}
var generaterequestsResource = schema.GroupVersionResource{Group: "kyverno.io", Version: "v1", Resource: "generaterequests"}
var generaterequestsKind = schema.GroupVersionKind{Group: "kyverno.io", Version: "v1", Kind: "GenerateRequest"}
// Get takes name of the generateRequest, and returns the corresponding generateRequest object, and an error if there is any.
func (c *FakeGenerateRequests) Get(name string, options v1.GetOptions) (result *kyvernov1.GenerateRequest, err error) {
obj, err := c.Fake.
Invokes(testing.NewGetAction(generaterequestsResource, c.ns, name), &kyvernov1.GenerateRequest{})
if obj == nil {
return nil, err
}
return obj.(*kyvernov1.GenerateRequest), err
}
// List takes label and field selectors, and returns the list of GenerateRequests that match those selectors.
func (c *FakeGenerateRequests) List(opts v1.ListOptions) (result *kyvernov1.GenerateRequestList, err error) {
obj, err := c.Fake.
Invokes(testing.NewListAction(generaterequestsResource, generaterequestsKind, c.ns, opts), &kyvernov1.GenerateRequestList{})
if obj == nil {
return nil, err
}
label, _, _ := testing.ExtractFromListOptions(opts)
if label == nil {
label = labels.Everything()
}
list := &kyvernov1.GenerateRequestList{ListMeta: obj.(*kyvernov1.GenerateRequestList).ListMeta}
for _, item := range obj.(*kyvernov1.GenerateRequestList).Items {
if label.Matches(labels.Set(item.Labels)) {
list.Items = append(list.Items, item)
}
}
return list, err
}
// Watch returns a watch.Interface that watches the requested generateRequests.
func (c *FakeGenerateRequests) Watch(opts v1.ListOptions) (watch.Interface, error) {
return c.Fake.
InvokesWatch(testing.NewWatchAction(generaterequestsResource, c.ns, opts))
}
// Create takes the representation of a generateRequest and creates it. Returns the server's representation of the generateRequest, and an error, if there is any.
func (c *FakeGenerateRequests) Create(generateRequest *kyvernov1.GenerateRequest) (result *kyvernov1.GenerateRequest, err error) {
obj, err := c.Fake.
Invokes(testing.NewCreateAction(generaterequestsResource, c.ns, generateRequest), &kyvernov1.GenerateRequest{})
if obj == nil {
return nil, err
}
return obj.(*kyvernov1.GenerateRequest), err
}
// Update takes the representation of a generateRequest and updates it. Returns the server's representation of the generateRequest, and an error, if there is any.
func (c *FakeGenerateRequests) Update(generateRequest *kyvernov1.GenerateRequest) (result *kyvernov1.GenerateRequest, err error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateAction(generaterequestsResource, c.ns, generateRequest), &kyvernov1.GenerateRequest{})
if obj == nil {
return nil, err
}
return obj.(*kyvernov1.GenerateRequest), err
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *FakeGenerateRequests) UpdateStatus(generateRequest *kyvernov1.GenerateRequest) (*kyvernov1.GenerateRequest, error) {
obj, err := c.Fake.
Invokes(testing.NewUpdateSubresourceAction(generaterequestsResource, "status", c.ns, generateRequest), &kyvernov1.GenerateRequest{})
if obj == nil {
return nil, err
}
return obj.(*kyvernov1.GenerateRequest), err
}
// Delete takes name of the generateRequest and deletes it. Returns an error if one occurs.
func (c *FakeGenerateRequests) Delete(name string, options *v1.DeleteOptions) error {
_, err := c.Fake.
Invokes(testing.NewDeleteAction(generaterequestsResource, c.ns, name), &kyvernov1.GenerateRequest{})
return err
}
// DeleteCollection deletes a collection of objects.
func (c *FakeGenerateRequests) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
action := testing.NewDeleteCollectionAction(generaterequestsResource, c.ns, listOptions)
_, err := c.Fake.Invokes(action, &kyvernov1.GenerateRequestList{})
return err
}
// Patch applies the patch and returns the patched generateRequest.
func (c *FakeGenerateRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *kyvernov1.GenerateRequest, err error) {
obj, err := c.Fake.
Invokes(testing.NewPatchSubresourceAction(generaterequestsResource, c.ns, name, pt, data, subresources...), &kyvernov1.GenerateRequest{})
if obj == nil {
return nil, err
}
return obj.(*kyvernov1.GenerateRequest), err
}

View file

@ -36,6 +36,10 @@ func (c *FakeKyvernoV1) ClusterPolicyViolations() v1.ClusterPolicyViolationInter
return &FakeClusterPolicyViolations{c}
}
func (c *FakeKyvernoV1) GenerateRequests(namespace string) v1.GenerateRequestInterface {
return &FakeGenerateRequests{c, namespace}
}
func (c *FakeKyvernoV1) PolicyViolations(namespace string) v1.PolicyViolationInterface {
return &FakePolicyViolations{c, namespace}
}

View file

@ -22,4 +22,6 @@ type ClusterPolicyExpansion interface{}
type ClusterPolicyViolationExpansion interface{}
type GenerateRequestExpansion interface{}
type PolicyViolationExpansion interface{}

View file

@ -0,0 +1,191 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package v1
import (
"time"
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
scheme "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
types "k8s.io/apimachinery/pkg/types"
watch "k8s.io/apimachinery/pkg/watch"
rest "k8s.io/client-go/rest"
)
// GenerateRequestsGetter has a method to return a GenerateRequestInterface.
// A group's client should implement this interface.
type GenerateRequestsGetter interface {
GenerateRequests(namespace string) GenerateRequestInterface
}
// GenerateRequestInterface has methods to work with GenerateRequest resources.
type GenerateRequestInterface interface {
Create(*v1.GenerateRequest) (*v1.GenerateRequest, error)
Update(*v1.GenerateRequest) (*v1.GenerateRequest, error)
UpdateStatus(*v1.GenerateRequest) (*v1.GenerateRequest, error)
Delete(name string, options *metav1.DeleteOptions) error
DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
Get(name string, options metav1.GetOptions) (*v1.GenerateRequest, error)
List(opts metav1.ListOptions) (*v1.GenerateRequestList, error)
Watch(opts metav1.ListOptions) (watch.Interface, error)
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.GenerateRequest, err error)
GenerateRequestExpansion
}
// generateRequests implements GenerateRequestInterface
type generateRequests struct {
client rest.Interface
ns string
}
// newGenerateRequests returns a GenerateRequests
func newGenerateRequests(c *KyvernoV1Client, namespace string) *generateRequests {
return &generateRequests{
client: c.RESTClient(),
ns: namespace,
}
}
// Get takes name of the generateRequest, and returns the corresponding generateRequest object, and an error if there is any.
func (c *generateRequests) Get(name string, options metav1.GetOptions) (result *v1.GenerateRequest, err error) {
result = &v1.GenerateRequest{}
err = c.client.Get().
Namespace(c.ns).
Resource("generaterequests").
Name(name).
VersionedParams(&options, scheme.ParameterCodec).
Do().
Into(result)
return
}
// List takes label and field selectors, and returns the list of GenerateRequests that match those selectors.
func (c *generateRequests) List(opts metav1.ListOptions) (result *v1.GenerateRequestList, err error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
result = &v1.GenerateRequestList{}
err = c.client.Get().
Namespace(c.ns).
Resource("generaterequests").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Do().
Into(result)
return
}
// Watch returns a watch.Interface that watches the requested generateRequests.
func (c *generateRequests) Watch(opts metav1.ListOptions) (watch.Interface, error) {
var timeout time.Duration
if opts.TimeoutSeconds != nil {
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
}
opts.Watch = true
return c.client.Get().
Namespace(c.ns).
Resource("generaterequests").
VersionedParams(&opts, scheme.ParameterCodec).
Timeout(timeout).
Watch()
}
// Create takes the representation of a generateRequest and creates it. Returns the server's representation of the generateRequest, and an error, if there is any.
func (c *generateRequests) Create(generateRequest *v1.GenerateRequest) (result *v1.GenerateRequest, err error) {
result = &v1.GenerateRequest{}
err = c.client.Post().
Namespace(c.ns).
Resource("generaterequests").
Body(generateRequest).
Do().
Into(result)
return
}
// Update takes the representation of a generateRequest and updates it. Returns the server's representation of the generateRequest, and an error, if there is any.
func (c *generateRequests) Update(generateRequest *v1.GenerateRequest) (result *v1.GenerateRequest, err error) {
result = &v1.GenerateRequest{}
err = c.client.Put().
Namespace(c.ns).
Resource("generaterequests").
Name(generateRequest.Name).
Body(generateRequest).
Do().
Into(result)
return
}
// UpdateStatus was generated because the type contains a Status member.
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
func (c *generateRequests) UpdateStatus(generateRequest *v1.GenerateRequest) (result *v1.GenerateRequest, err error) {
result = &v1.GenerateRequest{}
err = c.client.Put().
Namespace(c.ns).
Resource("generaterequests").
Name(generateRequest.Name).
SubResource("status").
Body(generateRequest).
Do().
Into(result)
return
}
// Delete takes name of the generateRequest and deletes it. Returns an error if one occurs.
func (c *generateRequests) Delete(name string, options *metav1.DeleteOptions) error {
return c.client.Delete().
Namespace(c.ns).
Resource("generaterequests").
Name(name).
Body(options).
Do().
Error()
}
// DeleteCollection deletes a collection of objects.
func (c *generateRequests) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
var timeout time.Duration
if listOptions.TimeoutSeconds != nil {
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
}
return c.client.Delete().
Namespace(c.ns).
Resource("generaterequests").
VersionedParams(&listOptions, scheme.ParameterCodec).
Timeout(timeout).
Body(options).
Do().
Error()
}
// Patch applies the patch and returns the patched generateRequest.
func (c *generateRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.GenerateRequest, err error) {
result = &v1.GenerateRequest{}
err = c.client.Patch(pt).
Namespace(c.ns).
Resource("generaterequests").
SubResource(subresources...).
Name(name).
Body(data).
Do().
Into(result)
return
}
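
A hedged usage sketch of the generated typed client above; the kubeconfig path, namespace, and status message are illustrative, and NewForConfig is the conventional constructor name for generated clientsets (assumed, not shown in this diff):

package main

import (
    "fmt"

    kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // path is illustrative
    if err != nil {
        panic(err)
    }
    pclient, err := versioned.NewForConfig(config) // assumed generated constructor
    if err != nil {
        panic(err)
    }
    grClient := pclient.KyvernoV1().GenerateRequests("kyverno")

    // List the generate requests and report anything still Pending.
    grs, err := grClient.List(metav1.ListOptions{})
    if err != nil {
        panic(err)
    }
    for i := range grs.Items {
        gr := &grs.Items[i]
        if gr.Status.State != kyvernov1.Pending {
            continue
        }
        fmt.Printf("%s is still pending for policy %s\n", gr.Name, gr.Spec.Policy)

        // Record a note through the status subresource.
        gr.Status.Message = "observed pending by example client" // illustrative message
        if _, err := grClient.UpdateStatus(gr); err != nil {
            panic(err)
        }
    }
}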

View file

@ -29,6 +29,7 @@ type KyvernoV1Interface interface {
RESTClient() rest.Interface
ClusterPoliciesGetter
ClusterPolicyViolationsGetter
GenerateRequestsGetter
PolicyViolationsGetter
}
@ -45,6 +46,10 @@ func (c *KyvernoV1Client) ClusterPolicyViolations() ClusterPolicyViolationInterf
return newClusterPolicyViolations(c)
}
func (c *KyvernoV1Client) GenerateRequests(namespace string) GenerateRequestInterface {
return newGenerateRequests(c, namespace)
}
func (c *KyvernoV1Client) PolicyViolations(namespace string) PolicyViolationInterface {
return newPolicyViolations(c, namespace)
}

View file

@ -57,6 +57,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().ClusterPolicies().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("clusterpolicyviolations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().ClusterPolicyViolations().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("generaterequests"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().GenerateRequests().Informer()}, nil
case v1.SchemeGroupVersion.WithResource("policyviolations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().PolicyViolations().Informer()}, nil

View file

@ -0,0 +1,89 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package v1
import (
time "time"
kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
internalinterfaces "github.com/nirmata/kyverno/pkg/client/informers/externalversions/internalinterfaces"
v1 "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
watch "k8s.io/apimachinery/pkg/watch"
cache "k8s.io/client-go/tools/cache"
)
// GenerateRequestInformer provides access to a shared informer and lister for
// GenerateRequests.
type GenerateRequestInformer interface {
Informer() cache.SharedIndexInformer
Lister() v1.GenerateRequestLister
}
type generateRequestInformer struct {
factory internalinterfaces.SharedInformerFactory
tweakListOptions internalinterfaces.TweakListOptionsFunc
namespace string
}
// NewGenerateRequestInformer constructs a new informer for GenerateRequest type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewGenerateRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
return NewFilteredGenerateRequestInformer(client, namespace, resyncPeriod, indexers, nil)
}
// NewFilteredGenerateRequestInformer constructs a new informer for GenerateRequest type.
// Always prefer using an informer factory to get a shared informer instead of getting an independent
// one. This reduces memory footprint and number of connections to the server.
func NewFilteredGenerateRequestInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
return cache.NewSharedIndexInformer(
&cache.ListWatch{
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.KyvernoV1().GenerateRequests(namespace).List(options)
},
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
if tweakListOptions != nil {
tweakListOptions(&options)
}
return client.KyvernoV1().GenerateRequests(namespace).Watch(options)
},
},
&kyvernov1.GenerateRequest{},
resyncPeriod,
indexers,
)
}
func (f *generateRequestInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
return NewFilteredGenerateRequestInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
}
func (f *generateRequestInformer) Informer() cache.SharedIndexInformer {
return f.factory.InformerFor(&kyvernov1.GenerateRequest{}, f.defaultInformer)
}
func (f *generateRequestInformer) Lister() v1.GenerateRequestLister {
return v1.NewGenerateRequestLister(f.Informer().GetIndexer())
}
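
A sketch of how a controller might wire this informer through the shared factory; NewSharedInformerFactory and NewForConfig are the conventional generated constructor names (assumed, not shown in this diff), and the kubeconfig path and namespace are illustrative:

package main

import (
    "time"

    versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
    kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/client-go/tools/cache"
    "k8s.io/client-go/tools/clientcmd"
)

func main() {
    config, err := clientcmd.BuildConfigFromFlags("", "/root/.kube/config") // path is illustrative
    if err != nil {
        panic(err)
    }
    pclient, err := versioned.NewForConfig(config)
    if err != nil {
        panic(err)
    }

    stopCh := make(chan struct{})
    defer close(stopCh)

    // Shared factory resynced every 2 minutes, as in main.go above.
    pInformer := kyvernoinformer.NewSharedInformerFactory(pclient, 2*time.Minute)
    grInformer := pInformer.Kyverno().V1().GenerateRequests()

    // Controllers register handlers like this and enqueue keys for workers.
    grInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
        AddFunc: func(obj interface{}) { /* enqueue for processing */ },
    })

    pInformer.Start(stopCh)
    cache.WaitForCacheSync(stopCh, grInformer.Informer().HasSynced)

    // Reads are served from the informer's local cache via the lister.
    grs, err := grInformer.Lister().GenerateRequests("kyverno").List(labels.Everything())
    if err != nil {
        panic(err)
    }
    _ = grs
}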

View file

@ -28,6 +28,8 @@ type Interface interface {
ClusterPolicies() ClusterPolicyInformer
// ClusterPolicyViolations returns a ClusterPolicyViolationInformer.
ClusterPolicyViolations() ClusterPolicyViolationInformer
// GenerateRequests returns a GenerateRequestInformer.
GenerateRequests() GenerateRequestInformer
// PolicyViolations returns a PolicyViolationInformer.
PolicyViolations() PolicyViolationInformer
}
@ -53,6 +55,11 @@ func (v *version) ClusterPolicyViolations() ClusterPolicyViolationInformer {
return &clusterPolicyViolationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}
// GenerateRequests returns a GenerateRequestInformer.
func (v *version) GenerateRequests() GenerateRequestInformer {
return &generateRequestInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}
// PolicyViolations returns a PolicyViolationInformer.
func (v *version) PolicyViolations() PolicyViolationInformer {
return &policyViolationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}

View file

@ -5,7 +5,7 @@ Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@ -144,3 +144,28 @@ func (pl *clusterPolicyLister) GetPolicyForNamespacedPolicyViolation(pv *kyverno
return policies, nil
}
// GenerateRequestListerExpansion allows custom methods to be added to
// GenerateRequestLister.
type GenerateRequestListerExpansion interface {
}
// GenerateRequestNamespaceListerExpansion allows custom methods to be added to
// GenerateRequestNamespaceLister.
type GenerateRequestNamespaceListerExpansion interface {
GetGenerateRequestsForClusterPolicy(policy string) ([]*kyvernov1.GenerateRequest, error)
}
func (s generateRequestNamespaceLister) GetGenerateRequestsForClusterPolicy(policy string) ([]*kyvernov1.GenerateRequest, error) {
var list []*kyvernov1.GenerateRequest
grs, err := s.List(labels.NewSelector())
if err != nil {
return nil, err
}
for idx, gr := range grs {
if gr.Spec.Policy == policy {
list = append(list, grs[idx])
}
}
return list, err
}
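
A short sketch using the lister expansion above to look up the cached requests for one policy; the wrapper function, namespace, and Pending filter are illustrative, not this commit's controller logic:

package grlookup

import (
    kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
)

// PendingForPolicy returns the cached generate requests for one cluster policy
// that are still Pending. The lister would typically come from grInformer.Lister().
func PendingForPolicy(lister kyvernolister.GenerateRequestLister, policy string) ([]*kyvernov1.GenerateRequest, error) {
    grs, err := lister.GenerateRequests("kyverno").GetGenerateRequestsForClusterPolicy(policy)
    if err != nil {
        return nil, err
    }
    var pending []*kyvernov1.GenerateRequest
    for _, gr := range grs {
        if gr.Status.State == kyvernov1.Pending {
            pending = append(pending, gr)
        }
    }
    return pending, nil
}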

View file

@ -0,0 +1,94 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by lister-gen. DO NOT EDIT.
package v1
import (
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
// GenerateRequestLister helps list GenerateRequests.
type GenerateRequestLister interface {
// List lists all GenerateRequests in the indexer.
List(selector labels.Selector) (ret []*v1.GenerateRequest, err error)
// GenerateRequests returns an object that can list and get GenerateRequests.
GenerateRequests(namespace string) GenerateRequestNamespaceLister
GenerateRequestListerExpansion
}
// generateRequestLister implements the GenerateRequestLister interface.
type generateRequestLister struct {
indexer cache.Indexer
}
// NewGenerateRequestLister returns a new GenerateRequestLister.
func NewGenerateRequestLister(indexer cache.Indexer) GenerateRequestLister {
return &generateRequestLister{indexer: indexer}
}
// List lists all GenerateRequests in the indexer.
func (s *generateRequestLister) List(selector labels.Selector) (ret []*v1.GenerateRequest, err error) {
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
ret = append(ret, m.(*v1.GenerateRequest))
})
return ret, err
}
// GenerateRequests returns an object that can list and get GenerateRequests.
func (s *generateRequestLister) GenerateRequests(namespace string) GenerateRequestNamespaceLister {
return generateRequestNamespaceLister{indexer: s.indexer, namespace: namespace}
}
// GenerateRequestNamespaceLister helps list and get GenerateRequests.
type GenerateRequestNamespaceLister interface {
// List lists all GenerateRequests in the indexer for a given namespace.
List(selector labels.Selector) (ret []*v1.GenerateRequest, err error)
// Get retrieves the GenerateRequest from the indexer for a given namespace and name.
Get(name string) (*v1.GenerateRequest, error)
GenerateRequestNamespaceListerExpansion
}
// generateRequestNamespaceLister implements the GenerateRequestNamespaceLister
// interface.
type generateRequestNamespaceLister struct {
indexer cache.Indexer
namespace string
}
// List lists all GenerateRequests in the indexer for a given namespace.
func (s generateRequestNamespaceLister) List(selector labels.Selector) (ret []*v1.GenerateRequest, err error) {
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
ret = append(ret, m.(*v1.GenerateRequest))
})
return ret, err
}
// Get retrieves the GenerateRequest from the indexer for a given namespace and name.
func (s generateRequestNamespaceLister) Get(name string) (*v1.GenerateRequest, error) {
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
if err != nil {
return nil, err
}
if !exists {
return nil, errors.NewNotFound(v1.Resource("generaterequest"), name)
}
return obj.(*v1.GenerateRequest), nil
}

View file

@ -6,7 +6,6 @@ import (
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/config"
apps "k8s.io/api/apps/v1"
certificates "k8s.io/api/certificates/v1beta1"
@ -188,46 +187,6 @@ func convertToUnstructured(obj interface{}) *unstructured.Unstructured {
return &unstructured.Unstructured{Object: unstructuredObj}
}
// GenerateResource creates resource of the specified kind(supports 'clone' & 'data')
func (c *Client) GenerateResource(generator kyverno.Generation, namespace string, processExistingResources bool) error {
var err error
resource := &unstructured.Unstructured{}
var rdata map[string]interface{}
// data -> create new resource
if generator.Data != nil {
rdata, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&generator.Data)
if err != nil {
glog.Error(err)
return err
}
}
// clone -> copy from existing resource
if generator.Clone != (kyverno.CloneFrom{}) {
resource, err = c.GetResource(generator.Kind, generator.Clone.Namespace, generator.Clone.Name)
if err != nil {
return err
}
rdata = resource.UnstructuredContent()
}
resource.SetUnstructuredContent(rdata)
resource.SetName(generator.Name)
resource.SetNamespace(namespace)
resource.SetResourceVersion("")
err = c.waitUntilNamespaceIsCreated(namespace)
if err != nil {
glog.Errorf("Can't create a resource %s: %v", generator.Name, err)
return nil
}
_, err = c.CreateResource(generator.Kind, namespace, resource, false)
if err != nil {
return err
}
return nil
}
//To-Do remove this to use unstructured type
func convertToSecret(obj *unstructured.Unstructured) (v1.Secret, error) {
secret := v1.Secret{}

View file

@ -3,8 +3,6 @@ package client
import (
"testing"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
@ -118,41 +116,6 @@ func TestCSRInterface(t *testing.T) {
}
}
func TestGenerateResource(t *testing.T) {
f := newFixture(t)
//GenerateResource -> copy From
// 1 create namespace
// 2 generate resource
// create namespace
ns, err := f.client.CreateResource("Namespace", "", newUnstructured("v1", "Namespace", "", "ns1"), false)
if err != nil {
t.Errorf("CreateResource not working: %s", err)
}
gen := kyverno.Generation{Kind: "TheKind",
Name: "gen-kind",
Clone: kyverno.CloneFrom{Namespace: "ns-foo", Name: "name-foo"}}
err = f.client.GenerateResource(gen, ns.GetName(), false)
if err != nil {
t.Errorf("GenerateResource not working: %s", err)
}
_, err = f.client.GetResource("TheKind", "ns1", "gen-kind")
if err != nil {
t.Errorf("GetResource not working: %s", err)
}
// GenerateResource -> data
gen = kyverno.Generation{Kind: "TheKind",
Name: "name2-baz-new",
Data: newUnstructured("group2/version", "TheKind", "ns1", "name2-baz-new")}
err = f.client.GenerateResource(gen, ns.GetName(), false)
if err != nil {
t.Errorf("GenerateResource not working: %s", err)
}
_, err = f.client.GetResource("TheKind", "ns1", "name2-baz-new")
if err != nil {
t.Errorf("GetResource not working: %s", err)
}
}
func TestKubePolicyDeployment(t *testing.T) {
f := newFixture(t)
_, err := f.client.GetKubePolicyDeployment()

View file

@ -6,7 +6,7 @@ import (
jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
authenticationv1 "k8s.io/api/authentication/v1"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)
//Interface ... normal functions
@ -15,8 +15,8 @@ type Interface interface {
AddJSON(dataRaw []byte) error
// merges resource json under request.object
AddResource(dataRaw []byte) error
// merges userInfo json under request.userInfo
AddUserInfo(userInfo authenticationv1.UserInfo) error
// merges userInfo json under kyverno.userInfo
AddUserInfo(userInfo kyverno.UserInfo) error
EvalInterface
}
@ -27,7 +27,8 @@ type EvalInterface interface {
//Context stores the data resources as JSON
type Context struct {
mu sync.RWMutex
mu sync.RWMutex
// data map[string]interface{}
jsonRaw []byte
}
@ -54,7 +55,7 @@ func (ctx *Context) AddJSON(dataRaw []byte) error {
return nil
}
//AddResource adds data at path: request.object
//Add data at path: request.object
func (ctx *Context) AddResource(dataRaw []byte) error {
// unmarshall the resource struct
@ -82,16 +83,11 @@ func (ctx *Context) AddResource(dataRaw []byte) error {
return ctx.AddJSON(objRaw)
}
//AddUserInfo adds data at path: request.userInfo
func (ctx *Context) AddUserInfo(userInfo authenticationv1.UserInfo) error {
func (ctx *Context) AddUserInfo(userRequestInfo kyverno.RequestInfo) error {
modifiedResource := struct {
Request interface{} `json:"request"`
}{
Request: struct {
UserInfo interface{} `json:"userInfo"`
}{
UserInfo: userInfo,
},
Request: userRequestInfo,
}
objRaw, err := json.Marshal(modifiedResource)

View file

@ -4,6 +4,7 @@ import (
"reflect"
"testing"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
authenticationv1 "k8s.io/api/authentication/v1"
)
@ -46,6 +47,10 @@ func Test_addResourceAndUserContext(t *testing.T) {
Username: "admin",
UID: "014fbff9a07c",
}
userRequestInfo := kyverno.RequestInfo{
Roles: nil,
ClusterRoles: nil,
AdmissionUserInfo: userInfo}
var expectedResult string
ctx := NewContext()
@ -60,7 +65,7 @@ func Test_addResourceAndUserContext(t *testing.T) {
t.Error("exected result does not match")
}
ctx.AddUserInfo(userInfo)
ctx.AddUserInfo(userRequestInfo)
result, err = ctx.Query("request.object.apiVersion")
if err != nil {
t.Error(err)

View file

@ -1,130 +0,0 @@
package generate
import (
"time"
"fmt"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/validate"
"github.com/nirmata/kyverno/pkg/engine/variables"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
//ApplyRuleGenerator apply generate rules
func ApplyRuleGenerator(ctx context.EvalInterface, client *client.Client, ns unstructured.Unstructured, rule kyverno.Rule, policyCreationTime metav1.Time) (resp response.RuleResponse) {
startTime := time.Now()
glog.V(4).Infof("started applying generation rule %q (%v)", rule.Name, startTime)
resp.Name = rule.Name
resp.Type = "Generation"
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
glog.V(4).Infof("finished applying generation rule %q (%v)", resp.Name, resp.RuleStats.ProcessingTime)
}()
var err error
resource := &unstructured.Unstructured{}
var rdata map[string]interface{}
// To manage existing resource , we compare the creation time for the default resource to be generate and policy creation time
processExisting := func() bool {
nsCreationTime := ns.GetCreationTimestamp()
return nsCreationTime.Before(&policyCreationTime)
}()
if rule.Generation.Data != nil {
// perform variable substituion in generate resource pattern
newData := variables.SubstituteVariables(ctx, rule.Generation.Data)
glog.V(4).Info("generate rule: creates new resource")
// 1> Check if resource exists
obj, err := client.GetResource(rule.Generation.Kind, ns.GetName(), rule.Generation.Name)
if err == nil {
glog.V(4).Infof("generate rule: resource %s/%s/%s already present. checking if it contains the required configuration", rule.Generation.Kind, ns.GetName(), rule.Generation.Name)
// 2> If already exsists, then verify the content is contained
// found the resource
// check if the rule is create, if yes, then verify if the specified configuration is present in the resource
ok, err := checkResource(ctx, newData, obj)
if err != nil {
glog.V(4).Infof("generate rule: unable to check if configuration %v, is present in resource '%s/%s' in namespace '%s'", rule.Generation.Data, rule.Generation.Kind, rule.Generation.Name, ns.GetName())
resp.Success = false
resp.Message = fmt.Sprintf("unable to check if configuration %v, is present in resource '%s/%s' in namespace '%s'", rule.Generation.Data, rule.Generation.Kind, rule.Generation.Name, ns.GetName())
return resp
}
if !ok {
glog.V(4).Infof("generate rule: configuration %v not present in resource '%s/%s' in namespace '%s'", rule.Generation.Data, rule.Generation.Kind, rule.Generation.Name, ns.GetName())
resp.Success = false
resp.Message = fmt.Sprintf("configuration %v not present in resource '%s/%s' in namespace '%s'", rule.Generation.Data, rule.Generation.Kind, rule.Generation.Name, ns.GetName())
return resp
}
resp.Success = true
resp.Message = fmt.Sprintf("required configuration %v is present in resource '%s/%s' in namespace '%s'", rule.Generation.Data, rule.Generation.Kind, rule.Generation.Name, ns.GetName())
return resp
}
rdata, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&newData)
if err != nil {
glog.Error(err)
resp.Success = false
resp.Message = fmt.Sprintf("failed to parse the specified resource spec %v: %v", newData, err)
return resp
}
}
if rule.Generation.Clone != (kyverno.CloneFrom{}) {
glog.V(4).Info("generate rule: clone resource")
// 1> Check if resource exists
_, err := client.GetResource(rule.Generation.Kind, ns.GetName(), rule.Generation.Name)
if err == nil {
glog.V(4).Infof("generate rule: resource '%s/%s' already present in namespace '%s'", rule.Generation.Kind, rule.Generation.Name, ns.GetName())
resp.Success = true
resp.Message = fmt.Sprintf("resource '%s/%s' already present in namespace '%s'", rule.Generation.Kind, rule.Generation.Name, ns.GetName())
return resp
}
// 2> If clone already exists return
resource, err = client.GetResource(rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name)
if err != nil {
glog.V(4).Infof("generate rule: clone reference resource '%s/%s' not present in namespace '%s': %v", rule.Generation.Kind, rule.Generation.Clone.Name, rule.Generation.Clone.Namespace, err)
resp.Success = false
resp.Message = fmt.Sprintf("clone reference resource '%s/%s' not present in namespace '%s': %v", rule.Generation.Kind, rule.Generation.Clone.Name, rule.Generation.Clone.Namespace, err)
return resp
}
glog.V(4).Infof("generate rule: clone reference resource '%s/%s' present in namespace '%s'", rule.Generation.Kind, rule.Generation.Clone.Name, rule.Generation.Clone.Namespace)
rdata = resource.UnstructuredContent()
}
if processExisting {
glog.V(4).Infof("resource '%s/%s' not found in existing namespace '%s'", rule.Generation.Kind, rule.Generation.Name, ns.GetName())
resp.Success = false
resp.Message = fmt.Sprintf("resource '%s/%s' not found in existing namespace '%s'", rule.Generation.Kind, rule.Generation.Name, ns.GetName())
// for existing resources we generate an error which indirectly generates a policy violation
return resp
}
resource.SetUnstructuredContent(rdata)
resource.SetName(rule.Generation.Name)
resource.SetNamespace(ns.GetName())
// Reset resource version
resource.SetResourceVersion("")
_, err = client.CreateResource(rule.Generation.Kind, ns.GetName(), resource, false)
if err != nil {
glog.V(4).Infof("generate rule: unable to create resource %s/%s/%s: %v", rule.Generation.Kind, resource.GetNamespace(), resource.GetName(), err)
resp.Success = false
resp.Message = fmt.Sprintf("unable to create resource %s/%s/%s: %v", rule.Generation.Kind, resource.GetNamespace(), resource.GetName(), err)
return resp
}
glog.V(4).Infof("generate rule: created resource %s/%s/%s", rule.Generation.Kind, resource.GetNamespace(), resource.GetName())
resp.Success = true
resp.Message = fmt.Sprintf("created resource %s/%s/%s", rule.Generation.Kind, resource.GetNamespace(), resource.GetName())
return resp
}
//checkResource checks if the config is present in th eresource
func checkResource(ctx context.EvalInterface, config interface{}, resource *unstructured.Unstructured) (bool, error) {
// we are checking if config is a subset of resource with default pattern
path, err := validate.ValidateResourceWithPattern(ctx, resource.Object, config)
if err != nil {
glog.V(4).Infof("config not a subset of resource. failed at path %s: %v", path, err)
return false, err
}
return true, nil
}

View file

@ -1,50 +1,53 @@
package engine
import (
"time"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/engine/generate"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
//Generate apply generation rules on a resource
func Generate(policyContext PolicyContext) (resp response.EngineResponse) {
//GenerateNew returns the list of rules that are applicable on this policy and resource
func GenerateNew(policyContext PolicyContext) (resp response.EngineResponse) {
policy := policyContext.Policy
ns := policyContext.NewResource
client := policyContext.Client
ctx := policyContext.Context
resource := policyContext.NewResource
admissionInfo := policyContext.AdmissionInfo
return filterRules(policy, resource, admissionInfo)
}
startTime := time.Now()
// policy information
func() {
// set policy information
resp.PolicyResponse.Policy = policy.Name
// resource details
resp.PolicyResponse.Resource.Name = ns.GetName()
resp.PolicyResponse.Resource.Kind = ns.GetKind()
resp.PolicyResponse.Resource.APIVersion = ns.GetAPIVersion()
}()
glog.V(4).Infof("started applying generation rules of policy %q (%v)", policy.Name, startTime)
defer func() {
resp.PolicyResponse.ProcessingTime = time.Since(startTime)
glog.V(4).Infof("finished applying generation rules policy %v (%v)", policy.Name, resp.PolicyResponse.ProcessingTime)
glog.V(4).Infof("Generation Rules appplied succesfully count %v for policy %q", resp.PolicyResponse.RulesAppliedCount, policy.Name)
}()
incrementAppliedRuleCount := func() {
// rules applied succesfully count
resp.PolicyResponse.RulesAppliedCount++
func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.RuleResponse {
if !rule.HasGenerate() {
return nil
}
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
return nil
}
if !MatchesResourceDescription(resource, rule) {
return nil
}
// build rule Response
return &response.RuleResponse{
Name: rule.Name,
Type: "Generation",
}
}
func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) response.EngineResponse {
resp := response.EngineResponse{
PolicyResponse: response.PolicyResponse{
Policy: policy.Name,
Resource: response.ResourceSpec{
Kind: resource.GetKind(),
Name: resource.GetName(),
Namespace: resource.GetNamespace(),
},
},
}
for _, rule := range policy.Spec.Rules {
if !rule.HasGenerate() {
continue
if ruleResp := filterRule(rule, resource, admissionInfo); ruleResp != nil {
resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, *ruleResp)
}
glog.V(4).Infof("applying policy %s generate rule %s on resource %s/%s/%s", policy.Name, rule.Name, ns.GetKind(), ns.GetNamespace(), ns.GetName())
ruleResponse := generate.ApplyRuleGenerator(ctx, client, ns, rule, policy.GetCreationTimestamp())
resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResponse)
incrementAppliedRuleCount()
}
// set resource in reponse
resp.PatchedResource = ns
return resp
}
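
GenerateNew only filters the applicable generate rules; the generate controller applies them later from the GenerateRequest. A sketch of calling it, with an illustrative wrapper name:

package main

import (
    "fmt"

    kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    "github.com/nirmata/kyverno/pkg/engine"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

// applicableGenerateRules returns the names of generate rules that match the
// incoming resource and admission info.
func applicableGenerateRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) []string {
    resp := engine.GenerateNew(engine.PolicyContext{
        Policy:        policy,
        NewResource:   resource,
        AdmissionInfo: admissionInfo,
    })
    var names []string
    for _, rule := range resp.PolicyResponse.Rules {
        names = append(names, rule.Name)
    }
    return names
}

func main() {
    var policy kyverno.ClusterPolicy
    var resource unstructured.Unstructured
    fmt.Println(applicableGenerateRules(policy, resource, kyverno.RequestInfo{})) // [] for an empty policy
}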

View file

@ -8,6 +8,7 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/rbac"
)
const (
@ -53,7 +54,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
}
startTime := time.Now()
if !matchAdmissionInfo(rule, policyContext.AdmissionInfo) {
if !rbac.MatchAdmissionInfo(rule, policyContext.AdmissionInfo) {
glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), policyContext.AdmissionInfo)
continue

View file

@ -16,7 +16,7 @@ type PolicyContext struct {
NewResource unstructured.Unstructured
// old Resource - Update operations
OldResource unstructured.Unstructured
AdmissionInfo RequestInfo
AdmissionInfo kyverno.RequestInfo
// Dynamic client - used by generate
Client *client.Client
// Contexts to store resources

View file

@ -1,4 +1,4 @@
package engine
package rbac
import (
"reflect"
@ -14,10 +14,10 @@ const (
)
// MatchAdmissionInfo returns true if the rule can be applied to the request
func matchAdmissionInfo(rule kyverno.Rule, requestInfo RequestInfo) bool {
func MatchAdmissionInfo(rule kyverno.Rule, requestInfo kyverno.RequestInfo) bool {
// when processing existing resource, it does not contain requestInfo
// skip permission checking
if reflect.DeepEqual(requestInfo, RequestInfo{}) {
if reflect.DeepEqual(requestInfo, kyverno.RequestInfo{}) {
return true
}
@ -34,7 +34,7 @@ func matchAdmissionInfo(rule kyverno.Rule, requestInfo RequestInfo) bool {
// subjects: subject1, subject2
// validateMatch return true if (role1 || role2) and (clusterRole1 || clusterRole2)
// and (subject1 || subject2) are found in requestInfo, OR operation for each list
func validateMatch(match kyverno.MatchResources, requestInfo RequestInfo) bool {
func validateMatch(match kyverno.MatchResources, requestInfo kyverno.RequestInfo) bool {
if len(match.Roles) > 0 {
if !matchRoleRefs(match.Roles, requestInfo.Roles) {
return false
@ -61,7 +61,7 @@ func validateMatch(match kyverno.MatchResources, requestInfo RequestInfo) bool {
// subjects: subject1, subject2
// validateExclude return true if none of the above found in requestInfo
// otherwise return false immediately means rule should not be applied
func validateExclude(exclude kyverno.ExcludeResources, requestInfo RequestInfo) bool {
func validateExclude(exclude kyverno.ExcludeResources, requestInfo kyverno.RequestInfo) bool {
if len(exclude.Roles) > 0 {
if matchRoleRefs(exclude.Roles, requestInfo.Roles) {
return false

View file

@ -1,4 +1,4 @@
package engine
package rbac
import (
"flag"
@ -16,14 +16,14 @@ func Test_matchAdmissionInfo(t *testing.T) {
flag.Set("v", "3")
tests := []struct {
rule kyverno.Rule
info RequestInfo
info kyverno.RequestInfo
expected bool
}{
{
rule: kyverno.Rule{
MatchResources: kyverno.MatchResources{},
},
info: RequestInfo{},
info: kyverno.RequestInfo{},
expected: true,
},
{
@ -34,7 +34,7 @@ func Test_matchAdmissionInfo(t *testing.T) {
},
},
},
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{"ns-a:role-a"},
},
expected: true,
@ -47,7 +47,7 @@ func Test_matchAdmissionInfo(t *testing.T) {
},
},
},
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{"ns-a:role"},
},
expected: false,
@ -60,7 +60,7 @@ func Test_matchAdmissionInfo(t *testing.T) {
},
},
},
info: RequestInfo{
info: kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
Username: "serviceaccount:mynamespace:mysa",
},
@ -75,7 +75,7 @@ func Test_matchAdmissionInfo(t *testing.T) {
},
},
},
info: RequestInfo{
info: kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
UID: "1",
},
@ -90,7 +90,7 @@ func Test_matchAdmissionInfo(t *testing.T) {
},
},
},
info: RequestInfo{
info: kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
Username: "kubernetes-admin",
Groups: []string{"system:masters", "system:authenticated"},
@ -101,29 +101,29 @@ func Test_matchAdmissionInfo(t *testing.T) {
}
for _, test := range tests {
assert.Assert(t, test.expected == matchAdmissionInfo(test.rule, test.info))
assert.Assert(t, test.expected == MatchAdmissionInfo(test.rule, test.info))
}
}
func Test_validateMatch(t *testing.T) {
requestInfo := []struct {
info RequestInfo
info kyverno.RequestInfo
expected bool
}{
{
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{},
},
expected: false,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{"ns-b:role-b"},
},
expected: true,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{"ns:role"},
},
expected: false,
@ -141,35 +141,35 @@ func Test_validateMatch(t *testing.T) {
}
requestInfo = []struct {
info RequestInfo
info kyverno.RequestInfo
expected bool
}{
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{},
},
expected: false,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{"role-b"},
},
expected: false,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{"clusterrole-b"},
},
expected: true,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{"clusterrole-a", "clusterrole-b"},
},
expected: true,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{"fake-a", "fake-b"},
},
expected: false,
@ -189,23 +189,23 @@ func Test_validateMatch(t *testing.T) {
func Test_validateExclude(t *testing.T) {
requestInfo := []struct {
info RequestInfo
info kyverno.RequestInfo
expected bool
}{
{
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{},
},
expected: true,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{"ns-b:role-b"},
},
expected: false,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
Roles: []string{"ns:role"},
},
expected: true,
@ -223,29 +223,29 @@ func Test_validateExclude(t *testing.T) {
}
requestInfo = []struct {
info RequestInfo
info kyverno.RequestInfo
expected bool
}{
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{},
},
expected: true,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{"role-b"},
},
expected: true,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{"clusterrole-b"},
},
expected: false,
},
{
info: RequestInfo{
info: kyverno.RequestInfo{
ClusterRoles: []string{"fake-a", "fake-b"},
},
expected: true,

View file

@ -9,6 +9,7 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/validate"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -84,14 +85,14 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) {
return response.EngineResponse{}
}
func validateResource(ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo RequestInfo) *response.EngineResponse {
func validateResource(ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.EngineResponse {
resp := &response.EngineResponse{}
for _, rule := range policy.Spec.Rules {
if !rule.HasValidate() {
continue
}
startTime := time.Now()
if !matchAdmissionInfo(rule, admissionInfo) {
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), admissionInfo)
continue

View file

@ -2,6 +2,7 @@ package variables
import (
"regexp"
"strings"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/engine/context"
@ -72,7 +73,7 @@ func substituteValue(ctx context.EvalInterface, valuePattern string) interface{}
func getValueQuery(ctx context.EvalInterface, valuePattern string) interface{} {
var emptyInterface interface{}
// extract variable {{<variable>}}
variableRegex := regexp.MustCompile("^{{(.*)}}$")
variableRegex := regexp.MustCompile("{{(.*)}}")
groups := variableRegex.FindStringSubmatch(valuePattern)
if len(groups) < 2 {
return valuePattern
@ -84,6 +85,11 @@ func getValueQuery(ctx context.EvalInterface, valuePattern string) interface{} {
glog.V(4).Infof("variable substitution failed for query %s: %v", searchPath, err)
return emptyInterface
}
// only replace the value if returned value is scalar
if val, ok := variable.(string); ok {
newVal := strings.Replace(valuePattern, groups[0], val, -1)
return newVal
}
return variable
}
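The hunk above drops the start/end anchors from the variable regex and splices scalar results back into the surrounding string. A minimal, standalone sketch of that splice step; the lookup map is an illustrative stand-in for the JMESPath context and is not part of this commit:
package main
import (
	"fmt"
	"regexp"
	"strings"
)
// resolve mimics the scalar-substitution branch: when the looked-up value is a
// string, only the {{...}} token is replaced, keeping the rest of the pattern.
func resolve(pattern string, lookup map[string]string) string {
	variableRegex := regexp.MustCompile("{{(.*)}}")
	groups := variableRegex.FindStringSubmatch(pattern)
	if len(groups) < 2 {
		return pattern
	}
	if val, ok := lookup[groups[1]]; ok {
		return strings.Replace(pattern, groups[0], val, -1)
	}
	return pattern
}
func main() {
	lookup := map[string]string{"request.userInfo.username": "user1"}
	fmt.Println(resolve("ns-owner-{{request.userInfo.username}}", lookup)) // ns-owner-user1
}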

View file

@ -5,9 +5,141 @@ import (
"reflect"
"testing"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
authenticationv1 "k8s.io/api/authentication/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
)
func Test_variablesub1(t *testing.T) {
patternMap := []byte(`
{
"kind": "ClusterRole",
"name": "ns-owner-{{request.userInfo.username}}",
"data": {
"rules": [
{
"apiGroups": [
""
],
"resources": [
"namespaces"
],
"verbs": [
"*"
],
"resourceNames": [
"{{request.object.metadata.name}}"
]
}
]
}
}
`)
resourceRaw := []byte(`
{
"metadata": {
"name": "temp",
"namespace": "n1"
},
"spec": {
"namespace": "n1",
"name": "temp1"
}
}
`)
// userInfo
userReqInfo := kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
Username: "user1",
},
}
resultMap := []byte(`{"data":{"rules":[{"apiGroups":[""],"resourceNames":["temp"],"resources":["namespaces"],"verbs":["*"]}]},"kind":"ClusterRole","name":"ns-owner-user1"}`)
var pattern, resource interface{}
json.Unmarshal(patternMap, &pattern)
json.Unmarshal(resourceRaw, &resource)
// context
ctx := context.NewContext()
ctx.AddResource(resourceRaw)
ctx.AddUserInfo(userReqInfo)
value := SubstituteVariables(ctx, pattern)
resultRaw, err := json.Marshal(value)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(resultMap, resultRaw) {
t.Log(string(resultMap))
t.Log(string(resultRaw))
t.Error("result does not match")
}
}
func Test_variablesubstitution(t *testing.T) {
patternMap := []byte(`
{
"name": "ns-owner-{{request.userInfo.username}}",
"data": {
"rules": [
{
"apiGroups": [
""
],
"resources": [
"namespaces"
],
"verbs": [
"*"
],
"resourceNames": [
"{{request.object.metadata.name}}"
]
}
]
}
}
`)
resourceRaw := []byte(`
{
"metadata": {
"name": "temp",
"namespace": "n1"
},
"spec": {
"namespace": "n1",
"name": "temp1"
}
}
`)
resultMap := []byte(`{"data":{"rules":[{"apiGroups":[""],"resourceNames":["temp"],"resources":["namespaces"],"verbs":["*"]}]},"name":"ns-owner-user1"}`)
// userInfo
userReqInfo := kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
Username: "user1",
},
}
var pattern, resource interface{}
json.Unmarshal(patternMap, &pattern)
json.Unmarshal(resourceRaw, &resource)
// context
ctx := context.NewContext()
ctx.AddResource(resourceRaw)
ctx.AddUserInfo(userReqInfo)
value := SubstituteVariables(ctx, pattern)
resultRaw, err := json.Marshal(value)
if err != nil {
t.Error(err)
}
if !reflect.DeepEqual(resultMap, resultRaw) {
t.Log(string(resultMap))
t.Log(string(resultRaw))
t.Error("result does not match")
}
}
func Test_variableSubstitutionValue(t *testing.T) {
resourceRaw := []byte(`

View file

@ -25,9 +25,9 @@ type Generator struct {
// list/get cluster policy
pLister kyvernolister.ClusterPolicyLister
// returns true if the cluster policy store has been synced at least once
pSynced cache.InformerSynced
// queue to store event generation requests
queue workqueue.RateLimitingInterface
pSynced cache.InformerSynced
// queue to store event generation requests
queue workqueue.RateLimitingInterface
// events generated at policy controller
policyCtrRecorder record.EventRecorder
// events generated at admission control
@ -52,7 +52,6 @@ func NewEventGenerator(client *client.Client, pInformer kyvernoinformer.ClusterP
policyCtrRecorder: initRecorder(client, PolicyController),
admissionCtrRecorder: initRecorder(client, AdmissionController),
genPolicyRecorder: initRecorder(client, GeneratePolicyController),
}
return &gen
}
@ -173,7 +172,7 @@ func (gen *Generator) syncHandler(key Info) error {
default:
robj, err = gen.client.GetResource(key.Kind, key.Namespace, key.Name)
if err != nil {
glog.V(4).Infof("Error creating event: unable to get resource %s, %s, will retry ", key.Kind, key.Namespace+"/"+key.Name)
glog.V(4).Infof("Error creating event: unable to get resource %s/%s/%s, will retry ", key.Kind, key.Namespace, key.Name)
return err
}
}

View file

@ -10,6 +10,8 @@ const (
PolicyApplied
//RequestBlocked the request to create/update the resource was blocked( generated from admission-controller)
RequestBlocked
//PolicyFailed policy failed
PolicyFailed
)
func (r Reason) String() string {
@ -17,5 +19,6 @@ func (r Reason) String() string {
"PolicyViolation",
"PolicyApplied",
"RequestBlocked",
"PolicyFailed",
}[r]
}

View file

@ -0,0 +1,52 @@
package cleanup
import (
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"k8s.io/apimachinery/pkg/api/errors"
)
const timoutMins = 2
const timeout = time.Minute * timoutMins // 2 minutes
func (c *Controller) processGR(gr kyverno.GenerateRequest) error {
glog.V(4).Info("processGR cleanup")
// 1-Corresponding policy has been deleted
_, err := c.pLister.Get(gr.Spec.Policy)
if errors.IsNotFound(err) {
glog.V(4).Infof("delete GR %s", gr.Name)
return c.control.Delete(gr.Name)
}
// 2- Check for elapsed time since update
if gr.Status.State == kyverno.Completed {
glog.V(4).Infof("checking if owner exists for gr %s", gr.Name)
if !ownerResourceExists(c.client, gr) {
glog.V(4).Infof("delete GR %s", gr.Name)
return c.control.Delete(gr.Name)
}
return nil
}
createTime := gr.GetCreationTimestamp()
glog.V(4).Infof("state %s", string(gr.Status.State))
if time.Since(createTime.UTC()) > timeout {
// the GR was in state ["",Failed] for more than timeout
glog.V(4).Infof("GR %s was not processed succesfully in %d minutes", gr.Name, timoutMins)
glog.V(4).Infof("delete GR %s", gr.Name)
return c.control.Delete(gr.Name)
}
return nil
}
func ownerResourceExists(client *dclient.Client, gr kyverno.GenerateRequest) bool {
_, err := client.GetResource(gr.Spec.Resource.Kind, gr.Spec.Resource.Namespace, gr.Spec.Resource.Name)
if err != nil {
glog.V(4).Info("cleanup Resource does not exits")
return false
}
glog.V(4).Info("cleanup Resource does exits")
return true
}
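The timeout rule above (delete a request stuck in "" or Failed state past two minutes) reduces to a creation-timestamp comparison. A small sketch under the same assumptions; the timestamp value is invented for illustration:
package main
import (
	"fmt"
	"time"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func main() {
	const timeout = 2 * time.Minute
	// stand-in for gr.GetCreationTimestamp(): a request created three minutes ago
	created := metav1.NewTime(time.Now().Add(-3 * time.Minute))
	if time.Since(created.Time.UTC()) > timeout {
		fmt.Println("stale generate request: delete it")
	}
}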

View file

@ -0,0 +1,224 @@
package cleanup
import (
"fmt"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
maxRetries = 5
)
type Controller struct {
// dynamic client implementation
client *dclient.Client
// typed client for kyverno CRDs
kyvernoClient *kyvernoclient.Clientset
// handler for GR CR
syncHandler func(grKey string) error
// handler to enqueue GR
enqueueGR func(gr *kyverno.GenerateRequest)
// control is used to delete the GR
control ControlInterface
// gr that need to be synced
queue workqueue.RateLimitingInterface
// pLister can list/get cluster policy from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// grLister can list/get generate request from the shared informer's store
grLister kyvernolister.GenerateRequestNamespaceLister
// pSynced returns true if the cluster policy has been synced at least once
pSynced cache.InformerSynced
// grSynced returns true if the generate request store has been synced at least once
grSynced cache.InformerSynced
}
func NewController(
kyvernoclient *kyvernoclient.Clientset,
client *dclient.Client,
pInformer kyvernoinformer.ClusterPolicyInformer,
grInformer kyvernoinformer.GenerateRequestInformer,
) *Controller {
c := Controller{
kyvernoClient: kyvernoclient,
client: client,
//TODO: do the math for worst case back off and make sure cleanup runs after that
// as we don't want a deleted GR to be re-queued
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request-cleanup"),
}
c.control = Control{client: kyvernoclient}
c.enqueueGR = c.enqueue
c.syncHandler = c.syncGenerateRequest
c.pLister = pInformer.Lister()
c.grLister = grInformer.Lister().GenerateRequests("kyverno")
c.pSynced = pInformer.Informer().HasSynced
c.grSynced = grInformer.Informer().HasSynced
pInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.deletePolicy, // we only clean up if the policy is deleted
}, 2*time.Minute)
grInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
AddFunc: c.addGR,
UpdateFunc: c.updateGR,
DeleteFunc: c.deleteGR,
}, 2*time.Minute)
return &c
}
func (c *Controller) deletePolicy(obj interface{}) {
p, ok := obj.(*kyverno.ClusterPolicy)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
_, ok = tombstone.Obj.(*kyverno.ClusterPolicy)
if !ok {
glog.Info(fmt.Errorf("Tombstone contained object that is not a Generate Request %#v", obj))
return
}
}
glog.V(4).Infof("Deleting Policy %s", p.Name)
// clean up the GR
// Get the corresponding GR
// get the list of GR for the current Policy version
grs, err := c.grLister.GetGenerateRequestsForClusterPolicy(p.Name)
if err != nil {
glog.Errorf("failed to Generate Requests for policy %s: %v", p.Name, err)
return
}
for _, gr := range grs {
c.addGR(gr)
}
}
func (c *Controller) addGR(obj interface{}) {
gr := obj.(*kyverno.GenerateRequest)
c.enqueueGR(gr)
}
func (c *Controller) updateGR(old, cur interface{}) {
gr := cur.(*kyverno.GenerateRequest)
c.enqueueGR(gr)
}
func (c *Controller) deleteGR(obj interface{}) {
gr, ok := obj.(*kyverno.GenerateRequest)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
_, ok = tombstone.Obj.(*kyverno.GenerateRequest)
if !ok {
glog.Info(fmt.Errorf("Tombstone contained object that is not a Generate Request %#v", obj))
return
}
}
glog.V(4).Infof("Deleting GR %s", gr.Name)
// sync Handler will remove it from the queue
c.enqueueGR(gr)
}
func (c *Controller) enqueue(gr *kyverno.GenerateRequest) {
key, err := cache.MetaNamespaceKeyFunc(gr)
if err != nil {
glog.Error(err)
return
}
glog.V(4).Infof("cleanup enqueu: %v", gr.Name)
c.queue.Add(key)
}
func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
glog.Info("Starting generate-policy-cleanup controller")
defer glog.Info("Shutting down generate-policy-cleanup controller")
if !cache.WaitForCacheSync(stopCh, c.pSynced, c.grSynced) {
glog.Error("generate-policy-cleanup controller: failed to sync informer cache")
return
}
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncHandler(key.(string))
c.handleErr(err, key)
return true
}
func (c *Controller) handleErr(err error, key interface{}) {
if err == nil {
c.queue.Forget(key)
return
}
if c.queue.NumRequeues(key) < maxRetries {
glog.Errorf("Error syncing Generate Request %v: %v", key, err)
c.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.Infof("Dropping generate request %q out of the queue: %v", key, err)
c.queue.Forget(key)
}
func (c *Controller) syncGenerateRequest(key string) error {
var err error
startTime := time.Now()
glog.V(4).Infof("Started syncing GR %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing GR %q (%v)", key, time.Since(startTime))
}()
_, grName, err := cache.SplitMetaNamespaceKey(key)
if errors.IsNotFound(err) {
glog.Infof("Generate Request %s has been deleted", key)
return nil
}
if err != nil {
return err
}
gr, err := c.grLister.Get(grName)
if err != nil {
return err
}
return c.processGR(*gr)
}
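The dequeue loop above (processNextWorkItem/handleErr), reused by the generate controller further below, is the standard client-go workqueue pattern: Get, process, Forget on success, AddRateLimited up to maxRetries, then drop. A compressed, runnable sketch of that retry behaviour; the key and the always-failing handler are illustrative, not part of the commit:
package main
import (
	"fmt"
	"time"
	"k8s.io/client-go/util/workqueue"
)
const maxRetries = 5
func main() {
	queue := workqueue.NewNamedRateLimitingQueue(
		workqueue.NewItemExponentialFailureRateLimiter(time.Millisecond, 10*time.Millisecond), "demo")
	queue.Add("kyverno/gr-abc")
	// process always fails here to show the retry/drop path
	process := func(key string) error { return fmt.Errorf("simulated failure") }
	for i := 0; i <= maxRetries; i++ {
		key, quit := queue.Get()
		if quit {
			return
		}
		err := process(key.(string))
		queue.Done(key)
		if err == nil {
			queue.Forget(key)
			continue
		}
		if queue.NumRequeues(key) < maxRetries {
			queue.AddRateLimited(key) // retried after an exponential delay
			continue
		}
		fmt.Printf("dropping %v after %d retries: %v\n", key, maxRetries, err)
		queue.Forget(key)
	}
}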

View file

@ -0,0 +1,18 @@
package cleanup
import (
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type ControlInterface interface {
Delete(gr string) error
}
type Control struct {
client kyvernoclient.Interface
}
func (c Control) Delete(gr string) error {
return c.client.KyvernoV1().GenerateRequests("kyverno").Delete(gr,&metav1.DeleteOptions{})
}

236
pkg/generate/controller.go Normal file
View file

@ -0,0 +1,236 @@
package generate
import (
"fmt"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyviolation"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
maxRetries = 5
)
type Controller struct {
// dynamic client implementation
client *dclient.Client
// typed client for kyverno CRDs
kyvernoClient *kyvernoclient.Clientset
// event generator interface
eventGen event.Interface
// handler for GR CR
syncHandler func(grKey string) error
// handler to enqueue GR
enqueueGR func(gr *kyverno.GenerateRequest)
// grStatusControl is used to update GR status
statusControl StatusControlInterface
// Gr that need to be synced
queue workqueue.RateLimitingInterface
// pLister can list/get cluster policy from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// grLister can list/get generate request from the shared informer's store
grLister kyvernolister.GenerateRequestNamespaceLister
// pSynced returns true if the Cluster policy store has been synced at least once
pSynced cache.InformerSynced
// grSynced returns true if the Generate Request store has been synced at least once
grSynced cache.InformerSynced
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
}
func NewController(
kyvernoclient *kyvernoclient.Clientset,
client *dclient.Client,
pInformer kyvernoinformer.ClusterPolicyInformer,
grInformer kyvernoinformer.GenerateRequestInformer,
eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
) *Controller {
c := Controller{
client: client,
kyvernoClient: kyvernoclient,
eventGen: eventGen,
pvGenerator: pvGenerator,
//TODO: do the math for worst case back off and make sure cleanup runs after that
// as we don't want a deleted GR to be re-queued
queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"),
}
c.statusControl = StatusControl{client: kyvernoclient}
pInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
UpdateFunc: c.updatePolicy, // We only handle updates to policy
// Deletion of policy will be handled by cleanup controller
}, 2*time.Minute)
grInformer.Informer().AddEventHandlerWithResyncPeriod(cache.ResourceEventHandlerFuncs{
AddFunc: c.addGR,
UpdateFunc: c.updateGR,
DeleteFunc: c.deleteGR,
}, 2*time.Minute)
c.enqueueGR = c.enqueue
c.syncHandler = c.syncGenerateRequest
c.pLister = pInformer.Lister()
c.grLister = grInformer.Lister().GenerateRequests("kyverno")
c.pSynced = pInformer.Informer().HasSynced
c.grSynced = grInformer.Informer().HasSynced
return &c
}
func (c *Controller) enqueue(gr *kyverno.GenerateRequest) {
key, err := cache.MetaNamespaceKeyFunc(gr)
if err != nil {
glog.Error(err)
return
}
c.queue.Add(key)
}
func (c *Controller) updatePolicy(old, cur interface{}) {
oldP := old.(*kyverno.ClusterPolicy)
curP := cur.(*kyverno.ClusterPolicy)
if oldP.ResourceVersion == curP.ResourceVersion {
// Periodic resync will send update events for all known cluster policies.
// Two different versions of the same policy will always have different RVs.
return
}
glog.V(4).Infof("Updating Policy %s", oldP.Name)
// get the list of GR for the current Policy version
grs, err := c.grLister.GetGenerateRequestsForClusterPolicy(curP.Name)
if err != nil {
glog.Errorf("failed to Generate Requests for policy %s: %v", curP.Name, err)
return
}
// re-evaluate the GR as the policy was updated
for _, gr := range grs {
c.enqueueGR(gr)
}
}
func (c *Controller) addGR(obj interface{}) {
gr := obj.(*kyverno.GenerateRequest)
// glog.V(4).Infof("Adding GR %s; Policy %s; Resource %v", gr.Name, gr.Spec.Policy, gr.Spec.Resource)
c.enqueueGR(gr)
}
func (c *Controller) updateGR(old, cur interface{}) {
oldGr := old.(*kyverno.GenerateRequest)
curGr := cur.(*kyverno.GenerateRequest)
if oldGr.ResourceVersion == curGr.ResourceVersion {
// Periodic resync will send update events for all known generate requests.
// Two different versions of the same generate request will always have different RVs.
return
}
// only process the ones that are in "Pending"/"Completed" state
// if the Generate Request fails due to incorrect policy, it will be requeued during policy update
if curGr.Status.State == kyverno.Failed {
return
}
c.enqueueGR(curGr)
}
func (c *Controller) deleteGR(obj interface{}) {
gr, ok := obj.(*kyverno.GenerateRequest)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
_, ok = tombstone.Obj.(*kyverno.GenerateRequest)
if !ok {
glog.Info(fmt.Errorf("Tombstone contained object that is not a Generate Request %#v", obj))
return
}
}
glog.V(4).Infof("Deleting GR %s", gr.Name)
// sync Handler will remove it from the queue
c.enqueueGR(gr)
}
func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
glog.Info("Starting generate-policy controller")
defer glog.Info("Shutting down generate-policy controller")
if !cache.WaitForCacheSync(stopCh, c.pSynced, c.grSynced) {
glog.Error("generate-policy controller: failed to sync informer cache")
return
}
for i := 0; i < workers; i++ {
go wait.Until(c.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (c *Controller) worker() {
for c.processNextWorkItem() {
}
}
func (c *Controller) processNextWorkItem() bool {
key, quit := c.queue.Get()
if quit {
return false
}
defer c.queue.Done(key)
err := c.syncHandler(key.(string))
c.handleErr(err, key)
return true
}
func (c *Controller) handleErr(err error, key interface{}) {
if err == nil {
c.queue.Forget(key)
return
}
if c.queue.NumRequeues(key) < maxRetries {
glog.Errorf("Error syncing Generate Request %v: %v", key, err)
c.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.Infof("Dropping generate request %q out of the queue: %v", key, err)
c.queue.Forget(key)
}
func (c *Controller) syncGenerateRequest(key string) error {
var err error
startTime := time.Now()
glog.V(4).Infof("Started syncing GR %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing GR %q (%v)", key, time.Since(startTime))
}()
_, grName, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
return err
}
gr, err := c.grLister.Get(grName)
if err != nil {
glog.V(4).Info(err)
return err
}
return c.processGR(gr)
}

59
pkg/generate/errors.go Normal file
View file

@ -0,0 +1,59 @@
package generate
import "fmt"
// DATA
type ParseFailed struct {
spec interface{}
parseError error
}
func (e *ParseFailed) Error() string {
return fmt.Sprintf("failed to parse the resource spec %v: %v", e.spec, e.parseError.Error())
}
func NewParseFailed(spec interface{}, err error) *ParseFailed {
return &ParseFailed{spec: spec, parseError: err}
}
type Violation struct {
rule string
err error
}
func (e *Violation) Error() string {
return fmt.Sprintf("creating Violation; error %s", e.err)
}
func NewViolation(rule string, err error) *Violation {
return &Violation{rule: rule, err: err}
}
type NotFound struct {
kind string
namespace string
name string
}
func (e *NotFound) Error() string {
return fmt.Sprintf("resource %s/%s/%s not present", e.kind, e.namespace, e.name)
}
func NewNotFound(kind, namespace, name string) *NotFound {
return &NotFound{kind: kind, namespace: namespace, name: name}
}
type ConfigNotFound struct {
config interface{}
kind string
namespace string
name string
}
func (e *ConfigNotFound) Error() string {
return fmt.Sprintf("configuration %v, not present in resource %s/%s/%s", e.config, e.kind, e.namespace, e.name)
}
func NewConfigNotFound(config interface{}, kind, namespace, name string) *ConfigNotFound {
return &ConfigNotFound{config: config, kind: kind, namespace: namespace, name: name}
}

344
pkg/generate/generate.go Normal file
View file

@ -0,0 +1,344 @@
package generate
import (
"fmt"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/validate"
"github.com/nirmata/kyverno/pkg/engine/variables"
"github.com/nirmata/kyverno/pkg/policyviolation"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
)
func (c *Controller) processGR(gr *kyverno.GenerateRequest) error {
// 1 - Check if the resource exists
resource, err := getResource(c.client, gr.Spec.Resource)
if err != nil {
// Dont update status
glog.V(4).Infof("resource does not exist or is yet to be created, requeuing: %v", err)
return err
}
glog.V(4).Infof("processGR %v", gr.Status.State)
// 2 - Apply the generate policy on the resource
err = c.applyGenerate(*resource, *gr)
switch e := err.(type) {
case *Violation:
// Generate event
// - resource -> rule failed and created PV
// - policy -> failed to apply on resource and created PV
c.pvGenerator.Add(generatePV(*gr, *resource, e))
default:
// Generate event
// - resource -> rule failed
// - policy -> failed to apply on resource
glog.V(4).Info(e)
}
// 3 - Report Events
reportEvents(err, c.eventGen, *gr, *resource)
// 4 - Update Status
return updateStatus(c.statusControl, *gr, err)
}
func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyverno.GenerateRequest) error {
// Get the list of rules to be applied
// get policy
glog.V(4).Info("applyGenerate")
policy, err := c.pLister.Get(gr.Spec.Policy)
if err != nil {
glog.V(4).Infof("policy %s not found: %v", gr.Spec.Policy, err)
return nil
}
// build context
ctx := context.NewContext()
resourceRaw, err := resource.MarshalJSON()
if err != nil {
glog.V(4).Infof("failed to marshal resource: %v", err)
return err
}
ctx.AddResource(resourceRaw)
ctx.AddUserInfo(gr.Spec.Context.UserRequestInfo)
policyContext := engine.PolicyContext{
NewResource: resource,
Policy: *policy,
Context: ctx,
AdmissionInfo: gr.Spec.Context.UserRequestInfo,
}
glog.V(4).Info("GenerateNew")
// check if the policy still applies to the resource
engineResponse := engine.GenerateNew(policyContext)
if len(engineResponse.PolicyResponse.Rules) == 0 {
glog.V(4).Infof("policy %s, dont not apply to resource %v", gr.Spec.Policy, gr.Spec.Resource)
return fmt.Errorf("policy %s, dont not apply to resource %v", gr.Spec.Policy, gr.Spec.Resource)
}
glog.V(4).Infof("%v", gr)
// Apply the generate rule on resource
return applyGeneratePolicy(c.client, policyContext, gr.Status.State)
}
func updateStatus(statusControl StatusControlInterface, gr kyverno.GenerateRequest, err error) error {
if err != nil {
return statusControl.Failed(gr, err.Error())
}
// Generate request successfully processed
return statusControl.Success(gr)
}
func applyGeneratePolicy(client *dclient.Client, policyContext engine.PolicyContext, state kyverno.GenerateRequestState) error {
// Get the response as the actions to be performed on the resource
// - DATA (rule.Generation.Data)
// - - substitute values
policy := policyContext.Policy
resource := policyContext.NewResource
ctx := policyContext.Context
glog.V(4).Info("applyGeneratePolicy")
// To manage existing resources, we compare the creation time of the resource to be generated with the policy creation time
processExisting := func() bool {
rcreationTime := resource.GetCreationTimestamp()
pcreationTime := policy.GetCreationTimestamp()
return rcreationTime.Before(&pcreationTime)
}()
for _, rule := range policy.Spec.Rules {
if !rule.HasGenerate() {
continue
}
if err := applyRule(client, rule, resource, ctx, state, processExisting); err != nil {
return err
}
}
return nil
}
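The processExisting closure above skips generation for resources that pre-date the policy, so admitting an old resource does not retroactively create new objects. The check is plain metav1.Time ordering; a tiny sketch with invented timestamps standing in for the real creation times:
package main
import (
	"fmt"
	"time"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func main() {
	// stand-ins for resource.GetCreationTimestamp() and policy.GetCreationTimestamp()
	resourceCreated := metav1.NewTime(time.Now().Add(-1 * time.Hour))
	policyCreated := metav1.NewTime(time.Now())
	processExisting := resourceCreated.Before(&policyCreated)
	fmt.Println("processExisting:", processExisting) // true: the resource pre-dates the policy
}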
func applyRule(client *dclient.Client, rule kyverno.Rule, resource unstructured.Unstructured, ctx context.EvalInterface, state kyverno.GenerateRequestState, processExisting bool) error {
var rdata map[string]interface{}
var err error
// variable substitution
// - name
// - namespace
// - clone.name
// - clone.namespace
gen := variableSubsitutionForAttributes(rule.Generation, ctx)
// DATA
glog.V(4).Info("applyRule")
if gen.Data != nil {
if rdata, err = handleData(rule.Name, gen, client, resource, ctx, state); err != nil {
glog.V(4).Info(err)
switch e := err.(type) {
case *ParseFailed, *NotFound, *ConfigNotFound:
// handled errors
case *Violation:
// create policy violation
return e
default:
// errors that cant be handled
return e
}
}
if rdata == nil {
// existing resource contains the configuration
return nil
}
}
// CLONE
if gen.Clone != (kyverno.CloneFrom{}) {
if rdata, err = handleClone(gen, client, resource, ctx, state); err != nil {
switch e := err.(type) {
case *NotFound:
// handled errors
return e
default:
// errors that cant be handled
return e
}
}
if rdata == nil {
// resource already exists
return nil
}
}
if processExisting {
// handle existing resources
// policy was generated after the resource
// we do not create new resource
return err
}
// Create the generate resource
newResource := &unstructured.Unstructured{}
glog.V(4).Info(rdata)
newResource.SetUnstructuredContent(rdata)
newResource.SetName(gen.Name)
newResource.SetNamespace(gen.Namespace)
// Reset resource version
newResource.SetResourceVersion("")
// set the ownerReferences
ownerRefs := newResource.GetOwnerReferences()
// add ownerRefs
newResource.SetOwnerReferences(ownerRefs)
glog.V(4).Infof("creating resource %v", newResource)
_, err = client.CreateResource(gen.Kind, gen.Namespace, newResource, false)
if err != nil {
glog.Info(err)
return err
}
glog.V(4).Infof("created new resource %s %s %s ", gen.Kind, gen.Namespace, gen.Name)
// New Resource created successfully
return nil
}
func variableSubsitutionForAttributes(gen kyverno.Generation, ctx context.EvalInterface) kyverno.Generation {
// Name
name := gen.Name
namespace := gen.Namespace
newNameVar := variables.SubstituteVariables(ctx, name)
if newName, ok := newNameVar.(string); ok {
gen.Name = newName
}
newNamespaceVar := variables.SubstituteVariables(ctx, namespace)
if newNamespace, ok := newNamespaceVar.(string); ok {
gen.Namespace = newNamespace
}
// Clone
cloneName := gen.Clone.Name
cloneNamespace := gen.Clone.Namespace
newcloneNameVar := variables.SubstituteVariables(ctx, cloneName)
if newcloneName, ok := newcloneNameVar.(string); ok {
gen.Clone.Name = newcloneName
}
newcloneNamespaceVar := variables.SubstituteVariables(ctx, cloneNamespace)
if newcloneNamespace, ok := newcloneNamespaceVar.(string); ok {
gen.Clone.Namespace = newcloneNamespace
}
glog.V(4).Infof("var updated %v", gen.Name)
return gen
}
func createOwnerReference(ownerRefs []metav1.OwnerReference, resource unstructured.Unstructured) {
controllerFlag := true
blockOwnerDeletionFlag := true
ownerRef := metav1.OwnerReference{
APIVersion: resource.GetAPIVersion(),
Kind: resource.GetKind(),
Name: resource.GetName(),
UID: resource.GetUID(),
Controller: &controllerFlag,
BlockOwnerDeletion: &blockOwnerDeletionFlag,
}
ownerRefs = append(ownerRefs, ownerRef)
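// note: append here grows only the local slice header; callers that need the
// new owner reference must use a returned slice (or a pointer), since the
// appended entry is not visible outside this function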
}
func handleData(ruleName string, generateRule kyverno.Generation, client *dclient.Client, resource unstructured.Unstructured, ctx context.EvalInterface, state kyverno.GenerateRequestState) (map[string]interface{}, error) {
newData := variables.SubstituteVariables(ctx, generateRule.Data)
// check if resource exists
obj, err := client.GetResource(generateRule.Kind, generateRule.Namespace, generateRule.Name)
glog.V(4).Info(err)
if errors.IsNotFound(err) {
glog.V(4).Info("handleData NotFound")
glog.V(4).Info(string(state))
// Resource does not exist
if state == "" {
// Processing the request first time
rdata, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&newData)
glog.V(4).Info(err)
if err != nil {
return nil, NewParseFailed(newData, err)
}
return rdata, nil
}
glog.V(4).Info("Creating violation")
// State : Failed,Completed
// request has been processed before, so don't create the resource
// report Violation to notify the error
return nil, NewViolation(ruleName, NewNotFound(generateRule.Kind, generateRule.Namespace, generateRule.Name))
}
glog.V(4).Info(err)
if err != nil {
//something wrong while fetching resource
return nil, err
}
// Resource exists; verify the content of the resource
ok, err := checkResource(ctx, newData, obj)
if err != nil {
//something wrong with configuration
glog.V(4).Info(err)
return nil, err
}
if !ok {
return nil, NewConfigNotFound(newData, generateRule.Kind, generateRule.Namespace, generateRule.Name)
}
// Existing resource already contains the required configuration
return nil, nil
}
func handleClone(generateRule kyverno.Generation, client *dclient.Client, resource unstructured.Unstructured, ctx context.EvalInterface, state kyverno.GenerateRequestState) (map[string]interface{}, error) {
// check if resource exists
_, err := client.GetResource(generateRule.Kind, generateRule.Namespace, generateRule.Name)
if err == nil {
glog.V(4).Info("handleClone Exists")
// resource exists
return nil, nil
}
if !errors.IsNotFound(err) {
glog.V(4).Info("handleClone NotFound")
//something wrong while fetching resource
return nil, err
}
// get reference clone resource
obj, err := client.GetResource(generateRule.Kind, generateRule.Clone.Namespace, generateRule.Clone.Name)
if errors.IsNotFound(err) {
glog.V(4).Info("handleClone reference not Found")
return nil, NewNotFound(generateRule.Kind, generateRule.Clone.Namespace, generateRule.Clone.Name)
}
if err != nil {
glog.V(4).Info("handleClone reference Error")
//something wrong while fetching resource
return nil, err
}
glog.V(4).Info("handleClone refrerence sending")
return obj.UnstructuredContent(), nil
}
func checkResource(ctx context.EvalInterface, newResourceSpec interface{}, resource *unstructured.Unstructured) (bool, error) {
// check if the resource spec is a subset of the resource
path, err := validate.ValidateResourceWithPattern(ctx, resource.Object, newResourceSpec)
if err != nil {
glog.V(4).Infof("config not a subset of resource. failed at path %s: %v", path, err)
return false, err
}
return true, nil
}
func generatePV(gr kyverno.GenerateRequest, resource unstructured.Unstructured, err *Violation) policyviolation.Info {
info := policyviolation.Info{
Blocked: false,
PolicyName: gr.Spec.Policy,
Resource: resource,
Rules: []kyverno.ViolatedRule{kyverno.ViolatedRule{
Name: err.rule,
Type: "Generation",
Message: err.Error(),
}},
}
return info
}

110
pkg/generate/report.go Normal file
View file

@ -0,0 +1,110 @@
package generate
import (
"fmt"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/event"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func reportEvents(err error, eventGen event.Interface, gr kyverno.GenerateRequest, resource unstructured.Unstructured) {
if err == nil {
// Success Events
// - resource -> policy rule applied successfully
// - policy -> rule successfully applied on resource
events := successEvents(gr, resource)
eventGen.Add(events...)
return
}
switch e := err.(type) {
case *Violation:
// - resource -> rule failed and created PV
// - policy -> failed to apply on resource and created PV
glog.V(4).Infof("reporting events for %v", e)
events := failedEventsPV(err, gr, resource)
eventGen.Add(events...)
default:
// - resource -> rule failed
// - policy -> failed to apply on resource
glog.V(4).Infof("reporting events for %v", e)
events := failedEvents(err, gr, resource)
eventGen.Add(events...)
}
}
func failedEventsPV(err error, gr kyverno.GenerateRequest, resource unstructured.Unstructured) []event.Info {
var events []event.Info
// Cluster Policy
pe := event.Info{}
pe.Kind = "ClusterPolicy"
// clusterwide-resource
pe.Name = gr.Spec.Policy
pe.Reason = event.PolicyViolation.String()
pe.Source = event.GeneratePolicyController
pe.Message = fmt.Sprintf("policy failed to apply on resource %s/%s/%s creating violation: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
events = append(events, pe)
// Resource
re := event.Info{}
re.Kind = resource.GetKind()
re.Namespace = resource.GetNamespace()
re.Name = resource.GetName()
re.Reason = event.PolicyViolation.String()
re.Source = event.GeneratePolicyController
re.Message = fmt.Sprintf("policy %s failed to apply created violation: %v", gr.Spec.Policy, err)
events = append(events, re)
return events
}
func failedEvents(err error, gr kyverno.GenerateRequest, resource unstructured.Unstructured) []event.Info {
var events []event.Info
// Cluster Policy
pe := event.Info{}
pe.Kind = "ClusterPolicy"
// clusterwide-resource
pe.Name = gr.Spec.Policy
pe.Reason = event.PolicyFailed.String()
pe.Source = event.GeneratePolicyController
pe.Message = fmt.Sprintf("policy failed to apply on resource %s/%s/%s: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
events = append(events, pe)
// Resource
re := event.Info{}
re.Kind = resource.GetKind()
re.Namespace = resource.GetNamespace()
re.Name = resource.GetName()
re.Reason = event.PolicyFailed.String()
re.Source = event.GeneratePolicyController
re.Message = fmt.Sprintf("policy %s failed to apply: %v", gr.Spec.Policy, err)
events = append(events, re)
return events
}
func successEvents(gr kyverno.GenerateRequest, resource unstructured.Unstructured) []event.Info {
var events []event.Info
// Cluster Policy
pe := event.Info{}
pe.Kind = "ClusterPolicy"
// clusterwide-resource
pe.Name = gr.Spec.Policy
pe.Reason = event.PolicyApplied.String()
pe.Source = event.GeneratePolicyController
pe.Message = fmt.Sprintf("applied succesfully on resource %s/%s/%s", resource.GetKind(), resource.GetNamespace(), resource.GetName())
events = append(events, pe)
// Resource
re := event.Info{}
re.Kind = resource.GetKind()
re.Namespace = resource.GetNamespace()
re.Name = resource.GetName()
re.Reason = event.PolicyApplied.String()
re.Source = event.GeneratePolicyController
re.Message = fmt.Sprintf("policy %s succesfully applied", gr.Spec.Policy)
events = append(events, re)
return events
}

11
pkg/generate/resource.go Normal file
View file

@ -0,0 +1,11 @@
package generate
import (
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
dclient "github.com/nirmata/kyverno/pkg/dclient"
)
func getResource(client *dclient.Client, resourceSpec kyverno.ResourceSpec) (*unstructured.Unstructured,error) {
return client.GetResource(resourceSpec.Kind, resourceSpec.Namespace, resourceSpec.Name)
}

45
pkg/generate/status.go Normal file
View file

@ -0,0 +1,45 @@
package generate
import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
)
type StatusControlInterface interface {
Failed(gr kyverno.GenerateRequest, message string) error
Success(gr kyverno.GenerateRequest) error
}
// StatusControl is the default implementation of StatusControlInterface
type StatusControl struct {
client kyvernoclient.Interface
}
// Failed sets gr status.state to Failed with the given message
func (sc StatusControl) Failed(gr kyverno.GenerateRequest, message string) error {
gr.Status.State = kyverno.Failed
gr.Status.Message = message
_, err := sc.client.KyvernoV1().GenerateRequests("kyverno").UpdateStatus(&gr)
if err != nil {
glog.V(4).Infof("FAILED: updated gr %s status to %s", gr.Name, string(kyverno.Failed))
return err
}
glog.V(4).Infof("updated gr %s status to %s", gr.Name, string(kyverno.Failed))
return nil
}
// Success sets the gr status.state to Completed and clears the message
func (sc StatusControl) Success(gr kyverno.GenerateRequest) error {
gr.Status.State = kyverno.Completed
gr.Status.Message = ""
_, err := sc.client.KyvernoV1().GenerateRequests("kyverno").UpdateStatus(&gr)
if err != nil {
glog.V(4).Infof("FAILED: updated gr %s status to %s", gr.Name, string(kyverno.Completed))
return err
}
glog.V(4).Infof("updated gr %s status to %s", gr.Name, string(kyverno.Completed))
return nil
}

View file

@ -233,7 +233,7 @@ func applyPolicy(client *client.Client, resource unstructured.Unstructured, p ky
Client: client,
Context: ctx,
}
engineResponse := engine.Generate(policyContext)
engineResponse := engine.GenerateNew(policyContext)
// gather stats
gatherStat(p.Name, engineResponse.PolicyResponse)
//send stats

View file

@ -310,10 +310,7 @@ func (pc *PolicyController) syncPolicy(key string) error {
return err
}
// if the policy contains mutating & validation rules and its config does not exist, we create one
if policy.HasMutateOrValidate() {
pc.resourceWebhookWatcher.RegisterResourceWebhook()
}
pc.resourceWebhookWatcher.RegisterResourceWebhook()
// cluster policy violations
cpvList, err := pc.getClusterPolicyViolationForPolicy(policy.Name)

View file

@ -2,7 +2,6 @@ package policy
import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"k8s.io/apimachinery/pkg/labels"
)
@ -20,32 +19,12 @@ func (pc *PolicyController) removeResourceWebhookConfiguration() error {
return pc.resourceWebhookWatcher.RemoveResourceWebhookConfiguration()
}
// if policies only have generate rules, we don't need the webhook
if !hasMutateOrValidatePolicies(policies) {
glog.V(4).Info("no policies with mutating or validating webhook configurations, remove resource webhook configuration if one exists")
return pc.resourceWebhookWatcher.RemoveResourceWebhookConfiguration()
}
glog.V(4).Info("no policies with mutating or validating webhook configurations, remove resource webhook configuration if one exists")
return pc.resourceWebhookWatcher.RemoveResourceWebhookConfiguration()
return nil
}
func (pc *PolicyController) registerResourceWebhookConfiguration() {
policies, err := pc.pLister.List(labels.NewSelector())
if err != nil {
glog.Errorf("failed to register resource webhook configuration, error listing policies: %v", err)
}
if hasMutateOrValidatePolicies(policies) {
glog.V(4).Info("Found existing policy, registering resource webhook configuration")
pc.resourceWebhookWatcher.RegisterResourceWebhook()
}
}
func hasMutateOrValidatePolicies(policies []*kyverno.ClusterPolicy) bool {
for _, policy := range policies {
if (*policy).HasMutateOrValidate() {
return true
}
}
return false
pc.resourceWebhookWatcher.RegisterResourceWebhook()
}

View file

@ -57,6 +57,7 @@ func (ps *PolicyStore) Run(stopCh <-chan struct{}) {
//Register a new policy
func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
glog.V(4).Infof("adding resources %s", policy.Name)
ps.mu.Lock()
defer ps.mu.Unlock()
var pmap policyMap

View file

@ -179,7 +179,7 @@ func runTestCase(t *testing.T, tc scaseT) bool {
Client: client,
}
er = engine.Generate(policyContext)
er = engine.GenerateNew(policyContext)
t.Log(("---Generation---"))
validateResponse(t, er.PolicyResponse, tc.Expected.Generation.PolicyResponse)
// Expected generated resource will be in the same namespace as the resource

View file

@ -6,9 +6,9 @@ func Test_Mutate_EndPoint(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_mutate_endpoint.yaml")
}
func Test_Mutate_Validate_qos(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
}
// func Test_Mutate_Validate_qos(t *testing.T) {
// testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
// }
func Test_disallow_root_user(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_root_user.yaml")
@ -22,9 +22,10 @@ func Test_validate_healthChecks(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_validate_healthChecks.yaml")
}
func Test_add_networkPolicy(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/add_networkPolicy.yaml")
}
//TODO: add generate
// func Test_add_networkPolicy(t *testing.T) {
// testScenario(t, "/test/scenarios/samples/best_practices/add_networkPolicy.yaml")
// }
// namespace is blank, not "default", as the testrunner evaluates the policy engine; the "default" is added by the kube-apiserver
@ -52,9 +53,10 @@ func Test_validate_ro_rootfs(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/require_ro_rootfs.yaml")
}
func Test_add_ns_quota(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/add_ns_quota.yaml")
}
//TODO: support generate
// func Test_add_ns_quota(t *testing.T) {
// testScenario(t, "test/scenarios/samples/best_practices/add_ns_quota.yaml")
// }
func Test_validate_disallow_default_serviceaccount(t *testing.T) {
testScenario(t, "test/scenarios/other/scenario_validate_disallow_default_serviceaccount.yaml")

View file

@ -5,7 +5,7 @@ import (
"strings"
"github.com/golang/glog"
engine "github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/engine/rbac"
v1beta1 "k8s.io/api/admission/v1beta1"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
@ -96,7 +96,7 @@ func matchSubjectsMap(subject rbacv1.Subject, userInfo authenticationv1.UserInfo
}
func isServiceaccountUserInfo(username string) bool {
if strings.Contains(username, engine.SaPrefix) {
if strings.Contains(username, rbac.SaPrefix) {
return true
}
return false
@ -106,8 +106,8 @@ func isServiceaccountUserInfo(username string) bool {
// serviceaccount represents as saPrefix:namespace:name in userInfo
func matchServiceAccount(subject rbacv1.Subject, userInfo authenticationv1.UserInfo) bool {
subjectServiceAccount := subject.Namespace + ":" + subject.Name
if userInfo.Username[len(engine.SaPrefix):] != subjectServiceAccount {
glog.V(3).Infof("service account not match, expect %s, got %s", subjectServiceAccount, userInfo.Username[len(engine.SaPrefix):])
if userInfo.Username[len(rbac.SaPrefix):] != subjectServiceAccount {
glog.V(3).Infof("service account not match, expect %s, got %s", subjectServiceAccount, userInfo.Username[len(rbac.SaPrefix):])
return false
}

View file

@ -0,0 +1,115 @@
package generate
import (
"fmt"
"time"
backoff "github.com/cenkalti/backoff"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
)
type GenerateRequests interface {
Create(gr kyverno.GenerateRequestSpec) error
}
type Generator struct {
// channel to receive requests
ch chan kyverno.GenerateRequestSpec
client *kyvernoclient.Clientset
stopCh <-chan struct{}
}
func NewGenerator(client *kyvernoclient.Clientset, stopCh <-chan struct{}) *Generator {
gen := &Generator{
ch: make(chan kyverno.GenerateRequestSpec, 1000),
client: client,
stopCh: stopCh,
}
return gen
}
// blocks if the channel is full; returns an error on shutdown
func (g *Generator) Create(gr kyverno.GenerateRequestSpec) error {
glog.V(4).Infof("create GR %v", gr)
// Send to channel
select {
case g.ch <- gr:
return nil
case <-g.stopCh:
glog.Info("shutting down channel")
return fmt.Errorf("shutting down gr create channel")
}
}
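Create hands the spec to a buffered channel and falls through to an error once the stop channel closes, so admission handlers never block indefinitely during shutdown. A self-contained sketch of that select pattern; the buffer size and request names are illustrative:
package main
import "fmt"
func main() {
	ch := make(chan string, 1) // stand-in for the buffered GenerateRequestSpec channel
	stopCh := make(chan struct{})
	submit := func(req string) error {
		select {
		case ch <- req:
			return nil
		case <-stopCh:
			return fmt.Errorf("shutting down gr create channel")
		}
	}
	fmt.Println(submit("gr-1")) // <nil>: buffered
	close(stopCh)
	fmt.Println(submit("gr-2")) // buffer full and stopCh closed: shutdown error
}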
// Run starts the workers that create generate request CRs
func (g *Generator) Run(workers int) {
defer utilruntime.HandleCrash()
glog.V(4).Info("Started generate request")
defer func() {
glog.V(4).Info("Shutting down generate request")
}()
for i := 0; i < workers; i++ {
go wait.Until(g.process, time.Second, g.stopCh)
}
<-g.stopCh
}
func (g *Generator) process() {
for r := range g.ch {
glog.V(4).Infof("recived generate request %v", r)
if err := g.generate(r); err != nil {
glog.Errorf("Failed to create Generate Request CR: %v", err)
}
}
}
func (g *Generator) generate(grSpec kyverno.GenerateRequestSpec) error {
// create a generate request
if err := retryCreateResource(g.client, grSpec); err != nil {
return err
}
return nil
}
// -> receiving channel to take requests to create generate request CRs
// use worker pattern to read and create the CR resource
func retryCreateResource(client *kyvernoclient.Clientset, grSpec kyverno.GenerateRequestSpec) error {
var i int
var err error
createResource := func() error {
gr := kyverno.GenerateRequest{
Spec: grSpec,
}
gr.SetGenerateName("gr-")
gr.SetNamespace("kyverno")
// Initial state "Pending"
// TODO: status is not updated
// gr.Status.State = kyverno.Pending
// generate requests created in kyverno namespace
_, err = client.KyvernoV1().GenerateRequests("kyverno").Create(&gr)
glog.V(4).Infof("retry %v create generate request", i)
i++
return err
}
exbackoff := &backoff.ExponentialBackOff{
InitialInterval: 500 * time.Millisecond,
RandomizationFactor: 0.5,
Multiplier: 1.5,
MaxInterval: time.Second,
MaxElapsedTime: 3 * time.Second,
Clock: backoff.SystemClock,
}
exbackoff.Reset()
err = backoff.Retry(createResource, exbackoff)
if err != nil {
return err
}
return nil
}
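retryCreateResource wraps the CR creation in an exponential backoff so transient apiserver errors do not drop a generate request. A minimal runnable sketch of the same pattern with github.com/cenkalti/backoff; the failing-then-succeeding operation is simulated:
package main
import (
	"fmt"
	"time"
	backoff "github.com/cenkalti/backoff"
)
func main() {
	attempts := 0
	createGR := func() error { // stand-in for the Kyverno client's GenerateRequests Create call
		attempts++
		if attempts < 3 {
			return fmt.Errorf("apiserver not ready")
		}
		return nil
	}
	exbackoff := backoff.NewExponentialBackOff()
	exbackoff.InitialInterval = 10 * time.Millisecond
	exbackoff.MaxElapsedTime = time.Second
	if err := backoff.Retry(createGR, exbackoff); err != nil {
		fmt.Println("giving up:", err)
		return
	}
	fmt.Printf("generate request created after %d attempts\n", attempts)
}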

View file

@ -0,0 +1,90 @@
package webhooks
import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/webhooks/generate"
v1beta1 "k8s.io/api/admission/v1beta1"
)
func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []kyverno.ClusterPolicy, patchedResource []byte, roles, clusterRoles []string) (bool, string) {
var engineResponses []response.EngineResponse
// convert RAW to unstructured
resource, err := engine.ConvertToUnstructured(request.Object.Raw)
if err != nil {
//TODO: skip applying the admission control ?
glog.Errorf("unable to convert raw resource to unstructured: %v", err)
return true, ""
}
// CREATE resources do not have a name yet; it is assigned in the admission request
glog.V(4).Infof("Handle Generate: Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation)
userRequestInfo := kyverno.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: request.UserInfo}
// build context
ctx := context.NewContext()
// load incoming resource into the context
// ctx.AddResource(request.Object.Raw)
ctx.AddUserInfo(userRequestInfo)
policyContext := engine.PolicyContext{
NewResource: *resource,
AdmissionInfo: userRequestInfo,
}
// engine.GenerateNew returns a list of rules that are applicable on this resource
for _, policy := range policies {
policyContext.Policy = policy
engineResponse := engine.GenerateNew(policyContext)
if len(engineResponse.PolicyResponse.Rules) > 0 {
// some generate rules do apply to the resource
engineResponses = append(engineResponses, engineResponse)
}
}
// Adds Generate Request to a channel (queue size 1000) consumed by the generator
if err := createGenerateRequest(ws.grGenerator, userRequestInfo, engineResponses...); err != nil {
//TODO: send appropriate error
return false, "Kyverno blocked: failed to create Generate Requests"
}
// Generate stats won't be used here, as we delegate the generate rule processing
// - Filter policies that apply on this resource
// - - build CR context(userInfo+roles+clusterRoles)
// - Create CR
// - send Success
// HandleGenerate always returns success
// Filter Policies
return true, ""
}
func createGenerateRequest(gnGenerator generate.GenerateRequests, userRequestInfo kyverno.RequestInfo, engineResponses ...response.EngineResponse) error {
for _, er := range engineResponses {
if err := gnGenerator.Create(transform(userRequestInfo, er)); err != nil {
return err
}
}
return nil
}
func transform(userRequestInfo kyverno.RequestInfo, er response.EngineResponse) kyverno.GenerateRequestSpec {
gr := kyverno.GenerateRequestSpec{
Policy: er.PolicyResponse.Policy,
Resource: kyverno.ResourceSpec{
Kind: er.PolicyResponse.Resource.Kind,
Namespace: er.PolicyResponse.Resource.Namespace,
Name: er.PolicyResponse.Resource.Name,
},
Context: kyverno.GenerateRequestContext{
UserRequestInfo: userRequestInfo,
},
}
return gr
}

View file

@ -63,19 +63,20 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, polic
resource.SetGroupVersionKind(schema.GroupVersionKind{Group: request.Kind.Group, Version: request.Kind.Version, Kind: request.Kind.Kind})
resource.SetNamespace(request.Namespace)
var engineResponses []response.EngineResponse
userRequestInfo := kyverno.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: request.UserInfo}
// build context
ctx := context.NewContext()
// load incoming resource into the context
ctx.AddResource(request.Object.Raw)
ctx.AddUserInfo(request.UserInfo)
ctx.AddUserInfo(userRequestInfo)
policyContext := engine.PolicyContext{
NewResource: *resource,
Context: ctx,
AdmissionInfo: engine.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: request.UserInfo},
NewResource: *resource,
AdmissionInfo: userRequestInfo,
}
for _, policy := range policies {

View file

@ -38,10 +38,8 @@ func (ws *WebhookServer) handlePolicyValidation(request *v1beta1.AdmissionReques
if admissionResp.Allowed {
// if the policy contains mutate or validate rules and its config does not exist, we create one
if policy.HasMutateOrValidate() {
// queue the request
ws.resourceWebhookWatcher.RegisterResourceWebhook()
}
// queue the request
ws.resourceWebhookWatcher.RegisterResourceWebhook()
}
return admissionResp
}

View file

@ -26,6 +26,7 @@ import (
tlsutils "github.com/nirmata/kyverno/pkg/tls"
userinfo "github.com/nirmata/kyverno/pkg/userinfo"
"github.com/nirmata/kyverno/pkg/webhookconfig"
"github.com/nirmata/kyverno/pkg/webhooks/generate"
v1beta1 "k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
rbacinformer "k8s.io/client-go/informers/rbac/v1"
@ -66,7 +67,9 @@ type WebhookServer struct {
// store to hold policy meta data for faster lookup
pMetaStore policystore.LookupInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
pvGenerator policyviolation.GeneratorInterface
// generate request generator
grGenerator *generate.Generator
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
}
@ -85,6 +88,7 @@ func NewWebhookServer(
configHandler config.Interface,
pMetaStore policystore.LookupInterface,
pvGenerator policyviolation.GeneratorInterface,
grGenerator *generate.Generator,
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
cleanUp chan<- struct{}) (*WebhookServer, error) {
@ -116,6 +120,7 @@ func NewWebhookServer(
lastReqTime: resourceWebhookWatcher.LastReqTime,
pvGenerator: pvGenerator,
pMetaStore: pMetaStore,
grGenerator: grGenerator,
resourceWebhookWatcher: resourceWebhookWatcher,
}
mux := http.NewServeMux()
@ -249,6 +254,23 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionReques
}
}
// GENERATE
// Only applied during resource creation
// Success -> Generate Request CR created successfully
// Failed -> Failed to create Generate Request CR
if request.Operation == v1beta1.Create {
ok, msg = ws.HandleGenerate(request, policies, patchedResource, roles, clusterRoles)
if !ok {
glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: "Failure",
Message: msg,
},
}
}
}
// Successful processing of mutation & validation rules in the policy
patchType := v1beta1.PatchTypeJSONPatch
return &v1beta1.AdmissionResponse{
@ -279,6 +301,7 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
// deadline: 60 seconds (send request)
// max deadline: deadline*3 (set the deployment annotation as false)
go ws.lastReqTime.Run(ws.pLister, ws.eventGen, ws.client, checker.DefaultResync, checker.DefaultDeadline, stopCh)
}
// Stop the TLS server and return control after it has shut down

View file

@ -59,20 +59,21 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
glog.Error(err)
return true, ""
}
userRequestInfo := kyverno.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: request.UserInfo}
// build context
ctx := context.NewContext()
// load incoming resource into the context
ctx.AddResource(request.Object.Raw)
ctx.AddUserInfo(request.UserInfo)
ctx.AddUserInfo(userRequestInfo)
policyContext := engine.PolicyContext{
NewResource: newR,
OldResource: oldR,
Context: ctx,
AdmissionInfo: engine.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: request.UserInfo},
NewResource: newR,
OldResource: oldR,
Context: ctx,
AdmissionInfo: userRequestInfo,
}
var engineResponses []response.EngineResponse
for _, policy := range policies {

View file

@ -0,0 +1,62 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-ns-access-controls
annotations:
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: Create roles and role bindings for a new namespace
spec:
rules:
- name: generate-owner-role
match:
resources:
kinds:
- Namespace
name: devtest
generate:
kind: ClusterRole
name: "ns-owner-{{request.userInfo.username}}"
data:
rules:
- apiGroups: [""]
resources: ["namespaces"]
verbs: ["*"]
resourceNames:
- "{{request.object.metadata.name}}"
- name: generate-owner-role-binding
match:
resources:
kinds:
- Namespace
name: devtest
generate:
kind: ClusterRoleBinding
name: "ns-owner-{{request.userInfo.username}}-binding"
data:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: "nsowner-{{request.userInfo.username}}"
subjects:
- kind: ServiceAccount
name: "{{request.userInfo.username}}"
namespace: "{{request.object.metadata.name}}"
- name: generate-admin-role-binding
match:
resources:
kinds:
- Namespace
name: devtest
generate:
kind: RoleBinding
name: "ns-admin-{{request.userInfo.username}}-binding"
namespace: "{{request.object.metadata.name}}"
data:
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: admin
subjects:
- kind: ServiceAccount
name: "{{request.userInfo.username}}"
namespace: "{{request.object.metadata.name}}"