
policyviolation, policy controller reconciliation

shivkumar dudhani 2019-08-07 16:14:33 -07:00
parent 2200f9c784
commit 3dda879e51
18 changed files with 1856 additions and 17 deletions


@@ -169,4 +169,55 @@ spec:
name:
type: string
data:
AnyValue: {}
AnyValue: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policyviolations.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Cluster
names:
kind: PolicyViolation
plural: policyviolations
singular: policyviolation
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
required:
- policyName
- resource
- rules
properties:
policyName:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
rules:
type: array
items:
type: object
required:
- name
- type
- status
- message
---


@@ -0,0 +1,285 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policies.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Cluster
names:
kind: Policy
plural: policies
singular: policy
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
required:
- rules
properties:
# default values to be handled by user
validationFailureAction:
type: string
enum:
- enforce # blocks the resource API request if a rule fails. Default behavior
- audit # allows resource creation and reports the failed validation rules as violations
rules:
type: array
items:
type: object
required:
- name
- match
properties:
name:
type: string
match:
type: object
required:
- resources
properties:
resources:
type: object
required:
- kinds
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespace:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
exclude:
type: object
required:
- resources
properties:
resources:
type: object
properties:
kinds:
type: array
items:
type: string
name:
type: string
namespace:
type: string
selector:
properties:
matchLabels:
type: object
additionalProperties:
type: string
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
mutate:
type: object
properties:
overlay:
AnyValue: {}
patches:
type: array
items:
type: object
required:
- path
- op
properties:
path:
type: string
op:
type: string
enum:
- add
- replace
- remove
value:
AnyValue: {}
validate:
type: object
required:
- pattern
properties:
message:
type: string
pattern:
AnyValue: {}
generate:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
clone:
type: object
required:
- namespace
- name
properties:
namespace:
type: string
name:
type: string
data:
AnyValue: {}
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policyviolations.kyverno.io
spec:
group: kyverno.io
versions:
- name: v1alpha1
served: true
storage: true
scope: Cluster
names:
kind: PolicyViolation
plural: policyviolations
singular: policyviolation
subresources:
status: {}
validation:
openAPIV3Schema:
properties:
spec:
required:
- policyName
- resource
- rules
properties:
policyName:
type: string
resource:
type: object
required:
- kind
- name
properties:
kind:
type: string
name:
type: string
namespace:
type: string
rules:
type: array
items:
type: object
required:
- name
- type
- status
- message
---
kind: Namespace
apiVersion: v1
metadata:
name: "kyverno"
---
apiVersion: v1
kind: Service
metadata:
namespace: kyverno
name: kyverno-svc
labels:
app: kyverno
spec:
ports:
- port: 443
targetPort: 443
selector:
app: kyverno
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kyverno-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kyverno-service-account
namespace: kyverno
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
namespace: kyverno
name: kyverno
labels:
app: kyverno
spec:
replicas: 1
template:
metadata:
labels:
app: kyverno
spec:
serviceAccountName: kyverno-service-account
containers:
- name: kyverno
image: nirmata/kyverno:latest
args: ["--filterK8Resources","[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"]
ports:
- containerPort: 443
securityContext:
privileged: true

examples/cli/p1.yaml Normal file (27 lines)

@@ -0,0 +1,27 @@
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-cpu-memory
spec:
rules:
- name: check-pod-resources
match:
resources:
kinds:
- Pod
validate:
message: "CPU and memory resource requests and limits are required"
pattern:
spec:
containers:
# 'name: *' selects all containers in the pod
- name: "*"
resources:
limits:
# '?' requires 1 alphanumeric character and '*' means that there can be 0 or more characters.
# Using them together e.g. '?*' requires at least one character.
memory: "?*"
cpu: "?*"
requests:
memory: "?*"
cpu: "?*"

examples/cli/pv1.yaml Normal file (15 lines)

@@ -0,0 +1,15 @@
apiVersion: kyverno.io/v1alpha1
kind: PolicyViolation
metadata:
name: pv1
spec:
policyName: check-cpu-memory
resource:
kind: Pod
namespace: ""
name: pod1
rules:
- name: r1
type: Mutation
status: Failed
message: test message for rule failure

main.go (54 lines changed)

@@ -5,11 +5,12 @@ import (
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/annotations"
clientNew "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/clientNew/informers/externalversions"
"github.com/nirmata/kyverno/pkg/config"
controller "github.com/nirmata/kyverno/pkg/controller"
client "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"
gencontroller "github.com/nirmata/kyverno/pkg/gencontroller"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/sharedinformer"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/violation"
@@ -36,6 +37,22 @@ func main() {
glog.Fatalf("Error creating client: %v\n", err)
}
// check if the k8s server version is supported
// if !version.Supportedk8Server(client) {
// glog.Fatalf("the k8 server version is not supported. refer to https://github.com/nirmata/kyverno/blob/master/documentation/installation.md for more details")
// }
//-------------------------------------
// create policy client
pclient, err := clientNew.NewForConfig(clientConfig)
if err != nil {
glog.Fatalf("Error creating client: %v\n", err)
}
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, 10)
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1alpha1().Policies(), pInformer.Kyverno().V1alpha1().PolicyViolations())
if err != nil {
glog.Fatalf("error creating policy controller: %v\n", err)
}
//-------------------------------------
policyInformerFactory, err := sharedinformer.NewSharedInformerFactory(clientConfig)
if err != nil {
glog.Fatalf("Error creating policy sharedinformer: %v\n", err)
@@ -44,15 +61,15 @@ func main() {
eventController := event.NewEventController(client, policyInformerFactory)
violationBuilder := violation.NewPolicyViolationBuilder(client, policyInformerFactory, eventController)
annotationsController := annotations.NewAnnotationControler(client)
policyController := controller.NewPolicyController(
client,
policyInformerFactory,
violationBuilder,
eventController,
annotationsController,
filterK8Resources)
// policyController := controller.NewPolicyController(
// client,
// policyInformerFactory,
// violationBuilder,
// eventController,
// annotationsController,
// filterK8Resources)
genControler := gencontroller.NewGenController(client, eventController, policyInformerFactory, violationBuilder, kubeInformer.Core().V1().Namespaces(), annotationsController)
// genControler := gencontroller.NewGenController(client, eventController, policyInformerFactory, violationBuilder, kubeInformer.Core().V1().Namespaces(), annotationsController)
tlsPair, err := initTLSPemPair(clientConfig, client)
if err != nil {
glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
@@ -73,22 +90,27 @@ func main() {
glog.Fatalf("Failed registering Admission Webhooks: %v\n", err)
}
//--------
pInformer.Start(stopCh)
go pc.Run(1, stopCh)
//TODO add WG for the go routine?
//--------
policyInformerFactory.Run(stopCh)
kubeInformer.Start(stopCh)
eventController.Run(stopCh)
genControler.Run(stopCh)
// genControler.Run(stopCh)
annotationsController.Run(stopCh)
if err = policyController.Run(stopCh); err != nil {
glog.Fatalf("Error running PolicyController: %v\n", err)
}
// if err = policyController.Run(stopCh); err != nil {
// glog.Fatalf("Error running PolicyController: %v\n", err)
// }
server.RunAsync()
<-stopCh
server.Stop()
genControler.Stop()
// genControler.Stop()
eventController.Stop()
annotationsController.Stop()
policyController.Stop()
// policyController.Stop()
}
func init() {


@@ -0,0 +1,90 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package versioned
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned/typed/kyverno/v1alpha1"
discovery "k8s.io/client-go/discovery"
rest "k8s.io/client-go/rest"
flowcontrol "k8s.io/client-go/util/flowcontrol"
)
type Interface interface {
Discovery() discovery.DiscoveryInterface
KyvernoV1alpha1() kyvernov1alpha1.KyvernoV1alpha1Interface
}
// Clientset contains the clients for groups. Each group has exactly one
// version included in a Clientset.
type Clientset struct {
*discovery.DiscoveryClient
kyvernoV1alpha1 *kyvernov1alpha1.KyvernoV1alpha1Client
}
// KyvernoV1alpha1 retrieves the KyvernoV1alpha1Client
func (c *Clientset) KyvernoV1alpha1() kyvernov1alpha1.KyvernoV1alpha1Interface {
return c.kyvernoV1alpha1
}
// Discovery retrieves the DiscoveryClient
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
if c == nil {
return nil
}
return c.DiscoveryClient
}
// NewForConfig creates a new Clientset for the given config.
func NewForConfig(c *rest.Config) (*Clientset, error) {
configShallowCopy := *c
if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
}
var cs Clientset
var err error
cs.kyvernoV1alpha1, err = kyvernov1alpha1.NewForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
if err != nil {
return nil, err
}
return &cs, nil
}
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
var cs Clientset
cs.kyvernoV1alpha1 = kyvernov1alpha1.NewForConfigOrDie(c)
cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
return &cs
}
// New creates a new Clientset for the given RESTClient.
func New(c rest.Interface) *Clientset {
var cs Clientset
cs.kyvernoV1alpha1 = kyvernov1alpha1.New(c)
cs.DiscoveryClient = discovery.NewDiscoveryClient(c)
return &cs
}


@@ -0,0 +1,20 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated clientset.
package versioned


@@ -0,0 +1,77 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
clientset "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned"
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned/typed/kyverno/v1alpha1"
fakekyvernov1alpha1 "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned/typed/kyverno/v1alpha1/fake"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/discovery"
fakediscovery "k8s.io/client-go/discovery/fake"
"k8s.io/client-go/testing"
)
// NewSimpleClientset returns a clientset that will respond with the provided objects.
// It's backed by a very simple object tracker that processes creates, updates and deletions as-is,
// without applying any validations and/or defaults. It shouldn't be considered a replacement
// for a real clientset and is mostly useful in simple unit tests.
func NewSimpleClientset(objects ...runtime.Object) *Clientset {
o := testing.NewObjectTracker(scheme, codecs.UniversalDecoder())
for _, obj := range objects {
if err := o.Add(obj); err != nil {
panic(err)
}
}
cs := &Clientset{}
cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake}
cs.AddReactor("*", "*", testing.ObjectReaction(o))
cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) {
gvr := action.GetResource()
ns := action.GetNamespace()
watch, err := o.Watch(gvr, ns)
if err != nil {
return false, nil, err
}
return true, watch, nil
})
return cs
}
// Clientset implements clientset.Interface. Meant to be embedded into a
// struct to get a default implementation. This makes faking out just the method
// you want to test easier.
type Clientset struct {
testing.Fake
discovery *fakediscovery.FakeDiscovery
}
func (c *Clientset) Discovery() discovery.DiscoveryInterface {
return c.discovery
}
var _ clientset.Interface = &Clientset{}
// KyvernoV1alpha1 retrieves the KyvernoV1alpha1Client
func (c *Clientset) KyvernoV1alpha1() kyvernov1alpha1.KyvernoV1alpha1Interface {
return &fakekyvernov1alpha1.FakeKyvernoV1alpha1{Fake: &c.Fake}
}
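
A minimal usage sketch of this fake clientset in a unit test (the policy name is illustrative, and the typed-client signatures assumed here follow standard client-gen output of this era):

package fake_test

import (
	"testing"

	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
	"github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestFakeClientsetListsSeededPolicies(t *testing.T) {
	// Seed the object tracker with one pre-existing Policy.
	p := &kyverno.Policy{ObjectMeta: metav1.ObjectMeta{Name: "check-cpu-memory"}}
	cs := fake.NewSimpleClientset(p)

	list, err := cs.KyvernoV1alpha1().Policies().List(metav1.ListOptions{})
	if err != nil {
		t.Fatal(err)
	}
	if len(list.Items) != 1 {
		t.Fatalf("expected 1 policy, got %d", len(list.Items))
	}
}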


@@ -0,0 +1,20 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package has the automatically generated fake clientset.
package fake


@@ -0,0 +1,56 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package fake
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
kyvernov1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
v1.AddToGroupVersion(scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(scheme))
}


@@ -0,0 +1,20 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
// This package contains the scheme of the automatically generated clientset.
package scheme


@@ -0,0 +1,56 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by client-gen. DO NOT EDIT.
package scheme
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
serializer "k8s.io/apimachinery/pkg/runtime/serializer"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
)
var Scheme = runtime.NewScheme()
var Codecs = serializer.NewCodecFactory(Scheme)
var ParameterCodec = runtime.NewParameterCodec(Scheme)
var localSchemeBuilder = runtime.SchemeBuilder{
kyvernov1alpha1.AddToScheme,
}
// AddToScheme adds all types of this clientset into the given scheme. This allows composition
// of clientsets, like in:
//
// import (
// "k8s.io/client-go/kubernetes"
// clientsetscheme "k8s.io/client-go/kubernetes/scheme"
// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme"
// )
//
// kclientset, _ := kubernetes.NewForConfig(c)
// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme)
//
// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types
// correctly.
var AddToScheme = localSchemeBuilder.AddToScheme
func init() {
v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"})
utilruntime.Must(AddToScheme(Scheme))
}


@@ -0,0 +1,180 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
reflect "reflect"
sync "sync"
time "time"
versioned "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned"
internalinterfaces "github.com/nirmata/kyverno/pkg/clientNew/informers/externalversions/internalinterfaces"
kyverno "github.com/nirmata/kyverno/pkg/clientNew/informers/externalversions/kyverno"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// SharedInformerOption defines the functional option type for SharedInformerFactory.
type SharedInformerOption func(*sharedInformerFactory) *sharedInformerFactory
type sharedInformerFactory struct {
client versioned.Interface
namespace string
tweakListOptions internalinterfaces.TweakListOptionsFunc
lock sync.Mutex
defaultResync time.Duration
customResync map[reflect.Type]time.Duration
informers map[reflect.Type]cache.SharedIndexInformer
// startedInformers is used for tracking which informers have been started.
// This allows Start() to be called multiple times safely.
startedInformers map[reflect.Type]bool
}
// WithCustomResyncConfig sets a custom resync period for the specified informer types.
func WithCustomResyncConfig(resyncConfig map[v1.Object]time.Duration) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
for k, v := range resyncConfig {
factory.customResync[reflect.TypeOf(k)] = v
}
return factory
}
}
// WithTweakListOptions sets a custom filter on all listers of the configured SharedInformerFactory.
func WithTweakListOptions(tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.tweakListOptions = tweakListOptions
return factory
}
}
// WithNamespace limits the SharedInformerFactory to the specified namespace.
func WithNamespace(namespace string) SharedInformerOption {
return func(factory *sharedInformerFactory) *sharedInformerFactory {
factory.namespace = namespace
return factory
}
}
// NewSharedInformerFactory constructs a new instance of sharedInformerFactory for all namespaces.
func NewSharedInformerFactory(client versioned.Interface, defaultResync time.Duration) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync)
}
// NewFilteredSharedInformerFactory constructs a new instance of sharedInformerFactory.
// Listers obtained via this SharedInformerFactory will be subject to the same filters
// as specified here.
// Deprecated: Please use NewSharedInformerFactoryWithOptions instead
func NewFilteredSharedInformerFactory(client versioned.Interface, defaultResync time.Duration, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) SharedInformerFactory {
return NewSharedInformerFactoryWithOptions(client, defaultResync, WithNamespace(namespace), WithTweakListOptions(tweakListOptions))
}
// NewSharedInformerFactoryWithOptions constructs a new instance of a SharedInformerFactory with additional options.
func NewSharedInformerFactoryWithOptions(client versioned.Interface, defaultResync time.Duration, options ...SharedInformerOption) SharedInformerFactory {
factory := &sharedInformerFactory{
client: client,
namespace: v1.NamespaceAll,
defaultResync: defaultResync,
informers: make(map[reflect.Type]cache.SharedIndexInformer),
startedInformers: make(map[reflect.Type]bool),
customResync: make(map[reflect.Type]time.Duration),
}
// Apply all options
for _, opt := range options {
factory = opt(factory)
}
return factory
}
// Start initializes all requested informers.
func (f *sharedInformerFactory) Start(stopCh <-chan struct{}) {
f.lock.Lock()
defer f.lock.Unlock()
for informerType, informer := range f.informers {
if !f.startedInformers[informerType] {
go informer.Run(stopCh)
f.startedInformers[informerType] = true
}
}
}
// WaitForCacheSync waits for the caches of all started informers to sync.
func (f *sharedInformerFactory) WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool {
informers := func() map[reflect.Type]cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informers := map[reflect.Type]cache.SharedIndexInformer{}
for informerType, informer := range f.informers {
if f.startedInformers[informerType] {
informers[informerType] = informer
}
}
return informers
}()
res := map[reflect.Type]bool{}
for informType, informer := range informers {
res[informType] = cache.WaitForCacheSync(stopCh, informer.HasSynced)
}
return res
}
// InternalInformerFor returns the SharedIndexInformer for obj using an internal
// client.
func (f *sharedInformerFactory) InformerFor(obj runtime.Object, newFunc internalinterfaces.NewInformerFunc) cache.SharedIndexInformer {
f.lock.Lock()
defer f.lock.Unlock()
informerType := reflect.TypeOf(obj)
informer, exists := f.informers[informerType]
if exists {
return informer
}
resyncPeriod, exists := f.customResync[informerType]
if !exists {
resyncPeriod = f.defaultResync
}
informer = newFunc(f.client, resyncPeriod)
f.informers[informerType] = informer
return informer
}
// SharedInformerFactory provides shared informers for resources in all known
// API group versions.
type SharedInformerFactory interface {
internalinterfaces.SharedInformerFactory
ForResource(resource schema.GroupVersionResource) (GenericInformer, error)
WaitForCacheSync(stopCh <-chan struct{}) map[reflect.Type]bool
Kyverno() kyverno.Interface
}
func (f *sharedInformerFactory) Kyverno() kyverno.Interface {
return kyverno.New(f, f.namespace, f.tweakListOptions)
}
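
For context, a hedged sketch of how a caller wires this factory up (main.go in this commit does essentially this; the 10-second resync period and the label filter are illustrative assumptions, not requirements):

package example

import (
	"time"

	"github.com/golang/glog"
	versioned "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned"
	externalversions "github.com/nirmata/kyverno/pkg/clientNew/informers/externalversions"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func startInformers(client versioned.Interface, stopCh <-chan struct{}) {
	factory := externalversions.NewSharedInformerFactoryWithOptions(
		client,
		10*time.Second, // illustrative resync period
		externalversions.WithTweakListOptions(func(opts *metav1.ListOptions) {
			opts.LabelSelector = "policy" // illustrative: only objects carrying a 'policy' label
		}),
	)

	// Requesting an informer registers it with the factory before Start.
	policyInformer := factory.Kyverno().V1alpha1().Policies().Informer()

	factory.Start(stopCh) // idempotent: only starts informers not yet running
	for typ, ok := range factory.WaitForCacheSync(stopCh) {
		if !ok {
			glog.Fatalf("failed to sync informer cache for %v", typ)
		}
	}
	_ = policyInformer
}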


@@ -0,0 +1,64 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package externalversions
import (
"fmt"
v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
schema "k8s.io/apimachinery/pkg/runtime/schema"
cache "k8s.io/client-go/tools/cache"
)
// GenericInformer is type of SharedIndexInformer which will locate and delegate to other
// sharedInformers based on type
type GenericInformer interface {
Informer() cache.SharedIndexInformer
Lister() cache.GenericLister
}
type genericInformer struct {
informer cache.SharedIndexInformer
resource schema.GroupResource
}
// Informer returns the SharedIndexInformer.
func (f *genericInformer) Informer() cache.SharedIndexInformer {
return f.informer
}
// Lister returns the GenericLister.
func (f *genericInformer) Lister() cache.GenericLister {
return cache.NewGenericLister(f.Informer().GetIndexer(), f.resource)
}
// ForResource gives generic access to a shared informer of the matching type
// TODO extend this to unknown resources with a client pool
func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource) (GenericInformer, error) {
switch resource {
// Group=kyverno.io, Version=v1alpha1
case v1alpha1.SchemeGroupVersion.WithResource("policies"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1alpha1().Policies().Informer()}, nil
case v1alpha1.SchemeGroupVersion.WithResource("policyviolations"):
return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1alpha1().PolicyViolations().Informer()}, nil
}
return nil, fmt.Errorf("no informer found for %v", resource)
}
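
A small sketch of the generic lookup path this file enables (assumes a factory constructed as in the previous file; only the two registered resources resolve):

package example

import (
	v1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
	externalversions "github.com/nirmata/kyverno/pkg/clientNew/informers/externalversions"
	cache "k8s.io/client-go/tools/cache"
)

// policyInformerByGVR resolves the Policy informer generically by its GroupVersionResource.
func policyInformerByGVR(factory externalversions.SharedInformerFactory) (cache.SharedIndexInformer, error) {
	gi, err := factory.ForResource(v1alpha1.SchemeGroupVersion.WithResource("policies"))
	if err != nil {
		return nil, err // no informer is registered for this GroupVersionResource
	}
	return gi.Informer(), nil
}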


@@ -0,0 +1,40 @@
/*
Copyright The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Code generated by informer-gen. DO NOT EDIT.
package internalinterfaces
import (
time "time"
versioned "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
cache "k8s.io/client-go/tools/cache"
)
// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer
// SharedInformerFactory is a small interface to allow for adding an informer without an import cycle
type SharedInformerFactory interface {
Start(stopCh <-chan struct{})
InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}
// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)

pkg/dclient/violation.go Normal file (11 lines)

@@ -0,0 +1,11 @@
package client
import (
kyvernov1alpha1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)
// CreatePolicyViolation creates a PolicyViolation resource
func (c *Client) CreatePolicyViolation(pv kyvernov1alpha1.PolicyViolation) error {
_, err := c.CreateResource("PolicyViolation", "", pv, false) // PolicyViolation is cluster-scoped, so namespace is empty
return err
}

pkg/policy/controller.go Normal file (778 lines)

@@ -0,0 +1,778 @@
package policy
import (
"encoding/json"
"fmt"
"reflect"
"sync"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernoclient "github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned"
"github.com/nirmata/kyverno/pkg/clientNew/clientset/versioned/scheme"
informer "github.com/nirmata/kyverno/pkg/clientNew/informers/externalversions/kyverno/v1alpha1"
lister "github.com/nirmata/kyverno/pkg/clientNew/listers/kyverno/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)
const (
// maxRetries is the number of times a Policy will be retried before it is dropped out of the queue.
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
// a policy is going to be requeued:
//
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
maxRetries = 15
)
var controllerKind = kyverno.SchemeGroupVersion.WithKind("Policy")
// PolicyController is responsible for synchronizing Policy objects stored
// in the system with the corresponding policy violations
type PolicyController struct {
client *client.Client
kyvernoclient *kyvernoclient.Clientset
eventRecorder record.EventRecorder
syncHandler func(pKey string) error
enqueuePolicy func(policy *kyverno.Policy)
// pvControl is used for adopting/releasing policy violations
pvControl PVControlInterface
// Policies that need to be synced
queue workqueue.RateLimitingInterface
// pLister can list/get policy from the shared informer's store
pLister lister.PolicyLister
// pvLister can list/get policy violation from the shared informer's store
pvLister lister.PolicyViolationLister
// pListerSynced returns true if the Policy store has been synced at least once
pListerSynced cache.InformerSynced
// pvListerSynced returns true if the PolicyViolation store has been synced at least once
pvListerSynced cache.InformerSynced
}
// NewPolicyController create a new PolicyController
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer informer.PolicyInformer, pvInformer informer.PolicyViolationInformer) (*PolicyController, error) {
// Event broadcaster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)
eventInterface, err := client.GetEventsInterface()
if err != nil {
return nil, err
}
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
pc := PolicyController{
client: client,
kyvernoclient: kyvernoClient,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
}
pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addPolicy,
UpdateFunc: pc.updatePolicy,
DeleteFunc: pc.deletePolicy,
})
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addPolicyViolation,
UpdateFunc: pc.updatePolicyViolation,
DeleteFunc: pc.deletePolicyViolation,
})
pc.enqueuePolicy = pc.enqueue
pc.syncHandler = pc.syncPolicy
pc.pLister = pInformer.Lister()
pc.pvLister = pvInformer.Lister()
pc.pListerSynced = pInformer.Informer().HasSynced
pc.pvListerSynced = pvInformer.Informer().HasSynced
return &pc, nil
}
func (pc *PolicyController) addPolicy(obj interface{}) {
p := obj.(*kyverno.Policy)
glog.V(4).Infof("Adding Policy %s", p.Name)
pc.enqueuePolicy(p)
}
func (pc *PolicyController) updatePolicy(old, cur interface{}) {
oldP := old.(*kyverno.Policy)
curP := cur.(*kyverno.Policy)
glog.V(4).Infof("Updating Policy %s", oldP.Name)
pc.enqueuePolicy(curP)
}
func (pc *PolicyController) deletePolicy(obj interface{}) {
p, ok := obj.(*kyverno.Policy)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
p, ok = tombstone.Obj.(*kyverno.Policy)
if !ok {
glog.Info(fmt.Errorf("Tombstone contained object that is not a Policy %#v", obj))
return
}
}
glog.V(4).Infof("Deleting Policy %s", p.Name)
pc.enqueuePolicy(p)
}
func (pc *PolicyController) addPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.PolicyViolation)
if pv.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
// show up in a state that is already pending deletion.
pc.deletePolicyViolation(pv)
return
}
// generate labels to match the policy from the spec, if not present
if updatePolicyLabelIfNotDefined(pc.pvControl, pv) {
return
}
// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(pv); controllerRef != nil {
p := pc.resolveControllerRef(controllerRef)
if p == nil {
return
}
glog.V(4).Infof("PolicyViolation %s added.", pv.Name)
pc.enqueuePolicy(p)
return
}
// Otherwise, it's an orphan. Get a list of all matching Policies and sync
// them to see if anyone wants to adopt it.
ps := pc.getPolicyForPolicyViolation(pv)
if len(ps) == 0 {
return
}
glog.V(4).Infof("Orphan Policy Violation %s added.", pv.Name)
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) updatePolicyViolation(old, cur interface{}) {
curPV := cur.(*kyverno.PolicyViolation)
oldPV := old.(*kyverno.PolicyViolation)
if curPV.ResourceVersion == oldPV.ResourceVersion {
// Periodic resync will send update events for all known PolicyViolations.
// Two different versions of the same PolicyViolation will always have different RVs.
return
}
// generate labels to match the policy from the spec, if not present
if updatePolicyLabelIfNotDefined(pc.pvControl, curPV) {
return
}
curControllerRef := metav1.GetControllerOf(curPV)
oldControllerRef := metav1.GetControllerOf(oldPV)
controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if p := pc.resolveControllerRef(oldControllerRef); p != nil {
pc.enqueuePolicy(p)
}
}
// If it has a ControllerRef, that's all that matters.
if curControllerRef != nil {
p := pc.resolveControllerRef(curControllerRef)
if p == nil {
return
}
glog.V(4).Infof("PolicyViolation %s updated.", curPV.Name)
pc.enqueuePolicy(p)
return
}
// Otherwise, it's an orphan. If anything changed, sync matching controllers
// to see if anyone wants to adopt it now.
labelChanged := !reflect.DeepEqual(curPV.Labels, oldPV.Labels)
if labelChanged || controllerRefChanged {
ps := pc.getPolicyForPolicyViolation(curPV)
if len(ps) == 0 {
return
}
glog.V(4).Infof("Orphan PolicyViolation %s updated", curPV.Name)
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
}
// deletePolicyViolation enqueues the Policy that manages a PolicyViolation when
// the PolicyViolation is deleted. obj could be an *kyverno.PolicyViolation, or
// a DeletionFinalStateUnknown marker item.
func (pc *PolicyController) deletePolicyViolation(obj interface{}) {
pv, ok := obj.(*kyverno.PolicyViolation)
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the PolicyViolation
// changed labels the new Policy will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
pv, ok = tombstone.Obj.(*kyverno.PolicyViolation)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
}
controllerRef := metav1.GetControllerOf(pv)
if controllerRef == nil {
// No controller should care about orphans being deleted.
return
}
p := pc.resolveControllerRef(controllerRef)
if p == nil {
return
}
glog.V(4).Infof("PolicyViolation %s deleted", pv.Name)
pc.enqueuePolicy(p)
}
// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (pc *PolicyController) resolveControllerRef(controllerRef *metav1.OwnerReference) *kyverno.Policy {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerKind.Kind {
return nil
}
p, err := pc.pLister.Get(controllerRef.Name)
if err != nil {
return nil
}
if p.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return p
}
func (pc *PolicyController) getPolicyForPolicyViolation(pv *kyverno.PolicyViolation) []*kyverno.Policy {
policies, err := pc.pLister.GetPolicyForPolicyViolation(pv)
if err != nil || len(policies) == 0 {
return nil
}
// Because all PolicyViolations belonging to a policy should have a unique label key,
// there should never be more than one policy returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers, for now we just return the older one
if len(policies) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one policy is selecting policy violation %s with labels: %#v, returning %s",
pv.Name, pv.Labels, policies[0].Name)
}
return policies
}
func (pc *PolicyController) enqueue(policy *kyverno.Policy) {
key, err := cache.MetaNamespaceKeyFunc(policy)
if err != nil {
glog.Error(err)
return
}
pc.queue.Add(key)
}
// Run begins watching and syncing.
func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer pc.queue.ShutDown()
glog.Info("Starting policy controller")
defer glog.Info("Shutting down policy controller")
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced) {
return
}
for i := 0; i < workers; i++ {
go wait.Until(pc.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (pc *PolicyController) worker() {
for pc.processNextWorkItem() {
}
}
func (pc *PolicyController) processNextWorkItem() bool {
key, quit := pc.queue.Get()
if quit {
return false
}
defer pc.queue.Done(key)
err := pc.syncHandler(key.(string))
pc.handleErr(err, key)
return true
}
func (pc *PolicyController) handleErr(err error, key interface{}) {
if err == nil {
pc.queue.Forget(key)
return
}
if pc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing Policy %v: %v", key, err)
pc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping deployment %q out of the queue: %v", key, err)
pc.queue.Forget(key)
}
func (pc *PolicyController) syncPolicy(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing policy %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing policy %q (%v)", key, time.Since(startTime))
}()
policy, err := pc.pLister.Get(key)
if errors.IsNotFound(err) {
glog.V(2).Infof("Policy %v has been deleted", key)
return nil
}
if err != nil {
return err
}
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
p := policy.DeepCopy()
// TODO: Update Status to update ObservedGeneration
pvList, err := pc.getPolicyViolationsForPolicy(p)
if err != nil {
return err
}
// Add messages
if p.DeletionTimestamp != nil {
return pc.syncStatusOnly(p, pvList)
}
return nil
}
//TODO
func (pc *PolicyController) syncStatusOnly(p *kyverno.Policy, pvList []*kyverno.PolicyViolation) error {
// Sync Status based on Policy Violation
return nil
}
func (pc *PolicyController) getPolicyViolationsForPolicy(p *kyverno.Policy) ([]*kyverno.PolicyViolation, error) {
// List all PolicyViolations to find those we own but that no longer match our
// selector. They will be orphaned by ClaimPolicyViolation().
pvList, err := pc.pvLister.List(labels.Everything())
if err != nil {
return nil, err
}
policyLabelmap := map[string]string{"policy": p.Name}
// Not using a field selector, as the match function would have to cast the runtime.Object
// to get the field, while it can get labels directly, saving the cast effort
// e.g. spec.policyName!=default
// fs := fields.Set{"spec.name": name}.AsSelector().String()
ls := &metav1.LabelSelector{}
err = metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&policyLabelmap, ls, nil)
if err != nil {
return nil, fmt.Errorf("failed to generate label sector of Policy name %s: %v", p.Name, err)
}
policySelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
return nil, fmt.Errorf("Policy %s has invalid label selector: %v", p.Name, err)
}
canAdoptFunc := RecheckDeletionTimestamp(func() (metav1.Object, error) {
fresh, err := pc.kyvernoclient.KyvernoV1alpha1().Policies().Get(p.Name, metav1.GetOptions{})
if err != nil {
return nil, err
}
if fresh.UID != p.UID {
return nil, fmt.Errorf("original Policy %v is gone: got uid %v, wanted %v", p.Name, fresh.UID, p.UID)
}
return fresh, nil
})
cm := NewPolicyViolationControllerRefManager(pc.pvControl, p, policySelector, controllerKind, canAdoptFunc)
return cm.claimPolicyViolations(pvList)
}
func (m *PolicyViolationControllerRefManager) claimPolicyViolations(sets []*kyverno.PolicyViolation) ([]*kyverno.PolicyViolation, error) {
var claimed []*kyverno.PolicyViolation
var errlist []error
match := func(obj metav1.Object) bool {
return m.Selector.Matches(labels.Set(obj.GetLabels()))
}
adopt := func(obj metav1.Object) error {
return m.adoptPolicyViolation(obj.(*kyverno.PolicyViolation))
}
release := func(obj metav1.Object) error {
return m.releasePolicyViolation(obj.(*kyverno.PolicyViolation))
}
for _, pv := range sets {
ok, err := m.ClaimObject(pv, match, adopt, release)
if err != nil {
errlist = append(errlist, err)
continue
}
if ok {
claimed = append(claimed, pv)
}
}
return claimed, utilerrors.NewAggregate(errlist)
}
func (m *PolicyViolationControllerRefManager) adoptPolicyViolation(pv *kyverno.PolicyViolation) error {
if err := m.CanAdopt(); err != nil {
return fmt.Errorf("can't adopt PolicyViolation %v (%v): %v", pv.Name, pv.UID, err)
}
// Note that ValidateOwnerReferences() will reject this patch if another
// OwnerReference exists with controller=true.
//TODO Add JSON Patch Owner reference for resource
//TODO Update owner reference for resource
controllerFlag := true
blockOwnerDeletionFlag := true
pOwnerRef := metav1.OwnerReference{APIVersion: m.controllerKind.GroupVersion().String(),
Kind: m.controllerKind.Kind,
Name: m.Controller.GetName(),
UID: m.Controller.GetUID(),
Controller: &controllerFlag,
BlockOwnerDeletion: &blockOwnerDeletionFlag,
}
addControllerPatch, err := createOwnerReferencePatch(pOwnerRef)
if err != nil {
glog.Errorf("failed to add owner reference %v for PolicyViolation %s: %v", pOwnerRef, pv.Name, err)
return err
}
// addControllerPatch := fmt.Sprintf(
// `{"metadata":{"ownerReferences":[{"apiVersion":"%s","kind":"%s","name":"%s","uid":"%s","controller":true,"blockOwnerDeletion":true}],"uid":"%s"}}`,
// m.controllerKind.GroupVersion(), m.controllerKind.Kind,
// m.Controller.GetName(), m.Controller.GetUID(), pv.UID)
return m.pvControl.PatchPolicyViolation(pv.Name, addControllerPatch)
}
type patchOwnerReferenceValue struct {
Op string `json:"op"`
Path string `json:"path"`
Value []metav1.OwnerReference `json:"value"`
}
func createOwnerReferencePatch(ownerRef metav1.OwnerReference) ([]byte, error) {
payload := []patchOwnerReferenceValue{{
Op: "add",
Path: "/metadata/ownerReferences",
Value: []metav1.OwnerReference{ownerRef},
}}
return json.Marshal(payload)
}
func removeOwnerReferencePatch(ownerRef metav1.OwnerReference) ([]byte, error) {
payload := []patchOwnerReferenceValue{{
Op: "remove",
Path: "/metadata/ownerReferences",
Value: []metav1.OwnerReference{ownerRef},
}}
return json.Marshal(payload)
}
func (m *PolicyViolationControllerRefManager) releasePolicyViolation(pv *kyverno.PolicyViolation) error {
glog.V(2).Infof("patching PolicyViolation %s to remove its controllerRef to %s/%s:%s",
pv.Name, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
//TODO JSON patch for owner reference for resources
controllerFlag := true
blockOwnerDeletionFlag := true
pOwnerRef := metav1.OwnerReference{APIVersion: m.controllerKind.GroupVersion().String(),
Kind: m.controllerKind.Kind,
Name: m.Controller.GetName(),
UID: m.Controller.GetUID(),
Controller: &controllerFlag,
BlockOwnerDeletion: &blockOwnerDeletionFlag,
}
removeControllerPatch, err := removeOwnerReferencePatch(pOwnerRef)
if err != nil {
glog.Errorf("failed to add owner reference %v for PolicyViolation %s: %v", pOwnerRef, pv.Name, err)
return err
}
// deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pv.UID)
err = m.pvControl.PatchPolicyViolation(pv.Name, removeControllerPatch)
if err != nil {
if errors.IsNotFound(err) {
// If the PolicyViolation no longer exists, ignore it.
return nil
}
if errors.IsInvalid(err) {
// Invalid error will be returned in two cases: 1. the PolicyViolation
// has no owner reference, 2. the uid of the PolicyViolation doesn't
// match, which means the PolicyViolation is deleted and then recreated.
// In both cases, the error can be ignored.
return nil
}
}
return err
}
type PolicyViolationControllerRefManager struct {
BaseControllerRefManager
controllerKind schema.GroupVersionKind
pvControl PVControlInterface
}
func NewPolicyViolationControllerRefManager(
pvControl PVControlInterface,
controller metav1.Object,
selector labels.Selector,
controllerKind schema.GroupVersionKind,
canAdopt func() error,
) *PolicyViolationControllerRefManager {
m := PolicyViolationControllerRefManager{
BaseControllerRefManager: BaseControllerRefManager{
Controller: controller,
Selector: selector,
CanAdoptFunc: canAdopt,
},
controllerKind: controllerKind,
pvControl: pvControl,
}
return &m
}
type BaseControllerRefManager struct {
Controller metav1.Object
Selector labels.Selector
canAdoptErr error
canAdoptOnce sync.Once
CanAdoptFunc func() error
}
func (m *BaseControllerRefManager) CanAdopt() error {
m.canAdoptOnce.Do(func() {
if m.CanAdoptFunc != nil {
m.canAdoptErr = m.CanAdoptFunc()
}
})
return m.canAdoptErr
}
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
controllerRef := metav1.GetControllerOf(obj)
if controllerRef != nil {
if controllerRef.UID != m.Controller.GetUID() {
// Owned by someone else. Ignore
return false, nil
}
if match(obj) {
// We already own it and the selector matches.
// Return true (successfully claimed) before checking deletion timestamp.
// We're still allowed to claim things we already own while being deleted
// because doing so requires taking no actions.
return true, nil
}
// Owned by us but selector doesn't match.
// Try to release, unless we're being deleted.
if m.Controller.GetDeletionTimestamp() != nil {
return false, nil
}
if err := release(obj); err != nil {
// If the PolicyViolation no longer exists, ignore the error.
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else released it, or there was a transient error.
// The controller should requeue and try again if it's still stale.
return false, err
}
// Successfully released.
return false, nil
}
// It's an orphan.
if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
// Ignore if we're being deleted or selector doesn't match.
return false, nil
}
if obj.GetDeletionTimestamp() != nil {
// Ignore if the object is being deleted
return false, nil
}
// Selector matches. Try to adopt.
if err := adopt(obj); err != nil {
// If the PolicyViolation no longer exists, ignore the error
if errors.IsNotFound(err) {
return false, nil
}
// Either someone else claimed it first, or there was a transient error.
// The controller should requeue and try again if it's still orphaned.
return false, err
}
// Successfully adopted.
return true, nil
}
// PVControlInterface abstracts PolicyViolation patching so it can be faked in tests
type PVControlInterface interface {
PatchPolicyViolation(name string, data []byte) error
}
// RealPVControl is the default implementation of PVControlInterface.
type RealPVControl struct {
Client kyvernoclient.Interface
Recorder record.EventRecorder
}
func (r RealPVControl) PatchPolicyViolation(name string, data []byte) error {
_, err := r.Client.KyvernoV1alpha1().PolicyViolations().Patch(name, types.JSONPatchType, data)
return err
}
// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
//
// The CanAdopt() function calls getObject() to fetch the latest value,
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
return func() error {
obj, err := getObject()
if err != nil {
return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
}
if obj.GetDeletionTimestamp() != nil {
return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp())
}
return nil
}
}
type patchLabelValue struct {
Op string `json:"op"`
Path string `json:"path"`
Value string `json:"value"`
}
type patchLabelMapValue struct {
Op string `json:"op"`
Path string `json:"path"`
Value map[string]string `json:"value"`
}
func createLabelPatch(policy string) ([]byte, error) {
payload := []patchLabelValue{{
Op: "add",
Path: "/metadata/labels/policy",
Value: policy,
}}
return json.Marshal(payload)
}
func createLabelMapPatch(policy string) ([]byte, error) {
payload := []patchLabelMapValue{{
Op: "add",
Path: "/metadata/labels",
Value: map[string]string{"policy": policy},
}}
return json.Marshal(payload)
}
// updatePolicyLabelIfNotDefined adds the label 'policy' to the PolicyViolation;
// the label is used to look up the PolicyViolation's corresponding Policy
func updatePolicyLabelIfNotDefined(pvControl PVControlInterface, pv *kyverno.PolicyViolation) bool {
updateLabel := func() bool {
glog.V(4).Infof("adding label 'policy:%s' to PolicyViolation %s", pv.Spec.PolicyName, pv.Name)
// add label based on the policy spec
labels := pv.GetLabels()
if pv.Spec.PolicyName == "" {
glog.Error("policy not defined for violation")
// should be cleaned up
return false
}
if labels == nil {
// create a patch to generate the labels map with policy label
patch, err := createLabelMapPatch(pv.Spec.PolicyName)
if err != nil {
glog.Errorf("unable to init label map. %v", err)
return false
}
if err := pvControl.PatchPolicyViolation(pv.Name, patch); err != nil {
glog.Errorf("Unable to add 'policy' label to PolicyViolation %s: %v", pv.Name, err)
return false
}
// update successful
return true
}
// JSON Patch to add exact label
labelPatch, err := createLabelPatch(pv.Spec.PolicyName)
if err != nil {
glog.Errorf("failed to generate patch to add label 'policy': %v", err)
return false
}
if err := pvControl.PatchPolicyViolation(pv.Name, labelPatch); err != nil {
glog.Errorf("Unable to add 'policy' label to PolicyViolation %s: %v", pv.Name, err)
return false
}
// update successful
return true
}
var policyName string
var ok bool
// operate on a copy of the resource
curLabels := pv.GetLabels()
if policyName, ok = curLabels["policy"]; !ok {
return updateLabel()
}
// TODO: would be beneficial to add a check to verify that the policy name in the label and the resource spec match
if policyName != pv.Spec.PolicyName {
glog.Errorf("label 'policy:%s' and spec.policyName %s dont match ", policyName, pv.Spec.PolicyName)
//TODO handle this case
return updateLabel()
}
return false
}
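
As a worked example, createLabelPatch("check-cpu-memory") (borrowing the policy name from examples/cli/p1.yaml) marshals to the JSON Patch document

[{"op":"add","path":"/metadata/labels/policy","value":"check-cpu-memory"}]

which PatchPolicyViolation then applies with types.JSONPatchType. createLabelMapPatch produces the same "add" at /metadata/labels instead, for the case where the labels map does not exist yet.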

scripts/new_update-codegen.sh Executable file (27 lines)

@@ -0,0 +1,27 @@
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
case "$(uname -s)" in
Linux*) linkutil=readlink;;
Darwin*) linkutil=greadlink;;
*) machine="UNKNOWN:${unameOut}"
esac
# get nirmata root
NIRMATA_DIR=$(dirname ${BASH_SOURCE})/..
NIRMATA_ROOT=$(${linkutil} -f ${NIRMATA_DIR})
# get relative path to code generation script
CODEGEN_PKG=${NIRMATA_DIR}/vendor/k8s.io/code-generator
# get relative path of nirmata
NIRMATA_PKG=${NIRMATA_ROOT#"${GOPATH}/src/"}
# perform code generation
${CODEGEN_PKG}/generate-groups.sh \
"deepcopy,client,informer,lister" \
${NIRMATA_PKG}/pkg/clientNew \
${NIRMATA_PKG}/pkg/api \
kyverno:v1alpha1