1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2024-12-14 11:57:48 +00:00

fix conflicts

This commit is contained in:
Jim Bugwadia 2019-05-22 20:26:53 -07:00
commit 55c2c1581e
66 changed files with 909 additions and 365 deletions

2
.gitignore vendored
View file

@ -2,7 +2,7 @@ vendor
pkg/client
pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go
certs
kube-policy
kyverno
Gopkg.lock
Dockerfile
.vscode

View file

@ -46,14 +46,14 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat
if err != nil {
return nil, err
}
csrList, err := c.ListResource(CSR, "")
csrList, err := c.ListResource(CSRs, "")
if err != nil {
return nil, errors.New(fmt.Sprintf("Unable to list existing certificate requests: %v", err))
}
for _, csr := range csrList.Items {
if csr.GetName() == req.ObjectMeta.Name {
err := c.DeleteResouce(CSR, "", csr.GetName())
err := c.DeleteResouce(CSRs, "", csr.GetName())
if err != nil {
return nil, errors.New(fmt.Sprintf("Unable to delete existing certificate request: %v", err))
}
@ -62,7 +62,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat
}
}
unstrRes, err := c.CreateResource(CSR, "", req)
unstrRes, err := c.CreateResource(CSRs, "", req)
if err != nil {
return nil, err
}
@ -91,7 +91,7 @@ func (c *Client) fetchCertificateFromRequest(req *certificates.CertificateSignin
// TODO: react of SIGINT and SIGTERM
timeStart := time.Now()
for time.Now().Sub(timeStart) < time.Duration(maxWaitSeconds)*time.Second {
unstrR, err := c.GetResource(CSR, "", req.ObjectMeta.Name)
unstrR, err := c.GetResource(CSRs, "", req.ObjectMeta.Name)
if err != nil {
return nil, err
}
@ -119,7 +119,7 @@ const certificateField string = "certificate"
// Reads the pair of TLS certificate and key from the specified secret.
func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair {
name := generateSecretName(props)
unstrSecret, err := c.GetResource(Secret, props.Namespace, name)
unstrSecret, err := c.GetResource(Secrets, props.Namespace, name)
if err != nil {
c.logger.Printf("Unable to get secret %s/%s: %s", props.Namespace, name, err)
return nil
@ -147,7 +147,7 @@ func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair {
// Updates existing secret or creates new one.
func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPemPair) error {
name := generateSecretName(props)
unstrSecret, err := c.GetResource(Secret, props.Namespace, name)
unstrSecret, err := c.GetResource(Secrets, props.Namespace, name)
if err == nil {
secret, err := convertToSecret(unstrSecret)
if err != nil {
@ -159,7 +159,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem
}
secret.Data[certificateField] = pemPair.Certificate
secret.Data[privateKeyField] = pemPair.PrivateKey
_, err = c.UpdateResource(Secret, props.Namespace, secret)
_, err = c.UpdateResource(Secrets, props.Namespace, secret)
if err == nil {
c.logger.Printf("Secret %s is updated", name)
}
@ -181,7 +181,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem
},
}
_, err := c.CreateResource(Secret, props.Namespace, secret)
_, err := c.CreateResource(Secrets, props.Namespace, secret)
if err == nil {
c.logger.Printf("Secret %s is created", name)
}

View file

@ -4,7 +4,6 @@ import (
"fmt"
"log"
"os"
"strings"
"time"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
@ -17,16 +16,21 @@ import (
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/discovery"
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
csrtype "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
event "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
)
type Client struct {
logger *log.Logger
client dynamic.Interface
cachedClient discovery.CachedDiscoveryInterface
logger *log.Logger
clientConfig *rest.Config
kclient *kubernetes.Clientset
}
func NewClient(config *rest.Config, logger *log.Logger) (*Client, error) {
@ -39,10 +43,17 @@ func NewClient(config *rest.Config, logger *log.Logger) (*Client, error) {
logger = log.New(os.Stdout, "Client : ", log.LstdFlags)
}
kclient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}
return &Client{
logger: logger,
client: client,
clientConfig: config,
kclient: kclient,
cachedClient: memory.NewMemCacheClient(kclient.Discovery()),
}, nil
}
@ -62,29 +73,20 @@ func (c *Client) GetKubePolicyDeployment() (*apps.Deployment, error) {
// or generate a kube client value to access the interface
//GetEventsInterface provides typed interface for events
func (c *Client) GetEventsInterface() (event.EventInterface, error) {
kubeClient, err := newKubeClient(c.clientConfig)
if err != nil {
return nil, err
}
return kubeClient.CoreV1().Events(""), nil
return c.kclient.CoreV1().Events(""), nil
}
func (c *Client) GetCSRInterface() (csrtype.CertificateSigningRequestInterface, error) {
kubeClient, err := newKubeClient(c.clientConfig)
if err != nil {
return nil, err
}
return kubeClient.CertificatesV1beta1().CertificateSigningRequests(), nil
return c.kclient.CertificatesV1beta1().CertificateSigningRequests(), nil
}
func (c *Client) getInterface(kind string) dynamic.NamespaceableResourceInterface {
return c.client.Resource(c.getGroupVersionMapper(kind))
func (c *Client) getInterface(resource string) dynamic.NamespaceableResourceInterface {
return c.client.Resource(c.getGroupVersionMapper(resource))
}
func (c *Client) getResourceInterface(kind string, namespace string) dynamic.ResourceInterface {
func (c *Client) getResourceInterface(resource string, namespace string) dynamic.ResourceInterface {
// Get the resource interface
namespaceableInterface := c.getInterface(kind)
namespaceableInterface := c.getInterface(resource)
// Get the namespacable interface
var resourceInteface dynamic.ResourceInterface
if namespace != "" {
@ -96,58 +98,58 @@ func (c *Client) getResourceInterface(kind string, namespace string) dynamic.Res
}
// Keep this a stateful as the resource list will be based on the kubernetes version we connect to
func (c *Client) getGroupVersionMapper(kind string) schema.GroupVersionResource {
//TODO: add checks to see if the kind is supported
//TODO: build the resource list dynamically( by querying the registered resource kinds)
func (c *Client) getGroupVersionMapper(resource string) schema.GroupVersionResource {
//TODO: add checks to see if the resource is supported
//TODO: build the resource list dynamically( by querying the registered resources)
//TODO: the error scenarios
return getGrpVersionMapper(kind, c.clientConfig, false)
return c.getGVR(resource)
}
// GetResource returns the resource in unstructured/json format
func (c *Client) GetResource(kind string, namespace string, name string) (*unstructured.Unstructured, error) {
return c.getResourceInterface(kind, namespace).Get(name, meta.GetOptions{})
func (c *Client) GetResource(resource string, namespace string, name string) (*unstructured.Unstructured, error) {
return c.getResourceInterface(resource, namespace).Get(name, meta.GetOptions{})
}
// ListResource returns the list of resources in unstructured/json format
// Access items using []Items
func (c *Client) ListResource(kind string, namespace string) (*unstructured.UnstructuredList, error) {
return c.getResourceInterface(kind, namespace).List(meta.ListOptions{})
func (c *Client) ListResource(resource string, namespace string) (*unstructured.UnstructuredList, error) {
return c.getResourceInterface(resource, namespace).List(meta.ListOptions{})
}
func (c *Client) DeleteResouce(kind string, namespace string, name string) error {
return c.getResourceInterface(kind, namespace).Delete(name, &meta.DeleteOptions{})
func (c *Client) DeleteResouce(resource string, namespace string, name string) error {
return c.getResourceInterface(resource, namespace).Delete(name, &meta.DeleteOptions{})
}
// CreateResource creates object for the specified kind/namespace
func (c *Client) CreateResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
// CreateResource creates object for the specified resource/namespace
func (c *Client) CreateResource(resource string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
// convert typed to unstructured obj
if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil {
return c.getResourceInterface(kind, namespace).Create(unstructuredObj, meta.CreateOptions{})
return c.getResourceInterface(resource, namespace).Create(unstructuredObj, meta.CreateOptions{})
}
return nil, fmt.Errorf("Unable to create resource ")
}
// UpdateResource updates object for the specified kind/namespace
func (c *Client) UpdateResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
// UpdateResource updates object for the specified resource/namespace
func (c *Client) UpdateResource(resource string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
// convert typed to unstructured obj
if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil {
return c.getResourceInterface(kind, namespace).Update(unstructuredObj, meta.UpdateOptions{})
return c.getResourceInterface(resource, namespace).Update(unstructuredObj, meta.UpdateOptions{})
}
return nil, fmt.Errorf("Unable to update resource ")
}
// UpdateStatusResource updates the resource "status" subresource
func (c *Client) UpdateStatusResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
func (c *Client) UpdateStatusResource(resource string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
// convert typed to unstructured obj
if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil {
return c.getResourceInterface(kind, namespace).UpdateStatus(unstructuredObj, meta.UpdateOptions{})
return c.getResourceInterface(resource, namespace).UpdateStatus(unstructuredObj, meta.UpdateOptions{})
}
return nil, fmt.Errorf("Unable to update resource ")
}
func convertToUnstructured(obj interface{}) *unstructured.Unstructured {
unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj)
if err != nil {
utilruntime.HandleError(fmt.Errorf("Unable to convert : %v", err))
return nil
@ -174,12 +176,12 @@ func ConvertToRuntimeObject(obj *unstructured.Unstructured) (*runtime.Object, er
func (c *Client) GenerateSecret(generator types.Generation, namespace string) error {
c.logger.Printf("Preparing to create secret %s/%s", namespace, generator.Name)
secret := &v1.Secret{}
secret := v1.Secret{}
// if generator.CopyFrom != nil {
c.logger.Printf("Copying data from secret %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
// Get configMap resource
unstrSecret, err := c.GetResource(Secret, generator.CopyFrom.Namespace, generator.CopyFrom.Name)
unstrSecret, err := c.GetResource(Secrets, generator.CopyFrom.Namespace, generator.CopyFrom.Name)
if err != nil {
return err
}
@ -206,7 +208,7 @@ func (c *Client) GenerateSecret(generator types.Generation, namespace string) er
}
}
go c.createSecretAfterNamespaceIsCreated(*secret, namespace)
go c.createSecretAfterNamespaceIsCreated(secret, namespace)
return nil
}
@ -214,12 +216,12 @@ func (c *Client) GenerateSecret(generator types.Generation, namespace string) er
//GenerateConfigMap to generate configMap
func (c *Client) GenerateConfigMap(generator types.Generation, namespace string) error {
c.logger.Printf("Preparing to create configmap %s/%s", namespace, generator.Name)
configMap := &v1.ConfigMap{}
configMap := v1.ConfigMap{}
// if generator.CopyFrom != nil {
c.logger.Printf("Copying data from configmap %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
// Get configMap resource
unstrConfigMap, err := c.GetResource("configmaps", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
unstrConfigMap, err := c.GetResource(ConfigMaps, generator.CopyFrom.Namespace, generator.CopyFrom.Name)
if err != nil {
return err
}
@ -245,24 +247,24 @@ func (c *Client) GenerateConfigMap(generator types.Generation, namespace string)
configMap.Data[k] = v
}
}
go c.createConfigMapAfterNamespaceIsCreated(*configMap, namespace)
go c.createConfigMapAfterNamespaceIsCreated(configMap, namespace)
return nil
}
func convertToConfigMap(obj *unstructured.Unstructured) (*v1.ConfigMap, error) {
func convertToConfigMap(obj *unstructured.Unstructured) (v1.ConfigMap, error) {
configMap := v1.ConfigMap{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &configMap); err != nil {
return nil, err
return configMap, err
}
return &configMap, nil
return configMap, nil
}
func convertToSecret(obj *unstructured.Unstructured) (*v1.Secret, error) {
func convertToSecret(obj *unstructured.Unstructured) (v1.Secret, error) {
secret := v1.Secret{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &secret); err != nil {
return nil, err
return secret, err
}
return &secret, nil
return secret, nil
}
func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSigningRequest, error) {
@ -276,7 +278,7 @@ func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSign
func (c *Client) createConfigMapAfterNamespaceIsCreated(configMap v1.ConfigMap, namespace string) {
err := c.waitUntilNamespaceIsCreated(namespace)
if err == nil {
_, err = c.CreateResource("configmaps", namespace, configMap)
_, err = c.CreateResource(ConfigMaps, namespace, configMap)
}
if err != nil {
c.logger.Printf("Can't create a configmap: %s", err)
@ -286,7 +288,7 @@ func (c *Client) createConfigMapAfterNamespaceIsCreated(configMap v1.ConfigMap,
func (c *Client) createSecretAfterNamespaceIsCreated(secret v1.Secret, namespace string) {
err := c.waitUntilNamespaceIsCreated(namespace)
if err == nil {
_, err = c.CreateResource(Secret, namespace, secret)
_, err = c.CreateResource(Secrets, namespace, secret)
}
if err != nil {
c.logger.Printf("Can't create a secret: %s", err)
@ -299,7 +301,7 @@ func (c *Client) waitUntilNamespaceIsCreated(name string) error {
var lastError error = nil
for time.Now().Sub(timeStart) < namespaceCreationMaxWaitTime {
_, lastError = c.GetResource("namespaces", "", name)
_, lastError = c.GetResource(Namespaces, "", name)
if lastError == nil {
break
}
@ -308,10 +310,24 @@ func (c *Client) waitUntilNamespaceIsCreated(name string) error {
return lastError
}
// KindIsSupported checks if the kind is a registerd GVK
func (c *Client) KindIsSupported(kind string) bool {
kind = strings.ToLower(kind) + "s"
buildGVKMapper(c.clientConfig, false)
_, ok := getValue(kind)
return ok
func (c *Client) getGVR(resource string) schema.GroupVersionResource {
emptyGVR := schema.GroupVersionResource{}
serverresources, err := c.cachedClient.ServerPreferredResources()
if err != nil {
utilruntime.HandleError(err)
return emptyGVR
}
resources, err := discovery.GroupVersionResources(serverresources)
if err != nil {
utilruntime.HandleError(err)
return emptyGVR
}
//TODO using cached client to support cache validation and invalidation
// iterate over the key to compare the resource
for gvr, _ := range resources {
if gvr.Resource == resource {
return gvr
}
}
return emptyGVR
}

View file

@ -1,99 +1,14 @@
package client
import (
"fmt"
"strings"
"time"
"k8s.io/apimachinery/pkg/runtime/schema"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
const (
CSR string = "certificatesigningrequests"
Secret string = "secrets"
CSRs string = "certificatesigningrequests"
Secrets string = "secrets"
ConfigMaps string = "configmaps"
Namespaces string = "namespaces"
)
const namespaceCreationMaxWaitTime time.Duration = 30 * time.Second
const namespaceCreationWaitInterval time.Duration = 100 * time.Millisecond
var groupVersionMapper map[string]schema.GroupVersionResource
var kubeClient *kubernetes.Clientset
func getGrpVersionMapper(kind string, clientConfig *rest.Config, refresh bool) schema.GroupVersionResource {
// build the GVK mapper
buildGVKMapper(clientConfig, refresh)
// Query mapper
if val, ok := getValue(kind); ok {
return *val
}
utilruntime.HandleError(fmt.Errorf("Resouce '%s' not registered", kind))
return schema.GroupVersionResource{}
}
func buildGVKMapper(clientConfig *rest.Config, refresh bool) {
if groupVersionMapper == nil || refresh {
groupVersionMapper = make(map[string]schema.GroupVersionResource)
// refresh the mapper
if err := refreshRegisteredResources(groupVersionMapper, clientConfig); err != nil {
utilruntime.HandleError(err)
return
}
}
}
func getValue(kind string) (*schema.GroupVersionResource, bool) {
if groupVersionMapper == nil {
utilruntime.HandleError(fmt.Errorf("GroupVersionKind mapper is not loaded"))
return nil, false
}
if val, ok := groupVersionMapper[kind]; ok {
return &val, true
}
return nil, false
}
func refreshRegisteredResources(mapper map[string]schema.GroupVersionResource, clientConfig *rest.Config) error {
// build kubernetes client
client, err := newKubeClient(clientConfig)
if err != nil {
return err
}
// get registered server groups and resources
_, resourceList, err := client.Discovery().ServerGroupsAndResources()
if err != nil {
return err
}
for _, apiResource := range resourceList {
for _, resource := range apiResource.APIResources {
grpVersion := strings.Split(apiResource.GroupVersion, "/")
if len(grpVersion) == 2 {
mapper[resource.Name] = schema.GroupVersionResource{
Group: grpVersion[0],
Version: grpVersion[1],
Resource: resource.Name,
}
} else {
// resources with only versions
mapper[resource.Name] = schema.GroupVersionResource{
Version: apiResource.GroupVersion,
Resource: resource.Name,
}
}
}
}
return nil
}
func newKubeClient(clientConfig *rest.Config) (*kubernetes.Clientset, error) {
var err error
if kubeClient == nil {
kubeClient, err = kubernetes.NewForConfig(clientConfig)
if err != nil {
return nil, err
}
}
return kubeClient, nil
}

View file

@ -3,14 +3,14 @@
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: nirmata-kube-policy-webhook-cfg
name: nirmata-kyverno-webhook-cfg
labels:
app: kube-policy
app: kyverno
webhooks:
- name: webhook.nirmata.kube-policy
- name: webhook.nirmata.kyverno
clientConfig:
service:
name: kube-policy-svc
name: kyverno-svc
namespace: default
path: "/mutate"
caBundle: ${CA_BUNDLE}

View file

@ -3,11 +3,11 @@
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
name: nirmata-kube-policy-webhook-cfg-debug
name: nirmata-kyverno-webhook-cfg-debug
labels:
app: kube-policy
app: kyverno
webhooks:
- name: webhook.nirmata.kube-policy
- name: webhook.nirmata.kyverno
clientConfig:
url: "https://localhost/mutate"
caBundle: ${CA_BUNDLE}

View file

@ -1,9 +1,9 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policies.kubepolicy.nirmata.io
name: policies.kyverno.io
spec:
group: kubepolicy.nirmata.io
group: kyverno.io
versions:
- name: v1alpha1
served: true
@ -35,29 +35,12 @@ spec:
resource:
type: object
required:
- kind
- kinds
properties:
kind:
type: string
enum:
- ConfigMap
- CronJob
- DaemonSet
- Deployment
- Endpoints
- HorizontalPodAutoscaler
- Ingress
- Job
- LimitRange
- Namespace
- NetworkPolicy
- PersistentVolumeClaim
- PodDisruptionBudget
- PodTemplate
- ResourceQuota
- Secret
- Service
- StatefulSet
kinds:
type: array
items:
type: string
name:
type: string
selector:
@ -143,57 +126,62 @@ spec:
additionalProperties:
type: string
---
kind: Namespace
apiVersion: v1
metadata:
name: "kyverno"
---
apiVersion: v1
kind: Service
metadata:
namespace: kube-system
name: kube-policy-svc
namespace: kyverno
name: kyverno-svc
labels:
app: kube-policy
app: kyverno
spec:
ports:
- port: 443
targetPort: 443
selector:
app: kube-policy
app: kyverno
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kube-policy-service-account
namespace: kube-system
name: kyverno-service-account
namespace: kyverno
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kube-policy-admin
name: kyverno-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: kube-policy-service-account
namespace: kube-system
name: kyverno-service-account
namespace: kyverno
---
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
namespace: kube-system
name: kube-policy-deployment
namespace: kyverno
name: kyverno-deployment
labels:
app: kube-policy
app: kyverno
spec:
replicas: 1
template:
metadata:
labels:
app: kube-policy
app: kyverno
spec:
serviceAccountName: kube-policy-service-account
serviceAccountName: kyverno-service-account
containers:
- name: kube-policy
image: nirmata/kube-policy:latest
- name: kyverno
image: nirmata/kyverno:latest
imagePullPolicy: IfNotPresent
ports:
- containerPort: 443

View file

@ -26,7 +26,11 @@ kubectl logs <kyverno-pod-name> -n kyverno
# Installing in a Development Environment
To run Kyverno in a development environment see: https://github.com/nirmata/kyverno/wiki/Building
To build and run Kyverno in a development environment see: https://github.com/nirmata/kyverno/wiki/Building
To check if the controller is working, find it in the list of kyverno pods:
`kubectl get pods -n kyverno`
# Try Kyverno without a Kubernetes cluster

View file

@ -1,4 +1,4 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-deployment
@ -6,7 +6,8 @@ spec :
rules:
- name: deployment-policy
resource:
kind : Deployment
kinds :
- Deployment
selector :
matchLabels :
cli: test

View file

@ -1,4 +1,4 @@
apiVersion: policy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-cpu-memory
@ -6,7 +6,8 @@ spec:
rules:
- name: check-defined
resource:
kind: Deployment
kinds:
- Deployment
validate:
message: "Resource requests and limits are required for CPU and memory"
pattern:
@ -22,7 +23,8 @@ spec:
cpu: "?"
- name: check-memory-in-range
resource:
kind: Deployment
kinds:
- Deployment
validate:
message: "Memory request cannot be greater than 10Gi"
pattern:

View file

@ -1,4 +1,4 @@
apiVersion: policy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-host-path
@ -6,7 +6,8 @@ spec:
rules:
- name: check-host-path
resource:
kind: Pod
kinds:
- Pod
validate:
message: "Host path volumes are not allowed"
pattern:

View file

@ -1,4 +1,4 @@
apiVersion: policy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata:
name: image-pull-policy
@ -7,7 +7,8 @@ spec:
- name: image-pull-policy
message: "Image tag ':latest' requires imagePullPolicy 'Always'"
resource:
kind: Deployment
kinds:
- Deployment
overlay:
template:
spec:

View file

@ -1,4 +1,4 @@
apiVersion: policy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-host-path
@ -6,7 +6,8 @@ spec:
rules:
- name: check-host-path
resource:
kind: Service
kinds:
- Service
validate:
message: "Node port services are not allowed"
pattern:

View file

@ -1,4 +1,4 @@
apiVersion: policy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-probe-exists
@ -6,7 +6,8 @@ spec:
rules:
- name: check-liveness-probe-exists
resource:
kind: StatefulSet
kinds:
- StatefulSet
validate:
message: "a livenessProbe is required"
pattern:
@ -14,10 +15,11 @@ spec:
# In this case every object in containers list will be checked for pattern
- name: "*"
livenessProbe:
periodSeconds: ?
periodSeconds: "?"
- resource:
kind: Deployment
name: check-readiness-probe-exists
kinds:
- Deployment
name: check-readinessprobe-exists
validate:
message: "a readinessProbe is required"
pattern:
@ -25,4 +27,4 @@ spec:
# In this case every object in containers list will be checked for pattern
- name: "*"
readinessProbe:
periodSeconds: ?
periodSeconds: "?"

View file

@ -1,4 +1,4 @@
apiVersion: policy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-probe-intervals
@ -6,7 +6,8 @@ spec:
rules:
- name: check-probe-intervals
resource:
kind: Deployment
kinds:
- Deployment
validate:
message: "livenessProbe must be > 10s"
pattern:
@ -16,7 +17,9 @@ spec:
livenessProbe:
periodSeconds: ">10"
- resource:
kind: Deployment
kinds:
- Deployment
name: check-readinessprobe-intervals
validate:
pattern:
message: "readinessProbe must be > 10s"

View file

@ -1,4 +1,4 @@
apiVersion: policy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata:
name: check-whitelist-registries
@ -7,7 +7,8 @@ spec:
- name: check-whitelist-registries
message: "Registry is not allowed"
resource:
kind: Deployment
kinds:
- Deployment
validate:
pattern:
template:

View file

@ -0,0 +1,20 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: game-config
namespace: default
labels:
originalLabel : isHere
data:
ui.properties : |
color.good=green
color.bad=red
game.properties : |
enemies=predators
lives=3
configmap.data: |
ns=default
labels=originalLabel
labelscount=1

View file

@ -0,0 +1,7 @@
kind: Namespace
apiVersion: v1
metadata:
name: "ns2"
labels:
LabelForSelector : "namespace2"

View file

@ -0,0 +1,34 @@
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: "zk-kafka-address"
spec:
rules:
- name: "copy-comfigmap"
resource :
kinds :
- Namespace
selector:
matchLabels:
LabelForSelector : "namespace2"
generate :
kind: ConfigMap
name : copied-cm
copyFrom :
namespace : default
name : game-config
data :
secretData: "data from cmg"
- name: "zk-kafka-address"
resource:
kinds:
- Namespace
selector:
matchExpressions:
- {key: LabelForSelector, operator: In, values: [namespace2]}
generate:
kind: ConfigMap
name: zk-kafka-address
data:
ZK_ADDRESS: "192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181"
KAFKA_ADDRESS: "192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092"

View file

@ -0,0 +1,20 @@
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: set-image-pull-policy
spec:
rules:
- name: set-image-pull-policy
resource:
kinds:
- Deployment
mutate:
overlay:
spec:
template:
spec:
containers:
# match images which end with :latest
- (image): "*:latest"
# set the imagePullPolicy to "Always"
imagePullPolicy: "Always"

View file

@ -0,0 +1,23 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:latest
ports:
- containerPort: 80
- name: ghost
image: ghost:latest

View file

@ -0,0 +1,13 @@
apiVersion: v1
kind: Endpoints
metadata:
name: test-endpoint
labels:
label : test
subsets:
- addresses:
- ip: 192.168.10.171
ports:
- name: secure-connection
port: 443
protocol: TCP

View file

@ -0,0 +1,27 @@
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-endpoints
spec :
rules:
- name: pEP
resource:
kinds :
- Endpoints
selector:
matchLabels:
label : test
mutate:
patches:
- path : "/subsets/0/ports/0/port"
op : replace
value: 9663
- path : "/subsets/0"
op: add
value:
addresses:
- ip: "192.168.10.172"
ports:
- name: load-balancer-connection
port: 80
protocol: UDP

View file

@ -2,5 +2,5 @@ package policy
const (
// GroupName must be the same as specified in Policy CRD
GroupName = "kubepolicy.nirmata.io"
GroupName = "kyverno.io"
)

View file

@ -1,4 +1,4 @@
// +k8s:deepcopy-gen=package
// +groupName=kubepolicy.nirmata.io
// +groupName=kyverno.io
package v1alpha1

View file

@ -33,7 +33,7 @@ type Rule struct {
// ResourceDescription describes the resource to which the PolicyRule will be applied.
type ResourceDescription struct {
Kind string `json:"kind"`
Kinds []string `json:"kinds"`
Name *string `json:"name"`
Selector *metav1.LabelSelector `json:"selector"`
}

View file

@ -22,13 +22,11 @@ func (r *Rule) Validate() error {
}
// Validate checks if all necesarry fields are present and have values. Also checks a Selector.
// Returns error if resource definition is invalid.
// Returns error if
// - kinds is not defined
func (pr *ResourceDescription) Validate() error {
// TBD: selector or name MUST be specified
if pr.Kind == "" {
if len(pr.Kinds) == 0 {
return errors.New("The Kind is not specified")
} else if pr.Name == nil && pr.Selector == nil {
return errors.New("Neither Name nor Selector is specified")
}
if pr.Selector != nil {

View file

@ -9,8 +9,8 @@ import (
var defaultResourceDescriptionName = "defaultResourceDescription"
var defaultResourceDescription = ResourceDescription{
Kind: "Deployment",
Name: &defaultResourceDescriptionName,
Kinds: []string{"Deployment"},
Name: &defaultResourceDescriptionName,
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"LabelForSelector": "defaultResourceDescription"},
},
@ -43,7 +43,7 @@ func Test_ResourceDescription_EmptyKind(t *testing.T) {
func Test_ResourceDescription_EmptyNameAndSelector(t *testing.T) {
resourceDescription := ResourceDescription{
Kind: "Deployment",
Kinds: []string{"Deployment"},
}
err := resourceDescription.Validate()
assert.Assert(t, err != nil)

View file

@ -2,28 +2,28 @@ package config
const (
// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
KubePolicyNamespace = "kube-system"
WebhookServiceName = "kube-policy-svc"
KubePolicyNamespace = "kyverno"
WebhookServiceName = "kyverno-svc"
MutatingWebhookConfigurationName = "kube-policy-mutating-webhook-cfg"
MutatingWebhookName = "nirmata.kube-policy.mutating-webhook"
MutatingWebhookConfigurationName = "kyverno-mutating-webhook-cfg"
MutatingWebhookName = "nirmata.kyverno.mutating-webhook"
ValidatingWebhookConfigurationName = "kube-policy-validating-webhook-cfg"
ValidatingWebhookName = "nirmata.kube-policy.validating-webhook"
ValidatingWebhookConfigurationName = "kyverno-validating-webhook-cfg"
ValidatingWebhookName = "nirmata.kyverno.validating-webhook"
// Due to kubernetes issue, we must use next literal constants instead of deployment TypeMeta fields
// Issue: https://github.com/kubernetes/kubernetes/pull/63972
// When the issue is closed, we should use TypeMeta struct instead of this constants
DeploymentKind = "Deployment"
DeploymentAPIVersion = "extensions/v1beta1"
KubePolicyDeploymentName = "kube-policy-deployment"
KubePolicyDeploymentName = "kyverno-deployment"
)
var (
MutatingWebhookServicePath = "/mutate"
ValidatingWebhookServicePath = "/validate"
KubePolicyAppLabels = map[string]string{
"app": "kube-policy",
"app": "kyverno",
}
SupportedKinds = []string{

View file

@ -4,15 +4,14 @@ import (
"log"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
"github.com/nirmata/kyverno/pkg/engine/mutation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Mutate performs mutation. Overlay first and then mutation patches
// TODO: return events and violations
func Mutate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) ([]mutation.PatchBytes, []byte) {
var policyPatches []mutation.PatchBytes
var processedPatches []mutation.PatchBytes
func Mutate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) ([]PatchBytes, []byte) {
var policyPatches []PatchBytes
var processedPatches []PatchBytes
var err error
patchedDocument := rawResource
@ -30,7 +29,7 @@ func Mutate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersio
// Process Overlay
if rule.Mutation.Overlay != nil {
overlayPatches, err := mutation.ProcessOverlay(rule.Mutation.Overlay, rawResource)
overlayPatches, err := ProcessOverlay(policy, rawResource, gvk)
if err != nil {
log.Printf("Overlay application has failed for rule %s in policy %s, err: %v\n", rule.Name, policy.ObjectMeta.Name, err)
} else {
@ -41,7 +40,7 @@ func Mutate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersio
// Process Patches
if rule.Mutation.Patches != nil {
processedPatches, patchedDocument, err = mutation.ProcessPatches(rule.Mutation.Patches, patchedDocument)
processedPatches, patchedDocument, err = ProcessPatches(rule.Mutation.Patches, patchedDocument)
if err != nil {
log.Printf("Patches application has failed for rule %s in policy %s, err: %v\n", rule.Name, policy.ObjectMeta.Name, err)
} else {

View file

@ -1,6 +0,0 @@
package mutation
func ProcessOverlay(overlay interface{}, rawResource []byte) ([]PatchBytes, error) {
// TODO: Overlay to be implemented
return nil, nil
}

379
pkg/engine/overlay.go Normal file
View file

@ -0,0 +1,379 @@
package engine
import (
"encoding/json"
"fmt"
"log"
"reflect"
jsonpatch "github.com/evanphx/json-patch"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ProcessOverlay walks the policy rules and, for every rule whose mutation
// overlay applies to the target resource, generates the JSON patches that
// merge the overlay into the resource.
//
// Returns the accumulated patches. The second return value ([]byte) is
// reserved for the patched document and is currently always nil.
func ProcessOverlay(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) ([]PatchBytes, []byte) {
	var resource interface{}
	if err := json.Unmarshal(rawResource, &resource); err != nil {
		// A resource that cannot be parsed cannot be patched.
		log.Printf("Unable to unmarshal resource: %v\n", err)
		return nil, nil
	}

	var allPatches []PatchBytes
	for _, rule := range policy.Spec.Rules {
		// Only rules that define an overlay mutation are relevant here.
		if rule.Mutation == nil || rule.Mutation.Overlay == nil {
			continue
		}

		ok := ResourceMeetsDescription(rawResource, rule.ResourceDescription, gvk)
		if !ok {
			log.Printf("Rule \"%s\" is not applicable to resource\n", rule.Name)
			continue
		}

		overlay := *rule.Mutation.Overlay
		// BUG FIX: the original code wrote `err, _ := applyOverlay(...)`,
		// which bound the patches slice to `err`, silently discarded the
		// real error, and dropped the generated patches entirely.
		patches, err := applyOverlay(resource, overlay, "")
		if err != nil {
			log.Printf("Overlay application has failed for rule %s in policy %s, err: %v\n", rule.Name, policy.ObjectMeta.Name, err)
			continue
		}
		allPatches = append(allPatches, patches...)
	}
	return allPatches, nil
}
// applyOverlay recursively walks the overlay alongside the resource and
// produces the JSON patches needed to bring the resource in line with the
// overlay. path is the JSON-pointer prefix of the current position.
func applyOverlay(resource, overlay interface{}, path string) ([]PatchBytes, error) {
	var patches []PatchBytes

	// A type mismatch means the overlay wholly replaces the subtree at path.
	if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
		replacement, err := replaceSubtree(overlay, path)
		if err != nil {
			return nil, err
		}
		return append(patches, replacement), nil
	}

	switch overlayTyped := overlay.(type) {
	case map[string]interface{}:
		resourceMap := resource.(map[string]interface{})
		for rawKey, subOverlay := range overlayTyped {
			key := rawKey
			// Anchor keys arrive wrapped in parentheses, e.g. "(name)".
			if wrappedWithParentheses(key) {
				key = key[1 : len(key)-1]
			}
			subPath := path + key + "/"
			if subResource, present := resourceMap[key]; present {
				// Key exists on both sides: descend and merge.
				subPatches, err := applyOverlay(subResource, subOverlay, subPath)
				if err != nil {
					return nil, err
				}
				patches = append(patches, subPatches...)
			} else {
				// Key only in the overlay: insert the whole subtree.
				insertion, err := insertSubtree(subOverlay, subPath)
				if err != nil {
					return nil, err
				}
				patches = append(patches, insertion)
			}
		}
	case []interface{}:
		subPatches, err := applyOverlayToArray(resource.([]interface{}), overlayTyped, path)
		if err != nil {
			return nil, err
		}
		patches = append(patches, subPatches...)
	case string, float64, int64:
		// Scalars of matching type: overwrite the resource value.
		replacement, err := replaceSubtree(overlay, path)
		if err != nil {
			return nil, err
		}
		patches = append(patches, replacement)
	}
	return patches, nil
}
// applyOverlayToArray merges an overlay array into a resource array and
// returns the resulting JSON patches.
//
// Behavior by case:
//   - empty overlay array: rejected with an error;
//   - empty resource array: non-anchor overlay items are inserted
//     (see fillEmptyArray);
//   - element-type mismatch between the two arrays: rejected with an error;
//   - object elements: anchor entries ("(key)" fields) select which
//     resource elements each overlay element applies to;
//   - scalar elements: every overlay item is inserted at the array head.
func applyOverlayToArray(resource, overlay []interface{}, path string) ([]PatchBytes, error) {
	var appliedPatches []PatchBytes
	if len(overlay) == 0 {
		return nil, fmt.Errorf("overlay does not support empty arrays")
	}
	if len(resource) == 0 {
		patches, err := fillEmptyArray(overlay, path)
		if err != nil {
			return nil, err
		}
		return patches, nil
	}
	// Only the first elements are compared; both arrays are assumed to be
	// homogeneous.
	if reflect.TypeOf(resource[0]) != reflect.TypeOf(overlay[0]) {
		return nil, fmt.Errorf("overlay array and resource array have elements of different types: %T and %T", overlay[0], resource[0])
	}

	switch overlay[0].(type) {
	case map[string]interface{}:
		for _, overlayElement := range overlay {
			typedOverlay := overlayElement.(map[string]interface{})
			anchors := GetAnchorsFromMap(typedOverlay)
			// NOTE(review): the patch path always targets index 0 here,
			// regardless of which resource element matched — confirm this
			// is the intended JSON-pointer target.
			currentPath := path + "0/"
			for _, resourceElement := range resource {
				typedResource := resourceElement.(map[string]interface{})

				if len(anchors) > 0 {
					// Anchored overlay: apply only to resource elements
					// whose fields match every anchor value.
					if !skipArrayObject(typedResource, anchors) {
						patches, err := applyOverlay(resourceElement, overlayElement, currentPath)
						if err != nil {
							return nil, err
						}
						appliedPatches = append(appliedPatches, patches...)
					}
				} else {
					if hasNestedAnchors(overlayElement) {
						// Anchors exist deeper in the overlay element:
						// recurse so they can select nested targets.
						patches, err := applyOverlay(resourceElement, overlayElement, currentPath)
						if err != nil {
							return nil, err
						}
						appliedPatches = append(appliedPatches, patches...)
					} else {
						// No anchors anywhere: treat the overlay element
						// as a new item to insert.
						patch, err := insertSubtree(overlayElement, currentPath)
						if err != nil {
							return nil, err
						}
						appliedPatches = append(appliedPatches, patch)
					}
				}
			}
		}
	default:
		// Scalar arrays: insert every overlay value at the array head.
		path += "0/"
		for _, value := range overlay {
			patch, err := insertSubtree(value, path)
			if err != nil {
				return nil, err
			}
			appliedPatches = append(appliedPatches, patch)
		}
	}
	return appliedPatches, nil
}
// fillEmptyArray produces the patches used to populate a resource array
// that is currently empty: every non-anchor overlay item is inserted at
// the head of the array. Items consisting of anchors are selectors only
// and have nothing to select in an empty array, so they are dropped.
func fillEmptyArray(overlay []interface{}, path string) ([]PatchBytes, error) {
	if len(overlay) == 0 {
		return nil, fmt.Errorf("overlay does not support empty arrays")
	}

	var patches []PatchBytes
	headPath := path + "0/"

	if _, containsObjects := overlay[0].(map[string]interface{}); containsObjects {
		for _, element := range overlay {
			// Skip selector-only elements (those carrying anchors).
			if len(GetAnchorsFromMap(element.(map[string]interface{}))) > 0 {
				continue
			}
			patch, err := insertSubtree(element, headPath)
			if err != nil {
				return nil, err
			}
			patches = append(patches, patch)
		}
		return patches, nil
	}

	// Scalar overlay items are inserted unconditionally.
	for _, element := range overlay {
		patch, err := insertSubtree(element, headPath)
		if err != nil {
			return nil, err
		}
		patches = append(patches, patch)
	}
	return patches, nil
}
// skipArrayObject reports whether the given array object should be skipped
// when applying an anchored overlay: the object is skipped when any anchor
// key is absent from it, or when the object's value differs from the
// anchor's pattern.
func skipArrayObject(object, anchors map[string]interface{}) bool {
	for wrappedKey, pattern := range anchors {
		// Anchor keys arrive wrapped in parentheses, e.g. "(name)".
		key := wrappedKey[1 : len(wrappedKey)-1]
		value, present := object[key]
		if !present || value != pattern {
			return true
		}
	}
	return false
}
// insertSubtree builds an "add" JSON patch that inserts overlay at path.
func insertSubtree(overlay interface{}, path string) ([]byte, error) {
	return processSubtree(overlay, path, "add")
}

// replaceSubtree builds a "replace" JSON patch that overwrites path with
// overlay.
func replaceSubtree(overlay interface{}, path string) ([]byte, error) {
	return processSubtree(overlay, path, "replace")
}

// processSubtree serializes overlay into a single RFC 6902 patch operation
// with the given op, validating the result before returning it.
func processSubtree(overlay interface{}, path string, op string) ([]byte, error) {
	// Normalize the path: strip one trailing slash, and use "/" for the root.
	if n := len(path); n > 1 && path[n-1] == '/' {
		path = path[:n-1]
	}
	if path == "" {
		path = "/"
	}

	patchStr := fmt.Sprintf(`{ "op": "%s", "path": "%s", "value": %s }`, op, path, prepareJSONValue(overlay))

	// Round-trip through the patch decoder to make sure the generated
	// operation is well-formed before handing it to callers.
	if _, err := jsonpatch.DecodePatch([]byte("[" + patchStr + "]")); err != nil {
		return nil, err
	}
	return []byte(patchStr), nil
}
// prepareJSONValue serializes an overlay fragment into a JSON snippet for
// embedding in a patch operation's "value" field.
//
// Empty containers and fragments consisting solely of anchors (see
// hasOnlyAnchors) serialize to "" so they contribute nothing to the patch.
//
// NOTE(review): string values are interpolated without JSON escaping, so a
// string containing a quote or backslash would yield invalid JSON —
// confirm whether overlay values can ever contain such characters.
// NOTE(review): floats are rendered with %f (fixed six decimal places,
// e.g. 443.000000); the overlay tests pin exactly this form, so do not
// "clean up" the formatting without updating them.
func prepareJSONValue(overlay interface{}) string {
	switch typed := overlay.(type) {
	case map[string]interface{}:
		if len(typed) == 0 {
			return ""
		}
		if hasOnlyAnchors(overlay) {
			return ""
		}
		// Build `"key":value` pairs joined by ", ".
		result := ""
		for key, value := range typed {
			jsonValue := prepareJSONValue(value)
			pair := fmt.Sprintf(`"%s":%s`, key, jsonValue)
			if result != "" {
				result += ", "
			}
			result += pair
		}
		result = fmt.Sprintf(`{ %s }`, result)
		return result
	case []interface{}:
		if len(typed) == 0 {
			return ""
		}
		if hasOnlyAnchors(overlay) {
			return ""
		}
		// Join element serializations with ", ".
		result := ""
		for _, value := range typed {
			jsonValue := prepareJSONValue(value)
			if result != "" {
				result += ", "
			}
			result += jsonValue
		}
		result = fmt.Sprintf(`[ %s ]`, result)
		return result
	case string:
		return fmt.Sprintf(`"%s"`, typed)
	case float64:
		return fmt.Sprintf("%f", typed)
	case int64:
		// json.Unmarshal decodes all JSON numbers to float64, so this
		// branch is only reachable for programmatically injected values.
		return fmt.Sprintf("%d", typed)
	default:
		// Unsupported types (bool, nil, ...) are silently dropped.
		return ""
	}
}
// hasOnlyAnchors reports whether overlay consists exclusively of anchor
// entries (keys wrapped in parentheses), recursing into nested maps.
// Scalars, arrays, and any other value can never be anchors.
func hasOnlyAnchors(overlay interface{}) bool {
	typedMap, isMap := overlay.(map[string]interface{})
	if !isMap {
		return false
	}
	// Every top-level key is an anchor: done.
	if len(GetAnchorsFromMap(typedMap)) == len(typedMap) {
		return true
	}
	// Otherwise every nested value must itself be anchor-only.
	for _, nested := range typedMap {
		if !hasOnlyAnchors(nested) {
			return false
		}
	}
	return true
}
// hasNestedAnchors reports whether overlay contains at least one anchor
// entry (a key wrapped in parentheses) at any depth. Non-map values can
// never carry anchors.
func hasNestedAnchors(overlay interface{}) bool {
	typedMap, isMap := overlay.(map[string]interface{})
	if !isMap {
		return false
	}
	if len(GetAnchorsFromMap(typedMap)) > 0 {
		return true
	}
	for _, nested := range typedMap {
		if hasNestedAnchors(nested) {
			return true
		}
	}
	return false
}

View file

@ -0,0 +1,62 @@
package engine
import (
"encoding/json"
"testing"
jsonpatch "github.com/evanphx/json-patch"
"gotest.tools/assert"
)
// TestApplyOverlay_NestedListWithAnchor verifies that an overlay entry
// guarded by an anchor ("(name)": "secure-connection") is applied only to
// the matching element of the nested ports list, replacing its port and
// protocol in place.
//
// The expected "port":444.000000 pins prepareJSONValue's %f rendering of
// floats; do not normalize it.
func TestApplyOverlay_NestedListWithAnchor(t *testing.T) {
	resourceRaw := []byte(`{ "apiVersion": "v1", "kind": "Endpoints", "metadata": { "name": "test-endpoint", "labels": { "label": "test" } }, "subsets": [ { "addresses": [ { "ip": "192.168.10.171" } ], "ports": [ { "name": "secure-connection", "port": 443, "protocol": "TCP" } ] } ] }`)
	overlayRaw := []byte(`{ "subsets": [ { "ports": [ { "(name)": "secure-connection", "port": 444, "protocol": "UDP" } ] } ] }`)
	var resource, overlay interface{}

	// Errors deliberately ignored: the fixtures above are known-valid JSON.
	json.Unmarshal(resourceRaw, &resource)
	json.Unmarshal(overlayRaw, &overlay)

	patches, err := applyOverlay(resource, overlay, "/")
	assert.NilError(t, err)
	assert.Assert(t, patches != nil)

	// Apply the generated patches back to the raw resource and compare
	// against the exact expected document.
	patch := JoinPatches(patches)

	decoded, err := jsonpatch.DecodePatch(patch)
	assert.NilError(t, err)
	assert.Assert(t, decoded != nil)

	patched, err := decoded.Apply(resourceRaw)
	assert.NilError(t, err)
	assert.Assert(t, patched != nil)

	expectedResult := []byte(`{"apiVersion":"v1","kind":"Endpoints","metadata":{"name":"test-endpoint","labels":{"label":"test"}},"subsets":[{"addresses":[{"ip":"192.168.10.171"}],"ports":[{"name":"secure-connection","port":444.000000,"protocol":"UDP"}]}]}`)
	assert.Equal(t, string(expectedResult), string(patched))
}
// TestApplyOverlay_InsertIntoArray verifies that an overlay element with
// no anchors is inserted as a new item at the head of the subsets array
// (rather than merged into the existing element), leaving the original
// element untouched after it.
func TestApplyOverlay_InsertIntoArray(t *testing.T) {
	resourceRaw := []byte(`{ "apiVersion": "v1", "kind": "Endpoints", "metadata": { "name": "test-endpoint", "labels": { "label": "test" } }, "subsets": [ { "addresses": [ { "ip": "192.168.10.171" } ], "ports": [ { "name": "secure-connection", "port": 443, "protocol": "TCP" } ] } ] }`)
	overlayRaw := []byte(`{ "subsets": [ { "addresses": [ { "ip": "192.168.10.172" }, { "ip": "192.168.10.173" } ], "ports": [ { "name": "insecure-connection", "port": 80, "protocol": "UDP" } ] } ] }`)
	var resource, overlay interface{}

	// Errors deliberately ignored: the fixtures above are known-valid JSON.
	json.Unmarshal(resourceRaw, &resource)
	json.Unmarshal(overlayRaw, &overlay)

	patches, err := applyOverlay(resource, overlay, "/")
	assert.NilError(t, err)
	assert.Assert(t, patches != nil)

	// Apply the generated patches back to the raw resource and compare
	// against the exact expected document. Note "port":80.000000 — the %f
	// float rendering from prepareJSONValue is pinned here.
	patch := JoinPatches(patches)

	decoded, err := jsonpatch.DecodePatch(patch)
	assert.NilError(t, err)
	assert.Assert(t, decoded != nil)

	patched, err := decoded.Apply(resourceRaw)
	assert.NilError(t, err)
	assert.Assert(t, patched != nil)

	expectedResult := []byte(`{"apiVersion":"v1","kind":"Endpoints","metadata":{"name":"test-endpoint","labels":{"label":"test"}},"subsets":[{"addresses":[{"ip":"192.168.10.172"},{"ip":"192.168.10.173"}],"ports":[{"name":"insecure-connection","port":80.000000,"protocol":"UDP"}]},{"addresses":[{"ip":"192.168.10.171"}],"ports":[{"name":"secure-connection","port":443,"protocol":"TCP"}]}]}`)
	assert.Equal(t, string(expectedResult), string(patched))
}

View file

@ -1,4 +1,4 @@
package mutation
package engine
import (
"encoding/json"

View file

@ -1,4 +1,4 @@
package mutation
package engine
import (
"testing"

View file

@ -13,7 +13,7 @@ import (
// ResourceMeetsDescription checks requests kind, name and labels to fit the policy rule
func ResourceMeetsDescription(resourceRaw []byte, description kubepolicy.ResourceDescription, gvk metav1.GroupVersionKind) bool {
if description.Kind != gvk.Kind {
if !findKind(description.Kinds, gvk.Kind) {
return false
}
@ -104,3 +104,24 @@ func ParseRegexPolicyResourceName(policyResourceName string) (string, bool) {
}
return strings.Trim(regex[1], " "), true
}
// GetAnchorsFromMap extracts the anchor entries — keys wrapped in
// parentheses, e.g. "(name)" — from the given map, preserving their
// wrapped form in the result's keys.
func GetAnchorsFromMap(anchorsMap map[string]interface{}) map[string]interface{} {
	anchors := make(map[string]interface{})
	for key, value := range anchorsMap {
		if !wrappedWithParentheses(key) {
			continue
		}
		anchors[key] = value
	}
	return anchors
}
func findKind(kinds []string, kindGVK string) bool {
for _, kind := range kinds {
if kind == kindGVK {
return true
}
}
return false
}

View file

@ -96,10 +96,7 @@ func validateArray(resourcePart, patternPart interface{}) error {
switch pattern := patternArray[0].(type) {
case map[string]interface{}:
anchors, err := getAnchorsFromMap(pattern)
if err != nil {
return err
}
anchors := GetAnchorsFromMap(pattern)
for _, value := range resourceArray {
resource, ok := value.(map[string]interface{})
@ -107,7 +104,7 @@ func validateArray(resourcePart, patternPart interface{}) error {
return fmt.Errorf("expected array, found %T", resourcePart)
}
if skipArrayObject(resource, anchors) {
if skipValidatingObject(resource, anchors) {
continue
}
@ -177,19 +174,7 @@ func validateMapElement(resourcePart, patternPart interface{}) error {
return nil
}
func getAnchorsFromMap(pattern map[string]interface{}) (map[string]interface{}, error) {
result := make(map[string]interface{})
for key, value := range pattern {
if wrappedWithParentheses(key) {
result[key] = value
}
}
return result, nil
}
func skipArrayObject(object, anchors map[string]interface{}) bool {
func skipValidatingObject(object, anchors map[string]interface{}) bool {
for key, pattern := range anchors {
key = key[1 : len(key)-1]

View file

@ -306,8 +306,7 @@ func TestGetAnchorsFromMap_ThereAreAnchors(t *testing.T) {
var unmarshalled map[string]interface{}
json.Unmarshal(rawMap, &unmarshalled)
actualMap, err := getAnchorsFromMap(unmarshalled)
assert.NilError(t, err)
actualMap := GetAnchorsFromMap(unmarshalled)
assert.Equal(t, len(actualMap), 2)
assert.Equal(t, actualMap["(name)"].(string), "nirmata-*")
assert.Equal(t, actualMap["(namespace)"].(string), "kube-?olicy")
@ -319,8 +318,7 @@ func TestGetAnchorsFromMap_ThereAreNoAnchors(t *testing.T) {
var unmarshalled map[string]interface{}
json.Unmarshal(rawMap, &unmarshalled)
actualMap, err := getAnchorsFromMap(unmarshalled)
assert.NilError(t, err)
actualMap := GetAnchorsFromMap(unmarshalled)
assert.Assert(t, len(actualMap) == 0)
}
@ -358,7 +356,7 @@ func TestValidateMapElement_OneElementInArrayNotPass(t *testing.T) {
}
func TestValidate_ServiceTest(t *testing.T) {
rawPolicy := []byte(`{ "apiVersion": "kubepolicy.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-service" }, "spec": { "rules": [ { "name": "ps1", "resource": { "kind": "Service", "name": "game-service*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/secretLabel", "op": "replace", "value": "weKnow" }, { "path": "/metadata/labels/originalLabel", "op": "remove" }, { "path": "/spec/selector/app", "op": "replace", "value": "mutedApp" } ] }, "validate": { "message": "This resource is broken", "pattern": { "spec": { "ports": [ { "name": "hs", "protocol": 32 } ] } } } } ] } }`)
rawPolicy := []byte(`{ "apiVersion": "kyverno.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-service" }, "spec": { "rules": [ { "name": "ps1", "resource": { "kind": "Service", "name": "game-service*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/secretLabel", "op": "replace", "value": "weKnow" }, { "path": "/metadata/labels/originalLabel", "op": "remove" }, { "path": "/spec/selector/app", "op": "replace", "value": "mutedApp" } ] }, "validate": { "message": "This resource is broken", "pattern": { "spec": { "ports": [ { "name": "hs", "protocol": 32 } ] } } } } ] } }`)
rawResource := []byte(`{ "kind": "Service", "apiVersion": "v1", "metadata": { "name": "game-service", "labels": { "originalLabel": "isHere", "secretLabel": "thisIsMySecret" } }, "spec": { "selector": { "app": "MyApp" }, "ports": [ { "name": "http", "protocol": "TCP", "port": 80, "targetPort": 9376 } ] } }`)
var policy kubepolicy.Policy
@ -372,7 +370,7 @@ func TestValidate_ServiceTest(t *testing.T) {
}
func TestValidate_MapHasFloats(t *testing.T) {
rawPolicy := []byte(`{ "apiVersion": "kubepolicy.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-deployment-changed" }, "spec": { "rules": [ { "name": "First policy v2", "resource": { "kind": "Deployment", "name": "nginx-*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/app", "op": "replace", "value": "nginx_is_mutated" } ] }, "validate": { "message": "replicas number is wrong", "pattern": { "metadata": { "labels": { "app": "*" } }, "spec": { "replicas": 3 } } } } ] } }`)
rawPolicy := []byte(`{ "apiVersion": "kyverno.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-deployment-changed" }, "spec": { "rules": [ { "name": "First policy v2", "resource": { "kind": "Deployment", "name": "nginx-*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/app", "op": "replace", "value": "nginx_is_mutated" } ] }, "validate": { "message": "replicas number is wrong", "pattern": { "metadata": { "labels": { "app": "*" } }, "spec": { "replicas": 3 } } } } ] } }`)
rawResource := []byte(`{ "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { "name": "nginx-deployment", "labels": { "app": "nginx" } }, "spec": { "replicas": 3, "selector": { "matchLabels": { "app": "nginx" } }, "template": { "metadata": { "labels": { "app": "nginx" } }, "spec": { "containers": [ { "name": "nginx", "image": "nginx:1.7.9", "ports": [ { "containerPort": 80 } ] } ] } } } }`)
var policy kubepolicy.Policy

View file

@ -82,7 +82,7 @@ func complete(args []string) (*kubepolicy.Policy, []*resourceInfo) {
func applyPolicy(policy *kubepolicy.Policy, rawResource []byte, gvk *metav1.GroupVersionKind) ([]byte, error) {
_, patchedDocument := engine.Mutate(*policy, rawResource, *gvk)
if err := engine.Validate(*policy, rawResource, *gvk); err != nil {
if err := engine.Validate(*policy, patchedDocument, *gvk); err != nil {
return nil, err
}
return patchedDocument, nil

View file

@ -44,7 +44,7 @@ func (si *sharedInfomer) Run(stopCh <-chan struct{}) {
}
func (si *sharedInfomer) getInfomer() infomertypes.PolicyInformer {
return si.policyInformerFactory.Kubepolicy().V1alpha1().Policies()
return si.policyInformerFactory.Kyverno().V1alpha1().Policies()
}
func (si *sharedInfomer) GetInfomer() cache.SharedIndexInformer {
return si.getInfomer().Informer()

View file

@ -90,7 +90,7 @@ func (b *builder) processViolation(info Info) error {
modifiedPolicy.Status.Violations = modifiedViolations
// Violations are part of the status sub resource, so we can use the Update Status api instead of updating the policy object
_, err = b.client.UpdateStatusResource("policies", namespace, modifiedPolicy)
_, err = b.client.UpdateStatusResource("policies/status", namespace, modifiedPolicy)
if err != nil {
return err
}

View file

@ -16,7 +16,6 @@ import (
"github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
engine "github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/engine/mutation"
"github.com/nirmata/kyverno/pkg/sharedinformer"
tlsutils "github.com/nirmata/kyverno/pkg/tls"
v1beta1 "k8s.io/api/admission/v1beta1"
@ -87,13 +86,11 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
admissionReview.Response = &v1beta1.AdmissionResponse{
Allowed: true,
}
if ws.client.KindIsSupported(admissionReview.Request.Kind.Kind) {
switch r.URL.Path {
case config.MutatingWebhookServicePath:
admissionReview.Response = ws.HandleMutation(admissionReview.Request)
case config.ValidatingWebhookServicePath:
admissionReview.Response = ws.HandleValidation(admissionReview.Request)
}
switch r.URL.Path {
case config.MutatingWebhookServicePath:
admissionReview.Response = ws.HandleMutation(admissionReview.Request)
case config.ValidatingWebhookServicePath:
admissionReview.Response = ws.HandleValidation(admissionReview.Request)
}
admissionReview.Response.UID = admissionReview.Request.UID
@ -143,7 +140,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) *v1be
return nil
}
var allPatches []mutation.PatchBytes
var allPatches []engine.PatchBytes
for _, policy := range policies {
ws.logger.Printf("Applying policy %s with %d rules\n", policy.ObjectMeta.Name, len(policy.Spec.Rules))
@ -160,7 +157,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) *v1be
patchType := v1beta1.PatchTypeJSONPatch
return &v1beta1.AdmissionResponse{
Allowed: true,
Patch: mutation.JoinPatches(allPatches),
Patch: engine.JoinPatches(allPatches),
PatchType: &patchType,
}
}

View file

@ -7,7 +7,7 @@ Compiles the project to go executable, generates docker image and pushes it to t
### generate-server-cert.sh ###
Generates TLS certificate and key that used by webhook server. Example:
`scripts/generate-server-cert.sh --service=kube-policy-svc --namespace=my_namespace --serverIp=192.168.10.117`
`scripts/generate-server-cert.sh --service=kyverno-svc --namespace=my_namespace --serverIp=192.168.10.117`
* `--service` identifies the service for in-cluster webhook server. Do not specify it if you plan to run the webhook server outside the cluster, or specify 'localhost' if you want to run the controller locally.
* `--namespace` identifies the namespace for in-cluster webhook server. Do not specify it if you plan to run controller locally.
* `--serverIp` is the IP of master node, it can be found in `~/.kube/config`: clusters.cluster[0].server. You should explicitly specify it.
@ -18,7 +18,7 @@ Prepares controller for free (local) or in-cluster use. Uses `generate-server-ce
* `--namespace` - the target namespace to deploy the controller. Do not specify it if you want to deploy the controller locally.
* `--serverIp` means the same as for `generate-server-cert.sh`
Examples:
`scripts/deploy-controller.sh --service=my-kube-policy --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kube-policy'
`scripts/deploy-controller.sh --service=my-kyverno --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kyverno'
`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for use in a cluster with master node at '192.168.10.117'
### test-web-hook.sh ###

View file

@ -19,7 +19,7 @@ esac
done
hub_user_name="nirmata"
project_name="kube-policy"
project_name="kyverno"
if [ -z "${service_name}" ]; then
service_name="${project_name}-svc"
@ -40,7 +40,7 @@ if [ -z "${namespace}" ]; then # controller should be launched locally
kubectl delete -f definitions/install.yaml
kubectl create -f definitions/install.yaml || exit 3
echo -e "\n### You can build and run kube-policy project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)."
echo -e "\n### You can build and run kyverno project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)."
else # controller should be launched within a cluster

View file

@ -1,4 +1,4 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-cm
@ -6,7 +6,8 @@ spec :
rules:
- name: pCM1
resource:
kind : ConfigMap
kinds :
- ConfigMap
name: "game-config"
mutate:
patches:
@ -15,7 +16,8 @@ spec :
value : newValue
- name: pCM2
resource:
kind : ConfigMap
kinds :
- ConfigMap
name: "game-config"
mutate:
patches:
@ -26,7 +28,8 @@ spec :
value : "data is replaced"
- name: pCM3
resource:
kind : ConfigMap
kinds :
- ConfigMap
name: "game-config"
mutate:
patches:
@ -40,7 +43,8 @@ spec :
game.properties: "*enemies=aliens*"
- name: pCM4
resource:
kind : ConfigMap
kinds :
- ConfigMap
name: "game-config"
validate:
message: "This CM data is broken because it does not have ui.properties"

View file

@ -1,4 +1,4 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind: Policy
metadata :
name: "policy-configmapgenerator-test"
@ -6,7 +6,8 @@ spec:
rules:
- name: "copyCM"
resource :
kind : Namespace
kinds :
- Namespace
selector:
matchLabels:
LabelForSelector : "namespace2"

View file

@ -3,7 +3,7 @@
# To apply this policy you need to create secret and configMap in "default" namespace
# and then create a namespace
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : "policy-ns-patch-cmg-sg"
@ -11,7 +11,8 @@ spec :
rules:
- name: "patchNamespace2"
resource :
kind : Namespace
kinds :
- Namespace
selector:
matchLabels:
LabelForSelector : "namespace2"
@ -23,7 +24,8 @@ spec :
- name: "copyCM"
resource :
kind : Namespace
kinds :
- Namespace
selector:
matchLabels:
LabelForSelector : "namespace2"
@ -38,7 +40,8 @@ spec :
- name: "generateCM"
resource :
kind : Namespace
kinds :
- Namespace
selector:
matchLabels:
LabelForSelector : "namespace2"
@ -56,7 +59,8 @@ spec :
- name: "generateSecret"
resource :
kind : Namespace
kinds :
- Namespace
name: ns2
generate :
kind: Secret
@ -72,7 +76,8 @@ spec :
- name: "copySecret"
resource :
kind : Namespace
kinds :
- Namespace
name: ns2
generate :
kind: Secret

View file

@ -1,12 +1,13 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-cronjob
spec:
rules:
- name:
- name: pCJ
resource:
kind : CronJob
kinds :
- CronJob
name: "?ell*"
mutate:
patches:

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-daemonset
@ -6,7 +6,8 @@ spec:
rules:
- name: "Patch and Volume validation"
resource:
kind: DaemonSet
kinds:
- DaemonSet
name: fluentd-elasticsearch
mutate:
patches:

View file

@ -1,4 +1,4 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-deployment
@ -6,8 +6,8 @@ spec :
rules:
- name: "First policy v2"
resource:
kind : Deployment
name: nginx-*
kinds :
- Deployment
mutate:
patches:
- path: /metadata/labels/isMutated

View file

@ -1,12 +1,13 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-endpoints
spec :
rules:
- name:
- name: pEP
resource:
kind : Endpoints
kinds :
- Endpoints
selector:
matchLabels:
label : test
@ -19,7 +20,7 @@ spec :
op: add
value:
addresses:
- ip: "192.168.10.171"
- ip: "192.168.10.172"
ports:
- name: load-balancer-connection
port: 80

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-hpa
@ -6,7 +6,8 @@ spec :
rules:
- name: hpa1
resource:
kind : HorizontalPodAutoscaler
kinds :
- HorizontalPodAutoscaler
selector:
matchLabels:
originalLabel: isHere

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata :
name : policy-ingress
@ -6,7 +6,8 @@ spec :
rules:
- name: ingress1
resource:
kind : Ingress
kinds :
- Ingress
selector:
matchLabels:
originalLabel: isHere

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-job-perl-bigint
@ -6,7 +6,8 @@ spec :
rules:
- name: job1
resource:
kind: Job
kinds:
- Job
name: pi
mutate:
patches:

View file

@ -1,12 +1,13 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-limitrange
spec :
rules:
- name:
- name: "rule"
resource:
kind : LimitRange
kinds :
- LimitRange
selector:
matchLabels:
containerSize: minimal

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata :
name : policy-namespace
@ -7,7 +7,8 @@ spec :
rules:
- name: ns1
resource:
kind : Namespace
kinds :
- Namespace
selector:
matchLabels:
LabelForSelector : "namespace"

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-network-policy
@ -6,7 +6,8 @@ spec:
rules:
- name: np1
resource:
kind : NetworkPolicy
kinds :
- NetworkPolicy
selector:
matchLabels:
originalLabel: isHere

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-pvc
@ -6,7 +6,8 @@ spec:
rules:
- name: pvc1
resource:
kind : PersistentVolumeClaim
kinds :
- PersistentVolumeClaim
matchLabels:
originalLabel: isHere
mutate:

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-pdb
@ -6,7 +6,8 @@ spec:
rules:
- name: pdb1
resource:
kind : PodDisruptionBudget
kinds :
- PodDisruptionBudget
name: "game-pdb"
mutate:
patches:

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: test-podtemplate
@ -6,7 +6,8 @@ spec:
rules:
- name: podtemplate1
resource:
kind : PodTemplate
kinds :
- PodTemplate
selector:
matchLabels:
originalLabel: isHere

View file

@ -1,9 +1,12 @@
# Test examples
Examples of policies and resources with which you can play to see the kube-policy in action. There are definitions for each supported resource type and an example policy for the corresponding resource.
## How to play
For now, the testing is possible only via ```kubectl``` when kyverno is installed to the cluster. So, [build and install the policy controller](/documentation/installation.md) first.
# Test samples
Each folder contains a pair of files, one of which is the definition of the resource, and the second is the definition of the policy for this resource. Let's look at an example of the endpoints mutation. Endpoints are listed in file `examples/Endpoints/endpoints.yaml`:
This directory contains policies and resources for testing. There are definitions for each supported resource type and a sample policy for the corresponding resource.
## How to use
Currently, the testing is possible only via ```kubectl``` when kyverno is installed to the cluster. So, [build and install the policy controller](/documentation/installation.md) first.
Each folder contains a pair of files, one of which is the definition of the resource, and the second is the definition of the policy for this resource. Let's look at an example of the endpoints mutation. Endpoints are listed in file `test/Endpoints/endpoints.yaml`:
````yaml
apiVersion: v1

View file

@ -1,12 +1,13 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-quota-low-test-validation
spec :
rules:
- name:
- name: "rule1"
resource:
kind : ResourceQuota
kinds :
- ResourceQuota
selector:
matchLabels:
quota: low
@ -16,9 +17,10 @@ spec :
spec:
hard:
memory: "8Gi|12Gi"
- name:
- name: "rule2"
resource:
kind : ResourceQuota
kinds :
- ResourceQuota
selector:
matchLabels:
quota: low
@ -28,9 +30,10 @@ spec :
spec:
hard:
cpu: <3
- name:
- name: "rule3"
resource:
kind : ResourceQuota
kinds :
- ResourceQuota
selector:
matchLabels:
quota: low

View file

@ -1,12 +1,13 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-quota-low-test
spec :
rules:
- name:
- name: "rule"
resource:
kind : ResourceQuota
kinds :
- ResourceQuota
selector:
matchLabels:
quota: low

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-secrets
@ -6,7 +6,8 @@ spec:
rules:
- name: secret1
resource:
kind : Secret
kinds :
- Secret
name: "mysecret"
mutate:
patches:

View file

@ -1,4 +1,4 @@
apiVersion : kubepolicy.nirmata.io/v1alpha1
apiVersion : kyverno.io/v1alpha1
kind : Policy
metadata :
name : policy-service
@ -6,7 +6,8 @@ spec :
rules:
- name: ps1
resource:
kind: Service
kinds:
- Service
name: "game-service*"
mutate:
patches:

View file

@ -1,4 +1,4 @@
apiVersion: kubepolicy.nirmata.io/v1alpha1
apiVersion: kyverno.io/v1alpha1
kind: Policy
metadata:
name: policy-statefulset
@ -6,7 +6,8 @@ spec:
rules:
- name: statefulset1
resource:
kind : StatefulSet
kinds :
- StatefulSet
selector:
matchLabels:
originalLabel: isHere