diff --git a/client/certificates.go b/client/certificates.go index 740c2d5a79..d79f0eeb75 100644 --- a/client/certificates.go +++ b/client/certificates.go @@ -46,14 +46,14 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat if err != nil { return nil, err } - csrList, err := c.ListResource(CSR, "") + csrList, err := c.ListResource(CSRs, "") if err != nil { return nil, errors.New(fmt.Sprintf("Unable to list existing certificate requests: %v", err)) } for _, csr := range csrList.Items { if csr.GetName() == req.ObjectMeta.Name { - err := c.DeleteResouce(CSR, "", csr.GetName()) + err := c.DeleteResouce(CSRs, "", csr.GetName()) if err != nil { return nil, errors.New(fmt.Sprintf("Unable to delete existing certificate request: %v", err)) } @@ -62,7 +62,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat } } - unstrRes, err := c.CreateResource(CSR, "", req) + unstrRes, err := c.CreateResource(CSRs, "", req) if err != nil { return nil, err } @@ -91,7 +91,7 @@ func (c *Client) fetchCertificateFromRequest(req *certificates.CertificateSignin // TODO: react of SIGINT and SIGTERM timeStart := time.Now() for time.Now().Sub(timeStart) < time.Duration(maxWaitSeconds)*time.Second { - unstrR, err := c.GetResource(CSR, "", req.ObjectMeta.Name) + unstrR, err := c.GetResource(CSRs, "", req.ObjectMeta.Name) if err != nil { return nil, err } @@ -119,7 +119,7 @@ const certificateField string = "certificate" // Reads the pair of TLS certificate and key from the specified secret. 
func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair { name := generateSecretName(props) - unstrSecret, err := c.GetResource(Secret, props.Namespace, name) + unstrSecret, err := c.GetResource(Secrets, props.Namespace, name) if err != nil { c.logger.Printf("Unable to get secret %s/%s: %s", props.Namespace, name, err) return nil @@ -147,7 +147,7 @@ func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair { // Updates existing secret or creates new one. func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPemPair) error { name := generateSecretName(props) - unstrSecret, err := c.GetResource(Secret, props.Namespace, name) + unstrSecret, err := c.GetResource(Secrets, props.Namespace, name) if err == nil { secret, err := convertToSecret(unstrSecret) if err != nil { @@ -159,7 +159,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem } secret.Data[certificateField] = pemPair.Certificate secret.Data[privateKeyField] = pemPair.PrivateKey - _, err = c.UpdateResource(Secret, props.Namespace, secret) + _, err = c.UpdateResource(Secrets, props.Namespace, secret) if err == nil { c.logger.Printf("Secret %s is updated", name) } @@ -181,7 +181,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem }, } - _, err := c.CreateResource(Secret, props.Namespace, secret) + _, err := c.CreateResource(Secrets, props.Namespace, secret) if err == nil { c.logger.Printf("Secret %s is created", name) } diff --git a/client/client.go b/client/client.go index 91483f0dd9..3b5a39cb7f 100644 --- a/client/client.go +++ b/client/client.go @@ -4,7 +4,6 @@ import ( "fmt" "log" "os" - "strings" "time" types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1" @@ -17,16 +16,21 @@ import ( "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/runtime/schema" utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/client-go/discovery" + 
"k8s.io/client-go/discovery/cached/memory" "k8s.io/client-go/dynamic" + "k8s.io/client-go/kubernetes" csrtype "k8s.io/client-go/kubernetes/typed/certificates/v1beta1" event "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/rest" ) type Client struct { - logger *log.Logger client dynamic.Interface + cachedClient discovery.CachedDiscoveryInterface + logger *log.Logger clientConfig *rest.Config + kclient *kubernetes.Clientset } func NewClient(config *rest.Config, logger *log.Logger) (*Client, error) { @@ -39,10 +43,17 @@ func NewClient(config *rest.Config, logger *log.Logger) (*Client, error) { logger = log.New(os.Stdout, "Client : ", log.LstdFlags) } + kclient, err := kubernetes.NewForConfig(config) + if err != nil { + return nil, err + } + return &Client{ logger: logger, client: client, clientConfig: config, + kclient: kclient, + cachedClient: memory.NewMemCacheClient(kclient.Discovery()), }, nil } @@ -62,29 +73,20 @@ func (c *Client) GetKubePolicyDeployment() (*apps.Deployment, error) { // or generate a kube client value to access the interface //GetEventsInterface provides typed interface for events func (c *Client) GetEventsInterface() (event.EventInterface, error) { - kubeClient, err := newKubeClient(c.clientConfig) - if err != nil { - return nil, err - } - return kubeClient.CoreV1().Events(""), nil + return c.kclient.CoreV1().Events(""), nil } func (c *Client) GetCSRInterface() (csrtype.CertificateSigningRequestInterface, error) { - kubeClient, err := newKubeClient(c.clientConfig) - if err != nil { - return nil, err - } - - return kubeClient.CertificatesV1beta1().CertificateSigningRequests(), nil + return c.kclient.CertificatesV1beta1().CertificateSigningRequests(), nil } -func (c *Client) getInterface(kind string) dynamic.NamespaceableResourceInterface { - return c.client.Resource(c.getGroupVersionMapper(kind)) +func (c *Client) getInterface(resource string) dynamic.NamespaceableResourceInterface { + return 
c.client.Resource(c.getGroupVersionMapper(resource)) } -func (c *Client) getResourceInterface(kind string, namespace string) dynamic.ResourceInterface { +func (c *Client) getResourceInterface(resource string, namespace string) dynamic.ResourceInterface { // Get the resource interface - namespaceableInterface := c.getInterface(kind) + namespaceableInterface := c.getInterface(resource) // Get the namespacable interface var resourceInteface dynamic.ResourceInterface if namespace != "" { @@ -96,52 +98,52 @@ func (c *Client) getResourceInterface(kind string, namespace string) dynamic.Res } // Keep this a stateful as the resource list will be based on the kubernetes version we connect to -func (c *Client) getGroupVersionMapper(kind string) schema.GroupVersionResource { - //TODO: add checks to see if the kind is supported - //TODO: build the resource list dynamically( by querying the registered resource kinds) +func (c *Client) getGroupVersionMapper(resource string) schema.GroupVersionResource { + //TODO: add checks to see if the resource is supported + //TODO: build the resource list dynamically( by querying the registered resources) //TODO: the error scenarios - return getGrpVersionMapper(kind, c.clientConfig, false) + return c.getGVR(resource) } // GetResource returns the resource in unstructured/json format -func (c *Client) GetResource(kind string, namespace string, name string) (*unstructured.Unstructured, error) { - return c.getResourceInterface(kind, namespace).Get(name, meta.GetOptions{}) +func (c *Client) GetResource(resource string, namespace string, name string) (*unstructured.Unstructured, error) { + return c.getResourceInterface(resource, namespace).Get(name, meta.GetOptions{}) } // ListResource returns the list of resources in unstructured/json format // Access items using []Items -func (c *Client) ListResource(kind string, namespace string) (*unstructured.UnstructuredList, error) { - return c.getResourceInterface(kind, namespace).List(meta.ListOptions{}) 
+func (c *Client) ListResource(resource string, namespace string) (*unstructured.UnstructuredList, error) { + return c.getResourceInterface(resource, namespace).List(meta.ListOptions{}) } -func (c *Client) DeleteResouce(kind string, namespace string, name string) error { - return c.getResourceInterface(kind, namespace).Delete(name, &meta.DeleteOptions{}) +func (c *Client) DeleteResouce(resource string, namespace string, name string) error { + return c.getResourceInterface(resource, namespace).Delete(name, &meta.DeleteOptions{}) } -// CreateResource creates object for the specified kind/namespace -func (c *Client) CreateResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) { +// CreateResource creates object for the specified resource/namespace +func (c *Client) CreateResource(resource string, namespace string, obj interface{}) (*unstructured.Unstructured, error) { // convert typed to unstructured obj if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil { - return c.getResourceInterface(kind, namespace).Create(unstructuredObj, meta.CreateOptions{}) + return c.getResourceInterface(resource, namespace).Create(unstructuredObj, meta.CreateOptions{}) } return nil, fmt.Errorf("Unable to create resource ") } -// UpdateResource updates object for the specified kind/namespace -func (c *Client) UpdateResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) { +// UpdateResource updates object for the specified resource/namespace +func (c *Client) UpdateResource(resource string, namespace string, obj interface{}) (*unstructured.Unstructured, error) { // convert typed to unstructured obj if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil { - return c.getResourceInterface(kind, namespace).Update(unstructuredObj, meta.UpdateOptions{}) + return c.getResourceInterface(resource, namespace).Update(unstructuredObj, meta.UpdateOptions{}) } return nil, fmt.Errorf("Unable 
to update resource ") } // UpdateStatusResource updates the resource "status" subresource -func (c *Client) UpdateStatusResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) { +func (c *Client) UpdateStatusResource(resource string, namespace string, obj interface{}) (*unstructured.Unstructured, error) { // convert typed to unstructured obj if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil { - return c.getResourceInterface(kind, namespace).UpdateStatus(unstructuredObj, meta.UpdateOptions{}) + return c.getResourceInterface(resource, namespace).UpdateStatus(unstructuredObj, meta.UpdateOptions{}) } return nil, fmt.Errorf("Unable to update resource ") } @@ -179,7 +181,7 @@ func (c *Client) GenerateSecret(generator types.Generation, namespace string) er // if generator.CopyFrom != nil { c.logger.Printf("Copying data from secret %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name) // Get configMap resource - unstrSecret, err := c.GetResource(Secret, generator.CopyFrom.Namespace, generator.CopyFrom.Name) + unstrSecret, err := c.GetResource(Secrets, generator.CopyFrom.Namespace, generator.CopyFrom.Name) if err != nil { return err } @@ -219,7 +221,7 @@ func (c *Client) GenerateConfigMap(generator types.Generation, namespace string) // if generator.CopyFrom != nil { c.logger.Printf("Copying data from configmap %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name) // Get configMap resource - unstrConfigMap, err := c.GetResource("configmaps", generator.CopyFrom.Namespace, generator.CopyFrom.Name) + unstrConfigMap, err := c.GetResource(ConfigMaps, generator.CopyFrom.Namespace, generator.CopyFrom.Name) if err != nil { return err } @@ -276,7 +278,7 @@ func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSign func (c *Client) createConfigMapAfterNamespaceIsCreated(configMap v1.ConfigMap, namespace string) { err := c.waitUntilNamespaceIsCreated(namespace) if err == nil { - _, err 
= c.CreateResource("configmaps", namespace, configMap) + _, err = c.CreateResource(ConfigMaps, namespace, configMap) } if err != nil { c.logger.Printf("Can't create a configmap: %s", err) @@ -286,7 +288,7 @@ func (c *Client) createConfigMapAfterNamespaceIsCreated(configMap v1.ConfigMap, func (c *Client) createSecretAfterNamespaceIsCreated(secret v1.Secret, namespace string) { err := c.waitUntilNamespaceIsCreated(namespace) if err == nil { - _, err = c.CreateResource(Secret, namespace, secret) + _, err = c.CreateResource(Secrets, namespace, secret) } if err != nil { c.logger.Printf("Can't create a secret: %s", err) @@ -299,7 +301,7 @@ func (c *Client) waitUntilNamespaceIsCreated(name string) error { var lastError error = nil for time.Now().Sub(timeStart) < namespaceCreationMaxWaitTime { - _, lastError = c.GetResource("namespaces", "", name) + _, lastError = c.GetResource(Namespaces, "", name) if lastError == nil { break } @@ -308,10 +310,24 @@ func (c *Client) waitUntilNamespaceIsCreated(name string) error { return lastError } -// KindIsSupported checks if the kind is a registerd GVK -func (c *Client) KindIsSupported(kind string) bool { - kind = strings.ToLower(kind) + "s" - buildGVKMapper(c.clientConfig, false) - _, ok := getValue(kind) - return ok +func (c *Client) getGVR(resource string) schema.GroupVersionResource { + emptyGVR := schema.GroupVersionResource{} + serverresources, err := c.cachedClient.ServerPreferredResources() + if err != nil { + utilruntime.HandleError(err) + return emptyGVR + } + resources, err := discovery.GroupVersionResources(serverresources) + if err != nil { + utilruntime.HandleError(err) + return emptyGVR + } + //TODO using cached client to support cache validation and invalidation + // iterate over the key to compare the resource + for gvr, _ := range resources { + if gvr.Resource == resource { + return gvr + } + } + return emptyGVR } diff --git a/client/utils.go b/client/utils.go index d768ddda78..d787cdf8e6 100644 --- a/client/utils.go 
+++ b/client/utils.go @@ -1,99 +1,14 @@ package client import ( - "fmt" - "strings" "time" - - "k8s.io/apimachinery/pkg/runtime/schema" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/client-go/kubernetes" - "k8s.io/client-go/rest" ) const ( - CSR string = "certificatesigningrequests" - Secret string = "secrets" + CSRs string = "certificatesigningrequests" + Secrets string = "secrets" + ConfigMaps string = "configmaps" + Namespaces string = "namespaces" ) const namespaceCreationMaxWaitTime time.Duration = 30 * time.Second const namespaceCreationWaitInterval time.Duration = 100 * time.Millisecond - -var groupVersionMapper map[string]schema.GroupVersionResource -var kubeClient *kubernetes.Clientset - -func getGrpVersionMapper(kind string, clientConfig *rest.Config, refresh bool) schema.GroupVersionResource { - // build the GVK mapper - buildGVKMapper(clientConfig, refresh) - // Query mapper - if val, ok := getValue(kind); ok { - return *val - } - utilruntime.HandleError(fmt.Errorf("Resouce '%s' not registered", kind)) - return schema.GroupVersionResource{} -} - -func buildGVKMapper(clientConfig *rest.Config, refresh bool) { - if groupVersionMapper == nil || refresh { - groupVersionMapper = make(map[string]schema.GroupVersionResource) - // refresh the mapper - if err := refreshRegisteredResources(groupVersionMapper, clientConfig); err != nil { - utilruntime.HandleError(err) - return - } - } -} - -func getValue(kind string) (*schema.GroupVersionResource, bool) { - if groupVersionMapper == nil { - utilruntime.HandleError(fmt.Errorf("GroupVersionKind mapper is not loaded")) - return nil, false - } - if val, ok := groupVersionMapper[kind]; ok { - return &val, true - } - return nil, false -} - -func refreshRegisteredResources(mapper map[string]schema.GroupVersionResource, clientConfig *rest.Config) error { - // build kubernetes client - client, err := newKubeClient(clientConfig) - if err != nil { - return err - } - - // get registered server groups and 
resources - _, resourceList, err := client.Discovery().ServerGroupsAndResources() - if err != nil { - return err - } - for _, apiResource := range resourceList { - for _, resource := range apiResource.APIResources { - grpVersion := strings.Split(apiResource.GroupVersion, "/") - if len(grpVersion) == 2 { - mapper[resource.Name] = schema.GroupVersionResource{ - Group: grpVersion[0], - Version: grpVersion[1], - Resource: resource.Name, - } - } else { - // resources with only versions - mapper[resource.Name] = schema.GroupVersionResource{ - Version: apiResource.GroupVersion, - Resource: resource.Name, - } - } - } - } - return nil -} - -func newKubeClient(clientConfig *rest.Config) (*kubernetes.Clientset, error) { - var err error - if kubeClient == nil { - kubeClient, err = kubernetes.NewForConfig(clientConfig) - if err != nil { - return nil, err - } - } - return kubeClient, nil -} diff --git a/definitions/MutatingWebhookConfiguration.yaml b/definitions/MutatingWebhookConfiguration.yaml index 420ee023d5..a45fc667c8 100644 --- a/definitions/MutatingWebhookConfiguration.yaml +++ b/definitions/MutatingWebhookConfiguration.yaml @@ -3,14 +3,14 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: MutatingWebhookConfiguration metadata: - name: nirmata-kube-policy-webhook-cfg + name: nirmata-kyverno-webhook-cfg labels: - app: kube-policy + app: kyverno webhooks: - - name: webhook.nirmata.kube-policy + - name: webhook.nirmata.kyverno clientConfig: service: - name: kube-policy-svc + name: kyverno-svc namespace: default path: "/mutate" caBundle: ${CA_BUNDLE} diff --git a/definitions/MutatingWebhookConfiguration_debug.yaml b/definitions/MutatingWebhookConfiguration_debug.yaml index 713fb9cb40..e3be018429 100644 --- a/definitions/MutatingWebhookConfiguration_debug.yaml +++ b/definitions/MutatingWebhookConfiguration_debug.yaml @@ -3,11 +3,11 @@ apiVersion: admissionregistration.k8s.io/v1beta1 kind: MutatingWebhookConfiguration metadata: - name: 
nirmata-kube-policy-webhook-cfg-debug + name: nirmata-kyverno-webhook-cfg-debug labels: - app: kube-policy + app: kyverno webhooks: - - name: webhook.nirmata.kube-policy + - name: webhook.nirmata.kyverno clientConfig: url: "https://localhost/mutate" caBundle: ${CA_BUNDLE} diff --git a/definitions/install.yaml b/definitions/install.yaml index f23e12fad3..f7ca441a23 100644 --- a/definitions/install.yaml +++ b/definitions/install.yaml @@ -1,9 +1,9 @@ apiVersion: apiextensions.k8s.io/v1beta1 kind: CustomResourceDefinition metadata: - name: policies.kubepolicy.nirmata.io + name: policies.kyverno.io spec: - group: kubepolicy.nirmata.io + group: kyverno.io versions: - name: v1alpha1 served: true @@ -35,29 +35,31 @@ spec: resource: type: object required: - - kind + - kinds properties: - kind: - type: string - enum: - - ConfigMap - - CronJob - - DaemonSet - - Deployment - - Endpoints - - HorizontalPodAutoscaler - - Ingress - - Job - - LimitRange - - Namespace - - NetworkPolicy - - PersistentVolumeClaim - - PodDisruptionBudget - - PodTemplate - - ResourceQuota - - Secret - - Service - - StatefulSet + kinds: + type: array + items: + type: string + enum: + - ConfigMap + - CronJob + - DaemonSet + - Deployment + - Endpoints + - HorizontalPodAutoscaler + - Ingress + - Job + - LimitRange + - Namespace + - NetworkPolicy + - PersistentVolumeClaim + - PodDisruptionBudget + - PodTemplate + - ResourceQuota + - Secret + - Service + - StatefulSet name: type: string selector: @@ -143,57 +145,62 @@ spec: additionalProperties: type: string --- +kind: Namespace +apiVersion: v1 +metadata: + name: "kyverno" +--- apiVersion: v1 kind: Service metadata: - namespace: kube-system - name: kube-policy-svc + namespace: kyverno + name: kyverno-svc labels: - app: kube-policy + app: kyverno spec: ports: - port: 443 targetPort: 443 selector: - app: kube-policy + app: kyverno --- apiVersion: v1 kind: ServiceAccount metadata: - name: kube-policy-service-account - namespace: kube-system + name: 
kyverno-service-account + namespace: kyverno --- kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: - name: kube-policy-admin + name: kyverno-admin roleRef: apiGroup: rbac.authorization.k8s.io kind: ClusterRole name: cluster-admin subjects: - kind: ServiceAccount - name: kube-policy-service-account - namespace: kube-system + name: kyverno-service-account + namespace: kyverno --- apiVersion: extensions/v1beta1 kind: Deployment metadata: - namespace: kube-system - name: kube-policy-deployment + namespace: kyverno + name: kyverno-deployment labels: - app: kube-policy + app: kyverno spec: replicas: 1 template: metadata: labels: - app: kube-policy + app: kyverno spec: - serviceAccountName: kube-policy-service-account + serviceAccountName: kyverno-service-account containers: - - name: kube-policy - image: nirmata/kube-policy:latest + - name: kyverno + image: nirmata/kyverno:latest imagePullPolicy: IfNotPresent ports: - containerPort: 443 diff --git a/documentation/installation.md b/documentation/installation.md index 97d88a591c..03cab883f3 100644 --- a/documentation/installation.md +++ b/documentation/installation.md @@ -9,15 +9,15 @@ Just execute the command for creating all necesarry resources: `kubectl create -f definitions/install.yaml` In this mode controller will get TLS key/certificate pair and loads in-cluster config automatically on start. -To check if the controller is working, find it in the list of kube-system pods: +To check if the controller is working, find it in the list of kyverno pods: -`kubectl get pods -n kube-system` +`kubectl get pods -n kyverno` -The pod with controller contains **'kube-policy'** in its name. The STATUS column will show the health state of the controller. If controller doesn't start, see its logs: +The pod with controller contains **'kyverno'** in its name. The STATUS column will show the health state of the controller. 
If controller doesn't start, see its logs: -`kubectl describe pod -n kube-system` +`kubectl describe pod -n kyverno` or -`kubectl logs -n kube-system` +`kubectl logs -n kyverno` diff --git a/examples/CLI/deployment/policy-deployment.yaml b/examples/CLI/deployment/policy-deployment.yaml index ef8ab8b9bf..23e52be16b 100644 --- a/examples/CLI/deployment/policy-deployment.yaml +++ b/examples/CLI/deployment/policy-deployment.yaml @@ -1,4 +1,4 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.io/v1alpha1 kind : Policy metadata : name : policy-deployment @@ -6,7 +6,8 @@ spec : rules: - name: deployment-policy resource: - kind : Deployment + kinds : + - Deployment selector : matchLabels : cli: test diff --git a/examples/Validate/check_cpu_memory.yaml b/examples/Validate/check_cpu_memory.yaml index 61ab1d2b70..f4a418296a 100644 --- a/examples/Validate/check_cpu_memory.yaml +++ b/examples/Validate/check_cpu_memory.yaml @@ -1,4 +1,4 @@ -apiVersion: policy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: check-cpu-memory @@ -6,7 +6,8 @@ spec: rules: - name: check-defined resource: - kind: Deployment + kinds: + - Deployment validate: message: "Resource requests and limits are required for CPU and memory" pattern: @@ -22,7 +23,8 @@ spec: cpu: "?" 
- name: check-memory-in-range resource: - kind: Deployment + kinds: + - Deployment validate: message: "Memory request cannot be greater than 10Gi" pattern: diff --git a/examples/Validate/check_hostpath.yaml b/examples/Validate/check_hostpath.yaml index 8ebe1d7817..35dd08225b 100644 --- a/examples/Validate/check_hostpath.yaml +++ b/examples/Validate/check_hostpath.yaml @@ -1,4 +1,4 @@ -apiVersion: policy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: check-host-path @@ -6,7 +6,8 @@ spec: rules: - name: check-host-path resource: - kind: Pod + kinds: + - Pod validate: message: "Host path volumes are not allowed" pattern: diff --git a/examples/Validate/check_image_version.yaml b/examples/Validate/check_image_version.yaml index e71335499e..15c8cb2d9a 100644 --- a/examples/Validate/check_image_version.yaml +++ b/examples/Validate/check_image_version.yaml @@ -1,4 +1,4 @@ -apiVersion: policy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: image-pull-policy @@ -7,7 +7,8 @@ spec: - name: image-pull-policy message: "Image tag ':latest' requires imagePullPolicy 'Always'" resource: - kind: Deployment + kinds: + - Deployment overlay: template: spec: diff --git a/examples/Validate/check_nodeport.yaml b/examples/Validate/check_nodeport.yaml index 1a5fd25c05..c2456f00e3 100644 --- a/examples/Validate/check_nodeport.yaml +++ b/examples/Validate/check_nodeport.yaml @@ -1,4 +1,4 @@ -apiVersion: policy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: check-host-path @@ -6,7 +6,8 @@ spec: rules: - name: check-host-path resource: - kind: Service + kinds: + - Service validate: message: "Node port services are not allowed" pattern: diff --git a/examples/Validate/check_probe_exists.yaml b/examples/Validate/check_probe_exists.yaml index d055455bf5..442525a423 100644 --- a/examples/Validate/check_probe_exists.yaml +++ b/examples/Validate/check_probe_exists.yaml @@ 
-1,4 +1,4 @@ -apiVersion: policy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: check-probe-exists @@ -6,7 +6,8 @@ spec: rules: - name: check-liveness-probe-exists resource: - kind: StatefulSet + kinds: + - StatefulSet validate: message: "a livenessProbe is required" pattern: @@ -14,10 +15,11 @@ spec: # In this case every object in containers list will be checked for pattern - name: "*" livenessProbe: - periodSeconds: ? + periodSeconds: "?" - resource: - kind: Deployment - name: check-readiness-probe-exists + kinds: + - Deployment + name: check-readinessprobe-exists validate: message: "a readinessProbe is required" pattern: @@ -25,4 +27,4 @@ spec: # In this case every object in containers list will be checked for pattern - name: "*" readinessProbe: - periodSeconds: ? + periodSeconds: "?" diff --git a/examples/Validate/check_probe_intervals.yaml b/examples/Validate/check_probe_intervals.yaml index 77bc50b9ce..15f93e4a88 100644 --- a/examples/Validate/check_probe_intervals.yaml +++ b/examples/Validate/check_probe_intervals.yaml @@ -1,4 +1,4 @@ -apiVersion: policy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: check-probe-intervals @@ -6,7 +6,8 @@ spec: rules: - name: check-probe-intervals resource: - kind: Deployment + kinds: + - Deployment validate: message: "livenessProbe must be > 10s" pattern: @@ -16,7 +17,9 @@ spec: livenessProbe: periodSeconds: ">10" - resource: - kind: Deployment + kinds: + - Deployment + name: check-readinessprobe-intervals validate: pattern: message: "readinessProbe must be > 10s" diff --git a/examples/Validate/check_whitelist_registries.yaml b/examples/Validate/check_whitelist_registries.yaml index 79aceab2e0..386fe92cda 100644 --- a/examples/Validate/check_whitelist_registries.yaml +++ b/examples/Validate/check_whitelist_registries.yaml @@ -1,4 +1,4 @@ -apiVersion: policy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind: Policy 
metadata: name: check-whitelist-registries @@ -7,7 +7,8 @@ spec: - name: check-whitelist-registries message: "Registry is not allowed" resource: - kind: Deployment + kinds: + - Deployment validate: pattern: template: diff --git a/pkg/apis/policy/register.go b/pkg/apis/policy/register.go index d55c28d2f6..eb0717d576 100644 --- a/pkg/apis/policy/register.go +++ b/pkg/apis/policy/register.go @@ -2,5 +2,5 @@ package policy const ( // GroupName must be the same as specified in Policy CRD - GroupName = "kubepolicy.nirmata.io" + GroupName = "kyverno.io" ) diff --git a/pkg/apis/policy/v1alpha1/doc.go b/pkg/apis/policy/v1alpha1/doc.go index d1cb706659..4fa2b53292 100644 --- a/pkg/apis/policy/v1alpha1/doc.go +++ b/pkg/apis/policy/v1alpha1/doc.go @@ -1,4 +1,4 @@ // +k8s:deepcopy-gen=package -// +groupName=kubepolicy.nirmata.io +// +groupName=kyverno.io package v1alpha1 diff --git a/pkg/apis/policy/v1alpha1/types.go b/pkg/apis/policy/v1alpha1/types.go index 0e4374494c..12c0f7c9dd 100644 --- a/pkg/apis/policy/v1alpha1/types.go +++ b/pkg/apis/policy/v1alpha1/types.go @@ -33,7 +33,7 @@ type Rule struct { // ResourceDescription describes the resource to which the PolicyRule will be applied. type ResourceDescription struct { - Kind string `json:"kind"` + Kinds []string `json:"kinds"` Name *string `json:"name"` Selector *metav1.LabelSelector `json:"selector"` } diff --git a/pkg/apis/policy/v1alpha1/utils.go b/pkg/apis/policy/v1alpha1/utils.go index 65719ba360..3fe4bf570b 100644 --- a/pkg/apis/policy/v1alpha1/utils.go +++ b/pkg/apis/policy/v1alpha1/utils.go @@ -22,13 +22,11 @@ func (r *Rule) Validate() error { } // Validate checks if all necesarry fields are present and have values. Also checks a Selector. -// Returns error if resource definition is invalid. 
+// Returns error if +// - kinds is not defined func (pr *ResourceDescription) Validate() error { - // TBD: selector or name MUST be specified - if pr.Kind == "" { + if len(pr.Kinds) == 0 { return errors.New("The Kind is not specified") - } else if pr.Name == nil && pr.Selector == nil { - return errors.New("Neither Name nor Selector is specified") } if pr.Selector != nil { diff --git a/pkg/apis/policy/v1alpha1/utils_test.go b/pkg/apis/policy/v1alpha1/utils_test.go index 2d050ce3dc..867cfd729e 100644 --- a/pkg/apis/policy/v1alpha1/utils_test.go +++ b/pkg/apis/policy/v1alpha1/utils_test.go @@ -9,8 +9,8 @@ import ( var defaultResourceDescriptionName = "defaultResourceDescription" var defaultResourceDescription = ResourceDescription{ - Kind: "Deployment", - Name: &defaultResourceDescriptionName, + Kinds: []string{"Deployment"}, + Name: &defaultResourceDescriptionName, Selector: &metav1.LabelSelector{ MatchLabels: map[string]string{"LabelForSelector": "defaultResourceDescription"}, }, @@ -43,7 +43,7 @@ func Test_ResourceDescription_EmptyKind(t *testing.T) { func Test_ResourceDescription_EmptyNameAndSelector(t *testing.T) { resourceDescription := ResourceDescription{ - Kind: "Deployment", + Kinds: []string{"Deployment"}, } err := resourceDescription.Validate() assert.Assert(t, err != nil) diff --git a/pkg/config/config.go b/pkg/config/config.go index 8e0e8eb359..f6db7fc86c 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -2,28 +2,28 @@ package config const ( // These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml - KubePolicyNamespace = "kube-system" - WebhookServiceName = "kube-policy-svc" + KubePolicyNamespace = "kyverno" + WebhookServiceName = "kyverno-svc" - MutatingWebhookConfigurationName = "kube-policy-mutating-webhook-cfg" - MutatingWebhookName = "nirmata.kube-policy.mutating-webhook" + MutatingWebhookConfigurationName = "kyverno-mutating-webhook-cfg" + MutatingWebhookName = 
"nirmata.kyverno.mutating-webhook" - ValidatingWebhookConfigurationName = "kube-policy-validating-webhook-cfg" - ValidatingWebhookName = "nirmata.kube-policy.validating-webhook" + ValidatingWebhookConfigurationName = "kyverno-validating-webhook-cfg" + ValidatingWebhookName = "nirmata.kyverno.validating-webhook" // Due to kubernetes issue, we must use next literal constants instead of deployment TypeMeta fields // Issue: https://github.com/kubernetes/kubernetes/pull/63972 // When the issue is closed, we should use TypeMeta struct instead of this constants DeploymentKind = "Deployment" DeploymentAPIVersion = "extensions/v1beta1" - KubePolicyDeploymentName = "kube-policy-deployment" + KubePolicyDeploymentName = "kyverno-deployment" ) var ( MutatingWebhookServicePath = "/mutate" ValidatingWebhookServicePath = "/validate" KubePolicyAppLabels = map[string]string{ - "app": "kube-policy", + "app": "kyverno", } SupportedKinds = []string{ diff --git a/pkg/engine/utils.go b/pkg/engine/utils.go index af96f97806..3b18d3253b 100644 --- a/pkg/engine/utils.go +++ b/pkg/engine/utils.go @@ -13,7 +13,7 @@ import ( // ResourceMeetsDescription checks requests kind, name and labels to fit the policy rule func ResourceMeetsDescription(resourceRaw []byte, description kubepolicy.ResourceDescription, gvk metav1.GroupVersionKind) bool { - if description.Kind != gvk.Kind { + if !findKind(description.Kinds, gvk.Kind) { return false } @@ -116,3 +116,12 @@ func GetAnchorsFromMap(anchorsMap map[string]interface{}) map[string]interface{} return result } + +func findKind(kinds []string, kindGVK string) bool { + for _, kind := range kinds { + if kind == kindGVK { + return true + } + } + return false +} diff --git a/pkg/engine/validation_test.go b/pkg/engine/validation_test.go index 7a55cc0b1e..e2c9f54454 100644 --- a/pkg/engine/validation_test.go +++ b/pkg/engine/validation_test.go @@ -356,7 +356,7 @@ func TestValidateMapElement_OneElementInArrayNotPass(t *testing.T) { } func 
TestValidate_ServiceTest(t *testing.T) { - rawPolicy := []byte(`{ "apiVersion": "kubepolicy.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-service" }, "spec": { "rules": [ { "name": "ps1", "resource": { "kind": "Service", "name": "game-service*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/secretLabel", "op": "replace", "value": "weKnow" }, { "path": "/metadata/labels/originalLabel", "op": "remove" }, { "path": "/spec/selector/app", "op": "replace", "value": "mutedApp" } ] }, "validate": { "message": "This resource is broken", "pattern": { "spec": { "ports": [ { "name": "hs", "protocol": 32 } ] } } } } ] } }`) + rawPolicy := []byte(`{ "apiVersion": "kyverno.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-service" }, "spec": { "rules": [ { "name": "ps1", "resource": { "kind": "Service", "name": "game-service*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/secretLabel", "op": "replace", "value": "weKnow" }, { "path": "/metadata/labels/originalLabel", "op": "remove" }, { "path": "/spec/selector/app", "op": "replace", "value": "mutedApp" } ] }, "validate": { "message": "This resource is broken", "pattern": { "spec": { "ports": [ { "name": "hs", "protocol": 32 } ] } } } } ] } }`) rawResource := []byte(`{ "kind": "Service", "apiVersion": "v1", "metadata": { "name": "game-service", "labels": { "originalLabel": "isHere", "secretLabel": "thisIsMySecret" } }, "spec": { "selector": { "app": "MyApp" }, "ports": [ { "name": "http", "protocol": "TCP", "port": 80, "targetPort": 9376 } ] } }`) var policy kubepolicy.Policy @@ -370,7 +370,7 @@ func TestValidate_ServiceTest(t *testing.T) { } func TestValidate_MapHasFloats(t *testing.T) { - rawPolicy := []byte(`{ "apiVersion": "kubepolicy.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-deployment-changed" 
}, "spec": { "rules": [ { "name": "First policy v2", "resource": { "kind": "Deployment", "name": "nginx-*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/app", "op": "replace", "value": "nginx_is_mutated" } ] }, "validate": { "message": "replicas number is wrong", "pattern": { "metadata": { "labels": { "app": "*" } }, "spec": { "replicas": 3 } } } } ] } }`) + rawPolicy := []byte(`{ "apiVersion": "kyverno.nirmata.io/v1alpha1", "kind": "Policy", "metadata": { "name": "policy-deployment-changed" }, "spec": { "rules": [ { "name": "First policy v2", "resource": { "kind": "Deployment", "name": "nginx-*" }, "mutate": { "patches": [ { "path": "/metadata/labels/isMutated", "op": "add", "value": "true" }, { "path": "/metadata/labels/app", "op": "replace", "value": "nginx_is_mutated" } ] }, "validate": { "message": "replicas number is wrong", "pattern": { "metadata": { "labels": { "app": "*" } }, "spec": { "replicas": 3 } } } } ] } }`) rawResource := []byte(`{ "apiVersion": "apps/v1", "kind": "Deployment", "metadata": { "name": "nginx-deployment", "labels": { "app": "nginx" } }, "spec": { "replicas": 3, "selector": { "matchLabels": { "app": "nginx" } }, "template": { "metadata": { "labels": { "app": "nginx" } }, "spec": { "containers": [ { "name": "nginx", "image": "nginx:1.7.9", "ports": [ { "containerPort": 80 } ] } ] } } } }`) var policy kubepolicy.Policy diff --git a/pkg/kyverno/apply/apply.go b/pkg/kyverno/apply/apply.go index 1bdfd9d293..785cfc15bf 100644 --- a/pkg/kyverno/apply/apply.go +++ b/pkg/kyverno/apply/apply.go @@ -82,7 +82,7 @@ func complete(args []string) (*kubepolicy.Policy, []*resourceInfo) { func applyPolicy(policy *kubepolicy.Policy, rawResource []byte, gvk *metav1.GroupVersionKind) ([]byte, error) { _, patchedDocument := engine.Mutate(*policy, rawResource, *gvk) - if err := engine.Validate(*policy, rawResource, *gvk); err != nil { + if err := engine.Validate(*policy, 
patchedDocument, *gvk); err != nil { return nil, err } return patchedDocument, nil diff --git a/pkg/sharedinformer/sharedinformerfactory.go b/pkg/sharedinformer/sharedinformerfactory.go index 4400e38d38..16c1b722d6 100644 --- a/pkg/sharedinformer/sharedinformerfactory.go +++ b/pkg/sharedinformer/sharedinformerfactory.go @@ -44,7 +44,7 @@ func (si *sharedInfomer) Run(stopCh <-chan struct{}) { } func (si *sharedInfomer) getInfomer() infomertypes.PolicyInformer { - return si.policyInformerFactory.Kubepolicy().V1alpha1().Policies() + return si.policyInformerFactory.Kyverno().V1alpha1().Policies() } func (si *sharedInfomer) GetInfomer() cache.SharedIndexInformer { return si.getInfomer().Informer() diff --git a/pkg/violation/builder.go b/pkg/violation/builder.go index 3579037e47..8e76660be8 100644 --- a/pkg/violation/builder.go +++ b/pkg/violation/builder.go @@ -90,7 +90,7 @@ func (b *builder) processViolation(info Info) error { modifiedPolicy.Status.Violations = modifiedViolations // Violations are part of the status sub resource, so we can use the Update Status api instead of updating the policy object - _, err = b.client.UpdateStatusResource("policies", namespace, modifiedPolicy) + _, err = b.client.UpdateStatusResource("policies/status", namespace, modifiedPolicy) if err != nil { return err } diff --git a/pkg/webhooks/server.go b/pkg/webhooks/server.go index 0c5156ed38..116c0f7b98 100644 --- a/pkg/webhooks/server.go +++ b/pkg/webhooks/server.go @@ -86,13 +86,11 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) { admissionReview.Response = &v1beta1.AdmissionResponse{ Allowed: true, } - if ws.client.KindIsSupported(admissionReview.Request.Kind.Kind) { - switch r.URL.Path { - case config.MutatingWebhookServicePath: - admissionReview.Response = ws.HandleMutation(admissionReview.Request) - case config.ValidatingWebhookServicePath: - admissionReview.Response = ws.HandleValidation(admissionReview.Request) - } + switch r.URL.Path { + case 
config.MutatingWebhookServicePath: + admissionReview.Response = ws.HandleMutation(admissionReview.Request) + case config.ValidatingWebhookServicePath: + admissionReview.Response = ws.HandleValidation(admissionReview.Request) } admissionReview.Response.UID = admissionReview.Request.UID diff --git a/scripts/README.md b/scripts/README.md index 9452b1796e..3db3daaab4 100644 --- a/scripts/README.md +++ b/scripts/README.md @@ -7,7 +7,7 @@ Compiles the project to go executable, generates docker image and pushes it to t ### generate-server-cert.sh ### Generates TLS certificate and key that used by webhook server. Example: -`scripts/generate-server-cert.sh --service=kube-policy-svc --namespace=my_namespace --serverIp=192.168.10.117` +`scripts/generate-server-cert.sh --service=kyverno-svc --namespace=my_namespace --serverIp=192.168.10.117` * `--service` identifies the service for in-cluster webhook server. Do not specify it if you plan to run webhook server outside the cluster, or cpecify 'localhost' if you want to run controller locally. * `--namespace` identifies the namespace for in-cluster webhook server. Do not specify it if you plan to run controller locally. * `--serverIp` is the IP of master node, it can be found in `~/.kube/config`: clusters.cluster[0].server. You should explicitly specify it. @@ -18,7 +18,7 @@ Prepares controller for free (local) or in-cluster use. Uses `generate-server-ce * `--namespace` - the target namespace to deploy the controller. Do not specify it if you want to depoloy controller locally. 
* `--serverIp` means the same as for `generate-server-cert.sh` Examples: -`scripts/deploy-controller.sh --service=my-kube-policy --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kube-policy' +`scripts/deploy-controller.sh --service=my-kyverno --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kyverno' `scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with mnaster node at '192.168.10.117' ### test-web-hook.sh ### diff --git a/scripts/deploy-controller.sh b/scripts/deploy-controller.sh index ff9bd3f0d5..5399d4b1f2 100755 --- a/scripts/deploy-controller.sh +++ b/scripts/deploy-controller.sh @@ -19,7 +19,7 @@ esac done hub_user_name="nirmata" -project_name="kube-policy" +project_name="kyverno" if [ -z "${service_name}" ]; then service_name="${project_name}-svc" @@ -40,7 +40,7 @@ if [ -z "${namespace}" ]; then # controller should be launched locally kubectl delete -f definitions/install.yaml kubectl create -f definitions/install.yaml || exit 3 - echo -e "\n### You can build and run kube-policy project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)." + echo -e "\n### You can build and run kyverno project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)." 
else # controller should be launched within a cluster diff --git a/test/ConfigMap/policy-CM.yaml b/test/ConfigMap/policy-CM.yaml index 843ff23f7e..4cef6e1084 100644 --- a/test/ConfigMap/policy-CM.yaml +++ b/test/ConfigMap/policy-CM.yaml @@ -1,4 +1,4 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.io/v1alpha1 kind : Policy metadata : name : policy-cm @@ -6,7 +6,8 @@ spec : rules: - name: pCM1 resource: - kind : ConfigMap + kinds : + - ConfigMap name: "game-config" mutate: patches: @@ -15,7 +16,8 @@ spec : value : newValue - name: pCM2 resource: - kind : ConfigMap + kinds : + - ConfigMap name: "game-config" mutate: patches: @@ -26,7 +28,8 @@ spec : value : "data is replaced" - name: pCM3 resource: - kind : ConfigMap + kinds : + - ConfigMap name: "game-config" mutate: patches: @@ -40,7 +43,8 @@ spec : game.properties: "*enemies=aliens*" - name: pCM4 resource: - kind : ConfigMap + kinds : + - ConfigMap name: "game-config" validate: message: "This CM data is broken because it does not have ui.properties" diff --git a/test/ConfigMapGenerator-SecretGenerator/policy-cm-test.yaml b/test/ConfigMapGenerator-SecretGenerator/policy-cm-test.yaml index 3a0ce26477..9ae1f69d10 100644 --- a/test/ConfigMapGenerator-SecretGenerator/policy-cm-test.yaml +++ b/test/ConfigMapGenerator-SecretGenerator/policy-cm-test.yaml @@ -1,4 +1,4 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.io/v1alpha1 kind: Policy metadata : name: "policy-configmapgenerator-test" @@ -6,12 +6,13 @@ spec: rules: - name: "copyCM" resource : - kind : Namespace + kinds : + - Namespace selector: matchLabels: LabelForSelector : "namespace2" generate : - kind: ConfigMap + - kind: ConfigMap name : copied-cm copyFrom : namespace : default diff --git a/test/ConfigMapGenerator-SecretGenerator/policy-namespace-patch-cmgCG-sgCG.yaml b/test/ConfigMapGenerator-SecretGenerator/policy-namespace-patch-cmgCG-sgCG.yaml index 56e21a4598..522e3829c9 100644 --- 
a/test/ConfigMapGenerator-SecretGenerator/policy-namespace-patch-cmgCG-sgCG.yaml +++ b/test/ConfigMapGenerator-SecretGenerator/policy-namespace-patch-cmgCG-sgCG.yaml @@ -3,7 +3,7 @@ # To apply this policy you need to create secret and configMap in "default" namespace # and then create a namespace -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.io/v1alpha1 kind : Policy metadata : name : "policy-ns-patch-cmg-sg" @@ -11,7 +11,8 @@ spec : rules: - name: "patchNamespace2" resource : - kind : Namespace + kinds : + - Namespace selector: matchLabels: LabelForSelector : "namespace2" @@ -23,12 +24,13 @@ spec : - name: "copyCM" resource : - kind : Namespace + kinds : + - Namespace selector: matchLabels: LabelForSelector : "namespace2" generate : - kind: ConfigMap + - kind: ConfigMap name : copied-cm copyFrom : namespace : default @@ -38,12 +40,13 @@ spec : - name: "generateCM" resource : - kind : Namespace + kinds : + - Namespace selector: matchLabels: LabelForSelector : "namespace2" generate : - kind: ConfigMap + - kind: ConfigMap name : generated-cm data : secretData: "very sensitive data from cmg" @@ -56,10 +59,11 @@ spec : - name: "generateSecret" resource : - kind : Namespace + kinds : + - Namespace name: ns2 generate : - kind: Secret + - kind: Secret name : generated-secrets data : foo : bar @@ -72,10 +76,11 @@ spec : - name: "copySecret" resource : - kind : Namespace + kinds : + - Namespace name: ns2 generate : - kind: Secret + - kind: Secret name : copied-secrets copyFrom : namespace : default diff --git a/test/CronJob/policy-cronjob-wldcrd.yaml b/test/CronJob/policy-cronjob-wldcrd.yaml index 4ef1598c35..c19f7f2f07 100644 --- a/test/CronJob/policy-cronjob-wldcrd.yaml +++ b/test/CronJob/policy-cronjob-wldcrd.yaml @@ -1,12 +1,13 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.io/v1alpha1 kind: Policy metadata: name: policy-cronjob spec: rules: - - name: + - name: pCJ resource: - kind : CronJob + kinds : + - CronJob name: "?ell*" 
mutate: patches: diff --git a/test/DaemonSet/policy-daemonset.yaml b/test/DaemonSet/policy-daemonset.yaml index 47912c2795..63e41ef074 100644 --- a/test/DaemonSet/policy-daemonset.yaml +++ b/test/DaemonSet/policy-daemonset.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.io/v1alpha1 kind: Policy metadata: name: policy-daemonset @@ -6,7 +6,8 @@ spec: rules: - name: "Patch and Volume validation" resource: - kind: DaemonSet + kinds: + - DaemonSet name: fluentd-elasticsearch mutate: patches: diff --git a/test/Deployment/policy-deployment-any.yaml b/test/Deployment/policy-deployment-any.yaml index 6a43ec225d..0218a1a8a2 100644 --- a/test/Deployment/policy-deployment-any.yaml +++ b/test/Deployment/policy-deployment-any.yaml @@ -1,4 +1,4 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.io/v1alpha1 kind : Policy metadata : name : policy-deployment @@ -6,8 +6,8 @@ spec : rules: - name: "First policy v2" resource: - kind : Deployment - name: nginx-* + kinds : + - Deployment mutate: patches: - path: /metadata/labels/isMutated diff --git a/test/Endpoint/policy-endpoints.yaml b/test/Endpoint/policy-endpoints.yaml index 335573c6ba..696dac4711 100644 --- a/test/Endpoint/policy-endpoints.yaml +++ b/test/Endpoint/policy-endpoints.yaml @@ -1,12 +1,13 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.io/v1alpha1 kind : Policy metadata : name : policy-endpoints spec : rules: - - name: + - name: pEP resource: - kind : Endpoints + kinds : + - Endpoints selector: matchLabels: label : test @@ -19,7 +20,7 @@ spec : op: add value: addresses: - - ip: "192.168.10.171" + - ip: "192.168.10.172" ports: - name: load-balancer-connection port: 80 diff --git a/test/HorizontalPodAutoscaler/policy-hpa.yaml b/test/HorizontalPodAutoscaler/policy-hpa.yaml index 840c41fc46..fbdad6b56f 100644 --- a/test/HorizontalPodAutoscaler/policy-hpa.yaml +++ b/test/HorizontalPodAutoscaler/policy-hpa.yaml @@ -1,4 +1,4 @@ -apiVersion: 
kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.io/v1alpha1 kind: Policy metadata: name: policy-hpa @@ -6,7 +6,8 @@ spec : rules: - name: hpa1 resource: - kind : HorizontalPodAutoscaler + kinds : + - HorizontalPodAutoscaler selector: matchLabels: originalLabel: isHere diff --git a/test/Ingress/policy-ingress.yaml b/test/Ingress/policy-ingress.yaml index e0c2abb1a7..220f741df6 100644 --- a/test/Ingress/policy-ingress.yaml +++ b/test/Ingress/policy-ingress.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata : name : policy-ingress @@ -6,7 +6,8 @@ spec : rules: - name: ingress1 resource: - kind : Ingress + kinds : + - Ingress selector: matchLabels: originalLabel: isHere diff --git a/test/Job/policy-job.yaml b/test/Job/policy-job.yaml index eb023a8bf4..a9ef3e035d 100644 --- a/test/Job/policy-job.yaml +++ b/test/Job/policy-job.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: policy-job-perl-bigint @@ -6,7 +6,8 @@ spec : rules: - name: job1 resource: - kind: Job + kinds: + - Job name: pi mutate: patches: diff --git a/test/LimitRange/policy-limitrange.yaml b/test/LimitRange/policy-limitrange.yaml index 79d3c7bf45..ccc8fee490 100644 --- a/test/LimitRange/policy-limitrange.yaml +++ b/test/LimitRange/policy-limitrange.yaml @@ -1,12 +1,13 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind : Policy metadata : name : policy-limitrange spec : rules: - - name: + - name: "rule" resource: - kind : LimitRange + kinds : + - LimitRange selector: matchLabels: containerSize: minimal diff --git a/test/Namespace/policy-namespace.yaml b/test/Namespace/policy-namespace.yaml index 9ef999212f..1f8657f579 100644 --- a/test/Namespace/policy-namespace.yaml +++ b/test/Namespace/policy-namespace.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: 
kyverno.nirmata.io/v1alpha1 kind: Policy metadata : name : policy-namespace @@ -7,7 +7,8 @@ spec : rules: - name: ns1 resource: - kind : Namespace + kinds : + - Namespace selector: matchLabels: LabelForSelector : "namespace" diff --git a/test/NetworkPolicy/policy-network-policy.yaml b/test/NetworkPolicy/policy-network-policy.yaml index 3e105c687b..2c7d415ddc 100644 --- a/test/NetworkPolicy/policy-network-policy.yaml +++ b/test/NetworkPolicy/policy-network-policy.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: policy-network-policy @@ -6,7 +6,8 @@ spec: rules: - name: np1 resource: - kind : NetworkPolicy + kinds : + - NetworkPolicy selector: matchLabels: originalLabel: isHere diff --git a/test/PersistentVolumeClaim/policy-PVC.yaml b/test/PersistentVolumeClaim/policy-PVC.yaml index 4a05f586af..7d45d2ae50 100644 --- a/test/PersistentVolumeClaim/policy-PVC.yaml +++ b/test/PersistentVolumeClaim/policy-PVC.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: policy-pvc @@ -6,7 +6,8 @@ spec: rules: - name: pvc1 resource: - kind : PersistentVolumeClaim + kinds : + - PersistentVolumeClaim matchLabels: originalLabel: isHere mutate: diff --git a/test/PodDisruptionBudget/policy-pdb.yaml b/test/PodDisruptionBudget/policy-pdb.yaml index 736d0199d4..dce6c993e0 100644 --- a/test/PodDisruptionBudget/policy-pdb.yaml +++ b/test/PodDisruptionBudget/policy-pdb.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: policy-pdb @@ -6,7 +6,8 @@ spec: rules: - name: pdb1 resource: - kind : PodDisruptionBudget + kinds : + - PodDisruptionBudget name: "game-pdb" mutate: patches: diff --git a/test/PodTemplate/policy-PodTemplate.yaml b/test/PodTemplate/policy-PodTemplate.yaml index dbe7b01e2a..bd19ba6d0f 100644 --- 
a/test/PodTemplate/policy-PodTemplate.yaml +++ b/test/PodTemplate/policy-PodTemplate.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: test-podtemplate @@ -6,7 +6,8 @@ spec: rules: - name: podtemplate1 resource: - kind : PodTemplate + kinds : + - PodTemplate selector: matchLabels: originalLabel: isHere diff --git a/test/README.md b/test/README.md index 83136b4cb8..1867123f85 100644 --- a/test/README.md +++ b/test/README.md @@ -1,5 +1,5 @@ # Examples -Examples of policies and resources with which you can play to see the kube-policy in action. There are definitions for each supported resource type and an example policy for the corresponding resource. +Examples of policies and resources with which you can play to see Kyverno in action. There are definitions for each supported resource type and an example policy for the corresponding resource. ## How to play First of all, **build and install the policy controller**: see README file in the project's root. Each folder contains a pair of files, one of which is the definition of the resource, and the second is the definition of the policy for this resource. Let's look at an example of the endpoints mutation.
Endpoints are listed in file `examples/Endpoints/endpoints.yaml`: diff --git a/test/ResourceQuota/policy-quota-validation.yaml b/test/ResourceQuota/policy-quota-validation.yaml index fcf59a5173..5fe16379b2 100644 --- a/test/ResourceQuota/policy-quota-validation.yaml +++ b/test/ResourceQuota/policy-quota-validation.yaml @@ -1,12 +1,13 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind : Policy metadata : name : policy-quota-low-test-validation spec : rules: - - name: + - name: "rule1" resource: - kind : ResourceQuota + kinds : + - ResourceQuota selector: matchLabels: quota: low @@ -16,9 +17,10 @@ spec : spec: hard: memory: "8Gi|12Gi" - - name: + - name: "rule2" resource: - kind : ResourceQuota + kinds : + - ResourceQuota selector: matchLabels: quota: low @@ -28,9 +30,10 @@ spec : spec: hard: cpu: <3 - - name: + - name: "rule3" resource: - kind : ResourceQuota + kinds : + - ResourceQuota selector: matchLabels: quota: low diff --git a/test/ResourceQuota/policy-quota.yaml b/test/ResourceQuota/policy-quota.yaml index 89248787fe..fe131c5a74 100644 --- a/test/ResourceQuota/policy-quota.yaml +++ b/test/ResourceQuota/policy-quota.yaml @@ -1,12 +1,13 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind : Policy metadata : name : policy-quota-low-test spec : rules: - - name: + - name: "rule" resource: - kind : ResourceQuota + kinds : + - ResourceQuota selector: matchLabels: quota: low diff --git a/test/Secret/policy-secret.yaml b/test/Secret/policy-secret.yaml index aeae51ad14..8e008d040c 100644 --- a/test/Secret/policy-secret.yaml +++ b/test/Secret/policy-secret.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: policy-secrets @@ -6,7 +6,8 @@ spec: rules: - name: secret1 resource: - kind : Secret + kinds : + - Secret name: "mysecret" mutate: patches: diff --git a/test/Service/policy-service.yaml 
b/test/Service/policy-service.yaml index c92f4c6d21..c3ba48b24b 100644 --- a/test/Service/policy-service.yaml +++ b/test/Service/policy-service.yaml @@ -1,4 +1,4 @@ -apiVersion : kubepolicy.nirmata.io/v1alpha1 +apiVersion : kyverno.nirmata.io/v1alpha1 kind : Policy metadata : name : policy-service @@ -6,7 +6,8 @@ spec : rules: - name: ps1 resource: - kind: Service + kinds: + - Service name: "game-service*" mutate: patches: diff --git a/test/StatefulSet/policy-StatefulSet.yaml b/test/StatefulSet/policy-StatefulSet.yaml index f9277c6016..9da7967d7a 100644 --- a/test/StatefulSet/policy-StatefulSet.yaml +++ b/test/StatefulSet/policy-StatefulSet.yaml @@ -1,4 +1,4 @@ -apiVersion: kubepolicy.nirmata.io/v1alpha1 +apiVersion: kyverno.nirmata.io/v1alpha1 kind: Policy metadata: name: policy-statefulset @@ -6,7 +6,8 @@ spec: rules: - name: statefulset1 resource: - kind : StatefulSet + kinds : + - StatefulSet selector: matchLabels: originalLabel: isHere