
- Change kube-policy to kyverno in install.yaml
- Install in namespace kyverno

Author: shuting, 2019-05-21 18:36:24 -07:00
Parent: a61a7c9c31
Commit: 09bfdc6ba3
12 changed files with 52 additions and 47 deletions


@@ -3,14 +3,14 @@
 apiVersion: admissionregistration.k8s.io/v1beta1
 kind: MutatingWebhookConfiguration
 metadata:
-  name: nirmata-kube-policy-webhook-cfg
+  name: nirmata-kyverno-webhook-cfg
   labels:
-    app: kube-policy
+    app: kyverno
 webhooks:
-  - name: webhook.nirmata.kube-policy
+  - name: webhook.nirmata.kyverno
     clientConfig:
       service:
-        name: kube-policy-svc
+        name: kyverno-svc
         namespace: default
         path: "/mutate"
       caBundle: ${CA_BUNDLE}


@@ -3,11 +3,11 @@
 apiVersion: admissionregistration.k8s.io/v1beta1
 kind: MutatingWebhookConfiguration
 metadata:
-  name: nirmata-kube-policy-webhook-cfg-debug
+  name: nirmata-kyverno-webhook-cfg-debug
   labels:
-    app: kube-policy
+    app: kyverno
 webhooks:
-  - name: webhook.nirmata.kube-policy
+  - name: webhook.nirmata.kyverno
     clientConfig:
       url: "https://localhost/mutate"
       caBundle: ${CA_BUNDLE}


@@ -1,9 +1,9 @@
 apiVersion: apiextensions.k8s.io/v1beta1
 kind: CustomResourceDefinition
 metadata:
-  name: policies.kubepolicy.nirmata.io
+  name: policies.kyverno.io
 spec:
-  group: kubepolicy.nirmata.io
+  group: kyverno.io
   versions:
     - name: v1alpha1
       served: true
@@ -143,57 +143,62 @@ spec:
             additionalProperties:
               type: string
 ---
+kind: Namespace
+apiVersion: v1
+metadata:
+  name: "kyverno"
+---
 apiVersion: v1
 kind: Service
 metadata:
-  namespace: kube-system
-  name: kube-policy-svc
+  namespace: kyverno
+  name: kyverno-svc
   labels:
-    app: kube-policy
+    app: kyverno
 spec:
   ports:
   - port: 443
     targetPort: 443
   selector:
-    app: kube-policy
+    app: kyverno
 ---
 apiVersion: v1
 kind: ServiceAccount
 metadata:
-  name: kube-policy-service-account
-  namespace: kube-system
+  name: kyverno-service-account
+  namespace: kyverno
 ---
 kind: ClusterRoleBinding
 apiVersion: rbac.authorization.k8s.io/v1beta1
 metadata:
-  name: kube-policy-admin
+  name: kyverno-admin
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: ClusterRole
   name: cluster-admin
 subjects:
 - kind: ServiceAccount
-  name: kube-policy-service-account
-  namespace: kube-system
+  name: kyverno-service-account
+  namespace: kyverno
 ---
 apiVersion: extensions/v1beta1
 kind: Deployment
 metadata:
-  namespace: kube-system
-  name: kube-policy-deployment
+  namespace: kyverno
+  name: kyverno-deployment
   labels:
-    app: kube-policy
+    app: kyverno
 spec:
   replicas: 1
   template:
     metadata:
       labels:
-        app: kube-policy
+        app: kyverno
     spec:
-      serviceAccountName: kube-policy-service-account
+      serviceAccountName: kyverno-service-account
       containers:
-        - name: kube-policy
-          image: nirmata/kube-policy:latest
+        - name: kyverno
+          image: nirmata/kyverno:latest
          imagePullPolicy: IfNotPresent
          ports:
          - containerPort: 443
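
The renamed objects above can be sanity-checked after applying the manifest; a minimal sketch using standard kubectl commands (the resource names come straight from this hunk, the apply path from the README section further down):

```sh
# Apply the updated manifest, then confirm the renamed objects exist in the new namespace.
kubectl create -f definitions/install.yaml
kubectl get namespace kyverno
kubectl get crd policies.kyverno.io
kubectl -n kyverno get serviceaccount kyverno-service-account
kubectl -n kyverno get service kyverno-svc
kubectl -n kyverno get deployment kyverno-deployment
```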


@@ -9,15 +9,15 @@ Just execute the command for creating all necesarry resources:
 `kubectl create -f definitions/install.yaml`
 In this mode controller will get TLS key/certificate pair and loads in-cluster config automatically on start.
-To check if the controller is working, find it in the list of kube-system pods:
+To check if the controller is working, find it in the list of kyverno pods:
-`kubectl get pods -n kube-system`
+`kubectl get pods -n kyverno`
-The pod with controller contains **'kube-policy'** in its name. The STATUS column will show the health state of the controller. If controller doesn't start, see its logs:
+The pod with controller contains **'kyverno'** in its name. The STATUS column will show the health state of the controller. If controller doesn't start, see its logs:
-`kubectl describe pod <kube-policy-pod-name> -n kube-system`
+`kubectl describe pod <kyverno-pod-name> -n kyverno`
 or
-`kubectl logs <kube-policy-pod-name> -n kube-system`
+`kubectl logs <kyverno-pod-name> -n kyverno`
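
Since install.yaml labels the controller with `app: kyverno`, the pod can also be located by label rather than by its generated name; a small sketch, assuming only the label selector from the manifest above:

```sh
# Find the controller pod by label instead of matching the pod name by hand.
kubectl -n kyverno get pods -l app=kyverno

# The same selector works for status and logs.
kubectl -n kyverno describe pods -l app=kyverno
kubectl -n kyverno logs -l app=kyverno
```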


@@ -2,5 +2,5 @@ package policy
 const (
     // GroupName must be the same as specified in Policy CRD
-    GroupName = "kubepolicy.nirmata.io"
+    GroupName = "kyverno.io"
 )


@@ -1,4 +1,4 @@
 // +k8s:deepcopy-gen=package
-// +groupName=kubepolicy.nirmata.io
+// +groupName=kyverno.io
 package v1alpha1


@@ -2,28 +2,28 @@ package config
 const (
     // These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
-    KubePolicyNamespace = "kube-system"
-    WebhookServiceName = "kube-policy-svc"
+    KubePolicyNamespace = "kyverno"
+    WebhookServiceName = "kyverno-svc"
-    MutatingWebhookConfigurationName = "kube-policy-mutating-webhook-cfg"
-    MutatingWebhookName = "nirmata.kube-policy.mutating-webhook"
+    MutatingWebhookConfigurationName = "kyverno-mutating-webhook-cfg"
+    MutatingWebhookName = "nirmata.kyverno.mutating-webhook"
-    ValidatingWebhookConfigurationName = "kube-policy-validating-webhook-cfg"
-    ValidatingWebhookName = "nirmata.kube-policy.validating-webhook"
+    ValidatingWebhookConfigurationName = "kyverno-validating-webhook-cfg"
+    ValidatingWebhookName = "nirmata.kyverno.validating-webhook"
     // Due to kubernetes issue, we must use next literal constants instead of deployment TypeMeta fields
     // Issue: https://github.com/kubernetes/kubernetes/pull/63972
     // When the issue is closed, we should use TypeMeta struct instead of this constants
     DeploymentKind = "Deployment"
     DeploymentAPIVersion = "extensions/v1beta1"
-    KubePolicyDeploymentName = "kube-policy-deployment"
+    KubePolicyDeploymentName = "kyverno-deployment"
 )
 var (
     MutatingWebhookServicePath = "/mutate"
     ValidatingWebhookServicePath = "/validate"
     KubePolicyAppLabels = map[string]string{
-        "app": "kube-policy",
+        "app": "kyverno",
     }
     SupportedKinds = []string{

@@ -82,7 +82,7 @@ func complete(args []string) (*kubepolicy.Policy, []*resourceInfo) {
 func applyPolicy(policy *kubepolicy.Policy, rawResource []byte, gvk *metav1.GroupVersionKind) ([]byte, error) {
     _, patchedDocument := engine.Mutate(*policy, rawResource, *gvk)
-    if err := engine.Validate(*policy, rawResource, *gvk); err != nil {
+    if err := engine.Validate(*policy, patchedDocument, *gvk); err != nil {
         return nil, err
     }
     return patchedDocument, nil


@@ -44,7 +44,7 @@ func (si *sharedInfomer) Run(stopCh <-chan struct{}) {
 }
 func (si *sharedInfomer) getInfomer() infomertypes.PolicyInformer {
-    return si.policyInformerFactory.Kubepolicy().V1alpha1().Policies()
+    return si.policyInformerFactory.Kyverno().V1alpha1().Policies()
 }
 func (si *sharedInfomer) GetInfomer() cache.SharedIndexInformer {
     return si.getInfomer().Informer()


@@ -7,7 +7,7 @@ Compiles the project to go executable, generates docker image and pushes it to t
 ### generate-server-cert.sh ###
 Generates TLS certificate and key that used by webhook server. Example:
-`scripts/generate-server-cert.sh --service=kube-policy-svc --namespace=my_namespace --serverIp=192.168.10.117`
+`scripts/generate-server-cert.sh --service=kyverno-svc --namespace=my_namespace --serverIp=192.168.10.117`
 * `--service` identifies the service for in-cluster webhook server. Do not specify it if you plan to run webhook server outside the cluster, or cpecify 'localhost' if you want to run controller locally.
 * `--namespace` identifies the namespace for in-cluster webhook server. Do not specify it if you plan to run controller locally.
 * `--serverIp` is the IP of master node, it can be found in `~/.kube/config`: clusters.cluster[0].server. You should explicitly specify it.
@@ -18,7 +18,7 @@ Prepares controller for free (local) or in-cluster use. Uses `generate-server-ce
 * `--namespace` - the target namespace to deploy the controller. Do not specify it if you want to depoloy controller locally.
 * `--serverIp` means the same as for `generate-server-cert.sh`
 Examples:
-`scripts/deploy-controller.sh --service=my-kube-policy --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kube-policy'
+`scripts/deploy-controller.sh --service=my-kyverno --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kyverno'
 `scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with mnaster node at '192.168.10.117'
 ### test-web-hook.sh ###


@@ -19,7 +19,7 @@ esac
 done
 hub_user_name="nirmata"
-project_name="kube-policy"
+project_name="kyverno"
 if [ -z "${service_name}" ]; then
   service_name="${project_name}-svc"
@@ -40,7 +40,7 @@ if [ -z "${namespace}" ]; then # controller should be launched locally
   kubectl delete -f definitions/install.yaml
   kubectl create -f definitions/install.yaml || exit 3
-  echo -e "\n### You can build and run kube-policy project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)."
+  echo -e "\n### You can build and run kyverno project locally.\n### To check its work, run it with parameters -cert, -key and -kubeconfig parameters (see paths of -cert and -key in the log above)."
 else # controller should be launched within a cluster
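
The echo above describes running the controller locally with the generated certificate; a hypothetical invocation could look like the following (the binary name and certificate paths are illustrative assumptions; only the -cert, -key and -kubeconfig flags come from the script's message):

```sh
# Build the controller and run it outside the cluster for local debugging (paths are placeholders).
go build -o kyverno .
./kyverno -cert certs/server.crt -key certs/server.key -kubeconfig ~/.kube/config
```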


@@ -1,5 +1,5 @@
 # Examples
-Examples of policies and resources with which you can play to see the kube-policy in action. There are definitions for each supported resource type and an example policy for the corresponding resource.
+Examples of policies and resources with which you can play to see the kyverno in action. There are definitions for each supported resource type and an example policy for the corresponding resource.
 ## How to play
 First of all, **build and install the policy controller**: see README file in the project's root.
 Each folder contains a pair of files, one of which is the definition of the resource, and the second is the definition of the policy for this resource. Let's look at an example of the endpoints mutation. Endpoints are listed in file `examples/Endpoints/endpoints.yaml`: