
NK-9: Merged Webhook server and Policy controller. Added logger for controller.

belyshevdenis 2019-02-14 16:36:55 +02:00
commit 5680480600
17 changed files with 408 additions and 32 deletions

.gitignore vendored

@@ -1,3 +1,6 @@
vendor
pkg/client
pkg/apis/policy/v1alpha1/zz_generated.deepcopy.go
certs
kube-policy
Gopkg.lock


@@ -40,7 +40,7 @@ If you don't want to use SSH, you can just clone the repo with git, but ensure that
## Restore dependencies
Navigate to the kube-policy project directory and execute:
`dep restore`
`dep ensure`
This will install necessary dependencies described in README.md
# Contributing

controller/controller.go

@@ -2,36 +2,36 @@ package controller

import (
	"time"
	"fmt"
	"log"
	"k8s.io/sample-controller/pkg/signals"
	"k8s.io/client-go/tools/clientcmd"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
	clientset "nirmata/kube-policy/pkg/client/clientset/versioned"
	informers "nirmata/kube-policy/pkg/client/informers/externalversions"
	lister "nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
	types "nirmata/kube-policy/pkg/apis/policy/v1alpha1"
	clientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
	informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions"
	lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
	types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
)

// Controller for CRD
type Controller struct {
	policyInformerFactory informers.SharedInformerFactory
	policyLister          lister.PolicyLister
	logger                *log.Logger
}

// NewController from cmd args
func NewController(masterURL, kubeconfigPath string) (*Controller, error) {
func NewController(masterURL, kubeconfigPath string, logger *log.Logger) (*Controller, error) {
	cfg, err := clientcmd.BuildConfigFromFlags(masterURL, kubeconfigPath)
	if err != nil {
		fmt.Printf("Error building kubeconfig: %v\n", err)
		logger.Printf("Error building kubeconfig: %v\n", err)
		return nil, err
	}

	policyClientset, err := clientset.NewForConfig(cfg)
	if err != nil {
		fmt.Printf("Error building policy clientset: %v\n", err)
		logger.Printf("Error building policy clientset: %v\n", err)
		return nil, err
	}

@@ -53,15 +53,8 @@ func NewController(masterURL, kubeconfigPath string) (*Controller, error) {
}

// Run is main controller thread
func (c *Controller) Run() error {
	stopCh := signals.SetupSignalHandler()
func (c *Controller) Run(stopCh <-chan struct{}) {
	c.policyInformerFactory.Start(stopCh)

	fmt.Println("Running controller...")
	<-stopCh
	fmt.Println("\nShutting down controller...")

	return nil
}

// GetPolicies retrieves all policy resources
@@ -85,24 +78,24 @@ func (c *Controller) GetPolicies() ([]*types.Policy, error) {
func (c *Controller) createPolicyHandler(resource interface{}) {
	key := c.getResourceKey(resource)
	fmt.Printf("Created policy: %s\n", key)
	c.logger.Printf("Created policy: %s\n", key)
}

func (c *Controller) updatePolicyHandler(oldResource, newResource interface{}) {
	oldKey := c.getResourceKey(oldResource)
	newKey := c.getResourceKey(newResource)
	fmt.Printf("Updated policy from %s to %s\n", oldKey, newKey)
	c.logger.Printf("Updated policy from %s to %s\n", oldKey, newKey)
}

func (c *Controller) deletePolicyHandler(resource interface{}) {
	key := c.getResourceKey(resource)
	fmt.Printf("Deleted policy: %s\n", key)
	c.logger.Printf("Deleted policy: %s\n", key)
}

func (c *Controller) getResourceKey(resource interface{}) string {
	if key, err := cache.MetaNamespaceKeyFunc(resource); err != nil {
		fmt.Printf("Error retrieving policy key: %v\n", err)
		c.logger.Printf("Error retrieving policy key: %v\n", err)
		return ""
	} else {
		return key

crd/MutatingWebhookConfiguration.yaml

@@ -0,0 +1,19 @@
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  name: nirmata-kube-policy-webhook-cfg
  labels:
    app: kube-policy
webhooks:
  - name: webhook.nirmata.kube-policy
    clientConfig:
      service:
        name: kube-policy-svc
        namespace: default
        path: "/mutate"
      caBundle: MIIC5zCCAc+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAVMRMwEQYDVQQDEwptaW5pa3ViZUNBMB4XDTE5MDIwMzE1MjM0M1oXDTI5MDIwMTE1MjM0M1owFTETMBEGA1UEAxMKbWluaWt1YmVDQTCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAOW3JNJEhX6syO6a+Vr8fezQUmHgJ+oUwYZbwIcb1TQKAGVoIPcN5nkBw11P6syjnrxoPt9HVq3/0mWJOacBgVtuAAZ4sQ8QevFwKmipTpTAC+SEBVhsypqO/1aLs2imbHQr2AVlCy2LxppX7lupl5ELwt9t5nSI3zuauezZ6ujkOCWcO52dGA3dIEXBiKiSQ4Svlqfnjpt7w8Frf6z77nmZSCbAXOf8jjPlObQGTFqzKq+gOmK3LzpANoY6VJSAjQP0jTTc7qC9u3KG53lbTectcBrcQnHRukUvfExI1YyYBTjekjN3DzTkjsn8FCar8hkR8/G4OnwZmiHgDVClrtsCAwEAAaNCMEAwDgYDVR0PAQH/BAQDAgKkMB0GA1UdJQQWMBQGCCsGAQUFBwMCBggrBgEFBQcDATAPBgNVHRMBAf8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4IBAQAPvhLvSaYjT32cqy8tR7KR6PyO3lWKt1Tg1R6IrzavSp+5q9AkQyJQmgsm6WcOsxVwFHDMb23iPzv5UDQPmhmJlGRtFgHbCXOYL+Gf/f/6atez5EzbX3T/tSZPF7ASGLSClEGtOwFUYcXqOeQtInPVPe26PbG5k+XCdqDL8HvrRvyKf5HkTt/5nMYMig5TBs6L1O+GGfvM8dTNwW8w3T0ZUMoF4CKVmhMynG47hWW1HGdvqj/NWp8VWqO6Mo+6pBGJrrMdb7IArN725jhZps2CaD1bpGYVIB4Ad65E6ZbSXl12xUq+RI/LfqIaRAALJkXK3v0bfiJ1+SPMWok0QxjJ
    rules:
      - operations: [ "CREATE" ]
        resources: [ "*/*" ]
        apiGroups: [ "*" ]
        apiVersions: [ "*" ]

crd/MutatingWebhookConfiguration_local.yaml

@@ -0,0 +1,15 @@
apiVersion: admissionregistration.k8s.io/v1beta1
kind: MutatingWebhookConfiguration
metadata:
  name: nirmata-kube-policy-webhook-cfg
  labels:
    app: kube-policy
webhooks:
  - name: webhook.nirmata.kube-policy
    clientConfig:
      url: "https://localhost/mutate"
    rules:
      - operations: [ "CREATE" ]
        resources: [ "*/*" ]
        apiGroups: [ "*" ]
        apiVersions: [ "*" ]


@@ -8,9 +8,8 @@ spec:
    - name: v1alpha1
      served: true
      storage: true
  scope: Namespaced
  scope: Cluster
  names:
    kind: Policy
    plural: policies
    singular: policy
  version: v1alpha1

crd/deployment.yaml Normal file

@@ -0,0 +1,28 @@
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
  name: kube-policy-deployment
  labels:
    app: kube-policy
spec:
  replicas: 1
  template:
    metadata:
      labels:
        app: kube-policy
    spec:
      containers:
        - name: kube-policy
          image: nirmata/kube-policy:latest
          imagePullPolicy: IfNotPresent
          args:
            - -cert=/etc/kube-policy/certs/server.crt
            - -key=/etc/kube-policy/certs/server-key.pem
          volumeMounts:
            - name: kube-policy-certs
              mountPath: /etc/kube-policy/certs
              readOnly: true
      volumes:
        - name: kube-policy-certs
          secret:
            secretName: kube-policy-secret

crd/service.yaml Normal file

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  name: kube-policy-svc
  labels:
    app: kube-policy
spec:
  ports:
    - port: 443
      targetPort: 443
  selector:
    app: kube-policy

main.go

@@ -1,33 +1,59 @@
package main

import (
	"log"
	"os"
	"flag"
	"fmt"
	controller "nirmata/kube-policy/controller"
	"github.com/nirmata/kube-policy/controller"
	"github.com/nirmata/kube-policy/server"
	"k8s.io/sample-controller/pkg/signals"
)

var (
	masterURL  string
	kubeconfig string
	cert       string
	key        string
)

func main() {
	flag.Parse()

	controller, err := controller.NewController(masterURL, kubeconfig)
	if cert == "" || key == "" {
		log.Fatal("TLS certificate or/and key is not set")
	}

	httpLogger := log.New(os.Stdout, "http: ", log.LstdFlags|log.Lshortfile)
	crdcLogger := log.New(os.Stdout, "crdc: ", log.LstdFlags|log.Lshortfile)

	server := server.NewWebhookServer(cert, key, httpLogger)
	server.RunAsync()

	controller, err := controller.NewController(masterURL, kubeconfig, crdcLogger)
	if err != nil {
		fmt.Printf("Error creating Controller! Error: %s\n", err)
		return
	}

	err = controller.Run()
	stopCh := signals.SetupSignalHandler()
	controller.Run(stopCh)
	if err != nil {
		fmt.Printf("Error running Controller! Error: %s\n", err)
	}

	fmt.Printf("Policy Controller has started")
	<-stopCh
	server.Stop()
	fmt.Printf("Policy Controller has stopped")
}

func init() {
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&masterURL, "master", "", "The address of the Kubernetes API server. Overrides any value in kubeconfig. Only required if out-of-cluster.")
	flag.StringVar(&cert, "cert", "", "TLS certificate used in connection with cluster.")
	flag.StringVar(&key, "key", "", "Key, used in TLS connection.")
}

pkg/apis/policy/v1alpha1/register.go

@@ -5,7 +5,7 @@ import (
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"nirmata/kube-policy/pkg/apis/policy"
	"github.com/nirmata/kube-policy/pkg/apis/policy"
)

// SchemeGroupVersion is group version used to register these objects

pkg/apis/policy/v1alpha1/types.go

@@ -26,7 +26,7 @@ type PolicySpec struct {
type PolicyRule struct {
	Resource   PolicyResource          `json:"resource"`
	Patches    []PolicyPatch           `json:"patches"`
	Generator  *PolicyConfigGenerator  `json:"generator"`
	Generators []PolicyConfigGenerator `json:"generator"`
}

// PolicyResource describes the resource rule applied to
@@ -47,7 +47,7 @@ type PolicyPatch struct {
type PolicyConfigGenerator struct {
	Name     string             `json:"name"`
	CopyFrom *PolicyCopyFrom    `json:"copyFrom"`
	Data     *map[string]string `json:"data"`
	Data     map[string]string  `json:"data"`
}

// PolicyCopyFrom is TODO
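
The two type changes above are easy to motivate with a quick sketch. This is illustrative only (the types are copied minimally from the diff; the values are made up): a nil map already models an absent `data` field, so the extra pointer indirection bought nothing, and the slice lets one rule declare several generators.

```go
package main

import "fmt"

// Minimal copies of the two types from this diff, for illustration only.
type PolicyConfigGenerator struct {
	Name string            `json:"name"`
	Data map[string]string `json:"data"`
}

type PolicyRule struct {
	Generators []PolicyConfigGenerator `json:"generator"`
}

func main() {
	// A nil map is a valid zero value, so absence of "data" in the JSON
	// is still detectable without the former *map[string]string pointer.
	var g PolicyConfigGenerator
	fmt.Println(g.Data == nil) // true: "data" was omitted

	// The slice lets a single rule carry several generators.
	r := PolicyRule{Generators: []PolicyConfigGenerator{
		{Name: "game-config", Data: map[string]string{"env": "test"}},
	}}
	fmt.Println(len(r.Generators)) // 1
}
```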

scripts/README.md Normal file

@@ -0,0 +1,25 @@
Use these scripts to prepare the controller for use.
All these scripts should be launched from the root folder of the project, for example:
`scripts/compile-image.sh`
### compile-image.sh ###
Compiles the project to a Go executable, builds the docker image, and pushes it to the repo. Has no arguments.
### generate-server-cert.sh ###
Generates the TLS certificate and key used by the webhook server. Example:
`scripts/generate-server-cert.sh --service=kube-policy-svc --namespace=my_namespace --serverIp=192.168.10.117`
* `--service` identifies the service for the in-cluster webhook server. Do not specify it if you plan to run the webhook server outside the cluster.
* `--namespace` identifies the namespace for the in-cluster webhook server. Default value is "default".
* `--serverIp` is the IP of the master node; it can be found in `~/.kube/config` under clusters.cluster[0].server. **The default is a hardcoded value**, so you should specify it explicitly.
### deploy-controller.sh ###
Prepares the controller for the current environment in one of two modes: free (local) or in-cluster. Usage:
`scripts/deploy-controller.sh --namespace=my_namespace --serverIp=192.168.10.117`
* `--namespace` identifies the namespace for the in-cluster webhook server. Do not specify it if you plan to run the webhook server outside the cluster.
* `--serverIp` is the IP of the master node; it has the same meaning as for `generate-server-cert.sh`.
### test-web-hook.sh ###
Quickly creates and deletes a test config map. If your webhook server is running, you should see the corresponding output from it. Use this script after `deploy-controller.sh`.
### update-codegen.sh ###
Generates additional code for the controller object. Resolve all dependencies before using it; see the main README for details.

scripts/compile-image.sh Executable file

@@ -0,0 +1,23 @@
#!/bin/bash

hub_user_name="nirmata"
project_name="kube-policy"
version="latest"

echo "# Ensuring Go dependencies..."
#dep ensure || exit 2

echo "# Building executable ${project_name}..."
CGO_ENABLED=0 GOOS=linux go build -a -installsuffix cgo -o ${project_name} . || exit 3

echo "# Building docker image ${hub_user_name}/${project_name}:${version}"
cat <<EOF > Dockerfile
FROM alpine:latest
WORKDIR ~/
ADD ${project_name} ./${project_name}
ENTRYPOINT ["./${project_name}"]
EOF
tag="${hub_user_name}/${project_name}:${version}"
docker build --no-cache -t "${tag}" . || exit 4

echo "# Pushing image to repository..."
docker push "${tag}" || exit 5

scripts/deploy-controller.sh Executable file

@@ -0,0 +1,64 @@
#!/bin/bash

for i in "$@"
do
    case $i in
        --namespace=*)
        namespace="${i#*=}"
        shift
        ;;
        --serverIp=*)
        serverIp="${i#*=}"
        shift
        ;;
    esac
done

if [ -z "${serverIp}" ]; then
    # This is the standard IP of minikube
    serverIp="192.168.10.117" #TODO: ! Read it from ~/.kube/config !
fi

hub_user_name="nirmata"
project_name="kube-policy"
service_name="${project_name}-svc"

echo "Generating certificate for the service ${service_name}..."
certsGenerator="./scripts/generate-server-cert.sh"
chmod +x "${certsGenerator}"

if [ -z "${namespace}" ]; then # controller is launched locally
    ${certsGenerator} "--serverIp=${serverIp}" || exit 2

    echo "Applying webhook..."
    kubectl delete -f crd/MutatingWebhookConfiguration_local.yaml
    kubectl create -f crd/MutatingWebhookConfiguration_local.yaml || exit 3

    echo -e "\n### You can build and run kube-policy project locally.\n### To check its work, run it with parameters -cert and -key, which contain generated TLS certificate and key (see their paths in log above)."
else # controller is launched within a cluster
    ${certsGenerator} "--service=${service_name}" "--namespace=${namespace}" "--serverIp=${serverIp}" || exit 2

    secret_name="${project_name}-secret"
    echo "Generating secret ${secret_name}..."
    kubectl delete secret "${secret_name}" 2>/dev/null
    kubectl create secret generic ${secret_name} --namespace ${namespace} --from-file=./certs || exit 3

    echo "Creating the service ${service_name}..."
    kubectl delete -f crd/service.yaml
    kubectl create -f crd/service.yaml || exit 4

    echo "Creating deployment..."
    kubectl delete -f crd/deployment.yaml
    kubectl create -f crd/deployment.yaml || exit 5

    echo "Applying webhook..."
    kubectl delete -f crd/MutatingWebhookConfiguration.yaml
    kubectl create -f crd/MutatingWebhookConfiguration.yaml || exit 3

    echo -e "\n### Controller is running in cluster.\n### You can use compile-image.sh to rebuild its image and then the current script to redeploy the controller.\n### Check its work with the 'kubectl logs <controller_pod>' command"
fi
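
The TODO above asks for reading the API server address from `~/.kube/config` instead of hardcoding it. A minimal sketch in Go of how that could be done (an assumption, not part of this commit; it relies on the client-go library the controller already vendors):

```go
package main

import (
	"fmt"
	"log"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Honors $KUBECONFIG and falls back to ~/.kube/config.
	rules := clientcmd.NewDefaultClientConfigLoadingRules()
	cfg, err := clientcmd.BuildConfigFromFlags("", rules.GetDefaultFilename())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Host) // clusters.cluster[0].server for the current context
}
```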

scripts/generate-server-cert.sh Executable file

@@ -0,0 +1,89 @@
#!/bin/bash

for i in "$@"
do
    case $i in
        --service=*)
        service="${i#*=}"
        shift
        ;;
        --namespace=*)
        namespace="${i#*=}"
        shift
        ;;
        --serverIp=*)
        serverIp="${i#*=}"
        shift
        ;;
    esac
done

if [ -z "${namespace}" ]; then
    namespace="default"
fi

echo "service is $service"
echo "namespace is $namespace"
echo "serverIp is $serverIp"

destdir="certs"
if [ ! -d "$destdir" ]; then
    mkdir ${destdir} || exit 1
fi

tmpdir=$(mktemp -d)

cat <<EOF >> ${tmpdir}/csr.conf
[req]
req_extensions = v3_req
distinguished_name = req_distinguished_name
[req_distinguished_name]
[ v3_req ]
basicConstraints = CA:FALSE
keyUsage = nonRepudiation, digitalSignature, keyEncipherment
extendedKeyUsage = serverAuth
subjectAltName = @alt_names
[alt_names]
DNS.1 = ${service}
DNS.2 = ${service}.${namespace}
DNS.3 = ${service}.${namespace}.svc
DNS.4 = ${serverIp}
EOF

outKeyFile=${destdir}/server-key.pem
outCertFile=${destdir}/server.crt

openssl genrsa -out ${outKeyFile} 2048 || exit 2

if [ ! -z "${service}" ]; then
    subjectCN="${service}.${namespace}.svc"
    echo "Configuring work WITHIN a cluster with CN=${subjectCN}"
else
    subjectCN=${serverIp}
    echo "Configuring work OUTSIDE a cluster with CN=${subjectCN}"
fi

openssl req -new -key ${destdir}/server-key.pem -subj "/CN=${subjectCN}" -out ${tmpdir}/server.csr -config ${tmpdir}/csr.conf || exit 3

CSR_NAME=${service}.cert-request
kubectl delete csr ${CSR_NAME} 2>/dev/null

cat <<EOF | kubectl create -f -
apiVersion: certificates.k8s.io/v1beta1
kind: CertificateSigningRequest
metadata:
  name: ${CSR_NAME}
spec:
  groups:
  - system:authenticated
  request: $(cat ${tmpdir}/server.csr | base64 | tr -d '\n')
  usages:
  - digital signature
  - key encipherment
  - server auth
EOF

kubectl certificate approve ${CSR_NAME} || exit 4
kubectl get csr ${CSR_NAME} -o jsonpath='{.status.certificate}' | base64 --decode > ${outCertFile} || exit 5

echo "Generated:"
echo ${outKeyFile}
echo ${outCertFile}

scripts/test-web-hook.sh Executable file

@@ -0,0 +1,4 @@
#!/bin/bash
# You should see the trace of requests in the output of webhook server
kubectl create configmap test-config-map --from-literal="some_var=some_value"
kubectl delete configmap test-config-map

server/server.go Normal file

@@ -0,0 +1,76 @@
package server

import (
	"net/http/httputil"
	"net/http"
	"crypto/tls"
	"context"
	"time"
	"log"
	"os"
)

// WebhookServer is a struct that describes
// TLS server with mutation webhook
type WebhookServer struct {
	server http.Server
	logger *log.Logger
}

func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
	dump, _ := httputil.DumpRequest(r, true)
	ws.logger.Printf("%s", dump)
}

// RunAsync runs TLS server in separate
// thread and returns control immediately
func (ws *WebhookServer) RunAsync() {
	go func(ws *WebhookServer) {
		// Empty paths: certificates come from TLSConfig. A graceful
		// Shutdown makes this return http.ErrServerClosed, which must
		// not be treated as fatal.
		err := ws.server.ListenAndServeTLS("", "")
		if err != nil && err != http.ErrServerClosed {
			ws.logger.Fatal(err)
		}
	}(ws)
}

// Stop stops TLS server
func (ws *WebhookServer) Stop() {
	err := ws.server.Shutdown(context.Background())
	if err != nil {
		// Error from closing listeners, or context timeout:
		ws.logger.Printf("Server Shutdown error: %v", err)
		ws.server.Close()
	}
}

// NewWebhookServer creates new instance of WebhookServer and configures it
func NewWebhookServer(certFile string, keyFile string, logger *log.Logger) *WebhookServer {
	if logger == nil {
		logger = log.New(os.Stdout, "", log.LstdFlags|log.Lshortfile)
	}

	var config tls.Config
	pair, err := tls.LoadX509KeyPair(certFile, keyFile)
	if err != nil {
		logger.Fatal("Unable to load certificate and key: ", err)
	}
	config.Certificates = []tls.Certificate{pair}

	mux := http.NewServeMux()

	ws := &WebhookServer{
		server: http.Server{
			Addr:         ":443", // Listen on port for HTTPS requests
			TLSConfig:    &config,
			Handler:      mux,
			ErrorLog:     logger,
			ReadTimeout:  5 * time.Second,
			WriteTimeout: 5 * time.Second,
		},
		logger: logger,
	}
	mux.HandleFunc("/mutate", ws.serve)

	return ws
}
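
In this commit serve() only dumps each incoming request. A hedged sketch of what the /mutate handler will likely grow into (not part of this commit; it assumes k8s.io/api/admission/v1beta1 is vendored, matching the v1beta1 webhook configurations above, and mutateHandler is a hypothetical name):

```go
package server

import (
	"encoding/json"
	"net/http"

	"k8s.io/api/admission/v1beta1"
)

// mutateHandler decodes the AdmissionReview the API server POSTs to
// /mutate and answers it. Illustrative only.
func mutateHandler(w http.ResponseWriter, r *http.Request) {
	var review v1beta1.AdmissionReview
	if err := json.NewDecoder(r.Body).Decode(&review); err != nil {
		http.Error(w, err.Error(), http.StatusBadRequest)
		return
	}

	// Allow everything for now; JSON patches would be attached here later.
	review.Response = &v1beta1.AdmissionResponse{
		UID:     review.Request.UID,
		Allowed: true,
	}

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(review)
}
```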