
420 init container (#501)

* init container to clean up stale webhook configurations, if any.

* remove test code

* use internal pkg for os signals

* move webhook cleanup before http.Server shutdown.

* update Makefile and remove init.go

* update CI script
Shivkumar Dudhani 2019-11-18 11:41:37 -08:00 committed by GitHub
parent 54744151b2
commit 61b202c64a
22 changed files with 472 additions and 242 deletions

View file

@ -31,5 +31,6 @@ after_success:
if [ $TRAVIS_PULL_REQUEST == 'false' ]
then
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
make docker-publish
# make docker-publish-initContainer
# make docker-publish-kyverno
fi

View file

@ -1,63 +1,82 @@
.DEFAULT_GOAL: build
# The CLI binary to build
BIN ?= kyverno
##################################
# DEFAULTS
##################################
REGISTRY=index.docker.io
REPO=$(REGISTRY)/nirmata/kyverno
IMAGE_TAG=$(GIT_VERSION)
GOOS ?= $(shell go env GOOS)
LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"
GIT_VERSION := $(shell git describe --dirty --always --tags)
GIT_BRANCH := $(shell git branch | grep \* | cut -d ' ' -f2)
GIT_HASH := $(GIT_BRANCH)/$(shell git log -1 --pretty=format:"%H")
TIMESTAMP := $(shell date '+%Y-%m-%d_%I:%M:%S%p')
PACKAGE ?=github.com/nirmata/kyverno
MAIN ?=$(PACKAGE)
LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"
# default docker hub
REGISTRY=index.docker.io
REPO=$(REGISTRY)/nirmata/kyverno
IMAGE_TAG=$(GIT_VERSION)
GOOS ?= $(shell go env GOOS)
OUTPUT=$(shell pwd)/_output/cli/$(BIN)
##################################
# KYVERNO
##################################
KYVERNO_PATH:= cmd/kyverno
build:
CGO_ENABLED=0 GOOS=linux go build -ldflags=$(LD_FLAGS) $(MAIN)
GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
local:
go build -ldflags=$(LD_FLAGS) $(MAIN)
##################################
# INIT CONTAINER
##################################
INITC_PATH := cmd/initContainer
INITC_IMAGE := kyvernopre
initContainer:
GOOS=$(GOOS) go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
cli: cli-dirs
GOOS=$(GOOS) \
go build \
-o $(OUTPUT) \
-ldflags $(LD_FLAGS) \
$(PACKAGE)/cmd/$(BIN)
.PHONY: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer
cli-dirs:
@mkdir -p _output/cli
docker-publish-initContainer: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer
clean:
go clean
docker-build-initContainer:
CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
echo $(PWD)/$(INITC_PATH)/
@docker build -f $(PWD)/$(INITC_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(PWD)/$(INITC_PATH)/
# docker image build targets
# user must be logged in the $(REGISTRY) to push images
.PHONY: docker-build docker-tag-repo docker-push
docker-tag-repo-initContainer:
@docker tag $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(INITC_IMAGE):latest
docker-publish: docker-build docker-tag-repo docker-push
docker-push-initContainer:
@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG)
@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):latest
docker-build:
@docker build -t $(REPO):$(IMAGE_TAG) .
##################################
# KYVERNO CONTAINER
##################################
.PHONY: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
KYVERNO_PATH := cmd/kyverno
KYVERNO_IMAGE := kyverno
docker-publish-kyverno: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
docker-tag-repo:
@docker tag $(REPO):$(IMAGE_TAG) $(REPO):latest
docker-build-kyverno:
CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
@docker build -f $(PWD)/$(KYVERNO_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(PWD)/$(KYVERNO_PATH)
docker-push:
@docker push $(REPO):$(IMAGE_TAG)
@docker push $(REPO):latest
docker-tag-repo-kyverno:
@docker tag $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest
## Testing & Code-Coverage
docker-push-kyverno:
@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG)
@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest
##################################
# CLI
##################################
CLI_PATH := cmd/cli
cli:
GOOS=$(GOOS) go build -o $(PWD)/$(CLI_PATH)/kyvernocli -ldflags=$(LD_FLAGS) $(PWD)/$(CLI_PATH)/main.go
##################################
# Testing & Code-Coverage
##################################
## variables
BIN_DIR := $(GOPATH)/bin

View file

@ -0,0 +1,3 @@
FROM scratch
ADD kyvernopre /kyvernopre
ENTRYPOINT ["/kyvernopre"]

cmd/initContainer/main.go Normal file
View file

@ -0,0 +1,199 @@
/*
Cleans up stale webhook configurations created by Kyverno that were not cleaned up
*/
package main
import (
"flag"
"os"
"sync"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/signal"
"k8s.io/apimachinery/pkg/api/errors"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
)
var (
kubeconfig string
)
const (
mutatingWebhookConfigKind string = "MutatingWebhookConfiguration"
validatingWebhookConfigKind string = "ValidatingWebhookConfiguration"
)
func main() {
defer glog.Flush()
// os signal handler
stopCh := signal.SetupSignalHandler()
// arguments
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
// create client config
clientConfig, err := createClientConfig(kubeconfig)
if err != nil {
glog.Fatalf("Error building kubeconfig: %v\n", err)
}
// DYNAMIC CLIENT
// - client for all registered resources
client, err := client.NewClient(clientConfig)
if err != nil {
glog.Fatalf("Error creating client: %v\n", err)
}
requests := []request{
// Resource
request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},
// Policy
request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationName},
request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationDebugName},
request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationName},
request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationDebugName},
}
done := make(chan struct{})
defer close(done)
failure := false
// use a pipeline to pass requests to the cleanup routines
// generate requests
in := gen(done, stopCh, requests...)
// process requests
// processing routine count : 2
p1 := process(client, done, stopCh, in)
p2 := process(client, done, stopCh, in)
// merge results from processing routines
for err := range merge(done, stopCh, p1, p2) {
if err != nil {
failure = true
glog.Errorf("failed to cleanup: %v", err)
}
}
// if there is any failure then we fail the process
if failure {
glog.Errorf("failed to cleanup webhook configurations")
os.Exit(1)
}
}
func removeWebhookIfExists(client *client.Client, kind string, name string) error {
var err error
// Get resource
_, err = client.GetResource(kind, "", name)
if errors.IsNotFound(err) {
glog.V(4).Infof("%s(%s) not found", name, kind)
return nil
}
if err != nil {
glog.Errorf("failed to get resource %s(%s)", name, kind)
return err
}
// Delete resource
err = client.DeleteResouce(kind, "", name, false)
if err != nil {
glog.Errorf("failed to delete resource %s(%s)", name, kind)
return err
}
glog.Infof("cleaned up resource %s(%s)", name, kind)
return nil
}
func createClientConfig(kubeconfig string) (*rest.Config, error) {
if kubeconfig == "" {
glog.Info("Using in-cluster configuration")
return rest.InClusterConfig()
}
glog.Infof("Using configuration from '%s'", kubeconfig)
return clientcmd.BuildConfigFromFlags("", kubeconfig)
}
type request struct {
kind string
name string
}
/* Processing Pipeline
                     -> Process Requests
   Generate Requests -> Process Requests -> Merge Results
                     -> Process Requests
- number of processes can be controlled
- stop processing on SIGTERM or SIGINT signal
- stop processing if any process fails (supported)
*/
// Generates requests to be processed
func gen(done <-chan struct{}, stopCh <-chan struct{}, requests ...request) <-chan request {
out := make(chan request)
go func() {
defer close(out)
for _, req := range requests {
select {
case out <- req:
case <-done:
println("done generate")
return
case <-stopCh:
println("shutting down generate")
return
}
}
}()
return out
}
// processes the requests
func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{}, requests <-chan request) <-chan error {
out := make(chan error)
go func() {
defer close(out)
for req := range requests {
select {
case out <- removeWebhookIfExists(client, req.kind, req.name):
case <-done:
println("done process")
return
case <-stopCh:
println("shutting down process")
return
}
}
}()
return out
}
// waits for all processes to be complete and merges result
func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan error) <-chan error {
var wg sync.WaitGroup
out := make(chan error)
// gets the output from each process
output := func(ch <-chan error) {
defer wg.Done()
for err := range ch {
select {
case out <- err:
case <-done:
println("done merge")
return
case <-stopCh:
println("shutting down merge")
return
}
}
}
wg.Add(len(processes))
for _, process := range processes {
go output(process)
}
// close when all the process goroutines are done
go func() {
wg.Wait()
close(out)
}()
return out
}

View file

@ -1,6 +1,7 @@
package main
import (
"context"
"flag"
"time"
@ -8,17 +9,18 @@ import (
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
dclient "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/namespace"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/signal"
"github.com/nirmata/kyverno/pkg/utils"
"github.com/nirmata/kyverno/pkg/version"
"github.com/nirmata/kyverno/pkg/webhookconfig"
"github.com/nirmata/kyverno/pkg/webhooks"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/sample-controller/pkg/signals"
)
var (
@ -34,15 +36,14 @@ var (
func main() {
defer glog.Flush()
printVersionInfo()
// profile cpu and memory consumption
prof = enableProfiling(cpu, memory)
version.PrintVersionInfo()
// cleanUp Channel
cleanUp := make(chan struct{})
// SIGINT & SIGTERM channel
stopCh := signals.SetupSignalHandler()
// handle os signals
stopCh := signal.SetupSignalHandler()
// CLIENT CONFIG
clientConfig, err := createClientConfig(kubeconfig)
clientConfig, err := config.CreateClientConfig(kubeconfig)
if err != nil {
glog.Fatalf("Error building kubeconfig: %v\n", err)
}
@ -58,7 +59,7 @@ func main() {
// DYNAMIC CLIENT
// - client for all registered resources
client, err := client.NewClient(clientConfig)
client, err := dclient.NewClient(clientConfig)
if err != nil {
glog.Fatalf("Error creating client: %v\n", err)
}
@ -136,7 +137,7 @@ func main() {
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData, pvgen, policyMetaStore)
// CONFIGURE CERTIFICATES
tlsPair, err := initTLSPemPair(clientConfig, client)
tlsPair, err := client.InitTLSPemPair(clientConfig)
if err != nil {
glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
}
@ -181,22 +182,23 @@ func main() {
server.RunAsync(stopCh)
<-stopCh
disableProfiling(prof)
server.Stop()
// by default http.Server waits indefinitely for connections to return to idle and then shuts down
// adding a threshold will handle zombie connections
// adjust the context deadline to 5 seconds
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
defer func() {
cancel()
}()
// cleanup webhookconfigurations followed by webhook shutdown
server.Stop(ctx)
// resource cleanup
// remove webhook configurations
<-cleanUp
glog.Info("successful shutdown of kyverno controller")
}
func init() {
// profiling feature gate
// cpu and memory profiling cannot be enabled at the same time
// if both cpu and memory are enabled, cpu profiling is used by default
flag.BoolVar(&cpu, "cpu", false, "cpu profiling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
flag.BoolVar(&memory, "memory", false, "memory profiling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
//TODO: this has been added to backward support command line arguments
// will be removed in future and the configuration will be set only via configmaps
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")

View file

@ -418,6 +418,9 @@ spec:
app: kyverno
spec:
serviceAccountName: kyverno-service-account
initContainers:
- name: kyverno-pre
image: nirmata/kyvernopre:latest
containers:
- name: kyverno
image: nirmata/kyverno:latest

init.go
View file

@ -1,102 +0,0 @@
package main
import (
"fmt"
"math/rand"
"time"
"github.com/pkg/profile"
"github.com/golang/glog"
client "github.com/nirmata/kyverno/pkg/dclient"
tls "github.com/nirmata/kyverno/pkg/tls"
"github.com/nirmata/kyverno/pkg/version"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
)
func printVersionInfo() {
v := version.GetVersion()
glog.Infof("Kyverno version: %s\n", v.BuildVersion)
glog.Infof("Kyverno BuildHash: %s\n", v.BuildHash)
glog.Infof("Kyverno BuildTime: %s\n", v.BuildTime)
}
func createClientConfig(kubeconfig string) (*rest.Config, error) {
if kubeconfig == "" {
glog.Info("Using in-cluster configuration")
return rest.InClusterConfig()
}
glog.Infof("Using configuration from '%s'", kubeconfig)
return clientcmd.BuildConfigFromFlags("", kubeconfig)
}
// Loads or creates PEM private key and TLS certificate for webhook server.
// Created pair is stored in cluster's secret.
// Returns struct with key/certificate pair.
func initTLSPemPair(configuration *rest.Config, client *client.Client) (*tls.TlsPemPair, error) {
certProps, err := client.GetTLSCertProps(configuration)
if err != nil {
return nil, err
}
tlsPair := client.ReadTlsPair(certProps)
if tls.IsTlsPairShouldBeUpdated(tlsPair) {
glog.Info("Generating new key/certificate pair for TLS")
tlsPair, err = client.GenerateTlsPemPair(certProps)
if err != nil {
return nil, err
}
if err = client.WriteTlsPair(certProps, tlsPair); err != nil {
return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
}
return tlsPair, nil
}
glog.Infoln("Using existing TLS key/certificate pair")
return tlsPair, nil
}
var prof interface {
Stop()
}
func enableProfiling(cpu, memory bool) interface {
Stop()
} {
file := "/opt/nirmata/kyverno/" + randomString(6)
if cpu {
glog.Infof("Enable cpu profiling ...")
prof = profile.Start(profile.CPUProfile, profile.ProfilePath(file))
} else if memory {
glog.Infof("Enable memory profiling ...")
prof = profile.Start(profile.MemProfile, profile.ProfilePath(file))
}
return prof
}
func disableProfiling(p interface{ Stop() }) {
if p != nil {
p.Stop()
}
}
// generate random string
const charset = "abcdefghijklmnopqrstuvwxyz" +
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
var seededRand *rand.Rand = rand.New(
rand.NewSource(time.Now().UnixNano()))
func stringWithCharset(length int, charset string) string {
b := make([]byte, length)
for i := range b {
b[i] = charset[seededRand.Intn(len(charset))]
}
return string(b)
}
func randomString(length int) string {
return stringWithCharset(length, charset)
}

View file

@ -1,6 +1,12 @@
package config
import "flag"
import (
"flag"
"github.com/golang/glog"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
)
const (
// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
@ -70,3 +76,13 @@ func LogDefaultFlags() {
flag.Set("stderrthreshold", "WARNING")
flag.Set("v", "2")
}
//CreateClientConfig creates client config
func CreateClientConfig(kubeconfig string) (*rest.Config, error) {
if kubeconfig == "" {
glog.Info("Using in-cluster configuration")
return rest.InClusterConfig()
}
glog.Infof("Using configuration from '%s'", kubeconfig)
return clientcmd.BuildConfigFromFlags("", kubeconfig)
}

View file

@ -15,6 +15,31 @@ import (
"k8s.io/client-go/rest"
)
// InitTLSPemPair Loads or creates PEM private key and TLS certificate for webhook server.
// Created pair is stored in cluster's secret.
// Returns struct with key/certificate pair.
func (c *Client) InitTLSPemPair(configuration *rest.Config) (*tls.TlsPemPair, error) {
certProps, err := c.GetTLSCertProps(configuration)
if err != nil {
return nil, err
}
tlsPair := c.ReadTlsPair(certProps)
if tls.IsTlsPairShouldBeUpdated(tlsPair) {
glog.Info("Generating new key/certificate pair for TLS")
tlsPair, err = c.GenerateTlsPemPair(certProps)
if err != nil {
return nil, err
}
if err = c.WriteTlsPair(certProps, tlsPair); err != nil {
return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
}
return tlsPair, nil
}
glog.Infoln("Using existing TLS key/certificate pair")
return tlsPair, nil
}
//GenerateTlsPemPair Issues TLS certificate for webhook server using given PEM private key
// Returns signed and approved TLS certificate in PEM format
func (c *Client) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.TlsPemPair, error) {

pkg/signal/signal.go Normal file
View file

@ -0,0 +1,43 @@
package signal
import (
"os"
"os/signal"
)
var onlyOneSignalHandler = make(chan struct{})
var shutdownHandler chan os.Signal
// SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned
// which is closed on one of these signals. If a second signal is caught, the program
// is terminated with exit code 1.
func SetupSignalHandler() <-chan struct{} {
close(onlyOneSignalHandler) // panics when called twice
shutdownHandler = make(chan os.Signal, 2)
stop := make(chan struct{})
signal.Notify(shutdownHandler, shutdownSignals...)
go func() {
<-shutdownHandler
close(stop)
<-shutdownHandler
os.Exit(1) // second signal. Exit directly.
}()
return stop
}
// RequestShutdown emulates a received event that is considered as shutdown signal (SIGTERM/SIGINT)
// This returns whether a handler was notified
func RequestShutdown() bool {
if shutdownHandler != nil {
select {
case shutdownHandler <- shutdownSignals[0]:
return true
default:
}
}
return false
}
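
For reference, a minimal usage sketch (not part of this commit) of how a caller consumes the stop channel returned by SetupSignalHandler; the commented-out server wiring is a placeholder mirroring the usage in cmd/kyverno/main.go above:

package main

import (
	"github.com/golang/glog"

	"github.com/nirmata/kyverno/pkg/signal"
)

func main() {
	// stopCh is closed on the first SIGINT/SIGTERM; a second signal exits with code 1
	stopCh := signal.SetupSignalHandler()

	// start long-running work that watches stopCh, e.g.:
	// server.RunAsync(stopCh)

	<-stopCh
	glog.Info("shutdown signal received, cleaning up")
	// server.Stop(ctx)
}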

View file

@ -0,0 +1,8 @@
package signal
import (
"os"
"syscall"
)
var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}

View file

@ -0,0 +1,8 @@
package signal
//TODO: how to pick files based on the target OS at compile time?
// import (
// "os"
// )
// var shutdownSignals = []os.Signal{os.Interrupt}
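
One way to resolve the TODO above (a hedged sketch, not part of this commit): Go build constraints select files per target OS, so the syscall-based signal list can live in a file built everywhere except Windows, while a Windows-only file registers just os.Interrupt. The file names and build tags below are assumptions (newer Go also accepts the //go:build form):

// signal_posix.go (assumed file name), compiled on all platforms except Windows
// +build !windows

package signal

import (
	"os"
	"syscall"
)

var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}

// signal_windows.go (assumed file name), compiled only on Windows, which does not deliver SIGTERM
// +build windows

package signal

import "os"

var shutdownSignals = []os.Signal{os.Interrupt}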

View file

@ -1,24 +1,20 @@
package version
import (
"github.com/golang/glog"
)
// These fields are set during an official build
// Global vars set from command-line arguments
var (
BuildVersion = "--"
BuildHash = "--"
BuildTime = "--"
)
// VersionInfo gets json info about the agent version
type VersionInfo struct {
BuildVersion string
BuildHash string
BuildTime string
}
// GetVersion gets the current agent version
func GetVersion() *VersionInfo {
return &VersionInfo{
BuildVersion: BuildVersion,
BuildHash: BuildHash,
BuildTime: BuildTime,
}
//PrintVersionInfo displays the kyverno version - git version
func PrintVersionInfo() {
glog.Infof("Kyverno version: %s\n", BuildVersion)
glog.Infof("Kyverno BuildHash: %s\n", BuildHash)
glog.Infof("Kyverno BuildTime: %s\n", BuildTime)
}

View file

@ -2,6 +2,7 @@ package webhookconfig
import (
"errors"
"sync"
"time"
"github.com/golang/glog"
@ -74,9 +75,9 @@ func (wrc *WebhookRegistrationClient) Register() error {
return nil
}
// RemovePolicyWebhookConfigurations removes webhook configurations for resources and policy
// RemoveWebhookConfigurations removes webhook configurations for resources and policy
// called during webhook server shutdown
func (wrc *WebhookRegistrationClient) RemovePolicyWebhookConfigurations(cleanUp chan<- struct{}) {
func (wrc *WebhookRegistrationClient) RemoveWebhookConfigurations(cleanUp chan<- struct{}) {
//TODO: duplicate, but a placeholder to perform more error handling during cleanup
wrc.removeWebhookConfigurations()
// close channel to notify cleanup is complete
@ -230,12 +231,69 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
defer func() {
glog.V(4).Infof("Finished cleaning up webhookcongfigurations (%v)", time.Since(startTime))
}()
var wg sync.WaitGroup
wg.Add(3)
// mutating and validating webhook configuration for Kubernetes resources
wrc.RemoveResourceMutatingWebhookConfiguration()
go wrc.removeResourceMutatingWebhookConfiguration(&wg)
// mutating and validating webhook configuration for Policy CRD resource
wrc.removePolicyWebhookConfigurations()
go wrc.removePolicyMutatingWebhookConfiguration(&wg)
go wrc.removePolicyValidatingWebhookConfiguration(&wg)
// mutating webhook configuration used to verify if the admission control flow is working or not
wrc.removeVerifyWebhookMutatingWebhookConfig()
// wait for the removal goroutines to return
wg.Wait()
}
// wrapper to handle wait group
// TODO: re-work with RemoveResourceMutatingWebhookConfiguration, as the only difference is wg handling
func (wrc *WebhookRegistrationClient) removeResourceMutatingWebhookConfiguration(wg *sync.WaitGroup) {
defer wg.Done()
wrc.RemoveResourceMutatingWebhookConfiguration()
}
// delete policy mutating webhookconfigurations
// handle wait group
func (wrc *WebhookRegistrationClient) removePolicyMutatingWebhookConfiguration(wg *sync.WaitGroup) {
defer wg.Done()
// Mutating webhook configuration
var mutatingConfig string
if wrc.serverIP != "" {
mutatingConfig = config.PolicyMutatingWebhookConfigurationDebugName
} else {
mutatingConfig = config.PolicyMutatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
err := wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", mutatingConfig, err)
} else {
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", mutatingConfig)
}
}
// delete policy validating webhookconfigurations
// handle wait group
func (wrc *WebhookRegistrationClient) removePolicyValidatingWebhookConfiguration(wg *sync.WaitGroup) {
defer wg.Done()
// Validating webhook configuration
var err error
var validatingConfig string
if wrc.serverIP != "" {
validatingConfig = config.PolicyValidatingWebhookConfigurationDebugName
} else {
validatingConfig = config.PolicyValidatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
err = wrc.registrationClient.ValidatingWebhookConfigurations().Delete(validatingConfig, &v1.DeleteOptions{})
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", validatingConfig, err)
} else {
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", validatingConfig)
}
}

View file

@ -261,17 +261,17 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
}
// Stop TLS server and returns control after the server is shut down
func (ws *WebhookServer) Stop() {
err := ws.server.Shutdown(context.Background())
func (ws *WebhookServer) Stop(ctx context.Context) {
// cleanUp
// remove the static webhookconfigurations
go ws.webhookRegistrationClient.RemoveWebhookConfigurations(ws.cleanUp)
// shutdown http.Server with context timeout
err := ws.server.Shutdown(ctx)
if err != nil {
// Error from closing listeners, or context timeout:
glog.Info("Server Shutdown error: ", err)
ws.server.Close()
}
// cleanUp
// remove the static webhookconfigurations for policy CRD
ws.webhookRegistrationClient.RemovePolicyWebhookConfigurations(ws.cleanUp)
}
// bodyToAdmissionReview creates AdmissionReview object from request body

View file

@ -1,9 +1,5 @@
Use these scripts to prepare the controller for work.
All these scripts should be launched from the root folder of the project, for example:
`scripts/compile-image.sh`
### compile-image.sh ###
Compiles the project to go executable, generates docker image and pushes it to the repo. Has no arguments.
All these scripts should be launched from the root folder of the project.
### generate-server-cert.sh ###
Generates the TLS certificate and key used by the webhook server. Example:
@ -19,10 +15,8 @@ Prepares controller for free (local) or in-cluster use. Uses `generate-server-ce
* `--serverIp` means the same as for `generate-server-cert.sh`
Examples:
`scripts/deploy-controller.sh --service=my-kyverno --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kyverno'
`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with mnaster node at '192.168.10.117'
`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with master node at '192.168.10.117'
### test-web-hook.sh ###
Quickly creates and deletes test config map. If your webhook server is running, you should see the corresponding output from it. Use this script after `deploy-controller.sh`.
### update-codegen.sh ###
Generates additional code for controller object. You should resolve all dependencies before using it, see main Readme for details.

View file

@ -1,2 +0,0 @@
kubectl delete -f definitions/install.yaml
kubectl delete csr,MutatingWebhookConfiguration,ValidatingWebhookConfiguration --all

View file

@ -1,34 +0,0 @@
#!/bin/bash
default_version="dev-testing"
version=$1
if [[ -z "$1" ]]
then
echo "Using default version: ${default_version}"
version="${default_version}"
fi
hub_user_name="nirmata"
project_name="kyverno"
echo "# Ensuring Go dependencies..."
dep ensure -v || exit 2
echo "# Building executable ${project_name}..."
chmod +x scripts/update-codegen.sh
scripts/update-codegen.sh
make build || exit 3
echo "# Building docker image ${hub_user_name}/${project_name}:${version}"
cat <<EOF > Dockerfile
FROM alpine:latest
WORKDIR ~/
ADD ${project_name} ./${project_name}
ENTRYPOINT ["./${project_name}"]
EOF
tag="${hub_user_name}/${project_name}:${version}"
docker build --no-cache -t "${tag}" . || exit 4
echo "# Pushing image to repository..."
docker push "${tag}" || exit 5

View file

@ -1,3 +0,0 @@
cd "$(dirname "$0")"
kubectl create -f resources/test-configmap.yaml
kubectl delete -f resources/test-configmap.yaml

View file

@ -1,4 +0,0 @@
#!/bin/bash
# You should see the trace of requests in the output of webhook server
kubectl create configmap test-config-map --from-literal="some_var=some_value"
kubectl delete configmap test-config-map