Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-31 03:45:17 +00:00)

Commit 40b685c9db - merge with v1.1.0
29 changed files with 638 additions and 318 deletions
Changed paths: .travis.yml, Makefile, cmd, definitions, init.go,
pkg (config, dclient, event, namespace, policy, policystore, policyviolation, signal, version, webhookconfig, webhooks), scripts
.travis.yml

@@ -31,5 +31,6 @@ after_success:
   if [ $TRAVIS_PULL_REQUEST == 'false' ]
   then
     docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
-    make docker-publish
+    # make docker-publish-initContainer
+    # make docker-publish-kyverno
   fi
99 Makefile

@@ -1,63 +1,82 @@
 .DEFAULT_GOAL: build

-# The CLI binary to build
-BIN ?= kyverno
+##################################
+# DEFAULTS
+##################################
+REGISTRY=index.docker.io
+REPO=$(REGISTRY)/nirmata/kyverno
+IMAGE_TAG=$(GIT_VERSION)
+GOOS ?= $(shell go env GOOS)
+LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"

 GIT_VERSION := $(shell git describe --dirty --always --tags)
 GIT_BRANCH := $(shell git branch | grep \* | cut -d ' ' -f2)
 GIT_HASH := $(GIT_BRANCH)/$(shell git log -1 --pretty=format:"%H")
 TIMESTAMP := $(shell date '+%Y-%m-%d_%I:%M:%S%p')

-PACKAGE ?=github.com/nirmata/kyverno
-MAIN ?=$(PACKAGE)
-LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"
-
-# default docker hub
-REGISTRY=index.docker.io
-REPO=$(REGISTRY)/nirmata/kyverno
-IMAGE_TAG=$(GIT_VERSION)
-
-GOOS ?= $(shell go env GOOS)
-OUTPUT=$(shell pwd)/_output/cli/$(BIN)
-
+##################################
+# KYVERNO
+##################################
+KYVERNO_PATH:= cmd/kyverno
 build:
-	CGO_ENABLED=0 GOOS=linux go build -ldflags=$(LD_FLAGS) $(MAIN)
+	GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go

-local:
-	go build -ldflags=$(LD_FLAGS) $(MAIN)
-
-cli: cli-dirs
-	GOOS=$(GOOS) \
-	go build \
-	-o $(OUTPUT) \
-	-ldflags $(LD_FLAGS) \
-	$(PACKAGE)/cmd/$(BIN)
-
-cli-dirs:
-	@mkdir -p _output/cli
-
-clean:
-	go clean
-
-# docker image build targets
-# user must be logged in the $(REGISTRY) to push images
-.PHONY: docker-build docker-tag-repo docker-push
-
-docker-publish: docker-build docker-tag-repo docker-push
-
-docker-build:
-	@docker build -t $(REPO):$(IMAGE_TAG) .
-
-docker-tag-repo:
-	@docker tag $(REPO):$(IMAGE_TAG) $(REPO):latest
-
-docker-push:
-	@docker push $(REPO):$(IMAGE_TAG)
-	@docker push $(REPO):latest
-
-## Testing & Code-Coverage
+##################################
+# INIT CONTAINER
+##################################
+INITC_PATH := cmd/initContainer
+INITC_IMAGE := kyvernopre
+initContainer:
+	GOOS=$(GOOS) go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
+
+.PHONY: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer
+
+docker-publish-initContainer: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer
+
+docker-build-initContainer:
+	CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
+	echo $(PWD)/$(INITC_PATH)/
+	@docker build -f $(PWD)/$(INITC_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(PWD)/$(INITC_PATH)/
+
+docker-tag-repo-initContainer:
+	@docker tag $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(INITC_IMAGE):latest
+
+docker-push-initContainer:
+	@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG)
+	@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):latest
+
+##################################
+# KYVERNO CONTAINER
+##################################
+.PHONY: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
+KYVERNO_PATH := cmd/kyverno
+KYVERNO_IMAGE := kyverno
+docker-publish-kyverno: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
+
+docker-build-kyverno:
+	GO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
+	@docker build -f $(PWD)/$(KYVERNO_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(PWD)/$(KYVERNO_PATH)
+
+docker-tag-repo-kyverno:
+	@docker tag $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest
+
+docker-push-kyverno:
+	@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG)
+	@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest
+
+##################################
+# CLI
+##################################
+CLI_PATH := cmd/cli
+cli:
+	GOOS=$(GOOS) go build -o $(PWD)/$(CLI_PATH)/kyvernocli -ldflags=$(LD_FLAGS) $(PWD)/$(CLI_PATH)/main.go
+
+##################################
+# Testing & Code-Coverage
+##################################

 ## variables
 BIN_DIR := $(GOPATH)/bin
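The -X flags in LD_FLAGS stamp build metadata into the binary at link time. For reference, they pair with the package-level variables in pkg/version (the version.go change appears later in this diff); a condensed sketch of that pairing, with the example flag value below being illustrative only:

// pkg/version: variables overridden at link time via
//   go build -ldflags "-X github.com/nirmata/kyverno/pkg/version.BuildVersion=v1.1.0 ..."
// (the v1.1.0 value is only an example)
package version

import "github.com/golang/glog"

var (
	BuildVersion = "--" // defaults used when built without the Makefile flags
	BuildHash    = "--"
	BuildTime    = "--"
)

// PrintVersionInfo logs whatever values were stamped in.
func PrintVersionInfo() {
	glog.Infof("Kyverno version: %s\n", BuildVersion)
	glog.Infof("Kyverno BuildHash: %s\n", BuildHash)
	glog.Infof("Kyverno BuildTime: %s\n", BuildTime)
}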
3 cmd/initContainer/Dockerfile (new file)

@@ -0,0 +1,3 @@
+FROM scratch
+ADD kyvernopre /kyvernopre
+ENTRYPOINT ["/kyvernopre"]
199 cmd/initContainer/main.go (new file)

/*
Cleans up stale webhookconfigurations created by kyverno that were not cleanedup
*/
package main

import (
	"flag"
	"os"
	"sync"

	"github.com/golang/glog"
	"github.com/nirmata/kyverno/pkg/config"
	client "github.com/nirmata/kyverno/pkg/dclient"
	"github.com/nirmata/kyverno/pkg/signal"
	"k8s.io/apimachinery/pkg/api/errors"
	rest "k8s.io/client-go/rest"
	clientcmd "k8s.io/client-go/tools/clientcmd"
)

var (
	kubeconfig string
)

const (
	mutatingWebhookConfigKind   string = "MutatingWebhookConfiguration"
	validatingWebhookConfigKind string = "ValidatingWebhookConfiguration"
)

func main() {
	defer glog.Flush()
	// os signal handler
	stopCh := signal.SetupSignalHandler()
	// arguments
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")

	// create client config
	clientConfig, err := createClientConfig(kubeconfig)
	if err != nil {
		glog.Fatalf("Error building kubeconfig: %v\n", err)
	}

	// DYNAMIC CLIENT
	// - client for all registered resources
	client, err := client.NewClient(clientConfig)
	if err != nil {
		glog.Fatalf("Error creating client: %v\n", err)
	}

	requests := []request{
		// Resource
		request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
		request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},
		// Policy
		request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationName},
		request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationDebugName},
		request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationName},
		request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationDebugName},
	}

	done := make(chan struct{})
	defer close(done)
	failure := false
	// use pipline to pass request to cleanup resources
	// generate requests
	in := gen(done, stopCh, requests...)
	// process requests
	// processing routine count : 2
	p1 := process(client, done, stopCh, in)
	p2 := process(client, done, stopCh, in)
	// merge results from processing routines
	for err := range merge(done, stopCh, p1, p2) {
		if err != nil {
			failure = true
			glog.Errorf("failed to cleanup: %v", err)
		}
	}
	// if there is any failure then we fail process
	if failure {
		glog.Errorf("failed to cleanup webhook configurations")
		os.Exit(1)
	}
}

func removeWebhookIfExists(client *client.Client, kind string, name string) error {
	var err error
	// Get resource
	_, err = client.GetResource(kind, "", name)
	if errors.IsNotFound(err) {
		glog.V(4).Infof("%s(%s) not found", name, kind)
		return nil
	}
	if err != nil {
		glog.Errorf("failed to get resource %s(%s)", name, kind)
		return err
	}
	// Delete resource
	err = client.DeleteResouce(kind, "", name, false)
	if err != nil {
		glog.Errorf("failed to delete resource %s(%s)", name, kind)
		return err
	}
	glog.Infof("cleaned up resource %s(%s)", name, kind)
	return nil
}

func createClientConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig == "" {
		glog.Info("Using in-cluster configuration")
		return rest.InClusterConfig()
	}
	glog.Infof("Using configuration from '%s'", kubeconfig)
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}

type request struct {
	kind string
	name string
}

/* Processing Pipeline
                  -> Process Requests
Generate Requests -> Process Requests -> Merge Results
                  -> Process Requests
- number of processes can be controlled
- stop processing on SIGTERM OR SIGNKILL signal
- stop processing if any process fails(supported)
*/
// Generates requests to be processed
func gen(done <-chan struct{}, stopCh <-chan struct{}, requests ...request) <-chan request {
	out := make(chan request)
	go func() {
		defer close(out)
		for _, req := range requests {
			select {
			case out <- req:
			case <-done:
				println("done generate")
				return
			case <-stopCh:
				println("shutting down generate")
				return
			}
		}
	}()
	return out
}

// processes the requests
func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{}, requests <-chan request) <-chan error {
	out := make(chan error)
	go func() {
		defer close(out)
		for req := range requests {
			select {
			case out <- removeWebhookIfExists(client, req.kind, req.name):
			case <-done:
				println("done process")
				return
			case <-stopCh:
				println("shutting down process")
				return
			}
		}
	}()
	return out
}

// waits for all processes to be complete and merges result
func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan error) <-chan error {
	var wg sync.WaitGroup
	out := make(chan error)
	// gets the output from each process
	output := func(ch <-chan error) {
		defer wg.Done()
		for err := range ch {
			select {
			case out <- err:
			case <-done:
				println("done merge")
				return
			case <-stopCh:
				println("shutting down merge")
				return
			}
		}
	}

	wg.Add(len(processes))
	for _, process := range processes {
		go output(process)
	}

	// close when all the process goroutines are done
	go func() {
		wg.Wait()
		close(out)
	}()
	return out
}
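The comment block above describes a small fan-out/fan-in pipeline: one generator, a configurable number of processors, and a merge stage. A minimal, self-contained sketch of the same pattern with a trivial worker, using illustrative names rather than code from this repository:

// Fan-out/fan-in sketch: one producer, two consumers, merged results.
package main

import (
	"fmt"
	"sync"
)

func main() {
	jobs := make(chan int)
	go func() { // generate
		defer close(jobs)
		for i := 1; i <= 5; i++ {
			jobs <- i
		}
	}()

	worker := func() <-chan string { // process
		out := make(chan string)
		go func() {
			defer close(out)
			for j := range jobs {
				out <- fmt.Sprintf("handled %d", j)
			}
		}()
		return out
	}
	p1, p2 := worker(), worker()

	// merge: fan the two result channels back into one
	merged := make(chan string)
	var wg sync.WaitGroup
	wg.Add(2)
	for _, ch := range []<-chan string{p1, p2} {
		go func(c <-chan string) {
			defer wg.Done()
			for s := range c {
				merged <- s
			}
		}(ch)
	}
	go func() { wg.Wait(); close(merged) }()

	for s := range merged {
		fmt.Println(s)
	}
}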
@@ -1,6 +1,7 @@
 package main

 import (
+	"context"
 	"flag"
 	"time"

@@ -8,17 +9,18 @@ import (
 	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
 	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
 	"github.com/nirmata/kyverno/pkg/config"
-	client "github.com/nirmata/kyverno/pkg/dclient"
+	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	event "github.com/nirmata/kyverno/pkg/event"
 	"github.com/nirmata/kyverno/pkg/namespace"
 	"github.com/nirmata/kyverno/pkg/policy"
 	"github.com/nirmata/kyverno/pkg/policystore"
 	"github.com/nirmata/kyverno/pkg/policyviolation"
+	"github.com/nirmata/kyverno/pkg/signal"
 	"github.com/nirmata/kyverno/pkg/utils"
+	"github.com/nirmata/kyverno/pkg/version"
 	"github.com/nirmata/kyverno/pkg/webhookconfig"
 	"github.com/nirmata/kyverno/pkg/webhooks"
 	kubeinformers "k8s.io/client-go/informers"
-	"k8s.io/sample-controller/pkg/signals"
 )

 var (
@@ -34,15 +36,14 @@ var (

 func main() {
 	defer glog.Flush()
-	printVersionInfo()
-	// profile cpu and memory consuption
-	prof = enableProfiling(cpu, memory)
+	version.PrintVersionInfo()
 	// cleanUp Channel
 	cleanUp := make(chan struct{})
-	// SIGINT & SIGTERM channel
-	stopCh := signals.SetupSignalHandler()
+	// handle os signals
+	stopCh := signal.SetupSignalHandler()
 	// CLIENT CONFIG
-	clientConfig, err := createClientConfig(kubeconfig)
+	clientConfig, err := config.CreateClientConfig(kubeconfig)
 	if err != nil {
 		glog.Fatalf("Error building kubeconfig: %v\n", err)
 	}
@@ -58,7 +59,7 @@ func main() {

 	// DYNAMIC CLIENT
 	// - client for all registered resources
-	client, err := client.NewClient(clientConfig)
+	client, err := dclient.NewClient(clientConfig)
 	if err != nil {
 		glog.Fatalf("Error creating client: %v\n", err)
 	}
@@ -74,35 +75,55 @@ func main() {
 	}

 	// WERBHOOK REGISTRATION CLIENT
-	webhookRegistrationClient := webhookconfig.NewWebhookRegistrationClient(clientConfig, client, serverIP, int32(webhookTimeout))
+	webhookRegistrationClient, err := webhookconfig.NewWebhookRegistrationClient(
+		clientConfig,
+		client,
+		serverIP,
+		int32(webhookTimeout))
+	if err != nil {
+		glog.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
+	}

 	// KYVERNO CRD INFORMER
 	// watches CRD resources:
 	//   - Policy
 	//   - PolicyVolation
 	// - cache resync time: 10 seconds
-	pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, 10*time.Second)
+	pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(
+		pclient,
+		10*time.Second)

 	// KUBERNETES RESOURCES INFORMER
 	// watches namespace resource
 	// - cache resync time: 10 seconds
-	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second)
+	kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(
+		kubeClient,
+		10*time.Second)

 	// Configuration Data
 	// dyamically load the configuration from configMap
 	// - resource filters
 	// if the configMap is update, the configuration will be updated :D
-	configData := config.NewConfigData(kubeClient, kubeInformer.Core().V1().ConfigMaps(), filterK8Resources)
+	configData := config.NewConfigData(
+		kubeClient,
+		kubeInformer.Core().V1().ConfigMaps(),
+		filterK8Resources)

 	// Policy meta-data store
-	policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies().Lister())
+	policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies())

 	// EVENT GENERATOR
 	// - generate event with retry mechanism
-	egen := event.NewEventGenerator(client, pInformer.Kyverno().V1().ClusterPolicies())
+	egen := event.NewEventGenerator(
+		client,
+		pInformer.Kyverno().V1().ClusterPolicies())

 	// POLICY VIOLATION GENERATOR
 	// -- generate policy violation
-	pvgen := policyviolation.NewPVGenerator(pclient, client, pInformer.Kyverno().V1().ClusterPolicyViolations().Lister(), pInformer.Kyverno().V1().NamespacedPolicyViolations().Lister())
+	pvgen := policyviolation.NewPVGenerator(pclient,
+		client,
+		pInformer.Kyverno().V1().ClusterPolicyViolations(),
+		pInformer.Kyverno().V1().NamespacedPolicyViolations())

 	// POLICY CONTROLLER
 	// - reconciliation policy and policy violation
@@ -127,22 +148,39 @@ func main() {
 	// POLICY VIOLATION CONTROLLER
 	// policy violation cleanup if the corresponding resource is deleted
 	// status: lastUpdatTime
-	pvc, err := policyviolation.NewPolicyViolationController(client, pclient, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations())
+	pvc, err := policyviolation.NewPolicyViolationController(
+		client,
+		pclient,
+		pInformer.Kyverno().V1().ClusterPolicies(),
+		pInformer.Kyverno().V1().ClusterPolicyViolations())
 	if err != nil {
 		glog.Fatalf("error creating cluster policy violation controller: %v\n", err)
 	}

-	nspvc, err := policyviolation.NewNamespacedPolicyViolationController(client, pclient, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().NamespacedPolicyViolations())
+	nspvc, err := policyviolation.NewNamespacedPolicyViolationController(
+		client,
+		pclient,
+		pInformer.Kyverno().V1().ClusterPolicies(),
+		pInformer.Kyverno().V1().NamespacedPolicyViolations())
 	if err != nil {
 		glog.Fatalf("error creating namespaced policy violation controller: %v\n", err)
 	}

 	// GENERATE CONTROLLER
 	// - watches for Namespace resource and generates resource based on the policy generate rule
-	nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData, pvgen, policyMetaStore)
+	nsc := namespace.NewNamespaceController(
+		pclient,
+		client,
+		kubeInformer.Core().V1().Namespaces(),
+		pInformer.Kyverno().V1().ClusterPolicies(),
+		pc.GetPolicyStatusAggregator(),
+		egen,
+		configData,
+		pvgen,
+		policyMetaStore)

 	// CONFIGURE CERTIFICATES
-	tlsPair, err := initTLSPemPair(clientConfig, client)
+	tlsPair, err := client.InitTLSPemPair(clientConfig)
 	if err != nil {
 		glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
 	}
@@ -162,17 +200,29 @@ func main() {
 	// -- annotations on resources with update details on mutation JSON patches
 	// -- generate policy violation resource
 	// -- generate events on policy and resource
-	server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pInformer.Kyverno().V1().NamespacedPolicyViolations(),
-		kubeInformer.Rbac().V1().RoleBindings(), kubeInformer.Rbac().V1().ClusterRoleBindings(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, policyMetaStore, pvgen, cleanUp)
+	server, err := webhooks.NewWebhookServer(
+		pclient,
+		client,
+		tlsPair,
+		pInformer.Kyverno().V1().ClusterPolicies(),
+		kubeInformer.Rbac().V1().RoleBindings(),
+		kubeInformer.Rbac().V1().ClusterRoleBindings(),
+		egen,
+		webhookRegistrationClient,
+		pc.GetPolicyStatusAggregator(),
+		configData,
+		policyMetaStore,
+		pvgen,
+		cleanUp)
 	if err != nil {
 		glog.Fatalf("Unable to create webhook server: %v\n", err)
 	}
 	// Start the components
 	pInformer.Start(stopCh)
 	kubeInformer.Start(stopCh)
-	if err := configData.Run(stopCh); err != nil {
-		glog.Fatalf("Unable to load dynamic configuration: %v\n", err)
-	}
+	go configData.Run(stopCh)
+	go policyMetaStore.Run(stopCh)
 	go pc.Run(1, stopCh)
 	go pvc.Run(1, stopCh)
 	go nspvc.Run(1, stopCh)
@@ -187,22 +237,23 @@ func main() {
 	server.RunAsync(stopCh)

 	<-stopCh
-	disableProfiling(prof)
-	server.Stop()
+	// by default http.Server waits indefinitely for connections to return to idle and then shuts down
+	// adding a threshold will handle zombie connections
+	// adjust the context deadline to 5 seconds
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer func() {
+		cancel()
+	}()
+	// cleanup webhookconfigurations followed by webhook shutdown
+	server.Stop(ctx)
 	// resource cleanup
 	// remove webhook configurations
 	<-cleanUp
+	glog.Info("successful shutdown of kyverno controller")
 }

 func init() {
-	// profiling feature gate
-	// cpu and memory profiling cannot be enabled at same time
-	// if both cpu and memory are enabled
-	// by default is to profile cpu
-	flag.BoolVar(&cpu, "cpu", false, "cpu profilling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
-	flag.BoolVar(&memory, "memory", false, "memory profilling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
-	//TODO: this has been added to backward support command line arguments
-	// will be removed in future and the configuration will be set only via configmaps
 	flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
 	flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
 	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
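The shutdown change above bounds how long the webhook server may take to drain connections by passing a context with a 5-second deadline into server.Stop. A minimal sketch of the same pattern using the standard library's http.Server, with an illustrative address and no Kyverno-specific wiring:

// Graceful shutdown sketch: give the server a bounded time to drain.
package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8443"}
	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Fatalf("server error: %v", err)
		}
	}()

	// ... wait for a stop signal here ...

	// Without a deadline, Shutdown waits for all connections to go idle;
	// the 5s timeout bounds that wait and handles zombie connections.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("forced shutdown: %v", err)
	}
}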
@@ -418,6 +418,9 @@ spec:
         app: kyverno
     spec:
       serviceAccountName: kyverno-service-account
+      initContainers:
+        - name: kyverno-pre
+          image: nirmata/kyvernopre:latest
       containers:
         - name: kyverno
           image: nirmata/kyverno:latest
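For context: Kubernetes runs initContainers to completion, in order, before starting the pod's main containers, so the kyvernopre cleanup of stale webhook configurations finishes before the kyverno controller starts and registers fresh ones.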
102 init.go (deleted; previous contents follow)

@@ -1,102 +0,0 @@
package main

import (
	"fmt"
	"math/rand"
	"time"

	"github.com/pkg/profile"

	"github.com/golang/glog"
	client "github.com/nirmata/kyverno/pkg/dclient"
	tls "github.com/nirmata/kyverno/pkg/tls"
	"github.com/nirmata/kyverno/pkg/version"
	rest "k8s.io/client-go/rest"
	clientcmd "k8s.io/client-go/tools/clientcmd"
)

func printVersionInfo() {
	v := version.GetVersion()
	glog.Infof("Kyverno version: %s\n", v.BuildVersion)
	glog.Infof("Kyverno BuildHash: %s\n", v.BuildHash)
	glog.Infof("Kyverno BuildTime: %s\n", v.BuildTime)
}

func createClientConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig == "" {
		glog.Info("Using in-cluster configuration")
		return rest.InClusterConfig()
	}
	glog.Infof("Using configuration from '%s'", kubeconfig)
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}

// Loads or creates PEM private key and TLS certificate for webhook server.
// Created pair is stored in cluster's secret.
// Returns struct with key/certificate pair.
func initTLSPemPair(configuration *rest.Config, client *client.Client) (*tls.TlsPemPair, error) {
	certProps, err := client.GetTLSCertProps(configuration)
	if err != nil {
		return nil, err
	}
	tlsPair := client.ReadTlsPair(certProps)
	if tls.IsTlsPairShouldBeUpdated(tlsPair) {
		glog.Info("Generating new key/certificate pair for TLS")
		tlsPair, err = client.GenerateTlsPemPair(certProps)
		if err != nil {
			return nil, err
		}
		if err = client.WriteTlsPair(certProps, tlsPair); err != nil {
			return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
		}
		return tlsPair, nil
	}

	glog.Infoln("Using existing TLS key/certificate pair")
	return tlsPair, nil
}

var prof interface {
	Stop()
}

func enableProfiling(cpu, memory bool) interface {
	Stop()
} {

	file := "/opt/nirmata/kyverno/" + randomString(6)
	if cpu {
		glog.Infof("Enable cpu profiling ...")
		prof = profile.Start(profile.CPUProfile, profile.ProfilePath(file))
	} else if memory {
		glog.Infof("Enable memory profiling ...")
		prof = profile.Start(profile.MemProfile, profile.ProfilePath(file))
	}

	return prof
}

func disableProfiling(p interface{ Stop() }) {
	if p != nil {
		p.Stop()
	}
}

// generate random string
const charset = "abcdefghijklmnopqrstuvwxyz" +
	"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"

var seededRand *rand.Rand = rand.New(
	rand.NewSource(time.Now().UnixNano()))

func stringWithCharset(length int, charset string) string {
	b := make([]byte, length)
	for i := range b {
		b[i] = charset[seededRand.Intn(len(charset))]
	}
	return string(b)
}

func randomString(length int) string {
	return stringWithCharset(length, charset)
}
@@ -1,6 +1,12 @@
 package config

-import "flag"
+import (
+	"flag"
+
+	"github.com/golang/glog"
+	rest "k8s.io/client-go/rest"
+	clientcmd "k8s.io/client-go/tools/clientcmd"
+)

 const (
 	// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
@@ -70,3 +76,13 @@ func LogDefaultFlags() {
 	flag.Set("stderrthreshold", "WARNING")
 	flag.Set("v", "2")
 }
+
+//CreateClientConfig creates client config
+func CreateClientConfig(kubeconfig string) (*rest.Config, error) {
+	if kubeconfig == "" {
+		glog.Info("Using in-cluster configuration")
+		return rest.InClusterConfig()
+	}
+	glog.Infof("Using configuration from '%s'", kubeconfig)
+	return clientcmd.BuildConfigFromFlags("", kubeconfig)
+}
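A rough sketch of calling the relocated config.CreateClientConfig helper from a command, assuming the same kubeconfig flag as main.go; the kubernetes clientset at the end is only there to show the returned rest.Config in use:

// Illustrative caller; not additional repository code.
package main

import (
	"flag"

	"github.com/golang/glog"
	"github.com/nirmata/kyverno/pkg/config"
	"k8s.io/client-go/kubernetes"
)

func main() {
	var kubeconfig string
	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
	flag.Parse()

	// Empty path -> in-cluster config; otherwise the kubeconfig file is used.
	restConfig, err := config.CreateClientConfig(kubeconfig)
	if err != nil {
		glog.Fatalf("Error building kubeconfig: %v\n", err)
	}
	kubeClient, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		glog.Fatalf("Error creating kubernetes client: %v\n", err)
	}
	_ = kubeClient
}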
@@ -30,7 +30,7 @@ type ConfigData struct {
 	// configuration data
 	filters []k8Resource
 	// hasynced
-	cmListerSycned cache.InformerSynced
+	cmSycned cache.InformerSynced
 }

 // ToFilter checks if the given resource is set to be filtered in the configuration
@@ -57,9 +57,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
 		glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
 	}
 	cd := ConfigData{
 		client: rclient,
 		cmName: os.Getenv(cmNameEnv),
-		cmListerSycned: cmInformer.Informer().HasSynced,
+		cmSycned: cmInformer.Informer().HasSynced,
 	}
 	//TODO: this has been added to backward support command line arguments
 	// will be removed in future and the configuration will be set only via configmaps
@@ -76,12 +76,12 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
 	return &cd
 }

-func (cd *ConfigData) Run(stopCh <-chan struct{}) error {
+//Run checks syncing
+func (cd *ConfigData) Run(stopCh <-chan struct{}) {
 	// wait for cache to populate first time
-	if !cache.WaitForCacheSync(stopCh, cd.cmListerSycned) {
-		return fmt.Errorf("Configuration: Failed to sync informer cache")
+	if !cache.WaitForCacheSync(stopCh, cd.cmSycned) {
+		glog.Error("configuration: failed to sync informer cache")
 	}
-	return nil
 }

 func (cd *ConfigData) addCM(obj interface{}) {
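Several controllers in this commit follow the same recipe: store the informer's HasSynced function at construction time and gate Run on cache.WaitForCacheSync. A minimal sketch of that recipe, with an illustrative Controller type that is not part of this repository:

// HasSynced / WaitForCacheSync pattern, reduced to its essentials.
package example

import (
	"github.com/golang/glog"
	kubeinformers "k8s.io/client-go/informers"
	"k8s.io/client-go/tools/cache"
)

type Controller struct {
	cmSynced cache.InformerSynced // set from informer.Informer().HasSynced
}

func NewController(factory kubeinformers.SharedInformerFactory) *Controller {
	cmInformer := factory.Core().V1().ConfigMaps()
	return &Controller{cmSynced: cmInformer.Informer().HasSynced}
}

func (c *Controller) Run(stopCh <-chan struct{}) {
	// Block until the shared informer cache has completed its first sync,
	// so lister lookups do not run against an empty cache.
	if !cache.WaitForCacheSync(stopCh, c.cmSynced) {
		glog.Error("failed to sync informer cache")
		return
	}
	// ... start workers here ...
}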
@@ -15,6 +15,31 @@ import (
 	"k8s.io/client-go/rest"
 )

+// InitTLSPemPair Loads or creates PEM private key and TLS certificate for webhook server.
+// Created pair is stored in cluster's secret.
+// Returns struct with key/certificate pair.
+func (c *Client) InitTLSPemPair(configuration *rest.Config) (*tls.TlsPemPair, error) {
+	certProps, err := c.GetTLSCertProps(configuration)
+	if err != nil {
+		return nil, err
+	}
+	tlsPair := c.ReadTlsPair(certProps)
+	if tls.IsTlsPairShouldBeUpdated(tlsPair) {
+		glog.Info("Generating new key/certificate pair for TLS")
+		tlsPair, err = c.GenerateTlsPemPair(certProps)
+		if err != nil {
+			return nil, err
+		}
+		if err = c.WriteTlsPair(certProps, tlsPair); err != nil {
+			return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
+		}
+		return tlsPair, nil
+	}
+
+	glog.Infoln("Using existing TLS key/certificate pair")
+	return tlsPair, nil
+}
+
 //GenerateTlsPemPair Issues TLS certificate for webhook server using given PEM private key
 // Returns signed and approved TLS certificate in PEM format
 func (c *Client) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.TlsPemPair, error) {
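Putting the pieces together, a caller builds a rest.Config, creates the dynamic client, and asks it for the webhook TLS pair. This sketch simply mirrors the main.go wiring shown earlier in the commit:

// Illustrative wiring only; matches the calls seen in the main.go diff.
package main

import (
	"github.com/golang/glog"
	"github.com/nirmata/kyverno/pkg/config"
	dclient "github.com/nirmata/kyverno/pkg/dclient"
)

func main() {
	clientConfig, err := config.CreateClientConfig("") // "" -> in-cluster
	if err != nil {
		glog.Fatalf("Error building kubeconfig: %v\n", err)
	}
	client, err := dclient.NewClient(clientConfig)
	if err != nil {
		glog.Fatalf("Error creating client: %v\n", err)
	}
	// Reads the existing key/cert pair from the cluster secret, or generates
	// and stores a fresh one when it is missing or stale.
	tlsPair, err := client.InitTLSPemPair(clientConfig)
	if err != nil {
		glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
	}
	_ = tlsPair
}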
@@ -14,14 +14,18 @@ import (
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
 	typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/client-go/util/workqueue"
 )

 //Generator generate events
 type Generator struct {
 	client *client.Client
-	pLister kyvernolister.ClusterPolicyLister
+	// list/get cluster policy
+	pLister kyvernolister.ClusterPolicyLister
+	// returns true if the cluster policy store has been synced at least once
+	pSynced cache.InformerSynced
 	queue workqueue.RateLimitingInterface
 	recorder record.EventRecorder
 }
@@ -38,6 +42,7 @@ func NewEventGenerator(client *client.Client, pInformer kyvernoinformer.ClusterP
 		client: client,
 		pLister: pInformer.Lister(),
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName),
+		pSynced: pInformer.Informer().HasSynced,
 		recorder: initRecorder(client),
 	}

@@ -86,6 +91,10 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
 	glog.Info("Starting event generator")
 	defer glog.Info("Shutting down event generator")

+	if !cache.WaitForCacheSync(stopCh, gen.pSynced) {
+		glog.Error("event generator: failed to sync informer cache")
+	}
+
 	for i := 0; i < workers; i++ {
 		go wait.Until(gen.runWorker, time.Second, stopCh)
 	}
@@ -39,16 +39,12 @@ type NamespaceController struct {

 	//nsLister provides expansion to the namespace lister to inject GVK for the resource
 	nsLister NamespaceListerExpansion
-	// nLsister can list/get namespaces from the shared informer's store
-	// nsLister v1CoreLister.NamespaceLister
-	// nsListerSynced returns true if the Namespace store has been synced at least once
-	nsListerSynced cache.InformerSynced
+	// nsSynced returns true if the Namespace store has been synced at least once
+	nsSynced cache.InformerSynced
 	// pvLister can list/get policy violation from the shared informer's store
 	pLister kyvernolister.ClusterPolicyLister
-	// pvListerSynced retrns true if the Policy store has been synced at least once
-	pvListerSynced cache.InformerSynced
-	// pvLister can list/get policy violation from the shared informer's store
-	pvLister kyvernolister.ClusterPolicyViolationLister
+	// pSynced retrns true if the Policy store has been synced at least once
+	pSynced cache.InformerSynced
 	// API to send policy stats for aggregation
 	policyStatus policy.PolicyStatusInterface
 	// eventGen provides interface to generate evenets
@@ -70,7 +66,6 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
 	client *client.Client,
 	nsInformer v1Informer.NamespaceInformer,
 	pInformer kyvernoinformer.ClusterPolicyInformer,
-	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
 	policyStatus policy.PolicyStatusInterface,
 	eventGen event.Interface,
 	configHandler config.Interface,
@@ -103,10 +98,9 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
 	nsc.syncHandler = nsc.syncNamespace

 	nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
-	nsc.nsListerSynced = nsInformer.Informer().HasSynced
+	nsc.nsSynced = nsInformer.Informer().HasSynced
 	nsc.pLister = pInformer.Lister()
-	nsc.pvListerSynced = pInformer.Informer().HasSynced
-	nsc.pvLister = pvInformer.Lister()
+	nsc.pSynced = pInformer.Informer().HasSynced
 	nsc.policyStatus = policyStatus

 	// resource manager
@@ -174,7 +168,8 @@ func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
 	glog.Info("Starting namespace controller")
 	defer glog.Info("Shutting down namespace controller")

-	if ok := cache.WaitForCacheSync(stopCh, nsc.nsListerSynced); !ok {
+	if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
+		glog.Error("namespace generator: failed to sync cache")
 		return
 	}
@@ -401,6 +401,7 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
 	defer glog.Info("Shutting down policy controller")

 	if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.pvListerSynced, pc.nspvListerSynced, pc.mwebhookconfigSynced) {
+		glog.Error("failed to sync informer cache")
 		return
 	}
 	for i := 0; i < workers; i++ {
@@ -3,8 +3,11 @@ package policystore
 import (
 	"sync"

+	"github.com/golang/glog"
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
+	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
+	"k8s.io/client-go/tools/cache"
 )

 type policyMap map[string]interface{}
@@ -13,9 +16,12 @@ type kindMap map[string]namespaceMap

 //PolicyStore Store the meta-data information to faster lookup policies
 type PolicyStore struct {
 	data map[string]namespaceMap
 	mu sync.RWMutex
+	// list/get cluster policy
 	pLister kyvernolister.ClusterPolicyLister
+	// returns true if the cluster policy store has been synced at least once
+	pSynched cache.InformerSynced
 }

 //UpdateInterface provides api to update policies
@@ -33,14 +39,22 @@ type LookupInterface interface {
 }

 // NewPolicyStore returns a new policy store
-func NewPolicyStore(pLister kyvernolister.ClusterPolicyLister) *PolicyStore {
+func NewPolicyStore(pInformer kyvernoinformer.ClusterPolicyInformer) *PolicyStore {
 	ps := PolicyStore{
 		data: make(kindMap),
-		pLister: pLister,
+		pLister: pInformer.Lister(),
+		pSynched: pInformer.Informer().HasSynced,
 	}
 	return &ps
 }

+//Run checks syncing
+func (ps *PolicyStore) Run(stopCh <-chan struct{}) {
+	if !cache.WaitForCacheSync(stopCh, ps.pSynched) {
+		glog.Error("policy meta store: failed to sync informer cache")
+	}
+}
+
 //Register a new policy
 func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
 	ps.mu.Lock()
@@ -12,13 +12,16 @@ import (
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
 	kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
+	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"

 	client "github.com/nirmata/kyverno/pkg/dclient"
 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/labels"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/util/workqueue"
 )

@@ -29,13 +32,20 @@ const workQueueRetryLimit = 3
 type Generator struct {
 	dclient *dclient.Client
 	pvInterface kyvernov1.KyvernoV1Interface
-	pvLister kyvernolister.ClusterPolicyViolationLister
-	nspvLister kyvernolister.NamespacedPolicyViolationLister
+	// get/list cluster policy violation
+	pvLister kyvernolister.ClusterPolicyViolationLister
+	// get/ist namespaced policy violation
+	nspvLister kyvernolister.NamespacedPolicyViolationLister
+	// returns true if the cluster policy store has been synced at least once
+	pvSynced cache.InformerSynced
+	// returns true if the namespaced cluster policy store has been synced at at least once
+	nspvSynced cache.InformerSynced
 	queue workqueue.RateLimitingInterface
 	dataStore *dataStore
 }

-func NewDataStore() *dataStore {
+//NewDataStore returns an instance of data store
+func newDataStore() *dataStore {
 	ds := dataStore{
 		data: make(map[string]Info),
 	}
@@ -95,15 +105,17 @@ type GeneratorInterface interface {

 // NewPVGenerator returns a new instance of policy violation generator
 func NewPVGenerator(client *kyvernoclient.Clientset, dclient *client.Client,
-	pvLister kyvernolister.ClusterPolicyViolationLister,
-	nspvLister kyvernolister.NamespacedPolicyViolationLister) *Generator {
+	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
+	nspvInformer kyvernoinformer.NamespacedPolicyViolationInformer) *Generator {
 	gen := Generator{
 		pvInterface: client.KyvernoV1(),
 		dclient: dclient,
-		pvLister: pvLister,
-		nspvLister: nspvLister,
+		pvLister: pvInformer.Lister(),
+		pvSynced: pvInformer.Informer().HasSynced,
+		nspvLister: nspvInformer.Lister(),
+		nspvSynced: nspvInformer.Informer().HasSynced,
 		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
-		dataStore: NewDataStore(),
+		dataStore: newDataStore(),
 	}
 	return &gen
 }
@@ -130,6 +142,10 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
 	glog.Info("Start policy violation generator")
 	defer glog.Info("Shutting down policy violation generator")

+	if !cache.WaitForCacheSync(stopCh, gen.pvSynced, gen.nspvSynced) {
+		glog.Error("policy violation generator: failed to sync informer cache")
+	}
+
 	for i := 0; i < workers; i++ {
 		go wait.Until(gen.runWorker, time.Second, stopCh)
 	}
@@ -331,8 +347,8 @@ func buildPVObj(policyName string, resourceSpec kyverno.ResourceSpec, rules []ky
 		"policy": policyName,
 		"resource": resourceSpec.ToKey(),
 	}
+	pv.SetGenerateName(fmt.Sprintf("%s-", policyName))
 	pv.SetLabels(labelMap)
-	pv.SetGenerateName("pv-")
 	return pv
 }
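The Run methods above all start a fixed number of workers with wait.Until once the caches are synced, draining a rate-limited workqueue. A minimal sketch of that worker loop, with an illustrative Generator type rather than the one in this package:

// Workqueue worker-loop pattern, reduced to its essentials.
package example

import (
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
)

type Generator struct {
	queue workqueue.RateLimitingInterface
}

func NewGenerator() *Generator {
	return &Generator{
		queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "example"),
	}
}

func (g *Generator) Run(workers int, stopCh <-chan struct{}) {
	defer g.queue.ShutDown()
	for i := 0; i < workers; i++ {
		go wait.Until(g.runWorker, time.Second, stopCh)
	}
	<-stopCh
}

func (g *Generator) runWorker() {
	for {
		key, shutdown := g.queue.Get()
		if shutdown {
			return
		}
		// process(key) would go here; Done releases the item and Forget
		// clears its rate-limit history after a successful sync.
		g.queue.Done(key)
		g.queue.Forget(key)
	}
}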
@@ -36,7 +36,7 @@ func buildNamespacedPVObj(policy string, resource kyverno.ResourceSpec, fRules [
 		"policy": policy,
 		"resource": resource.ToKey(),
 	}
-	pv.SetGenerateName("pv-")
+	pv.SetGenerateName(fmt.Sprintf("%s-", policy))
 	pv.SetLabels(labelMap)
 	return pv
 }
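Both hunks switch the violation objects from the fixed prefix "pv-" to a prefix derived from the policy name. With metadata.generateName, the API server appends a random suffix at creation time, so violations now sort under their policy. A small illustrative sketch, not repository code:

// generateName sketch: the server turns "disallow-root-user-" into a name
// such as "disallow-root-user-7k2ld" when the object is created.
package example

import (
	"fmt"

	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

func buildExamplePV(policyName string) *unstructured.Unstructured {
	pv := &unstructured.Unstructured{}
	pv.SetKind("ClusterPolicyViolation")
	pv.SetGenerateName(fmt.Sprintf("%s-", policyName))
	pv.SetLabels(map[string]string{"policy": policyName})
	return pv
}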
43 pkg/signal/signal.go (new file)

package signal

import (
	"os"
	"os/signal"
)

var onlyOneSignalHandler = make(chan struct{})
var shutdownHandler chan os.Signal

// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned
// which is closed on one of these signals. If a second signal is caught, the program
// is terminated with exit code 1.
func SetupSignalHandler() <-chan struct{} {
	close(onlyOneSignalHandler) // panics when called twice

	shutdownHandler = make(chan os.Signal, 2)

	stop := make(chan struct{})
	signal.Notify(shutdownHandler, shutdownSignals...)
	go func() {
		<-shutdownHandler
		close(stop)
		<-shutdownHandler
		os.Exit(1) // second signal. Exit directly.
	}()

	return stop
}

// RequestShutdown emulates a received event that is considered as shutdown signal (SIGTERM/SIGINT)
// This returns whether a handler was notified
func RequestShutdown() bool {
	if shutdownHandler != nil {
		select {
		case shutdownHandler <- shutdownSignals[0]:
			return true
		default:
		}
	}

	return false
}
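A minimal sketch of how a worker consumes the stop channel returned by SetupSignalHandler; the ticker loop is illustrative and not part of this commit:

package main

import (
	"fmt"
	"time"

	"github.com/nirmata/kyverno/pkg/signal"
)

func main() {
	stopCh := signal.SetupSignalHandler()
	go func() {
		for {
			select {
			case <-stopCh: // closed on the first SIGINT/SIGTERM
				fmt.Println("worker: shutting down")
				return
			case <-time.After(time.Second):
				fmt.Println("worker: doing work")
			}
		}
	}()
	<-stopCh // a second signal makes the handler call os.Exit(1)
}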
8 pkg/signal/signal_posix.go (new file)

package signal

import (
	"os"
	"syscall"
)

var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
8 pkg/signal/signal_windows.go (new file)

package signal

//TODO: how to pick files based on OS compilation ?
// import (
// 	"os"
// )

// var shutdownSignals = []os.Signal{os.Interrupt}
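The TODO above asks how to pick files per OS. Go does this with GOOS filename suffixes and build constraints: the _windows.go suffix already limits that file to Windows builds, while a name like signal_posix.go is not a recognized suffix and needs an explicit constraint. A hypothetical layout (not what this commit ships), shown with the //go:build form introduced in Go 1.17; older toolchains use the equivalent "// +build !windows" comment:

// --- signal_posix.go: excluded from Windows builds by the constraint below ---
//go:build !windows

package signal

import (
	"os"
	"syscall"
)

var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}

// --- signal_windows.go (separate file, selected by its _windows suffix) ---
// package signal
//
// import "os"
//
// // Windows has no SIGTERM; only Interrupt (Ctrl+C) is registered.
// var shutdownSignals = []os.Signal{os.Interrupt}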
@@ -1,24 +1,20 @@
 package version

+import (
+	"github.com/golang/glog"
+)
+
 // These fields are set during an official build
+// Global vars set from command-line arguments
 var (
 	BuildVersion = "--"
 	BuildHash = "--"
 	BuildTime = "--"
 )

-// VersionInfo gets json info about the agent version
-type VersionInfo struct {
-	BuildVersion string
-	BuildHash string
-	BuildTime string
-}
-
-// GetVersion gets the current agent version
-func GetVersion() *VersionInfo {
-	return &VersionInfo{
-		BuildVersion: BuildVersion,
-		BuildHash: BuildHash,
-		BuildTime: BuildTime,
-	}
-}
+//PrintVersionInfo displays the kyverno version - git version
+func PrintVersionInfo() {
+	glog.Infof("Kyverno version: %s\n", BuildVersion)
+	glog.Infof("Kyverno BuildHash: %s\n", BuildHash)
+	glog.Infof("Kyverno BuildTime: %s\n", BuildTime)
+}
@ -2,6 +2,7 @@ package webhookconfig
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/golang/glog"
|
"github.com/golang/glog"
|
||||||
|
@@ -71,9 +72,9 @@ func (wrc *WebhookRegistrationClient) Register() error {
 	return nil
 }
 
-// RemovePolicyWebhookConfigurations removes webhook configurations for reosurces and policy
+// RemoveWebhookConfigurations removes webhook configurations for reosurces and policy
 // called during webhook server shutdown
-func (wrc *WebhookRegistrationClient) RemovePolicyWebhookConfigurations(cleanUp chan<- struct{}) {
+func (wrc *WebhookRegistrationClient) RemoveWebhookConfigurations(cleanUp chan<- struct{}) {
 	//TODO: dupliate, but a placeholder to perform more error handlind during cleanup
 	wrc.removeWebhookConfigurations()
 	// close channel to notify cleanup is complete
@@ -214,12 +215,69 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
 	defer func() {
 		glog.V(4).Infof("Finished cleaning up webhookcongfigurations (%v)", time.Since(startTime))
 	}()
 
+	var wg sync.WaitGroup
+
+	wg.Add(3)
 	// mutating and validating webhook configuration for Kubernetes resources
-	wrc.RemoveResourceMutatingWebhookConfiguration()
+	go wrc.removeResourceMutatingWebhookConfiguration(&wg)
 
 	// mutating and validating webhook configurtion for Policy CRD resource
-	wrc.removePolicyWebhookConfigurations()
+	go wrc.removePolicyMutatingWebhookConfiguration(&wg)
+	go wrc.removePolicyValidatingWebhookConfiguration(&wg)
 
-	// muating webhook configuration use to verify if admission control flow is working or not
-	wrc.removeVerifyWebhookMutatingWebhookConfig()
+	// wait for the removal go routines to return
+	wg.Wait()
+}
+
+// wrapper to handle wait group
+// TODO: re-work with RemoveResourceMutatingWebhookConfiguration, as the only difference is wg handling
+func (wrc *WebhookRegistrationClient) removeResourceMutatingWebhookConfiguration(wg *sync.WaitGroup) {
+	defer wg.Done()
+	wrc.RemoveResourceMutatingWebhookConfiguration()
+}
+
+// delete policy mutating webhookconfigurations
+// handle wait group
+func (wrc *WebhookRegistrationClient) removePolicyMutatingWebhookConfiguration(wg *sync.WaitGroup) {
+	defer wg.Done()
+	// Mutating webhook configuration
+	var mutatingConfig string
+	if wrc.serverIP != "" {
+		mutatingConfig = config.PolicyMutatingWebhookConfigurationDebugName
+	} else {
+		mutatingConfig = config.PolicyMutatingWebhookConfigurationName
+	}
+
+	glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
+	err := wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
+	if errorsapi.IsNotFound(err) {
+		glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
+	} else if err != nil {
+		glog.Errorf("failed to delete policy webhook configuration %s: %v", mutatingConfig, err)
+	} else {
+		glog.V(4).Infof("succesfully deleted policy webhook configuration %s", mutatingConfig)
+	}
+}
+
+// delete policy validating webhookconfigurations
+// handle wait group
+func (wrc *WebhookRegistrationClient) removePolicyValidatingWebhookConfiguration(wg *sync.WaitGroup) {
+	defer wg.Done()
+	// Validating webhook configuration
+	var err error
+	var validatingConfig string
+	if wrc.serverIP != "" {
+		validatingConfig = config.PolicyValidatingWebhookConfigurationDebugName
+	} else {
+		validatingConfig = config.PolicyValidatingWebhookConfigurationName
+	}
+	glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
+	err = wrc.registrationClient.ValidatingWebhookConfigurations().Delete(validatingConfig, &v1.DeleteOptions{})
+	if errorsapi.IsNotFound(err) {
+		glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
+	} else if err != nil {
+		glog.Errorf("failed to delete policy webhook configuration %s: %v", validatingConfig, err)
+	} else {
+		glog.V(4).Infof("succesfully deleted policy webhook configuration %s", validatingConfig)
+	}
 }
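The cleanup path now deletes the three webhook configurations concurrently and blocks until all of them have returned. The same fan-out/wait pattern in isolation, with a placeholder task instead of the real API calls:

```go
package main

import (
	"fmt"
	"sync"
)

// deleteConfig stands in for one of the removal helpers above; the real code
// calls the admissionregistration API instead of printing.
func deleteConfig(name string, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Println("deleting webhook configuration:", name)
}

func main() {
	var wg sync.WaitGroup
	wg.Add(3) // one per configuration being removed

	go deleteConfig("resource-mutating", &wg)
	go deleteConfig("policy-mutating", &wg)
	go deleteConfig("policy-validating", &wg)

	// Shutdown proceeds only after every deletion goroutine has finished.
	wg.Wait()
}
```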
@@ -34,18 +34,24 @@ import (
 // WebhookServer contains configured TLS server with MutationWebhook.
 // MutationWebhook gets policies from policyController and takes control of the cluster with kubeclient.
 type WebhookServer struct {
 	server        http.Server
 	client        *client.Client
 	kyvernoClient *kyvernoclient.Clientset
-	pLister                   kyvernolister.ClusterPolicyLister
-	pvLister                  kyvernolister.ClusterPolicyViolationLister
-	namespacepvLister         kyvernolister.NamespacedPolicyViolationLister
-	pListerSynced             cache.InformerSynced
-	pvListerSynced            cache.InformerSynced
-	namespacepvListerSynced   cache.InformerSynced
-	rbLister                  rbaclister.RoleBindingLister
-	crbLister                 rbaclister.ClusterRoleBindingLister
-	eventGen                  event.Interface
+	// list/get cluster policy resource
+	pLister kyvernolister.ClusterPolicyLister
+	// returns true if the cluster policy store has synced atleast
+	pSynced cache.InformerSynced
+	// list/get role binding resource
+	rbLister rbaclister.RoleBindingLister
+	// return true if role bining store has synced atleast once
+	rbSynced cache.InformerSynced
+	// list/get cluster role binding resource
+	crbLister rbaclister.ClusterRoleBindingLister
+	// return true if cluster role binding store has synced atleast once
+	crbSynced cache.InformerSynced
+	// generate events
+	eventGen event.Interface
+	// webhook registration client
 	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
 	// API to send policy stats for aggregation
 	policyStatus policy.PolicyStatusInterface
@@ -68,8 +74,6 @@ func NewWebhookServer(
 	client *client.Client,
 	tlsPair *tlsutils.TlsPemPair,
 	pInformer kyvernoinformer.ClusterPolicyInformer,
-	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
-	namespacepvInformer kyvernoinformer.NamespacedPolicyViolationInformer,
 	rbInformer rbacinformer.RoleBindingInformer,
 	crbInformer rbacinformer.ClusterRoleBindingInformer,
 	eventGen event.Interface,
@@ -96,17 +100,15 @@
 		client:        client,
 		kyvernoClient: kyvernoClient,
 		pLister:       pInformer.Lister(),
-		pvLister:                  pvInformer.Lister(),
-		namespacepvLister:         namespacepvInformer.Lister(),
-		pListerSynced:             pvInformer.Informer().HasSynced,
-		pvListerSynced:            pInformer.Informer().HasSynced,
-		namespacepvListerSynced:   namespacepvInformer.Informer().HasSynced,
+		pSynced:       pInformer.Informer().HasSynced,
+		rbLister:      rbInformer.Lister(),
+		rbSynced:      rbInformer.Informer().HasSynced,
+		crbLister:     crbInformer.Lister(),
+		crbSynced:     crbInformer.Informer().HasSynced,
 		eventGen:                  eventGen,
 		webhookRegistrationClient: webhookRegistrationClient,
 		policyStatus:              policyStatus,
 		configHandler:             configHandler,
-		rbLister:                  rbInformer.Lister(),
-		crbLister:                 crbInformer.Lister(),
 		cleanUp:                   cleanUp,
 		lastReqTime:               checker.NewLastReqTime(),
 		pvGenerator:               pvGenerator,
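Each informer now contributes two things to the server: a lister for cache reads and a `HasSynced` function for the cache-sync gate used in `RunAsync` below. A compressed, illustrative version of that wiring using only the role-binding informer; the type and field names here are stand-ins:

```go
package webhooks

import (
	rbacinformer "k8s.io/client-go/informers/rbac/v1"
	rbaclister "k8s.io/client-go/listers/rbac/v1"
	"k8s.io/client-go/tools/cache"
)

// miniServer is an illustrative cut-down of WebhookServer: one lister plus
// its sync flag, captured from a shared informer the same way the constructor does.
type miniServer struct {
	rbLister rbaclister.RoleBindingLister
	rbSynced cache.InformerSynced
}

func newMiniServer(rbInformer rbacinformer.RoleBindingInformer) *miniServer {
	return &miniServer{
		rbLister: rbInformer.Lister(),             // read-only view of the informer cache
		rbSynced: rbInformer.Informer().HasSynced, // reports whether the initial list has completed
	}
}
```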
@@ -246,6 +248,10 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionReques
 
 // RunAsync TLS server in separate thread and returns control immediately
 func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
+	if !cache.WaitForCacheSync(stopCh, ws.pSynced, ws.rbSynced, ws.crbSynced) {
+		glog.Error("webhook: failed to sync informer cache")
+	}
+
 	go func(ws *WebhookServer) {
 		glog.V(3).Infof("serving on %s\n", ws.server.Addr)
 		if err := ws.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
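The new guard keeps the server from answering admission requests against caches that have not finished their initial sync. The same check pulled out into a small helper, as a sketch (the real code inlines it):

```go
package webhooks

import (
	"github.com/golang/glog"
	"k8s.io/client-go/tools/cache"
)

// waitForCaches blocks until every passed informer cache has completed its
// initial sync, or until stopCh closes; it logs and reports failure otherwise.
func waitForCaches(stopCh <-chan struct{}, synced ...cache.InformerSynced) bool {
	if !cache.WaitForCacheSync(stopCh, synced...) {
		glog.Error("webhook: failed to sync informer cache")
		return false
	}
	return true
}
```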
@@ -261,17 +267,17 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
 }
 
 // Stop TLS server and returns control after the server is shut down
-func (ws *WebhookServer) Stop() {
-	err := ws.server.Shutdown(context.Background())
+func (ws *WebhookServer) Stop(ctx context.Context) {
+	// cleanUp
+	// remove the static webhookconfigurations
+	go ws.webhookRegistrationClient.RemoveWebhookConfigurations(ws.cleanUp)
+	// shutdown http.Server with context timeout
+	err := ws.server.Shutdown(ctx)
 	if err != nil {
 		// Error from closing listeners, or context timeout:
 		glog.Info("Server Shutdown error: ", err)
 		ws.server.Close()
 	}
-	// cleanUp
-	// remove the static webhookconfigurations for policy CRD
-	ws.webhookRegistrationClient.RemovePolicyWebhookConfigurations(ws.cleanUp)
-
 }
 
 // bodyToAdmissionReview creates AdmissionReview object from request body
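Handing the context to `Stop` moves the shutdown deadline out to the caller. A sketch of how a caller might bound it, mirroring the Shutdown-then-Close fallback used above; the five-second budget is an arbitrary example:

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

// gracefulStop mirrors what Stop now does with the context it receives:
// attempt a graceful Shutdown, and force Close if that errors or times out.
func gracefulStop(srv *http.Server) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	if err := srv.Shutdown(ctx); err != nil {
		log.Println("server shutdown error:", err)
		srv.Close()
	}
}

func main() {
	srv := &http.Server{Addr: ":8443"}
	// ... srv would be serving TLS in a goroutine here ...
	gracefulStop(srv)
}
```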
@@ -1,9 +1,5 @@
 Use these scripts to prepare the controller for work.
-All these scripts should be launched from the root folder of the project, for example:
-`scripts/compile-image.sh`
-
-### compile-image.sh ###
-Compiles the project to go executable, generates docker image and pushes it to the repo. Has no arguments.
-
+All these scripts should be launched from the root folder of the project.
 ### generate-server-cert.sh ###
 Generates TLS certificate and key that used by webhook server. Example:
@@ -19,10 +15,8 @@ Prepares controller for free (local) or in-cluster use. Uses `generate-server-ce
 * `--serverIp` means the same as for `generate-server-cert.sh`
 Examples:
 `scripts/deploy-controller.sh --service=my-kyverno --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kyverno'
-`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with mnaster node at '192.168.10.117'
+`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with master node at '192.168.10.117'
 
-### test-web-hook.sh ###
-Quickly creates and deletes test config map. If your webhook server is running, you should see the corresponding output from it. Use this script after `deploy-controller.sh`.
-
 ### update-codegen.sh ###
 Generates additional code for controller object. You should resolve all dependencies before using it, see main Readme for details.
@@ -1,2 +0,0 @@
-kubectl delete -f definitions/install.yaml
-kubectl delete csr,MutatingWebhookConfiguration,ValidatingWebhookConfiguration --all
@@ -1,34 +0,0 @@
-#!/bin/bash
-
-default_version="dev-testing"
-version=$1
-
-if [[ -z "$1" ]]
-then
-    echo "Using default version: ${default_version}"
-    version="${default_version}"
-fi
-
-hub_user_name="nirmata"
-project_name="kyverno"
-
-echo "# Ensuring Go dependencies..."
-dep ensure -v || exit 2
-
-echo "# Building executable ${project_name}..."
-chmod +x scripts/update-codegen.sh
-scripts/update-codegen.sh
-make build || exit 3
-
-echo "# Building docker image ${hub_user_name}/${project_name}:${version}"
-cat <<EOF > Dockerfile
-FROM alpine:latest
-WORKDIR ~/
-ADD ${project_name} ./${project_name}
-ENTRYPOINT ["./${project_name}"]
-EOF
-tag="${hub_user_name}/${project_name}:${version}"
-docker build --no-cache -t "${tag}" . || exit 4
-
-echo "# Pushing image to repository..."
-docker push "${tag}" || exit 5
@@ -1,3 +0,0 @@
-cd "$(dirname "$0")"
-kubectl create -f resources/test-configmap.yaml
-kubectl delete -f resources/test-configmap.yaml
@@ -1,4 +0,0 @@
-#!/bin/bash
-# You should see the trace of requests in the output of webhook server
-kubectl create configmap test-config-map --from-literal="some_var=some_value"
-kubectl delete configmap test-config-map