Merge branch 'v1.1.0' into 504_bug
commit 89d0cc8799
33 changed files with 721 additions and 377 deletions
@@ -31,5 +31,6 @@ after_success:
if [ $TRAVIS_PULL_REQUEST == 'false' ]
then
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
make docker-publish
# make docker-publish-initContainer
# make docker-publish-kyverno
fi
Makefile (99 lines changed)
@@ -1,63 +1,82 @@
.DEFAULT_GOAL: build

# The CLI binary to build
BIN ?= kyverno
##################################
# DEFAULTS
##################################
REGISTRY=index.docker.io
REPO=$(REGISTRY)/nirmata/kyverno
IMAGE_TAG=$(GIT_VERSION)
GOOS ?= $(shell go env GOOS)
LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"

GIT_VERSION := $(shell git describe --dirty --always --tags)
GIT_BRANCH := $(shell git branch | grep \* | cut -d ' ' -f2)
GIT_HASH := $(GIT_BRANCH)/$(shell git log -1 --pretty=format:"%H")
TIMESTAMP := $(shell date '+%Y-%m-%d_%I:%M:%S%p')

PACKAGE ?=github.com/nirmata/kyverno
MAIN ?=$(PACKAGE)

LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"

# default docker hub
REGISTRY=index.docker.io
REPO=$(REGISTRY)/nirmata/kyverno
IMAGE_TAG=$(GIT_VERSION)

GOOS ?= $(shell go env GOOS)
OUTPUT=$(shell pwd)/_output/cli/$(BIN)
##################################
# KYVERNO
##################################

KYVERNO_PATH:= cmd/kyverno
build:
CGO_ENABLED=0 GOOS=linux go build -ldflags=$(LD_FLAGS) $(MAIN)
GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go

local:
go build -ldflags=$(LD_FLAGS) $(MAIN)
##################################
# INIT CONTAINER
##################################
INITC_PATH := cmd/initContainer
INITC_IMAGE := kyvernopre
initContainer:
GOOS=$(GOOS) go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go

cli: cli-dirs
GOOS=$(GOOS) \
go build \
-o $(OUTPUT) \
-ldflags $(LD_FLAGS) \
$(PACKAGE)/cmd/$(BIN)
.PHONY: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer

cli-dirs:
@mkdir -p _output/cli
docker-publish-initContainer: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer

clean:
go clean
docker-build-initContainer:
CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
echo $(PWD)/$(INITC_PATH)/
@docker build -f $(PWD)/$(INITC_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(PWD)/$(INITC_PATH)/

# docker image build targets
# user must be logged in the $(REGISTRY) to push images
.PHONY: docker-build docker-tag-repo docker-push
docker-tag-repo-initContainer:
@docker tag $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(INITC_IMAGE):latest

docker-publish: docker-build docker-tag-repo docker-push
docker-push-initContainer:
@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG)
@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):latest

docker-build:
@docker build -t $(REPO):$(IMAGE_TAG) .
##################################
# KYVERNO CONTAINER
##################################
.PHONY: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
KYVERNO_PATH := cmd/kyverno
KYVERNO_IMAGE := kyverno
docker-publish-kyverno: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno

docker-tag-repo:
@docker tag $(REPO):$(IMAGE_TAG) $(REPO):latest
docker-build-kyverno:
GO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
@docker build -f $(PWD)/$(KYVERNO_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(PWD)/$(KYVERNO_PATH)

docker-push:
@docker push $(REPO):$(IMAGE_TAG)
@docker push $(REPO):latest
docker-tag-repo-kyverno:
@docker tag $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest

## Testing & Code-Coverage
docker-push-kyverno:
@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG)
@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest


##################################
# CLI
##################################
CLI_PATH := cmd/cli
cli:
GOOS=$(GOOS) go build -o $(PWD)/$(CLI_PATH)/kyvernocli -ldflags=$(LD_FLAGS) $(PWD)/$(CLI_PATH)/main.go


##################################
# Testing & Code-Coverage
##################################

## variables
BIN_DIR := $(GOPATH)/bin
cmd/initContainer/Dockerfile (new file, 3 lines)
@@ -0,0 +1,3 @@
FROM scratch
ADD kyvernopre /kyvernopre
ENTRYPOINT ["/kyvernopre"]
cmd/initContainer/main.go (new file, 199 lines)
@@ -0,0 +1,199 @@
|
|||
/*
|
||||
Cleans up stale webhook configurations created by Kyverno that were not cleaned up
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/nirmata/kyverno/pkg/config"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
"github.com/nirmata/kyverno/pkg/signal"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
rest "k8s.io/client-go/rest"
|
||||
clientcmd "k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
var (
|
||||
kubeconfig string
|
||||
)
|
||||
|
||||
const (
|
||||
mutatingWebhookConfigKind string = "MutatingWebhookConfiguration"
|
||||
validatingWebhookConfigKind string = "ValidatingWebhookConfiguration"
|
||||
)
|
||||
|
||||
func main() {
|
||||
defer glog.Flush()
|
||||
// os signal handler
|
||||
stopCh := signal.SetupSignalHandler()
|
||||
// arguments
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
|
||||
|
||||
// create client config
|
||||
clientConfig, err := createClientConfig(kubeconfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error building kubeconfig: %v\n", err)
|
||||
}
|
||||
|
||||
// DYNAMIC CLIENT
|
||||
// - client for all registered resources
|
||||
client, err := client.NewClient(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
|
||||
requests := []request{
|
||||
// Resource
|
||||
request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
|
||||
request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},
|
||||
// Policy
|
||||
request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationName},
|
||||
request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationDebugName},
|
||||
request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationName},
|
||||
request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationDebugName},
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
failure := false
|
||||
// use a pipeline to pass requests to cleanup resources
|
||||
// generate requests
|
||||
in := gen(done, stopCh, requests...)
|
||||
// process requests
|
||||
// processing routine count : 2
|
||||
p1 := process(client, done, stopCh, in)
|
||||
p2 := process(client, done, stopCh, in)
|
||||
// merge results from processing routines
|
||||
for err := range merge(done, stopCh, p1, p2) {
|
||||
if err != nil {
|
||||
failure = true
|
||||
glog.Errorf("failed to cleanup: %v", err)
|
||||
}
|
||||
}
|
||||
// if any request failed, exit the process with an error
|
||||
if failure {
|
||||
glog.Errorf("failed to cleanup webhook configurations")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func removeWebhookIfExists(client *client.Client, kind string, name string) error {
|
||||
var err error
|
||||
// Get resource
|
||||
_, err = client.GetResource(kind, "", name)
|
||||
if errors.IsNotFound(err) {
|
||||
glog.V(4).Infof("%s(%s) not found", name, kind)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get resource %s(%s)", name, kind)
|
||||
return err
|
||||
}
|
||||
// Delete resource
|
||||
err = client.DeleteResouce(kind, "", name, false)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to delete resource %s(%s)", name, kind)
|
||||
return err
|
||||
}
|
||||
glog.Infof("cleaned up resource %s(%s)", name, kind)
|
||||
return nil
|
||||
}
|
||||
|
||||
func createClientConfig(kubeconfig string) (*rest.Config, error) {
|
||||
if kubeconfig == "" {
|
||||
glog.Info("Using in-cluster configuration")
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
glog.Infof("Using configuration from '%s'", kubeconfig)
|
||||
return clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
}
|
||||
|
||||
type request struct {
|
||||
kind string
|
||||
name string
|
||||
}
|
||||
|
||||
/* Processing Pipeline
|
||||
-> Process Requests
|
||||
Generate Requests -> Process Requests -> Merge Results
|
||||
-> Process Requests
|
||||
- number of processes can be controlled
|
||||
- stop processing on SIGTERM OR SIGNKILL signal
|
||||
- stop processing if any process fails(supported)
|
||||
*/
|
||||
// Generates requests to be processed
|
||||
func gen(done <-chan struct{}, stopCh <-chan struct{}, requests ...request) <-chan request {
|
||||
out := make(chan request)
|
||||
go func() {
|
||||
defer close(out)
|
||||
for _, req := range requests {
|
||||
select {
|
||||
case out <- req:
|
||||
case <-done:
|
||||
println("done generate")
|
||||
return
|
||||
case <-stopCh:
|
||||
println("shutting down generate")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// processes the requests
|
||||
func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{}, requests <-chan request) <-chan error {
|
||||
out := make(chan error)
|
||||
go func() {
|
||||
defer close(out)
|
||||
for req := range requests {
|
||||
select {
|
||||
case out <- removeWebhookIfExists(client, req.kind, req.name):
|
||||
case <-done:
|
||||
println("done process")
|
||||
return
|
||||
case <-stopCh:
|
||||
println("shutting down process")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// waits for all processes to be complete and merges result
|
||||
func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan error) <-chan error {
|
||||
var wg sync.WaitGroup
|
||||
out := make(chan error)
|
||||
// gets the output from each process
|
||||
output := func(ch <-chan error) {
|
||||
defer wg.Done()
|
||||
for err := range ch {
|
||||
select {
|
||||
case out <- err:
|
||||
case <-done:
|
||||
println("done merge")
|
||||
return
|
||||
case <-stopCh:
|
||||
println("shutting down merge")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
wg.Add(len(processes))
|
||||
for _, process := range processes {
|
||||
go output(process)
|
||||
}
|
||||
|
||||
// close when all the process goroutines are done
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(out)
|
||||
}()
|
||||
return out
|
||||
}
|
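Note: the gen/process/merge functions above follow Go's standard fan-out/fan-in pipeline pattern. A minimal, self-contained sketch of the same pattern (hypothetical request type and names, not part of this commit) looks like:

```go
package main

import (
    "fmt"
    "sync"
)

// gen fans requests out onto a channel until done is closed.
func gen(done <-chan struct{}, reqs ...string) <-chan string {
    out := make(chan string)
    go func() {
        defer close(out)
        for _, r := range reqs {
            select {
            case out <- r:
            case <-done:
                return
            }
        }
    }()
    return out
}

// process consumes requests and emits one result per request.
func process(done <-chan struct{}, in <-chan string) <-chan error {
    out := make(chan error)
    go func() {
        defer close(out)
        for r := range in {
            select {
            case out <- fmt.Errorf("processed %s", r): // stand-in for real work
            case <-done:
                return
            }
        }
    }()
    return out
}

// merge fans results from several workers back into a single channel.
func merge(done <-chan struct{}, chans ...<-chan error) <-chan error {
    var wg sync.WaitGroup
    out := make(chan error)
    forward := func(c <-chan error) {
        defer wg.Done()
        for err := range c {
            select {
            case out <- err:
            case <-done:
                return
            }
        }
    }
    wg.Add(len(chans))
    for _, c := range chans {
        go forward(c)
    }
    go func() { wg.Wait(); close(out) }()
    return out
}

func main() {
    done := make(chan struct{})
    defer close(done)
    in := gen(done, "a", "b", "c")
    // two workers read from the same input channel, results are merged
    for err := range merge(done, process(done, in), process(done, in)) {
        fmt.Println(err)
    }
}
```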
|
@ -1,6 +1,7 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
|
@ -8,17 +9,18 @@ import (
|
|||
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
|
||||
"github.com/nirmata/kyverno/pkg/config"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
dclient "github.com/nirmata/kyverno/pkg/dclient"
|
||||
event "github.com/nirmata/kyverno/pkg/event"
|
||||
"github.com/nirmata/kyverno/pkg/namespace"
|
||||
"github.com/nirmata/kyverno/pkg/policy"
|
||||
"github.com/nirmata/kyverno/pkg/policystore"
|
||||
"github.com/nirmata/kyverno/pkg/policyviolation"
|
||||
"github.com/nirmata/kyverno/pkg/signal"
|
||||
"github.com/nirmata/kyverno/pkg/utils"
|
||||
"github.com/nirmata/kyverno/pkg/version"
|
||||
"github.com/nirmata/kyverno/pkg/webhookconfig"
|
||||
"github.com/nirmata/kyverno/pkg/webhooks"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
"k8s.io/sample-controller/pkg/signals"
|
||||
)
|
||||
|
||||
var (
|
||||
|
@ -34,15 +36,14 @@ var (
|
|||
|
||||
func main() {
|
||||
defer glog.Flush()
|
||||
printVersionInfo()
|
||||
// profile cpu and memory consumption
|
||||
prof = enableProfiling(cpu, memory)
|
||||
version.PrintVersionInfo()
|
||||
|
||||
// cleanUp Channel
|
||||
cleanUp := make(chan struct{})
|
||||
// SIGINT & SIGTERM channel
|
||||
stopCh := signals.SetupSignalHandler()
|
||||
// handle os signals
|
||||
stopCh := signal.SetupSignalHandler()
|
||||
// CLIENT CONFIG
|
||||
clientConfig, err := createClientConfig(kubeconfig)
|
||||
clientConfig, err := config.CreateClientConfig(kubeconfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error building kubeconfig: %v\n", err)
|
||||
}
|
||||
|
@ -58,7 +59,7 @@ func main() {
|
|||
|
||||
// DYNAMIC CLIENT
|
||||
// - client for all registered resources
|
||||
client, err := client.NewClient(clientConfig)
|
||||
client, err := dclient.NewClient(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
|
@ -74,7 +75,11 @@ func main() {
|
|||
}
|
||||
|
||||
// WEBHOOK REGISTRATION CLIENT
|
||||
webhookRegistrationClient, err := webhookconfig.NewWebhookRegistrationClient(clientConfig, client, serverIP, int32(webhookTimeout))
|
||||
webhookRegistrationClient, err := webhookconfig.NewWebhookRegistrationClient(
|
||||
clientConfig,
|
||||
client,
|
||||
serverIP,
|
||||
int32(webhookTimeout))
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
|
||||
}
|
||||
|
@ -84,36 +89,58 @@ func main() {
|
|||
// - Policy
|
||||
// - PolicyViolation
|
||||
// - cache resync time: 10 seconds
|
||||
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, 10*time.Second)
|
||||
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(
|
||||
pclient,
|
||||
10*time.Second)
|
||||
|
||||
// KUBERNETES RESOURCES INFORMER
|
||||
// watches namespace resource
|
||||
// - cache resync time: 10 seconds
|
||||
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second)
|
||||
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(
|
||||
kubeClient,
|
||||
10*time.Second)
|
||||
|
||||
// Configuration Data
|
||||
// dynamically load the configuration from the configMap
|
||||
// - resource filters
|
||||
// if the configMap is updated, the configuration will be updated
|
||||
configData := config.NewConfigData(kubeClient, kubeInformer.Core().V1().ConfigMaps(), filterK8Resources)
|
||||
configData := config.NewConfigData(
|
||||
kubeClient,
|
||||
kubeInformer.Core().V1().ConfigMaps(),
|
||||
filterK8Resources)
|
||||
|
||||
// Policy meta-data store
|
||||
policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies().Lister())
|
||||
policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies())
|
||||
|
||||
// EVENT GENERATOR
|
||||
// - generate event with retry mechanism
|
||||
egen := event.NewEventGenerator(client, pInformer.Kyverno().V1().ClusterPolicies())
|
||||
egen := event.NewEventGenerator(
|
||||
client,
|
||||
pInformer.Kyverno().V1().ClusterPolicies())
|
||||
|
||||
// POLICY VIOLATION GENERATOR
|
||||
// -- generate policy violation
|
||||
pvgen := policyviolation.NewPVGenerator(pclient, client, pInformer.Kyverno().V1().ClusterPolicyViolations().Lister(), pInformer.Kyverno().V1().NamespacedPolicyViolations().Lister())
|
||||
pvgen := policyviolation.NewPVGenerator(pclient,
|
||||
client,
|
||||
pInformer.Kyverno().V1().ClusterPolicyViolations(),
|
||||
pInformer.Kyverno().V1().NamespacedPolicyViolations())
|
||||
|
||||
// POLICY CONTROLLER
|
||||
// - reconciliation policy and policy violation
|
||||
// - process policy on existing resources
|
||||
// - status aggregator: receives stats when a policy is applied
|
||||
// & updates the policy status
|
||||
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pInformer.Kyverno().V1().NamespacedPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, configData, pvgen, policyMetaStore)
|
||||
pc, err := policy.NewPolicyController(pclient,
|
||||
client,
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
pInformer.Kyverno().V1().ClusterPolicyViolations(),
|
||||
pInformer.Kyverno().V1().NamespacedPolicyViolations(),
|
||||
kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(),
|
||||
webhookRegistrationClient,
|
||||
configData,
|
||||
egen,
|
||||
pvgen,
|
||||
policyMetaStore)
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating policy controller: %v\n", err)
|
||||
}
|
||||
|
@ -121,22 +148,39 @@ func main() {
|
|||
// POLICY VIOLATION CONTROLLER
|
||||
// policy violation cleanup if the corresponding resource is deleted
|
||||
// status: lastUpdateTime
|
||||
pvc, err := policyviolation.NewPolicyViolationController(client, pclient, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations())
|
||||
pvc, err := policyviolation.NewPolicyViolationController(
|
||||
client,
|
||||
pclient,
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
pInformer.Kyverno().V1().ClusterPolicyViolations())
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating cluster policy violation controller: %v\n", err)
|
||||
}
|
||||
|
||||
nspvc, err := policyviolation.NewNamespacedPolicyViolationController(client, pclient, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().NamespacedPolicyViolations())
|
||||
nspvc, err := policyviolation.NewNamespacedPolicyViolationController(
|
||||
client,
|
||||
pclient,
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
pInformer.Kyverno().V1().NamespacedPolicyViolations())
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating namespaced policy violation controller: %v\n", err)
|
||||
}
|
||||
|
||||
// GENERATE CONTROLLER
|
||||
// - watches for Namespace resource and generates resource based on the policy generate rule
|
||||
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData, pvgen, policyMetaStore)
|
||||
nsc := namespace.NewNamespaceController(
|
||||
pclient,
|
||||
client,
|
||||
kubeInformer.Core().V1().Namespaces(),
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
pc.GetPolicyStatusAggregator(),
|
||||
egen,
|
||||
configData,
|
||||
pvgen,
|
||||
policyMetaStore)
|
||||
|
||||
// CONFIGURE CERTIFICATES
|
||||
tlsPair, err := initTLSPemPair(clientConfig, client)
|
||||
tlsPair, err := client.InitTLSPemPair(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
|
||||
}
|
||||
|
@ -156,17 +200,29 @@ func main() {
|
|||
// -- annotations on resources with update details on mutation JSON patches
|
||||
// -- generate policy violation resource
|
||||
// -- generate events on policy and resource
|
||||
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pInformer.Kyverno().V1().NamespacedPolicyViolations(),
|
||||
kubeInformer.Rbac().V1().RoleBindings(), kubeInformer.Rbac().V1().ClusterRoleBindings(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, policyMetaStore, pvgen, cleanUp)
|
||||
server, err := webhooks.NewWebhookServer(
|
||||
pclient,
|
||||
client,
|
||||
tlsPair,
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
kubeInformer.Rbac().V1().RoleBindings(),
|
||||
kubeInformer.Rbac().V1().ClusterRoleBindings(),
|
||||
egen,
|
||||
webhookRegistrationClient,
|
||||
pc.GetPolicyStatusAggregator(),
|
||||
configData,
|
||||
policyMetaStore,
|
||||
pvgen,
|
||||
cleanUp)
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to create webhook server: %v\n", err)
|
||||
}
|
||||
// Start the components
|
||||
pInformer.Start(stopCh)
|
||||
kubeInformer.Start(stopCh)
|
||||
if err := configData.Run(stopCh); err != nil {
|
||||
glog.Fatalf("Unable to load dynamic configuration: %v\n", err)
|
||||
}
|
||||
|
||||
go configData.Run(stopCh)
|
||||
go policyMetaStore.Run(stopCh)
|
||||
go pc.Run(1, stopCh)
|
||||
go pvc.Run(1, stopCh)
|
||||
go nspvc.Run(1, stopCh)
|
||||
|
@ -181,22 +237,23 @@ func main() {
|
|||
server.RunAsync(stopCh)
|
||||
|
||||
<-stopCh
|
||||
disableProfiling(prof)
|
||||
server.Stop()
|
||||
|
||||
// by default http.Server waits indefinitely for connections to return to idle and then shuts down
|
||||
// adding a threshold will handle zombie connections
|
||||
// adjust the context deadline to 5 seconds
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer func() {
|
||||
cancel()
|
||||
}()
|
||||
// cleanup webhookconfigurations followed by webhook shutdown
|
||||
server.Stop(ctx)
|
||||
// resource cleanup
|
||||
// remove webhook configurations
|
||||
<-cleanUp
|
||||
glog.Info("successful shutdown of kyverno controller")
|
||||
}
|
||||
|
||||
func init() {
|
||||
// profiling feature gate
|
||||
// cpu and memory profiling cannot be enabled at the same time
// if both are enabled, cpu profiling is used by default
|
||||
flag.BoolVar(&cpu, "cpu", false, "cpu profiling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
|
||||
flag.BoolVar(&memory, "memory", false, "memory profiling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
|
||||
//TODO: this has been added to support command-line arguments for backward compatibility
// it will be removed in the future and the configuration will be set only via configMaps
|
||||
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
|
||||
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
|
|
@ -410,6 +410,9 @@ spec:
|
|||
app: kyverno
|
||||
spec:
|
||||
serviceAccountName: kyverno-service-account
|
||||
initContainers:
|
||||
- name: kyverno-pre
|
||||
image: nirmata/kyvernopre:latest
|
||||
containers:
|
||||
- name: kyverno
|
||||
image: nirmata/kyverno:latest
|
||||
|
|
init.go (102 lines changed)
@@ -1,102 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/profile"
|
||||
|
||||
"github.com/golang/glog"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
tls "github.com/nirmata/kyverno/pkg/tls"
|
||||
"github.com/nirmata/kyverno/pkg/version"
|
||||
rest "k8s.io/client-go/rest"
|
||||
clientcmd "k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
func printVersionInfo() {
|
||||
v := version.GetVersion()
|
||||
glog.Infof("Kyverno version: %s\n", v.BuildVersion)
|
||||
glog.Infof("Kyverno BuildHash: %s\n", v.BuildHash)
|
||||
glog.Infof("Kyverno BuildTime: %s\n", v.BuildTime)
|
||||
}
|
||||
|
||||
func createClientConfig(kubeconfig string) (*rest.Config, error) {
|
||||
if kubeconfig == "" {
|
||||
glog.Info("Using in-cluster configuration")
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
glog.Infof("Using configuration from '%s'", kubeconfig)
|
||||
return clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
}
|
||||
|
||||
// Loads or creates PEM private key and TLS certificate for webhook server.
|
||||
// Created pair is stored in cluster's secret.
|
||||
// Returns struct with key/certificate pair.
|
||||
func initTLSPemPair(configuration *rest.Config, client *client.Client) (*tls.TlsPemPair, error) {
|
||||
certProps, err := client.GetTLSCertProps(configuration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsPair := client.ReadTlsPair(certProps)
|
||||
if tls.IsTlsPairShouldBeUpdated(tlsPair) {
|
||||
glog.Info("Generating new key/certificate pair for TLS")
|
||||
tlsPair, err = client.GenerateTlsPemPair(certProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = client.WriteTlsPair(certProps, tlsPair); err != nil {
|
||||
return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
|
||||
}
|
||||
return tlsPair, nil
|
||||
}
|
||||
|
||||
glog.Infoln("Using existing TLS key/certificate pair")
|
||||
return tlsPair, nil
|
||||
}
|
||||
|
||||
var prof interface {
|
||||
Stop()
|
||||
}
|
||||
|
||||
func enableProfiling(cpu, memory bool) interface {
|
||||
Stop()
|
||||
} {
|
||||
|
||||
file := "/opt/nirmata/kyverno/" + randomString(6)
|
||||
if cpu {
|
||||
glog.Infof("Enable cpu profiling ...")
|
||||
prof = profile.Start(profile.CPUProfile, profile.ProfilePath(file))
|
||||
} else if memory {
|
||||
glog.Infof("Enable memory profiling ...")
|
||||
prof = profile.Start(profile.MemProfile, profile.ProfilePath(file))
|
||||
}
|
||||
|
||||
return prof
|
||||
}
|
||||
|
||||
func disableProfiling(p interface{ Stop() }) {
|
||||
if p != nil {
|
||||
p.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// generate random string
|
||||
const charset = "abcdefghijklmnopqrstuvwxyz" +
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
|
||||
var seededRand *rand.Rand = rand.New(
|
||||
rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
func stringWithCharset(length int, charset string) string {
|
||||
b := make([]byte, length)
|
||||
for i := range b {
|
||||
b[i] = charset[seededRand.Intn(len(charset))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func randomString(length int) string {
|
||||
return stringWithCharset(length, charset)
|
||||
}
|
|
@ -1,6 +1,12 @@
|
|||
package config
|
||||
|
||||
import "flag"
|
||||
import (
|
||||
"flag"
|
||||
|
||||
"github.com/golang/glog"
|
||||
rest "k8s.io/client-go/rest"
|
||||
clientcmd "k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
const (
|
||||
// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
|
||||
|
@ -70,3 +76,13 @@ func LogDefaultFlags() {
|
|||
flag.Set("stderrthreshold", "WARNING")
|
||||
flag.Set("v", "2")
|
||||
}
|
||||
|
||||
//CreateClientConfig creates client config
|
||||
func CreateClientConfig(kubeconfig string) (*rest.Config, error) {
|
||||
if kubeconfig == "" {
|
||||
glog.Info("Using in-cluster configuration")
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
glog.Infof("Using configuration from '%s'", kubeconfig)
|
||||
return clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
}
|
||||
|
|
|
@ -30,7 +30,7 @@ type ConfigData struct {
|
|||
// configuration data
|
||||
filters []k8Resource
|
||||
// hasSynced
|
||||
cmListerSycned cache.InformerSynced
|
||||
cmSycned cache.InformerSynced
|
||||
}
|
||||
|
||||
// ToFilter checks if the given resource is set to be filtered in the configuration
|
||||
|
@ -57,9 +57,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
|
|||
glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
|
||||
}
|
||||
cd := ConfigData{
|
||||
client: rclient,
|
||||
cmName: os.Getenv(cmNameEnv),
|
||||
cmListerSycned: cmInformer.Informer().HasSynced,
|
||||
client: rclient,
|
||||
cmName: os.Getenv(cmNameEnv),
|
||||
cmSycned: cmInformer.Informer().HasSynced,
|
||||
}
|
||||
//TODO: this has been added to support command-line arguments for backward compatibility
// it will be removed in the future and the configuration will be set only via configMaps
|
||||
|
@ -76,12 +76,12 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
|
|||
return &cd
|
||||
}
|
||||
|
||||
func (cd *ConfigData) Run(stopCh <-chan struct{}) error {
|
||||
//Run checks syncing
|
||||
func (cd *ConfigData) Run(stopCh <-chan struct{}) {
|
||||
// wait for cache to populate first time
|
||||
if !cache.WaitForCacheSync(stopCh, cd.cmListerSycned) {
|
||||
return fmt.Errorf("Configuration: Failed to sync informer cache")
|
||||
if !cache.WaitForCacheSync(stopCh, cd.cmSycned) {
|
||||
glog.Error("configuration: failed to sync informer cache")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cd *ConfigData) addCM(obj interface{}) {
|
||||
|
|
|
@ -15,6 +15,31 @@ import (
|
|||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// InitTLSPemPair Loads or creates PEM private key and TLS certificate for webhook server.
|
||||
// Created pair is stored in cluster's secret.
|
||||
// Returns struct with key/certificate pair.
|
||||
func (c *Client) InitTLSPemPair(configuration *rest.Config) (*tls.TlsPemPair, error) {
|
||||
certProps, err := c.GetTLSCertProps(configuration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsPair := c.ReadTlsPair(certProps)
|
||||
if tls.IsTlsPairShouldBeUpdated(tlsPair) {
|
||||
glog.Info("Generating new key/certificate pair for TLS")
|
||||
tlsPair, err = c.GenerateTlsPemPair(certProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = c.WriteTlsPair(certProps, tlsPair); err != nil {
|
||||
return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
|
||||
}
|
||||
return tlsPair, nil
|
||||
}
|
||||
|
||||
glog.Infoln("Using existing TLS key/certificate pair")
|
||||
return tlsPair, nil
|
||||
}
|
||||
|
||||
//GenerateTlsPemPair Issues TLS certificate for webhook server using given PEM private key
|
||||
// Returns signed and approved TLS certificate in PEM format
|
||||
func (c *Client) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.TlsPemPair, error) {
|
||||
|
|
|
@ -14,14 +14,18 @@ import (
|
|||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
//Generator generate events
|
||||
type Generator struct {
|
||||
client *client.Client
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
client *client.Client
|
||||
// list/get cluster policy
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
// returns true if the cluster policy store has been synced at least once
|
||||
pSynced cache.InformerSynced
|
||||
queue workqueue.RateLimitingInterface
|
||||
recorder record.EventRecorder
|
||||
}
|
||||
|
@ -38,6 +42,7 @@ func NewEventGenerator(client *client.Client, pInformer kyvernoinformer.ClusterP
|
|||
client: client,
|
||||
pLister: pInformer.Lister(),
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName),
|
||||
pSynced: pInformer.Informer().HasSynced,
|
||||
recorder: initRecorder(client),
|
||||
}
|
||||
|
||||
|
@ -86,6 +91,10 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
|
|||
glog.Info("Starting event generator")
|
||||
defer glog.Info("Shutting down event generator")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, gen.pSynced) {
|
||||
glog.Error("event generator: failed to sync informer cache")
|
||||
}
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(gen.runWorker, time.Second, stopCh)
|
||||
}
|
||||
|
|
|
@ -39,14 +39,12 @@ type NamespaceController struct {
|
|||
|
||||
//nsLister provides expansion to the namespace lister to inject GVK for the resource
|
||||
nsLister NamespaceListerExpansion
|
||||
// nsListerSynced returns true if the Namespace store has been synced at least once
|
||||
nsListerSynced cache.InformerSynced
|
||||
// nsSynced returns true if the Namespace store has been synced at least once
|
||||
nsSynced cache.InformerSynced
|
||||
// pvLister can list/get policy violation from the shared informer's store
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
// pvListerSynced returns true if the Policy store has been synced at least once
|
||||
pvListerSynced cache.InformerSynced
|
||||
// pvLister can list/get policy violation from the shared informer's store
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister
|
||||
// pSynced returns true if the Policy store has been synced at least once
|
||||
pSynced cache.InformerSynced
|
||||
// API to send policy stats for aggregation
|
||||
policyStatus policy.PolicyStatusInterface
|
||||
// eventGen provides an interface to generate events
|
||||
|
@ -68,7 +66,6 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
|
|||
client *client.Client,
|
||||
nsInformer v1Informer.NamespaceInformer,
|
||||
pInformer kyvernoinformer.ClusterPolicyInformer,
|
||||
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
|
||||
policyStatus policy.PolicyStatusInterface,
|
||||
eventGen event.Interface,
|
||||
configHandler config.Interface,
|
||||
|
@ -101,10 +98,9 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
|
|||
nsc.syncHandler = nsc.syncNamespace
|
||||
|
||||
nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
|
||||
nsc.nsListerSynced = nsInformer.Informer().HasSynced
|
||||
nsc.nsSynced = nsInformer.Informer().HasSynced
|
||||
nsc.pLister = pInformer.Lister()
|
||||
nsc.pvListerSynced = pInformer.Informer().HasSynced
|
||||
nsc.pvLister = pvInformer.Lister()
|
||||
nsc.pSynced = pInformer.Informer().HasSynced
|
||||
nsc.policyStatus = policyStatus
|
||||
|
||||
// resource manager
|
||||
|
@ -172,7 +168,8 @@ func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
|
|||
glog.Info("Starting namespace controller")
|
||||
defer glog.Info("Shutting down namespace controller")
|
||||
|
||||
if ok := cache.WaitForCacheSync(stopCh, nsc.nsListerSynced); !ok {
|
||||
if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
|
||||
glog.Error("namespace generator: failed to sync cache")
|
||||
return
|
||||
}
|
||||
|
||||
|
|
|
@ -28,9 +28,9 @@ import (
|
|||
utilerrors "k8s.io/apimachinery/pkg/util/errors"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
webhookinformer "k8s.io/client-go/informers/admissionregistration/v1beta1"
|
||||
mconfiginformer "k8s.io/client-go/informers/admissionregistration/v1beta1"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
webhooklister "k8s.io/client-go/listers/admissionregistration/v1beta1"
|
||||
mconfiglister "k8s.io/client-go/listers/admissionregistration/v1beta1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
|
@ -71,10 +71,12 @@ type PolicyController struct {
|
|||
pListerSynced cache.InformerSynced
|
||||
// pvListerSynced returns true if the Policy store has been synced at least once
|
||||
pvListerSynced cache.InformerSynced
|
||||
// pvListerSynced returns true if the Policy store has been synced at least once
|
||||
// pvListerSynced returns true if the Policy Violation store has been synced at least once
|
||||
nspvListerSynced cache.InformerSynced
|
||||
// mutationwebhookLister can list/get mutatingwebhookconfigurations
|
||||
mutationwebhookLister webhooklister.MutatingWebhookConfigurationLister
|
||||
// mwebhookconfigSynced returns true if the Mutating Webhook Config store has been synced at least once
|
||||
mwebhookconfigSynced cache.InformerSynced
|
||||
// list/get mutatingwebhookconfigurations
|
||||
mWebhookConfigLister mconfiglister.MutatingWebhookConfigurationLister
|
||||
// WebhookRegistrationClient
|
||||
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
|
||||
// Resource manager, manages the mapping for already processed resource
|
||||
|
@ -90,10 +92,15 @@ type PolicyController struct {
|
|||
}
|
||||
|
||||
// NewPolicyController create a new PolicyController
|
||||
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer,
|
||||
pvInformer kyvernoinformer.ClusterPolicyViolationInformer, nspvInformer kyvernoinformer.NamespacedPolicyViolationInformer,
|
||||
eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer,
|
||||
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient, configHandler config.Interface,
|
||||
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
|
||||
client *client.Client,
|
||||
pInformer kyvernoinformer.ClusterPolicyInformer,
|
||||
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
|
||||
nspvInformer kyvernoinformer.NamespacedPolicyViolationInformer,
|
||||
mconfigwebhookinformer mconfiginformer.MutatingWebhookConfigurationInformer,
|
||||
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
|
||||
configHandler config.Interface,
|
||||
eventGen event.Interface,
|
||||
pvGenerator policyviolation.GeneratorInterface,
|
||||
pMetaStore policystore.UpdateInterface) (*PolicyController, error) {
|
||||
// Event broad caster
|
||||
|
@ -147,9 +154,8 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
|
|||
pc.pListerSynced = pInformer.Informer().HasSynced
|
||||
pc.pvListerSynced = pvInformer.Informer().HasSynced
|
||||
pc.nspvListerSynced = nspvInformer.Informer().HasSynced
|
||||
|
||||
pc.mutationwebhookLister = webhookInformer.Lister()
|
||||
|
||||
pc.mwebhookconfigSynced = mconfigwebhookinformer.Informer().HasSynced
|
||||
pc.mWebhookConfigLister = mconfigwebhookinformer.Lister()
|
||||
// resource manager
|
||||
// rebuild after 300 seconds/ 5 mins
|
||||
//TODO: pass the time in seconds instead of converting it internally
|
||||
|
@ -394,7 +400,8 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
|
|||
glog.Info("Starting policy controller")
|
||||
defer glog.Info("Shutting down policy controller")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.pvListerSynced, pc.nspvListerSynced) {
|
||||
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.pvListerSynced, pc.nspvListerSynced, pc.mwebhookconfigSynced) {
|
||||
glog.Error("failed to sync informer cache")
|
||||
return
|
||||
}
|
||||
for i := 0; i < workers; i++ {
|
||||
|
|
|
@ -9,6 +9,17 @@ import (
|
|||
func (pc *PolicyController) removeResourceWebhookConfiguration() error {
|
||||
removeWebhookConfig := func() error {
|
||||
var err error
|
||||
// check informer cache
|
||||
configName := pc.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
|
||||
config, err := pc.mWebhookConfigLister.Get(configName)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("failed to list mutating webhook config: %v", err)
|
||||
return err
|
||||
}
|
||||
if config == nil {
|
||||
// as no resource is found
|
||||
return nil
|
||||
}
|
||||
err = pc.webhookRegistrationClient.RemoveResourceMutatingWebhookConfiguration()
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -30,7 +41,7 @@ func (pc *PolicyController) removeResourceWebhookConfiguration() error {
|
|||
return removeWebhookConfig()
|
||||
}
|
||||
|
||||
// if there are policies, check if they contain mutating or validating rule
|
||||
// if polices only have generate rules, we dont need the webhook
|
||||
if !hasMutateOrValidatePolicies(policies) {
|
||||
glog.V(4).Info("no policies with mutating or validating webhook configurations, remove resource webhook configuration if one exists")
|
||||
return removeWebhookConfig()
|
||||
|
@ -42,6 +53,17 @@ func (pc *PolicyController) removeResourceWebhookConfiguration() error {
|
|||
func (pc *PolicyController) createResourceMutatingWebhookConfigurationIfRequired(policy kyverno.ClusterPolicy) error {
|
||||
// if the policy contains mutating & validation rules and its config does not exist, we create one
|
||||
if policy.HasMutateOrValidate() {
|
||||
// check cache
|
||||
configName := pc.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
|
||||
config, err := pc.mWebhookConfigLister.Get(configName)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("failed to list mutating webhook configuration: %v", err)
|
||||
return err
|
||||
}
|
||||
if config != nil {
|
||||
// mutating webhook configuration already exists
|
||||
return nil
|
||||
}
|
||||
if err := pc.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
@ -3,8 +3,11 @@ package policystore
|
|||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
type policyMap map[string]interface{}
|
||||
|
@ -13,9 +16,12 @@ type kindMap map[string]namespaceMap
|
|||
|
||||
//PolicyStore stores the meta-data information for faster policy lookup
|
||||
type PolicyStore struct {
|
||||
data map[string]namespaceMap
|
||||
mu sync.RWMutex
|
||||
data map[string]namespaceMap
|
||||
mu sync.RWMutex
|
||||
// list/get cluster policy
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
// returns true if the cluster policy store has been synced at least once
|
||||
pSynched cache.InformerSynced
|
||||
}
|
||||
|
||||
//UpdateInterface provides api to update policies
|
||||
|
@ -33,14 +39,22 @@ type LookupInterface interface {
|
|||
}
|
||||
|
||||
// NewPolicyStore returns a new policy store
|
||||
func NewPolicyStore(pLister kyvernolister.ClusterPolicyLister) *PolicyStore {
|
||||
func NewPolicyStore(pInformer kyvernoinformer.ClusterPolicyInformer) *PolicyStore {
|
||||
ps := PolicyStore{
|
||||
data: make(kindMap),
|
||||
pLister: pLister,
|
||||
data: make(kindMap),
|
||||
pLister: pInformer.Lister(),
|
||||
pSynched: pInformer.Informer().HasSynced,
|
||||
}
|
||||
return &ps
|
||||
}
|
||||
|
||||
//Run checks syncing
|
||||
func (ps *PolicyStore) Run(stopCh <-chan struct{}) {
|
||||
if !cache.WaitForCacheSync(stopCh, ps.pSynched) {
|
||||
glog.Error("policy meta store: failed to sync informer cache")
|
||||
}
|
||||
}
|
||||
|
||||
//Register a new policy
|
||||
func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
|
||||
ps.mu.Lock()
|
||||
|
|
|
@ -11,12 +11,15 @@ import (
|
|||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
dclient "github.com/nirmata/kyverno/pkg/dclient"
|
||||
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
|
@ -27,13 +30,20 @@ const workQueueRetryLimit = 3
|
|||
type Generator struct {
|
||||
dclient *dclient.Client
|
||||
pvInterface kyvernov1.KyvernoV1Interface
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister
|
||||
nspvLister kyvernolister.NamespacedPolicyViolationLister
|
||||
queue workqueue.RateLimitingInterface
|
||||
dataStore *dataStore
|
||||
// get/list cluster policy violation
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister
|
||||
// get/list namespaced policy violation
|
||||
nspvLister kyvernolister.NamespacedPolicyViolationLister
|
||||
// returns true if the cluster policy store has been synced at least once
|
||||
pvSynced cache.InformerSynced
|
||||
// returns true if the namespaced cluster policy store has been synced at least once
|
||||
nspvSynced cache.InformerSynced
|
||||
queue workqueue.RateLimitingInterface
|
||||
dataStore *dataStore
|
||||
}
|
||||
|
||||
func NewDataStore() *dataStore {
|
||||
//NewDataStore returns an instance of data store
|
||||
func newDataStore() *dataStore {
|
||||
ds := dataStore{
|
||||
data: make(map[string]Info),
|
||||
}
|
||||
|
@ -93,15 +103,17 @@ type GeneratorInterface interface {
|
|||
|
||||
// NewPVGenerator returns a new instance of policy violation generator
|
||||
func NewPVGenerator(client *kyvernoclient.Clientset, dclient *client.Client,
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister,
|
||||
nspvLister kyvernolister.NamespacedPolicyViolationLister) *Generator {
|
||||
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
|
||||
nspvInformer kyvernoinformer.NamespacedPolicyViolationInformer) *Generator {
|
||||
gen := Generator{
|
||||
pvInterface: client.KyvernoV1(),
|
||||
dclient: dclient,
|
||||
pvLister: pvLister,
|
||||
nspvLister: nspvLister,
|
||||
pvLister: pvInformer.Lister(),
|
||||
pvSynced: pvInformer.Informer().HasSynced,
|
||||
nspvLister: nspvInformer.Lister(),
|
||||
nspvSynced: nspvInformer.Informer().HasSynced,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
|
||||
dataStore: NewDataStore(),
|
||||
dataStore: newDataStore(),
|
||||
}
|
||||
return &gen
|
||||
}
|
||||
|
@ -128,6 +140,10 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
|
|||
glog.Info("Start policy violation generator")
|
||||
defer glog.Info("Shutting down policy violation generator")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, gen.pvSynced, gen.nspvSynced) {
|
||||
glog.Error("policy violation generator: failed to sync informer cache")
|
||||
}
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(gen.runWorker, time.Second, stopCh)
|
||||
}
|
||||
|
|
|
@ -53,7 +53,7 @@ func buildNamespacedPVObj(policy string, resource kyverno.ResourceSpec, fRules [
|
|||
"policy": policy,
|
||||
"resource": resource.ToKey(),
|
||||
}
|
||||
pv.SetGenerateName("pv-")
|
||||
pv.SetGenerateName(fmt.Sprintf("%s-", policy))
|
||||
pv.SetLabels(labelMap)
|
||||
return pv
|
||||
}
|
||||
|
|
pkg/signal/signal.go (new file, 43 lines)
@@ -0,0 +1,43 @@
package signal

import (
    "os"
    "os/signal"
)

var onlyOneSignalHandler = make(chan struct{})
var shutdownHandler chan os.Signal

// SetupSignalHandler registered for SIGTERM and SIGINT. A stop channel is returned
// which is closed on one of these signals. If a second signal is caught, the program
// is terminated with exit code 1.
func SetupSignalHandler() <-chan struct{} {
    close(onlyOneSignalHandler) // panics when called twice

    shutdownHandler = make(chan os.Signal, 2)

    stop := make(chan struct{})
    signal.Notify(shutdownHandler, shutdownSignals...)
    go func() {
        <-shutdownHandler
        close(stop)
        <-shutdownHandler
        os.Exit(1) // second signal. Exit directly.
    }()

    return stop
}

// RequestShutdown emulates a received event that is considered as shutdown signal (SIGTERM/SIGINT)
// This returns whether a handler was notified
func RequestShutdown() bool {
    if shutdownHandler != nil {
        select {
        case shutdownHandler <- shutdownSignals[0]:
            return true
        default:
        }
    }

    return false
}
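Note: a minimal sketch of how a caller might consume the returned stop channel (the worker loop below is hypothetical and not part of this commit; the import path is the package added above):

```go
package main

import (
    "fmt"
    "time"

    "github.com/nirmata/kyverno/pkg/signal"
)

func main() {
    stopCh := signal.SetupSignalHandler()
    ticker := time.NewTicker(time.Second)
    defer ticker.Stop()
    for {
        select {
        case <-ticker.C:
            fmt.Println("working...")
        case <-stopCh:
            // the first SIGINT/SIGTERM closes stopCh; a second one exits the process
            fmt.Println("shutting down")
            return
        }
    }
}
```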
pkg/signal/signal_posix.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package signal

import (
    "os"
    "syscall"
)

var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
pkg/signal/signal_windows.go (new file, 8 lines)
@@ -0,0 +1,8 @@
package signal

//TODO: how to pick files based on OS compilation ?
// import (
//     "os"
// )

// var shutdownSignals = []os.Signal{os.Interrupt}
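Note: the TODO above asks how Go picks files per OS. Go does this with build constraints: a recognized filename suffix such as `_windows.go` limits a file to that GOOS, and any other file can be gated with a build-tag comment. The sketch below shows the tag approach for the non-Windows file; the `!windows` constraint is an assumption about how this file could be gated, not what the commit does, and the `// +build` form was the syntax current at the time (the `//go:build` form arrived later, in Go 1.17):

```go
// +build !windows

// A file named signal_posix.go has no recognized GOOS suffix, so the
// constraint above would restrict it to non-Windows platforms; files named
// *_windows.go are limited to Windows by their filename suffix alone.
package signal

import (
    "os"
    "syscall"
)

var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
```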
|
@@ -1,24 +1,20 @@
package version

import (
    "github.com/golang/glog"
)

// These fields are set during an official build
// Global vars set from command-line arguments
var (
    BuildVersion = "--"
    BuildHash = "--"
    BuildTime = "--"
)

// VersionInfo gets json info about the agent version
type VersionInfo struct {
    BuildVersion string
    BuildHash string
    BuildTime string
}

// GetVersion gets the current agent version
func GetVersion() *VersionInfo {
    return &VersionInfo{
        BuildVersion: BuildVersion,
        BuildHash: BuildHash,
        BuildTime: BuildTime,
    }
//PrintVersionInfo displays the kyverno version - git version
func PrintVersionInfo() {
    glog.Infof("Kyverno version: %s\n", BuildVersion)
    glog.Infof("Kyverno BuildHash: %s\n", BuildHash)
    glog.Infof("Kyverno BuildTime: %s\n", BuildTime)
}
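Note: these package-level variables are what the Makefile's LD_FLAGS populate at build time through the linker's -X flag; the sketch below only illustrates that wiring (values shortened, program is hypothetical):

```go
// The Makefile builds with:
//   go build -ldflags "-s -w -X github.com/nirmata/kyverno/pkg/version.BuildVersion=$(GIT_VERSION) ..."
// which overwrites the "--" defaults at link time, so a released binary reports
// its git version without any code change.
package main

import "github.com/nirmata/kyverno/pkg/version"

func main() {
    version.PrintVersionInfo() // logs BuildVersion, BuildHash, BuildTime via glog
}
```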
|
|
@ -67,7 +67,7 @@ func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig()
|
|||
mutatingConfig = config.VerifyMutatingWebhookConfigurationName
|
||||
}
|
||||
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
|
||||
err = wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
|
||||
err = wrc.client.DeleteResouce(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("verify webhook configuration %s, does not exits. not deleting", mutatingConfig)
|
||||
} else if err != nil {
|
||||
|
|
|
@ -118,7 +118,7 @@ func (wrc *WebhookRegistrationClient) removePolicyWebhookConfigurations() {
|
|||
validatingConfig = config.PolicyValidatingWebhookConfigurationName
|
||||
}
|
||||
glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
|
||||
err = wrc.registrationClient.ValidatingWebhookConfigurations().Delete(validatingConfig, &v1.DeleteOptions{})
|
||||
err = wrc.client.DeleteResouce(ValidatingWebhookConfigurationKind, "", validatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
|
||||
} else if err != nil {
|
||||
|
@ -136,7 +136,7 @@ func (wrc *WebhookRegistrationClient) removePolicyWebhookConfigurations() {
|
|||
}
|
||||
|
||||
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
|
||||
err = wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
|
||||
err = wrc.client.DeleteResouce(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
|
||||
} else if err != nil {
|
||||
|
|
|
@ -2,44 +2,42 @@ package webhookconfig
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/nirmata/kyverno/pkg/config"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
admregapi "k8s.io/api/admissionregistration/v1beta1"
|
||||
errorsapi "k8s.io/apimachinery/pkg/api/errors"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
admregclient "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
MutatingWebhookConfigurationKind string = "MutatingWebhookConfiguration"
|
||||
ValidatingWebhookConfigurationKind string = "ValidatingWebhookConfiguration"
|
||||
)
|
||||
|
||||
// WebhookRegistrationClient is client for registration webhooks on cluster
|
||||
type WebhookRegistrationClient struct {
|
||||
registrationClient *admregclient.AdmissionregistrationV1beta1Client
|
||||
client *client.Client
|
||||
clientConfig *rest.Config
|
||||
client *client.Client
|
||||
clientConfig *rest.Config
|
||||
// serverIP should be used if running Kyverno out of cluster
|
||||
serverIP string
|
||||
timeoutSeconds int32
|
||||
}
|
||||
|
||||
// NewWebhookRegistrationClient creates new WebhookRegistrationClient instance
|
||||
func NewWebhookRegistrationClient(clientConfig *rest.Config, client *client.Client, serverIP string, webhookTimeout int32) (*WebhookRegistrationClient, error) {
|
||||
registrationClient, err := admregclient.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Registering webhook client using serverIP %s\n", serverIP)
|
||||
|
||||
func NewWebhookRegistrationClient(
|
||||
clientConfig *rest.Config,
|
||||
client *client.Client,
|
||||
serverIP string,
|
||||
webhookTimeout int32) *WebhookRegistrationClient {
|
||||
return &WebhookRegistrationClient{
|
||||
registrationClient: registrationClient,
|
||||
client: client,
|
||||
clientConfig: clientConfig,
|
||||
serverIP: serverIP,
|
||||
timeoutSeconds: webhookTimeout,
|
||||
}, nil
|
||||
clientConfig: clientConfig,
|
||||
client: client,
|
||||
serverIP: serverIP,
|
||||
timeoutSeconds: webhookTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Register creates admission webhooks configs on cluster
|
||||
|
@ -74,9 +72,9 @@ func (wrc *WebhookRegistrationClient) Register() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// RemovePolicyWebhookConfigurations removes webhook configurations for resources and policy
|
||||
// RemoveWebhookConfigurations removes webhook configurations for resources and policy
|
||||
// called during webhook server shutdown
|
||||
func (wrc *WebhookRegistrationClient) RemovePolicyWebhookConfigurations(cleanUp chan<- struct{}) {
|
||||
func (wrc *WebhookRegistrationClient) RemoveWebhookConfigurations(cleanUp chan<- struct{}) {
|
||||
//TODO: duplicate, but a placeholder to perform more error handling during cleanup
|
||||
wrc.removeWebhookConfigurations()
|
||||
// close channel to notify cleanup is complete
|
||||
|
@ -105,8 +103,7 @@ func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration
|
|||
// clientConfig - service
|
||||
config = wrc.constructMutatingWebhookConfig(caData)
|
||||
}
|
||||
|
||||
_, err := wrc.registrationClient.MutatingWebhookConfigurations().Create(config)
|
||||
_, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false)
|
||||
if errorsapi.IsAlreadyExists(err) {
|
||||
glog.V(4).Infof("resource mutating webhook configuration %s, already exists. not creating one", config.Name)
|
||||
return nil
|
||||
|
@ -118,18 +115,6 @@ func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration
	return nil
}

//GetResourceMutatingWebhookConfiguration returns the MutatingWebhookConfiguration
func (wrc *WebhookRegistrationClient) GetResourceMutatingWebhookConfiguration() (*admregapi.MutatingWebhookConfiguration, error) {
	var name string
	if wrc.serverIP != "" {
		name = config.MutatingWebhookConfigurationDebugName
	} else {
		name = config.MutatingWebhookConfigurationName
	}

	return wrc.registrationClient.MutatingWebhookConfigurations().Get(name, v1.GetOptions{})
}

//createPolicyValidatingWebhookConfiguration creates a validating webhook configuration for the Policy CRD
func (wrc *WebhookRegistrationClient) createPolicyValidatingWebhookConfiguration() error {
	var caData []byte
@ -153,7 +138,7 @@ func (wrc *WebhookRegistrationClient) createPolicyValidatingWebhookConfiguration
	}

	// create validating webhook configuration resource
	if _, err := wrc.registrationClient.ValidatingWebhookConfigurations().Create(config); err != nil {
	if _, err := wrc.client.CreateResource(ValidatingWebhookConfigurationKind, "", *config, false); err != nil {
		return err
	}

@ -183,7 +168,7 @@ func (wrc *WebhookRegistrationClient) createPolicyMutatingWebhookConfiguration()
	}

	// create mutating webhook configuration resource
	if _, err := wrc.registrationClient.MutatingWebhookConfigurations().Create(config); err != nil {
	if _, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false); err != nil {
		return err
	}

@ -213,7 +198,7 @@ func (wrc *WebhookRegistrationClient) createVerifyMutatingWebhookConfiguration()
	}

	// create mutating webhook configuration resource
	if _, err := wrc.registrationClient.MutatingWebhookConfigurations().Create(config); err != nil {
	if _, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false); err != nil {
		return err
	}

@ -230,12 +215,69 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
	defer func() {
		glog.V(4).Infof("Finished cleaning up webhook configurations (%v)", time.Since(startTime))
	}()

	var wg sync.WaitGroup

	wg.Add(3)
	// mutating and validating webhook configuration for Kubernetes resources
	wrc.RemoveResourceMutatingWebhookConfiguration()

	go wrc.removeResourceMutatingWebhookConfiguration(&wg)
	// mutating and validating webhook configuration for Policy CRD resource
	wrc.removePolicyWebhookConfigurations()
	go wrc.removePolicyMutatingWebhookConfiguration(&wg)
	go wrc.removePolicyValidatingWebhookConfiguration(&wg)

	// mutating webhook configuration used to verify if the admission control flow is working or not
	wrc.removeVerifyWebhookMutatingWebhookConfig()
	// wait for the removal go routines to return
	wg.Wait()
}

// wrapper to handle wait group
// TODO: re-work with RemoveResourceMutatingWebhookConfiguration, as the only difference is wg handling
func (wrc *WebhookRegistrationClient) removeResourceMutatingWebhookConfiguration(wg *sync.WaitGroup) {
	defer wg.Done()
	wrc.RemoveResourceMutatingWebhookConfiguration()
}
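Each removal helper repeats the same wait-group wrapper; the fan-out/fan-in shape, reduced to a generic sketch (`cleanups` is a hypothetical slice, not kyverno API):

	var wg sync.WaitGroup
	wg.Add(len(cleanups)) // Add before launching, so Wait cannot return early
	for _, f := range cleanups {
		go func(f func()) {
			defer wg.Done()
			f() // each cleanup runs concurrently; Wait blocks until all call Done
		}(f)
	}
	wg.Wait()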

// delete policy mutating webhookconfigurations
// handle wait group
func (wrc *WebhookRegistrationClient) removePolicyMutatingWebhookConfiguration(wg *sync.WaitGroup) {
	defer wg.Done()
	// Mutating webhook configuration
	var mutatingConfig string
	if wrc.serverIP != "" {
		mutatingConfig = config.PolicyMutatingWebhookConfigurationDebugName
	} else {
		mutatingConfig = config.PolicyMutatingWebhookConfigurationName
	}

	glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
	err := wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
	if errorsapi.IsNotFound(err) {
		glog.V(4).Infof("policy webhook configuration %s does not exist, not deleting", mutatingConfig)
	} else if err != nil {
		glog.Errorf("failed to delete policy webhook configuration %s: %v", mutatingConfig, err)
	} else {
		glog.V(4).Infof("successfully deleted policy webhook configuration %s", mutatingConfig)
	}
}

// delete policy validating webhookconfigurations
// handle wait group
func (wrc *WebhookRegistrationClient) removePolicyValidatingWebhookConfiguration(wg *sync.WaitGroup) {
	defer wg.Done()
	// Validating webhook configuration
	var err error
	var validatingConfig string
	if wrc.serverIP != "" {
		validatingConfig = config.PolicyValidatingWebhookConfigurationDebugName
	} else {
		validatingConfig = config.PolicyValidatingWebhookConfigurationName
	}
	glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
	err = wrc.registrationClient.ValidatingWebhookConfigurations().Delete(validatingConfig, &v1.DeleteOptions{})
	if errorsapi.IsNotFound(err) {
		glog.V(4).Infof("policy webhook configuration %s does not exist, not deleting", validatingConfig)
	} else if err != nil {
		glog.Errorf("failed to delete policy webhook configuration %s: %v", validatingConfig, err)
	} else {
		glog.V(4).Infof("successfully deleted policy webhook configuration %s", validatingConfig)
	}
}

@ -58,16 +58,20 @@ func (wrc *WebhookRegistrationClient) constructMutatingWebhookConfig(caData []byte
	}
}

//GetResourceMutatingWebhookConfigName provides the name of the resource mutating webhook configuration
func (wrc *WebhookRegistrationClient) GetResourceMutatingWebhookConfigName() string {
	if wrc.serverIP != "" {
		return config.MutatingWebhookConfigurationDebugName
	}
	return config.MutatingWebhookConfigurationName
}

//RemoveResourceMutatingWebhookConfiguration removes mutating webhook configuration for all resources
func (wrc *WebhookRegistrationClient) RemoveResourceMutatingWebhookConfiguration() error {
	var configName string
	if wrc.serverIP != "" {
		configName = config.MutatingWebhookConfigurationDebugName
	} else {
		configName = config.MutatingWebhookConfigurationName
	}

	configName := wrc.GetResourceMutatingWebhookConfigName()
	// delete webhook configuration
	err := wrc.registrationClient.MutatingWebhookConfigurations().Delete(configName, &v1.DeleteOptions{})
	err := wrc.client.DeleteResouce(MutatingWebhookConfigurationKind, "", configName, false)
	if errors.IsNotFound(err) {
		glog.V(4).Infof("resource webhook configuration %s does not exist, so not deleting", configName)
		return nil

@ -34,18 +34,24 @@ import (
// WebhookServer contains configured TLS server with MutationWebhook.
// MutationWebhook gets policies from policyController and takes control of the cluster with kubeclient.
type WebhookServer struct {
	server                  http.Server
	client                  *client.Client
	kyvernoClient           *kyvernoclient.Clientset
	pLister                 kyvernolister.ClusterPolicyLister
	pvLister                kyvernolister.ClusterPolicyViolationLister
	namespacepvLister       kyvernolister.NamespacedPolicyViolationLister
	pListerSynced           cache.InformerSynced
	pvListerSynced          cache.InformerSynced
	namespacepvListerSynced cache.InformerSynced
	rbLister                rbaclister.RoleBindingLister
	crbLister               rbaclister.ClusterRoleBindingLister
	eventGen                event.Interface
	server        http.Server
	client        *client.Client
	kyvernoClient *kyvernoclient.Clientset
	// list/get cluster policy resource
	pLister kyvernolister.ClusterPolicyLister
	// returns true if the cluster policy store has synced at least once
	pSynced cache.InformerSynced
	// list/get role binding resource
	rbLister rbaclister.RoleBindingLister
	// returns true if the role binding store has synced at least once
	rbSynced cache.InformerSynced
	// list/get cluster role binding resource
	crbLister rbaclister.ClusterRoleBindingLister
	// returns true if the cluster role binding store has synced at least once
	crbSynced cache.InformerSynced
	// generate events
	eventGen event.Interface
	// webhook registration client
	webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
	// API to send policy stats for aggregation
	policyStatus policy.PolicyStatusInterface
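Each lister is paired with a HasSynced check, following the standard client-go informer pattern; a sketch of how one pair is derived (mirroring the constructor below):

	pLister := pInformer.Lister()             // read-only access to the local cache
	pSynced := pInformer.Informer().HasSynced // true once the store has synced at least once
	// callers gate on the sync before serving, e.g.:
	if !cache.WaitForCacheSync(stopCh, pSynced) {
		glog.Error("failed to sync informer cache")
	}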
@ -68,8 +74,6 @@ func NewWebhookServer(
	client *client.Client,
	tlsPair *tlsutils.TlsPemPair,
	pInformer kyvernoinformer.ClusterPolicyInformer,
	pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
	namespacepvInformer kyvernoinformer.NamespacedPolicyViolationInformer,
	rbInformer rbacinformer.RoleBindingInformer,
	crbInformer rbacinformer.ClusterRoleBindingInformer,
	eventGen event.Interface,
@ -96,17 +100,15 @@ func NewWebhookServer(
		client:                  client,
		kyvernoClient:           kyvernoClient,
		pLister:                 pInformer.Lister(),
		pvLister:                pvInformer.Lister(),
		namespacepvLister:       namespacepvInformer.Lister(),
		pListerSynced:           pvInformer.Informer().HasSynced,
		pvListerSynced:          pInformer.Informer().HasSynced,
		namespacepvListerSynced: namespacepvInformer.Informer().HasSynced,
		pSynced:   pInformer.Informer().HasSynced,
		rbLister:  rbInformer.Lister(),
		rbSynced:  rbInformer.Informer().HasSynced,
		crbLister: crbInformer.Lister(),
		crbSynced: crbInformer.Informer().HasSynced,
		eventGen:                  eventGen,
		webhookRegistrationClient: webhookRegistrationClient,
		policyStatus:              policyStatus,
		configHandler:             configHandler,
		rbLister:                  rbInformer.Lister(),
		crbLister:                 crbInformer.Lister(),
		cleanUp:                   cleanUp,
		lastReqTime:               checker.NewLastReqTime(),
		pvGenerator:               pvGenerator,
@ -246,6 +248,10 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionRequest

// RunAsync starts the TLS server in a separate goroutine and returns control immediately
func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
	if !cache.WaitForCacheSync(stopCh, ws.pSynced, ws.rbSynced, ws.crbSynced) {
		glog.Error("webhook: failed to sync informer cache")
	}

	go func(ws *WebhookServer) {
		glog.V(3).Infof("serving on %s\n", ws.server.Addr)
		if err := ws.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
@ -261,17 +267,17 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
}

// Stop stops the TLS server and returns control after the server is shut down
func (ws *WebhookServer) Stop() {
	err := ws.server.Shutdown(context.Background())
func (ws *WebhookServer) Stop(ctx context.Context) {
	// cleanUp
	// remove the static webhookconfigurations
	go ws.webhookRegistrationClient.RemoveWebhookConfigurations(ws.cleanUp)
	// shutdown http.Server with context timeout
	err := ws.server.Shutdown(ctx)
	if err != nil {
		// Error from closing listeners, or context timeout:
		glog.Info("Server Shutdown error: ", err)
		ws.server.Close()
	}
	// cleanUp
	// remove the static webhookconfigurations for policy CRD
	ws.webhookRegistrationClient.RemovePolicyWebhookConfigurations(ws.cleanUp)

}

// bodyToAdmissionReview creates AdmissionReview object from request body
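With Stop (shown above) now taking a context, the caller decides how long graceful shutdown may run; a hypothetical shutdown sequence (the signal wiring is assumed, and the 5s deadline is arbitrary):

	ws.RunAsync(stopCh)
	<-stopCh // e.g. closed on SIGTERM
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ws.Stop(ctx) // falls back to server.Close() if the deadline expires first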
@ -1,9 +1,5 @@
Use these scripts to prepare the controller for work.
All these scripts should be launched from the root folder of the project, for example:
`scripts/compile-image.sh`

### compile-image.sh ###
Compiles the project to a Go executable, generates a docker image and pushes it to the repo. Has no arguments.
All these scripts should be launched from the root folder of the project.

### generate-server-cert.sh ###
Generates the TLS certificate and key used by the webhook server. Example:

@ -19,10 +15,8 @@ Prepares controller for free (local) or in-cluster use. Uses `generate-server-cert.sh`
* `--serverIp` means the same as for `generate-server-cert.sh`
Examples:
`scripts/deploy-controller.sh --service=my-kyverno --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kyverno'
`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with mnaster node at '192.168.10.117'
`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in a cluster with master node at '192.168.10.117'

### test-web-hook.sh ###
Quickly creates and deletes a test config map. If your webhook server is running, you should see the corresponding output from it. Use this script after `deploy-controller.sh`.

### update-codegen.sh ###
Generates additional code for the controller object. You should resolve all dependencies before using it; see the main Readme for details.
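Example (run from the project root, like the other scripts; invocation shown here for completeness):

`scripts/update-codegen.sh`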
@ -1,2 +0,0 @@
kubectl delete -f definitions/install.yaml
kubectl delete csr,MutatingWebhookConfiguration,ValidatingWebhookConfiguration --all
@ -1,34 +0,0 @@
#!/bin/bash

default_version="dev-testing"
version=$1

if [[ -z "$1" ]]
then
  echo "Using default version: ${default_version}"
  version="${default_version}"
fi

hub_user_name="nirmata"
project_name="kyverno"

echo "# Ensuring Go dependencies..."
dep ensure -v || exit 2

echo "# Building executable ${project_name}..."
chmod +x scripts/update-codegen.sh
scripts/update-codegen.sh
make build || exit 3

echo "# Building docker image ${hub_user_name}/${project_name}:${version}"
cat <<EOF > Dockerfile
FROM alpine:latest
WORKDIR ~/
ADD ${project_name} ./${project_name}
ENTRYPOINT ["./${project_name}"]
EOF
tag="${hub_user_name}/${project_name}:${version}"
docker build --no-cache -t "${tag}" . || exit 4

echo "# Pushing image to repository..."
docker push "${tag}" || exit 5
@ -1,3 +0,0 @@
cd "$(dirname "$0")"
kubectl create -f resources/test-configmap.yaml
kubectl delete -f resources/test-configmap.yaml
@ -1,4 +0,0 @@
#!/bin/bash
# You should see the trace of requests in the output of webhook server
kubectl create configmap test-config-map --from-literal="some_var=some_value"
kubectl delete configmap test-config-map