Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-29 10:55:05 +00:00)
Commit dd97cdd95f: Merge commit '337e0f7d1d6985b5683ddb7b7a42df0ef8130708' into 544_documentation
13919 changed files with 83965 additions and 2557845 deletions
|
@ -31,5 +31,6 @@ after_success:
|
|||
if [ $TRAVIS_PULL_REQUEST == 'false' ]
|
||||
then
|
||||
docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
|
||||
make docker-publish
|
||||
make docker-publish-initContainer
|
||||
make docker-publish-kyverno
|
||||
fi
|
Makefile (99 changed lines)
|
@ -1,63 +1,82 @@
|
|||
.DEFAULT_GOAL := build
|
||||
|
||||
|
||||
# The CLI binary to build
|
||||
BIN ?= kyverno
|
||||
##################################
|
||||
# DEFAULTS
|
||||
##################################
|
||||
REGISTRY=index.docker.io
|
||||
REPO=$(REGISTRY)/nirmata/kyverno
|
||||
IMAGE_TAG=$(GIT_VERSION)
|
||||
GOOS ?= $(shell go env GOOS)
|
||||
LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"
|
||||
|
||||
GIT_VERSION := $(shell git describe --dirty --always --tags)
|
||||
GIT_BRANCH := $(shell git branch | grep \* | cut -d ' ' -f2)
|
||||
GIT_HASH := $(GIT_BRANCH)/$(shell git log -1 --pretty=format:"%H")
|
||||
TIMESTAMP := $(shell date '+%Y-%m-%d_%I:%M:%S%p')
|
||||
|
||||
PACKAGE ?=github.com/nirmata/kyverno
|
||||
MAIN ?=$(PACKAGE)
|
||||
|
||||
LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"
|
||||
|
||||
# default docker hub
|
||||
REGISTRY=index.docker.io
|
||||
REPO=$(REGISTRY)/nirmata/kyverno
|
||||
IMAGE_TAG=$(GIT_VERSION)
|
||||
|
||||
GOOS ?= $(shell go env GOOS)
|
||||
OUTPUT=$(shell pwd)/_output/cli/$(BIN)
|
||||
##################################
|
||||
# KYVERNO
|
||||
##################################
|
||||
|
||||
KYVERNO_PATH:= cmd/kyverno
|
||||
build:
|
||||
CGO_ENABLED=0 GOOS=linux go build -ldflags=$(LD_FLAGS) $(MAIN)
|
||||
GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
|
||||
|
||||
local:
|
||||
go build -ldflags=$(LD_FLAGS) $(MAIN)
|
||||
##################################
|
||||
# INIT CONTAINER
|
||||
##################################
|
||||
INITC_PATH := cmd/initContainer
|
||||
INITC_IMAGE := kyvernopre
|
||||
initContainer:
|
||||
GOOS=$(GOOS) go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
|
||||
|
||||
cli: cli-dirs
|
||||
GOOS=$(GOOS) \
|
||||
go build \
|
||||
-o $(OUTPUT) \
|
||||
-ldflags $(LD_FLAGS) \
|
||||
$(PACKAGE)/cmd/$(BIN)
|
||||
.PHONY: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer
|
||||
|
||||
cli-dirs:
|
||||
@mkdir -p _output/cli
|
||||
docker-publish-initContainer: docker-build-initContainer docker-tag-repo-initContainer docker-push-initContainer
|
||||
|
||||
clean:
|
||||
go clean
|
||||
docker-build-initContainer:
|
||||
CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(INITC_PATH)/kyvernopre -ldflags=$(LD_FLAGS) $(PWD)/$(INITC_PATH)/main.go
|
||||
echo $(PWD)/$(INITC_PATH)/
|
||||
@docker build -f $(PWD)/$(INITC_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(PWD)/$(INITC_PATH)/
|
||||
|
||||
# docker image build targets
|
||||
# user must be logged in to $(REGISTRY) to push images
|
||||
.PHONY: docker-build docker-tag-repo docker-push
|
||||
docker-tag-repo-initContainer:
|
||||
@docker tag $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(INITC_IMAGE):latest
|
||||
|
||||
docker-publish: docker-build docker-tag-repo docker-push
|
||||
docker-push-initContainer:
|
||||
@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):$(IMAGE_TAG)
|
||||
@docker push $(REGISTRY)/nirmata/$(INITC_IMAGE):latest
|
||||
|
||||
docker-build:
|
||||
@docker build -t $(REPO):$(IMAGE_TAG) .
|
||||
##################################
|
||||
# KYVERNO CONTAINER
|
||||
##################################
|
||||
.PHONY: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
|
||||
KYVERNO_PATH := cmd/kyverno
|
||||
KYVERNO_IMAGE := kyverno
|
||||
docker-publish-kyverno: docker-build-kyverno docker-tag-repo-kyverno docker-push-kyverno
|
||||
|
||||
docker-tag-repo:
|
||||
@docker tag $(REPO):$(IMAGE_TAG) $(REPO):latest
|
||||
docker-build-kyverno:
|
||||
CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
|
||||
@docker build -f $(PWD)/$(KYVERNO_PATH)/Dockerfile -t $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(PWD)/$(KYVERNO_PATH)
|
||||
|
||||
docker-push:
|
||||
@docker push $(REPO):$(IMAGE_TAG)
|
||||
@docker push $(REPO):latest
|
||||
docker-tag-repo-kyverno:
|
||||
@docker tag $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest
|
||||
|
||||
## Testing & Code-Coverage
|
||||
docker-push-kyverno:
|
||||
@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):$(IMAGE_TAG)
|
||||
@docker push $(REGISTRY)/nirmata/$(KYVERNO_IMAGE):latest
|
||||
|
||||
|
||||
##################################
|
||||
# CLI
|
||||
##################################
|
||||
CLI_PATH := cmd/cli
|
||||
cli:
|
||||
GOOS=$(GOOS) go build -o $(PWD)/$(CLI_PATH)/kyvernocli -ldflags=$(LD_FLAGS) $(PWD)/$(CLI_PATH)/main.go
|
||||
|
||||
|
||||
##################################
|
||||
# Testing & Code-Coverage
|
||||
##################################
|
||||
|
||||
## variables
|
||||
BIN_DIR := $(GOPATH)/bin
|
||||
|
|
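Both LD_FLAGS lines in the Makefile above stamp the binary with GIT_VERSION, GIT_HASH, and TIMESTAMP through the linker's -X flag. A minimal, self-contained sketch of that mechanism, using hypothetical main-package variables (the real targets live under $(PACKAGE)/pkg/version, which this diff does not show):

// Sketch of the -X link-flag mechanism used by LD_FLAGS above.
// Build with, e.g.:
//   go build -ldflags "-X main.buildVersion=v1.0.0 -X main.buildTime=2019-08-09_10:00:00AM"
package main

import "fmt"

// Defaults used when the linker flags are not supplied.
var (
	buildVersion = "dev"
	buildHash    = "unknown"
	buildTime    = "unknown"
)

func main() {
	fmt.Printf("version=%s hash=%s builtAt=%s\n", buildVersion, buildHash, buildTime)
}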
cmd/initContainer/Dockerfile (new file, 3 lines)
|
@ -0,0 +1,3 @@
|
|||
FROM scratch
|
||||
ADD kyvernopre /kyvernopre
|
||||
ENTRYPOINT ["/kyvernopre"]
|
cmd/initContainer/main.go (new file, 199 lines)
|
@ -0,0 +1,199 @@
|
|||
/*
|
||||
Cleans up stale webhook configurations created by kyverno that were not cleaned up
|
||||
*/
|
||||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"os"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/nirmata/kyverno/pkg/config"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
"github.com/nirmata/kyverno/pkg/signal"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
rest "k8s.io/client-go/rest"
|
||||
clientcmd "k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
var (
|
||||
kubeconfig string
|
||||
)
|
||||
|
||||
const (
|
||||
mutatingWebhookConfigKind string = "MutatingWebhookConfiguration"
|
||||
validatingWebhookConfigKind string = "ValidatingWebhookConfiguration"
|
||||
)
|
||||
|
||||
func main() {
|
||||
defer glog.Flush()
|
||||
// os signal handler
|
||||
stopCh := signal.SetupSignalHandler()
|
||||
// arguments
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
|
||||
|
||||
// create client config
|
||||
clientConfig, err := createClientConfig(kubeconfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error building kubeconfig: %v\n", err)
|
||||
}
|
||||
|
||||
// DYNAMIC CLIENT
|
||||
// - client for all registered resources
|
||||
client, err := client.NewClient(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
|
||||
requests := []request{
|
||||
// Resource
|
||||
request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
|
||||
request{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},
|
||||
// Policy
|
||||
request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationName},
|
||||
request{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationDebugName},
|
||||
request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationName},
|
||||
request{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationDebugName},
|
||||
}
|
||||
|
||||
done := make(chan struct{})
|
||||
defer close(done)
|
||||
failure := false
|
||||
// use a pipeline to pass requests to the cleanup workers
|
||||
// generate requests
|
||||
in := gen(done, stopCh, requests...)
|
||||
// process requests
|
||||
// processing routine count : 2
|
||||
p1 := process(client, done, stopCh, in)
|
||||
p2 := process(client, done, stopCh, in)
|
||||
// merge results from processing routines
|
||||
for err := range merge(done, stopCh, p1, p2) {
|
||||
if err != nil {
|
||||
failure = true
|
||||
glog.Errorf("failed to cleanup: %v", err)
|
||||
}
|
||||
}
|
||||
// if there is any failure, fail the process with a non-zero exit code
|
||||
if failure {
|
||||
glog.Errorf("failed to cleanup webhook configurations")
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
func removeWebhookIfExists(client *client.Client, kind string, name string) error {
|
||||
var err error
|
||||
// Get resource
|
||||
_, err = client.GetResource(kind, "", name)
|
||||
if errors.IsNotFound(err) {
|
||||
glog.V(4).Infof("%s(%s) not found", name, kind)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get resource %s(%s)", name, kind)
|
||||
return err
|
||||
}
|
||||
// Delete resource
|
||||
err = client.DeleteResource(kind, "", name, false)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to delete resource %s(%s)", name, kind)
|
||||
return err
|
||||
}
|
||||
glog.Infof("cleaned up resource %s(%s)", name, kind)
|
||||
return nil
|
||||
}
|
||||
|
||||
func createClientConfig(kubeconfig string) (*rest.Config, error) {
|
||||
if kubeconfig == "" {
|
||||
glog.Info("Using in-cluster configuration")
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
glog.Infof("Using configuration from '%s'", kubeconfig)
|
||||
return clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
}
|
||||
|
||||
type request struct {
|
||||
kind string
|
||||
name string
|
||||
}
|
||||
|
||||
/* Processing Pipeline
|
||||
-> Process Requests
|
||||
Generate Requests -> Process Requests -> Merge Results
|
||||
-> Process Requests
|
||||
- number of processes can be controlled
|
||||
- stop processing on a SIGTERM or SIGINT signal
|
||||
- stop processing if any process fails (supported)
|
||||
*/
|
||||
// Generates requests to be processed
|
||||
func gen(done <-chan struct{}, stopCh <-chan struct{}, requests ...request) <-chan request {
|
||||
out := make(chan request)
|
||||
go func() {
|
||||
defer close(out)
|
||||
for _, req := range requests {
|
||||
select {
|
||||
case out <- req:
|
||||
case <-done:
|
||||
println("done generate")
|
||||
return
|
||||
case <-stopCh:
|
||||
println("shutting down generate")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// processes the requests
|
||||
func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{}, requests <-chan request) <-chan error {
|
||||
out := make(chan error)
|
||||
go func() {
|
||||
defer close(out)
|
||||
for req := range requests {
|
||||
select {
|
||||
case out <- removeWebhookIfExists(client, req.kind, req.name):
|
||||
case <-done:
|
||||
println("done process")
|
||||
return
|
||||
case <-stopCh:
|
||||
println("shutting down process")
|
||||
return
|
||||
}
|
||||
}
|
||||
}()
|
||||
return out
|
||||
}
|
||||
|
||||
// waits for all processes to be complete and merges result
|
||||
func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan error) <-chan error {
|
||||
var wg sync.WaitGroup
|
||||
out := make(chan error)
|
||||
// gets the output from each process
|
||||
output := func(ch <-chan error) {
|
||||
defer wg.Done()
|
||||
for err := range ch {
|
||||
select {
|
||||
case out <- err:
|
||||
case <-done:
|
||||
println("done merge")
|
||||
return
|
||||
case <-stopCh:
|
||||
println("shutting down merge")
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
wg.Add(len(processes))
|
||||
for _, process := range processes {
|
||||
go output(process)
|
||||
}
|
||||
|
||||
// close when all the process goroutines are done
|
||||
go func() {
|
||||
wg.Wait()
|
||||
close(out)
|
||||
}()
|
||||
return out
|
||||
}
|
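gen, process, and merge above form a small fan-out/fan-in pipeline with the worker count hard-coded to two. A hedged, self-contained sketch of the same pattern with a configurable worker count; runPipeline and task are illustrative names, not part of this commit:

// Sketch: the fan-out/fan-in pattern used by gen/process/merge, with a
// configurable number of workers.
package main

import (
	"fmt"
	"sync"
)

type task struct{ kind, name string }

// runPipeline fans tasks out to n workers and merges their errors into one channel.
func runPipeline(tasks []task, n int, work func(task) error) <-chan error {
	in := make(chan task)
	go func() {
		// generate: feed requests into the pipeline
		defer close(in)
		for _, t := range tasks {
			in <- t
		}
	}()

	out := make(chan error)
	var wg sync.WaitGroup
	wg.Add(n)
	for i := 0; i < n; i++ {
		go func() {
			// process: each worker handles requests until the input closes
			defer wg.Done()
			for t := range in {
				out <- work(t)
			}
		}()
	}
	go func() {
		// merge: close the output once every worker has finished
		wg.Wait()
		close(out)
	}()
	return out
}

func main() {
	tasks := []task{{"MutatingWebhookConfiguration", "example-webhook-cfg"}}
	failed := false
	for err := range runPipeline(tasks, 2, func(t task) error {
		fmt.Printf("cleaning up %s/%s\n", t.kind, t.name)
		return nil
	}) {
		if err != nil {
			failed = true
		}
	}
	fmt.Println("any failures:", failed)
}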
cmd/kyverno/main.go (new file, 247 lines)
|
@ -0,0 +1,247 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/nirmata/kyverno/pkg/checker"
|
||||
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
|
||||
"github.com/nirmata/kyverno/pkg/config"
|
||||
dclient "github.com/nirmata/kyverno/pkg/dclient"
|
||||
event "github.com/nirmata/kyverno/pkg/event"
|
||||
"github.com/nirmata/kyverno/pkg/namespace"
|
||||
"github.com/nirmata/kyverno/pkg/policy"
|
||||
"github.com/nirmata/kyverno/pkg/policystore"
|
||||
"github.com/nirmata/kyverno/pkg/policyviolation"
|
||||
"github.com/nirmata/kyverno/pkg/signal"
|
||||
"github.com/nirmata/kyverno/pkg/utils"
|
||||
"github.com/nirmata/kyverno/pkg/version"
|
||||
"github.com/nirmata/kyverno/pkg/webhookconfig"
|
||||
"github.com/nirmata/kyverno/pkg/webhooks"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
)
|
||||
|
||||
var (
|
||||
kubeconfig string
|
||||
serverIP string
|
||||
cpu bool
|
||||
memory bool
|
||||
webhookTimeout int
|
||||
//TODO: this has been added to support command line arguments for backward compatibility
|
||||
// will be removed in future and the configuration will be set only via configmaps
|
||||
filterK8Resources string
|
||||
)
|
||||
|
||||
func main() {
|
||||
defer glog.Flush()
|
||||
version.PrintVersionInfo()
|
||||
|
||||
// cleanUp Channel
|
||||
cleanUp := make(chan struct{})
|
||||
// handle os signals
|
||||
stopCh := signal.SetupSignalHandler()
|
||||
// CLIENT CONFIG
|
||||
clientConfig, err := config.CreateClientConfig(kubeconfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error building kubeconfig: %v\n", err)
|
||||
}
|
||||
|
||||
// KYVERNO CRD CLIENT
|
||||
// access CRD resources
|
||||
// - Policy
|
||||
// - PolicyViolation
|
||||
pclient, err := kyvernoclient.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
|
||||
// DYNAMIC CLIENT
|
||||
// - client for all registered resources
|
||||
client, err := dclient.NewClient(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
// CRD CHECK
|
||||
// - verify that the CRDs for Policy & PolicyViolation are available
|
||||
if !utils.CRDInstalled(client.DiscoveryClient) {
|
||||
glog.Fatalf("Required CRDs unavailable")
|
||||
}
|
||||
// KUBERNETES CLIENT
|
||||
kubeClient, err := utils.NewKubeClient(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating kubernetes client: %v\n", err)
|
||||
}
|
||||
|
||||
// KUBERNETES RESOURCES INFORMER
|
||||
// watches namespace resource
|
||||
// - cache resync time: 10 seconds
|
||||
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(
|
||||
kubeClient,
|
||||
10*time.Second)
|
||||
|
||||
// WEBHOOK REGISTRATION CLIENT
|
||||
webhookRegistrationClient := webhookconfig.NewWebhookRegistrationClient(
|
||||
clientConfig,
|
||||
client,
|
||||
serverIP,
|
||||
int32(webhookTimeout))
|
||||
|
||||
// Resource Mutating Webhook Watcher
|
||||
lastReqTime := checker.NewLastReqTime()
|
||||
rWebhookWatcher := webhookconfig.NewResourceWebhookRegister(
|
||||
lastReqTime,
|
||||
kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(),
|
||||
webhookRegistrationClient,
|
||||
)
|
||||
|
||||
// KYVERNO CRD INFORMER
|
||||
// watches CRD resources:
|
||||
// - Policy
|
||||
// - PolicyViolation
|
||||
// - cache resync time: 10 seconds
|
||||
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(
|
||||
pclient,
|
||||
10*time.Second)
|
||||
|
||||
// Configuration Data
|
||||
// dynamically load the configuration from the configMap
|
||||
// - resource filters
|
||||
// if the configMap is updated, the configuration will be updated
|
||||
configData := config.NewConfigData(
|
||||
kubeClient,
|
||||
kubeInformer.Core().V1().ConfigMaps(),
|
||||
filterK8Resources)
|
||||
|
||||
// Policy meta-data store
|
||||
policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies())
|
||||
|
||||
// EVENT GENERATOR
|
||||
// - generate event with retry mechanism
|
||||
egen := event.NewEventGenerator(
|
||||
client,
|
||||
pInformer.Kyverno().V1().ClusterPolicies())
|
||||
|
||||
// POLICY VIOLATION GENERATOR
|
||||
// -- generate policy violation
|
||||
pvgen := policyviolation.NewPVGenerator(pclient,
|
||||
client,
|
||||
pInformer.Kyverno().V1().ClusterPolicyViolations(),
|
||||
pInformer.Kyverno().V1().PolicyViolations())
|
||||
|
||||
// POLICY CONTROLLER
|
||||
// - reconciles policies and policy violations
|
||||
// - process policy on existing resources
|
||||
// - status aggregator: receives stats when a policy is applied
|
||||
// & updates the policy status
|
||||
pc, err := policy.NewPolicyController(pclient,
|
||||
client,
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
pInformer.Kyverno().V1().ClusterPolicyViolations(),
|
||||
pInformer.Kyverno().V1().PolicyViolations(),
|
||||
configData,
|
||||
egen,
|
||||
pvgen,
|
||||
policyMetaStore,
|
||||
rWebhookWatcher)
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating policy controller: %v\n", err)
|
||||
}
|
||||
|
||||
// GENERATE CONTROLLER
|
||||
// - watches the Namespace resource and generates resources based on the policy generate rule
|
||||
nsc := namespace.NewNamespaceController(
|
||||
pclient,
|
||||
client,
|
||||
kubeInformer.Core().V1().Namespaces(),
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
pc.GetPolicyStatusAggregator(),
|
||||
egen,
|
||||
configData,
|
||||
pvgen,
|
||||
policyMetaStore)
|
||||
|
||||
// CONFIGURE CERTIFICATES
|
||||
tlsPair, err := client.InitTLSPemPair(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
|
||||
}
|
||||
|
||||
// WEBHOOK REGISTRATION
|
||||
// - mutating,validatingwebhookconfiguration (Policy)
|
||||
// - verifymutatingwebhookconfiguration (Kyverno Deployment)
|
||||
// resource webhook configuration is generated dynamically in the webhook server and policy controller
|
||||
// based on the policy resources created
|
||||
if err = webhookRegistrationClient.Register(); err != nil {
|
||||
glog.Fatalf("Failed registering Admission Webhooks: %v\n", err)
|
||||
}
|
||||
|
||||
// WEBHOOK
|
||||
// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
|
||||
// - reports the results based on the response from the policy engine:
|
||||
// -- annotations on resources with update details on mutation JSON patches
|
||||
// -- generate policy violation resource
|
||||
// -- generate events on policy and resource
|
||||
server, err := webhooks.NewWebhookServer(
|
||||
pclient,
|
||||
client,
|
||||
tlsPair,
|
||||
pInformer.Kyverno().V1().ClusterPolicies(),
|
||||
kubeInformer.Rbac().V1().RoleBindings(),
|
||||
kubeInformer.Rbac().V1().ClusterRoleBindings(),
|
||||
egen,
|
||||
webhookRegistrationClient,
|
||||
pc.GetPolicyStatusAggregator(),
|
||||
configData,
|
||||
policyMetaStore,
|
||||
pvgen,
|
||||
rWebhookWatcher,
|
||||
cleanUp)
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to create webhook server: %v\n", err)
|
||||
}
|
||||
// Start the components
|
||||
pInformer.Start(stopCh)
|
||||
kubeInformer.Start(stopCh)
|
||||
|
||||
go rWebhookWatcher.Run(stopCh)
|
||||
go configData.Run(stopCh)
|
||||
go policyMetaStore.Run(stopCh)
|
||||
go pc.Run(1, stopCh)
|
||||
go egen.Run(1, stopCh)
|
||||
go nsc.Run(1, stopCh)
|
||||
go pvgen.Run(1, stopCh)
|
||||
|
||||
// verifies that admission control is enabled and active
|
||||
// resync: 60 seconds
|
||||
// deadline: 60 seconds (send request)
|
||||
// max deadline: deadline*3 (set the deployment annotation as false)
|
||||
server.RunAsync(stopCh)
|
||||
|
||||
<-stopCh
|
||||
|
||||
// by default http.Server waits indefinitely for connections to return to idle and then shuts down
|
||||
// adding a threshold will handle zombie connections
|
||||
// adjust the context deadline to 5 seconds
|
||||
ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
|
||||
defer func() {
|
||||
cancel()
|
||||
}()
|
||||
// cleanup webhookconfigurations followed by webhook shutdown
|
||||
server.Stop(ctx)
|
||||
// resource cleanup
|
||||
// remove webhook configurations
|
||||
<-cleanUp
|
||||
glog.Info("successful shutdown of kyverno controller")
|
||||
}
|
||||
|
||||
func init() {
|
||||
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8s resource in format [kind,namespace,name] for which policy is not evaluated by the admission webhook. Example: --filterK8Resources \"[Deployment, kyverno, kyverno]\" --filterK8Resources \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
|
||||
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
|
||||
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
|
||||
config.LogDefaultFlags()
|
||||
flag.Parse()
|
||||
}
|
|
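The shutdown sequence at the end of main above bounds how long the webhook HTTP server may take to drain connections. A minimal, self-contained sketch of that pattern, with placeholder address and timings (the real server wiring lives in pkg/webhooks and is not shown here):

// Sketch of bounded HTTP server shutdown, mirroring the 5-second context
// deadline used above. Address and timings are illustrative only.
package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8443"}
	go func() {
		if err := srv.ListenAndServe(); err != nil && err != http.ErrServerClosed {
			log.Fatalf("server error: %v", err)
		}
	}()

	// ... block here until a stop signal arrives ...
	time.Sleep(100 * time.Millisecond)

	// Without a deadline, Shutdown waits indefinitely for idle connections;
	// the timeout prevents zombie connections from blocking termination.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("forced shutdown: %v", err)
	}
}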
@ -13,6 +13,8 @@ spec:
|
|||
kind: ClusterPolicy
|
||||
plural: clusterpolicies
|
||||
singular: clusterpolicy
|
||||
shortNames:
|
||||
- cpol
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
|
@ -238,8 +240,26 @@ spec:
|
|||
kind: ClusterPolicyViolation
|
||||
plural: clusterpolicyviolations
|
||||
singular: clusterpolicyviolation
|
||||
shortNames:
|
||||
- cpolv
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Policy
|
||||
type: string
|
||||
description: The policy that resulted in the violation
|
||||
JSONPath: .spec.policy
|
||||
- name: ResourceKind
|
||||
type: string
|
||||
description: The resource kind that caused the violation
|
||||
JSONPath: .spec.resource.kind
|
||||
- name: ResourceName
|
||||
type: string
|
||||
description: The resource name that caused the violation
|
||||
JSONPath: .spec.resource.name
|
||||
- name: Age
|
||||
type: date
|
||||
JSONPath: .metadata.creationTimestamp
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
|
@ -261,8 +281,6 @@ spec:
|
|||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
rules:
|
||||
type: array
|
||||
items:
|
||||
|
@ -283,15 +301,13 @@ spec:
|
|||
properties:
|
||||
kind:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
creationBlocked:
|
||||
type: boolean
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: namespacedpolicyviolations.kyverno.io
|
||||
name: policyviolations.kyverno.io
|
||||
spec:
|
||||
group: kyverno.io
|
||||
versions:
|
||||
|
@ -300,11 +316,29 @@ spec:
|
|||
storage: true
|
||||
scope: Namespaced
|
||||
names:
|
||||
kind: NamespacedPolicyViolation
|
||||
plural: namespacedpolicyviolations
|
||||
singular: namespacedpolicyviolation
|
||||
kind: PolicyViolation
|
||||
plural: policyviolations
|
||||
singular: policyviolation
|
||||
shortNames:
|
||||
- polv
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Policy
|
||||
type: string
|
||||
description: The policy that resulted in the violation
|
||||
JSONPath: .spec.policy
|
||||
- name: ResourceKind
|
||||
type: string
|
||||
description: The resource kind that caused the violation
|
||||
JSONPath: .spec.resource.kind
|
||||
- name: ResourceName
|
||||
type: string
|
||||
description: The resource name that caused the violation
|
||||
JSONPath: .spec.resource.name
|
||||
- name: Age
|
||||
type: date
|
||||
JSONPath: .metadata.creationTimestamp
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
|
@ -326,8 +360,6 @@ spec:
|
|||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
rules:
|
||||
type: array
|
||||
items:
|
||||
|
@ -348,8 +380,6 @@ spec:
|
|||
properties:
|
||||
kind:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
creationBlocked:
|
||||
type: boolean
|
||||
---
|
||||
|
@ -391,6 +421,16 @@ subjects:
|
|||
name: kyverno-service-account
|
||||
namespace: kyverno
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: policyviolation
|
||||
rules:
|
||||
- apiGroups: ["kyverno.io"]
|
||||
resources:
|
||||
- policyviolations
|
||||
verbs: ["get", "list", "watch"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
|
@ -418,9 +458,12 @@ spec:
|
|||
app: kyverno
|
||||
spec:
|
||||
serviceAccountName: kyverno-service-account
|
||||
initContainers:
|
||||
- name: kyverno-pre
|
||||
image: nirmata/kyvernopre:latest
|
||||
containers:
|
||||
- name: kyverno
|
||||
image: nirmata/kyverno:v1.0.0
|
||||
image: nirmata/kyverno:latest
|
||||
args:
|
||||
- "--filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*]"
|
||||
# customize webhook timeout
|
||||
|
|
|
@ -13,6 +13,8 @@ spec:
|
|||
kind: ClusterPolicy
|
||||
plural: clusterpolicies
|
||||
singular: clusterpolicy
|
||||
shortNames:
|
||||
- cpol
|
||||
subresources:
|
||||
status: {}
|
||||
validation:
|
||||
|
@ -238,8 +240,26 @@ spec:
|
|||
kind: ClusterPolicyViolation
|
||||
plural: clusterpolicyviolations
|
||||
singular: clusterpolicyviolation
|
||||
shortNames:
|
||||
- cpolv
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Policy
|
||||
type: string
|
||||
description: The policy that resulted in the violation
|
||||
JSONPath: .spec.policy
|
||||
- name: ResourceKind
|
||||
type: string
|
||||
description: The resource kind that caused the violation
|
||||
JSONPath: .spec.resource.kind
|
||||
- name: ResourceName
|
||||
type: string
|
||||
description: The resource name that caused the violation
|
||||
JSONPath: .spec.resource.name
|
||||
- name: Age
|
||||
type: date
|
||||
JSONPath: .metadata.creationTimestamp
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
|
@ -261,8 +281,6 @@ spec:
|
|||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
rules:
|
||||
type: array
|
||||
items:
|
||||
|
@ -283,15 +301,13 @@ spec:
|
|||
properties:
|
||||
kind:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
creationBlocked:
|
||||
type: boolean
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: namespacedpolicyviolations.kyverno.io
|
||||
name: policyviolations.kyverno.io
|
||||
spec:
|
||||
group: kyverno.io
|
||||
versions:
|
||||
|
@ -300,11 +316,29 @@ spec:
|
|||
storage: true
|
||||
scope: Namespaced
|
||||
names:
|
||||
kind: NamespacedPolicyViolation
|
||||
plural: namespacedpolicyviolations
|
||||
singular: namespacedpolicyviolation
|
||||
kind: PolicyViolation
|
||||
plural: policyviolations
|
||||
singular: policyviolation
|
||||
shortNames:
|
||||
- polv
|
||||
subresources:
|
||||
status: {}
|
||||
additionalPrinterColumns:
|
||||
- name: Policy
|
||||
type: string
|
||||
description: The policy that resulted in the violation
|
||||
JSONPath: .spec.policy
|
||||
- name: ResourceKind
|
||||
type: string
|
||||
description: The resource kind that caused the violation
|
||||
JSONPath: .spec.resource.kind
|
||||
- name: ResourceName
|
||||
type: string
|
||||
description: The resource name that caused the violation
|
||||
JSONPath: .spec.resource.name
|
||||
- name: Age
|
||||
type: date
|
||||
JSONPath: .metadata.creationTimestamp
|
||||
validation:
|
||||
openAPIV3Schema:
|
||||
properties:
|
||||
|
@ -326,8 +360,6 @@ spec:
|
|||
type: string
|
||||
name:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
rules:
|
||||
type: array
|
||||
items:
|
||||
|
@ -348,8 +380,6 @@ spec:
|
|||
properties:
|
||||
kind:
|
||||
type: string
|
||||
namespace:
|
||||
type: string
|
||||
creationBlocked:
|
||||
type: boolean
|
||||
---
|
||||
|
|
definitions/rolebinding.yaml (new file, 21 lines)
|
@ -0,0 +1,21 @@
|
|||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: policyviolation
|
||||
# change the namespace below to create the RoleBinding for that namespace's admin
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: policyviolation
|
||||
subjects:
|
||||
# configure the subject below to grant the namespace admin access to policy violations
|
||||
- kind: ServiceAccount
|
||||
name: default
|
||||
namespace: default
|
||||
# - apiGroup: rbac.authorization.k8s.io
|
||||
# kind: User
|
||||
# name:
|
||||
# - apiGroup: rbac.authorization.k8s.io
|
||||
# kind: Group
|
||||
# name:
|
|
@ -81,7 +81,7 @@ Kyverno uses secrets created above to setup TLS communication with the kube-apis
|
|||
|
||||
To install a specific version, change the image tag in `install.yaml` to the desired git tag.
|
||||
|
||||
e.g., change image tag from `latest` to the specific tag `v0.3.0`.
|
||||
e.g., change image tag from `latest` to the specific tag `v1.0.0`.
|
||||
>>>
|
||||
spec:
|
||||
containers:
|
||||
|
@ -112,6 +112,14 @@ kubectl logs <kyverno-pod-name> -n kyverno
|
|||
Here is a script that generates a self-signed CA, a TLS certificate-key pair, and the corresponding kubernetes secrets: [helper script](/scripts/generate-self-signed-cert-and-k8secrets.sh)
|
||||
|
||||
|
||||
# Configure a namespace admin to access policy violations
|
||||
|
||||
During installation, Kyverno creates a ClusterRole `policyviolation` that grants the `list`, `get`, and `watch` verbs on the `policyviolations` resource. To grant access to a namespace admin, configure [definitions/rolebinding.yaml](../definitions/rolebinding.yaml) and apply it to the cluster.
|
||||
|
||||
- Replace `metadata.namespace` with the namespace of the admin
|
||||
- Configure the `subjects` field to bind the admin's role to the ClusterRole `policyviolation`
|
||||
|
||||
|
||||
# Installing outside of the cluster (debug mode)
|
||||
|
||||
To build Kyverno in a development environment see: https://github.com/nirmata/kyverno/wiki/Building
|
||||
|
|
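The installation notes above give a namespace admin list/get/watch access to policyviolations. A hedged sketch of that access from Go, using the generated typed clientset; the kubeconfig location and namespace are assumptions, and the context-free List signature matches the client-go vintage used elsewhere in this commit:

// Sketch: list PolicyViolations in a namespace with the generated clientset,
// as a namespace admin bound to the policyviolation ClusterRole could do.
// Kubeconfig location and namespace are assumptions for illustration.
package main

import (
	"fmt"

	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client, err := kyvernoclient.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}
	// PolicyViolation is namespaced after this change.
	list, err := client.KyvernoV1().PolicyViolations("default").List(metav1.ListOptions{})
	if err != nil {
		panic(err)
	}
	for _, pv := range list.Items {
		fmt.Println(pv.Namespace + "/" + pv.Name)
	}
}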
init.go (deleted, 102 lines)
|
@ -1,102 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/profile"
|
||||
|
||||
"github.com/golang/glog"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
tls "github.com/nirmata/kyverno/pkg/tls"
|
||||
"github.com/nirmata/kyverno/pkg/version"
|
||||
rest "k8s.io/client-go/rest"
|
||||
clientcmd "k8s.io/client-go/tools/clientcmd"
|
||||
)
|
||||
|
||||
func printVersionInfo() {
|
||||
v := version.GetVersion()
|
||||
glog.Infof("Kyverno version: %s\n", v.BuildVersion)
|
||||
glog.Infof("Kyverno BuildHash: %s\n", v.BuildHash)
|
||||
glog.Infof("Kyverno BuildTime: %s\n", v.BuildTime)
|
||||
}
|
||||
|
||||
func createClientConfig(kubeconfig string) (*rest.Config, error) {
|
||||
if kubeconfig == "" {
|
||||
glog.Info("Using in-cluster configuration")
|
||||
return rest.InClusterConfig()
|
||||
}
|
||||
glog.Infof("Using configuration from '%s'", kubeconfig)
|
||||
return clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
}
|
||||
|
||||
// Loads or creates PEM private key and TLS certificate for webhook server.
|
||||
// Created pair is stored in cluster's secret.
|
||||
// Returns struct with key/certificate pair.
|
||||
func initTLSPemPair(configuration *rest.Config, client *client.Client) (*tls.TlsPemPair, error) {
|
||||
certProps, err := client.GetTLSCertProps(configuration)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tlsPair := client.ReadTlsPair(certProps)
|
||||
if tls.IsTlsPairShouldBeUpdated(tlsPair) {
|
||||
glog.Info("Generating new key/certificate pair for TLS")
|
||||
tlsPair, err = client.GenerateTlsPemPair(certProps)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = client.WriteTlsPair(certProps, tlsPair); err != nil {
|
||||
return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
|
||||
}
|
||||
return tlsPair, nil
|
||||
}
|
||||
|
||||
glog.Infoln("Using existing TLS key/certificate pair")
|
||||
return tlsPair, nil
|
||||
}
|
||||
|
||||
var prof interface {
|
||||
Stop()
|
||||
}
|
||||
|
||||
func enableProfiling(cpu, memory bool) interface {
|
||||
Stop()
|
||||
} {
|
||||
|
||||
file := "/opt/nirmata/kyverno/" + randomString(6)
|
||||
if cpu {
|
||||
glog.Infof("Enable cpu profiling ...")
|
||||
prof = profile.Start(profile.CPUProfile, profile.ProfilePath(file))
|
||||
} else if memory {
|
||||
glog.Infof("Enable memory profiling ...")
|
||||
prof = profile.Start(profile.MemProfile, profile.ProfilePath(file))
|
||||
}
|
||||
|
||||
return prof
|
||||
}
|
||||
|
||||
func disableProfiling(p interface{ Stop() }) {
|
||||
if p != nil {
|
||||
p.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
// generate random string
|
||||
const charset = "abcdefghijklmnopqrstuvwxyz" +
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789"
|
||||
|
||||
var seededRand *rand.Rand = rand.New(
|
||||
rand.NewSource(time.Now().UnixNano()))
|
||||
|
||||
func stringWithCharset(length int, charset string) string {
|
||||
b := make([]byte, length)
|
||||
for i := range b {
|
||||
b[i] = charset[seededRand.Intn(len(charset))]
|
||||
}
|
||||
return string(b)
|
||||
}
|
||||
|
||||
func randomString(length int) string {
|
||||
return stringWithCharset(length, charset)
|
||||
}
|
main.go (deleted, 206 lines)
|
@ -1,206 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
|
||||
"github.com/nirmata/kyverno/pkg/config"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
event "github.com/nirmata/kyverno/pkg/event"
|
||||
"github.com/nirmata/kyverno/pkg/namespace"
|
||||
"github.com/nirmata/kyverno/pkg/policy"
|
||||
"github.com/nirmata/kyverno/pkg/policystore"
|
||||
"github.com/nirmata/kyverno/pkg/policyviolation"
|
||||
"github.com/nirmata/kyverno/pkg/utils"
|
||||
"github.com/nirmata/kyverno/pkg/webhookconfig"
|
||||
"github.com/nirmata/kyverno/pkg/webhooks"
|
||||
kubeinformers "k8s.io/client-go/informers"
|
||||
"k8s.io/sample-controller/pkg/signals"
|
||||
)
|
||||
|
||||
var (
|
||||
kubeconfig string
|
||||
serverIP string
|
||||
cpu bool
|
||||
memory bool
|
||||
webhookTimeout int
|
||||
//TODO: this has been added to backward support command line arguments
|
||||
// will be removed in future and the configuration will be set only via configmaps
|
||||
filterK8Resources string
|
||||
)
|
||||
|
||||
func main() {
|
||||
defer glog.Flush()
|
||||
printVersionInfo()
|
||||
// profile cpu and memory consuption
|
||||
prof = enableProfiling(cpu, memory)
|
||||
// cleanUp Channel
|
||||
cleanUp := make(chan struct{})
|
||||
// SIGINT & SIGTERM channel
|
||||
stopCh := signals.SetupSignalHandler()
|
||||
// CLIENT CONFIG
|
||||
clientConfig, err := createClientConfig(kubeconfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error building kubeconfig: %v\n", err)
|
||||
}
|
||||
|
||||
// KYVENO CRD CLIENT
|
||||
// access CRD resources
|
||||
// - Policy
|
||||
// - PolicyViolation
|
||||
pclient, err := kyvernoclient.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
|
||||
// DYNAMIC CLIENT
|
||||
// - client for all registered resources
|
||||
client, err := client.NewClient(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
// CRD CHECK
|
||||
// - verify if the CRD for Policy & PolicyViolation are avialalbe
|
||||
if !utils.CRDInstalled(client.DiscoveryClient) {
|
||||
glog.Fatalf("Required CRDs unavailable")
|
||||
}
|
||||
// KUBERNETES CLIENT
|
||||
kubeClient, err := utils.NewKubeClient(clientConfig)
|
||||
if err != nil {
|
||||
glog.Fatalf("Error creating kubernetes client: %v\n", err)
|
||||
}
|
||||
|
||||
// WERBHOOK REGISTRATION CLIENT
|
||||
webhookRegistrationClient, err := webhookconfig.NewWebhookRegistrationClient(clientConfig, client, serverIP, int32(webhookTimeout))
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
|
||||
}
|
||||
|
||||
// KYVERNO CRD INFORMER
|
||||
// watches CRD resources:
|
||||
// - Policy
|
||||
// - PolicyVolation
|
||||
// - cache resync time: 10 seconds
|
||||
pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, 10*time.Second)
|
||||
|
||||
// KUBERNETES RESOURCES INFORMER
|
||||
// watches namespace resource
|
||||
// - cache resync time: 10 seconds
|
||||
kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, 10*time.Second)
|
||||
|
||||
// Configuration Data
|
||||
// dyamically load the configuration from configMap
|
||||
// - resource filters
|
||||
// if the configMap is update, the configuration will be updated :D
|
||||
configData := config.NewConfigData(kubeClient, kubeInformer.Core().V1().ConfigMaps(), filterK8Resources)
|
||||
|
||||
// Policy meta-data store
|
||||
policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies().Lister())
|
||||
|
||||
// EVENT GENERATOR
|
||||
// - generate event with retry mechanism
|
||||
egen := event.NewEventGenerator(client, pInformer.Kyverno().V1().ClusterPolicies())
|
||||
|
||||
// POLICY VIOLATION GENERATOR
|
||||
// -- generate policy violation
|
||||
pvgen := policyviolation.NewPVGenerator(pclient, client, pInformer.Kyverno().V1().ClusterPolicyViolations().Lister(), pInformer.Kyverno().V1().NamespacedPolicyViolations().Lister())
|
||||
|
||||
// POLICY CONTROLLER
|
||||
// - reconciliation policy and policy violation
|
||||
// - process policy on existing resources
|
||||
// - status aggregator: recieves stats when a policy is applied
|
||||
// & updates the policy status
|
||||
pc, err := policy.NewPolicyController(pclient, client, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pInformer.Kyverno().V1().NamespacedPolicyViolations(), egen, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), webhookRegistrationClient, configData, pvgen, policyMetaStore)
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating policy controller: %v\n", err)
|
||||
}
|
||||
|
||||
// POLICY VIOLATION CONTROLLER
|
||||
// policy violation cleanup if the corresponding resource is deleted
|
||||
// status: lastUpdatTime
|
||||
pvc, err := policyviolation.NewPolicyViolationController(client, pclient, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations())
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating cluster policy violation controller: %v\n", err)
|
||||
}
|
||||
|
||||
nspvc, err := policyviolation.NewNamespacedPolicyViolationController(client, pclient, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().NamespacedPolicyViolations())
|
||||
if err != nil {
|
||||
glog.Fatalf("error creating namespaced policy violation controller: %v\n", err)
|
||||
}
|
||||
|
||||
// GENERATE CONTROLLER
|
||||
// - watches for Namespace resource and generates resource based on the policy generate rule
|
||||
nsc := namespace.NewNamespaceController(pclient, client, kubeInformer.Core().V1().Namespaces(), pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pc.GetPolicyStatusAggregator(), egen, configData, pvgen, policyMetaStore)
|
||||
|
||||
// CONFIGURE CERTIFICATES
|
||||
tlsPair, err := initTLSPemPair(clientConfig, client)
|
||||
if err != nil {
|
||||
glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
|
||||
}
|
||||
|
||||
// WEBHOOK REGISTRATION
|
||||
// - validationwebhookconfiguration (Policy)
|
||||
// - mutatingwebhookconfiguration (All resources)
|
||||
// webhook confgiuration is also generated dynamically in the policy controller
|
||||
// based on the policy resources created
|
||||
if err = webhookRegistrationClient.Register(); err != nil {
|
||||
glog.Fatalf("Failed registering Admission Webhooks: %v\n", err)
|
||||
}
|
||||
|
||||
// WEBHOOOK
|
||||
// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
|
||||
// - reports the results based on the response from the policy engine:
|
||||
// -- annotations on resources with update details on mutation JSON patches
|
||||
// -- generate policy violation resource
|
||||
// -- generate events on policy and resource
|
||||
server, err := webhooks.NewWebhookServer(pclient, client, tlsPair, pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().ClusterPolicyViolations(), pInformer.Kyverno().V1().NamespacedPolicyViolations(),
|
||||
kubeInformer.Rbac().V1().RoleBindings(), kubeInformer.Rbac().V1().ClusterRoleBindings(), egen, webhookRegistrationClient, pc.GetPolicyStatusAggregator(), configData, policyMetaStore, pvgen, cleanUp)
|
||||
if err != nil {
|
||||
glog.Fatalf("Unable to create webhook server: %v\n", err)
|
||||
}
|
||||
// Start the components
|
||||
pInformer.Start(stopCh)
|
||||
kubeInformer.Start(stopCh)
|
||||
if err := configData.Run(stopCh); err != nil {
|
||||
glog.Fatalf("Unable to load dynamic configuration: %v\n", err)
|
||||
}
|
||||
go pc.Run(1, stopCh)
|
||||
go pvc.Run(1, stopCh)
|
||||
go nspvc.Run(1, stopCh)
|
||||
go egen.Run(1, stopCh)
|
||||
go nsc.Run(1, stopCh)
|
||||
go pvgen.Run(1, stopCh)
|
||||
|
||||
// verifys if the admission control is enabled and active
|
||||
// resync: 60 seconds
|
||||
// deadline: 60 seconds (send request)
|
||||
// max deadline: deadline*3 (set the deployment annotation as false)
|
||||
server.RunAsync(stopCh)
|
||||
|
||||
<-stopCh
|
||||
disableProfiling(prof)
|
||||
server.Stop()
|
||||
// resource cleanup
|
||||
// remove webhook configurations
|
||||
<-cleanUp
|
||||
}
|
||||
|
||||
func init() {
|
||||
// profiling feature gate
|
||||
// cpu and memory profiling cannot be enabled at same time
|
||||
// if both cpu and memory are enabled
|
||||
// by default is to profile cpu
|
||||
flag.BoolVar(&cpu, "cpu", false, "cpu profilling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
|
||||
flag.BoolVar(&memory, "memory", false, "memory profilling feature gate, default to false || cpu and memory profiling cannot be enabled at the same time")
|
||||
//TODO: this has been added to backward support command line arguments
|
||||
// will be removed in future and the configuration will be set only via configmaps
|
||||
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
|
||||
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
|
||||
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
|
||||
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
|
||||
config.LogDefaultFlags()
|
||||
flag.Parse()
|
||||
}
|
|
@ -29,16 +29,12 @@ var (
|
|||
// Adds the list of known types to Scheme.
|
||||
func addKnownTypes(scheme *runtime.Scheme) error {
|
||||
scheme.AddKnownTypes(SchemeGroupVersion,
|
||||
// &Policy{},
|
||||
// &PolicyList{},
|
||||
// &PolicyViolation{},
|
||||
// &PolicyViolationList{},
|
||||
&ClusterPolicy{},
|
||||
&ClusterPolicyList{},
|
||||
&ClusterPolicyViolation{},
|
||||
&ClusterPolicyViolationList{},
|
||||
&NamespacedPolicyViolation{},
|
||||
&NamespacedPolicyViolationList{},
|
||||
&PolicyViolation{},
|
||||
&PolicyViolationList{},
|
||||
)
|
||||
metav1.AddToGroupVersion(scheme, SchemeGroupVersion)
|
||||
return nil
|
||||
|
|
|
@ -25,8 +25,8 @@ type ClusterPolicyList struct {
|
|||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// ClusterPolicyViolation ...
|
||||
type ClusterPolicyViolation PolicyViolation
|
||||
// ClusterPolicyViolation represents cluster-wide violations
|
||||
type ClusterPolicyViolation PolicyViolationTemplate
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
|
@ -40,16 +40,16 @@ type ClusterPolicyViolationList struct {
|
|||
// +genclient
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NamespacedPolicyViolation ...
|
||||
type NamespacedPolicyViolation PolicyViolation
|
||||
// PolicyViolation represents namespaced violations
|
||||
type PolicyViolation PolicyViolationTemplate
|
||||
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// NamespacedPolicyViolationList ...
|
||||
type NamespacedPolicyViolationList struct {
|
||||
// PolicyViolationList ...
|
||||
type PolicyViolationList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ListMeta `json:"metadata"`
|
||||
Items []NamespacedPolicyViolation `json:"items"`
|
||||
Items []PolicyViolation `json:"items"`
|
||||
}
|
||||
|
||||
// Policy contains rules to be applied to created resources
|
||||
|
@ -172,7 +172,7 @@ type RuleStats struct {
|
|||
// PolicyList is a list of Policy resources
|
||||
|
||||
// PolicyViolation stores the information regarding the resources for which a policy failed to apply
|
||||
type PolicyViolation struct {
|
||||
type PolicyViolationTemplate struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
metav1.ObjectMeta `json:"metadata,omitempty"`
|
||||
Spec PolicyViolationSpec `json:"spec"`
|
||||
|
@ -188,7 +188,8 @@ type PolicyViolationSpec struct {
|
|||
|
||||
// ResourceSpec information to identify the resource
|
||||
type ResourceSpec struct {
|
||||
Kind string `json:"kind"`
|
||||
Kind string `json:"kind"`
|
||||
// Not used in processing, but present for backward compatibility
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
Name string `json:"name"`
|
||||
}
|
||||
|
@ -201,8 +202,11 @@ type ViolatedRule struct {
|
|||
ManagedResource ManagedResourceSpec `json:"managedResource,omitempty"`
|
||||
}
|
||||
|
||||
// ManagedResourceSpec is used when the violation is created on the resource owner
|
||||
// to determine the kind of child resource that caused the violation
|
||||
type ManagedResourceSpec struct {
|
||||
Kind string `json:"kind,omitempty"`
|
||||
Kind string `json:"kind,omitempty"`
|
||||
// Not used in processing, but present for backward compatibility
|
||||
Namespace string `json:"namespace,omitempty"`
|
||||
CreationBlocked bool `json:"creationBlocked,omitempty"`
|
||||
}
|
||||
|
@ -212,5 +216,4 @@ type ManagedResourceSpec struct {
|
|||
// LastUpdateTime : the time the policy violation was updated
|
||||
type PolicyViolationStatus struct {
|
||||
LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
|
||||
//TODO: having user information regarding the owner of resource can be helpful
|
||||
}
|
||||
|
|
|
@ -57,18 +57,5 @@ func (gen *Generation) DeepCopyInto(out *Generation) {
|
|||
|
||||
//ToKey generates the key string used for adding a label to a policy violation
|
||||
func (rs ResourceSpec) ToKey() string {
|
||||
if rs.Namespace == "" {
|
||||
return rs.Kind + "." + rs.Name
|
||||
}
|
||||
return rs.Kind + "." + rs.Namespace + "." + rs.Name
|
||||
}
|
||||
|
||||
//BuildKey builds the key
|
||||
func BuildResourceKey(kind, namespace, name string) string {
|
||||
resource := ResourceSpec{
|
||||
Kind: kind,
|
||||
Namespace: namespace,
|
||||
Name: name,
|
||||
}
|
||||
return resource.ToKey()
|
||||
return rs.Kind + "." + rs.Name
|
||||
}
|
||||
|
|
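After the change above, ResourceSpec.ToKey ignores the namespace and returns only kind and name. A short, self-contained sketch of the resulting behavior:

// Sketch: ResourceSpec.ToKey after this change drops the namespace from the key.
package main

import (
	"fmt"

	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

func main() {
	rs := kyverno.ResourceSpec{Kind: "Deployment", Namespace: "default", Name: "nginx"}
	fmt.Println(rs.ToKey()) // prints "Deployment.nginx"; the namespace is no longer part of the key
}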
|
@ -264,67 +264,6 @@ func (in *Mutation) DeepCopy() *Mutation {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NamespacedPolicyViolation) DeepCopyInto(out *NamespacedPolicyViolation) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedPolicyViolation.
|
||||
func (in *NamespacedPolicyViolation) DeepCopy() *NamespacedPolicyViolation {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NamespacedPolicyViolation)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *NamespacedPolicyViolation) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *NamespacedPolicyViolationList) DeepCopyInto(out *NamespacedPolicyViolationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]NamespacedPolicyViolation, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NamespacedPolicyViolationList.
|
||||
func (in *NamespacedPolicyViolationList) DeepCopy() *NamespacedPolicyViolationList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(NamespacedPolicyViolationList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *NamespacedPolicyViolationList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *Policy) DeepCopyInto(out *Policy) {
|
||||
*out = *in
|
||||
|
@ -386,6 +325,47 @@ func (in *PolicyViolation) DeepCopy() *PolicyViolation {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PolicyViolation) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PolicyViolationList) DeepCopyInto(out *PolicyViolationList) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
out.ListMeta = in.ListMeta
|
||||
if in.Items != nil {
|
||||
in, out := &in.Items, &out.Items
|
||||
*out = make([]PolicyViolation, len(*in))
|
||||
for i := range *in {
|
||||
(*in)[i].DeepCopyInto(&(*out)[i])
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyViolationList.
|
||||
func (in *PolicyViolationList) DeepCopy() *PolicyViolationList {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PolicyViolationList)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
|
||||
func (in *PolicyViolationList) DeepCopyObject() runtime.Object {
|
||||
if c := in.DeepCopy(); c != nil {
|
||||
return c
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PolicyViolationSpec) DeepCopyInto(out *PolicyViolationSpec) {
|
||||
*out = *in
|
||||
|
@ -425,6 +405,26 @@ func (in *PolicyViolationStatus) DeepCopy() *PolicyViolationStatus {
|
|||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *PolicyViolationTemplate) DeepCopyInto(out *PolicyViolationTemplate) {
|
||||
*out = *in
|
||||
out.TypeMeta = in.TypeMeta
|
||||
in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
|
||||
in.Spec.DeepCopyInto(&out.Spec)
|
||||
in.Status.DeepCopyInto(&out.Status)
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyViolationTemplate.
|
||||
func (in *PolicyViolationTemplate) DeepCopy() *PolicyViolationTemplate {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(PolicyViolationTemplate)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
|
||||
*out = *in
|
||||
|
|
|
@ -12,7 +12,11 @@ import (
|
|||
)
|
||||
|
||||
//MaxRetryCount defines the max deadline count
|
||||
const MaxRetryCount int = 3
|
||||
const (
|
||||
MaxRetryCount int = 3
|
||||
DefaultDeadline time.Duration = 60 * time.Second
|
||||
DefaultResync time.Duration = 60 * time.Second
|
||||
)
|
||||
|
||||
// LastReqTime
|
||||
type LastReqTime struct {
|
||||
|
@ -54,13 +58,13 @@ func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolic
|
|||
}
|
||||
|
||||
//Run runs the checker and verifies the resource update
|
||||
func (t *LastReqTime) Run(pLister kyvernolister.ClusterPolicyLister,eventGen event.Interface, client *dclient.Client, defaultResync time.Duration, deadline time.Duration, stopCh <-chan struct{}) {
|
||||
glog.V(2).Infof("starting default resync for webhook checker with resync time %d", defaultResync)
|
||||
func (t *LastReqTime) Run(pLister kyvernolister.ClusterPolicyLister, eventGen event.Interface, client *dclient.Client, defaultResync time.Duration, deadline time.Duration, stopCh <-chan struct{}) {
|
||||
glog.V(2).Infof("starting default resync for webhook checker with resync time %d nanoseconds", defaultResync)
|
||||
maxDeadline := deadline * time.Duration(MaxRetryCount)
|
||||
ticker := time.NewTicker(defaultResync)
|
||||
var statuscontrol StatusInterface
|
||||
/// interface to update and increment kyverno webhook status via annotations
|
||||
statuscontrol = NewVerifyControl(client,eventGen)
|
||||
statuscontrol = NewVerifyControl(client, eventGen)
|
||||
// send the initial update status
|
||||
if checkIfPolicyWithMutateAndGenerateExists(pLister) {
|
||||
if err := statuscontrol.SuccessStatus(); err != nil {
|
||||
|
|
|
@ -36,8 +36,8 @@ func (c *FakeKyvernoV1) ClusterPolicyViolations() v1.ClusterPolicyViolationInter
|
|||
return &FakeClusterPolicyViolations{c}
|
||||
}
|
||||
|
||||
func (c *FakeKyvernoV1) NamespacedPolicyViolations(namespace string) v1.NamespacedPolicyViolationInterface {
|
||||
return &FakeNamespacedPolicyViolations{c, namespace}
|
||||
func (c *FakeKyvernoV1) PolicyViolations(namespace string) v1.PolicyViolationInterface {
|
||||
return &FakePolicyViolations{c, namespace}
|
||||
}
|
||||
|
||||
// RESTClient returns a RESTClient that is used to communicate
|
||||
|
|
|
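The renamed PolicyViolations(namespace) accessor above can be exercised in unit tests through the generated fake clientset. A hedged sketch, assuming the standard client-gen layout with fake.NewSimpleClientset (that package path is not shown in this diff):

// Sketch: create and read a namespaced PolicyViolation via the fake clientset.
// The fake package path and NewSimpleClientset follow the usual client-gen
// layout; they are assumptions, not shown in this diff.
package main

import (
	"fmt"

	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
	"github.com/nirmata/kyverno/pkg/client/clientset/versioned/fake"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	seed := &kyverno.PolicyViolation{
		ObjectMeta: metav1.ObjectMeta{Name: "pv-1", Namespace: "default"},
	}
	client := fake.NewSimpleClientset(seed)

	got, err := client.KyvernoV1().PolicyViolations("default").Get("pv-1", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println("found:", got.Namespace+"/"+got.Name)
}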
@ -1,140 +0,0 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakeNamespacedPolicyViolations implements NamespacedPolicyViolationInterface
|
||||
type FakeNamespacedPolicyViolations struct {
|
||||
Fake *FakeKyvernoV1
|
||||
ns string
|
||||
}
|
||||
|
||||
var namespacedpolicyviolationsResource = schema.GroupVersionResource{Group: "kyverno.io", Version: "v1", Resource: "namespacedpolicyviolations"}
|
||||
|
||||
var namespacedpolicyviolationsKind = schema.GroupVersionKind{Group: "kyverno.io", Version: "v1", Kind: "NamespacedPolicyViolation"}
|
||||
|
||||
// Get takes name of the namespacedPolicyViolation, and returns the corresponding namespacedPolicyViolation object, and an error if there is any.
|
||||
func (c *FakeNamespacedPolicyViolations) Get(name string, options v1.GetOptions) (result *kyvernov1.NamespacedPolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(namespacedpolicyviolationsResource, c.ns, name), &kyvernov1.NamespacedPolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.NamespacedPolicyViolation), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of NamespacedPolicyViolations that match those selectors.
|
||||
func (c *FakeNamespacedPolicyViolations) List(opts v1.ListOptions) (result *kyvernov1.NamespacedPolicyViolationList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(namespacedpolicyviolationsResource, namespacedpolicyviolationsKind, c.ns, opts), &kyvernov1.NamespacedPolicyViolationList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &kyvernov1.NamespacedPolicyViolationList{ListMeta: obj.(*kyvernov1.NamespacedPolicyViolationList).ListMeta}
|
||||
for _, item := range obj.(*kyvernov1.NamespacedPolicyViolationList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested namespacedPolicyViolations.
|
||||
func (c *FakeNamespacedPolicyViolations) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(namespacedpolicyviolationsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a namespacedPolicyViolation and creates it. Returns the server's representation of the namespacedPolicyViolation, and an error, if there is any.
|
||||
func (c *FakeNamespacedPolicyViolations) Create(namespacedPolicyViolation *kyvernov1.NamespacedPolicyViolation) (result *kyvernov1.NamespacedPolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(namespacedpolicyviolationsResource, c.ns, namespacedPolicyViolation), &kyvernov1.NamespacedPolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.NamespacedPolicyViolation), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a namespacedPolicyViolation and updates it. Returns the server's representation of the namespacedPolicyViolation, and an error, if there is any.
|
||||
func (c *FakeNamespacedPolicyViolations) Update(namespacedPolicyViolation *kyvernov1.NamespacedPolicyViolation) (result *kyvernov1.NamespacedPolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(namespacedpolicyviolationsResource, c.ns, namespacedPolicyViolation), &kyvernov1.NamespacedPolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.NamespacedPolicyViolation), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakeNamespacedPolicyViolations) UpdateStatus(namespacedPolicyViolation *kyvernov1.NamespacedPolicyViolation) (*kyvernov1.NamespacedPolicyViolation, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(namespacedpolicyviolationsResource, "status", c.ns, namespacedPolicyViolation), &kyvernov1.NamespacedPolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.NamespacedPolicyViolation), err
|
||||
}
|
||||
|
||||
// Delete takes name of the namespacedPolicyViolation and deletes it. Returns an error if one occurs.
|
||||
func (c *FakeNamespacedPolicyViolations) Delete(name string, options *v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteAction(namespacedpolicyviolationsResource, c.ns, name), &kyvernov1.NamespacedPolicyViolation{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakeNamespacedPolicyViolations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(namespacedpolicyviolationsResource, c.ns, listOptions)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &kyvernov1.NamespacedPolicyViolationList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched namespacedPolicyViolation.
|
||||
func (c *FakeNamespacedPolicyViolations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *kyvernov1.NamespacedPolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(namespacedpolicyviolationsResource, c.ns, name, pt, data, subresources...), &kyvernov1.NamespacedPolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.NamespacedPolicyViolation), err
|
||||
}
|
|
@ -0,0 +1,140 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package fake
|
||||
|
||||
import (
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
schema "k8s.io/apimachinery/pkg/runtime/schema"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
testing "k8s.io/client-go/testing"
|
||||
)
|
||||
|
||||
// FakePolicyViolations implements PolicyViolationInterface
|
||||
type FakePolicyViolations struct {
|
||||
Fake *FakeKyvernoV1
|
||||
ns string
|
||||
}
|
||||
|
||||
var policyviolationsResource = schema.GroupVersionResource{Group: "kyverno.io", Version: "v1", Resource: "policyviolations"}
|
||||
|
||||
var policyviolationsKind = schema.GroupVersionKind{Group: "kyverno.io", Version: "v1", Kind: "PolicyViolation"}
|
||||
|
||||
// Get takes name of the policyViolation, and returns the corresponding policyViolation object, and an error if there is any.
|
||||
func (c *FakePolicyViolations) Get(name string, options v1.GetOptions) (result *kyvernov1.PolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewGetAction(policyviolationsResource, c.ns, name), &kyvernov1.PolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.PolicyViolation), err
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of PolicyViolations that match those selectors.
|
||||
func (c *FakePolicyViolations) List(opts v1.ListOptions) (result *kyvernov1.PolicyViolationList, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewListAction(policyviolationsResource, policyviolationsKind, c.ns, opts), &kyvernov1.PolicyViolationList{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
label, _, _ := testing.ExtractFromListOptions(opts)
|
||||
if label == nil {
|
||||
label = labels.Everything()
|
||||
}
|
||||
list := &kyvernov1.PolicyViolationList{ListMeta: obj.(*kyvernov1.PolicyViolationList).ListMeta}
|
||||
for _, item := range obj.(*kyvernov1.PolicyViolationList).Items {
|
||||
if label.Matches(labels.Set(item.Labels)) {
|
||||
list.Items = append(list.Items, item)
|
||||
}
|
||||
}
|
||||
return list, err
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested policyViolations.
|
||||
func (c *FakePolicyViolations) Watch(opts v1.ListOptions) (watch.Interface, error) {
|
||||
return c.Fake.
|
||||
InvokesWatch(testing.NewWatchAction(policyviolationsResource, c.ns, opts))
|
||||
|
||||
}
|
||||
|
||||
// Create takes the representation of a policyViolation and creates it. Returns the server's representation of the policyViolation, and an error, if there is any.
|
||||
func (c *FakePolicyViolations) Create(policyViolation *kyvernov1.PolicyViolation) (result *kyvernov1.PolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewCreateAction(policyviolationsResource, c.ns, policyViolation), &kyvernov1.PolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.PolicyViolation), err
|
||||
}
|
||||
|
||||
// Update takes the representation of a policyViolation and updates it. Returns the server's representation of the policyViolation, and an error, if there is any.
|
||||
func (c *FakePolicyViolations) Update(policyViolation *kyvernov1.PolicyViolation) (result *kyvernov1.PolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateAction(policyviolationsResource, c.ns, policyViolation), &kyvernov1.PolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.PolicyViolation), err
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
func (c *FakePolicyViolations) UpdateStatus(policyViolation *kyvernov1.PolicyViolation) (*kyvernov1.PolicyViolation, error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewUpdateSubresourceAction(policyviolationsResource, "status", c.ns, policyViolation), &kyvernov1.PolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.PolicyViolation), err
|
||||
}
|
||||
|
||||
// Delete takes name of the policyViolation and deletes it. Returns an error if one occurs.
|
||||
func (c *FakePolicyViolations) Delete(name string, options *v1.DeleteOptions) error {
|
||||
_, err := c.Fake.
|
||||
Invokes(testing.NewDeleteAction(policyviolationsResource, c.ns, name), &kyvernov1.PolicyViolation{})
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *FakePolicyViolations) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error {
|
||||
action := testing.NewDeleteCollectionAction(policyviolationsResource, c.ns, listOptions)
|
||||
|
||||
_, err := c.Fake.Invokes(action, &kyvernov1.PolicyViolationList{})
|
||||
return err
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched policyViolation.
|
||||
func (c *FakePolicyViolations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *kyvernov1.PolicyViolation, err error) {
|
||||
obj, err := c.Fake.
|
||||
Invokes(testing.NewPatchSubresourceAction(policyviolationsResource, c.ns, name, pt, data, subresources...), &kyvernov1.PolicyViolation{})
|
||||
|
||||
if obj == nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj.(*kyvernov1.PolicyViolation), err
|
||||
}
|
|
@@ -22,4 +22,4 @@ type ClusterPolicyExpansion interface{}

type ClusterPolicyViolationExpansion interface{}

type NamespacedPolicyViolationExpansion interface{}
type PolicyViolationExpansion interface{}

@@ -29,7 +29,7 @@ type KyvernoV1Interface interface {
	RESTClient() rest.Interface
	ClusterPoliciesGetter
	ClusterPolicyViolationsGetter
	NamespacedPolicyViolationsGetter
	PolicyViolationsGetter
}

// KyvernoV1Client is used to interact with features provided by the kyverno.io group.

@@ -45,8 +45,8 @@ func (c *KyvernoV1Client) ClusterPolicyViolations() ClusterPolicyViolationInterf
	return newClusterPolicyViolations(c)
}

func (c *KyvernoV1Client) NamespacedPolicyViolations(namespace string) NamespacedPolicyViolationInterface {
	return newNamespacedPolicyViolations(c, namespace)
func (c *KyvernoV1Client) PolicyViolations(namespace string) PolicyViolationInterface {
	return newPolicyViolations(c, namespace)
}

// NewForConfig creates a new KyvernoV1Client for the given config.
|
@ -1,191 +0,0 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
scheme "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// NamespacedPolicyViolationsGetter has a method to return a NamespacedPolicyViolationInterface.
|
||||
// A group's client should implement this interface.
|
||||
type NamespacedPolicyViolationsGetter interface {
|
||||
NamespacedPolicyViolations(namespace string) NamespacedPolicyViolationInterface
|
||||
}
|
||||
|
||||
// NamespacedPolicyViolationInterface has methods to work with NamespacedPolicyViolation resources.
|
||||
type NamespacedPolicyViolationInterface interface {
|
||||
Create(*v1.NamespacedPolicyViolation) (*v1.NamespacedPolicyViolation, error)
|
||||
Update(*v1.NamespacedPolicyViolation) (*v1.NamespacedPolicyViolation, error)
|
||||
UpdateStatus(*v1.NamespacedPolicyViolation) (*v1.NamespacedPolicyViolation, error)
|
||||
Delete(name string, options *metav1.DeleteOptions) error
|
||||
DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
|
||||
Get(name string, options metav1.GetOptions) (*v1.NamespacedPolicyViolation, error)
|
||||
List(opts metav1.ListOptions) (*v1.NamespacedPolicyViolationList, error)
|
||||
Watch(opts metav1.ListOptions) (watch.Interface, error)
|
||||
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NamespacedPolicyViolation, err error)
|
||||
NamespacedPolicyViolationExpansion
|
||||
}
|
||||
|
||||
// namespacedPolicyViolations implements NamespacedPolicyViolationInterface
|
||||
type namespacedPolicyViolations struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
}
|
||||
|
||||
// newNamespacedPolicyViolations returns a NamespacedPolicyViolations
|
||||
func newNamespacedPolicyViolations(c *KyvernoV1Client, namespace string) *namespacedPolicyViolations {
|
||||
return &namespacedPolicyViolations{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the namespacedPolicyViolation, and returns the corresponding namespacedPolicyViolation object, and an error if there is any.
|
||||
func (c *namespacedPolicyViolations) Get(name string, options metav1.GetOptions) (result *v1.NamespacedPolicyViolation, err error) {
|
||||
result = &v1.NamespacedPolicyViolation{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of NamespacedPolicyViolations that match those selectors.
|
||||
func (c *namespacedPolicyViolations) List(opts metav1.ListOptions) (result *v1.NamespacedPolicyViolationList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1.NamespacedPolicyViolationList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested namespacedPolicyViolations.
|
||||
func (c *namespacedPolicyViolations) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch()
|
||||
}
|
||||
|
||||
// Create takes the representation of a namespacedPolicyViolation and creates it. Returns the server's representation of the namespacedPolicyViolation, and an error, if there is any.
|
||||
func (c *namespacedPolicyViolations) Create(namespacedPolicyViolation *v1.NamespacedPolicyViolation) (result *v1.NamespacedPolicyViolation, err error) {
|
||||
result = &v1.NamespacedPolicyViolation{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
Body(namespacedPolicyViolation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a namespacedPolicyViolation and updates it. Returns the server's representation of the namespacedPolicyViolation, and an error, if there is any.
|
||||
func (c *namespacedPolicyViolations) Update(namespacedPolicyViolation *v1.NamespacedPolicyViolation) (result *v1.NamespacedPolicyViolation, err error) {
|
||||
result = &v1.NamespacedPolicyViolation{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
Name(namespacedPolicyViolation.Name).
|
||||
Body(namespacedPolicyViolation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
|
||||
func (c *namespacedPolicyViolations) UpdateStatus(namespacedPolicyViolation *v1.NamespacedPolicyViolation) (result *v1.NamespacedPolicyViolation, err error) {
|
||||
result = &v1.NamespacedPolicyViolation{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
Name(namespacedPolicyViolation.Name).
|
||||
SubResource("status").
|
||||
Body(namespacedPolicyViolation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the namespacedPolicyViolation and deletes it. Returns an error if one occurs.
|
||||
func (c *namespacedPolicyViolations) Delete(name string, options *metav1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
Name(name).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *namespacedPolicyViolations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOptions.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched namespacedPolicyViolation.
|
||||
func (c *namespacedPolicyViolations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.NamespacedPolicyViolation, err error) {
|
||||
result = &v1.NamespacedPolicyViolation{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("namespacedpolicyviolations").
|
||||
SubResource(subresources...).
|
||||
Name(name).
|
||||
Body(data).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
|
@ -0,0 +1,191 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by client-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
scheme "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
types "k8s.io/apimachinery/pkg/types"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
// PolicyViolationsGetter has a method to return a PolicyViolationInterface.
|
||||
// A group's client should implement this interface.
|
||||
type PolicyViolationsGetter interface {
|
||||
PolicyViolations(namespace string) PolicyViolationInterface
|
||||
}
|
||||
|
||||
// PolicyViolationInterface has methods to work with PolicyViolation resources.
|
||||
type PolicyViolationInterface interface {
|
||||
Create(*v1.PolicyViolation) (*v1.PolicyViolation, error)
|
||||
Update(*v1.PolicyViolation) (*v1.PolicyViolation, error)
|
||||
UpdateStatus(*v1.PolicyViolation) (*v1.PolicyViolation, error)
|
||||
Delete(name string, options *metav1.DeleteOptions) error
|
||||
DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error
|
||||
Get(name string, options metav1.GetOptions) (*v1.PolicyViolation, error)
|
||||
List(opts metav1.ListOptions) (*v1.PolicyViolationList, error)
|
||||
Watch(opts metav1.ListOptions) (watch.Interface, error)
|
||||
Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PolicyViolation, err error)
|
||||
PolicyViolationExpansion
|
||||
}
|
||||
|
||||
// policyViolations implements PolicyViolationInterface
|
||||
type policyViolations struct {
|
||||
client rest.Interface
|
||||
ns string
|
||||
}
|
||||
|
||||
// newPolicyViolations returns a PolicyViolations
|
||||
func newPolicyViolations(c *KyvernoV1Client, namespace string) *policyViolations {
|
||||
return &policyViolations{
|
||||
client: c.RESTClient(),
|
||||
ns: namespace,
|
||||
}
|
||||
}
|
||||
|
||||
// Get takes name of the policyViolation, and returns the corresponding policyViolation object, and an error if there is any.
|
||||
func (c *policyViolations) Get(name string, options metav1.GetOptions) (result *v1.PolicyViolation, err error) {
|
||||
result = &v1.PolicyViolation{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
Name(name).
|
||||
VersionedParams(&options, scheme.ParameterCodec).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// List takes label and field selectors, and returns the list of PolicyViolations that match those selectors.
|
||||
func (c *policyViolations) List(opts metav1.ListOptions) (result *v1.PolicyViolationList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1.PolicyViolationList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Watch returns a watch.Interface that watches the requested policyViolations.
|
||||
func (c *policyViolations) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch()
|
||||
}
|
||||
|
||||
// Create takes the representation of a policyViolation and creates it. Returns the server's representation of the policyViolation, and an error, if there is any.
|
||||
func (c *policyViolations) Create(policyViolation *v1.PolicyViolation) (result *v1.PolicyViolation, err error) {
|
||||
result = &v1.PolicyViolation{}
|
||||
err = c.client.Post().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
Body(policyViolation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Update takes the representation of a policyViolation and updates it. Returns the server's representation of the policyViolation, and an error, if there is any.
|
||||
func (c *policyViolations) Update(policyViolation *v1.PolicyViolation) (result *v1.PolicyViolation, err error) {
|
||||
result = &v1.PolicyViolation{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
Name(policyViolation.Name).
|
||||
Body(policyViolation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// UpdateStatus was generated because the type contains a Status member.
|
||||
// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus().
|
||||
|
||||
func (c *policyViolations) UpdateStatus(policyViolation *v1.PolicyViolation) (result *v1.PolicyViolation, err error) {
|
||||
result = &v1.PolicyViolation{}
|
||||
err = c.client.Put().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
Name(policyViolation.Name).
|
||||
SubResource("status").
|
||||
Body(policyViolation).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
||||
|
||||
// Delete takes name of the policyViolation and deletes it. Returns an error if one occurs.
|
||||
func (c *policyViolations) Delete(name string, options *metav1.DeleteOptions) error {
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
Name(name).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *policyViolations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOptions.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
}
|
||||
|
||||
// Patch applies the patch and returns the patched policyViolation.
|
||||
func (c *policyViolations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.PolicyViolation, err error) {
|
||||
result = &v1.PolicyViolation{}
|
||||
err = c.client.Patch(pt).
|
||||
Namespace(c.ns).
|
||||
Resource("policyviolations").
|
||||
SubResource(subresources...).
|
||||
Name(name).
|
||||
Body(data).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
}
|
|
@@ -57,8 +57,8 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource
		return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().ClusterPolicies().Informer()}, nil
	case v1.SchemeGroupVersion.WithResource("clusterpolicyviolations"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().ClusterPolicyViolations().Informer()}, nil
	case v1.SchemeGroupVersion.WithResource("namespacedpolicyviolations"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().NamespacedPolicyViolations().Informer()}, nil
	case v1.SchemeGroupVersion.WithResource("policyviolations"):
		return &genericInformer{resource: resource.GroupResource(), informer: f.Kyverno().V1().PolicyViolations().Informer()}, nil

	}

@@ -28,8 +28,8 @@ type Interface interface {
	ClusterPolicies() ClusterPolicyInformer
	// ClusterPolicyViolations returns a ClusterPolicyViolationInformer.
	ClusterPolicyViolations() ClusterPolicyViolationInformer
	// NamespacedPolicyViolations returns a NamespacedPolicyViolationInformer.
	NamespacedPolicyViolations() NamespacedPolicyViolationInformer
	// PolicyViolations returns a PolicyViolationInformer.
	PolicyViolations() PolicyViolationInformer
}

type version struct {

@@ -53,7 +53,7 @@ func (v *version) ClusterPolicyViolations() ClusterPolicyViolationInformer {
	return &clusterPolicyViolationInformer{factory: v.factory, tweakListOptions: v.tweakListOptions}
}

// NamespacedPolicyViolations returns a NamespacedPolicyViolationInformer.
func (v *version) NamespacedPolicyViolations() NamespacedPolicyViolationInformer {
	return &namespacedPolicyViolationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
// PolicyViolations returns a PolicyViolationInformer.
func (v *version) PolicyViolations() PolicyViolationInformer {
	return &policyViolationInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions}
}

|
@ -1,89 +0,0 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
internalinterfaces "github.com/nirmata/kyverno/pkg/client/informers/externalversions/internalinterfaces"
|
||||
v1 "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// NamespacedPolicyViolationInformer provides access to a shared informer and lister for
|
||||
// NamespacedPolicyViolations.
|
||||
type NamespacedPolicyViolationInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.NamespacedPolicyViolationLister
|
||||
}
|
||||
|
||||
type namespacedPolicyViolationInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewNamespacedPolicyViolationInformer constructs a new informer for NamespacedPolicyViolation type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewNamespacedPolicyViolationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredNamespacedPolicyViolationInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredNamespacedPolicyViolationInformer constructs a new informer for NamespacedPolicyViolation type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredNamespacedPolicyViolationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.KyvernoV1().NamespacedPolicyViolations(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.KyvernoV1().NamespacedPolicyViolations(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&kyvernov1.NamespacedPolicyViolation{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *namespacedPolicyViolationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredNamespacedPolicyViolationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *namespacedPolicyViolationInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&kyvernov1.NamespacedPolicyViolation{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *namespacedPolicyViolationInformer) Lister() v1.NamespacedPolicyViolationLister {
|
||||
return v1.NewNamespacedPolicyViolationLister(f.Informer().GetIndexer())
|
||||
}
|
|
@ -0,0 +1,89 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by informer-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
time "time"
|
||||
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
versioned "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
internalinterfaces "github.com/nirmata/kyverno/pkg/client/informers/externalversions/internalinterfaces"
|
||||
v1 "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
runtime "k8s.io/apimachinery/pkg/runtime"
|
||||
watch "k8s.io/apimachinery/pkg/watch"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// PolicyViolationInformer provides access to a shared informer and lister for
|
||||
// PolicyViolations.
|
||||
type PolicyViolationInformer interface {
|
||||
Informer() cache.SharedIndexInformer
|
||||
Lister() v1.PolicyViolationLister
|
||||
}
|
||||
|
||||
type policyViolationInformer struct {
|
||||
factory internalinterfaces.SharedInformerFactory
|
||||
tweakListOptions internalinterfaces.TweakListOptionsFunc
|
||||
namespace string
|
||||
}
|
||||
|
||||
// NewPolicyViolationInformer constructs a new informer for PolicyViolation type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewPolicyViolationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer {
|
||||
return NewFilteredPolicyViolationInformer(client, namespace, resyncPeriod, indexers, nil)
|
||||
}
|
||||
|
||||
// NewFilteredPolicyViolationInformer constructs a new informer for PolicyViolation type.
|
||||
// Always prefer using an informer factory to get a shared informer instead of getting an independent
|
||||
// one. This reduces memory footprint and number of connections to the server.
|
||||
func NewFilteredPolicyViolationInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer {
|
||||
return cache.NewSharedIndexInformer(
|
||||
&cache.ListWatch{
|
||||
ListFunc: func(options metav1.ListOptions) (runtime.Object, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.KyvernoV1().PolicyViolations(namespace).List(options)
|
||||
},
|
||||
WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) {
|
||||
if tweakListOptions != nil {
|
||||
tweakListOptions(&options)
|
||||
}
|
||||
return client.KyvernoV1().PolicyViolations(namespace).Watch(options)
|
||||
},
|
||||
},
|
||||
&kyvernov1.PolicyViolation{},
|
||||
resyncPeriod,
|
||||
indexers,
|
||||
)
|
||||
}
|
||||
|
||||
func (f *policyViolationInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer {
|
||||
return NewFilteredPolicyViolationInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions)
|
||||
}
|
||||
|
||||
func (f *policyViolationInformer) Informer() cache.SharedIndexInformer {
|
||||
return f.factory.InformerFor(&kyvernov1.PolicyViolation{}, f.defaultInformer)
|
||||
}
|
||||
|
||||
func (f *policyViolationInformer) Lister() v1.PolicyViolationLister {
|
||||
return v1.NewPolicyViolationLister(f.Informer().GetIndexer())
|
||||
}
|
|
@@ -30,7 +30,7 @@ import (
// ClusterPolicyLister.
type ClusterPolicyListerExpansion interface {
	GetPolicyForPolicyViolation(pv *kyvernov1.ClusterPolicyViolation) ([]*kyvernov1.ClusterPolicy, error)
	GetPolicyForNamespacedPolicyViolation(pv *kyvernov1.NamespacedPolicyViolation) ([]*kyvernov1.ClusterPolicy, error)
	GetPolicyForNamespacedPolicyViolation(pv *kyvernov1.PolicyViolation) ([]*kyvernov1.ClusterPolicy, error)
	ListResources(selector labels.Selector) (ret []*kyvernov1.ClusterPolicy, err error)
}

@@ -41,13 +41,13 @@ type ClusterPolicyViolationListerExpansion interface {
	ListResources(selector labels.Selector) (ret []*kyvernov1.ClusterPolicyViolation, err error)
}

// NamespacedPolicyViolationListerExpansion allows custom methods to be added to
// NamespacedPolicyViolationLister.
type NamespacedPolicyViolationListerExpansion interface{}
// PolicyViolationListerExpansion allows custom methods to be added to
// PolicyViolationLister.
type PolicyViolationListerExpansion interface{}

// NamespacedPolicyViolationNamespaceListerExpansion allows custom methods to be added to
// NamespacedPolicyViolationNamespaceLister.
type NamespacedPolicyViolationNamespaceListerExpansion interface{}
// PolicyViolationNamespaceListerExpansion allows custom methods to be added to
// PolicyViolationNamespaceLister.
type PolicyViolationNamespaceListerExpansion interface{}

//ListResources is a wrapper to List and adds the resource kind information
// as the lister is specific to a gvk we can harcode the values here

@@ -107,7 +107,7 @@ func (pl *clusterPolicyLister) GetPolicyForPolicyViolation(pv *kyvernov1.Cluster

}

func (pl *clusterPolicyLister) GetPolicyForNamespacedPolicyViolation(pv *kyvernov1.NamespacedPolicyViolation) ([]*kyvernov1.ClusterPolicy, error) {
func (pl *clusterPolicyLister) GetPolicyForNamespacedPolicyViolation(pv *kyvernov1.PolicyViolation) ([]*kyvernov1.ClusterPolicy, error) {
	if len(pv.Labels) == 0 {
		return nil, fmt.Errorf("no Policy found for PolicyViolation %v because it has no labels", pv.Name)
	}
|
@ -1,94 +0,0 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by lister-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// NamespacedPolicyViolationLister helps list NamespacedPolicyViolations.
|
||||
type NamespacedPolicyViolationLister interface {
|
||||
// List lists all NamespacedPolicyViolations in the indexer.
|
||||
List(selector labels.Selector) (ret []*v1.NamespacedPolicyViolation, err error)
|
||||
// NamespacedPolicyViolations returns an object that can list and get NamespacedPolicyViolations.
|
||||
NamespacedPolicyViolations(namespace string) NamespacedPolicyViolationNamespaceLister
|
||||
NamespacedPolicyViolationListerExpansion
|
||||
}
|
||||
|
||||
// namespacedPolicyViolationLister implements the NamespacedPolicyViolationLister interface.
|
||||
type namespacedPolicyViolationLister struct {
|
||||
indexer cache.Indexer
|
||||
}
|
||||
|
||||
// NewNamespacedPolicyViolationLister returns a new NamespacedPolicyViolationLister.
|
||||
func NewNamespacedPolicyViolationLister(indexer cache.Indexer) NamespacedPolicyViolationLister {
|
||||
return &namespacedPolicyViolationLister{indexer: indexer}
|
||||
}
|
||||
|
||||
// List lists all NamespacedPolicyViolations in the indexer.
|
||||
func (s *namespacedPolicyViolationLister) List(selector labels.Selector) (ret []*v1.NamespacedPolicyViolation, err error) {
|
||||
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.NamespacedPolicyViolation))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// NamespacedPolicyViolations returns an object that can list and get NamespacedPolicyViolations.
|
||||
func (s *namespacedPolicyViolationLister) NamespacedPolicyViolations(namespace string) NamespacedPolicyViolationNamespaceLister {
|
||||
return namespacedPolicyViolationNamespaceLister{indexer: s.indexer, namespace: namespace}
|
||||
}
|
||||
|
||||
// NamespacedPolicyViolationNamespaceLister helps list and get NamespacedPolicyViolations.
|
||||
type NamespacedPolicyViolationNamespaceLister interface {
|
||||
// List lists all NamespacedPolicyViolations in the indexer for a given namespace.
|
||||
List(selector labels.Selector) (ret []*v1.NamespacedPolicyViolation, err error)
|
||||
// Get retrieves the NamespacedPolicyViolation from the indexer for a given namespace and name.
|
||||
Get(name string) (*v1.NamespacedPolicyViolation, error)
|
||||
NamespacedPolicyViolationNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// namespacedPolicyViolationNamespaceLister implements the NamespacedPolicyViolationNamespaceLister
|
||||
// interface.
|
||||
type namespacedPolicyViolationNamespaceLister struct {
|
||||
indexer cache.Indexer
|
||||
namespace string
|
||||
}
|
||||
|
||||
// List lists all NamespacedPolicyViolations in the indexer for a given namespace.
|
||||
func (s namespacedPolicyViolationNamespaceLister) List(selector labels.Selector) (ret []*v1.NamespacedPolicyViolation, err error) {
|
||||
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.NamespacedPolicyViolation))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Get retrieves the NamespacedPolicyViolation from the indexer for a given namespace and name.
|
||||
func (s namespacedPolicyViolationNamespaceLister) Get(name string) (*v1.NamespacedPolicyViolation, error) {
|
||||
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, errors.NewNotFound(v1.Resource("namespacedpolicyviolation"), name)
|
||||
}
|
||||
return obj.(*v1.NamespacedPolicyViolation), nil
|
||||
}
|
94
pkg/client/listers/kyverno/v1/policyviolation.go
Normal file
|
@ -0,0 +1,94 @@
|
|||
/*
|
||||
Copyright The Kubernetes Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
// Code generated by lister-gen. DO NOT EDIT.
|
||||
|
||||
package v1
|
||||
|
||||
import (
|
||||
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// PolicyViolationLister helps list PolicyViolations.
|
||||
type PolicyViolationLister interface {
|
||||
// List lists all PolicyViolations in the indexer.
|
||||
List(selector labels.Selector) (ret []*v1.PolicyViolation, err error)
|
||||
// PolicyViolations returns an object that can list and get PolicyViolations.
|
||||
PolicyViolations(namespace string) PolicyViolationNamespaceLister
|
||||
PolicyViolationListerExpansion
|
||||
}
|
||||
|
||||
// policyViolationLister implements the PolicyViolationLister interface.
|
||||
type policyViolationLister struct {
|
||||
indexer cache.Indexer
|
||||
}
|
||||
|
||||
// NewPolicyViolationLister returns a new PolicyViolationLister.
|
||||
func NewPolicyViolationLister(indexer cache.Indexer) PolicyViolationLister {
|
||||
return &policyViolationLister{indexer: indexer}
|
||||
}
|
||||
|
||||
// List lists all PolicyViolations in the indexer.
|
||||
func (s *policyViolationLister) List(selector labels.Selector) (ret []*v1.PolicyViolation, err error) {
|
||||
err = cache.ListAll(s.indexer, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.PolicyViolation))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// PolicyViolations returns an object that can list and get PolicyViolations.
|
||||
func (s *policyViolationLister) PolicyViolations(namespace string) PolicyViolationNamespaceLister {
|
||||
return policyViolationNamespaceLister{indexer: s.indexer, namespace: namespace}
|
||||
}
|
||||
|
||||
// PolicyViolationNamespaceLister helps list and get PolicyViolations.
|
||||
type PolicyViolationNamespaceLister interface {
|
||||
// List lists all PolicyViolations in the indexer for a given namespace.
|
||||
List(selector labels.Selector) (ret []*v1.PolicyViolation, err error)
|
||||
// Get retrieves the PolicyViolation from the indexer for a given namespace and name.
|
||||
Get(name string) (*v1.PolicyViolation, error)
|
||||
PolicyViolationNamespaceListerExpansion
|
||||
}
|
||||
|
||||
// policyViolationNamespaceLister implements the PolicyViolationNamespaceLister
|
||||
// interface.
|
||||
type policyViolationNamespaceLister struct {
|
||||
indexer cache.Indexer
|
||||
namespace string
|
||||
}
|
||||
|
||||
// List lists all PolicyViolations in the indexer for a given namespace.
|
||||
func (s policyViolationNamespaceLister) List(selector labels.Selector) (ret []*v1.PolicyViolation, err error) {
|
||||
err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
|
||||
ret = append(ret, m.(*v1.PolicyViolation))
|
||||
})
|
||||
return ret, err
|
||||
}
|
||||
|
||||
// Get retrieves the PolicyViolation from the indexer for a given namespace and name.
|
||||
func (s policyViolationNamespaceLister) Get(name string) (*v1.PolicyViolation, error) {
|
||||
obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, errors.NewNotFound(v1.Resource("policyviolation"), name)
|
||||
}
|
||||
return obj.(*v1.PolicyViolation), nil
|
||||
}
|
|
@@ -1,6 +1,12 @@
package config

import "flag"
import (
	"flag"

	"github.com/golang/glog"
	rest "k8s.io/client-go/rest"
	clientcmd "k8s.io/client-go/tools/clientcmd"
)

const (
	// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml

@@ -70,3 +76,13 @@ func LogDefaultFlags() {
	flag.Set("stderrthreshold", "WARNING")
	flag.Set("v", "2")
}

//CreateClientConfig creates client config
func CreateClientConfig(kubeconfig string) (*rest.Config, error) {
	if kubeconfig == "" {
		glog.Info("Using in-cluster configuration")
		return rest.InClusterConfig()
	}
	glog.Infof("Using configuration from '%s'", kubeconfig)
	return clientcmd.BuildConfigFromFlags("", kubeconfig)
}

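A usage sketch, not part of this commit, for the new CreateClientConfig helper; the import path github.com/nirmata/kyverno/pkg/config, the flag name, and the use of kubernetes.NewForConfig are assumptions made for illustration.

package main

import (
	"flag"

	"github.com/golang/glog"
	"github.com/nirmata/kyverno/pkg/config"
	"k8s.io/client-go/kubernetes"
)

func main() {
	// An empty path falls back to the in-cluster configuration, as shown above.
	kubeconfig := flag.String("kubeconfig", "", "path to a kubeconfig file")
	flag.Parse()

	restConfig, err := config.CreateClientConfig(*kubeconfig)
	if err != nil {
		glog.Fatalf("error building client config: %v", err)
	}

	clientset, err := kubernetes.NewForConfig(restConfig)
	if err != nil {
		glog.Fatalf("error creating kubernetes clientset: %v", err)
	}
	_ = clientset // handed to controllers and informers elsewhere (not shown)
}
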
@@ -30,7 +30,7 @@ type ConfigData struct {
	// configuration data
	filters []k8Resource
	// hasynced
	cmListerSycned cache.InformerSynced
	cmSycned cache.InformerSynced
}

// ToFilter checks if the given resource is set to be filtered in the configuration

@@ -57,9 +57,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
		glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
	}
	cd := ConfigData{
		client: rclient,
		cmName: os.Getenv(cmNameEnv),
		cmListerSycned: cmInformer.Informer().HasSynced,
		client: rclient,
		cmName: os.Getenv(cmNameEnv),
		cmSycned: cmInformer.Informer().HasSynced,
	}
	//TODO: this has been added to backward support command line arguments
	// will be removed in future and the configuration will be set only via configmaps

@@ -76,12 +76,12 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
	return &cd
}

func (cd *ConfigData) Run(stopCh <-chan struct{}) error {
//Run checks syncing
func (cd *ConfigData) Run(stopCh <-chan struct{}) {
	// wait for cache to populate first time
	if !cache.WaitForCacheSync(stopCh, cd.cmListerSycned) {
		return fmt.Errorf("Configuration: Failed to sync informer cache")
	if !cache.WaitForCacheSync(stopCh, cd.cmSycned) {
		glog.Error("configuration: failed to sync informer cache")
	}
	return nil
}

func (cd *ConfigData) addCM(obj interface{}) {

@@ -15,6 +15,31 @@ import (
	"k8s.io/client-go/rest"
)

// InitTLSPemPair Loads or creates PEM private key and TLS certificate for webhook server.
// Created pair is stored in cluster's secret.
// Returns struct with key/certificate pair.
func (c *Client) InitTLSPemPair(configuration *rest.Config) (*tls.TlsPemPair, error) {
	certProps, err := c.GetTLSCertProps(configuration)
	if err != nil {
		return nil, err
	}
	tlsPair := c.ReadTlsPair(certProps)
	if tls.IsTlsPairShouldBeUpdated(tlsPair) {
		glog.Info("Generating new key/certificate pair for TLS")
		tlsPair, err = c.GenerateTlsPemPair(certProps)
		if err != nil {
			return nil, err
		}
		if err = c.WriteTlsPair(certProps, tlsPair); err != nil {
			return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
		}
		return tlsPair, nil
	}

	glog.Infoln("Using existing TLS key/certificate pair")
	return tlsPair, nil
}

//GenerateTlsPemPair Issues TLS certificate for webhook server using given PEM private key
// Returns signed and approved TLS certificate in PEM format
func (c *Client) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.TlsPemPair, error) {

@@ -57,7 +82,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat

	for _, csr := range csrList.Items {
		if csr.GetName() == req.ObjectMeta.Name {
			err := c.DeleteResouce(CSRs, "", csr.GetName(), false)
			err := c.DeleteResource(CSRs, "", csr.GetName(), false)
			if err != nil {
				return nil, fmt.Errorf("Unable to delete existing certificate request: %v", err)
			}

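An illustrative fragment (not from this commit) of startup wiring for the InitTLSPemPair helper shown above; dclient and restConfig are assumed to exist in the caller and are not defined here.

// Sketch only: dclient is assumed to be a *client.Client built from restConfig.
tlsPair, err := dclient.InitTLSPemPair(restConfig)
if err != nil {
	glog.Fatalf("failed to initialize webhook TLS key/certificate pair: %v", err)
}
_ = tlsPair // passed on to the webhook server's TLS configuration (not shown)
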
@ -21,7 +21,6 @@ import (
|
|||
"k8s.io/client-go/discovery/cached/memory"
|
||||
"k8s.io/client-go/dynamic"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
csrtype "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
|
||||
event "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
|
@ -52,8 +51,6 @@ func NewClient(config *rest.Config) (*Client, error) {
|
|||
kclient: kclient,
|
||||
}
|
||||
// Set discovery client
|
||||
//
|
||||
|
||||
discoveryClient := ServerPreferredResources{memory.NewMemCacheClient(kclient.Discovery())}
|
||||
client.SetDiscovery(discoveryClient)
|
||||
return &client, nil
|
||||
|
@ -72,10 +69,6 @@ func (c *Client) GetKubePolicyDeployment() (*apps.Deployment, error) {
|
|||
return &deploy, nil
|
||||
}
|
||||
|
||||
func (c *Client) GetAppsV1Interface() appsv1.AppsV1Interface {
|
||||
return c.kclient.AppsV1()
|
||||
}
|
||||
|
||||
//GetEventsInterface provides typed interface for events
|
||||
//TODO: can we use dynamic client to fetch the typed interface
|
||||
// or generate a kube client value to access the interface
|
||||
|
@ -115,7 +108,7 @@ func (c *Client) GetResource(kind string, namespace string, name string, subreso
|
|||
return c.getResourceInterface(kind, namespace).Get(name, meta.GetOptions{}, subresources...)
|
||||
}
|
||||
|
||||
//Patch
|
||||
//PatchResource patches the resource
|
||||
func (c *Client) PatchResource(kind string, namespace string, name string, patch []byte) (*unstructured.Unstructured, error) {
|
||||
return c.getResourceInterface(kind, namespace).Patch(name, patchTypes.JSONPatchType, patch, meta.PatchOptions{})
|
||||
}
|
||||
|
@ -130,8 +123,8 @@ func (c *Client) ListResource(kind string, namespace string, lselector *meta.Lab
|
|||
return c.getResourceInterface(kind, namespace).List(options)
|
||||
}
|
||||
|
||||
// DeleteResouce deletes the specified resource
|
||||
func (c *Client) DeleteResouce(kind string, namespace string, name string, dryRun bool) error {
|
||||
// DeleteResource deletes the specified resource
|
||||
func (c *Client) DeleteResource(kind string, namespace string, name string, dryRun bool) error {
|
||||
options := meta.DeleteOptions{}
|
||||
if dryRun {
|
||||
options = meta.DeleteOptions{DryRun: []string{meta.DryRunAll}}
|
||||
|
|
|
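A hedged usage sketch for the renamed DeleteResource helper above, showing the dryRun flag that maps to meta.DryRunAll. The cleanupPod helper, the "Pod" kind, and the package import are illustrative assumptions; only the DeleteResource signature comes from the diff.

package main

import (
	"github.com/golang/glog"

	client "github.com/nirmata/kyverno/pkg/dclient" // assumed import path
)

// cleanupPod first issues a server-side dry-run delete to validate the
// request, then performs the real delete with dryRun set to false.
func cleanupPod(c *client.Client, namespace, name string) error {
	if err := c.DeleteResource("Pod", namespace, name, true); err != nil {
		glog.Errorf("dry-run delete of %s/%s failed: %v", namespace, name, err)
		return err
	}
	return c.DeleteResource("Pod", namespace, name, false)
}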
@ -74,7 +74,7 @@ func TestCRUDResource(t *testing.T) {
t.Errorf("ListResource not working: %s", err)
}
// DeleteResouce
err = f.client.DeleteResouce("thekind", "ns-foo", "name-bar", false)
err = f.client.DeleteResource("thekind", "ns-foo", "name-bar", false)
if err != nil {
t.Errorf("DeleteResouce not working: %s", err)
}

@ -1,11 +0,0 @@
package client

import (
kyvernov "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
)

//CreatePolicyViolation create a Policy Violation resource
func (c *Client) CreatePolicyViolation(pv kyvernov.ClusterPolicyViolation) error {
_, err := c.CreateResource("PolicyViolation", ",", pv, false)
return err
}
@ -35,15 +35,14 @@ func processOverlay(rule kyverno.Rule, resource unstructured.Unstructured) (resp
// condition key is not present in the resource, don't apply this rule
// consider as success
case conditionNotPresent:
glog.V(3).Infof("Resource %s/%s/%s: %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg())
glog.V(3).Infof("Skip applying rule '%s' on resource '%s/%s/%s': %s", rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg())
response.Success = true
return response, resource
// conditions are not met, don't apply this rule
// consider as failure
case conditionFailure:
glog.Errorf("Resource %s/%s/%s does not meet the conditions in the rule %s with overlay pattern %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), rule.Name, rule.Mutation.Overlay)
glog.V(3).Infof("Skip applying rule '%s' on resource '%s/%s/%s': %s", rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg())
//TODO: send zero response and not consider this as applied?
response.Success = false
response.Success = true
response.Message = overlayerr.ErrorMsg()
return response, resource
// rule application failed

@ -106,12 +105,12 @@ func processOverlayPatches(resource, overlay interface{}) ([][]byte, overlayErro
// anchor key does not exist in the resource, skip applying policy
case conditionNotPresent:
glog.V(4).Infof("Mutate rule: skip applying policy: %v at %s", overlayerr, path)
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("policy not applied: %v at %s", overlayerr.ErrorMsg(), path))
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("Policy not applied, condition tag not present: %v at %s", overlayerr.ErrorMsg(), path))
// anchor key is not satisfied in the resource, skip applying policy
case conditionFailure:
// anchor key is not satisfied in the resource, skip applying policy
glog.V(4).Infof("Mutate rule: failed to validate condition at %s, err: %v", path, overlayerr)
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("Conditions are not met at %s, %v", path, overlayerr))
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("Policy not applied, conditions are not met at %s, %v", path, overlayerr))
}
}

@ -110,8 +110,8 @@ func validateConditionAnchorMap(resourceMap, anchors map[string]interface{}, pat
// resource - A: B2
func compareOverlay(resource, overlay interface{}, path string) (string, overlayError) {
if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
glog.V(4).Infof("Found anchor on different types of element: overlay %T, resource %T\nSkip processing overlay.", overlay, resource)
return path, newOverlayError(conditionFailure, fmt.Sprintf("Found anchor on different types of element: overlay %T, resource %T\nSkip processing overlay.", overlay, resource))
glog.V(4).Infof("Found anchor on different types of element: overlay %T, resource %T", overlay, resource)
return path, newOverlayError(conditionFailure, fmt.Sprintf("Found anchor on different types of element: overlay %T, resource %T", overlay, resource))
}

switch typedOverlay := overlay.(type) {

@ -122,7 +122,7 @@ func compareOverlay(resource, overlay interface{}, path string) (string, overlay
curPath := path + noAnchorKey + "/"
resourceVal, ok := typedResource[noAnchorKey]
if !ok {
return curPath, newOverlayError(conditionFailure, fmt.Sprintf("field %s is not present", noAnchorKey))
return curPath, newOverlayError(conditionFailure, fmt.Sprintf("Field %s is not present", noAnchorKey))
}
if newPath, err := compareOverlay(resourceVal, overlayVal, curPath); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err

@ -140,10 +140,10 @@ func compareOverlay(resource, overlay interface{}, path string) (string, overlay
case string, float64, int, int64, bool, nil:
if !ValidateValueWithPattern(resource, overlay) {
glog.V(4).Infof("Mutate rule: failed validating value %v with overlay %v", resource, overlay)
return path, newOverlayError(conditionFailure, fmt.Sprintf("failed validating value %v with overlay %v", resource, overlay))
return path, newOverlayError(conditionFailure, fmt.Sprintf("Failed validating value %v with overlay %v", resource, overlay))
}
default:
return path, newOverlayError(conditionFailure, fmt.Sprintf("overlay has unknown type %T, value %v", overlay, overlay))
return path, newOverlayError(conditionFailure, fmt.Sprintf("Overlay has unknown type %T, value %v", overlay, overlay))
}

return "", overlayError{}

@ -194,7 +194,7 @@ func TestMeetConditions_anchosInSameObject(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)

_, err := meetConditions(resource, overlay)
assert.Error(t, err, "[overlayError:0] failed validating value 443 with overlay 444")
assert.Error(t, err, "[overlayError:0] Failed validating value 443 with overlay 444")
}

func TestMeetConditions_anchorOnPeer(t *testing.T) {

@ -444,7 +444,7 @@ func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)

_, err := meetConditions(resource, overlay)
assert.Error(t, err, "[overlayError:0] failed validating value true with overlay false")
assert.Error(t, err, "[overlayError:0] Failed validating value true with overlay false")

overlayRaw = []byte(`{
"spec": {

@ -594,7 +594,7 @@ func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)

_, err = meetConditions(resource, overlay)
assert.Error(t, err, "[overlayError:0] failed validating value ENV_VALUE with overlay ENV_VALUE1")
assert.Error(t, err, "[overlayError:0] Failed validating value ENV_VALUE with overlay ENV_VALUE1")
}

func TestMeetConditions_AtleastOneExist(t *testing.T) {
@ -494,7 +494,7 @@ func TestProcessOverlayPatches_ImagePullPolicy(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)

patches, err = processOverlayPatches(resource, overlay)
assert.Error(t, err, "[overlayError:0] Conditions are not met at /spec/template/metadata/labels/app/, [overlayError:0] failed validating value nginx with overlay nginx1")
assert.Error(t, err, "[overlayError:0] Policy not applied, conditions are not met at /spec/template/metadata/labels/app/, [overlayError:0] Failed validating value nginx with overlay nginx1")
assert.Assert(t, len(patches) == 0)
}

@ -807,7 +807,7 @@ func TestProcessOverlayPatches_anchorOnPeer(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)

patches, err = processOverlayPatches(resource, overlay)
assert.Error(t, err, "[overlayError:0] Conditions are not met at /subsets/0/ports/0/port/, [overlayError:0] failed validating value 443 with overlay 444")
assert.Error(t, err, "[overlayError:0] Policy not applied, conditions are not met at /subsets/0/ports/0/port/, [overlayError:0] Failed validating value 443 with overlay 444")
assert.Assert(t, len(patches) == 0)
}

@ -60,7 +60,7 @@ func ValidateValueWithPattern(value, pattern interface{}) bool {
glog.Warning("Arrays as patterns are not supported")
return false
default:
glog.Warningf("Unknown type as pattern: %T\n", pattern)
glog.Warningf("Unknown type as pattern: %v", typedPattern)
return false
}
}
@ -6,9 +6,11 @@ import (
"reflect"
"regexp"
"strconv"
"strings"

kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/anchor"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@ -60,6 +62,11 @@ func Validate(p kyverno.ClusterPolicy) error {
}

func validateResources(rule kyverno.Rule) (string, error) {
// validate userInfo in match and exclude
if path, err := validateUserInfo(rule); err != nil {
return fmt.Sprintf("resources.%s", path), err
}

// matched resources
if path, err := validateMatchedResourceDescription(rule.MatchResources.ResourceDescription); err != nil {
return fmt.Sprintf("resources.%s", path), err

@ -127,6 +134,57 @@ func validateMatchedResourceDescription(rd kyverno.ResourceDescription) (string,
return "", nil
}

func validateUserInfo(rule kyverno.Rule) (string, error) {
if err := validateRoles(rule.MatchResources.Roles); err != nil {
return "match.roles", err
}

if err := validateSubjects(rule.MatchResources.Subjects); err != nil {
return "match.subjects", err
}

if err := validateRoles(rule.ExcludeResources.Roles); err != nil {
return "exclude.roles", err
}

if err := validateSubjects(rule.ExcludeResources.Subjects); err != nil {
return "exclude.subjects", err
}

return "", nil
}

// a role must in format namespace:name
func validateRoles(roles []string) error {
if len(roles) == 0 {
return nil
}

for _, r := range roles {
role := strings.Split(r, ":")
if len(role) != 2 {
return fmt.Errorf("invalid role %s, expect namespace:name", r)
}
}
return nil
}

// a namespace should be set in kind ServiceAccount of a subject
func validateSubjects(subjects []rbacv1.Subject) error {
if len(subjects) == 0 {
return nil
}

for _, subject := range subjects {
if subject.Kind == "ServiceAccount" {
if subject.Namespace == "" {
return fmt.Errorf("service account %s in subject expects a namespace", subject.Name)
}
}
}
return nil
}

func validateExcludeResourceDescription(rd kyverno.ResourceDescription) (string, error) {
if reflect.DeepEqual(rd, kyverno.ResourceDescription{}) {
// exclude is not mandatory
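A small self-contained sketch of the role-format check introduced above: every role entry must be "namespace:name". The checkRoleFormat helper and the sample values are illustrative only; the splitting logic and error message mirror validateRoles from the diff.

package main

import (
	"fmt"
	"strings"
)

// checkRoleFormat mirrors validateRoles: anything that does not split into
// exactly two parts around ":" is rejected.
func checkRoleFormat(roles []string) error {
	for _, r := range roles {
		if parts := strings.Split(r, ":"); len(parts) != 2 {
			return fmt.Errorf("invalid role %s, expect namespace:name", r)
		}
	}
	return nil
}

func main() {
	fmt.Println(checkRoleFormat([]string{"kube-system:admin"})) // <nil>
	fmt.Println(checkRoleFormat([]string{"admin"}))             // invalid role admin, expect namespace:name
}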
@ -1120,3 +1120,63 @@ func Test_Validate_ErrorFormat(t *testing.T) {
err = Validate(policy)
assert.Assert(t, err != nil)
}

func Test_Validate_EmptyUserInfo(t *testing.T) {
rawRule := []byte(`
{
"name": "test",
"match": {
"subjects": null
}
}`)

var rule kyverno.Rule
err := json.Unmarshal(rawRule, &rule)
assert.NilError(t, err)

_, errNew := validateUserInfo(rule)
assert.NilError(t, errNew)
}

func Test_Validate_Roles(t *testing.T) {
rawRule := []byte(`{
"name": "test",
"match": {
"roles": [
"namespace1:name1",
"name2"
]
}
}`)

var rule kyverno.Rule
err := json.Unmarshal(rawRule, &rule)
assert.NilError(t, err)

path, err := validateUserInfo(rule)
assert.Assert(t, err != nil)
assert.Assert(t, path == "match.roles")
}

func Test_Validate_ServiceAccount(t *testing.T) {
rawRule := []byte(`
{
"name": "test",
"exclude": {
"subjects": [
{
"kind": "ServiceAccount",
"name": "testname"
}
]
}
}`)

var rule kyverno.Rule
err := json.Unmarshal(rawRule, &rule)
assert.NilError(t, err)

path, err := validateUserInfo(rule)
assert.Assert(t, err != nil)
assert.Assert(t, path == "exclude.subjects")
}
@ -172,7 +172,7 @@ func validatePatterns(resource unstructured.Unstructured, rule kyverno.Rule) (re
// rule application failed
glog.V(4).Infof("Validation rule '%s' failed at '%s' for resource %s/%s/%s. %s: %v", rule.Name, path, resource.GetKind(), resource.GetNamespace(), resource.GetName(), rule.Validation.Message, err)
response.Success = false
response.Message = fmt.Sprintf("Validation error: %s\nValidation rule '%s' failed at path '%s'.",
response.Message = fmt.Sprintf("Validation error: %s; Validation rule '%s' failed at path '%s'",
rule.Validation.Message, rule.Name, path)
return response
}

@ -197,7 +197,7 @@ func validatePatterns(resource unstructured.Unstructured, rule kyverno.Rule) (re
return response
}
if err != nil {
glog.V(4).Infof("Validation error: %s\nValidation rule %s anyPattern[%d] failed at path %s for %s/%s/%s",
glog.V(4).Infof("Validation error: %s; Validation rule %s anyPattern[%d] failed at path %s for %s/%s/%s",
rule.Validation.Message, rule.Name, index, path, resource.GetKind(), resource.GetNamespace(), resource.GetName())
errs = append(errs, err)
failedPaths = append(failedPaths, path)

@ -213,7 +213,7 @@ func validatePatterns(resource unstructured.Unstructured, rule kyverno.Rule) (re
str := fmt.Sprintf("Validation rule %s anyPattern[%d] failed at path %s.", rule.Name, index, failedPaths[index])
errorStr = append(errorStr, str)
}
response.Message = fmt.Sprintf("Validation error: %s\n%s", rule.Validation.Message, strings.Join(errorStr, "\n"))
response.Message = fmt.Sprintf("Validation error: %s; %s", rule.Validation.Message, strings.Join(errorStr, ";"))

return response
}
@ -1819,7 +1819,7 @@ func TestValidate_image_tag_fail(t *testing.T) {
assert.NilError(t, err)
msgs := []string{
"Validation rule 'validate-tag' succeeded.",
"Validation error: imagePullPolicy 'Always' required with tag 'latest'\nValidation rule 'validate-latest' failed at path '/spec/containers/0/imagePullPolicy/'.",
"Validation error: imagePullPolicy 'Always' required with tag 'latest'; Validation rule 'validate-latest' failed at path '/spec/containers/0/imagePullPolicy/'",
}
er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
for index, r := range er.PolicyResponse.Rules {

@ -1993,7 +1993,7 @@ func TestValidate_Fail_anyPattern(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
msgs := []string{"Validation error: A namespace is required\nValidation rule check-default-namespace anyPattern[0] failed at path /metadata/namespace/.\nValidation rule check-default-namespace anyPattern[1] failed at path /metadata/namespace/."}
msgs := []string{"Validation error: A namespace is required; Validation rule check-default-namespace anyPattern[0] failed at path /metadata/namespace/.;Validation rule check-default-namespace anyPattern[1] failed at path /metadata/namespace/."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
}

@ -2074,7 +2074,7 @@ func TestValidate_host_network_port(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
msgs := []string{"Validation error: Host network and port are not allowed\nValidation rule 'validate-host-network-port' failed at path '/spec/containers/0/ports/0/hostPort/'."}
msgs := []string{"Validation error: Host network and port are not allowed; Validation rule 'validate-host-network-port' failed at path '/spec/containers/0/ports/0/hostPort/'"}

for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])

@ -2251,7 +2251,7 @@ func TestValidate_anchor_arraymap_fail(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
msgs := []string{"Validation error: Host path '/var/lib/' is not allowed\nValidation rule 'validate-host-path' failed at path '/spec/volumes/0/hostPath/path/'."}
msgs := []string{"Validation error: Host path '/var/lib/' is not allowed; Validation rule 'validate-host-path' failed at path '/spec/volumes/0/hostPath/path/'"}

for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])

@ -2464,7 +2464,7 @@ func TestValidate_anchor_map_found_invalid(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
msgs := []string{"Validation error: pod: validate run as non root user\nValidation rule 'pod rule 2' failed at path '/spec/securityContext/runAsNonRoot/'."}
msgs := []string{"Validation error: pod: validate run as non root user; Validation rule 'pod rule 2' failed at path '/spec/securityContext/runAsNonRoot/'"}

for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])

@ -2848,7 +2848,7 @@ func TestValidate_negationAnchor_deny(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, NewResource: *resourceUnstructured})
msgs := []string{"Validation error: Host path is not allowed\nValidation rule 'validate-host-path' failed at path '/spec/volumes/0/hostPath/'."}
msgs := []string{"Validation error: Host path is not allowed; Validation rule 'validate-host-path' failed at path '/spec/volumes/0/hostPath/'"}

for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -14,14 +14,18 @@ import (
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
)

//Generator generate events
type Generator struct {
client *client.Client
pLister kyvernolister.ClusterPolicyLister
client *client.Client
// list/get cluster policy
pLister kyvernolister.ClusterPolicyLister
// returns true if the cluster policy store has been synced at least once
pSynced cache.InformerSynced
queue workqueue.RateLimitingInterface
recorder record.EventRecorder
}

@ -38,6 +42,7 @@ func NewEventGenerator(client *client.Client, pInformer kyvernoinformer.ClusterP
client: client,
pLister: pInformer.Lister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName),
pSynced: pInformer.Informer().HasSynced,
recorder: initRecorder(client),
}

@ -86,6 +91,10 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
glog.Info("Starting event generator")
defer glog.Info("Shutting down event generator")

if !cache.WaitForCacheSync(stopCh, gen.pSynced) {
glog.Error("event generator: failed to sync informer cache")
}

for i := 0; i < workers; i++ {
go wait.Until(gen.runWorker, time.Second, stopCh)
}
@ -39,16 +39,12 @@ type NamespaceController struct {

//nsLister provides expansion to the namespace lister to inject GVK for the resource
nsLister NamespaceListerExpansion
// nLsister can list/get namespaces from the shared informer's store
// nsLister v1CoreLister.NamespaceLister
// nsListerSynced returns true if the Namespace store has been synced at least once
nsListerSynced cache.InformerSynced
// nsSynced returns true if the Namespace store has been synced at least once
nsSynced cache.InformerSynced
// pvLister can list/get policy violation from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// pvListerSynced retrns true if the Policy store has been synced at least once
pvListerSynced cache.InformerSynced
// pvLister can list/get policy violation from the shared informer's store
pvLister kyvernolister.ClusterPolicyViolationLister
// pSynced retrns true if the Policy store has been synced at least once
pSynced cache.InformerSynced
// API to send policy stats for aggregation
policyStatus policy.PolicyStatusInterface
// eventGen provides interface to generate evenets

@ -70,7 +66,6 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
client *client.Client,
nsInformer v1Informer.NamespaceInformer,
pInformer kyvernoinformer.ClusterPolicyInformer,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
policyStatus policy.PolicyStatusInterface,
eventGen event.Interface,
configHandler config.Interface,

@ -103,10 +98,9 @@ func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
nsc.syncHandler = nsc.syncNamespace

nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
nsc.nsListerSynced = nsInformer.Informer().HasSynced
nsc.nsSynced = nsInformer.Informer().HasSynced
nsc.pLister = pInformer.Lister()
nsc.pvListerSynced = pInformer.Informer().HasSynced
nsc.pvLister = pvInformer.Lister()
nsc.pSynced = pInformer.Informer().HasSynced
nsc.policyStatus = policyStatus

// resource manager

@ -174,7 +168,8 @@ func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
glog.Info("Starting namespace controller")
defer glog.Info("Shutting down namespace controller")

if ok := cache.WaitForCacheSync(stopCh, nsc.nsListerSynced); !ok {
if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
glog.Error("namespace generator: failed to sync cache")
return
}
@ -21,7 +21,7 @@ func (pc *PolicyController) cleanUpPolicyViolation(pResponse engine.PolicyRespon

// there can be multiple violations as a resource can have multiple owners
if pResponse.Resource.Namespace == "" {
pvs, err := getClusterPVs(pc.pvLister, pc.client, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Name)
pvs, err := getClusterPVs(pc.cpvLister, pc.client, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Name)
if err != nil {
glog.Errorf("failed to cleanUp violations: %v", err)
return

@ -32,7 +32,7 @@ func (pc *PolicyController) cleanUpPolicyViolation(pResponse engine.PolicyRespon
continue
}
glog.V(4).Infof("cleanup cluster violation %s on %s", pv.Name, pv.Spec.ResourceSpec.ToKey())
if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
glog.Errorf("failed to delete cluster policy violation %s on %s: %v", pv.Name, pv.Spec.ResourceSpec.ToKey(), err)
continue
}

@ -47,7 +47,7 @@ func (pc *PolicyController) cleanUpPolicyViolation(pResponse engine.PolicyRespon
}

for _, pv := range nspvs {
if reflect.DeepEqual(pv, kyverno.NamespacedPolicyViolation{}) {
if reflect.DeepEqual(pv, kyverno.PolicyViolation{}) {
continue
}
glog.V(4).Infof("cleanup namespaced violation %s on %s", pv.Name, pv.Spec.ResourceSpec.ToKey())

@ -127,8 +127,8 @@ func getClusterPVonOwnerRef(pvLister kyvernolister.ClusterPolicyViolationLister,
return pvs, nil
}

func getNamespacedPVs(nspvLister kyvernolister.NamespacedPolicyViolationLister, client *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.NamespacedPolicyViolation, error) {
var pvs []kyverno.NamespacedPolicyViolation
func getNamespacedPVs(nspvLister kyvernolister.PolicyViolationLister, client *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.PolicyViolation, error) {
var pvs []kyverno.PolicyViolation
var err error
pv, err := getNamespacedPVOnResource(nspvLister, policyName, kind, namespace, name)
if err != nil {

@ -136,7 +136,7 @@ func getNamespacedPVs(nspvLister kyvernolister.NamespacedPolicyViolationLister,
return nil, err
}

if !reflect.DeepEqual(pv, kyverno.NamespacedPolicyViolation{}) {
if !reflect.DeepEqual(pv, kyverno.PolicyViolation{}) {
// found a violation on resource
pvs = append(pvs, pv)
return pvs, nil

@ -151,27 +151,26 @@ func getNamespacedPVs(nspvLister kyvernolister.NamespacedPolicyViolationLister,
return pvs, nil
}

func getNamespacedPVOnResource(nspvLister kyvernolister.NamespacedPolicyViolationLister, policyName, kind, namespace, name string) (kyverno.NamespacedPolicyViolation, error) {
nspvs, err := nspvLister.List(labels.Everything())
func getNamespacedPVOnResource(nspvLister kyvernolister.PolicyViolationLister, policyName, kind, namespace, name string) (kyverno.PolicyViolation, error) {
nspvs, err := nspvLister.PolicyViolations(namespace).List(labels.Everything())
if err != nil {
glog.V(2).Infof("failed to list namespaced pv: %v", err)
return kyverno.NamespacedPolicyViolation{}, fmt.Errorf("failed to list namespaced pv: %v", err)
return kyverno.PolicyViolation{}, fmt.Errorf("failed to list namespaced pv: %v", err)
}

for _, nspv := range nspvs {
// find a policy on same resource and policy combination
if nspv.Spec.Policy == policyName &&
nspv.Spec.ResourceSpec.Kind == kind &&
nspv.Spec.ResourceSpec.Namespace == namespace &&
nspv.Spec.ResourceSpec.Name == name {
return *nspv, nil
}
}
return kyverno.NamespacedPolicyViolation{}, nil
return kyverno.PolicyViolation{}, nil
}

func getNamespacedPVonOwnerRef(nspvLister kyvernolister.NamespacedPolicyViolationLister, dclient *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.NamespacedPolicyViolation, error) {
var pvs []kyverno.NamespacedPolicyViolation
func getNamespacedPVonOwnerRef(nspvLister kyvernolister.PolicyViolationLister, dclient *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.PolicyViolation, error) {
var pvs []kyverno.PolicyViolation
// get resource
resource, err := dclient.GetResource(kind, namespace, name)
if err != nil {

@ -185,7 +184,7 @@ func getNamespacedPVonOwnerRef(nspvLister kyvernolister.NamespacedPolicyViolatio
// as we can have multiple top level owners to a resource
// check if pv exists on each one
for owner := range owners {
pv, err := getNamespacedPVOnResource(nspvLister, policyName, owner.Kind, owner.Namespace, owner.Name)
pv, err := getNamespacedPVOnResource(nspvLister, policyName, owner.Kind, namespace, owner.Name)
if err != nil {
glog.Errorf("error while fetching resource owners: %v", err)
continue
132  pkg/policy/clusterpv.go  Normal file

@ -0,0 +1,132 @@
package policy

import (
"fmt"

"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"k8s.io/client-go/tools/cache"
)

func (pc *PolicyController) addClusterPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.ClusterPolicyViolation)

if pv.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
// show up in a state that is already pending deletion.
pc.deleteClusterPolicyViolation(pv)
return
}
// dont manage controller references as the ownerReference is assigned by violation generator

ps := pc.getPolicyForClusterPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("Cluster Policy Violation %s does not belong to an active policy, will be cleanedup", pv.Name)
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
glog.Errorf("Failed to deleted cluster policy violation %s: %v", pv.Name, err)
return
}
glog.V(4).Infof("Cluster Policy Violation %s deleted", pv.Name)
return
}
glog.V(4).Infof("Cluster Policy Violation %s added.", pv.Name)
for _, p := range ps {
pc.enqueuePolicy(p)
}
}

func (pc *PolicyController) updateClusterPolicyViolation(old, cur interface{}) {
curPV := cur.(*kyverno.ClusterPolicyViolation)
oldPV := old.(*kyverno.ClusterPolicyViolation)
if curPV.ResourceVersion == oldPV.ResourceVersion {
// Periodic resync will send update events for all known Policy Violation.
// Two different versions of the same replica set will always have different RVs.
return
}

ps := pc.getPolicyForClusterPolicyViolation(curPV)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("Cluster Policy Violation %s does not belong to an active policy, will be cleanedup", curPV.Name)
if err := pc.pvControl.DeleteClusterPolicyViolation(curPV.Name); err != nil {
glog.Errorf("Failed to deleted cluster policy violation %s: %v", curPV.Name, err)
return
}
glog.V(4).Infof("PolicyViolation %s deleted", curPV.Name)
return
}
glog.V(4).Infof("Cluster PolicyViolation %s updated", curPV.Name)
for _, p := range ps {
pc.enqueuePolicy(p)
}
}

// deletePolicyViolation enqueues the Policy that manages a PolicyViolation when
// the PolicyViolation is deleted. obj could be an *kyverno.CusterPolicyViolation, or
// a DeletionFinalStateUnknown marker item.

func (pc *PolicyController) deleteClusterPolicyViolation(obj interface{}) {
pv, ok := obj.(*kyverno.ClusterPolicyViolation)
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the PolicyViolation
// changed labels the new Policy will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
pv, ok = tombstone.Obj.(*kyverno.ClusterPolicyViolation)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
}
ps := pc.getPolicyForClusterPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("Cluster Policy Violation %s does not belong to an active policy, will be cleanedup", pv.Name)
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
glog.Errorf("Failed to deleted cluster policy violation %s: %v", pv.Name, err)
return
}
glog.V(4).Infof("Cluster Policy Violation %s deleted", pv.Name)
return
}
glog.V(4).Infof("Cluster PolicyViolation %s updated", pv.Name)
for _, p := range ps {
pc.enqueuePolicy(p)
}
}

func (pc *PolicyController) getPolicyForClusterPolicyViolation(pv *kyverno.ClusterPolicyViolation) []*kyverno.ClusterPolicy {
policies, err := pc.pLister.GetPolicyForPolicyViolation(pv)
if err != nil || len(policies) == 0 {
return nil
}
// Because all PolicyViolations's belonging to a Policy should have a unique label key,
// there should never be more than one Policy returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers, for now we just return the older one
if len(policies) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one policy is selecting policy violation %s with labels: %#v, returning %s",
pv.Name, pv.Labels, policies[0].Name)
}
return policies
}
func (pc *PolicyController) getClusterPolicyViolationForPolicy(policy string) ([]*kyverno.ClusterPolicyViolation, error) {
policySelector, err := buildPolicyLabel(policy)
if err != nil {
return nil, err
}
// Get List of cluster policy violation
cpvList, err := pc.cpvLister.List(policySelector)
if err != nil {
return nil, err
}
return cpvList, nil
}
23  pkg/policy/common.go  Normal file

@ -0,0 +1,23 @@
package policy

import (
"fmt"

metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)

func buildPolicyLabel(policyName string) (labels.Selector, error) {
policyLabelmap := map[string]string{"policy": policyName}
//NOt using a field selector, as the match function will have to cast the runtime.object
// to get the field, while it can get labels directly, saves the cast effort
ls := &metav1.LabelSelector{}
if err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&policyLabelmap, ls, nil); err != nil {
return nil, fmt.Errorf("failed to generate label sector of Policy name %s: %v", policyName, err)
}
policySelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
return nil, fmt.Errorf("Policy %s has invalid label selector: %v", policyName, err)
}
return policySelector, nil
}
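A standalone sketch of the label-selector construction used by buildPolicyLabel above, so the conversion path (map to LabelSelector to labels.Selector) is easy to see in isolation. The "check-host-path" policy name is an illustrative value; the conversion helpers are the same ones called in the new file.

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// map -> *metav1.LabelSelector -> labels.Selector, as in buildPolicyLabel.
	m := map[string]string{"policy": "check-host-path"} // hypothetical policy name
	ls := &metav1.LabelSelector{}
	if err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&m, ls, nil); err != nil {
		panic(err)
	}
	selector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		panic(err)
	}
	fmt.Println(selector.String()) // policy=check-host-path
}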
@ -1,10 +1,8 @@
package policy

import (
"encoding/json"
"fmt"
"reflect"
"sync"
"time"

"github.com/golang/glog"

@ -22,15 +20,9 @@ import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/types"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
webhookinformer "k8s.io/client-go/informers/admissionregistration/v1beta1"
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
webhooklister "k8s.io/client-go/listers/admissionregistration/v1beta1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"

@ -64,19 +56,15 @@ type PolicyController struct {
// pLister can list/get policy from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// pvLister can list/get policy violation from the shared informer's store
pvLister kyvernolister.ClusterPolicyViolationLister
cpvLister kyvernolister.ClusterPolicyViolationLister
// nspvLister can list/get namespaced policy violation from the shared informer's store
nspvLister kyvernolister.NamespacedPolicyViolationLister
nspvLister kyvernolister.PolicyViolationLister
// pListerSynced returns true if the Policy store has been synced at least once
pListerSynced cache.InformerSynced
// pvListerSynced returns true if the Policy store has been synced at least once
pvListerSynced cache.InformerSynced
// pvListerSynced returns true if the Policy store has been synced at least once
cpvListerSynced cache.InformerSynced
// pvListerSynced returns true if the Policy Violation store has been synced at least once
nspvListerSynced cache.InformerSynced
// mutationwebhookLister can list/get mutatingwebhookconfigurations
mutationwebhookLister webhooklister.MutatingWebhookConfigurationLister
// WebhookRegistrationClient
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
// Resource manager, manages the mapping for already processed resource
rm resourceManager
// helpers to validate against current loaded configuration

@ -87,15 +75,21 @@ type PolicyController struct {
pMetaStore policystore.UpdateInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
// resourceWebhookWatcher queues the webhook creation request, creates the webhook
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
}

// NewPolicyController create a new PolicyController
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer, nspvInformer kyvernoinformer.NamespacedPolicyViolationInformer,
eventGen event.Interface, webhookInformer webhookinformer.MutatingWebhookConfigurationInformer,
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient, configHandler config.Interface,
func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
client *client.Client,
pInformer kyvernoinformer.ClusterPolicyInformer,
cpvInformer kyvernoinformer.ClusterPolicyViolationInformer,
nspvInformer kyvernoinformer.PolicyViolationInformer,
configHandler config.Interface,
eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
pMetaStore policystore.UpdateInterface) (*PolicyController, error) {
pMetaStore policystore.UpdateInterface,
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister) (*PolicyController, error) {
// Event broad caster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(glog.Infof)

@ -106,15 +100,15 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})

pc := PolicyController{
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
webhookRegistrationClient: webhookRegistrationClient,
configHandler: configHandler,
pMetaStore: pMetaStore,
pvGenerator: pvGenerator,
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
pMetaStore: pMetaStore,
pvGenerator: pvGenerator,
resourceWebhookWatcher: resourceWebhookWatcher,
}

pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder}

@ -125,10 +119,10 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
DeleteFunc: pc.deletePolicy,
})

pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addPolicyViolation,
UpdateFunc: pc.updatePolicyViolation,
DeleteFunc: pc.deletePolicyViolation,
cpvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: pc.addClusterPolicyViolation,
UpdateFunc: pc.updateClusterPolicyViolation,
DeleteFunc: pc.deleteClusterPolicyViolation,
})

nspvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -141,15 +135,12 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, client *client.
pc.syncHandler = pc.syncPolicy

pc.pLister = pInformer.Lister()
pc.pvLister = pvInformer.Lister()
pc.cpvLister = cpvInformer.Lister()
pc.nspvLister = nspvInformer.Lister()

pc.pListerSynced = pInformer.Informer().HasSynced
pc.pvListerSynced = pvInformer.Informer().HasSynced
pc.cpvListerSynced = cpvInformer.Informer().HasSynced
pc.nspvListerSynced = nspvInformer.Informer().HasSynced

pc.mutationwebhookLister = webhookInformer.Lister()

// resource manager
// rebuild after 300 seconds/ 5 mins
//TODO: pass the time in seconds instead of converting it internally

@ -201,181 +192,6 @@ func (pc *PolicyController) deletePolicy(obj interface{}) {
pc.enqueuePolicy(p)
}

func (pc *PolicyController) addPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.ClusterPolicyViolation)

if pv.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
// show up in a state that is already pending deletion.
pc.deletePolicyViolation(pv)
return
}

// generate labels to match the policy from the spec, if not present
if updatePolicyLabelIfNotDefined(pc.pvControl, pv) {
return
}

// If it has a ControllerRef, that's all that matters.
if controllerRef := metav1.GetControllerOf(pv); controllerRef != nil {
p := pc.resolveControllerRef(controllerRef)
if p == nil {
return
}
glog.V(4).Infof("PolicyViolation %s added.", pv.Name)
pc.enqueuePolicy(p)
return
}

// Otherwise, it's an orphan. Get a list of all matching Policies and sync
// them to see if anyone wants to adopt it.
ps := pc.getPolicyForPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", pv.Name)
if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {
glog.Errorf("Failed to deleted policy violation %s: %v", pv.Name, err)
return
}
glog.V(4).Infof("PolicyViolation %s deleted", pv.Name)
return
}
glog.V(4).Infof("Orphan Policy Violation %s added.", pv.Name)
for _, p := range ps {
pc.enqueuePolicy(p)
}
}

func (pc *PolicyController) updatePolicyViolation(old, cur interface{}) {
curPV := cur.(*kyverno.ClusterPolicyViolation)
oldPV := old.(*kyverno.ClusterPolicyViolation)
if curPV.ResourceVersion == oldPV.ResourceVersion {
// Periodic resync will send update events for all known Policy Violation.
// Two different versions of the same replica set will always have different RVs.
return
}

// generate labels to match the policy from the spec, if not present
if updatePolicyLabelIfNotDefined(pc.pvControl, curPV) {
return
}

curControllerRef := metav1.GetControllerOf(curPV)
oldControllerRef := metav1.GetControllerOf(oldPV)
controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
if controllerRefChanged && oldControllerRef != nil {
// The ControllerRef was changed. Sync the old controller, if any.
if p := pc.resolveControllerRef(oldControllerRef); p != nil {
pc.enqueuePolicy(p)
}
}
// If it has a ControllerRef, that's all that matters.
if curControllerRef != nil {
p := pc.resolveControllerRef(curControllerRef)
if p == nil {
return
}
glog.V(4).Infof("PolicyViolation %s updated.", curPV.Name)
pc.enqueuePolicy(p)
return
}

// Otherwise, it's an orphan. If anything changed, sync matching controllers
// to see if anyone wants to adopt it now.
labelChanged := !reflect.DeepEqual(curPV.Labels, oldPV.Labels)
if labelChanged || controllerRefChanged {
ps := pc.getPolicyForPolicyViolation(curPV)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", curPV.Name)
if err := pc.pvControl.DeletePolicyViolation(curPV.Name); err != nil {
glog.Errorf("Failed to deleted policy violation %s: %v", curPV.Name, err)
return
}
glog.V(4).Infof("PolicyViolation %s deleted", curPV.Name)
return
}
glog.V(4).Infof("Orphan PolicyViolation %s updated", curPV.Name)
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
}

// deletePolicyViolation enqueues the Policy that manages a PolicyViolation when
// the PolicyViolation is deleted. obj could be an *kyverno.CusterPolicyViolation, or
// a DeletionFinalStateUnknown marker item.

func (pc *PolicyController) deletePolicyViolation(obj interface{}) {
pv, ok := obj.(*kyverno.ClusterPolicyViolation)
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
// in the list, leading to the insertion of a tombstone object which contains
// the deleted key/value. Note that this value might be stale. If the PolicyViolation
// changed labels the new Policy will not be woken up till the periodic resync.
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
pv, ok = tombstone.Obj.(*kyverno.ClusterPolicyViolation)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
return
}
}
controllerRef := metav1.GetControllerOf(pv)
if controllerRef == nil {
// No controller should care about orphans being deleted.
return
}
p := pc.resolveControllerRef(controllerRef)
if p == nil {
return
}
glog.V(4).Infof("PolicyViolation %s deleted", pv.Name)
pc.enqueuePolicy(p)
}

// resolveControllerRef returns the controller referenced by a ControllerRef,
// or nil if the ControllerRef could not be resolved to a matching controller
// of the correct Kind.
func (pc *PolicyController) resolveControllerRef(controllerRef *metav1.OwnerReference) *kyverno.ClusterPolicy {
// We can't look up by UID, so look up by Name and then verify UID.
// Don't even try to look up by Name if it's the wrong Kind.
if controllerRef.Kind != controllerRef.Kind {
return nil
}
p, err := pc.pLister.Get(controllerRef.Name)
if err != nil {
return nil
}
if p.UID != controllerRef.UID {
// The controller we found with this Name is not the same one that the
// ControllerRef points to.
return nil
}
return p
}

func (pc *PolicyController) getPolicyForPolicyViolation(pv *kyverno.ClusterPolicyViolation) []*kyverno.ClusterPolicy {
policies, err := pc.pLister.GetPolicyForPolicyViolation(pv)
if err != nil || len(policies) == 0 {
return nil
}
// Because all PolicyViolations's belonging to a Policy should have a unique label key,
// there should never be more than one Policy returned by the above method.
// If that happens we should probably dynamically repair the situation by ultimately
// trying to clean up one of the controllers, for now we just return the older one
if len(policies) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one policy is selecting policy violation %s with labels: %#v, returning %s",
pv.Name, pv.Labels, policies[0].Name)
}
return policies
}

func (pc *PolicyController) enqueue(policy *kyverno.ClusterPolicy) {
key, err := cache.MetaNamespaceKeyFunc(policy)
if err != nil {
@ -394,7 +210,8 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
|
|||
glog.Info("Starting policy controller")
|
||||
defer glog.Info("Shutting down policy controller")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.pvListerSynced, pc.nspvListerSynced) {
|
||||
if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.cpvListerSynced, pc.nspvListerSynced) {
|
||||
glog.Error("failed to sync informer cache")
|
||||
return
|
||||
}
|
||||
for i := 0; i < workers; i++ {
|
||||
|
@ -451,9 +268,17 @@ func (pc *PolicyController) syncPolicy(key string) error {
|
|||
policy, err := pc.pLister.Get(key)
|
||||
if errors.IsNotFound(err) {
|
||||
glog.V(2).Infof("Policy %v has been deleted", key)
|
||||
|
||||
// delete cluster policy violation
|
||||
if err := pc.deleteClusterPolicyViolations(key); err != nil {
|
||||
return err
|
||||
}
|
||||
// delete namespaced policy violation
|
||||
if err := pc.deleteNamespacedPolicyViolations(key); err != nil {
|
||||
return err
|
||||
}
|
||||
// remove the recorded stats for the policy
|
||||
pc.statusAggregator.RemovePolicyStats(key)
|
||||
|
||||
// remove webhook configurations if there are no policies
|
||||
if err := pc.removeResourceWebhookConfiguration(); err != nil {
|
||||
// do not fail, if unable to delete resource webhook config
|
||||
|
@ -462,37 +287,62 @@ func (pc *PolicyController) syncPolicy(key string) error {
|
|||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
glog.V(4).Info(err)
|
||||
return err
|
||||
}
|
||||
|
||||
if err := pc.createResourceMutatingWebhookConfigurationIfRequired(*policy); err != nil {
|
||||
glog.V(4).Infof("failed to create resource mutating webhook configurations, policies wont be applied on resources: %v", err)
|
||||
glog.Errorln(err)
|
||||
}
|
||||
|
||||
// Deep-copy otherwise we are mutating our cache.
|
||||
// TODO: Deep-copy only when needed.
|
||||
p := policy.DeepCopy()
|
||||
|
||||
pvList, nspvList, err := pc.getPolicyViolationsForPolicy(p)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// if the policy contains mutating & validation rules and it config does not exist we create one
|
||||
if policy.HasMutateOrValidate() {
|
||||
pc.resourceWebhookWatcher.RegisterResourceWebhook()
|
||||
}
|
||||
|
||||
// cluster policy violations
|
||||
cpvList, err := pc.getClusterPolicyViolationForPolicy(policy.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// namespaced policy violation
|
||||
nspvList, err := pc.getNamespacedPolicyViolationForPolicy(policy.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// process policies on existing resources
|
||||
engineResponses := pc.processExistingResources(*p)
|
||||
engineResponses := pc.processExistingResources(*policy)
|
||||
// report errors
|
||||
pc.cleanupAndReport(engineResponses)
|
||||
// fetch the policy again via the aggreagator to remain consistent
|
||||
return pc.syncStatusOnly(p, pvList, nspvList)
|
||||
// sync active
|
||||
return pc.syncStatusOnly(policy, cpvList, nspvList)
|
||||
}
|
||||
|
||||
func (pc *PolicyController) deleteClusterPolicyViolations(policy string) error {
|
||||
cpvList, err := pc.getClusterPolicyViolationForPolicy(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, cpv := range cpvList {
|
||||
if err := pc.pvControl.DeleteClusterPolicyViolation(cpv.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pc *PolicyController) deleteNamespacedPolicyViolations(policy string) error {
|
||||
nspvList, err := pc.getNamespacedPolicyViolationForPolicy(policy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, nspv := range nspvList {
|
||||
if err := pc.pvControl.DeleteNamespacedPolicyViolation(nspv.Namespace, nspv.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//syncStatusOnly updates the policy status subresource
|
||||
// status:
|
||||
// - violations : (count of the resources that violate this policy )
|
||||
func (pc *PolicyController) syncStatusOnly(p *kyverno.ClusterPolicy, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.NamespacedPolicyViolation) error {
|
||||
func (pc *PolicyController) syncStatusOnly(p *kyverno.ClusterPolicy, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.PolicyViolation) error {
|
||||
newStatus := pc.calculateStatus(p.Name, pvList, nspvList)
|
||||
if reflect.DeepEqual(newStatus, p.Status) {
|
||||
// no update to status
|
||||
|
@ -505,7 +355,7 @@ func (pc *PolicyController) syncStatusOnly(p *kyverno.ClusterPolicy, pvList []*k
|
|||
return err
|
||||
}
|
||||
|
||||
func (pc *PolicyController) calculateStatus(policyName string, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.NamespacedPolicyViolation) kyverno.PolicyStatus {
|
||||
func (pc *PolicyController) calculateStatus(policyName string, pvList []*kyverno.ClusterPolicyViolation, nspvList []*kyverno.PolicyViolation) kyverno.PolicyStatus {
|
||||
violationCount := len(pvList) + len(nspvList)
|
||||
status := kyverno.PolicyStatus{
|
||||
ViolationCount: violationCount,
|
||||
|
@ -524,346 +374,23 @@ func (pc *PolicyController) calculateStatus(policyName string, pvList []*kyverno
|
|||
return status
|
||||
}
|
||||
|
||||
func (pc *PolicyController) getPolicyViolationsForPolicy(p *kyverno.ClusterPolicy) ([]*kyverno.ClusterPolicyViolation, []*kyverno.NamespacedPolicyViolation, error) {
|
||||
policyLabelmap := map[string]string{"policy": p.Name}
|
||||
//Not using a field selector, as the match function would have to cast the runtime.Object
|
||||
// to get the field, while it can get labels directly, which saves the cast effort
|
||||
ls := &metav1.LabelSelector{}
|
||||
if err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&policyLabelmap, ls, nil); err != nil {
|
||||
return nil, nil, fmt.Errorf("failed to generate label sector of Policy name %s: %v", p.Name, err)
|
||||
}
|
||||
|
||||
policySelector, err := metav1.LabelSelectorAsSelector(ls)
|
||||
func (pc *PolicyController) getNamespacedPolicyViolationForPolicy(policy string) ([]*kyverno.PolicyViolation, error) {
|
||||
policySelector, err := buildPolicyLabel(policy)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("Policy %s has invalid label selector: %v", p.Name, err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// List all PolicyViolation to find those we own but that no longer match our
|
||||
// selector. They will be orphaned by ClaimPolicyViolation().
|
||||
// cluster Policy Violation
|
||||
pvList, err := pc.pvLister.List(labels.Everything())
|
||||
// Get list of namespaced policy violations
|
||||
nspvList, err := pc.nspvLister.List(policySelector)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
return nil, err
|
||||
}
|
||||
|
||||
canAdoptFunc := RecheckDeletionTimestamp(func() (metav1.Object, error) {
|
||||
fresh, err := pc.kyvernoClient.KyvernoV1().ClusterPolicies().Get(p.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if fresh.UID != p.UID {
|
||||
return nil, fmt.Errorf("original Policy %v is gone: got uid %v, wanted %v", p.Name, fresh.UID, p.UID)
|
||||
}
|
||||
return fresh, nil
|
||||
})
|
||||
|
||||
cm := NewPolicyViolationControllerRefManager(pc.pvControl, p, policySelector, controllerKind, canAdoptFunc)
|
||||
claimedPVList, err := cm.claimPolicyViolations(pvList)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
claimedPVs := claimedPVList.([]*kyverno.ClusterPolicyViolation)
|
||||
|
||||
// namespaced Policy Violation
|
||||
nspvList, err := pc.nspvLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
nscm := NewPolicyViolationControllerRefManager(pc.pvControl, p, policySelector, controllerKind, canAdoptFunc)
|
||||
claimedNSPVList, err := nscm.claimPolicyViolations(nspvList)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
claimedNSPVs := claimedNSPVList.([]*kyverno.NamespacedPolicyViolation)
|
||||
|
||||
return claimedPVs, claimedNSPVs, nil
|
||||
}
|
||||
|
||||
func (m *PolicyViolationControllerRefManager) claimPolicyViolations(sets interface{}) (interface{}, error) {
|
||||
var errlist []error
|
||||
|
||||
match := func(obj metav1.Object) bool {
|
||||
return m.Selector.Matches(labels.Set(obj.GetLabels()))
|
||||
}
|
||||
adopt := func(obj metav1.Object) error {
|
||||
return m.adoptPolicyViolation(obj)
|
||||
}
|
||||
release := func(obj metav1.Object) error {
|
||||
return m.releasePolicyViolation(obj)
|
||||
}
|
||||
|
||||
if pvs, ok := sets.([]*kyverno.ClusterPolicyViolation); ok {
|
||||
var claimed []*kyverno.ClusterPolicyViolation
|
||||
for _, pv := range pvs {
|
||||
ok, err := m.ClaimObject(pv, match, adopt, release)
|
||||
if err != nil {
|
||||
errlist = append(errlist, err)
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
claimed = append(claimed, pv)
|
||||
}
|
||||
}
|
||||
return claimed, utilerrors.NewAggregate(errlist)
|
||||
}
|
||||
|
||||
var claimed []*kyverno.NamespacedPolicyViolation
|
||||
for _, pv := range sets.([]*kyverno.NamespacedPolicyViolation) {
|
||||
ok, err := m.ClaimObject(pv, match, adopt, release)
|
||||
if err != nil {
|
||||
errlist = append(errlist, err)
|
||||
continue
|
||||
}
|
||||
if ok {
|
||||
claimed = append(claimed, pv)
|
||||
}
|
||||
}
|
||||
return claimed, utilerrors.NewAggregate(errlist)
|
||||
}
|
||||
|
||||
func (m *PolicyViolationControllerRefManager) adoptPolicyViolation(pv interface{}) error {
|
||||
var ns, pvname string
|
||||
var pvuid types.UID
|
||||
switch typedPV := pv.(type) {
|
||||
case *kyverno.ClusterPolicyViolation:
|
||||
pvname = typedPV.Name
|
||||
pvuid = typedPV.UID
|
||||
case *kyverno.NamespacedPolicyViolation:
|
||||
ns = typedPV.Namespace
|
||||
pvname = typedPV.Name
|
||||
pvuid = typedPV.UID
|
||||
}
|
||||
|
||||
if err := m.CanAdopt(); err != nil {
|
||||
return fmt.Errorf("can't adopt %T name=%s, namespace=%s (%v): %v", pv, pvname, ns, pvuid, err)
|
||||
}
|
||||
// Note that ValidateOwnerReferences() will reject this patch if another
|
||||
// OwnerReference exists with controller=true.
|
||||
//TODO Add JSON Patch Owner reference for resource
|
||||
//TODO Update owner refence for resource
|
||||
controllerFlag := true
|
||||
blockOwnerDeletionFlag := true
|
||||
pOwnerRef := metav1.OwnerReference{APIVersion: m.controllerKind.GroupVersion().String(),
|
||||
Kind: m.controllerKind.Kind,
|
||||
Name: m.Controller.GetName(),
|
||||
UID: m.Controller.GetUID(),
|
||||
Controller: &controllerFlag,
|
||||
BlockOwnerDeletion: &blockOwnerDeletionFlag,
|
||||
}
|
||||
addControllerPatch, err := createOwnerReferencePatch(pOwnerRef)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to add owner reference %v for PolicyViolation %s: %v", pOwnerRef, pvname, err)
|
||||
return err
|
||||
}
|
||||
|
||||
if _, ok := pv.(*kyverno.ClusterPolicyViolation); ok {
|
||||
return m.pvControl.PatchPolicyViolation(pvname, addControllerPatch)
|
||||
}
|
||||
|
||||
return m.pvControl.PatchNamespacedPolicyViolation(ns, pvname, addControllerPatch)
|
||||
}
|
||||
|
||||
type patchOwnerReferenceValue struct {
|
||||
Op string `json:"op"`
|
||||
Path string `json:"path"`
|
||||
Value []metav1.OwnerReference `json:"value"`
|
||||
}
|
||||
|
||||
func createOwnerReferencePatch(ownerRef metav1.OwnerReference) ([]byte, error) {
|
||||
payload := []patchOwnerReferenceValue{{
|
||||
Op: "add",
|
||||
Path: "/metadata/ownerReferences",
|
||||
Value: []metav1.OwnerReference{ownerRef},
|
||||
}}
|
||||
return json.Marshal(payload)
|
||||
}
|
||||
|
||||
func removeOwnerReferencePatch(ownerRef metav1.OwnerReference) ([]byte, error) {
|
||||
payload := []patchOwnerReferenceValue{{
|
||||
Op: "remove",
|
||||
Path: "/metadata/ownerReferences",
|
||||
Value: []metav1.OwnerReference{ownerRef},
|
||||
}}
|
||||
return json.Marshal(payload)
|
||||
}
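For context on the two patch helpers above: both emit a JSON Patch (RFC 6902) document that is later applied with types.JSONPatchType. The following standalone sketch shows roughly what the add-style patch serializes to; the ClusterPolicy name, UID, and API version are made-up placeholders, not values taken from this commit.

package main

import (
	"encoding/json"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

func main() {
	controller := true
	blockOwnerDeletion := true
	// hypothetical owning ClusterPolicy; all field values are placeholders
	ownerRef := metav1.OwnerReference{
		APIVersion:         "kyverno.io/v1",
		Kind:               "ClusterPolicy",
		Name:               "example-policy",
		UID:                types.UID("11111111-2222-3333-4444-555555555555"),
		Controller:         &controller,
		BlockOwnerDeletion: &blockOwnerDeletion,
	}
	// same shape as the payload built by createOwnerReferencePatch
	patch, err := json.Marshal([]map[string]interface{}{{
		"op":    "add",
		"path":  "/metadata/ownerReferences",
		"value": []metav1.OwnerReference{ownerRef},
	}})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
	// [{"op":"add","path":"/metadata/ownerReferences","value":[{"apiVersion":"kyverno.io/v1",...}]}]
}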
|
||||
|
||||
func (m *PolicyViolationControllerRefManager) releasePolicyViolation(pv interface{}) error {
|
||||
var ns, pvname string
|
||||
switch typedPV := pv.(type) {
|
||||
case *kyverno.ClusterPolicyViolation:
|
||||
pvname = typedPV.Name
|
||||
case *kyverno.NamespacedPolicyViolation:
|
||||
ns = typedPV.Namespace
|
||||
pvname = typedPV.Name
|
||||
}
|
||||
|
||||
glog.V(2).Infof("patching PolicyViolation %s to remove its controllerRef to %s/%s:%s",
|
||||
pvname, m.controllerKind.GroupVersion(), m.controllerKind.Kind, m.Controller.GetName())
|
||||
//TODO JSON patch for owner reference for resources
|
||||
controllerFlag := true
|
||||
blockOwnerDeletionFlag := true
|
||||
pOwnerRef := metav1.OwnerReference{APIVersion: m.controllerKind.GroupVersion().String(),
|
||||
Kind: m.controllerKind.Kind,
|
||||
Name: m.Controller.GetName(),
|
||||
UID: m.Controller.GetUID(),
|
||||
Controller: &controllerFlag,
|
||||
BlockOwnerDeletion: &blockOwnerDeletionFlag,
|
||||
}
|
||||
|
||||
removeControllerPatch, err := removeOwnerReferencePatch(pOwnerRef)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to add owner reference %v for PolicyViolation %s: %v", pOwnerRef, pvname, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// deleteOwnerRefPatch := fmt.Sprintf(`{"metadata":{"ownerReferences":[{"$patch":"delete","uid":"%s"}],"uid":"%s"}}`, m.Controller.GetUID(), pv.UID)
|
||||
|
||||
if _, ok := pv.(*kyverno.ClusterPolicyViolation); ok {
|
||||
err = m.pvControl.PatchPolicyViolation(pvname, removeControllerPatch)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
// If the PolicyViolation no longer exists, ignore it.
|
||||
return nil
|
||||
}
|
||||
if errors.IsInvalid(err) {
|
||||
// Invalid error will be returned in two cases: 1. the PolicyViolation
|
||||
// has no owner reference, 2. the uid of the PolicyViolation doesn't
|
||||
// match, which means the PolicyViolation is deleted and then recreated.
|
||||
// In both cases, the error can be ignored.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
err = m.pvControl.PatchNamespacedPolicyViolation(ns, pvname, removeControllerPatch)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
// If the PolicyViolation no longer exists, ignore it.
|
||||
return nil
|
||||
}
|
||||
if errors.IsInvalid(err) {
|
||||
// Invalid error will be returned in two cases: 1. the PolicyViolation
|
||||
// has no owner reference, 2. the uid of the PolicyViolation doesn't
|
||||
// match, which means the PolicyViolation is deleted and then recreated.
|
||||
// In both cases, the error can be ignored.
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
//PolicyViolationControllerRefManager manages adoption of policy violation by a policy
|
||||
type PolicyViolationControllerRefManager struct {
|
||||
BaseControllerRefManager
|
||||
controllerKind schema.GroupVersionKind
|
||||
pvControl PVControlInterface
|
||||
}
|
||||
|
||||
//NewPolicyViolationControllerRefManager returns new PolicyViolationControllerRefManager
|
||||
func NewPolicyViolationControllerRefManager(
|
||||
pvControl PVControlInterface,
|
||||
controller metav1.Object,
|
||||
selector labels.Selector,
|
||||
controllerKind schema.GroupVersionKind,
|
||||
canAdopt func() error,
|
||||
) *PolicyViolationControllerRefManager {
|
||||
|
||||
m := PolicyViolationControllerRefManager{
|
||||
BaseControllerRefManager: BaseControllerRefManager{
|
||||
Controller: controller,
|
||||
Selector: selector,
|
||||
CanAdoptFunc: canAdopt,
|
||||
},
|
||||
controllerKind: controllerKind,
|
||||
pvControl: pvControl,
|
||||
}
|
||||
return &m
|
||||
}
|
||||
|
||||
//BaseControllerRefManager ...
|
||||
type BaseControllerRefManager struct {
|
||||
Controller metav1.Object
|
||||
Selector labels.Selector
|
||||
canAdoptErr error
|
||||
canAdoptOnce sync.Once
|
||||
CanAdoptFunc func() error
|
||||
}
|
||||
|
||||
//CanAdopt ...
|
||||
func (m *BaseControllerRefManager) CanAdopt() error {
|
||||
m.canAdoptOnce.Do(func() {
|
||||
if m.CanAdoptFunc != nil {
|
||||
m.canAdoptErr = m.CanAdoptFunc()
|
||||
}
|
||||
})
|
||||
return m.canAdoptErr
|
||||
}
|
||||
|
||||
//ClaimObject ...
|
||||
func (m *BaseControllerRefManager) ClaimObject(obj metav1.Object, match func(metav1.Object) bool, adopt, release func(metav1.Object) error) (bool, error) {
|
||||
controllerRef := metav1.GetControllerOf(obj)
|
||||
if controllerRef != nil {
|
||||
if controllerRef.UID != m.Controller.GetUID() {
|
||||
// Owned by someone else. Ignore
|
||||
return false, nil
|
||||
}
|
||||
if match(obj) {
|
||||
// We already own it and the selector matches.
|
||||
// Return true (successfully claimed) before checking deletion timestamp.
|
||||
// We're still allowed to claim things we already own while being deleted
|
||||
// because doing so requires taking no actions.
|
||||
return true, nil
|
||||
|
||||
}
|
||||
// Owned by us but selector doesn't match.
|
||||
// Try to release, unless we're being deleted.
|
||||
if m.Controller.GetDeletionTimestamp() != nil {
|
||||
return false, nil
|
||||
}
|
||||
if err := release(obj); err != nil {
|
||||
// If the PolicyViolation no longer exists, ignore the error.
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
// Either someone else released it, or there was a transient error.
|
||||
// The controller should requeue and try again if it's still stale.
|
||||
return false, err
|
||||
}
|
||||
// Successfully released.
|
||||
return false, nil
|
||||
}
|
||||
// It's an orphan.
|
||||
if m.Controller.GetDeletionTimestamp() != nil || !match(obj) {
|
||||
// Ignore if we're being deleted or selector doesn't match.
|
||||
return false, nil
|
||||
}
|
||||
if obj.GetDeletionTimestamp() != nil {
|
||||
// Ignore if the object is being deleted
|
||||
return false, nil
|
||||
}
|
||||
// Selector matches. Try to adopt.
|
||||
if err := adopt(obj); err != nil {
|
||||
// If the PolicyViolation no longer exists, ignore the error
|
||||
if errors.IsNotFound(err) {
|
||||
return false, nil
|
||||
}
|
||||
// Either someone else claimed it first, or there was a transient error.
|
||||
// The controller should requeue and try again if it's still orphaned.
|
||||
return false, err
|
||||
}
|
||||
// Successfully adopted.
|
||||
return true, nil
|
||||
return nspvList, nil
|
||||
|
||||
}
|
||||
|
||||
//PVControlInterface provides interface to operate on policy violation resource
|
||||
type PVControlInterface interface {
|
||||
PatchPolicyViolation(name string, data []byte) error
|
||||
DeletePolicyViolation(name string) error
|
||||
|
||||
PatchNamespacedPolicyViolation(ns, name string, data []byte) error
|
||||
DeleteClusterPolicyViolation(name string) error
|
||||
DeleteNamespacedPolicyViolation(ns, name string) error
|
||||
}
|
||||
|
||||
|
@ -873,168 +400,14 @@ type RealPVControl struct {
|
|||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
//PatchPolicyViolation patches the policy violation with the provided JSON Patch
|
||||
func (r RealPVControl) PatchPolicyViolation(name string, data []byte) error {
|
||||
_, err := r.Client.KyvernoV1().ClusterPolicyViolations().Patch(name, types.JSONPatchType, data)
|
||||
return err
|
||||
}
|
||||
|
||||
//DeletePolicyViolation deletes the policy violation
|
||||
func (r RealPVControl) DeletePolicyViolation(name string) error {
|
||||
func (r RealPVControl) DeleteClusterPolicyViolation(name string) error {
|
||||
return r.Client.KyvernoV1().ClusterPolicyViolations().Delete(name, &metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
//PatchNamespacedPolicyViolation patches the namespaced policy violation with the provided JSON Patch
|
||||
func (r RealPVControl) PatchNamespacedPolicyViolation(ns, name string, data []byte) error {
|
||||
_, err := r.Client.KyvernoV1().NamespacedPolicyViolations(ns).Patch(name, types.JSONPatchType, data)
|
||||
return err
|
||||
}
|
||||
|
||||
//DeleteNamespacedPolicyViolation deletes the namespaced policy violation
|
||||
func (r RealPVControl) DeleteNamespacedPolicyViolation(ns, name string) error {
|
||||
return r.Client.KyvernoV1().NamespacedPolicyViolations(ns).Delete(name, &metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
// RecheckDeletionTimestamp returns a CanAdopt() function to recheck deletion.
|
||||
//
|
||||
// The CanAdopt() function calls getObject() to fetch the latest value,
|
||||
// and denies adoption attempts if that object has a non-nil DeletionTimestamp.
|
||||
func RecheckDeletionTimestamp(getObject func() (metav1.Object, error)) func() error {
|
||||
return func() error {
|
||||
obj, err := getObject()
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't recheck DeletionTimestamp: %v", err)
|
||||
}
|
||||
if obj.GetDeletionTimestamp() != nil {
|
||||
return fmt.Errorf("%v/%v has just been deleted at %v", obj.GetNamespace(), obj.GetName(), obj.GetDeletionTimestamp())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
type patchLabelValue struct {
|
||||
Op string `json:"op"`
|
||||
Path string `json:"path"`
|
||||
Value string `json:"value"`
|
||||
}
|
||||
|
||||
type patchLabelMapValue struct {
|
||||
Op string `json:"op"`
|
||||
Path string `json:"path"`
|
||||
Value map[string]string `json:"value"`
|
||||
}
|
||||
|
||||
func createPolicyLabelPatch(policy string) ([]byte, error) {
|
||||
payload := []patchLabelValue{{
|
||||
Op: "add",
|
||||
Path: "/metadata/labels/policy",
|
||||
Value: policy,
|
||||
}}
|
||||
return json.Marshal(payload)
|
||||
}
|
||||
|
||||
func createResourceLabelPatch(resource string) ([]byte, error) {
|
||||
payload := []patchLabelValue{{
|
||||
Op: "add",
|
||||
Path: "/metadata/labels/resource",
|
||||
Value: resource,
|
||||
}}
|
||||
return json.Marshal(payload)
|
||||
}
|
||||
|
||||
func createLabelMapPatch(policy string, resource string) ([]byte, error) {
|
||||
payload := []patchLabelMapValue{{
|
||||
Op: "add",
|
||||
Path: "/metadata/labels",
|
||||
Value: map[string]string{"policy": policy, "resource": resource},
|
||||
}}
|
||||
return json.Marshal(payload)
|
||||
}
|
||||
|
||||
//updatePolicyLabelIfNotDefined adds the label 'policy' to the PolicyViolation
|
||||
// label is used here to lookup policyViolation and corresponding Policy
|
||||
func updatePolicyLabelIfNotDefined(pvControl PVControlInterface, pv *kyverno.ClusterPolicyViolation) bool {
|
||||
updateLabel := func() bool {
|
||||
glog.V(4).Infof("adding label 'policy:%s' to PolicyViolation %s", pv.Spec.Policy, pv.Name)
|
||||
glog.V(4).Infof("adding label 'resource:%s' to PolicyViolation %s", pv.Spec.ResourceSpec.ToKey(), pv.Name)
|
||||
// add label based on the policy spec
|
||||
labels := pv.GetLabels()
|
||||
if pv.Spec.Policy == "" {
|
||||
glog.Error("policy not defined for violation")
|
||||
// should be cleaned up
|
||||
return false
|
||||
}
|
||||
if labels == nil {
|
||||
// create a patch to generate the labels map with policy label
|
||||
patch, err := createLabelMapPatch(pv.Spec.Policy, pv.Spec.ResourceSpec.ToKey())
|
||||
if err != nil {
|
||||
glog.Errorf("unable to init label map. %v", err)
|
||||
return false
|
||||
}
|
||||
if err := pvControl.PatchPolicyViolation(pv.Name, patch); err != nil {
|
||||
glog.Errorf("Unable to add 'policy' label to PolicyViolation %s: %v", pv.Name, err)
|
||||
return false
|
||||
}
|
||||
// update successful
|
||||
return true
|
||||
}
|
||||
// JSON Patch to add exact label
|
||||
policyLabelPatch, err := createPolicyLabelPatch(pv.Spec.Policy)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to generate patch to add label 'policy': %v", err)
|
||||
return false
|
||||
}
|
||||
resourceLabelPatch, err := createResourceLabelPatch(pv.Spec.ResourceSpec.ToKey())
|
||||
if err != nil {
|
||||
glog.Errorf("failed to generate patch to add label 'resource': %v", err)
|
||||
return false
|
||||
}
|
||||
//join patches
|
||||
labelPatch := joinPatches(policyLabelPatch, resourceLabelPatch)
|
||||
if labelPatch == nil {
|
||||
glog.Errorf("failed to join patches : %v", err)
|
||||
return false
|
||||
}
|
||||
glog.V(4).Infof("patching policy violation %s with patch %s", pv.Name, string(labelPatch))
|
||||
if err := pvControl.PatchPolicyViolation(pv.Name, labelPatch); err != nil {
|
||||
glog.Errorf("Unable to add 'policy' label to PolicyViolation %s: %v", pv.Name, err)
|
||||
return false
|
||||
}
|
||||
// update successful
|
||||
return true
|
||||
}
|
||||
|
||||
var policy string
|
||||
var ok bool
|
||||
// operate on a copy of the resource
|
||||
curLabels := pv.GetLabels()
|
||||
if policy, ok = curLabels["policy"]; !ok {
|
||||
return updateLabel()
|
||||
}
|
||||
// TODO: would be beneficial to add a check to verify if the policy in name and resource spec match
|
||||
if policy != pv.Spec.Policy {
|
||||
glog.Errorf("label 'policy:%s' and spec.policy %s dont match ", policy, pv.Spec.Policy)
|
||||
//TODO handle this case
|
||||
return updateLabel()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func joinPatches(patches ...[]byte) []byte {
|
||||
var result []byte
|
||||
if patches == nil {
|
||||
// nothing to join
|
||||
return result
|
||||
}
|
||||
result = append(result, []byte("[\n")...)
|
||||
for index, patch := range patches {
|
||||
result = append(result, patch...)
|
||||
if index != len(patches)-1 {
|
||||
result = append(result, []byte(",\n")...)
|
||||
}
|
||||
}
|
||||
result = append(result, []byte("\n]")...)
|
||||
return result
|
||||
return r.Client.KyvernoV1().PolicyViolations(ns).Delete(name, &metav1.DeleteOptions{})
|
||||
}
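A rough, self-contained sketch of the patch-joining idea used by joinPatches above: individually marshaled patches are concatenated between a single pair of brackets so one PATCH call can carry both label operations. The inputs here are hypothetical single-operation patches with placeholder values; the helpers in this file marshal small slices, so the exact bytes differ.

package main

import "fmt"

// joinJSONPatches mirrors the comma-and-bracket concatenation shown above.
func joinJSONPatches(patches ...[]byte) []byte {
	if len(patches) == 0 {
		// nothing to join
		return nil
	}
	result := []byte("[\n")
	for index, patch := range patches {
		result = append(result, patch...)
		if index != len(patches)-1 {
			result = append(result, []byte(",\n")...)
		}
	}
	return append(result, []byte("\n]")...)
}

func main() {
	// hypothetical single-operation patches for the 'policy' and 'resource' labels
	policyPatch := []byte(`{"op":"add","path":"/metadata/labels/policy","value":"example-policy"}`)
	resourcePatch := []byte(`{"op":"add","path":"/metadata/labels/resource","value":"example-resource"}`)
	fmt.Println(string(joinJSONPatches(policyPatch, resourcePatch)))
}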
|
||||
|
||||
// convertRules converts the internal rule stats to one used in policy.stats struct
|
||||
|
|
|
@ -1,47 +1,27 @@
|
|||
package policy
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
func (pc *PolicyController) addNamespacedPolicyViolation(obj interface{}) {
|
||||
pv := obj.(*kyverno.NamespacedPolicyViolation)
|
||||
pv := obj.(*kyverno.PolicyViolation)
|
||||
|
||||
if pv.DeletionTimestamp != nil {
|
||||
// On a restart of the controller manager, it's possible for an object to
|
||||
// show up in a state that is already pending deletion.
|
||||
pc.deletePolicyViolation(pv)
|
||||
pc.deleteNamespacedPolicyViolation(pv)
|
||||
return
|
||||
}
|
||||
// don't manage controller references, as the ownerReference is assigned by the violation generator
|
||||
|
||||
// generate labels to match the policy from the spec, if not present
|
||||
if updateLabels(pv) {
|
||||
return
|
||||
}
|
||||
|
||||
// If it has a ControllerRef, that's all that matters.
|
||||
if controllerRef := metav1.GetControllerOf(pv); controllerRef != nil {
|
||||
p := pc.resolveControllerRef(controllerRef)
|
||||
if p == nil {
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("Namespaced policy violation %s added.", pv.Name)
|
||||
pc.enqueuePolicy(p)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, it's an orphan. Get a list of all matching Policies and sync
|
||||
// them to see if anyone wants to adopt it.
|
||||
ps := pc.getPolicyForNamespacedPolicyViolation(pv)
|
||||
if len(ps) == 0 {
|
||||
// there is no cluster policy for this violation, so we can delete this namespaced policy violation
|
||||
glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", pv.Name)
|
||||
if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {
|
||||
if err := pc.pvControl.DeleteNamespacedPolicyViolation(pv.Namespace, pv.Name); err != nil {
|
||||
glog.Errorf("Failed to deleted policy violation %s: %v", pv.Name, err)
|
||||
return
|
||||
}
|
||||
|
@ -55,63 +35,34 @@ func (pc *PolicyController) addNamespacedPolicyViolation(obj interface{}) {
|
|||
}
|
||||
|
||||
func (pc *PolicyController) updateNamespacedPolicyViolation(old, cur interface{}) {
|
||||
curPV := cur.(*kyverno.NamespacedPolicyViolation)
|
||||
oldPV := old.(*kyverno.NamespacedPolicyViolation)
|
||||
curPV := cur.(*kyverno.PolicyViolation)
|
||||
oldPV := old.(*kyverno.PolicyViolation)
|
||||
if curPV.ResourceVersion == oldPV.ResourceVersion {
|
||||
// Periodic resync will send update events for all known Policy Violations.
|
||||
// Two different versions of the same policy violation will always have different RVs.
|
||||
return
|
||||
}
|
||||
|
||||
// generate labels to match the policy from the spec, if not present
|
||||
if updateLabels(curPV) {
|
||||
return
|
||||
}
|
||||
|
||||
curControllerRef := metav1.GetControllerOf(curPV)
|
||||
oldControllerRef := metav1.GetControllerOf(oldPV)
|
||||
controllerRefChanged := !reflect.DeepEqual(curControllerRef, oldControllerRef)
|
||||
if controllerRefChanged && oldControllerRef != nil {
|
||||
// The ControllerRef was changed. Sync the old controller, if any.
|
||||
if p := pc.resolveControllerRef(oldControllerRef); p != nil {
|
||||
pc.enqueuePolicy(p)
|
||||
}
|
||||
}
|
||||
// If it has a ControllerRef, that's all that matters.
|
||||
if curControllerRef != nil {
|
||||
p := pc.resolveControllerRef(curControllerRef)
|
||||
if p == nil {
|
||||
ps := pc.getPolicyForNamespacedPolicyViolation(curPV)
|
||||
if len(ps) == 0 {
|
||||
// there is no cluster policy for this violation, so we can delete this namespaced policy violation
|
||||
glog.V(4).Infof("Namespaced Policy Violation %s does not belong to an active policy, will be cleanedup", curPV.Name)
|
||||
if err := pc.pvControl.DeleteNamespacedPolicyViolation(curPV.Namespace, curPV.Name); err != nil {
|
||||
glog.Errorf("Failed to deleted namespaced policy violation %s: %v", curPV.Name, err)
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("PolicyViolation %s updated.", curPV.Name)
|
||||
glog.V(4).Infof("Namespaced Policy Violation %s deleted", curPV.Name)
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("Namespaced Policy sViolation %s updated", curPV.Name)
|
||||
for _, p := range ps {
|
||||
pc.enqueuePolicy(p)
|
||||
return
|
||||
}
|
||||
|
||||
// Otherwise, it's an orphan. If anything changed, sync matching controllers
|
||||
// to see if anyone wants to adopt it now.
|
||||
labelChanged := !reflect.DeepEqual(curPV.Labels, oldPV.Labels)
|
||||
if labelChanged || controllerRefChanged {
|
||||
ps := pc.getPolicyForNamespacedPolicyViolation(curPV)
|
||||
if len(ps) == 0 {
|
||||
// there is no cluster policy for this violation, so we can delete this namespaced policy violation
|
||||
glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", curPV.Name)
|
||||
if err := pc.pvControl.DeletePolicyViolation(curPV.Name); err != nil {
|
||||
glog.Errorf("Failed to deleted policy violation %s: %v", curPV.Name, err)
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("PolicyViolation %s deleted", curPV.Name)
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("Orphan PolicyViolation %s updated", curPV.Name)
|
||||
for _, p := range ps {
|
||||
pc.enqueuePolicy(p)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (pc *PolicyController) deleteNamespacedPolicyViolation(obj interface{}) {
|
||||
pv, ok := obj.(*kyverno.NamespacedPolicyViolation)
|
||||
pv, ok := obj.(*kyverno.PolicyViolation)
|
||||
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
|
||||
// in the list, leading to the insertion of a tombstone object which contains
|
||||
// the deleted key/value. Note that this value might be stale. If the PolicyViolation
|
||||
|
@ -122,61 +73,31 @@ func (pc *PolicyController) deleteNamespacedPolicyViolation(obj interface{}) {
|
|||
glog.Infof("Couldn't get object from tombstone %#v", obj)
|
||||
return
|
||||
}
|
||||
pv, ok = tombstone.Obj.(*kyverno.NamespacedPolicyViolation)
|
||||
pv, ok = tombstone.Obj.(*kyverno.PolicyViolation)
|
||||
if !ok {
|
||||
glog.Infof("Couldn't get object from tombstone %#v", obj)
|
||||
return
|
||||
}
|
||||
}
|
||||
controllerRef := metav1.GetControllerOf(pv)
|
||||
if controllerRef == nil {
|
||||
// No controller should care about orphans being deleted.
|
||||
|
||||
ps := pc.getPolicyForNamespacedPolicyViolation(pv)
|
||||
if len(ps) == 0 {
|
||||
// there is no cluster policy for this violation, so we can delete this namespaced policy violation
|
||||
glog.V(4).Infof("Namespaced Policy Violation %s does not belong to an active policy, will be cleanedup", pv.Name)
|
||||
if err := pc.pvControl.DeleteNamespacedPolicyViolation(pv.Namespace, pv.Name); err != nil {
|
||||
glog.Errorf("Failed to deleted namespaced policy violation %s: %v", pv.Name, err)
|
||||
return
|
||||
}
|
||||
glog.V(4).Infof("Namespaced Policy Violation %s deleted", pv.Name)
|
||||
return
|
||||
}
|
||||
p := pc.resolveControllerRef(controllerRef)
|
||||
if p == nil {
|
||||
return
|
||||
glog.V(4).Infof("Namespaced PolicyViolation %s updated", pv.Name)
|
||||
for _, p := range ps {
|
||||
pc.enqueuePolicy(p)
|
||||
}
|
||||
glog.V(4).Infof("PolicyViolation %s deleted", pv.Name)
|
||||
pc.enqueuePolicy(p)
|
||||
}
|
||||
|
||||
func updateLabels(pv *kyverno.NamespacedPolicyViolation) bool {
|
||||
if pv.Spec.Policy == "" {
|
||||
glog.Error("policy not defined for violation")
|
||||
// should be cleaned up
|
||||
return false
|
||||
}
|
||||
|
||||
labels := pv.GetLabels()
|
||||
newLabels := labels
|
||||
if newLabels == nil {
|
||||
newLabels = make(map[string]string)
|
||||
}
|
||||
|
||||
policy, ok := newLabels["policy"]
|
||||
// key 'policy' is not present
|
||||
// or policy name has changed
|
||||
if !ok || policy != pv.Spec.Policy {
|
||||
newLabels["policy"] = pv.Spec.Policy
|
||||
}
|
||||
|
||||
resource, ok := newLabels["resource"]
|
||||
// key 'resource' is not present
|
||||
// or resource defined in policy has changed
|
||||
if !ok || resource != pv.Spec.ResourceSpec.ToKey() {
|
||||
newLabels["resource"] = pv.Spec.ResourceSpec.ToKey()
|
||||
}
|
||||
|
||||
if !reflect.DeepEqual(labels, newLabels) {
|
||||
pv.SetLabels(labels)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (pc *PolicyController) getPolicyForNamespacedPolicyViolation(pv *kyverno.NamespacedPolicyViolation) []*kyverno.ClusterPolicy {
|
||||
func (pc *PolicyController) getPolicyForNamespacedPolicyViolation(pv *kyverno.PolicyViolation) []*kyverno.ClusterPolicy {
|
||||
policies, err := pc.pLister.GetPolicyForNamespacedPolicyViolation(pv)
|
||||
if err != nil || len(policies) == 0 {
|
||||
return nil
|
||||
|
|
|
@ -7,16 +7,6 @@ import (
|
|||
)
|
||||
|
||||
func (pc *PolicyController) removeResourceWebhookConfiguration() error {
|
||||
removeWebhookConfig := func() error {
|
||||
var err error
|
||||
err = pc.webhookRegistrationClient.RemoveResourceMutatingWebhookConfiguration()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(4).Info("removed resource webhook configuration")
|
||||
return nil
|
||||
}
|
||||
|
||||
var err error
|
||||
// get all existing policies
|
||||
policies, err := pc.pLister.List(labels.NewSelector())
|
||||
|
@ -27,28 +17,18 @@ func (pc *PolicyController) removeResourceWebhookConfiguration() error {
|
|||
|
||||
if len(policies) == 0 {
|
||||
glog.V(4).Info("no policies loaded, removing resource webhook configuration if one exists")
|
||||
return removeWebhookConfig()
|
||||
return pc.resourceWebhookWatcher.RemoveResourceWebhookConfiguration()
|
||||
}
|
||||
|
||||
// if there are policies, check if they contain mutating or validating rule
|
||||
// if policies only have generate rules, we don't need the webhook
|
||||
if !hasMutateOrValidatePolicies(policies) {
|
||||
glog.V(4).Info("no policies with mutating or validating webhook configurations, remove resource webhook configuration if one exists")
|
||||
return removeWebhookConfig()
|
||||
return pc.resourceWebhookWatcher.RemoveResourceWebhookConfiguration()
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pc *PolicyController) createResourceMutatingWebhookConfigurationIfRequired(policy kyverno.ClusterPolicy) error {
|
||||
// if the policy contains mutating & validation rules and its config does not exist, we create one
|
||||
if policy.HasMutateOrValidate() {
|
||||
if err := pc.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func hasMutateOrValidatePolicies(policies []*kyverno.ClusterPolicy) bool {
|
||||
for _, policy := range policies {
|
||||
if (*policy).HasMutateOrValidate() {
|
||||
|
|
|
@ -3,8 +3,11 @@ package policystore
|
|||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
type policyMap map[string]interface{}
|
||||
|
@ -13,9 +16,12 @@ type kindMap map[string]namespaceMap
|
|||
|
||||
//PolicyStore Store the meta-data information to faster lookup policies
|
||||
type PolicyStore struct {
|
||||
data map[string]namespaceMap
|
||||
mu sync.RWMutex
|
||||
data map[string]namespaceMap
|
||||
mu sync.RWMutex
|
||||
// list/get cluster policy
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
// returns true if the cluster policy store has been synced at least once
|
||||
pSynched cache.InformerSynced
|
||||
}
|
||||
|
||||
//UpdateInterface provides api to update policies
|
||||
|
@ -33,14 +39,22 @@ type LookupInterface interface {
|
|||
}
|
||||
|
||||
// NewPolicyStore returns a new policy store
|
||||
func NewPolicyStore(pLister kyvernolister.ClusterPolicyLister) *PolicyStore {
|
||||
func NewPolicyStore(pInformer kyvernoinformer.ClusterPolicyInformer) *PolicyStore {
|
||||
ps := PolicyStore{
|
||||
data: make(kindMap),
|
||||
pLister: pLister,
|
||||
data: make(kindMap),
|
||||
pLister: pInformer.Lister(),
|
||||
pSynched: pInformer.Informer().HasSynced,
|
||||
}
|
||||
return &ps
|
||||
}
|
||||
|
||||
//Run checks syncing
|
||||
func (ps *PolicyStore) Run(stopCh <-chan struct{}) {
|
||||
if !cache.WaitForCacheSync(stopCh, ps.pSynched) {
|
||||
glog.Error("policy meta store: failed to sync informer cache")
|
||||
}
|
||||
}
|
||||
|
||||
//Register a new policy
|
||||
func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
|
||||
ps.mu.Lock()
|
||||
|
|
|
@ -4,13 +4,16 @@ import (
|
|||
"encoding/json"
|
||||
"reflect"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/fake"
|
||||
listerv1 "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/client-go/kubernetes/scheme"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
func Test_Operations(t *testing.T) {
|
||||
|
@ -207,8 +210,8 @@ func Test_Operations(t *testing.T) {
|
|||
polices = append(polices, obj)
|
||||
// Mock Lister
|
||||
client := fake.NewSimpleClientset(polices...)
|
||||
fakeLister := &FakeLister{client: client}
|
||||
store := NewPolicyStore(fakeLister)
|
||||
fakeInformer := &FakeInformer{client: client}
|
||||
store := NewPolicyStore(fakeInformer)
|
||||
// Test Operations
|
||||
// Add
|
||||
store.Register(policy1)
|
||||
|
@ -258,25 +261,77 @@ func Test_Operations(t *testing.T) {
|
|||
|
||||
}
|
||||
|
||||
type FakeInformer struct {
|
||||
client *fake.Clientset
|
||||
}
|
||||
|
||||
func (fi *FakeInformer) Informer() cache.SharedIndexInformer {
|
||||
fsi := &FakeSharedInformer{}
|
||||
return fsi
|
||||
}
|
||||
|
||||
func (fi *FakeInformer) Lister() listerv1.ClusterPolicyLister {
|
||||
fl := &FakeLister{client: fi.client}
|
||||
return fl
|
||||
}
|
||||
|
||||
type FakeLister struct {
|
||||
client *fake.Clientset
|
||||
}
|
||||
|
||||
func (fk *FakeLister) List(selector labels.Selector) (ret []*kyverno.ClusterPolicy, err error) {
|
||||
func (fl *FakeLister) List(selector labels.Selector) (ret []*kyverno.ClusterPolicy, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fk *FakeLister) Get(name string) (*kyverno.ClusterPolicy, error) {
|
||||
return fk.client.KyvernoV1().ClusterPolicies().Get(name, v1.GetOptions{})
|
||||
func (fl *FakeLister) Get(name string) (*kyverno.ClusterPolicy, error) {
|
||||
return fl.client.KyvernoV1().ClusterPolicies().Get(name, v1.GetOptions{})
|
||||
}
|
||||
|
||||
func (fk *FakeLister) GetPolicyForPolicyViolation(pv *kyverno.ClusterPolicyViolation) ([]*kyverno.ClusterPolicy, error) {
|
||||
func (fl *FakeLister) GetPolicyForPolicyViolation(pv *kyverno.ClusterPolicyViolation) ([]*kyverno.ClusterPolicy, error) {
|
||||
return nil, nil
|
||||
}
|
||||
func (fk *FakeLister) ListResources(selector labels.Selector) (ret []*kyverno.ClusterPolicy, err error) {
|
||||
func (fl *FakeLister) ListResources(selector labels.Selector) (ret []*kyverno.ClusterPolicy, err error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (fk *FakeLister) GetPolicyForNamespacedPolicyViolation(pv *kyverno.NamespacedPolicyViolation) ([]*kyverno.ClusterPolicy, error) {
|
||||
func (fl *FakeLister) GetPolicyForNamespacedPolicyViolation(pv *kyverno.PolicyViolation) ([]*kyverno.ClusterPolicy, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
type FakeSharedInformer struct {
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) HasSynced() bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) AddEventHandler(handler cache.ResourceEventHandler) {
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) AddEventHandlerWithResyncPeriod(handler cache.ResourceEventHandler, resyncPeriod time.Duration) {
|
||||
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) AddIndexers(indexers cache.Indexers) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) GetIndexer() cache.Indexer {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) GetStore() cache.Store {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) GetController() cache.Controller {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) Run(stopCh <-chan struct{}) {
|
||||
|
||||
}
|
||||
|
||||
func (fsi *FakeSharedInformer) LastSyncResourceVersion() string {
|
||||
return ""
|
||||
}
|
||||
|
|
79
pkg/policyviolation/builder.go
Normal file
|
@ -0,0 +1,79 @@
|
|||
package policyviolation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
)
|
||||
|
||||
// Builder builds Policy Violation struct
|
||||
// this is the base type for namespaced and cluster policy violations
|
||||
type Builder interface {
|
||||
generate(info Info) []kyverno.PolicyViolationTemplate
|
||||
build(policy, kind, namespace, name string, rules []kyverno.ViolatedRule) *kyverno.PolicyViolationTemplate
|
||||
}
|
||||
|
||||
type pvBuilder struct {
|
||||
// dynamic client
|
||||
dclient *client.Client
|
||||
}
|
||||
|
||||
func newPvBuilder(dclient *client.Client) *pvBuilder {
|
||||
pvb := pvBuilder{
|
||||
dclient: dclient,
|
||||
}
|
||||
return &pvb
|
||||
}
|
||||
func (pvb *pvBuilder) generate(info Info) []kyverno.PolicyViolationTemplate {
|
||||
var owners []kyverno.ResourceSpec
|
||||
// get the owners if the resource is blocked or
|
||||
// TODO: https://github.com/nirmata/kyverno/issues/535
|
||||
if info.Blocked {
|
||||
// get resource owners
|
||||
owners = GetOwners(pvb.dclient, info.Resource)
|
||||
}
|
||||
pvs := pvb.buildPolicyViolations(owners, info)
|
||||
return pvs
|
||||
}
|
||||
|
||||
func (pvb *pvBuilder) buildPolicyViolations(owners []kyverno.ResourceSpec, info Info) []kyverno.PolicyViolationTemplate {
|
||||
var pvs []kyverno.PolicyViolationTemplate
|
||||
if len(owners) != 0 {
|
||||
// there are resource owners
|
||||
// generate PV on them
|
||||
for _, resource := range owners {
|
||||
pv := pvb.build(info.PolicyName, resource.Kind, resource.Namespace, resource.Name, info.Rules)
|
||||
pvs = append(pvs, *pv)
|
||||
}
|
||||
} else {
|
||||
// generate PV on resource
|
||||
pv := pvb.build(info.PolicyName, info.Resource.GetKind(), info.Resource.GetNamespace(), info.Resource.GetName(), info.Rules)
|
||||
pvs = append(pvs, *pv)
|
||||
}
|
||||
return pvs
|
||||
}
|
||||
|
||||
func (pvb *pvBuilder) build(policy, kind, namespace, name string, rules []kyverno.ViolatedRule) *kyverno.PolicyViolationTemplate {
|
||||
pv := &kyverno.PolicyViolationTemplate{
|
||||
Spec: kyverno.PolicyViolationSpec{
|
||||
Policy: policy,
|
||||
ResourceSpec: kyverno.ResourceSpec{
|
||||
Kind: kind,
|
||||
Name: name,
|
||||
Namespace: namespace,
|
||||
},
|
||||
ViolatedRules: rules,
|
||||
},
|
||||
}
|
||||
labelMap := map[string]string{
|
||||
"policy": pv.Spec.Policy,
|
||||
"resource": pv.Spec.ToKey(),
|
||||
}
|
||||
pv.SetLabels(labelMap)
|
||||
if namespace != "" {
|
||||
pv.SetNamespace(namespace)
|
||||
}
|
||||
pv.SetGenerateName(fmt.Sprintf("%s-", policy))
|
||||
return pv
|
||||
}
|
119
pkg/policyviolation/clusterpv.go
Normal file
|
@ -0,0 +1,119 @@
|
|||
package policyviolation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
//ClusterPV ...
|
||||
type clusterPV struct {
|
||||
// dynamic client
|
||||
dclient *client.Client
|
||||
// get/list cluster policy violation
|
||||
cpvLister kyvernolister.ClusterPolicyViolationLister
|
||||
// policy violation interface
|
||||
kyvernoInterface kyvernov1.KyvernoV1Interface
|
||||
}
|
||||
|
||||
func newClusterPV(dclient *client.Client,
|
||||
cpvLister kyvernolister.ClusterPolicyViolationLister,
|
||||
kyvernoInterface kyvernov1.KyvernoV1Interface,
|
||||
) *clusterPV {
|
||||
cpv := clusterPV{
|
||||
dclient: dclient,
|
||||
cpvLister: cpvLister,
|
||||
kyvernoInterface: kyvernoInterface,
|
||||
}
|
||||
return &cpv
|
||||
}
|
||||
|
||||
func (cpv *clusterPV) create(pv kyverno.PolicyViolationTemplate) error {
|
||||
newPv := kyverno.ClusterPolicyViolation(pv)
|
||||
// check if a PV for this policy/resource already exists
|
||||
oldPv, err := cpv.getExisting(newPv)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if oldPv == nil {
|
||||
// create a new policy violation
|
||||
return cpv.createPV(&newPv)
|
||||
}
|
||||
// policy violation exists
|
||||
// skip if there is no change, else update the violation
|
||||
return cpv.updatePV(&newPv, oldPv)
|
||||
}
|
||||
|
||||
func (cpv *clusterPV) getExisting(newPv kyverno.ClusterPolicyViolation) (*kyverno.ClusterPolicyViolation, error) {
|
||||
var err error
|
||||
// use labels
|
||||
policyLabelmap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
|
||||
ls, err := converLabelToSelector(policyLabelmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pvs, err := cpv.cpvLister.List(ls)
|
||||
if err != nil {
|
||||
glog.Errorf("unable to list cluster policy violations : %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pv := range pvs {
|
||||
// find a policy on same resource and policy combination
|
||||
if pv.Spec.Policy == newPv.Spec.Policy &&
|
||||
pv.Spec.ResourceSpec.Kind == newPv.Spec.ResourceSpec.Kind &&
|
||||
pv.Spec.ResourceSpec.Name == newPv.Spec.ResourceSpec.Name {
|
||||
return pv, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
|
||||
var err error
|
||||
glog.V(4).Infof("creating new policy violation for policy %s & resource %s/%s", newPv.Spec.Policy, newPv.Spec.ResourceSpec.Kind, newPv.Spec.ResourceSpec.Name)
|
||||
obj, err := retryGetResource(cpv.dclient, newPv.Spec.ResourceSpec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retry getting resource for policy violation %s/%s: %v", newPv.Name, newPv.Spec.Policy, err)
|
||||
}
|
||||
// set owner reference to resource
|
||||
ownerRef := createOwnerReference(obj)
|
||||
newPv.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
|
||||
|
||||
// create resource
|
||||
_, err = cpv.kyvernoInterface.ClusterPolicyViolations().Create(newPv)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("failed to create Cluster Policy Violation: %v", err)
|
||||
return err
|
||||
}
|
||||
glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) error {
|
||||
var err error
|
||||
// check if there is any update
|
||||
if reflect.DeepEqual(newPv.Spec, oldPv.Spec) {
|
||||
glog.V(4).Infof("policy violation spec %v did not change so not updating it", newPv.Spec)
|
||||
return nil
|
||||
}
|
||||
// set name
|
||||
newPv.SetName(oldPv.Name)
|
||||
newPv.SetResourceVersion(oldPv.ResourceVersion)
|
||||
|
||||
// update resource
|
||||
_, err = cpv.kyvernoInterface.ClusterPolicyViolations().Update(newPv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update cluster policy violation: %v", err)
|
||||
}
|
||||
glog.Infof("cluster policy violation updated for resource %v", newPv.Spec.ResourceSpec)
|
||||
|
||||
return nil
|
||||
}
|
116
pkg/policyviolation/common.go
Normal file
|
@ -0,0 +1,116 @@
|
|||
package policyviolation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
backoff "github.com/cenkalti/backoff"
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
func createOwnerReference(resource *unstructured.Unstructured) metav1.OwnerReference {
|
||||
controllerFlag := true
|
||||
blockOwnerDeletionFlag := true
|
||||
ownerRef := metav1.OwnerReference{
|
||||
APIVersion: resource.GetAPIVersion(),
|
||||
Kind: resource.GetKind(),
|
||||
Name: resource.GetName(),
|
||||
UID: resource.GetUID(),
|
||||
Controller: &controllerFlag,
|
||||
BlockOwnerDeletion: &blockOwnerDeletionFlag,
|
||||
}
|
||||
return ownerRef
|
||||
}
|
||||
|
||||
func retryGetResource(client *client.Client, rspec kyverno.ResourceSpec) (*unstructured.Unstructured, error) {
|
||||
var i int
|
||||
var obj *unstructured.Unstructured
|
||||
var err error
|
||||
getResource := func() error {
|
||||
obj, err = client.GetResource(rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
glog.V(4).Infof("retry %v getting %s/%s/%s", i, rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
i++
|
||||
return err
|
||||
}
|
||||
|
||||
exbackoff := &backoff.ExponentialBackOff{
|
||||
InitialInterval: 500 * time.Millisecond,
|
||||
RandomizationFactor: 0.5,
|
||||
Multiplier: 1.5,
|
||||
MaxInterval: time.Second,
|
||||
MaxElapsedTime: 3 * time.Second,
|
||||
Clock: backoff.SystemClock,
|
||||
}
|
||||
|
||||
exbackoff.Reset()
|
||||
err = backoff.Retry(getResource, exbackoff)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return obj, nil
|
||||
}
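retryGetResource above retries the lookup with an exponential backoff so that a just-admitted resource has a moment to appear before the violation is written. A minimal sketch of the same cenkalti/backoff pattern, using a made-up flaky operation:

package main

import (
	"errors"
	"fmt"
	"time"

	backoff "github.com/cenkalti/backoff"
)

func main() {
	attempts := 0
	// hypothetical operation that only succeeds on the third attempt
	operation := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("resource not visible yet")
		}
		return nil
	}

	exp := backoff.NewExponentialBackOff()
	exp.InitialInterval = 500 * time.Millisecond
	exp.MaxInterval = time.Second
	exp.MaxElapsedTime = 3 * time.Second

	if err := backoff.Retry(operation, exp); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Printf("succeeded after %d attempts\n", attempts)
}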
|
||||
|
||||
// GetOwners returns a list of owners
|
||||
func GetOwners(dclient *client.Client, resource unstructured.Unstructured) []kyverno.ResourceSpec {
|
||||
ownerMap := map[kyverno.ResourceSpec]interface{}{}
|
||||
GetOwner(dclient, ownerMap, resource)
|
||||
var owners []kyverno.ResourceSpec
|
||||
for owner := range ownerMap {
|
||||
owners = append(owners, owner)
|
||||
}
|
||||
return owners
|
||||
}
|
||||
|
||||
// GetOwner of a resource by iterating over ownerReferences
|
||||
func GetOwner(dclient *client.Client, ownerMap map[kyverno.ResourceSpec]interface{}, resource unstructured.Unstructured) {
|
||||
var emptyInterface interface{}
|
||||
resourceSpec := kyverno.ResourceSpec{
|
||||
Kind: resource.GetKind(),
|
||||
Namespace: resource.GetNamespace(),
|
||||
Name: resource.GetName(),
|
||||
}
|
||||
if _, ok := ownerMap[resourceSpec]; ok {
|
||||
// owner seen before
|
||||
// breaking loop
|
||||
return
|
||||
}
|
||||
rOwners := resource.GetOwnerReferences()
|
||||
// if there are no resource owners then it's a top-level resource
|
||||
if len(rOwners) == 0 {
|
||||
// add resource to map
|
||||
ownerMap[resourceSpec] = emptyInterface
|
||||
return
|
||||
}
|
||||
for _, rOwner := range rOwners {
|
||||
// lookup resource via client
|
||||
// owner has to be in same namespace
|
||||
owner, err := dclient.GetResource(rOwner.Kind, resource.GetNamespace(), rOwner.Name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get resource owner for %s/%s/%s, err: %v", rOwner.Kind, resource.GetNamespace(), rOwner.Name, err)
|
||||
// as we want to process other owners
|
||||
continue
|
||||
}
|
||||
GetOwner(dclient, ownerMap, *owner)
|
||||
}
|
||||
}
|
||||
|
||||
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
|
||||
ls := &metav1.LabelSelector{}
|
||||
err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid label selector: %v", err)
|
||||
}
|
||||
|
||||
return policyViolationSelector, nil
|
||||
}
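converLabelToSelector above builds a labels.Selector from the policy/resource label map so existing violations can be looked up through the listers. A minimal sketch of the same idea using the apimachinery labels package; the label values are placeholders:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
)

func main() {
	// hypothetical labels as set on a generated policy violation
	set := labels.Set{"policy": "example-policy", "resource": "example-resource"}

	// equivalent selector; could be handed to a lister, e.g. cpvLister.List(selector)
	selector := labels.SelectorFromSet(set)
	fmt.Println(selector.String()) // policy=example-policy,resource=example-resource

	// matching works against the labels of a candidate object
	fmt.Println(selector.Matches(set)) // true
}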
|
|
@ -1,344 +0,0 @@
|
|||
package policyviolation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
const (
|
||||
// maxRetries is the number of times a PolicyViolation will be retried before it is dropped out of the queue.
|
||||
// With the current rate-limiter in use (5ms*2^(maxRetries-1)) the following numbers represent the times
|
||||
// a policy violation is going to be requeued:
|
||||
//
|
||||
// 5ms, 10ms, 20ms, 40ms, 80ms, 160ms, 320ms, 640ms, 1.3s, 2.6s, 5.1s, 10.2s, 20.4s, 41s, 82s
|
||||
maxRetries = 15
|
||||
)
|
||||
|
||||
var controllerKind = kyverno.SchemeGroupVersion.WithKind("ClusterPolicyViolation")
|
||||
|
||||
// PolicyViolationController manages the policy violation resource
|
||||
// - sync the lastupdate time
|
||||
// - check if the resource is active
|
||||
type PolicyViolationController struct {
|
||||
client *client.Client
|
||||
kyvernoClient *kyvernoclient.Clientset
|
||||
eventRecorder record.EventRecorder
|
||||
syncHandler func(pKey string) error
|
||||
enqueuePolicyViolation func(policy *kyverno.ClusterPolicyViolation)
|
||||
// Policies that need to be synced
|
||||
queue workqueue.RateLimitingInterface
|
||||
// pvLister can list/get policy violation from the shared informer's store
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister
|
||||
// pLister can list/get policy from the shared informer's store
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
// pListerSynced returns true if the Policy store has been synced at least once
|
||||
pListerSynced cache.InformerSynced
|
||||
// pvListerSynced returns true if the Policy store has been synced at least once
|
||||
pvListerSynced cache.InformerSynced
|
||||
//pvControl is used for updating status/cleanup policy violation
|
||||
pvControl PVControlInterface
|
||||
}
|
||||
|
||||
//NewPolicyViolationController creates a new NewPolicyViolationController
|
||||
func NewPolicyViolationController(client *client.Client, kyvernoClient *kyvernoclient.Clientset, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.ClusterPolicyViolationInformer) (*PolicyViolationController, error) {
|
||||
// Event broadcaster
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventInterface, err := client.GetEventsInterface()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
|
||||
|
||||
pvc := PolicyViolationController{
|
||||
kyvernoClient: kyvernoClient,
|
||||
client: client,
|
||||
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policyviolation_controller"}),
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policyviolation"),
|
||||
}
|
||||
pvc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pvc.eventRecorder}
|
||||
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: pvc.addPolicyViolation,
|
||||
UpdateFunc: pvc.updatePolicyViolation,
|
||||
DeleteFunc: pvc.deletePolicyViolation,
|
||||
})
|
||||
|
||||
pvc.enqueuePolicyViolation = pvc.enqueue
|
||||
pvc.syncHandler = pvc.syncPolicyViolation
|
||||
|
||||
pvc.pLister = pInformer.Lister()
|
||||
pvc.pvLister = pvInformer.Lister()
|
||||
pvc.pListerSynced = pInformer.Informer().HasSynced
|
||||
pvc.pvListerSynced = pvInformer.Informer().HasSynced
|
||||
|
||||
return &pvc, nil
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) addPolicyViolation(obj interface{}) {
|
||||
pv := obj.(*kyverno.ClusterPolicyViolation)
|
||||
glog.V(4).Infof("Adding PolicyViolation %s", pv.Name)
|
||||
pvc.enqueuePolicyViolation(pv)
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) updatePolicyViolation(old, cur interface{}) {
|
||||
oldPv := old.(*kyverno.ClusterPolicyViolation)
|
||||
curPv := cur.(*kyverno.ClusterPolicyViolation)
|
||||
glog.V(4).Infof("Updating Policy Violation %s", oldPv.Name)
|
||||
if err := pvc.syncLastUpdateTimeStatus(curPv, oldPv); err != nil {
|
||||
glog.Errorf("Failed to update lastUpdateTime in PolicyViolation %s status: %v", curPv.Name, err)
|
||||
}
|
||||
pvc.enqueuePolicyViolation(curPv)
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) deletePolicyViolation(obj interface{}) {
|
||||
pv, ok := obj.(*kyverno.ClusterPolicyViolation)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
|
||||
return
|
||||
}
|
||||
pv, ok = tombstone.Obj.(*kyverno.ClusterPolicyViolation)
|
||||
if !ok {
|
||||
glog.Info(fmt.Errorf("Tombstone contained object that is not a PolicyViolation %#v", obj))
|
||||
return
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Deleting PolicyViolation %s", pv.Name)
|
||||
pvc.enqueuePolicyViolation(pv)
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) enqueue(policyViolation *kyverno.ClusterPolicyViolation) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(policyViolation)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return
|
||||
}
|
||||
pvc.queue.Add(key)
|
||||
}
|
||||
|
||||
// Run begins watching and syncing.
|
||||
func (pvc *PolicyViolationController) Run(workers int, stopCh <-chan struct{}) {
|
||||
|
||||
defer utilruntime.HandleCrash()
|
||||
defer pvc.queue.ShutDown()
|
||||
|
||||
glog.Info("Starting policyviolation controller")
|
||||
defer glog.Info("Shutting down policyviolation controller")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, pvc.pListerSynced, pvc.pvListerSynced) {
|
||||
return
|
||||
}
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(pvc.worker, time.Second, stopCh)
|
||||
}
|
||||
<-stopCh
|
||||
}
|
||||
|
||||
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
|
||||
// It enforces that the syncHandler is never invoked concurrently with the same key.
|
||||
func (pvc *PolicyViolationController) worker() {
|
||||
for pvc.processNextWorkItem() {
|
||||
}
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) processNextWorkItem() bool {
|
||||
key, quit := pvc.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer pvc.queue.Done(key)
|
||||
|
||||
err := pvc.syncHandler(key.(string))
|
||||
pvc.handleErr(err, key)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) handleErr(err error, key interface{}) {
|
||||
if err == nil {
|
||||
pvc.queue.Forget(key)
|
||||
return
|
||||
}
|
||||
|
||||
if pvc.queue.NumRequeues(key) < maxRetries {
|
||||
glog.V(2).Infof("Error syncing PolicyViolation %v: %v", key, err)
|
||||
pvc.queue.AddRateLimited(key)
|
||||
return
|
||||
}
|
||||
|
||||
utilruntime.HandleError(err)
|
||||
glog.V(2).Infof("Dropping policyviolation %q out of the queue: %v", key, err)
|
||||
pvc.queue.Forget(key)
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) syncPolicyViolation(key string) error {
|
||||
startTime := time.Now()
|
||||
glog.V(4).Infof("Started syncing policy violation %q (%v)", key, startTime)
|
||||
defer func() {
|
||||
glog.V(4).Infof("Finished syncing cluster policy violation %q (%v)", key, time.Since(startTime))
|
||||
}()
|
||||
policyViolation, err := pvc.pvLister.Get(key)
|
||||
if errors.IsNotFound(err) {
|
||||
glog.V(2).Infof("PolicyViolation %v has been deleted", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Deep-copy otherwise we are mutating our cache.
|
||||
// TODO: Deep-copy only when needed.
|
||||
pv := policyViolation.DeepCopy()
|
||||
// TODO: Update Status to update ObservedGeneration
|
||||
// TODO: check if the policy violation refers to a resource that is still active // done by policy controller
|
||||
// TODO: remove the PV if the corresponding policy is not present
|
||||
// TODO: additional check on resource deletion (webhook): to delete the policy violations for a resource we could
|
||||
// list the resources with label selectors, but this can be expensive for each delete request of a resource
|
||||
if err := pvc.syncActiveResource(pv); err != nil {
|
||||
glog.V(4).Infof("not syncing policy violation status")
|
||||
return err
|
||||
}
|
||||
|
||||
return pvc.syncStatusOnly(pv)
|
||||
}
|
||||
|
||||
func (pvc *PolicyViolationController) syncActiveResource(curPv *kyverno.ClusterPolicyViolation) error {
|
||||
// check if the resource is active or not ?
|
||||
rspec := curPv.Spec.ResourceSpec
|
||||
// get resource
|
||||
_, err := pvc.client.GetResource(rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
if errors.IsNotFound(err) {
|
||||
// TODO: does it help to retry?
|
||||
// resource is not found
|
||||
// remove the violation
|
||||
|
||||
if err := pvc.pvControl.RemovePolicyViolation(curPv.Name); err != nil {
|
||||
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("removing policy violation %s as the corresponding resource %s/%s/%s does not exist anymore", curPv.Name, rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
glog.V(4).Infof("error while retrieved resource %s/%s/%s: %v", rspec.Kind, rspec.Namespace, rspec.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// cleanup pv with dependant
|
||||
if err := pvc.syncBlockedResource(curPv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//TODO- if the policy is not present, remove the policy violation
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncBlockedResource removes an inactive policy violation
|
||||
// once the previously rejected resource is created in the cluster
|
||||
func (pvc *PolicyViolationController) syncBlockedResource(curPv *kyverno.ClusterPolicyViolation) error {
|
||||
for _, violatedRule := range curPv.Spec.ViolatedRules {
|
||||
if reflect.DeepEqual(violatedRule.ManagedResource, kyverno.ManagedResourceSpec{}) {
|
||||
continue
|
||||
}
|
||||
|
||||
// get resource
|
||||
blockedResource := violatedRule.ManagedResource
|
||||
resources, _ := pvc.client.ListResource(blockedResource.Kind, blockedResource.Namespace, nil)
|
||||
|
||||
for _, resource := range resources.Items {
|
||||
glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
|
||||
owners := map[kyverno.ResourceSpec]interface{}{}
|
||||
GetOwner(pvc.client, owners, resource)
|
||||
// owner of resource matches violation resourceSpec
|
||||
// remove policy violation as the blocked request got created
|
||||
if _, ok := owners[curPv.Spec.ResourceSpec]; ok {
|
||||
// pod -> replicaset1; deploy -> replicaset2
|
||||
// if replicaset1 == replicaset2, the pod is
|
||||
// no longer an active child of deploy, skip removing pv
|
||||
if !validDependantForDeployment(pvc.client.GetAppsV1Interface(), curPv.Spec.ResourceSpec, resource) {
|
||||
glog.V(4).Infof("")
|
||||
continue
|
||||
}
|
||||
|
||||
// resource created, remove policy violation
|
||||
if err := pvc.pvControl.RemovePolicyViolation(curPv.Name); err != nil {
|
||||
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("removed policy violation %s as the blocked resource %s/%s successfully created, owner: %s",
|
||||
curPv.Name, blockedResource.Kind, blockedResource.Namespace, strings.ReplaceAll(curPv.Spec.ResourceSpec.ToKey(), ".", "/"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//syncStatusOnly updates the policyviolation status subresource
|
||||
// status:
|
||||
func (pvc *PolicyViolationController) syncStatusOnly(curPv *kyverno.ClusterPolicyViolation) error {
|
||||
// newStatus := calculateStatus(pv)
|
||||
return nil
|
||||
}
|
||||
|
||||
//TODO: think this through again
|
||||
//syncLastUpdateTimeStatus updates the policyviolation lastUpdateTime if anything in ViolationSpec changed
|
||||
// - lastUpdateTime : (time stamp when the policy violation changed)
|
||||
func (pvc *PolicyViolationController) syncLastUpdateTimeStatus(curPv *kyverno.ClusterPolicyViolation, oldPv *kyverno.ClusterPolicyViolation) error {
|
||||
// check if there is any change in policy violation information
|
||||
if !updated(curPv, oldPv) {
|
||||
return nil
|
||||
}
|
||||
// update the lastUpdateTime
|
||||
newPolicyViolation := curPv
|
||||
newPolicyViolation.Status = kyverno.PolicyViolationStatus{LastUpdateTime: metav1.Now()}
|
||||
|
||||
return pvc.pvControl.UpdateStatusPolicyViolation(newPolicyViolation)
|
||||
}
|
||||
|
||||
func updated(curPv *kyverno.ClusterPolicyViolation, oldPv *kyverno.ClusterPolicyViolation) bool {
|
||||
return !reflect.DeepEqual(curPv.Spec, oldPv.Spec)
|
||||
//TODO check if owner reference changed, then should we update the lastUpdateTime as well ?
|
||||
}
|
||||
|
||||
type PVControlInterface interface {
|
||||
UpdateStatusPolicyViolation(newPv *kyverno.ClusterPolicyViolation) error
|
||||
RemovePolicyViolation(name string) error
|
||||
}
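Because status updates and deletions go through this small interface, tests can substitute a fake instead of RealPVControl. A hedged sketch of such a fake (illustrative only, not code from this repository):
// fakePVControl records removals instead of calling the API server.
type fakePVControl struct {
	removed []string
}

func (f *fakePVControl) UpdateStatusPolicyViolation(newPv *kyverno.ClusterPolicyViolation) error {
	return nil // pretend the status update succeeded
}

func (f *fakePVControl) RemovePolicyViolation(name string) error {
	f.removed = append(f.removed, name)
	return nil
}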
|
||||
|
||||
// RealPVControl is the default implementation of PVControlInterface.
|
||||
type RealPVControl struct {
|
||||
Client kyvernoclient.Interface
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
//UpdateStatusPolicyViolation updates the status for policy violation
|
||||
func (r RealPVControl) UpdateStatusPolicyViolation(newPv *kyverno.ClusterPolicyViolation) error {
|
||||
_, err := r.Client.KyvernoV1().ClusterPolicyViolations().UpdateStatus(newPv)
|
||||
return err
|
||||
}
|
||||
|
||||
//RemovePolicyViolation removes the policy violation
|
||||
func (r RealPVControl) RemovePolicyViolation(name string) error {
|
||||
return r.Client.KyvernoV1().ClusterPolicyViolations().Delete(name, &metav1.DeleteOptions{})
|
||||
}
|
|
@ -1,7 +1,7 @@
|
|||
package policyviolation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"errors"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
@ -12,13 +12,15 @@ import (
|
|||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
dclient "github.com/nirmata/kyverno/pkg/dclient"
|
||||
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
|
@ -27,15 +29,22 @@ const workQueueRetryLimit = 3
|
|||
|
||||
//Generator creates PV
|
||||
type Generator struct {
|
||||
dclient *dclient.Client
|
||||
pvInterface kyvernov1.KyvernoV1Interface
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister
|
||||
nspvLister kyvernolister.NamespacedPolicyViolationLister
|
||||
queue workqueue.RateLimitingInterface
|
||||
dataStore *dataStore
|
||||
dclient *dclient.Client
|
||||
kyvernoInterface kyvernov1.KyvernoV1Interface
|
||||
// get/list cluster policy violation
|
||||
cpvLister kyvernolister.ClusterPolicyViolationLister
|
||||
// get/list namespaced policy violation
|
||||
nspvLister kyvernolister.PolicyViolationLister
|
||||
// returns true if the cluster policy violation store has been synced at least once
|
||||
pvSynced cache.InformerSynced
|
||||
// returns true if the namespaced policy violation store has been synced at least once
|
||||
nspvSynced cache.InformerSynced
|
||||
queue workqueue.RateLimitingInterface
|
||||
dataStore *dataStore
|
||||
}
|
||||
|
||||
func NewDataStore() *dataStore {
|
||||
//newDataStore returns an instance of data store
|
||||
func newDataStore() *dataStore {
|
||||
ds := dataStore{
|
||||
data: make(map[string]Info),
|
||||
}
|
||||
|
@ -94,16 +103,19 @@ type GeneratorInterface interface {
|
|||
}
|
||||
|
||||
// NewPVGenerator returns a new instance of policy violation generator
|
||||
func NewPVGenerator(client *kyvernoclient.Clientset, dclient *client.Client,
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister,
|
||||
nspvLister kyvernolister.NamespacedPolicyViolationLister) *Generator {
|
||||
func NewPVGenerator(client *kyvernoclient.Clientset,
|
||||
dclient *client.Client,
|
||||
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
|
||||
nspvInformer kyvernoinformer.PolicyViolationInformer) *Generator {
|
||||
gen := Generator{
|
||||
pvInterface: client.KyvernoV1(),
|
||||
dclient: dclient,
|
||||
pvLister: pvLister,
|
||||
nspvLister: nspvLister,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
|
||||
dataStore: NewDataStore(),
|
||||
kyvernoInterface: client.KyvernoV1(),
|
||||
dclient: dclient,
|
||||
cpvLister: pvInformer.Lister(),
|
||||
pvSynced: pvInformer.Informer().HasSynced,
|
||||
nspvLister: nspvInformer.Lister(),
|
||||
nspvSynced: nspvInformer.Informer().HasSynced,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
|
||||
dataStore: newDataStore(),
|
||||
}
|
||||
return &gen
|
||||
}
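With this change the generator takes the informers directly and keeps their HasSynced functions, so Run can wait for both caches before processing. A construction sketch, assuming a generated shared informer factory for the kyverno API group (the factory accessor chain below is illustrative):
// Hypothetical wiring; the factory methods follow the usual code-generator layout.
pvInformer := kyvernoInformerFactory.Kyverno().V1().ClusterPolicyViolations()
nspvInformer := kyvernoInformerFactory.Kyverno().V1().PolicyViolations()

pvgen := NewPVGenerator(kyvernoClient, dClient, pvInformer, nspvInformer)
go pvgen.Run(1, stopCh)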
|
||||
|
@ -130,6 +142,10 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
|
|||
glog.Info("Start policy violation generator")
|
||||
defer glog.Info("Shutting down policy violation generator")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, gen.pvSynced, gen.nspvSynced) {
|
||||
glog.Error("policy violation generator: failed to sync informer cache")
|
||||
}
|
||||
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(gen.runWorker, time.Second, stopCh)
|
||||
}
|
||||
|
@ -201,197 +217,37 @@ func (gen *Generator) processNextWorkitem() bool {
|
|||
|
||||
func (gen *Generator) syncHandler(info Info) error {
|
||||
glog.V(4).Infof("recieved info:%v", info)
|
||||
// cluster policy violations
|
||||
var handler pvGenerator
|
||||
var builder Builder
|
||||
builder = newPvBuilder(gen.dclient)
|
||||
if info.Resource.GetNamespace() == "" {
|
||||
var pvs []kyverno.ClusterPolicyViolation
|
||||
if !info.Blocked {
|
||||
pvs = append(pvs, buildPV(info))
|
||||
} else {
|
||||
// blocked
|
||||
// get owners
|
||||
pvs = buildPVWithOwners(gen.dclient, info)
|
||||
}
|
||||
// create policy violation
|
||||
if err := createPVS(gen.dclient, pvs, gen.pvLister, gen.pvInterface); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Created cluster policy violation policy=%s, resource=%s/%s/%s",
|
||||
info.PolicyName, info.Resource.GetKind(), info.Resource.GetNamespace(), info.Resource.GetName())
|
||||
return nil
|
||||
}
|
||||
|
||||
// namespaced policy violations
|
||||
var pvs []kyverno.NamespacedPolicyViolation
|
||||
if !info.Blocked {
|
||||
pvs = append(pvs, buildNamespacedPV(info))
|
||||
// cluster-scoped resources generate a cluster policy violation
|
||||
handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface)
|
||||
} else {
|
||||
pvs = buildNamespacedPVWithOwner(gen.dclient, info)
|
||||
// namespaced resources generate a namespaced policy violation in the namespace of the resource
|
||||
handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface)
|
||||
}
|
||||
|
||||
if err := createNamespacedPV(gen.dclient, gen.nspvLister, gen.pvInterface, pvs); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
glog.V(3).Infof("Created namespaced policy violation policy=%s, resource=%s/%s/%s",
|
||||
info.PolicyName, info.Resource.GetKind(), info.Resource.GetNamespace(), info.Resource.GetName())
|
||||
return nil
|
||||
}
|
||||
|
||||
func createPVS(dclient *client.Client, pvs []kyverno.ClusterPolicyViolation, pvLister kyvernolister.ClusterPolicyViolationLister, pvInterface kyvernov1.KyvernoV1Interface) error {
|
||||
failure := false
|
||||
// Generate Policy Violations
|
||||
// as there can be multiple owners we can have multiple violations
|
||||
pvs := builder.generate(info)
|
||||
for _, pv := range pvs {
|
||||
if err := createPVNew(dclient, pv, pvLister, pvInterface); err != nil {
|
||||
return err
|
||||
// Create Policy Violations
|
||||
err := handler.create(pv)
|
||||
if err != nil {
|
||||
failure = true
|
||||
}
|
||||
}
|
||||
if failure {
|
||||
// even if there is a single failure we requeue the request
|
||||
return errors.New("Failed to process some policy violations, re-queuing")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func createPVNew(dclient *client.Client, pv kyverno.ClusterPolicyViolation, pvLister kyvernolister.ClusterPolicyViolationLister, pvInterface kyvernov1.KyvernoV1Interface) error {
|
||||
var err error
|
||||
// PV already exists
|
||||
ePV, err := getExistingPVIfAny(pvLister, pv)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return fmt.Errorf("failed to get existing pv on resource '%s': %v", pv.Spec.ResourceSpec.ToKey(), err)
|
||||
}
|
||||
if ePV == nil {
|
||||
// Create a New PV
|
||||
glog.V(4).Infof("creating new policy violation for policy %s & resource %s/%s/%s", pv.Spec.Policy, pv.Spec.ResourceSpec.Kind, pv.Spec.ResourceSpec.Namespace, pv.Spec.ResourceSpec.Name)
|
||||
err := retryGetResource(dclient, pv.Spec.ResourceSpec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retry getting resource for policy violation %s/%s: %v", pv.Name, pv.Spec.Policy, err)
|
||||
}
|
||||
|
||||
_, err = pvInterface.ClusterPolicyViolations().Create(&pv)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return fmt.Errorf("failed to create cluster policy violation: %v", err)
|
||||
}
|
||||
glog.Infof("policy violation created for resource %v", pv.Spec.ResourceSpec)
|
||||
return nil
|
||||
}
|
||||
// Update existing PV if there any changes
|
||||
if reflect.DeepEqual(pv.Spec, ePV.Spec) {
|
||||
glog.V(4).Infof("policy violation spec %v did not change so not updating it", pv.Spec)
|
||||
return nil
|
||||
}
|
||||
|
||||
// set newPv Name/ResourceVersion with curPv, as we are updating the resource itself
|
||||
pv.SetName(ePV.Name)
|
||||
pv.SetResourceVersion(ePV.ResourceVersion)
|
||||
_, err = pvInterface.ClusterPolicyViolations().Update(&pv)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return fmt.Errorf("failed to update cluster polciy violation: %v", err)
|
||||
}
|
||||
glog.Infof("policy violation updated for resource %v", pv.Spec.ResourceSpec)
|
||||
return nil
|
||||
}
|
||||
|
||||
func getExistingPVIfAny(pvLister kyvernolister.ClusterPolicyViolationLister, currpv kyverno.ClusterPolicyViolation) (*kyverno.ClusterPolicyViolation, error) {
|
||||
pvs, err := pvLister.List(labels.Everything())
|
||||
if err != nil {
|
||||
glog.Errorf("unable to list policy violations : %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pv := range pvs {
|
||||
// find a policy on same resource and policy combination
|
||||
if pv.Spec.Policy == currpv.Spec.Policy &&
|
||||
pv.Spec.ResourceSpec.Kind == currpv.Spec.ResourceSpec.Kind &&
|
||||
pv.Spec.ResourceSpec.Namespace == currpv.Spec.ResourceSpec.Namespace &&
|
||||
pv.Spec.ResourceSpec.Name == currpv.Spec.ResourceSpec.Name {
|
||||
return pv, nil
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
// build PV without owners
|
||||
func buildPV(info Info) kyverno.ClusterPolicyViolation {
|
||||
pv := buildPVObj(info.PolicyName, kyverno.ResourceSpec{
|
||||
Kind: info.Resource.GetKind(),
|
||||
Namespace: info.Resource.GetNamespace(),
|
||||
Name: info.Resource.GetName(),
|
||||
}, info.Rules,
|
||||
)
|
||||
return pv
|
||||
}
|
||||
|
||||
// build PV object
|
||||
func buildPVObj(policyName string, resourceSpec kyverno.ResourceSpec, rules []kyverno.ViolatedRule) kyverno.ClusterPolicyViolation {
|
||||
pv := kyverno.ClusterPolicyViolation{
|
||||
Spec: kyverno.PolicyViolationSpec{
|
||||
Policy: policyName,
|
||||
ResourceSpec: resourceSpec,
|
||||
ViolatedRules: rules,
|
||||
},
|
||||
}
|
||||
|
||||
labelMap := map[string]string{
|
||||
"policy": policyName,
|
||||
"resource": resourceSpec.ToKey(),
|
||||
}
|
||||
pv.SetLabels(labelMap)
|
||||
pv.SetGenerateName("pv-")
|
||||
return pv
|
||||
}
|
||||
|
||||
// build PV with owners
|
||||
func buildPVWithOwners(dclient *client.Client, info Info) []kyverno.ClusterPolicyViolation {
|
||||
var pvs []kyverno.ClusterPolicyViolation
|
||||
// as its blocked resource, the violation is created on owner
|
||||
ownerMap := map[kyverno.ResourceSpec]interface{}{}
|
||||
GetOwner(dclient, ownerMap, info.Resource)
|
||||
|
||||
// standalone resource; set pvResourceSpec to the resource itself
|
||||
if len(ownerMap) == 0 {
|
||||
pvResourceSpec := kyverno.ResourceSpec{
|
||||
Namespace: info.Resource.GetNamespace(),
|
||||
Kind: info.Resource.GetKind(),
|
||||
Name: info.Resource.GetName(),
|
||||
}
|
||||
return append(pvs, buildPVObj(info.PolicyName, pvResourceSpec, info.Rules))
|
||||
}
|
||||
|
||||
// generate a policy violation for each owner
|
||||
for owner := range ownerMap {
|
||||
pv := buildPVObj(info.PolicyName, owner, info.Rules)
|
||||
pvs = append(pvs, pv)
|
||||
}
|
||||
return pvs
|
||||
}
|
||||
|
||||
// GetOwner of a resource by iterating over ownerReferences
|
||||
func GetOwner(dclient *client.Client, ownerMap map[kyverno.ResourceSpec]interface{}, resource unstructured.Unstructured) {
|
||||
var emptyInterface interface{}
|
||||
resourceSpec := kyverno.ResourceSpec{
|
||||
Kind: resource.GetKind(),
|
||||
Namespace: resource.GetNamespace(),
|
||||
Name: resource.GetName(),
|
||||
}
|
||||
if _, ok := ownerMap[resourceSpec]; ok {
|
||||
// owner seen before
|
||||
// stop recursing
|
||||
return
|
||||
}
|
||||
rOwners := resource.GetOwnerReferences()
|
||||
// if there are no owner references then it is a top-level resource
|
||||
if len(rOwners) == 0 {
|
||||
// add resource to map
|
||||
ownerMap[resourceSpec] = emptyInterface
|
||||
return
|
||||
}
|
||||
for _, rOwner := range rOwners {
|
||||
// lookup resource via client
|
||||
// owner has to be in same namespace
|
||||
owner, err := dclient.GetResource(rOwner.Kind, resource.GetNamespace(), rOwner.Name)
|
||||
if err != nil {
|
||||
glog.Errorf("Failed to get resource owner for %s/%s/%s, err: %v", rOwner.Kind, resource.GetNamespace(), rOwner.Name, err)
|
||||
// as we want to process other owners
|
||||
continue
|
||||
}
|
||||
GetOwner(dclient, ownerMap, *owner)
|
||||
}
|
||||
// pvGenerator provides an interface to generate policy violations,
|
||||
// with implementations for namespaced and cluster PVs
|
||||
type pvGenerator interface {
|
||||
create(policyViolation kyverno.PolicyViolationTemplate) error
|
||||
}
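Both newClusterPV and newNamespacedPV return handlers that satisfy this interface; the caller only decides which one to construct based on whether the resource is namespaced. As a sketch, a trivial implementation that only logs would look like this (illustrative, and assuming PolicyViolationTemplate carries the same Spec fields as PolicyViolation, as the conversion in namespacedpv.go suggests):
// loggingPV satisfies pvGenerator but never talks to the API server.
type loggingPV struct{}

func (l loggingPV) create(pv kyverno.PolicyViolationTemplate) error {
	glog.V(4).Infof("would create policy violation for policy %s on resource %s",
		pv.Spec.Policy, pv.Spec.ResourceSpec.ToKey())
	return nil
}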
|
||||
|
|
|
@ -1,107 +0,0 @@
|
|||
package policyviolation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
v1 "k8s.io/api/apps/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
|
||||
deployutil "k8s.io/kubernetes/pkg/controller/deployment/util"
|
||||
)
|
||||
|
||||
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
|
||||
ls := &metav1.LabelSelector{}
|
||||
err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid label selector: %v", err)
|
||||
}
|
||||
|
||||
return policyViolationSelector, nil
|
||||
}
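A short usage sketch of this helper, mirroring how the namespaced generator looks up an existing violation by its policy/resource labels later in this change; the wrapper function and its parameters are illustrative, not part of the repository.
// findExisting lists violations in a namespace that carry the given policy/resource labels.
// Illustrative helper; nspvLister is assumed to be a kyvernolister.PolicyViolationLister.
func findExisting(nspvLister kyvernolister.PolicyViolationLister, ns, policy, resourceKey string) ([]*kyverno.PolicyViolation, error) {
	selector, err := converLabelToSelector(map[string]string{
		"policy":   policy,
		"resource": resourceKey,
	})
	if err != nil {
		return nil, err
	}
	return nspvLister.PolicyViolations(ns).List(selector)
}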
|
||||
|
||||
func containsOwner(owners []kyverno.ResourceSpec, pvResourceSpec kyverno.ResourceSpec) bool {
|
||||
curOwner := kyverno.ResourceSpec{
|
||||
Kind: pvResourceSpec.Kind,
|
||||
Namespace: pvResourceSpec.Namespace,
|
||||
Name: pvResourceSpec.Name,
|
||||
}
|
||||
|
||||
for _, targetOwner := range owners {
|
||||
if reflect.DeepEqual(curOwner, targetOwner) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// validDependantForDeployment checks if resource (pod) matches the intent of the given deployment
|
||||
// explicitly handles deployment-replicaset-pod relationship
|
||||
func validDependantForDeployment(client appsv1.AppsV1Interface, pvResourceSpec kyverno.ResourceSpec, resource unstructured.Unstructured) bool {
|
||||
if resource.GetKind() != "Pod" {
|
||||
return false
|
||||
}
|
||||
|
||||
// only handles the deployment-replicaset-pod relationship
|
||||
if pvResourceSpec.Kind != "Deployment" {
|
||||
return false
|
||||
}
|
||||
|
||||
owner := kyverno.ResourceSpec{
|
||||
Kind: pvResourceSpec.Kind,
|
||||
Namespace: pvResourceSpec.Namespace,
|
||||
Name: pvResourceSpec.Name,
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
deploy, err := client.Deployments(owner.Namespace).Get(owner.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
|
||||
return false
|
||||
}
|
||||
glog.V(4).Infof("Time getting deployment %v", time.Since(start))
|
||||
|
||||
// TODO(shuting): replace typed client AppsV1Interface
|
||||
expectReplicaset, err := deployutil.GetNewReplicaSet(deploy, client)
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
|
||||
return false
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(expectReplicaset, v1.ReplicaSet{}) {
|
||||
glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.Namespace, owner.Kind, owner.Name)
|
||||
return false
|
||||
}
|
||||
var actualReplicaset *v1.ReplicaSet
|
||||
for _, podOwner := range resource.GetOwnerReferences() {
|
||||
if podOwner.Kind != "ReplicaSet" {
|
||||
continue
|
||||
}
|
||||
|
||||
actualReplicaset, err = client.ReplicaSets(resource.GetNamespace()).Get(podOwner.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
glog.Errorf("failed to get replicaset from %s/%s/%s: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
|
||||
return false
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(actualReplicaset, v1.ReplicaSet{}) {
|
||||
glog.V(2).Infof("no replicaset found for Pod/%s/%s", resource.GetNamespace(), podOwner.Name)
|
||||
return false
|
||||
}
|
||||
|
||||
if expectReplicaset.Name == actualReplicaset.Name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -8,123 +8,109 @@ import (
|
|||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
dclient "github.com/nirmata/kyverno/pkg/dclient"
|
||||
labels "k8s.io/apimachinery/pkg/labels"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func buildNamespacedPV(info Info) kyverno.NamespacedPolicyViolation {
|
||||
return buildNamespacedPVObj(info.PolicyName,
|
||||
kyverno.ResourceSpec{
|
||||
Kind: info.Resource.GetKind(),
|
||||
Namespace: info.Resource.GetNamespace(),
|
||||
Name: info.Resource.GetName(),
|
||||
},
|
||||
info.Rules)
|
||||
//NamespacedPV ...
|
||||
type namespacedPV struct {
|
||||
// dynamic client
|
||||
dclient *client.Client
|
||||
// get/list namespaced policy violation
|
||||
nspvLister kyvernolister.PolicyViolationLister
|
||||
// policy violation interface
|
||||
kyvernoInterface kyvernov1.KyvernoV1Interface
|
||||
}
|
||||
|
||||
//buildNamespacedPVObj returns a value of type NamespacedPolicyViolation
|
||||
func buildNamespacedPVObj(policy string, resource kyverno.ResourceSpec, fRules []kyverno.ViolatedRule) kyverno.NamespacedPolicyViolation {
|
||||
pv := kyverno.NamespacedPolicyViolation{
|
||||
Spec: kyverno.PolicyViolationSpec{
|
||||
Policy: policy,
|
||||
ResourceSpec: resource,
|
||||
ViolatedRules: fRules,
|
||||
},
|
||||
func newNamespacedPV(dclient *client.Client,
|
||||
nspvLister kyvernolister.PolicyViolationLister,
|
||||
kyvernoInterface kyvernov1.KyvernoV1Interface,
|
||||
) *namespacedPV {
|
||||
nspv := namespacedPV{
|
||||
dclient: dclient,
|
||||
nspvLister: nspvLister,
|
||||
kyvernoInterface: kyvernoInterface,
|
||||
}
|
||||
|
||||
labelMap := map[string]string{
|
||||
"policy": policy,
|
||||
"resource": resource.ToKey(),
|
||||
}
|
||||
pv.SetGenerateName("pv-")
|
||||
pv.SetLabels(labelMap)
|
||||
return pv
|
||||
return &nspv
|
||||
}
|
||||
|
||||
func buildNamespacedPVWithOwner(dclient *dclient.Client, info Info) (pvs []kyverno.NamespacedPolicyViolation) {
|
||||
// create violation on resource owner (if exist) when action is set to enforce
|
||||
ownerMap := map[kyverno.ResourceSpec]interface{}{}
|
||||
GetOwner(dclient, ownerMap, info.Resource)
|
||||
|
||||
// standalone resource; set pvResourceSpec to the resource itself
|
||||
if len(ownerMap) == 0 {
|
||||
pvResourceSpec := kyverno.ResourceSpec{
|
||||
Namespace: info.Resource.GetNamespace(),
|
||||
Kind: info.Resource.GetKind(),
|
||||
Name: info.Resource.GetName(),
|
||||
}
|
||||
return append(pvs, buildNamespacedPVObj(info.PolicyName, pvResourceSpec, info.Rules))
|
||||
}
|
||||
|
||||
for owner := range ownerMap {
|
||||
pvs = append(pvs, buildNamespacedPVObj(info.PolicyName, owner, info.Rules))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func createNamespacedPV(dclient *dclient.Client, pvLister kyvernolister.NamespacedPolicyViolationLister, pvInterface kyvernov1.KyvernoV1Interface, pvs []kyverno.NamespacedPolicyViolation) error {
|
||||
for _, newPv := range pvs {
|
||||
glog.V(4).Infof("creating namespaced policyViolation resource for policy %s and resource %s", newPv.Spec.Policy, newPv.Spec.ResourceSpec.ToKey())
|
||||
// check if there was a previous policy violation for the policy & resource combination
|
||||
curPv, err := getExistingNamespacedPVIfAny(pvLister, newPv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get existing namespaced pv on resource '%s': %v", newPv.Spec.ResourceSpec.ToKey(), err)
|
||||
}
|
||||
|
||||
if reflect.DeepEqual(curPv, kyverno.NamespacedPolicyViolation{}) {
|
||||
// no existing policy violation, create a new one
|
||||
if reflect.DeepEqual(curPv, kyverno.NamespacedPolicyViolation{}) {
|
||||
glog.V(4).Infof("creating new namespaced policy violation for policy %s & resource %s", newPv.Spec.Policy, newPv.Spec.ResourceSpec.ToKey())
|
||||
|
||||
if err := retryGetResource(dclient, newPv.Spec.ResourceSpec); err != nil {
|
||||
return fmt.Errorf("failed to get resource for policy violation on resource '%s': %v", newPv.Spec.ResourceSpec.ToKey(), err)
|
||||
}
|
||||
|
||||
if _, err := pvInterface.NamespacedPolicyViolations(newPv.Spec.ResourceSpec.Namespace).Create(&newPv); err != nil {
|
||||
return fmt.Errorf("failed to create namespaced policy violation: %v", err)
|
||||
}
|
||||
|
||||
glog.Infof("namespaced policy violation created for resource %s", newPv.Spec.ResourceSpec.ToKey())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// compare the policyviolation spec for existing resource if present else
|
||||
if reflect.DeepEqual(curPv.Spec, newPv.Spec) {
|
||||
// if they are equal there has been no change, so don't update the policy violation
|
||||
glog.V(3).Infof("namespaced policy violation '%s' spec did not change so not updating it", newPv.Spec.ToKey())
|
||||
glog.V(4).Infof("namespaced policy violation spec %v did not change so not updating it", newPv.Spec)
|
||||
continue
|
||||
}
|
||||
|
||||
// set newPv Name/ResourceVersion with curPv, as we are updating the resource itself
|
||||
newPv.SetName(curPv.Name)
|
||||
newPv.SetResourceVersion(curPv.ResourceVersion)
|
||||
|
||||
// spec changed so update the policyviolation
|
||||
glog.V(4).Infof("creating new policy violation for policy %s & resource %s", curPv.Spec.Policy, curPv.Spec.ResourceSpec.ToKey())
|
||||
//TODO: using a generic name, but would it be helpful to have naming convention for policy violations
|
||||
// as we can only have one policy violation for each (policy + resource) combination
|
||||
if _, err = pvInterface.NamespacedPolicyViolations(newPv.Spec.ResourceSpec.Namespace).Update(&newPv); err != nil {
|
||||
return fmt.Errorf("failed to update namespaced policy violation: %v", err)
|
||||
}
|
||||
glog.Infof("namespaced policy violation updated for resource %s", newPv.Spec.ResourceSpec.ToKey())
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func getExistingNamespacedPVIfAny(nspvLister kyvernolister.NamespacedPolicyViolationLister, newPv kyverno.NamespacedPolicyViolation) (kyverno.NamespacedPolicyViolation, error) {
|
||||
// TODO(shuting): list pvs by labels
|
||||
pvs, err := nspvLister.List(labels.NewSelector())
|
||||
func (nspv *namespacedPV) create(pv kyverno.PolicyViolationTemplate) error {
|
||||
newPv := kyverno.PolicyViolation(pv)
|
||||
// PV already exists
|
||||
oldPv, err := nspv.getExisting(newPv)
|
||||
if err != nil {
|
||||
return kyverno.NamespacedPolicyViolation{}, fmt.Errorf("failed to list namespaced policy violations err: %v", err)
|
||||
return err
|
||||
}
|
||||
if oldPv == nil {
|
||||
// create a new policy violation
|
||||
return nspv.createPV(&newPv)
|
||||
}
|
||||
// policy violation exists
|
||||
// skip if there is no change, else update the violation
|
||||
return nspv.updatePV(&newPv, oldPv)
|
||||
}
|
||||
|
||||
func (nspv *namespacedPV) getExisting(newPv kyverno.PolicyViolation) (*kyverno.PolicyViolation, error) {
|
||||
var err error
|
||||
// use labels
|
||||
policyLabelmap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
|
||||
ls, err := converLabelToSelector(policyLabelmap)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pvs, err := nspv.nspvLister.PolicyViolations(newPv.GetNamespace()).List(ls)
|
||||
if err != nil {
|
||||
glog.Errorf("unable to list namespaced policy violations : %v", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pv := range pvs {
|
||||
if pv.Spec.Policy == newPv.Spec.Policy && reflect.DeepEqual(pv.Spec.ResourceSpec, newPv.Spec.ResourceSpec) {
|
||||
return *pv, nil
|
||||
// find a policy on same resource and policy combination
|
||||
if pv.Spec.Policy == newPv.Spec.Policy &&
|
||||
pv.Spec.ResourceSpec.Kind == newPv.Spec.ResourceSpec.Kind &&
|
||||
pv.Spec.ResourceSpec.Name == newPv.Spec.ResourceSpec.Name {
|
||||
return pv, nil
|
||||
}
|
||||
}
|
||||
|
||||
return kyverno.NamespacedPolicyViolation{}, nil
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
|
||||
var err error
|
||||
glog.V(4).Infof("creating new policy violation for policy %s & resource %s/%s/%s", newPv.Spec.Policy, newPv.Spec.ResourceSpec.Kind, newPv.Spec.ResourceSpec.Namespace, newPv.Spec.ResourceSpec.Name)
|
||||
obj, err := retryGetResource(nspv.dclient, newPv.Spec.ResourceSpec)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to retry getting resource for policy violation %s/%s: %v", newPv.Name, newPv.Spec.Policy, err)
|
||||
}
|
||||
// set owner reference to resource
|
||||
ownerRef := createOwnerReference(obj)
|
||||
newPv.SetOwnerReferences([]metav1.OwnerReference{ownerRef})
|
||||
|
||||
// create resource
|
||||
_, err = nspv.kyvernoInterface.PolicyViolations(newPv.GetNamespace()).Create(newPv)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("failed to create Cluster Policy Violation: %v", err)
|
||||
return err
|
||||
}
|
||||
glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error {
|
||||
var err error
|
||||
// check if there is any update
|
||||
if reflect.DeepEqual(newPv.Spec, oldPv.Spec) {
|
||||
glog.V(4).Infof("policy violation spec %v did not change so not updating it", newPv.Spec)
|
||||
return nil
|
||||
}
|
||||
// set name
|
||||
newPv.SetName(oldPv.Name)
|
||||
newPv.SetResourceVersion(oldPv.ResourceVersion)
|
||||
// update resource
|
||||
_, err = nspv.kyvernoInterface.PolicyViolations(newPv.GetNamespace()).Update(newPv)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update namespaced polciy violation: %v", err)
|
||||
}
|
||||
glog.Infof("namespced policy violation updated for resource %v", newPv.Spec.ResourceSpec)
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,369 +0,0 @@
|
|||
package policyviolation
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
backoff "github.com/cenkalti/backoff"
|
||||
"github.com/golang/glog"
|
||||
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
|
||||
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
|
||||
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
|
||||
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
||||
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
|
||||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/apimachinery/pkg/util/wait"
|
||||
typedcorev1 "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
"k8s.io/client-go/tools/record"
|
||||
"k8s.io/client-go/util/workqueue"
|
||||
)
|
||||
|
||||
var nspvcontrollerKind = kyverno.SchemeGroupVersion.WithKind("NamespacedPolicyViolation")
|
||||
|
||||
// NamespacedPolicyViolationController manages the namespaced policy violation resource
|
||||
// - sync the lastupdate time
|
||||
// - check if the resource is active
|
||||
type NamespacedPolicyViolationController struct {
|
||||
client *client.Client
|
||||
kyvernoClient *kyvernoclient.Clientset
|
||||
eventRecorder record.EventRecorder
|
||||
syncHandler func(pKey string) error
|
||||
enqueuePolicyViolation func(policy *kyverno.NamespacedPolicyViolation)
|
||||
// Policys that need to be synced
|
||||
queue workqueue.RateLimitingInterface
|
||||
// nspvLister can list/get policy violation from the shared informer's store
|
||||
nspvLister kyvernolister.NamespacedPolicyViolationLister
|
||||
// pLister can list/get policy from the shared informer's store
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
// pListerSynced returns true if the Policy store has been synced at least once
|
||||
pListerSynced cache.InformerSynced
|
||||
// nspvListerSynced returns true if the NamespacedPolicyViolation store has been synced at least once
|
||||
nspvListerSynced cache.InformerSynced
|
||||
//pvControl is used for updating status/cleanup policy violation
|
||||
pvControl NamespacedPVControlInterface
|
||||
}
|
||||
|
||||
//NewNamespacedPolicyViolationController creates a new NamespacedPolicyViolationController
|
||||
func NewNamespacedPolicyViolationController(client *client.Client, kyvernoClient *kyvernoclient.Clientset, pInformer kyvernoinformer.ClusterPolicyInformer, pvInformer kyvernoinformer.NamespacedPolicyViolationInformer) (*NamespacedPolicyViolationController, error) {
|
||||
// Event broadcaster
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(glog.Infof)
|
||||
eventInterface, err := client.GetEventsInterface()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
eventBroadcaster.StartRecordingToSink(&typedcorev1.EventSinkImpl{Interface: eventInterface})
|
||||
|
||||
pvc := NamespacedPolicyViolationController{
|
||||
kyvernoClient: kyvernoClient,
|
||||
client: client,
|
||||
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "ns_policyviolation_controller"}),
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "ns_policyviolation"),
|
||||
}
|
||||
pvc.pvControl = RealNamespacedPVControl{Client: kyvernoClient, Recorder: pvc.eventRecorder}
|
||||
pvInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: pvc.addPolicyViolation,
|
||||
UpdateFunc: pvc.updatePolicyViolation,
|
||||
DeleteFunc: pvc.deletePolicyViolation,
|
||||
})
|
||||
|
||||
pvc.enqueuePolicyViolation = pvc.enqueue
|
||||
pvc.syncHandler = pvc.syncPolicyViolation
|
||||
|
||||
pvc.pLister = pInformer.Lister()
|
||||
pvc.nspvLister = pvInformer.Lister()
|
||||
pvc.pListerSynced = pInformer.Informer().HasSynced
|
||||
pvc.nspvListerSynced = pvInformer.Informer().HasSynced
|
||||
|
||||
return &pvc, nil
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) addPolicyViolation(obj interface{}) {
|
||||
pv := obj.(*kyverno.NamespacedPolicyViolation)
|
||||
glog.V(4).Infof("Adding Namespaced Policy Violation %s", pv.Name)
|
||||
pvc.enqueuePolicyViolation(pv)
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) updatePolicyViolation(old, cur interface{}) {
|
||||
oldPv := old.(*kyverno.NamespacedPolicyViolation)
|
||||
curPv := cur.(*kyverno.NamespacedPolicyViolation)
|
||||
glog.V(4).Infof("Updating Namespaced Policy Violation %s", oldPv.Name)
|
||||
if err := pvc.syncLastUpdateTimeStatus(curPv, oldPv); err != nil {
|
||||
glog.Errorf("Failed to update lastUpdateTime in NamespacedPolicyViolation %s status: %v", curPv.Name, err)
|
||||
}
|
||||
pvc.enqueuePolicyViolation(curPv)
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) deletePolicyViolation(obj interface{}) {
|
||||
pv, ok := obj.(*kyverno.NamespacedPolicyViolation)
|
||||
if !ok {
|
||||
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
|
||||
if !ok {
|
||||
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
|
||||
return
|
||||
}
|
||||
pv, ok = tombstone.Obj.(*kyverno.NamespacedPolicyViolation)
|
||||
if !ok {
|
||||
glog.Info(fmt.Errorf("Tombstone contained object that is not a NamespacedPolicyViolation %#v", obj))
|
||||
return
|
||||
}
|
||||
}
|
||||
glog.V(4).Infof("Deleting NamespacedPolicyViolation %s", pv.Name)
|
||||
pvc.enqueuePolicyViolation(pv)
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) enqueue(policyViolation *kyverno.NamespacedPolicyViolation) {
|
||||
key, err := cache.MetaNamespaceKeyFunc(policyViolation)
|
||||
if err != nil {
|
||||
glog.Error(err)
|
||||
return
|
||||
}
|
||||
pvc.queue.Add(key)
|
||||
}
|
||||
|
||||
// Run begins watching and syncing.
|
||||
func (pvc *NamespacedPolicyViolationController) Run(workers int, stopCh <-chan struct{}) {
|
||||
|
||||
defer utilruntime.HandleCrash()
|
||||
defer pvc.queue.ShutDown()
|
||||
|
||||
glog.Info("Starting Namespaced policyviolation controller")
|
||||
defer glog.Info("Shutting down Namespaced policyviolation controller")
|
||||
|
||||
if !cache.WaitForCacheSync(stopCh, pvc.pListerSynced, pvc.nspvListerSynced) {
|
||||
return
|
||||
}
|
||||
for i := 0; i < workers; i++ {
|
||||
go wait.Until(pvc.worker, time.Second, stopCh)
|
||||
}
|
||||
<-stopCh
|
||||
}
|
||||
|
||||
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
|
||||
// It enforces that the syncHandler is never invoked concurrently with the same key.
|
||||
func (pvc *NamespacedPolicyViolationController) worker() {
|
||||
for pvc.processNextWorkItem() {
|
||||
}
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) processNextWorkItem() bool {
|
||||
key, quit := pvc.queue.Get()
|
||||
if quit {
|
||||
return false
|
||||
}
|
||||
defer pvc.queue.Done(key)
|
||||
|
||||
err := pvc.syncHandler(key.(string))
|
||||
pvc.handleErr(err, key)
|
||||
|
||||
return true
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) handleErr(err error, key interface{}) {
|
||||
if err == nil {
|
||||
pvc.queue.Forget(key)
|
||||
return
|
||||
}
|
||||
|
||||
if pvc.queue.NumRequeues(key) < maxRetries {
|
||||
glog.V(2).Infof("Error syncing PolicyViolation %v: %v", key, err)
|
||||
pvc.queue.AddRateLimited(key)
|
||||
return
|
||||
}
|
||||
|
||||
utilruntime.HandleError(err)
|
||||
glog.V(2).Infof("Dropping policyviolation %q out of the queue: %v", key, err)
|
||||
pvc.queue.Forget(key)
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) syncPolicyViolation(key string) error {
|
||||
startTime := time.Now()
|
||||
glog.V(4).Infof("Started syncing policy violation %q (%v)", key, startTime)
|
||||
defer func() {
|
||||
glog.V(4).Infof("Finished syncing namespaced policy violation %q (%v)", key, time.Since(startTime))
|
||||
}()
|
||||
|
||||
// tags: NAMESPACE/NAME
|
||||
ns, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("error getting namespaced policy violation key %v", key)
|
||||
}
|
||||
|
||||
policyViolation, err := pvc.nspvLister.NamespacedPolicyViolations(ns).Get(name)
|
||||
if errors.IsNotFound(err) {
|
||||
glog.V(2).Infof("PolicyViolation %v has been deleted", key)
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Deep-copy otherwise we are mutating our cache.
|
||||
// TODO: Deep-copy only when needed.
|
||||
pv := policyViolation.DeepCopy()
|
||||
// TODO: Update Status to update ObservedGeneration
|
||||
// TODO: check if the policy violation refers to a resource that is still active // done by policy controller
|
||||
// TODO: remove the PV if the corresponding policy is not present
|
||||
// TODO: additional check on resource deletion (webhook): to delete the policy violations for a resource we could
|
||||
// list the resources with label selectors, but this can be expensive for each delete request of a resource
|
||||
if err := pvc.syncActiveResource(pv); err != nil {
|
||||
glog.V(4).Infof("not syncing policy violation status")
|
||||
return err
|
||||
}
|
||||
|
||||
return pvc.syncStatusOnly(pv)
|
||||
}
|
||||
|
||||
func (pvc *NamespacedPolicyViolationController) syncActiveResource(curPv *kyverno.NamespacedPolicyViolation) error {
|
||||
// check if the resource is active or not ?
|
||||
rspec := curPv.Spec.ResourceSpec
|
||||
// get resource
|
||||
_, err := pvc.client.GetResource(rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
if errors.IsNotFound(err) {
|
||||
// TODO: does it help to retry?
|
||||
// resource is not found
|
||||
// remove the violation
|
||||
|
||||
if err := pvc.pvControl.RemovePolicyViolation(curPv.Namespace, curPv.Name); err != nil {
|
||||
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("removing policy violation %s as the corresponding resource %s/%s/%s does not exist anymore", curPv.Name, rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
return nil
|
||||
}
|
||||
if err != nil {
|
||||
glog.V(4).Infof("error while retrieved resource %s/%s/%s: %v", rspec.Kind, rspec.Namespace, rspec.Name, err)
|
||||
return err
|
||||
}
|
||||
|
||||
// cleanup pv with dependant
|
||||
if err := pvc.syncBlockedResource(curPv); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
//TODO- if the policy is not present, remove the policy violation
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncBlockedResource removes an inactive policy violation
|
||||
// once the previously rejected resource is created in the cluster
|
||||
func (pvc *NamespacedPolicyViolationController) syncBlockedResource(curPv *kyverno.NamespacedPolicyViolation) error {
|
||||
for _, violatedRule := range curPv.Spec.ViolatedRules {
|
||||
if reflect.DeepEqual(violatedRule.ManagedResource, kyverno.ManagedResourceSpec{}) {
|
||||
continue
|
||||
}
|
||||
|
||||
// get resource
|
||||
blockedResource := violatedRule.ManagedResource
|
||||
resources, _ := pvc.client.ListResource(blockedResource.Kind, blockedResource.Namespace, nil)
|
||||
|
||||
for _, resource := range resources.Items {
|
||||
glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
|
||||
owners := map[kyverno.ResourceSpec]interface{}{}
|
||||
GetOwner(pvc.client, owners, resource) // owner of resource matches violation resourceSpec
|
||||
// remove policy violation as the blocked request got created
|
||||
if _, ok := owners[curPv.Spec.ResourceSpec]; ok {
|
||||
// pod -> replicaset1; deploy -> replicaset2
|
||||
// if replicaset1 == replicaset2, the pod is
|
||||
// no longer an active child of deploy, skip removing pv
|
||||
if !validDependantForDeployment(pvc.client.GetAppsV1Interface(), curPv.Spec.ResourceSpec, resource) {
|
||||
glog.V(4).Infof("")
|
||||
continue
|
||||
}
|
||||
|
||||
// resource created, remove policy violation
|
||||
if err := pvc.pvControl.RemovePolicyViolation(curPv.Namespace, curPv.Name); err != nil {
|
||||
glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
|
||||
return err
|
||||
}
|
||||
glog.V(4).Infof("removed policy violation %s as the blocked resource %s/%s successfully created, owner: %s",
|
||||
curPv.Name, blockedResource.Kind, blockedResource.Namespace, strings.ReplaceAll(curPv.Spec.ResourceSpec.ToKey(), ".", "/"))
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
//syncStatusOnly updates the policyviolation status subresource
|
||||
// status:
|
||||
func (pvc *NamespacedPolicyViolationController) syncStatusOnly(curPv *kyverno.NamespacedPolicyViolation) error {
|
||||
// newStatus := calculateStatus(pv)
|
||||
return nil
|
||||
}
|
||||
|
||||
//TODO: think this through again
|
||||
//syncLastUpdateTimeStatus updates the policyviolation lastUpdateTime if anything in ViolationSpec changed
|
||||
// - lastUpdateTime : (time stamp when the policy violation changed)
|
||||
func (pvc *NamespacedPolicyViolationController) syncLastUpdateTimeStatus(curPv *kyverno.NamespacedPolicyViolation, oldPv *kyverno.NamespacedPolicyViolation) error {
|
||||
// check if there is any change in policy violation information
|
||||
if !updatedNamespaced(curPv, oldPv) {
|
||||
return nil
|
||||
}
|
||||
// update the lastUpdateTime
|
||||
newPolicyViolation := curPv
|
||||
newPolicyViolation.Status = kyverno.PolicyViolationStatus{LastUpdateTime: metav1.Now()}
|
||||
|
||||
return pvc.pvControl.UpdateStatusPolicyViolation(newPolicyViolation)
|
||||
}
|
||||
|
||||
func updatedNamespaced(curPv *kyverno.NamespacedPolicyViolation, oldPv *kyverno.NamespacedPolicyViolation) bool {
|
||||
return !reflect.DeepEqual(curPv.Spec, oldPv.Spec)
|
||||
//TODO check if owner reference changed, then should we update the lastUpdateTime as well ?
|
||||
}
|
||||
|
||||
type NamespacedPVControlInterface interface {
|
||||
UpdateStatusPolicyViolation(newPv *kyverno.NamespacedPolicyViolation) error
|
||||
RemovePolicyViolation(ns, name string) error
|
||||
}
|
||||
|
||||
// RealNamespacedPVControl is the default implementation of NamespacedPVControlInterface.
|
||||
type RealNamespacedPVControl struct {
|
||||
Client kyvernoclient.Interface
|
||||
Recorder record.EventRecorder
|
||||
}
|
||||
|
||||
//UpdateStatusPolicyViolation updates the status for policy violation
|
||||
func (r RealNamespacedPVControl) UpdateStatusPolicyViolation(newPv *kyverno.NamespacedPolicyViolation) error {
|
||||
_, err := r.Client.KyvernoV1().NamespacedPolicyViolations(newPv.Namespace).UpdateStatus(newPv)
|
||||
return err
|
||||
}
|
||||
|
||||
//RemovePolicyViolation removes the policy violation
|
||||
func (r RealNamespacedPVControl) RemovePolicyViolation(ns, name string) error {
|
||||
return r.Client.KyvernoV1().NamespacedPolicyViolations(ns).Delete(name, &metav1.DeleteOptions{})
|
||||
}
|
||||
|
||||
func retryGetResource(client *client.Client, rspec kyverno.ResourceSpec) error {
|
||||
var i int
|
||||
getResource := func() error {
|
||||
_, err := client.GetResource(rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
glog.V(5).Infof("retry %v getting %s/%s/%s", i, rspec.Kind, rspec.Namespace, rspec.Name)
|
||||
i++
|
||||
return err
|
||||
}
|
||||
|
||||
exbackoff := &backoff.ExponentialBackOff{
|
||||
InitialInterval: 500 * time.Millisecond,
|
||||
RandomizationFactor: 0.5,
|
||||
Multiplier: 1.5,
|
||||
MaxInterval: time.Second,
|
||||
MaxElapsedTime: 3 * time.Second,
|
||||
Clock: backoff.SystemClock,
|
||||
}
|
||||
|
||||
exbackoff.Reset()
|
||||
err := backoff.Retry(getResource, exbackoff)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
43
pkg/signal/signal.go
Normal file
|
@ -0,0 +1,43 @@
|
|||
package signal
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/signal"
|
||||
)
|
||||
|
||||
var onlyOneSignalHandler = make(chan struct{})
|
||||
var shutdownHandler chan os.Signal
|
||||
|
||||
// SetupSignalHandler registers for SIGTERM and SIGINT. A stop channel is returned
|
||||
// which is closed on one of these signals. If a second signal is caught, the program
|
||||
// is terminated with exit code 1.
|
||||
func SetupSignalHandler() <-chan struct{} {
|
||||
close(onlyOneSignalHandler) // panics when called twice
|
||||
|
||||
shutdownHandler = make(chan os.Signal, 2)
|
||||
|
||||
stop := make(chan struct{})
|
||||
signal.Notify(shutdownHandler, shutdownSignals...)
|
||||
go func() {
|
||||
<-shutdownHandler
|
||||
close(stop)
|
||||
<-shutdownHandler
|
||||
os.Exit(1) // second signal. Exit directly.
|
||||
}()
|
||||
|
||||
return stop
|
||||
}
|
||||
|
||||
// RequestShutdown emulates a received event that is considered as shutdown signal (SIGTERM/SIGINT)
|
||||
// This returns whether a handler was notified
|
||||
func RequestShutdown() bool {
|
||||
if shutdownHandler != nil {
|
||||
select {
|
||||
case shutdownHandler <- shutdownSignals[0]:
|
||||
return true
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
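A usage sketch, assuming a typical main function: pass the returned channel as the stop channel to controllers and webhook servers so the first SIGINT/SIGTERM triggers a graceful shutdown and a second signal exits immediately.
// Hypothetical main wiring for the signal package.
func main() {
	stopCh := signal.SetupSignalHandler()

	// ... start controllers and webhook servers with stopCh here ...

	<-stopCh // first SIGINT/SIGTERM closes this channel; a second one calls os.Exit(1)
	glog.Info("shutting down gracefully")
}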
|
8
pkg/signal/signal_posix.go
Normal file
|
@ -0,0 +1,8 @@
|
|||
package signal
|
||||
|
||||
import (
|
||||
"os"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
var shutdownSignals = []os.Signal{os.Interrupt, syscall.SIGTERM}
|
8
pkg/signal/signal_windows.go
Normal file
|
@ -0,0 +1,8 @@
|
|||
package signal
|
||||
|
||||
//TODO: how to pick files based on the target OS at compile time?
|
||||
// import (
|
||||
// "os"
|
||||
// )
|
||||
|
||||
// var shutdownSignals = []os.Signal{os.Interrupt}
|
|
@ -181,7 +181,8 @@ func runTestCase(t *testing.T, tc scaseT) bool {
|
|||
er = engine.Generate(policyContext)
|
||||
t.Log(("---Generation---"))
|
||||
validateResponse(t, er.PolicyResponse, tc.Expected.Generation.PolicyResponse)
|
||||
validateGeneratedResources(t, client, *policy, tc.Expected.Generation.GeneratedResources)
|
||||
// Expected generated resources will be in the same namespace as the resource
|
||||
validateGeneratedResources(t, client, *policy, resource.GetName(), tc.Expected.Generation.GeneratedResources)
|
||||
}
|
||||
}
|
||||
return true
|
||||
|
@ -191,12 +192,12 @@ func createNamespace(client *client.Client, ns *unstructured.Unstructured) error
|
|||
_, err := client.CreateResource("Namespace", "", ns, false)
|
||||
return err
|
||||
}
|
||||
func validateGeneratedResources(t *testing.T, client *client.Client, policy kyverno.ClusterPolicy, expected []kyverno.ResourceSpec) {
|
||||
func validateGeneratedResources(t *testing.T, client *client.Client, policy kyverno.ClusterPolicy, namespace string, expected []kyverno.ResourceSpec) {
|
||||
t.Log("--validate if resources are generated---")
|
||||
// list of expected generated resources
|
||||
for _, resource := range expected {
|
||||
if _, err := client.GetResource(resource.Kind, resource.Namespace, resource.Name); err != nil {
|
||||
t.Errorf("generated resource %s/%s/%s not found. %v", resource.Kind, resource.Namespace, resource.Name, err)
|
||||
if _, err := client.GetResource(resource.Kind, namespace, resource.Name); err != nil {
|
||||
t.Errorf("generated resource %s/%s/%s not found. %v", resource.Kind, namespace, resource.Name, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -367,12 +368,11 @@ func loadResource(t *testing.T, path string) []*unstructured.Unstructured {
|
|||
rBytes := bytes.Split(data, []byte("---"))
|
||||
for _, r := range rBytes {
|
||||
decode := scheme.Codecs.UniversalDeserializer().Decode
|
||||
obj, gvk, err := decode(r, nil, nil)
|
||||
obj, _, err := decode(r, nil, nil)
|
||||
if err != nil {
|
||||
t.Logf("failed to decode resource: %v", err)
|
||||
continue
|
||||
}
|
||||
glog.Info(gvk)
|
||||
|
||||
data, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj)
|
||||
if err != nil {
|
||||
|
|
|
@ -1,24 +1,20 @@
|
|||
package version
|
||||
|
||||
import (
|
||||
"github.com/golang/glog"
|
||||
)
|
||||
|
||||
// These fields are set during an official build
|
||||
// Global vars set from command-line arguments
|
||||
var (
|
||||
BuildVersion = "--"
|
||||
BuildHash = "--"
|
||||
BuildTime = "--"
|
||||
)
|
||||
|
||||
// VersionInfo gets json info about the agent version
|
||||
type VersionInfo struct {
|
||||
BuildVersion string
|
||||
BuildHash string
|
||||
BuildTime string
|
||||
}
|
||||
|
||||
// GetVersion gets the current agent version
|
||||
func GetVersion() *VersionInfo {
|
||||
return &VersionInfo{
|
||||
BuildVersion: BuildVersion,
|
||||
BuildHash: BuildHash,
|
||||
BuildTime: BuildTime,
|
||||
}
|
||||
//PrintVersionInfo displays the kyverno version - git version
|
||||
func PrintVersionInfo() {
|
||||
glog.Infof("Kyverno version: %s\n", BuildVersion)
|
||||
glog.Infof("Kyverno BuildHash: %s\n", BuildHash)
|
||||
glog.Infof("Kyverno BuildTime: %s\n", BuildTime)
|
||||
}
|
||||
|
|
|
@ -2,6 +2,7 @@ package webhookconfig
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/nirmata/kyverno/pkg/config"
|
||||
|
@ -57,8 +58,9 @@ func (wrc *WebhookRegistrationClient) constructDebugVerifyMutatingWebhookConfig(
|
|||
}
|
||||
}
|
||||
|
||||
func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig() {
|
||||
// Muating webhook configuration
|
||||
func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// Mutating webhook configuration
|
||||
var err error
|
||||
var mutatingConfig string
|
||||
if wrc.serverIP != "" {
|
||||
|
@ -67,7 +69,7 @@ func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig()
|
|||
mutatingConfig = config.VerifyMutatingWebhookConfigurationName
|
||||
}
|
||||
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
|
||||
err = wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
|
||||
err = wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("verify webhook configuration %s, does not exits. not deleting", mutatingConfig)
|
||||
} else if err != nil {
|
||||
|
|
|
@ -62,12 +62,14 @@ func (wrc *WebhookRegistrationClient) constructOwner() v1.OwnerReference {
|
|||
}
|
||||
|
||||
func generateDebugWebhook(name, url string, caData []byte, validate bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.Webhook {
|
||||
sideEffect := admregapi.SideEffectClassNoneOnDryRun
|
||||
return admregapi.Webhook{
|
||||
Name: name,
|
||||
ClientConfig: admregapi.WebhookClientConfig{
|
||||
URL: &url,
|
||||
CABundle: caData,
|
||||
},
|
||||
SideEffects: &sideEffect,
|
||||
Rules: []admregapi.RuleWithOperations{
|
||||
admregapi.RuleWithOperations{
|
||||
Operations: operationTypes,
|
||||
|
@ -84,11 +86,13 @@ func generateDebugWebhook(name, url string, caData []byte, validate bool, timeou
|
|||
},
|
||||
},
|
||||
},
|
||||
TimeoutSeconds: &timeoutSeconds,
|
||||
AdmissionReviewVersions: []string{"v1beta1"},
|
||||
TimeoutSeconds: &timeoutSeconds,
|
||||
}
|
||||
}
|
||||
|
||||
func generateWebhook(name, servicePath string, caData []byte, validation bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.Webhook {
|
||||
sideEffect := admregapi.SideEffectClassNoneOnDryRun
|
||||
return admregapi.Webhook{
|
||||
Name: name,
|
||||
ClientConfig: admregapi.WebhookClientConfig{
|
||||
|
@ -99,6 +103,7 @@ func generateWebhook(name, servicePath string, caData []byte, validation bool, t
|
|||
},
|
||||
CABundle: caData,
|
||||
},
|
||||
SideEffects: &sideEffect,
|
||||
Rules: []admregapi.RuleWithOperations{
|
||||
admregapi.RuleWithOperations{
|
||||
Operations: operationTypes,
|
||||
|
@ -115,6 +120,7 @@ func generateWebhook(name, servicePath string, caData []byte, validation bool, t
|
|||
},
|
||||
},
|
||||
},
|
||||
TimeoutSeconds: &timeoutSeconds,
|
||||
AdmissionReviewVersions: []string{"v1beta1"},
|
||||
TimeoutSeconds: &timeoutSeconds,
|
||||
}
|
||||
}
|
||||
|
|
|
@ -118,7 +118,7 @@ func (wrc *WebhookRegistrationClient) removePolicyWebhookConfigurations() {
|
|||
validatingConfig = config.PolicyValidatingWebhookConfigurationName
|
||||
}
|
||||
glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
|
||||
err = wrc.registrationClient.ValidatingWebhookConfigurations().Delete(validatingConfig, &v1.DeleteOptions{})
|
||||
err = wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", validatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
|
||||
} else if err != nil {
|
||||
|
@ -136,7 +136,7 @@ func (wrc *WebhookRegistrationClient) removePolicyWebhookConfigurations() {
|
|||
}
|
||||
|
||||
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
|
||||
err = wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
|
||||
err = wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
|
||||
} else if err != nil {
|
||||
|
|
|
@ -2,6 +2,7 @@ package webhookconfig
|
|||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
|
@ -9,37 +10,35 @@ import (
|
|||
client "github.com/nirmata/kyverno/pkg/dclient"
|
||||
admregapi "k8s.io/api/admissionregistration/v1beta1"
|
||||
errorsapi "k8s.io/apimachinery/pkg/api/errors"
|
||||
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
admregclient "k8s.io/client-go/kubernetes/typed/admissionregistration/v1beta1"
|
||||
rest "k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
MutatingWebhookConfigurationKind string = "MutatingWebhookConfiguration"
|
||||
ValidatingWebhookConfigurationKind string = "ValidatingWebhookConfiguration"
|
||||
)
|
||||
|
||||
// WebhookRegistrationClient is client for registration webhooks on cluster
|
||||
type WebhookRegistrationClient struct {
|
||||
registrationClient *admregclient.AdmissionregistrationV1beta1Client
|
||||
client *client.Client
|
||||
clientConfig *rest.Config
|
||||
client *client.Client
|
||||
clientConfig *rest.Config
|
||||
// serverIP should be used if running Kyverno out of cluster
|
||||
serverIP string
|
||||
timeoutSeconds int32
|
||||
}
|
||||
|
||||
// NewWebhookRegistrationClient creates new WebhookRegistrationClient instance
|
||||
func NewWebhookRegistrationClient(clientConfig *rest.Config, client *client.Client, serverIP string, webhookTimeout int32) (*WebhookRegistrationClient, error) {
|
||||
registrationClient, err := admregclient.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
glog.V(4).Infof("Registering webhook client using serverIP %s\n", serverIP)
|
||||
|
||||
func NewWebhookRegistrationClient(
|
||||
clientConfig *rest.Config,
|
||||
client *client.Client,
|
||||
serverIP string,
|
||||
webhookTimeout int32) *WebhookRegistrationClient {
|
||||
return &WebhookRegistrationClient{
|
||||
registrationClient: registrationClient,
|
||||
client: client,
|
||||
clientConfig: clientConfig,
|
||||
serverIP: serverIP,
|
||||
timeoutSeconds: webhookTimeout,
|
||||
}, nil
|
||||
clientConfig: clientConfig,
|
||||
client: client,
|
||||
serverIP: serverIP,
|
||||
timeoutSeconds: webhookTimeout,
|
||||
}
|
||||
}
|
||||
|
||||
// Register creates admission webhooks configs on cluster
|
||||
|
@ -53,6 +52,12 @@ func (wrc *WebhookRegistrationClient) Register() error {
|
|||
// webhook configurations are created dynamically based on the policy resources
|
||||
wrc.removeWebhookConfigurations()
|
||||
|
||||
// create Verify mutating webhook configuration resource
|
||||
// that is used to check if admission control is enabled or not
|
||||
if err := wrc.createVerifyMutatingWebhookConfiguration(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Static Webhook configuration on Policy CRD
|
||||
// create Policy CRD validating webhook configuration resource
|
||||
// used for validating Policy CR
|
||||
|
@ -65,18 +70,12 @@ func (wrc *WebhookRegistrationClient) Register() error {
|
|||
return err
|
||||
}
|
||||
|
||||
// create Verify mutating webhook configuration resource
|
||||
// that is used to check if admission control is enabled or not
|
||||
if err := wrc.createVerifyMutatingWebhookConfiguration(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemovePolicyWebhookConfigurations removes webhook configurations for reosurces and policy
|
||||
// RemoveWebhookConfigurations removes webhook configurations for resources and policy
|
||||
// called during webhook server shutdown
|
||||
func (wrc *WebhookRegistrationClient) RemovePolicyWebhookConfigurations(cleanUp chan<- struct{}) {
|
||||
func (wrc *WebhookRegistrationClient) RemoveWebhookConfigurations(cleanUp chan<- struct{}) {
|
||||
//TODO: duplicate, but a placeholder to perform more error handling during cleanup
|
||||
wrc.removeWebhookConfigurations()
|
||||
// close channel to notify cleanup is complete
|
||||
|
@ -105,8 +104,7 @@ func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration
|
|||
// clientConfig - service
|
||||
config = wrc.constructMutatingWebhookConfig(caData)
|
||||
}
|
||||
|
||||
_, err := wrc.registrationClient.MutatingWebhookConfigurations().Create(config)
|
||||
_, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false)
|
||||
if errorsapi.IsAlreadyExists(err) {
|
||||
glog.V(4).Infof("resource mutating webhook configuration %s, already exists. not creating one", config.Name)
|
||||
return nil
|
||||
|
@ -118,18 +116,6 @@ func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration
|
|||
return nil
|
||||
}
|
||||
|
||||
//GetResourceMutatingWebhookConfiguration returns the MutatingWebhookConfiguration
|
||||
func (wrc *WebhookRegistrationClient) GetResourceMutatingWebhookConfiguration() (*admregapi.MutatingWebhookConfiguration, error) {
|
||||
var name string
|
||||
if wrc.serverIP != "" {
|
||||
name = config.MutatingWebhookConfigurationDebugName
|
||||
} else {
|
||||
name = config.MutatingWebhookConfigurationName
|
||||
}
|
||||
|
||||
return wrc.registrationClient.MutatingWebhookConfigurations().Get(name, v1.GetOptions{})
|
||||
}
|
||||
|
||||
//registerPolicyValidatingWebhookConfiguration create a Validating webhook configuration for Policy CRD
|
||||
func (wrc *WebhookRegistrationClient) createPolicyValidatingWebhookConfiguration() error {
|
||||
var caData []byte
|
||||
|
@ -153,7 +139,7 @@ func (wrc *WebhookRegistrationClient) createPolicyValidatingWebhookConfiguration
|
|||
}
|
||||
|
||||
// create validating webhook configuration resource
|
||||
if _, err := wrc.registrationClient.ValidatingWebhookConfigurations().Create(config); err != nil {
|
||||
if _, err := wrc.client.CreateResource(ValidatingWebhookConfigurationKind, "", *config, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -183,7 +169,7 @@ func (wrc *WebhookRegistrationClient) createPolicyMutatingWebhookConfiguration()
|
|||
}
|
||||
|
||||
// create mutating webhook configuration resource
|
||||
if _, err := wrc.registrationClient.MutatingWebhookConfigurations().Create(config); err != nil {
|
||||
if _, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -213,7 +199,7 @@ func (wrc *WebhookRegistrationClient) createVerifyMutatingWebhookConfiguration()
|
|||
}
|
||||
|
||||
// create mutating webhook configuration resource
|
||||
if _, err := wrc.registrationClient.MutatingWebhookConfigurations().Create(config); err != nil {
|
||||
if _, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
|
@ -230,12 +216,70 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
|
|||
defer func() {
|
||||
glog.V(4).Infof("Finished cleaning up webhookcongfigurations (%v)", time.Since(startTime))
|
||||
}()
|
||||
|
||||
var wg sync.WaitGroup
|
||||
|
||||
wg.Add(4)
|
||||
// mutating and validating webhook configuration for Kubernetes resources
|
||||
wrc.RemoveResourceMutatingWebhookConfiguration()
|
||||
|
||||
go wrc.removeResourceMutatingWebhookConfiguration(&wg)
|
||||
// mutating and validating webhook configurtion for Policy CRD resource
|
||||
wrc.removePolicyWebhookConfigurations()
|
||||
go wrc.removePolicyMutatingWebhookConfiguration(&wg)
|
||||
go wrc.removePolicyValidatingWebhookConfiguration(&wg)
|
||||
// mutating webhook configuration for verifying webhook
|
||||
go wrc.removeVerifyWebhookMutatingWebhookConfig(&wg)
|
||||
|
||||
// muating webhook configuration use to verify if admission control flow is working or not
|
||||
wrc.removeVerifyWebhookMutatingWebhookConfig()
|
||||
// wait for the removal go routines to return
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// wrapper to handle wait group
|
||||
// TODO: re-work with RemoveResourceMutatingWebhookConfiguration, as the only difference is wg handling
|
||||
func (wrc *WebhookRegistrationClient) removeResourceMutatingWebhookConfiguration(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
wrc.RemoveResourceMutatingWebhookConfiguration()
|
||||
}
|
||||
|
||||
// delete policy mutating webhookconfigurations
|
||||
// handle wait group
|
||||
func (wrc *WebhookRegistrationClient) removePolicyMutatingWebhookConfiguration(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// Mutating webhook configuration
|
||||
var mutatingConfig string
|
||||
if wrc.serverIP != "" {
|
||||
mutatingConfig = config.PolicyMutatingWebhookConfigurationDebugName
|
||||
} else {
|
||||
mutatingConfig = config.PolicyMutatingWebhookConfigurationName
|
||||
}
|
||||
|
||||
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
|
||||
err := wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
|
||||
} else if err != nil {
|
||||
glog.Errorf("failed to delete policy webhook configuration %s: %v", mutatingConfig, err)
|
||||
} else {
|
||||
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", mutatingConfig)
|
||||
}
|
||||
}
|
||||
|
||||
// delete policy validating webhookconfigurations
|
||||
// handle wait group
|
||||
func (wrc *WebhookRegistrationClient) removePolicyValidatingWebhookConfiguration(wg *sync.WaitGroup) {
|
||||
defer wg.Done()
|
||||
// Validating webhook configuration
|
||||
var validatingConfig string
|
||||
if wrc.serverIP != "" {
|
||||
validatingConfig = config.PolicyValidatingWebhookConfigurationDebugName
|
||||
} else {
|
||||
validatingConfig = config.PolicyValidatingWebhookConfigurationName
|
||||
}
|
||||
glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
|
||||
err := wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", validatingConfig, false)
|
||||
if errorsapi.IsNotFound(err) {
|
||||
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
|
||||
} else if err != nil {
|
||||
glog.Errorf("failed to delete policy webhook configuration %s: %v", validatingConfig, err)
|
||||
} else {
|
||||
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", validatingConfig)
|
||||
}
|
||||
}
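The cleanup above fans the four webhook-configuration deletions out to goroutines and joins on a sync.WaitGroup. A stripped-down, runnable sketch of that pattern follows; the configuration names are placeholders, not Kyverno's:

```go
package main

import (
	"fmt"
	"sync"
)

// deleteConfig stands in for one of the per-configuration cleanup calls
// (resource mutating, policy mutating, policy validating, verify webhook).
func deleteConfig(name string, wg *sync.WaitGroup) {
	defer wg.Done()
	fmt.Printf("deleting webhook configuration %s\n", name)
}

func main() {
	var wg sync.WaitGroup
	configs := []string{"resource-mutating", "policy-mutating", "policy-validating", "verify-mutating"}

	wg.Add(len(configs))
	for _, c := range configs {
		go deleteConfig(c, &wg)
	}

	// Block until every removal goroutine has called Done.
	wg.Wait()
}
```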
|
||||
|
|
|
@ -58,16 +58,20 @@ func (wrc *WebhookRegistrationClient) constructMutatingWebhookConfig(caData []by
|
|||
}
|
||||
}
|
||||
|
||||
//GetResourceMutatingWebhookConfigName provi
|
||||
func (wrc *WebhookRegistrationClient) GetResourceMutatingWebhookConfigName() string {
|
||||
if wrc.serverIP != "" {
|
||||
return config.MutatingWebhookConfigurationDebugName
|
||||
}
|
||||
return config.MutatingWebhookConfigurationName
|
||||
}
|
||||
|
||||
//RemoveResourceMutatingWebhookConfiguration removes mutating webhook configuration for all resources
|
||||
func (wrc *WebhookRegistrationClient) RemoveResourceMutatingWebhookConfiguration() error {
|
||||
var configName string
|
||||
if wrc.serverIP != "" {
|
||||
configName = config.MutatingWebhookConfigurationDebugName
|
||||
} else {
|
||||
configName = config.MutatingWebhookConfigurationName
|
||||
}
|
||||
|
||||
configName := wrc.GetResourceMutatingWebhookConfigName()
|
||||
// delete webhook configuration
|
||||
err := wrc.registrationClient.MutatingWebhookConfigurations().Delete(configName, &v1.DeleteOptions{})
|
||||
err := wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", configName, false)
|
||||
if errors.IsNotFound(err) {
|
||||
glog.V(4).Infof("resource webhook configuration %s does not exits, so not deleting", configName)
|
||||
return nil
|
||||
|
|
101
pkg/webhookconfig/rwebhookregister.go
Normal file
|
@ -0,0 +1,101 @@
|
|||
package webhookconfig
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/golang/glog"
|
||||
checker "github.com/nirmata/kyverno/pkg/checker"
|
||||
"github.com/tevino/abool"
|
||||
mconfiginformer "k8s.io/client-go/informers/admissionregistration/v1beta1"
|
||||
mconfiglister "k8s.io/client-go/listers/admissionregistration/v1beta1"
|
||||
cache "k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
type ResourceWebhookRegister struct {
|
||||
// pendingCreation indicates the status of resource webhook creation
|
||||
pendingCreation *abool.AtomicBool
|
||||
LastReqTime *checker.LastReqTime
|
||||
mwebhookconfigSynced cache.InformerSynced
|
||||
// list/get mutatingwebhookconfigurations
|
||||
mWebhookConfigLister mconfiglister.MutatingWebhookConfigurationLister
|
||||
webhookRegistrationClient *WebhookRegistrationClient
|
||||
}
|
||||
|
||||
func NewResourceWebhookRegister(
|
||||
lastReqTime *checker.LastReqTime,
|
||||
mconfigwebhookinformer mconfiginformer.MutatingWebhookConfigurationInformer,
|
||||
webhookRegistrationClient *WebhookRegistrationClient,
|
||||
) *ResourceWebhookRegister {
|
||||
return &ResourceWebhookRegister{
|
||||
pendingCreation: abool.New(),
|
||||
LastReqTime: lastReqTime,
|
||||
mwebhookconfigSynced: mconfigwebhookinformer.Informer().HasSynced,
|
||||
mWebhookConfigLister: mconfigwebhookinformer.Lister(),
|
||||
webhookRegistrationClient: webhookRegistrationClient,
|
||||
}
|
||||
}
|
||||
|
||||
func (rww *ResourceWebhookRegister) RegisterResourceWebhook() {
|
||||
// drop the request if creation is in processing
|
||||
if rww.pendingCreation.IsSet() {
|
||||
glog.V(3).Info("resource webhook configuration is in pending creation, skip the request")
|
||||
return
|
||||
}
|
||||
|
||||
// check cache
|
||||
configName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
|
||||
// existence of the config is all that matters; if an error occurs, create the webhook anyway
|
||||
// errors of webhook creation are handled separately
|
||||
config, _ := rww.mWebhookConfigLister.Get(configName)
|
||||
if config != nil {
|
||||
glog.V(4).Info("mutating webhoook configuration already exists, skip the request")
|
||||
return
|
||||
}
|
||||
|
||||
createWebhook := func() {
|
||||
rww.pendingCreation.Set()
|
||||
err := rww.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration()
|
||||
rww.pendingCreation.UnSet()
|
||||
|
||||
if err != nil {
|
||||
glog.Errorf("failed to create resource mutating webhook configuration: %v, re-queue creation request", err)
|
||||
rww.RegisterResourceWebhook()
|
||||
return
|
||||
}
|
||||
glog.V(3).Info("Successfully created mutating webhook configuration for resources")
|
||||
}
|
||||
|
||||
timeDiff := time.Since(rww.LastReqTime.Time())
|
||||
if timeDiff < checker.DefaultDeadline {
|
||||
glog.V(3).Info("Verified webhook status, creating webhook configuration")
|
||||
go createWebhook()
|
||||
}
|
||||
}
|
||||
|
||||
func (rww *ResourceWebhookRegister) Run(stopCh <-chan struct{}) {
|
||||
// wait for cache to populate first time
|
||||
if !cache.WaitForCacheSync(stopCh, rww.mwebhookconfigSynced) {
|
||||
glog.Error("configuration: failed to sync webhook informer cache")
|
||||
}
|
||||
}
|
||||
|
||||
func (rww *ResourceWebhookRegister) RemoveResourceWebhookConfiguration() error {
|
||||
var err error
|
||||
// check informer cache
|
||||
configName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName()
|
||||
config, err := rww.mWebhookConfigLister.Get(configName)
|
||||
if err != nil {
|
||||
glog.V(4).Infof("failed to list mutating webhook config: %v", err)
|
||||
return err
|
||||
}
|
||||
if config == nil {
|
||||
// as no resource is found
|
||||
return nil
|
||||
}
|
||||
err = rww.webhookRegistrationClient.RemoveResourceMutatingWebhookConfiguration()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
glog.V(3).Info("removed resource webhook configuration")
|
||||
return nil
|
||||
}
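The pendingCreation guard above uses github.com/tevino/abool to drop re-entrant creation requests while one is in flight. Below is a self-contained sketch of that debounce idea; it uses SetToIf to make the check-and-set a single step, a slight tightening of the IsSet/Set sequence in the file above, and the names are illustrative:

```go
package main

import (
	"fmt"
	"sync"
	"time"

	"github.com/tevino/abool"
)

type creator struct {
	pendingCreation *abool.AtomicBool
}

func (c *creator) register(wg *sync.WaitGroup) {
	defer wg.Done()
	// Atomically flip false -> true; if it was already true, another
	// creation is pending and this request is dropped.
	if !c.pendingCreation.SetToIf(false, true) {
		fmt.Println("creation pending, skipping request")
		return
	}
	defer c.pendingCreation.UnSet()

	time.Sleep(50 * time.Millisecond) // stand-in for the webhook create call
	fmt.Println("webhook configuration created")
}

func main() {
	c := &creator{pendingCreation: abool.New()}
	var wg sync.WaitGroup
	wg.Add(2)
	go c.register(&wg)
	go c.register(&wg)
	wg.Wait()
}
```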
|
|
@ -37,11 +37,10 @@ func (ws *WebhookServer) handlePolicyValidation(request *v1beta1.AdmissionReques
|
|||
}
|
||||
|
||||
if admissionResp.Allowed {
|
||||
// if the policy contains mutating & validation rules and it config does not exist we create one
|
||||
if policy.HasMutateOrValidate() {
|
||||
// create mutating resource mutatingwebhookconfiguration if not present
|
||||
if err := ws.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration(); err != nil {
|
||||
glog.Error("failed to created resource mutating webhook configuration, policies wont be applied on the resource")
|
||||
}
|
||||
// queue the request
|
||||
ws.resourceWebhookWatcher.RegisterResourceWebhook()
|
||||
}
|
||||
}
|
||||
return admissionResp
|
||||
|
|
|
@ -133,7 +133,6 @@ func buildViolatedRules(er engine.EngineResponse, blocked bool) []kyverno.Violat
|
|||
// if resource was blocked we create dependent
|
||||
dependant := kyverno.ManagedResourceSpec{
|
||||
Kind: er.PolicyResponse.Resource.Kind,
|
||||
Namespace: er.PolicyResponse.Resource.Namespace,
|
||||
CreationBlocked: true,
|
||||
}
|
||||
|
||||
|
|
|
@ -34,18 +34,24 @@ import (
|
|||
// WebhookServer contains configured TLS server with MutationWebhook.
|
||||
// MutationWebhook gets policies from policyController and takes control of the cluster with kubeclient.
|
||||
type WebhookServer struct {
|
||||
server http.Server
|
||||
client *client.Client
|
||||
kyvernoClient *kyvernoclient.Clientset
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
pvLister kyvernolister.ClusterPolicyViolationLister
|
||||
namespacepvLister kyvernolister.NamespacedPolicyViolationLister
|
||||
pListerSynced cache.InformerSynced
|
||||
pvListerSynced cache.InformerSynced
|
||||
namespacepvListerSynced cache.InformerSynced
|
||||
rbLister rbaclister.RoleBindingLister
|
||||
crbLister rbaclister.ClusterRoleBindingLister
|
||||
eventGen event.Interface
|
||||
server http.Server
|
||||
client *client.Client
|
||||
kyvernoClient *kyvernoclient.Clientset
|
||||
// list/get cluster policy resource
|
||||
pLister kyvernolister.ClusterPolicyLister
|
||||
// returns true if the cluster policy store has synced at least once
|
||||
pSynced cache.InformerSynced
|
||||
// list/get role binding resource
|
||||
rbLister rbaclister.RoleBindingLister
|
||||
// returns true if the role binding store has synced at least once
|
||||
rbSynced cache.InformerSynced
|
||||
// list/get cluster role binding resource
|
||||
crbLister rbaclister.ClusterRoleBindingLister
|
||||
// returns true if the cluster role binding store has synced at least once
|
||||
crbSynced cache.InformerSynced
|
||||
// generate events
|
||||
eventGen event.Interface
|
||||
// webhook registration client
|
||||
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
|
||||
// API to send policy stats for aggregation
|
||||
policyStatus policy.PolicyStatusInterface
|
||||
|
@ -58,7 +64,8 @@ type WebhookServer struct {
|
|||
// store to hold policy meta data for faster lookup
|
||||
pMetaStore policystore.LookupInterface
|
||||
// policy violation generator
|
||||
pvGenerator policyviolation.GeneratorInterface
|
||||
pvGenerator policyviolation.GeneratorInterface
|
||||
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
|
||||
}
|
||||
|
||||
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
|
||||
|
@ -68,8 +75,6 @@ func NewWebhookServer(
|
|||
client *client.Client,
|
||||
tlsPair *tlsutils.TlsPemPair,
|
||||
pInformer kyvernoinformer.ClusterPolicyInformer,
|
||||
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
|
||||
namespacepvInformer kyvernoinformer.NamespacedPolicyViolationInformer,
|
||||
rbInformer rbacinformer.RoleBindingInformer,
|
||||
crbInformer rbacinformer.ClusterRoleBindingInformer,
|
||||
eventGen event.Interface,
|
||||
|
@ -78,6 +83,7 @@ func NewWebhookServer(
|
|||
configHandler config.Interface,
|
||||
pMetaStore policystore.LookupInterface,
|
||||
pvGenerator policyviolation.GeneratorInterface,
|
||||
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
|
||||
cleanUp chan<- struct{}) (*WebhookServer, error) {
|
||||
|
||||
if tlsPair == nil {
|
||||
|
@ -92,32 +98,29 @@ func NewWebhookServer(
|
|||
tlsConfig.Certificates = []tls.Certificate{pair}
|
||||
|
||||
ws := &WebhookServer{
|
||||
|
||||
client: client,
|
||||
kyvernoClient: kyvernoClient,
|
||||
pLister: pInformer.Lister(),
|
||||
pvLister: pvInformer.Lister(),
|
||||
namespacepvLister: namespacepvInformer.Lister(),
|
||||
pListerSynced: pvInformer.Informer().HasSynced,
|
||||
pvListerSynced: pInformer.Informer().HasSynced,
|
||||
namespacepvListerSynced: namespacepvInformer.Informer().HasSynced,
|
||||
pSynced: pInformer.Informer().HasSynced,
|
||||
rbLister: rbInformer.Lister(),
|
||||
rbSynced: rbInformer.Informer().HasSynced,
|
||||
crbLister: crbInformer.Lister(),
|
||||
crbSynced: crbInformer.Informer().HasSynced,
|
||||
eventGen: eventGen,
|
||||
webhookRegistrationClient: webhookRegistrationClient,
|
||||
policyStatus: policyStatus,
|
||||
configHandler: configHandler,
|
||||
rbLister: rbInformer.Lister(),
|
||||
crbLister: crbInformer.Lister(),
|
||||
cleanUp: cleanUp,
|
||||
lastReqTime: checker.NewLastReqTime(),
|
||||
lastReqTime: resourceWebhookWatcher.LastReqTime,
|
||||
pvGenerator: pvGenerator,
|
||||
pMetaStore: pMetaStore,
|
||||
resourceWebhookWatcher: resourceWebhookWatcher,
|
||||
}
|
||||
mux := http.NewServeMux()
|
||||
mux.HandleFunc(config.MutatingWebhookServicePath, ws.serve)
|
||||
mux.HandleFunc(config.ValidatingWebhookServicePath, ws.serve)
|
||||
mux.HandleFunc(config.VerifyMutatingWebhookServicePath, ws.serve)
|
||||
mux.HandleFunc(config.PolicyValidatingWebhookServicePath, ws.serve)
|
||||
mux.HandleFunc(config.PolicyMutatingWebhookServicePath, ws.serve)
|
||||
|
||||
ws.server = http.Server{
|
||||
Addr: ":443", // Listen on port for HTTPS requests
|
||||
TLSConfig: &tlsConfig,
|
||||
|
@ -246,6 +249,10 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionReques
|
|||
|
||||
// RunAsync TLS server in separate thread and returns control immediately
|
||||
func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
|
||||
if !cache.WaitForCacheSync(stopCh, ws.pSynced, ws.rbSynced, ws.crbSynced) {
|
||||
glog.Error("webhook: failed to sync informer cache")
|
||||
}
|
||||
|
||||
go func(ws *WebhookServer) {
|
||||
glog.V(3).Infof("serving on %s\n", ws.server.Addr)
|
||||
if err := ws.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
|
||||
|
@ -257,21 +264,21 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
|
|||
// resync: 60 seconds
|
||||
// deadline: 60 seconds (send request)
|
||||
// max deadline: deadline*3 (set the deployment annotation as false)
|
||||
go ws.lastReqTime.Run(ws.pLister, ws.eventGen, ws.client, 60*time.Second, 60*time.Second, stopCh)
|
||||
go ws.lastReqTime.Run(ws.pLister, ws.eventGen, ws.client, checker.DefaultResync, checker.DefaultDeadline, stopCh)
|
||||
}
|
||||
|
||||
// Stop TLS server and returns control after the server is shut down
|
||||
func (ws *WebhookServer) Stop() {
|
||||
err := ws.server.Shutdown(context.Background())
|
||||
func (ws *WebhookServer) Stop(ctx context.Context) {
|
||||
// cleanUp
|
||||
// remove the static webhookconfigurations
|
||||
go ws.webhookRegistrationClient.RemoveWebhookConfigurations(ws.cleanUp)
|
||||
// shutdown http.Server with context timeout
|
||||
err := ws.server.Shutdown(ctx)
|
||||
if err != nil {
|
||||
// Error from closing listeners, or context timeout:
|
||||
glog.Info("Server Shutdown error: ", err)
|
||||
ws.server.Close()
|
||||
}
|
||||
// cleanUp
|
||||
// remove the static webhookconfigurations for policy CRD
|
||||
ws.webhookRegistrationClient.RemovePolicyWebhookConfigurations(ws.cleanUp)
|
||||
|
||||
}
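Stop now takes the shutdown context from the caller rather than building one internally, so the caller decides how long graceful shutdown may take. A generic http.Server sketch of that contract (not Kyverno code; the port and timeout are arbitrary):

```go
package main

import (
	"context"
	"log"
	"net/http"
	"time"
)

func main() {
	srv := &http.Server{Addr: ":8443"}

	go func() {
		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
			log.Printf("server error: %v", err)
		}
	}()

	// Caller-bounded graceful shutdown, falling back to Close on timeout,
	// mirroring the updated Stop(ctx) flow.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Printf("shutdown error: %v", err)
		srv.Close()
	}
}
```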
|
||||
|
||||
// bodyToAdmissionReview creates AdmissionReview object from request body
|
||||
|
|
|
@ -1,9 +1,5 @@
|
|||
Use these scripts to prepare the controller for work.
|
||||
All these scripts should be launched from the root folder of the project, for example:
|
||||
`scripts/compile-image.sh`
|
||||
|
||||
### compile-image.sh ###
|
||||
Compiles the project to go executable, generates docker image and pushes it to the repo. Has no arguments.
|
||||
All these scripts should be launched from the root folder of the project.
|
||||
|
||||
### generate-server-cert.sh ###
|
||||
Generates the TLS certificate and key used by the webhook server. Example:
|
||||
|
@ -19,10 +15,8 @@ Prepares controller for free (local) or in-cluster use. Uses `generate-server-ce
|
|||
* `--serverIp` means the same as for `generate-server-cert.sh`
|
||||
Examples:
|
||||
`scripts/deploy-controller.sh --service=my-kyverno --namespace=my_namespace --serverIp=192.168.10.117` - deploy controller to the cluster with master node '192.168.10.117' to the namespace 'my_namespace' as a service 'my-kyverno'
|
||||
`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with mnaster node at '192.168.10.117'
|
||||
`scripts/deploy-controller.sh --service=localhost --serverIp=192.168.10.117` - deploy controller locally for usage in cluster with master node at '192.168.10.117'
|
||||
|
||||
### test-web-hook.sh ###
|
||||
Quickly creates and deletes test config map. If your webhook server is running, you should see the corresponding output from it. Use this script after `deploy-controller.sh`.
|
||||
|
||||
### update-codegen.sh ###
|
||||
Generates additional code for controller object. You should resolve all dependencies before using it, see main Readme for details.
|
||||
|
|
|
@ -1,2 +0,0 @@
|
|||
kubectl delete -f definitions/install.yaml
|
||||
kubectl delete csr,MutatingWebhookConfiguration,ValidatingWebhookConfiguration --all
|
|
@ -1,34 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
default_version="dev-testing"
|
||||
version=$1
|
||||
|
||||
if [[ -z "$1" ]]
|
||||
then
|
||||
echo "Using default version: ${default_version}"
|
||||
version="${default_version}"
|
||||
fi
|
||||
|
||||
hub_user_name="nirmata"
|
||||
project_name="kyverno"
|
||||
|
||||
echo "# Ensuring Go dependencies..."
|
||||
dep ensure -v || exit 2
|
||||
|
||||
echo "# Building executable ${project_name}..."
|
||||
chmod +x scripts/update-codegen.sh
|
||||
scripts/update-codegen.sh
|
||||
make build || exit 3
|
||||
|
||||
echo "# Building docker image ${hub_user_name}/${project_name}:${version}"
|
||||
cat <<EOF > Dockerfile
|
||||
FROM alpine:latest
|
||||
WORKDIR ~/
|
||||
ADD ${project_name} ./${project_name}
|
||||
ENTRYPOINT ["./${project_name}"]
|
||||
EOF
|
||||
tag="${hub_user_name}/${project_name}:${version}"
|
||||
docker build --no-cache -t "${tag}" . || exit 4
|
||||
|
||||
echo "# Pushing image to repository..."
|
||||
docker push "${tag}" || exit 5
|
|
@ -1,3 +0,0 @@
|
|||
cd "$(dirname "$0")"
|
||||
kubectl create -f resources/test-configmap.yaml
|
||||
kubectl delete -f resources/test-configmap.yaml
|
|
@ -1,4 +0,0 @@
|
|||
#!/bin/bash
|
||||
# You should see the trace of requests in the output of webhook server
|
||||
kubectl create configmap test-config-map --from-literal="some_var=some_value"
|
||||
kubectl delete configmap test-config-map
|
|
@ -14,5 +14,5 @@ expected:
|
|||
rules:
|
||||
- name: prevent-mounting-default-serviceaccount
|
||||
type: Validation
|
||||
message: "Validation error: Prevent mounting of default service account\nValidation rule 'prevent-mounting-default-serviceaccount' failed at path '/spec/serviceAccountName/'."
|
||||
message: "Validation error: Prevent mounting of default service account; Validation rule 'prevent-mounting-default-serviceaccount' failed at path '/spec/serviceAccountName/'"
|
||||
success: false
|
|
@ -15,5 +15,5 @@ expected:
|
|||
rules:
|
||||
- name: validate-selinux-options
|
||||
type: Validation
|
||||
message: "Validation error: SELinux level is required\nValidation rule 'validate-selinux-options' failed at path '/spec/containers/0/securityContext/seLinuxOptions/'."
|
||||
message: "Validation error: SELinux level is required; Validation rule 'validate-selinux-options' failed at path '/spec/containers/0/securityContext/seLinuxOptions/'"
|
||||
success: false
|
|
@ -14,5 +14,5 @@ expected:
|
|||
rules:
|
||||
- name: validate-docker-sock-mount
|
||||
type: Validation
|
||||
message: "Validation error: Use of the Docker Unix socket is not allowed\nValidation rule 'validate-docker-sock-mount' failed at path '/spec/volumes/0/hostPath/path/'."
|
||||
message: "Validation error: Use of the Docker Unix socket is not allowed; Validation rule 'validate-docker-sock-mount' failed at path '/spec/volumes/0/hostPath/path/'"
|
||||
success: false
|
|
@ -12,5 +12,5 @@ expected:
|
|||
rules:
|
||||
- name: validate-helm-tiller
|
||||
type: Validation
|
||||
message: "Validation error: Helm Tiller is not allowed\nValidation rule 'validate-helm-tiller' failed at path '/spec/containers/0/image/'."
|
||||
message: "Validation error: Helm Tiller is not allowed; Validation rule 'validate-helm-tiller' failed at path '/spec/containers/0/image/'"
|
||||
success: false
|
||||
|
|
5
vendor/github.com/cenkalti/backoff/README.md
generated
vendored
|
@ -9,7 +9,10 @@ The retries exponentially increase and stop increasing when a certain threshold
|
|||
|
||||
## Usage
|
||||
|
||||
See https://godoc.org/github.com/cenkalti/backoff#pkg-examples
|
||||
Import path is `github.com/cenkalti/backoff/v3`. Please note the version part at the end.
|
||||
|
||||
godoc.org does not support modules yet,
|
||||
so you can use https://godoc.org/gopkg.in/cenkalti/backoff.v3 to view the documentation.
|
||||
|
||||
## Contributing
|
||||
|
||||
|
|
6
vendor/github.com/cenkalti/backoff/context.go
generated
vendored
|
@ -7,7 +7,7 @@ import (
|
|||
|
||||
// BackOffContext is a backoff policy that stops retrying after the context
|
||||
// is canceled.
|
||||
type BackOffContext interface {
|
||||
type BackOffContext interface { // nolint: golint
|
||||
BackOff
|
||||
Context() context.Context
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ type backOffContext struct {
|
|||
// WithContext returns a BackOffContext with context ctx
|
||||
//
|
||||
// ctx must not be nil
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext {
|
||||
func WithContext(b BackOff, ctx context.Context) BackOffContext { // nolint: golint
|
||||
if ctx == nil {
|
||||
panic("nil context")
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ func (b *backOffContext) NextBackOff() time.Duration {
|
|||
default:
|
||||
}
|
||||
next := b.BackOff.NextBackOff()
|
||||
if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next {
|
||||
if deadline, ok := b.ctx.Deadline(); ok && deadline.Sub(time.Now()) < next { // nolint: gosimple
|
||||
return Stop
|
||||
}
|
||||
return next
|
||||
|
|
5
vendor/github.com/cenkalti/backoff/example_test.go
generated
vendored
|
@ -20,7 +20,7 @@ func ExampleRetry() {
|
|||
// Operation is successful.
|
||||
}
|
||||
|
||||
func ExampleRetryContext() {
|
||||
func ExampleRetryContext() { // nolint: govet
|
||||
// A context
|
||||
ctx := context.Background()
|
||||
|
||||
|
@ -52,7 +52,7 @@ func ExampleTicker() {
|
|||
|
||||
// Ticks will continue to arrive when the previous operation is still running,
|
||||
// so operations that take a while to fail could run in quick succession.
|
||||
for _ = range ticker.C {
|
||||
for range ticker.C {
|
||||
if err = operation(); err != nil {
|
||||
log.Println(err, "will retry...")
|
||||
continue
|
||||
|
@ -68,5 +68,4 @@ func ExampleTicker() {
|
|||
}
|
||||
|
||||
// Operation is successful.
|
||||
return
|
||||
}
|
||||
|
|
3
vendor/github.com/cenkalti/backoff/exponential.go
generated
vendored
|
@ -103,13 +103,14 @@ func (t systemClock) Now() time.Time {
|
|||
var SystemClock = systemClock{}
|
||||
|
||||
// Reset the interval back to the initial retry interval and restarts the timer.
|
||||
// Reset must be called before using b.
|
||||
func (b *ExponentialBackOff) Reset() {
|
||||
b.currentInterval = b.InitialInterval
|
||||
b.startTime = b.Clock.Now()
|
||||
}
|
||||
|
||||
// NextBackOff calculates the next backoff interval using the formula:
|
||||
// Randomized interval = RetryInterval +/- (RandomizationFactor * RetryInterval)
|
||||
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
|
||||
func (b *ExponentialBackOff) NextBackOff() time.Duration {
|
||||
// Make sure we have not gone over the maximum elapsed time.
|
||||
if b.MaxElapsedTime != 0 && b.GetElapsedTime() > b.MaxElapsedTime {
|
||||
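A small worked example of the reworded formula (not from the library docs): with RetryInterval = 500 ms and RandomizationFactor = 0.5, the next interval is 500 ms × (1 ± 0.5), that is, a value drawn uniformly from [250 ms, 750 ms], which is the same range the previous additive phrasing (RetryInterval +/- RandomizationFactor * RetryInterval) described.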
|
|
4
vendor/github.com/cenkalti/backoff/retry.go
generated
vendored
|
@ -74,6 +74,10 @@ func (e *PermanentError) Error() string {
|
|||
return e.Err.Error()
|
||||
}
|
||||
|
||||
func (e *PermanentError) Unwrap() error {
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// Permanent wraps the given err in a *PermanentError.
|
||||
func Permanent(err error) *PermanentError {
|
||||
return &PermanentError{
|
||||
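The new Unwrap method lets the standard errors helpers see through a PermanentError. A small sketch, assuming Go 1.13+ and the plain github.com/cenkalti/backoff import path used by this vendor tree (the README change above recommends the /v3 path for module users):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/cenkalti/backoff"
)

var errNotFound = errors.New("not found")

func main() {
	// Permanent wraps errNotFound; Unwrap exposes it to errors.Is.
	err := backoff.Permanent(errNotFound)
	fmt.Println(errors.Is(err, errNotFound)) // true
}
```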
|
|
4
vendor/github.com/cenkalti/backoff/ticker_test.go
generated
vendored
|
@ -35,7 +35,7 @@ func TestTicker(t *testing.T) {
|
|||
}
|
||||
|
||||
var err error
|
||||
for _ = range ticker.C {
|
||||
for range ticker.C {
|
||||
if err = f(); err != nil {
|
||||
t.Log(err)
|
||||
continue
|
||||
|
@ -77,7 +77,7 @@ func TestTickerContext(t *testing.T) {
|
|||
ticker := NewTicker(b)
|
||||
|
||||
var err error
|
||||
for _ = range ticker.C {
|
||||
for range ticker.C {
|
||||
if err = f(); err != nil {
|
||||
t.Log(err)
|
||||
continue
|
||||
|
|
1
vendor/github.com/cenkalti/backoff/tries_test.go
generated
vendored
|
@ -40,7 +40,6 @@ func TestMaxTriesHappy(t *testing.T) {
|
|||
if d == Stop {
|
||||
t.Error("returned Stop after reset")
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func TestMaxTriesZero(t *testing.T) {
|
||||
|
|
38
vendor/github.com/docker/distribution/.gitignore
generated
vendored
|
@ -1,38 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
# never checkin from the bin file (for now)
|
||||
bin/*
|
||||
|
||||
# Test key files
|
||||
*.pem
|
||||
|
||||
# Cover profiles
|
||||
*.out
|
||||
|
||||
# Editor/IDE specific files.
|
||||
*.sublime-project
|
||||
*.sublime-workspace
|
||||
.idea/*
|
16
vendor/github.com/docker/distribution/.gometalinter.json
generated
vendored
|
@ -1,16 +0,0 @@
|
|||
{
|
||||
"Vendor": true,
|
||||
"Deadline": "2m",
|
||||
"Sort": ["linter", "severity", "path", "line"],
|
||||
"EnableGC": true,
|
||||
"Enable": [
|
||||
"structcheck",
|
||||
"staticcheck",
|
||||
"unconvert",
|
||||
|
||||
"gofmt",
|
||||
"goimports",
|
||||
"golint",
|
||||
"vet"
|
||||
]
|
||||
}
|
32
vendor/github.com/docker/distribution/.mailmap
generated
vendored
|
@ -1,32 +0,0 @@
|
|||
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@users.noreply.github.com>
|
||||
Stephen J Day <stephen.day@docker.com> Stephen Day <stevvooe@gmail.com>
|
||||
Olivier Gambier <olivier@docker.com> Olivier Gambier <dmp42@users.noreply.github.com>
|
||||
Brian Bland <brian.bland@docker.com> Brian Bland <r4nd0m1n4t0r@gmail.com>
|
||||
Brian Bland <brian.bland@docker.com> Brian Bland <brian.t.bland@gmail.com>
|
||||
Josh Hawn <josh.hawn@docker.com> Josh Hawn <jlhawn@berkeley.edu>
|
||||
Richard Scothern <richard.scothern@docker.com> Richard <richard.scothern@gmail.com>
|
||||
Richard Scothern <richard.scothern@docker.com> Richard Scothern <richard.scothern@gmail.com>
|
||||
Andrew Meredith <andymeredith@gmail.com> Andrew Meredith <kendru@users.noreply.github.com>
|
||||
harche <p.harshal@gmail.com> harche <harche@users.noreply.github.com>
|
||||
Jessie Frazelle <jessie@docker.com> <jfrazelle@users.noreply.github.com>
|
||||
Sharif Nassar <sharif@mrwacky.com> Sharif Nassar <mrwacky42@users.noreply.github.com>
|
||||
Sven Dowideit <SvenDowideit@home.org.au> Sven Dowideit <SvenDowideit@users.noreply.github.com>
|
||||
Vincent Giersch <vincent.giersch@ovh.net> Vincent Giersch <vincent@giersch.fr>
|
||||
davidli <wenquan.li@hp.com> davidli <wenquan.li@hpe.com>
|
||||
Omer Cohen <git@omer.io> Omer Cohen <git@omerc.net>
|
||||
Eric Yang <windfarer@gmail.com> Eric Yang <Windfarer@users.noreply.github.com>
|
||||
Nikita Tarasov <nikita@mygento.ru> Nikita <luckyraul@users.noreply.github.com>
|
||||
Yu Wang <yuwa@microsoft.com> yuwaMSFT2 <yuwa@microsoft.com>
|
||||
Yu Wang <yuwa@microsoft.com> Yu Wang (UC) <yuwa@microsoft.com>
|
||||
Olivier Gambier <olivier@docker.com> dmp <dmp@loaner.local>
|
||||
Olivier Gambier <olivier@docker.com> Olivier <o+github@gambier.email>
|
||||
Olivier Gambier <olivier@docker.com> Olivier <dmp42@users.noreply.github.com>
|
||||
Elsan Li 李楠 <elsanli@tencent.com> elsanli(李楠) <elsanli@tencent.com>
|
||||
Rui Cao <ruicao@alauda.io> ruicao <ruicao@alauda.io>
|
||||
Gwendolynne Barr <gwendolynne.barr@docker.com> gbarr01 <gwendolynne.barr@docker.com>
|
||||
Haibing Zhou 周海兵 <zhouhaibing089@gmail.com> zhouhaibing089 <zhouhaibing089@gmail.com>
|
||||
Feng Honglin <tifayuki@gmail.com> tifayuki <tifayuki@gmail.com>
|
||||
Helen Xie <xieyulin821@harmonycloud.cn> Helen-xie <xieyulin821@harmonycloud.cn>
|
||||
Mike Brown <brownwm@us.ibm.com> Mike Brown <mikebrow@users.noreply.github.com>
|
||||
Manish Tomar <manish.tomar@docker.com> Manish Tomar <manishtomar@users.noreply.github.com>
|
||||
Sakeven Jiang <jc5930@sina.cn> sakeven <jc5930@sina.cn>
|
51
vendor/github.com/docker/distribution/.travis.yml
generated
vendored
|
@ -1,51 +0,0 @@
|
|||
dist: trusty
|
||||
sudo: required
|
||||
# setup travis so that we can run containers for integration tests
|
||||
services:
|
||||
- docker
|
||||
|
||||
language: go
|
||||
|
||||
go:
|
||||
- "1.11.x"
|
||||
|
||||
go_import_path: github.com/docker/distribution
|
||||
|
||||
addons:
|
||||
apt:
|
||||
packages:
|
||||
- python-minimal
|
||||
|
||||
|
||||
env:
|
||||
- TRAVIS_GOOS=linux DOCKER_BUILDTAGS="include_oss include_gcs" TRAVIS_CGO_ENABLED=1
|
||||
|
||||
before_install:
|
||||
- uname -r
|
||||
- sudo apt-get -q update
|
||||
|
||||
install:
|
||||
- go get -u github.com/vbatts/git-validation
|
||||
# TODO: Add enforcement of license
|
||||
# - go get -u github.com/kunalkushwaha/ltag
|
||||
- cd $TRAVIS_BUILD_DIR
|
||||
|
||||
script:
|
||||
- export GOOS=$TRAVIS_GOOS
|
||||
- export CGO_ENABLED=$TRAVIS_CGO_ENABLED
|
||||
- DCO_VERBOSITY=-q script/validate/dco
|
||||
- GOOS=linux script/setup/install-dev-tools
|
||||
- script/validate/vendor
|
||||
- go build -i .
|
||||
- make check
|
||||
- make build
|
||||
- make binaries
|
||||
# Currently takes too long
|
||||
#- if [ "$GOOS" = "linux" ]; then make test-race ; fi
|
||||
- if [ "$GOOS" = "linux" ]; then make coverage ; fi
|
||||
|
||||
after_success:
|
||||
- bash <(curl -s https://codecov.io/bash) -F linux
|
||||
|
||||
before_deploy:
|
||||
# Run tests with storage driver configurations
|
117
vendor/github.com/docker/distribution/BUILDING.md
generated
vendored
|
@ -1,117 +0,0 @@
|
|||
|
||||
# Building the registry source
|
||||
|
||||
## Use-case
|
||||
|
||||
This is useful if you intend to actively work on the registry.
|
||||
|
||||
### Alternatives
|
||||
|
||||
Most people should use the [official Registry docker image](https://hub.docker.com/r/library/registry/).
|
||||
|
||||
People looking for advanced operational use cases might consider rolling their own image with a custom Dockerfile inheriting `FROM registry:2`.
|
||||
|
||||
OS X users who want to run natively can do so following [the instructions here](https://github.com/docker/docker.github.io/blob/master/registry/recipes/osx-setup-guide.md).
|
||||
|
||||
### Gotchas
|
||||
|
||||
You are expected to know your way around with go & git.
|
||||
|
||||
If you are a casual user with no development experience, and no preliminary knowledge of go, building from source is probably not a good solution for you.
|
||||
|
||||
## Build the development environment
|
||||
|
||||
The first prerequisite of properly building distribution targets is to have a Go
|
||||
development environment setup. Please follow [How to Write Go Code](https://golang.org/doc/code.html)
|
||||
for proper setup. If done correctly, you should have a GOROOT and GOPATH set in the
|
||||
environment.
|
||||
|
||||
If a Go development environment is setup, one can use `go get` to install the
|
||||
`registry` command from the current latest:
|
||||
|
||||
go get github.com/docker/distribution/cmd/registry
|
||||
|
||||
The above will install the source repository into the `GOPATH`.
|
||||
|
||||
Now create the directory for the registry data (this might require you to set permissions properly)
|
||||
|
||||
mkdir -p /var/lib/registry
|
||||
|
||||
... or alternatively `export REGISTRY_STORAGE_FILESYSTEM_ROOTDIRECTORY=/somewhere` if you want to store data into another location.
|
||||
|
||||
The `registry`
|
||||
binary can then be run with the following:
|
||||
|
||||
$ $GOPATH/bin/registry --version
|
||||
$GOPATH/bin/registry github.com/docker/distribution v2.0.0-alpha.1+unknown
|
||||
|
||||
> __NOTE:__ While you do not need to use `go get` to checkout the distribution
|
||||
> project, for these build instructions to work, the project must be checked
|
||||
> out in the correct location in the `GOPATH`. This should almost always be
|
||||
> `$GOPATH/src/github.com/docker/distribution`.
|
||||
|
||||
The registry can be run with the default config using the following
|
||||
incantation:
|
||||
|
||||
$ $GOPATH/bin/registry serve $GOPATH/src/github.com/docker/distribution/cmd/registry/config-example.yml
|
||||
INFO[0000] endpoint local-5003 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
|
||||
INFO[0000] endpoint local-8083 disabled, skipping app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
|
||||
INFO[0000] listening on :5000 app.id=34bbec38-a91a-494a-9a3f-b72f9010081f version=v2.0.0-alpha.1+unknown
|
||||
INFO[0000] debug server listening localhost:5001
|
||||
|
||||
If it is working, one should see the above log messages.
|
||||
|
||||
### Repeatable Builds
|
||||
|
||||
For the full development experience, one should `cd` into
|
||||
`$GOPATH/src/github.com/docker/distribution`. From there, the regular `go`
|
||||
commands, such as `go test`, should work per package (please see
|
||||
[Developing](#developing) if they don't work).
|
||||
|
||||
A `Makefile` has been provided as a convenience to support repeatable builds.
|
||||
Please install the following into `GOPATH` for it to work:
|
||||
|
||||
go get github.com/golang/lint/golint
|
||||
|
||||
Once these commands are available in the `GOPATH`, run `make` to get a full
|
||||
build:
|
||||
|
||||
$ make
|
||||
+ clean
|
||||
+ fmt
|
||||
+ vet
|
||||
+ lint
|
||||
+ build
|
||||
github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar
|
||||
github.com/sirupsen/logrus
|
||||
github.com/docker/libtrust
|
||||
...
|
||||
github.com/yvasiyarov/gorelic
|
||||
github.com/docker/distribution/registry/handlers
|
||||
github.com/docker/distribution/cmd/registry
|
||||
+ test
|
||||
...
|
||||
ok github.com/docker/distribution/digest 7.875s
|
||||
ok github.com/docker/distribution/manifest 0.028s
|
||||
ok github.com/docker/distribution/notifications 17.322s
|
||||
? github.com/docker/distribution/registry [no test files]
|
||||
ok github.com/docker/distribution/registry/api/v2 0.101s
|
||||
? github.com/docker/distribution/registry/auth [no test files]
|
||||
ok github.com/docker/distribution/registry/auth/silly 0.011s
|
||||
...
|
||||
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry
|
||||
+ /Users/sday/go/src/github.com/docker/distribution/bin/registry-api-descriptor-template
|
||||
+ binaries
|
||||
|
||||
The above provides a repeatable build using the contents of the vendor
|
||||
directory. This includes formatting, vetting, linting, building,
|
||||
testing and generating tagged binaries. We can verify this worked by running
|
||||
the registry binary generated in the "./bin" directory:
|
||||
|
||||
$ ./bin/registry --version
|
||||
./bin/registry github.com/docker/distribution v2.0.0-alpha.2-80-g16d8b2c.m
|
||||
|
||||
### Optional build tags
|
||||
|
||||
Optional [build tags](http://golang.org/pkg/go/build/) can be provided using
|
||||
the environment variable `DOCKER_BUILDTAGS`.
|
Some files were not shown because too many files have changed in this diff.