Mirror of https://github.com/kyverno/kyverno.git

Commit 183b17828e (parent 81ae5b0eb6): support generation of any resource

12 changed files with 200 additions and 231 deletions
@@ -102,12 +102,15 @@ spec:
required:
- kind
- name
- namespace
properties:
kind:
type: string
name:
type: string
copyFrom:
namespace:
type: string
from:
type: object
required:
- namespace

@@ -118,13 +121,7 @@ spec:
name:
type: string
data:
type: object
additionalProperties:
type: string
labels:
type: object
additionalProperties:
type: string
AnyValue: {}
---
kind: Namespace
apiVersion: v1
main.go (36 lines changed)
@@ -2,8 +2,9 @@ package main
import (
"flag"
"log"

"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
controller "github.com/nirmata/kyverno/pkg/controller"
client "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"

@@ -18,45 +19,45 @@ var (
)

func main() {
printVersionInfo()
defer glog.Flush()

printVersionInfo()
clientConfig, err := createClientConfig(kubeconfig)
if err != nil {
log.Fatalf("Error building kubeconfig: %v\n", err)
glog.Fatalf("Error building kubeconfig: %v\n", err)
}

client, err := client.NewClient(clientConfig, nil)
client, err := client.NewClient(clientConfig)
if err != nil {
log.Fatalf("Error creating client: %v\n", err)
glog.Fatalf("Error creating client: %v\n", err)
}

policyInformerFactory, err := sharedinformer.NewSharedInformerFactory(clientConfig)
if err != nil {
log.Fatalf("Error creating policy sharedinformer: %v\n", err)
glog.Fatalf("Error creating policy sharedinformer: %v\n", err)
}
eventController := event.NewEventController(client, policyInformerFactory, nil)
violationBuilder := violation.NewPolicyViolationBuilder(client, policyInformerFactory, eventController, nil)
eventController := event.NewEventController(client, policyInformerFactory)
violationBuilder := violation.NewPolicyViolationBuilder(client, policyInformerFactory, eventController)

policyController := controller.NewPolicyController(
client,
policyInformerFactory,
violationBuilder,
eventController,
nil)
eventController)

tlsPair, err := initTlsPemPair(clientConfig, client)
if err != nil {
log.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
}

server, err := webhooks.NewWebhookServer(client, tlsPair, policyInformerFactory, nil)
server, err := webhooks.NewWebhookServer(client, tlsPair, policyInformerFactory)
if err != nil {
log.Fatalf("Unable to create webhook server: %v\n", err)
glog.Fatalf("Unable to create webhook server: %v\n", err)
}

webhookRegistrationClient, err := webhooks.NewWebhookRegistrationClient(clientConfig, client)
if err != nil {
log.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
glog.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
}

stopCh := signals.SetupSignalHandler()

@@ -65,11 +66,11 @@ func main() {
eventController.Run(stopCh)

if err = policyController.Run(stopCh); err != nil {
log.Fatalf("Error running PolicyController: %v\n", err)
glog.Fatalf("Error running PolicyController: %v\n", err)
}

if err = webhookRegistrationClient.Register(); err != nil {
log.Fatalf("Failed registering Admission Webhooks: %v\n", err)
glog.Fatalf("Failed registering Admission Webhooks: %v\n", err)
}

server.RunAsync()

@@ -80,5 +81,6 @@ func main() {
func init() {
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
config.LogDefaultFlags()
flag.Parse()
}
}
@@ -61,11 +61,11 @@ type Validation struct {
// Generation describes which resources will be created when another resource is created
type Generation struct {
Kind string `json:"kind"`
Name string `json:"name"`
CopyFrom *CopyFrom `json:"copyFrom"`
Data map[string]string `json:"data"`
Labels map[string]string `json:"labels"`
Kind string `json:"kind"`
Name string `json:"name"`
Namespace string `json:"namespace"`
Data interface{} `json:"data"`
From *CopyFrom `json:"from"`
}

// CopyFrom - location of a Secret or a ConfigMap
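For orientation, a minimal sketch of how the reworked Generation might be populated from Go, assuming the struct above and the CopyFrom fields (Namespace, Name) that are referenced elsewhere in this diff; the concrete rule values are hypothetical:

package main

import (
	"fmt"

	types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
)

func main() {
	// "data" mode: the generated resource body travels inline in Data (now interface{}).
	dataMode := types.Generation{
		Kind:      "ConfigMap",
		Name:      "default-config",
		Namespace: "team-a",
		Data: map[string]interface{}{
			"kind":       "ConfigMap",
			"apiVersion": "v1",
			"metadata":   map[string]interface{}{"name": "default-config", "namespace": "team-a"},
			"data":       map[string]interface{}{"env": "production"},
		},
	}

	// "from" mode: clone an existing resource from another namespace.
	// Field names on CopyFrom are assumed from their usage in this diff.
	fromMode := types.Generation{
		Kind:      "Secret",
		Name:      "registry-creds",
		Namespace: "team-a",
		From:      &types.CopyFrom{Namespace: "default", Name: "registry-creds"},
	}

	fmt.Println(dataMode.Kind, fromMode.Kind)
}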
@@ -64,8 +64,8 @@ func (pp *Patch) Validate() error {
// Validate returns error if generator is configured incompletely
func (pcg *Generation) Validate() error {
if len(pcg.Data) == 0 && pcg.CopyFrom == nil {
return fmt.Errorf("Neither Data nor CopyFrom (source) of %s/%s is specified", pcg.Kind, pcg.Name)
if pcg.Data == nil && pcg.From == nil {
return fmt.Errorf("Neither Data nor CopyFrom (source) of %s is specified", pcg.Kind)
}
return nil
}
@@ -93,3 +93,11 @@ func (in *Validation) DeepCopyInto(out *Validation) {
*out = *in
}
}

// DeepCopyInto is declared because k8s:deepcopy-gen is
// not able to generate this method for interface{} member
func (in *Generation) DeepCopyInto(out *Generation) {
if out != nil {
*out = *in
}
}
@@ -1,5 +1,7 @@
package config

import "flag"

const (
// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
KubePolicyNamespace = "kyverno"
@@ -47,3 +49,10 @@ var (
"StatefulSet",
}
)

// LogDefaultFlags sets default glog flags
func LogDefaultFlags() {
flag.Set("logtostderr", "true")
flag.Set("stderrthreshold", "WARNING")
flag.Set("v", "2")
}
@@ -2,10 +2,9 @@ package controller
import (
"fmt"
"log"
"os"
"time"

"github.com/golang/glog"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
lister "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"

@@ -27,7 +26,6 @@ type PolicyController struct {
policySynced cache.InformerSynced
violationBuilder violation.Generator
eventBuilder event.Generator
logger *log.Logger
queue workqueue.RateLimitingInterface
}

@@ -35,19 +33,14 @@ type PolicyController struct {
func NewPolicyController(client *client.Client,
policyInformer sharedinformer.PolicyInformer,
violationBuilder violation.Generator,
eventController event.Generator,
logger *log.Logger) *PolicyController {
eventController event.Generator) *PolicyController {

if logger == nil {
logger = log.New(os.Stdout, "Policy Controller: ", log.LstdFlags)
}
controller := &PolicyController{
client: client,
policyLister: policyInformer.GetLister(),
policySynced: policyInformer.GetInfomer().HasSynced,
violationBuilder: violationBuilder,
eventBuilder: eventController,
logger: logger,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), policyWorkQueueName),
}

@@ -79,7 +72,7 @@ func (pc *PolicyController) deletePolicyHandler(resource interface{}) {
utilruntime.HandleError(fmt.Errorf("error decoding object, invalid type"))
return
}
pc.logger.Printf("policy deleted: %s", object.GetName())
glog.Infof("policy deleted: %s", object.GetName())
}

func (pc *PolicyController) enqueuePolicy(obj interface{}) {

@@ -104,13 +97,13 @@ func (pc *PolicyController) Run(stopCh <-chan struct{}) error {
for i := 0; i < policyControllerWorkerCount; i++ {
go wait.Until(pc.runWorker, time.Second, stopCh)
}
pc.logger.Println("started policy controller workers")
glog.Info("started policy controller workers")

return nil
}

func (pc *PolicyController) Stop() {
pc.logger.Println("shutting down policy controller workers")
glog.Info("shutting down policy controller workers")
}
func (pc *PolicyController) runWorker() {
for pc.processNextWorkItem() {

@@ -143,7 +136,7 @@ func (pc *PolicyController) handleErr(err error, key interface{}) {
}
// This controller retries if something goes wrong. After that, it stops trying.
if pc.queue.NumRequeues(key) < policyWorkQueueRetryLimit {
pc.logger.Printf("Error syncing events %v: %v", key, err)
glog.Warningf("Error syncing events %v: %v", key, err)
// Re-enqueue the key rate limited. Based on the rate limiter on the
// queue and the re-enqueue history, the key will be processed later again.
pc.queue.AddRateLimited(key)

@@ -151,7 +144,7 @@ func (pc *PolicyController) handleErr(err error, key interface{}) {
}
pc.queue.Forget(key)
utilruntime.HandleError(err)
pc.logger.Printf("Dropping the key %q out of the queue: %v", key, err)
glog.Warningf("Dropping the key %q out of the queue: %v", key, err)
}

func (pc *PolicyController) syncHandler(obj interface{}) error {

@@ -179,6 +172,6 @@ func (pc *PolicyController) syncHandler(obj interface{}) error {
// get the violations and pass to violation Builder
// get the events and pass to event Builder
//TODO: processPolicy
pc.logger.Printf("process policy %s on existing resources", policy.GetName())
glog.Infof("process policy %s on existing resources", policy.GetName())
return nil
}
}
@@ -6,6 +6,7 @@ import (
"net/url"
"time"

"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
tls "github.com/nirmata/kyverno/pkg/tls"
certificates "k8s.io/api/certificates/v1beta1"

@@ -61,7 +62,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat
if err != nil {
return nil, errors.New(fmt.Sprintf("Unable to delete existing certificate request: %v", err))
}
c.logger.Printf("Old certificate request is deleted")
glog.Info("Old certificate request is deleted")
break
}
}

@@ -70,7 +71,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat
if err != nil {
return nil, err
}
c.logger.Printf("Certificate request %s is created", unstrRes.GetName())
glog.Infof("Certificate request %s is created", unstrRes.GetName())

res, err := convertToCSR(unstrRes)
if err != nil {

@@ -85,7 +86,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat
if err != nil {
return nil, errors.New(fmt.Sprintf("Unable to approve certificate request: %v", err))
}
c.logger.Printf("Certificate request %s is approved", res.ObjectMeta.Name)
glog.Infof("Certificate request %s is approved", res.ObjectMeta.Name)

return res, nil
}

@@ -136,10 +137,10 @@ func (c *Client) ReadRootCASecret() (result []byte) {
result = tlsca.Data[rootCAKey]
if len(result) == 0 {
c.logger.Printf("root CA certificate not found in secret %s/%s", certProps.Namespace, tlsca.Name)
glog.Warningf("root CA certificate not found in secret %s/%s", certProps.Namespace, tlsca.Name)
return result
}
c.logger.Printf("using CA bundle defined in secret %s/%s to validate the webhook's server certificate", certProps.Namespace, tlsca.Name)
glog.Infof("using CA bundle defined in secret %s/%s to validate the webhook's server certificate", certProps.Namespace, tlsca.Name)
return result
}

@@ -151,7 +152,7 @@ func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair {
sname := generateTLSPairSecretName(props)
unstrSecret, err := c.GetResource(Secrets, props.Namespace, sname)
if err != nil {
c.logger.Printf("Unable to get secret %s/%s: %s", props.Namespace, sname, err)
glog.Warningf("Unable to get secret %s/%s: %s", props.Namespace, sname, err)
return nil
}

@@ -175,11 +176,11 @@ func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair {
PrivateKey: secret.Data[v1.TLSPrivateKeyKey],
}
if len(pemPair.Certificate) == 0 {
c.logger.Printf("TLS Certificate not found in secret %s/%s", props.Namespace, sname)
glog.Warningf("TLS Certificate not found in secret %s/%s", props.Namespace, sname)
return nil
}
if len(pemPair.PrivateKey) == 0 {
c.logger.Printf("TLS PrivateKey not found in secret %s/%s", props.Namespace, sname)
glog.Warningf("TLS PrivateKey not found in secret %s/%s", props.Namespace, sname)
return nil
}
return &pemPair

@@ -209,7 +210,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem
_, err := c.CreateResource(Secrets, props.Namespace, secret)
if err == nil {
c.logger.Printf("Secret %s is created", name)
glog.Infof("Secret %s is created", name)
}
return err
}

@@ -225,7 +226,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem
if err != nil {
return err
}
c.logger.Printf("Secret %s is updated", name)
glog.Infof("Secret %s is updated", name)
return nil
}
@@ -1,11 +1,12 @@
package client

import (
"errors"
"fmt"
"log"
"os"
"strings"
"time"

"github.com/golang/glog"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
apps "k8s.io/api/apps/v1"

@@ -28,28 +29,22 @@ import (
type Client struct {
client dynamic.Interface
cachedClient discovery.CachedDiscoveryInterface
logger *log.Logger
clientConfig *rest.Config
kclient *kubernetes.Clientset
}

func NewClient(config *rest.Config, logger *log.Logger) (*Client, error) {
func NewClient(config *rest.Config) (*Client, error) {
client, err := dynamic.NewForConfig(config)
if err != nil {
return nil, err
}

if logger == nil {
logger = log.New(os.Stdout, "Client : ", log.LstdFlags)
}

kclient, err := kubernetes.NewForConfig(config)
if err != nil {
return nil, err
}

return &Client{
logger: logger,
client: client,
clientConfig: config,
kclient: kclient,
@@ -171,94 +166,89 @@ func ConvertToRuntimeObject(obj *unstructured.Unstructured) (*runtime.Object, er
return &runtimeObj, nil
}

//TODO: make this generic for all resource type
//GenerateSecret to generate secrets

func (c *Client) GenerateSecret(generator types.Generation, namespace string) error {
c.logger.Printf("Preparing to create secret %s/%s", namespace, generator.Name)
secret := v1.Secret{}

if generator.CopyFrom != nil {
c.logger.Printf("Copying data from secret %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
// Get configMap resource
unstrSecret, err := c.GetResource(Secrets, generator.CopyFrom.Namespace, generator.CopyFrom.Name)
if err != nil {
return err
// only support 2 levels of keys
// To-Do support multiple levels of key
func keysExist(data map[string]interface{}, keys ...string) bool {
var v interface{}
var t map[string]interface{}
var ok bool
for _, key := range keys {
ks := strings.Split(key, ".")
if len(ks) > 2 {
glog.Error("Only support 2 levels of keys from root. Support to be extended in future")
return false
}
// typed object
secret, err = convertToSecret(unstrSecret)
if err != nil {
return err
if v, ok = data[ks[0]]; !ok {
glog.Infof("key %s does not exist", key)
return false
}
if len(ks) == 2 {
if t, ok = v.(map[string]interface{}); !ok {
glog.Error("expecting type map[string]interface{}")
}
return keyExist(t, ks[1])
}
}
return true
}

secret.ObjectMeta = meta.ObjectMeta{
Name: generator.Name,
Namespace: namespace,
func keyExist(data map[string]interface{}, key string) (ok bool) {
if _, ok = data[key]; !ok {
glog.Infof("key %s does not exist", key)
}
return ok
}

// Copy data from generator to the new secret
// support mode 'data' -> create resource
// To-Do: support 'from' -> copy/clone the resource
func (c *Client) GenerateResource(generator types.Generation, namespace string) error {
if generator.Data != nil && generator.From != nil {
return errors.New("generate only supports on the modes data or from")
}
var err error
rGVR := c.getGVRFromKind(generator.Kind)
resource := &unstructured.Unstructured{}

var rdata map[string]interface{}
// data -> create new resource
if generator.Data != nil {
if secret.Data == nil {
secret.Data = make(map[string][]byte)
rdata, err = runtime.DefaultUnstructuredConverter.ToUnstructured(&generator.Data)
if err != nil {
utilruntime.HandleError(err)
return err
}

for k, v := range generator.Data {
secret.Data[k] = []byte(v)
// verify if mandatory attributes have been defined
if !keysExist(rdata, "kind", "apiVersion", "metadata.name", "metadata.namespace") {
return errors.New("mandatory keys not defined")
}
}
// from -> create new resource
if generator.From != nil {
resource, err = c.GetResource(rGVR.Resource, generator.From.Namespace, generator.From.Name)
if err != nil {
return err
}
rdata = resource.UnstructuredContent()
}

go c.createSecretAfterNamespaceIsCreated(secret, namespace)
resource.SetUnstructuredContent(rdata)
resource.SetName(generator.Name)
resource.SetNamespace(generator.Namespace)
resource.SetResourceVersion("")

err = c.waitUntilNamespaceIsCreated(namespace)
if err != nil {
glog.Errorf("Can't create a resource %s: %v", generator.Name, err)
return nil
}
_, err = c.CreateResource(rGVR.Resource, generator.Namespace, resource)
if err != nil {
return err
}
return nil
}

//TODO: make this generic for all resource type
//GenerateConfigMap to generate configMap
func (c *Client) GenerateConfigMap(generator types.Generation, namespace string) error {
c.logger.Printf("Preparing to create configmap %s/%s", namespace, generator.Name)
configMap := v1.ConfigMap{}

if generator.CopyFrom != nil {
c.logger.Printf("Copying data from configmap %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
// Get configMap resource
unstrConfigMap, err := c.GetResource(ConfigMaps, generator.CopyFrom.Namespace, generator.CopyFrom.Name)
if err != nil {
return err
}
// typed object
configMap, err = convertToConfigMap(unstrConfigMap)
if err != nil {
return err
}
}

configMap.ObjectMeta = meta.ObjectMeta{
Name: generator.Name,
Namespace: namespace,
}

// Copy data from generator to the new configmap
if generator.Data != nil {
if configMap.Data == nil {
configMap.Data = make(map[string]string)
}

for k, v := range generator.Data {
configMap.Data[k] = v
}
}
go c.createConfigMapAfterNamespaceIsCreated(configMap, namespace)
return nil
}

func convertToConfigMap(obj *unstructured.Unstructured) (v1.ConfigMap, error) {
configMap := v1.ConfigMap{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &configMap); err != nil {
return configMap, err
}
return configMap, nil
}

//To-Do remove this to use unstructured type
func convertToSecret(obj *unstructured.Unstructured) (v1.Secret, error) {
secret := v1.Secret{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &secret); err != nil {
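The mandatory-attribute check above walks at most two levels of dotted keys ("metadata.name" works, deeper paths do not); a standalone sketch of the same idea, not the package-private helper itself:

package main

import (
	"fmt"
	"strings"
)

// hasKeys mirrors the two-level dotted-key lookup used above:
// "kind" checks data["kind"], "metadata.name" checks data["metadata"]["name"].
func hasKeys(data map[string]interface{}, keys ...string) bool {
	for _, key := range keys {
		parts := strings.Split(key, ".")
		if len(parts) > 2 {
			return false // only two levels from the root are supported
		}
		v, ok := data[parts[0]]
		if !ok {
			return false
		}
		if len(parts) == 2 {
			child, ok := v.(map[string]interface{})
			if !ok {
				return false
			}
			if _, ok := child[parts[1]]; !ok {
				return false
			}
		}
	}
	return true
}

func main() {
	resource := map[string]interface{}{
		"kind":       "ConfigMap",
		"apiVersion": "v1",
		"metadata":   map[string]interface{}{"name": "cfg", "namespace": "team-a"},
	}
	fmt.Println(hasKeys(resource, "kind", "apiVersion", "metadata.name", "metadata.namespace")) // true
	fmt.Println(hasKeys(resource, "metadata.labels"))                                           // false
}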
@@ -267,6 +257,7 @@ func convertToSecret(obj *unstructured.Unstructured) (v1.Secret, error) {
return secret, nil
}

//To-Do remove this to use unstructured type
func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSigningRequest, error) {
csr := certificates.CertificateSigningRequest{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &csr); err != nil {

@@ -275,26 +266,6 @@ func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSign
return &csr, nil
}

func (c *Client) createConfigMapAfterNamespaceIsCreated(configMap v1.ConfigMap, namespace string) {
err := c.waitUntilNamespaceIsCreated(namespace)
if err == nil {
_, err = c.CreateResource(ConfigMaps, namespace, configMap)
}
if err != nil {
c.logger.Printf("Can't create a configmap: %s", err)
}
}

func (c *Client) createSecretAfterNamespaceIsCreated(secret v1.Secret, namespace string) {
err := c.waitUntilNamespaceIsCreated(namespace)
if err == nil {
_, err = c.CreateResource(Secrets, namespace, secret)
}
if err != nil {
c.logger.Printf("Can't create a secret: %s", err)
}
}

// Waits until namespace is created with maximum duration maxWaitTimeForNamespaceCreation
func (c *Client) waitUntilNamespaceIsCreated(name string) error {
timeStart := time.Now()

@@ -331,3 +302,25 @@ func (c *Client) getGVR(resource string) schema.GroupVersionResource {
}
return emptyGVR
}

func (c *Client) getGVRFromKind(kind string) schema.GroupVersionResource {
emptyGVR := schema.GroupVersionResource{}
serverresources, err := c.cachedClient.ServerPreferredResources()
if err != nil {
utilruntime.HandleError(err)
return emptyGVR
}
for _, serverresource := range serverresources {
for _, resource := range serverresource.APIResources {
if resource.Kind == kind && !strings.Contains(resource.Name, "/") {
gv, err := schema.ParseGroupVersion(serverresource.GroupVersion)
if err != nil {
utilruntime.HandleError(err)
return emptyGVR
}
return gv.WithResource(resource.Name)
}
}
}
return emptyGVR
}
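The kind-to-GVR lookup above ends in schema.ParseGroupVersion(...).WithResource(...); a minimal sketch of that last step, using hypothetical discovery output instead of a live cluster:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/runtime/schema"
)

func main() {
	// Values a discovery call might return for the Deployment kind (hypothetical).
	groupVersion := "apps/v1"
	resourceName := "deployments"

	gv, err := schema.ParseGroupVersion(groupVersion)
	if err != nil {
		panic(err)
	}
	gvr := gv.WithResource(resourceName)
	fmt.Println(gvr) // e.g. apps/v1, Resource=deployments
}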
@@ -2,15 +2,15 @@ package engine
import (
"fmt"
"log"

"github.com/golang/glog"
kubepolicy "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// Generate should be called to process generate rules on the resource
func Generate(client *client.Client, logger *log.Logger, policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) {
func Generate(client *client.Client, policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) {
// configMapGenerator and secretGenerator can be applied only to namespaces
// TODO: support for any resource
if gvk.Kind != "Namespace" {

@@ -21,13 +21,13 @@ func Generate(client *client.Client, logger *log.Logger, policy kubepolicy.Polic
ok := ResourceMeetsDescription(rawResource, rule.ResourceDescription, gvk)

if !ok {
logger.Printf("Rule is not applicable to the request: rule name = %s in policy %s \n", rule.Name, policy.ObjectMeta.Name)
glog.Infof("Rule is not applicable to the request: rule name = %s in policy %s \n", rule.Name, policy.ObjectMeta.Name)
continue
}

err := applyRuleGenerator(client, rawResource, rule.Generation, gvk)
if err != nil {
logger.Printf("Failed to apply rule generator: %v", err)
glog.Warningf("Failed to apply rule generator: %v", err)
}
}
}

@@ -42,19 +42,10 @@ func applyRuleGenerator(client *client.Client, rawResource []byte, generator *ku
var err error

namespace := ParseNameFromObject(rawResource)
switch generator.Kind {
case "ConfigMap":
err = client.GenerateConfigMap(*generator, namespace)
case "Secret":
err = client.GenerateSecret(*generator, namespace)
default:
err = fmt.Errorf("Unsupported config Kind '%s'", generator.Kind)
}

err = client.GenerateResource(*generator, namespace)
if err != nil {
return fmt.Errorf("Unable to apply generator for %s '%s/%s' : %v", generator.Kind, namespace, generator.Name, err)
return fmt.Errorf("Unable to apply generator for %s %s: %v", generator.Kind, namespace, err)
}

log.Printf("Successfully applied generator %s/%s", generator.Kind, generator.Name)
glog.Infof("Successfully applied generator %s", generator.Kind)
return nil
}
@@ -2,10 +2,9 @@ package event
import (
"fmt"
"log"
"os"
"time"

"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
policyscheme "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"

@@ -26,7 +25,6 @@ type controller struct {
policyLister v1alpha1.PolicyLister
queue workqueue.RateLimitingInterface
recorder record.EventRecorder
logger *log.Logger
}

//Generator to generate event

@@ -43,19 +41,13 @@ type Controller interface {
//NewEventController to generate a new event controller
func NewEventController(client *client.Client,
shareInformer sharedinformer.PolicyInformer,
logger *log.Logger) Controller {

if logger == nil {
logger = log.New(os.Stdout, "Event Controller: ", log.LstdFlags)
}
shareInformer sharedinformer.PolicyInformer) Controller {

controller := &controller{
client: client,
policyLister: shareInformer.GetLister(),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName),
recorder: initRecorder(client),
logger: logger,
}
return controller
}

@@ -68,7 +60,7 @@ func initRecorder(client *client.Client) record.EventRecorder {
return nil
}
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(log.Printf)
eventBroadcaster.StartLogging(glog.Infof)
eventInterface, err := client.GetEventsInterface()
if err != nil {
utilruntime.HandleError(err) // TODO: add more specific error

@@ -94,11 +86,11 @@ func (c *controller) Run(stopCh <-chan struct{}) {
for i := 0; i < eventWorkerThreadCount; i++ {
go wait.Until(c.runWorker, time.Second, stopCh)
}
c.logger.Println("Started eventbuilder controller workers")
glog.Info("Started eventbuilder controller workers")
}

func (c *controller) Stop() {
c.logger.Println("Shutting down eventbuilder controller workers")
glog.Info("Shutting down eventbuilder controller workers")
}
func (c *controller) runWorker() {
for c.processNextWorkItem() {

@@ -116,7 +108,7 @@ func (c *controller) processNextWorkItem() bool {
var ok bool
if key, ok = obj.(Info); !ok {
c.queue.Forget(obj)
c.logger.Printf("Expecting type info by got %v\n", obj)
glog.Warningf("Expecting type info by got %v\n", obj)
return nil
}
// Run the syncHandler, passing the resource and the policy

@@ -128,7 +120,7 @@ func (c *controller) processNextWorkItem() bool {
}(obj)

if err != nil {
c.logger.Println((err))
glog.Warning(err)
}
return true
}
@@ -2,9 +2,8 @@ package violation
import (
"fmt"
"log"
"os"

"github.com/golang/glog"
types "github.com/nirmata/kyverno/pkg/apis/policy/v1alpha1"
v1alpha1 "github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"

@@ -23,7 +22,6 @@ type builder struct {
client *client.Client
policyLister v1alpha1.PolicyLister
eventBuilder event.Generator
logger *log.Logger
}

//Builder is to build policy violations

@@ -36,18 +34,12 @@ type Builder interface {
//NewPolicyViolationBuilder returns new violation builder
func NewPolicyViolationBuilder(client *client.Client,
sharedInfomer sharedinformer.PolicyInformer,
eventController event.Generator,
logger *log.Logger) Builder {

if logger == nil {
logger = log.New(os.Stdout, "Violation Builder: ", log.LstdFlags)
}
eventController event.Generator) Builder {

builder := &builder{
client: client,
policyLister: sharedInfomer.GetLister(),
eventBuilder: eventController,
logger: logger,
}
return builder
}

@@ -81,7 +73,7 @@ func (b *builder) processViolation(info Info) error {
continue
}
if !ok {
b.logger.Printf("removed violation")
glog.Info("removed violation")
}
}
// If violation already exists for this rule, we update the violation
@@ -7,11 +7,10 @@ import (
"errors"
"fmt"
"io/ioutil"
"log"
"net/http"
"os"
"time"

"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/client/listers/policy/v1alpha1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"

@@ -29,7 +28,6 @@ type WebhookServer struct {
server http.Server
client *client.Client
policyLister v1alpha1.PolicyLister
logger *log.Logger
}

// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration

@@ -37,11 +35,7 @@ type WebhookServer struct {
func NewWebhookServer(
client *client.Client,
tlsPair *tlsutils.TlsPemPair,
shareInformer sharedinformer.PolicyInformer,
logger *log.Logger) (*WebhookServer, error) {
if logger == nil {
logger = log.New(os.Stdout, "Webhook Server: ", log.LstdFlags)
}
shareInformer sharedinformer.PolicyInformer) (*WebhookServer, error) {

if tlsPair == nil {
return nil, errors.New("NewWebhookServer is not initialized properly")

@@ -57,7 +51,6 @@ func NewWebhookServer(
ws := &WebhookServer{
client: client,
policyLister: shareInformer.GetLister(),
logger: logger,
}

mux := http.NewServeMux()

@@ -68,7 +61,6 @@ func NewWebhookServer(
Addr: ":443", // Listen on port for HTTPS requests
TLSConfig: &tlsConfig,
Handler: mux,
ErrorLog: logger,
ReadTimeout: 15 * time.Second,
WriteTimeout: 15 * time.Second,
}

@@ -112,11 +104,10 @@ func (ws *WebhookServer) RunAsync() {
go func(ws *WebhookServer) {
err := ws.server.ListenAndServeTLS("", "")
if err != nil {
ws.logger.Fatal(err)
glog.Fatal(err)
}
}(ws)

ws.logger.Printf("Started Webhook Server")
glog.Info("Started Webhook Server")
}

// Stop TLS server and returns control after the server is shut down

@@ -124,25 +115,26 @@ func (ws *WebhookServer) Stop() {
err := ws.server.Shutdown(context.Background())
if err != nil {
// Error from closing listeners, or context timeout:
ws.logger.Printf("Server Shutdown error: %v", err)
glog.Infof("Server Shutdown error: %v", err)
ws.server.Close()
}
}

// HandleMutation handles mutating webhook admission request
func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
ws.logger.Printf("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
glog.Infof("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)

policies, err := ws.policyLister.List(labels.NewSelector())
if err != nil {
ws.logger.Printf("%v", err)
glog.Warning(err)
return nil
}

var allPatches []engine.PatchBytes
for _, policy := range policies {
ws.logger.Printf("Applying policy %s with %d rules\n", policy.ObjectMeta.Name, len(policy.Spec.Rules))
glog.Infof("Applying policy %s with %d rules\n", policy.ObjectMeta.Name, len(policy.Spec.Rules))

policyPatches, _ := engine.Mutate(*policy, request.Object.Raw, request.Kind)
allPatches = append(allPatches, policyPatches...)

@@ -150,7 +142,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) *v1be
if len(policyPatches) > 0 {
namespace := engine.ParseNamespaceFromObject(request.Object.Raw)
name := engine.ParseNameFromObject(request.Object.Raw)
ws.logger.Printf("Mutation from policy %s has applied to %s %s/%s", policy.Name, request.Kind.Kind, namespace, name)
glog.Infof("Mutation from policy %s has applied to %s %s/%s", policy.Name, request.Kind.Kind, namespace, name)
}
}

@@ -164,22 +156,22 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) *v1be
// HandleValidation handles validating webhook admission request
func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
ws.logger.Printf("Handling validation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
glog.Infof("Handling validation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)

policies, err := ws.policyLister.List(labels.NewSelector())
if err != nil {
ws.logger.Printf("%v", err)
glog.Warning(err)
return nil
}

for _, policy := range policies {
// validation
ws.logger.Printf("Validating resource with policy %s with %d rules", policy.ObjectMeta.Name, len(policy.Spec.Rules))
glog.Infof("Validating resource with policy %s with %d rules", policy.ObjectMeta.Name, len(policy.Spec.Rules))

if err := engine.Validate(*policy, request.Object.Raw, request.Kind); err != nil {
message := fmt.Sprintf("validation has failed: %s", err.Error())
ws.logger.Println(message)
glog.Warning(message)

return &v1beta1.AdmissionResponse{
Allowed: false,

@@ -190,10 +182,9 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest) *v1
}

// generation
engine.Generate(ws.client, ws.logger, *policy, request.Object.Raw, request.Kind)
engine.Generate(ws.client, *policy, request.Object.Raw, request.Kind)
}

ws.logger.Println("Validation is successful")
glog.Info("Validation is successful")
return &v1beta1.AdmissionResponse{
Allowed: true,
}

@@ -210,21 +201,21 @@ func (ws *WebhookServer) bodyToAdmissionReview(request *http.Request, writer htt
}
}
if len(body) == 0 {
ws.logger.Print("Error: empty body")
glog.Error("Error: empty body")
http.Error(writer, "empty body", http.StatusBadRequest)
return nil
}

contentType := request.Header.Get("Content-Type")
if contentType != "application/json" {
ws.logger.Printf("Error: invalid Content-Type: %v", contentType)
glog.Errorf("Error: invalid Content-Type: %v", contentType)
http.Error(writer, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
return nil
}

admissionReview := &v1beta1.AdmissionReview{}
if err := json.Unmarshal(body, &admissionReview); err != nil {
ws.logger.Printf("Error: Can't decode body as AdmissionReview: %v", err)
glog.Errorf("Error: Can't decode body as AdmissionReview: %v", err)
http.Error(writer, "Can't decode body as AdmissionReview", http.StatusExpectationFailed)
return nil
} else {
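The body-to-AdmissionReview step above is plain JSON decoding of the admission payload; a minimal standalone sketch, where the request body is a made-up fragment:

package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/api/admission/v1beta1"
)

func main() {
	// Hypothetical admission request body, trimmed to a few fields.
	body := []byte(`{"request":{"uid":"123","kind":{"kind":"Namespace"},"operation":"CREATE"}}`)

	review := &v1beta1.AdmissionReview{}
	if err := json.Unmarshal(body, review); err != nil {
		panic(err)
	}
	fmt.Println(review.Request.Kind.Kind, review.Request.Operation) // Namespace CREATE
}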