mirror of
https://github.com/kyverno/kyverno.git
synced 2025-04-09 10:42:22 +00:00
replace typed client with dynamic client
This commit is contained in:
commit
11d0923e36
20 changed files with 691 additions and 932 deletions
|
@ -1,4 +1,4 @@
|
|||
package kubeclient
|
||||
package client
|
||||
|
||||
import (
|
||||
"errors"
|
||||
|
@ -7,14 +7,13 @@ import (
|
|||
|
||||
tls "github.com/nirmata/kube-policy/pkg/tls"
|
||||
certificates "k8s.io/api/certificates/v1beta1"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
// Issues TLS certificate for webhook server using given PEM private key
|
||||
// Returns signed and approved TLS certificate in PEM format
|
||||
func (kc *KubeClient) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.TlsPemPair, error) {
|
||||
func (c *Client) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.TlsPemPair, error) {
|
||||
privateKey, err := tls.TlsGeneratePrivateKey()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -25,12 +24,12 @@ func (kc *KubeClient) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.Tl
|
|||
return nil, errors.New(fmt.Sprintf("Unable to create certificate request: %v", err))
|
||||
}
|
||||
|
||||
certRequest, err = kc.submitAndApproveCertificateRequest(certRequest)
|
||||
certRequest, err = c.submitAndApproveCertificateRequest(certRequest)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Unable to submit and approve certificate request: %v", err))
|
||||
}
|
||||
|
||||
tlsCert, err := kc.fetchCertificateFromRequest(certRequest, 10)
|
||||
tlsCert, err := c.fetchCertificateFromRequest(certRequest, 10)
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Unable to fetch certificate from request: %v", err))
|
||||
}
|
||||
|
@ -42,31 +41,37 @@ func (kc *KubeClient) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.Tl
|
|||
}
|
||||
|
||||
// Submits and approves certificate request, returns request which need to be fetched
|
||||
func (kc *KubeClient) submitAndApproveCertificateRequest(req *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
|
||||
certClient := kc.client.CertificatesV1beta1().CertificateSigningRequests()
|
||||
|
||||
csrList, err := certClient.List(metav1.ListOptions{})
|
||||
func (c *Client) submitAndApproveCertificateRequest(req *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) {
|
||||
certClient, err := c.GetCSRInterface()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
csrList, err := c.ListResource(CSR, "")
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Unable to list existing certificate requests: %v", err))
|
||||
}
|
||||
|
||||
for _, csr := range csrList.Items {
|
||||
if csr.ObjectMeta.Name == req.ObjectMeta.Name {
|
||||
err := certClient.Delete(csr.ObjectMeta.Name, defaultDeleteOptions())
|
||||
if csr.GetName() == req.ObjectMeta.Name {
|
||||
err := c.DeleteResouce(CSR, "", csr.GetName())
|
||||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Unable to delete existing certificate request: %v", err))
|
||||
}
|
||||
kc.logger.Printf("Old certificate request is deleted")
|
||||
c.logger.Printf("Old certificate request is deleted")
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
res, err := certClient.Create(req)
|
||||
unstrRes, err := c.CreateResource(CSR, "", req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
kc.logger.Printf("Certificate request %s is created", req.ObjectMeta.Name)
|
||||
c.logger.Printf("Certificate request %s is created", unstrRes.GetName())
|
||||
|
||||
res, err := convertToCSR(unstrRes)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
res.Status.Conditions = append(res.Status.Conditions, certificates.CertificateSigningRequestCondition{
|
||||
Type: certificates.CertificateApproved,
|
||||
Reason: "NKP-Approve",
|
||||
|
@ -76,20 +81,21 @@ func (kc *KubeClient) submitAndApproveCertificateRequest(req *certificates.Certi
|
|||
if err != nil {
|
||||
return nil, errors.New(fmt.Sprintf("Unable to approve certificate request: %v", err))
|
||||
}
|
||||
kc.logger.Printf("Certificate request %s is approved", res.ObjectMeta.Name)
|
||||
c.logger.Printf("Certificate request %s is approved", res.ObjectMeta.Name)
|
||||
|
||||
return res, nil
|
||||
}
|
||||
|
||||
const certificateFetchWaitInterval time.Duration = 200 * time.Millisecond
|
||||
|
||||
// Fetches certificate from given request. Tries to obtain certificate for maxWaitSeconds
|
||||
func (kc *KubeClient) fetchCertificateFromRequest(req *certificates.CertificateSigningRequest, maxWaitSeconds uint8) ([]byte, error) {
|
||||
func (c *Client) fetchCertificateFromRequest(req *certificates.CertificateSigningRequest, maxWaitSeconds uint8) ([]byte, error) {
|
||||
// TODO: react of SIGINT and SIGTERM
|
||||
timeStart := time.Now()
|
||||
certClient := kc.client.CertificatesV1beta1().CertificateSigningRequests()
|
||||
for time.Now().Sub(timeStart) < time.Duration(maxWaitSeconds)*time.Second {
|
||||
r, err := certClient.Get(req.ObjectMeta.Name, metav1.GetOptions{})
|
||||
unstrR, err := c.GetResource(CSR, "", req.ObjectMeta.Name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
r, err := convertToCSR(unstrR)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -111,24 +117,27 @@ const privateKeyField string = "privateKey"
|
|||
const certificateField string = "certificate"
|
||||
|
||||
// Reads the pair of TLS certificate and key from the specified secret.
|
||||
func (kc *KubeClient) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair {
|
||||
func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair {
|
||||
name := generateSecretName(props)
|
||||
secret, err := kc.client.CoreV1().Secrets(props.Namespace).Get(name, metav1.GetOptions{})
|
||||
unstrSecret, err := c.GetResource(Secret, props.Namespace, name)
|
||||
if err != nil {
|
||||
c.logger.Printf("Unable to get secret %s/%s: %s", props.Namespace, name, err)
|
||||
return nil
|
||||
}
|
||||
secret, err := convertToSecret(unstrSecret)
|
||||
if err != nil {
|
||||
kc.logger.Printf("Unable to get secret %s/%s: %s", props.Namespace, name, err)
|
||||
return nil
|
||||
}
|
||||
|
||||
pemPair := tls.TlsPemPair{
|
||||
Certificate: secret.Data[certificateField],
|
||||
PrivateKey: secret.Data[privateKeyField],
|
||||
}
|
||||
if len(pemPair.Certificate) == 0 {
|
||||
kc.logger.Printf("TLS Certificate not found in secret %s/%s", props.Namespace, name)
|
||||
c.logger.Printf("TLS Certificate not found in secret %s/%s", props.Namespace, name)
|
||||
return nil
|
||||
}
|
||||
if len(pemPair.PrivateKey) == 0 {
|
||||
kc.logger.Printf("TLS PrivateKey not found in secret %s/%s", props.Namespace, name)
|
||||
c.logger.Printf("TLS PrivateKey not found in secret %s/%s", props.Namespace, name)
|
||||
return nil
|
||||
}
|
||||
return &pemPair
|
||||
|
@ -136,24 +145,28 @@ func (kc *KubeClient) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair
|
|||
|
||||
// Writes the pair of TLS certificate and key to the specified secret.
|
||||
// Updates existing secret or creates new one.
|
||||
func (kc *KubeClient) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPemPair) error {
|
||||
func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPemPair) error {
|
||||
name := generateSecretName(props)
|
||||
secret, err := kc.client.CoreV1().Secrets(props.Namespace).Get(name, metav1.GetOptions{})
|
||||
unstrSecret, err := c.GetResource(Secret, props.Namespace, name)
|
||||
if err == nil {
|
||||
secret, err := convertToSecret(unstrSecret)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err == nil { // Update existing secret
|
||||
if secret.Data == nil {
|
||||
secret.Data = make(map[string][]byte)
|
||||
}
|
||||
secret.Data[certificateField] = pemPair.Certificate
|
||||
secret.Data[privateKeyField] = pemPair.PrivateKey
|
||||
|
||||
secret, err = kc.client.CoreV1().Secrets(props.Namespace).Update(secret)
|
||||
_, err = c.UpdateResource(Secret, props.Namespace, secret)
|
||||
if err == nil {
|
||||
kc.logger.Printf("Secret %s is updated", name)
|
||||
c.logger.Printf("Secret %s is updated", name)
|
||||
}
|
||||
|
||||
} else { // Create new secret
|
||||
secret = &v1.Secret{
|
||||
} else {
|
||||
|
||||
secret := &v1.Secret{
|
||||
TypeMeta: metav1.TypeMeta{
|
||||
Kind: "Secret",
|
||||
APIVersion: "v1",
|
||||
|
@ -168,9 +181,9 @@ func (kc *KubeClient) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.T
|
|||
},
|
||||
}
|
||||
|
||||
secret, err = kc.client.CoreV1().Secrets(props.Namespace).Create(secret)
|
||||
_, err := c.CreateResource(Secret, props.Namespace, secret)
|
||||
if err == nil {
|
||||
kc.logger.Printf("Secret %s is created", name)
|
||||
c.logger.Printf("Secret %s is created", name)
|
||||
}
|
||||
}
|
||||
return err
|
317
client/client.go
Normal file
317
client/client.go
Normal file
|
@ -0,0 +1,317 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
|
||||
"github.com/nirmata/kube-policy/pkg/config"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
certificates "k8s.io/api/certificates/v1beta1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/dynamic"
|
||||
csrtype "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
|
||||
event "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
type Client struct {
|
||||
logger *log.Logger
|
||||
client dynamic.Interface
|
||||
clientConfig *rest.Config
|
||||
}
|
||||
|
||||
func NewClient(config *rest.Config, logger *log.Logger) (*Client, error) {
|
||||
client, err := dynamic.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if logger == nil {
|
||||
logger = log.New(os.Stdout, "Client : ", log.LstdFlags)
|
||||
}
|
||||
|
||||
return &Client{
|
||||
logger: logger,
|
||||
client: client,
|
||||
clientConfig: config,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (c *Client) GetKubePolicyDeployment() (*apps.Deployment, error) {
|
||||
kubePolicyDeployment, err := c.GetResource("deployments", config.KubePolicyNamespace, config.KubePolicyDeploymentName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
deploy := apps.Deployment{}
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(kubePolicyDeployment.UnstructuredContent(), &deploy); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &deploy, nil
|
||||
}
|
||||
|
||||
//TODO: can we use dynamic client to fetch the typed interface
|
||||
// or generate a kube client value to access the interface
|
||||
//GetEventsInterface provides typed interface for events
|
||||
func (c *Client) GetEventsInterface() (event.EventInterface, error) {
|
||||
kubeClient, err := newKubeClient(c.clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return kubeClient.CoreV1().Events(""), nil
|
||||
}
|
||||
|
||||
func (c *Client) GetCSRInterface() (csrtype.CertificateSigningRequestInterface, error) {
|
||||
kubeClient, err := newKubeClient(c.clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kubeClient.CertificatesV1beta1().CertificateSigningRequests(), nil
|
||||
}
|
||||
|
||||
func (c *Client) getInterface(kind string) dynamic.NamespaceableResourceInterface {
|
||||
return c.client.Resource(c.getGroupVersionMapper(kind))
|
||||
}
|
||||
|
||||
func (c *Client) getResourceInterface(kind string, namespace string) dynamic.ResourceInterface {
|
||||
// Get the resource interface
|
||||
namespaceableInterface := c.getInterface(kind)
|
||||
// Get the namespacable interface
|
||||
var resourceInteface dynamic.ResourceInterface
|
||||
if namespace != "" {
|
||||
resourceInteface = namespaceableInterface.Namespace(namespace)
|
||||
} else {
|
||||
resourceInteface = namespaceableInterface
|
||||
}
|
||||
return resourceInteface
|
||||
}
|
||||
|
||||
// Keep this a stateful as the resource list will be based on the kubernetes version we connect to
|
||||
func (c *Client) getGroupVersionMapper(kind string) schema.GroupVersionResource {
|
||||
//TODO: add checks to see if the kind is supported
|
||||
//TODO: build the resource list dynamically( by querying the registered resource kinds)
|
||||
//TODO: the error scenarios
|
||||
return getGrpVersionMapper(kind, c.clientConfig, false)
|
||||
}
|
||||
|
||||
// GetResource returns the resource in unstructured/json format
|
||||
func (c *Client) GetResource(kind string, namespace string, name string) (*unstructured.Unstructured, error) {
|
||||
return c.getResourceInterface(kind, namespace).Get(name, meta.GetOptions{})
|
||||
}
|
||||
|
||||
// ListResource returns the list of resources in unstructured/json format
|
||||
// Access items using []Items
|
||||
func (c *Client) ListResource(kind string, namespace string) (*unstructured.UnstructuredList, error) {
|
||||
return c.getResourceInterface(kind, namespace).List(meta.ListOptions{})
|
||||
}
|
||||
|
||||
func (c *Client) DeleteResouce(kind string, namespace string, name string) error {
|
||||
return c.getResourceInterface(kind, namespace).Delete(name, &meta.DeleteOptions{})
|
||||
|
||||
}
|
||||
|
||||
// CreateResource creates object for the specified kind/namespace
|
||||
func (c *Client) CreateResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
|
||||
// convert typed to unstructured obj
|
||||
if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil {
|
||||
return c.getResourceInterface(kind, namespace).Create(unstructuredObj, meta.CreateOptions{})
|
||||
}
|
||||
return nil, fmt.Errorf("Unable to create resource ")
|
||||
}
|
||||
|
||||
// UpdateResource updates object for the specified kind/namespace
|
||||
func (c *Client) UpdateResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
|
||||
// convert typed to unstructured obj
|
||||
if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil {
|
||||
return c.getResourceInterface(kind, namespace).Update(unstructuredObj, meta.UpdateOptions{})
|
||||
}
|
||||
return nil, fmt.Errorf("Unable to update resource ")
|
||||
}
|
||||
|
||||
// UpdateStatusResource updates the resource "status" subresource
|
||||
func (c *Client) UpdateStatusResource(kind string, namespace string, obj interface{}) (*unstructured.Unstructured, error) {
|
||||
// convert typed to unstructured obj
|
||||
if unstructuredObj := convertToUnstructured(obj); unstructuredObj != nil {
|
||||
return c.getResourceInterface(kind, namespace).UpdateStatus(unstructuredObj, meta.UpdateOptions{})
|
||||
}
|
||||
return nil, fmt.Errorf("Unable to update resource ")
|
||||
}
|
||||
|
||||
func convertToUnstructured(obj interface{}) *unstructured.Unstructured {
|
||||
unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(obj)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("Unable to convert : %v", err))
|
||||
return nil
|
||||
}
|
||||
return &unstructured.Unstructured{Object: unstructuredObj}
|
||||
}
|
||||
|
||||
//ConvertToRuntimeObject converts unstructed to runtime.Object runtime instance
|
||||
func ConvertToRuntimeObject(obj *unstructured.Unstructured) (*runtime.Object, error) {
|
||||
scheme := runtime.NewScheme()
|
||||
gvk := obj.GroupVersionKind()
|
||||
runtimeObj, err := scheme.New(gvk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &runtimeObj); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &runtimeObj, nil
|
||||
}
|
||||
|
||||
//TODO: make this generic for all resource type
|
||||
//GenerateSecret to generate secrets
|
||||
|
||||
func (c *Client) GenerateSecret(generator types.Generation, namespace string) error {
|
||||
c.logger.Printf("Preparing to create secret %s/%s", namespace, generator.Name)
|
||||
secret := &v1.Secret{}
|
||||
|
||||
// if generator.CopyFrom != nil {
|
||||
c.logger.Printf("Copying data from secret %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
|
||||
// Get configMap resource
|
||||
unstrSecret, err := c.GetResource(Secret, generator.CopyFrom.Namespace, generator.CopyFrom.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// typed object
|
||||
secret, err = convertToSecret(unstrSecret)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// }
|
||||
|
||||
secret.ObjectMeta = meta.ObjectMeta{
|
||||
Name: generator.Name,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
// Copy data from generator to the new secret
|
||||
if generator.Data != nil {
|
||||
if secret.Data == nil {
|
||||
secret.Data = make(map[string][]byte)
|
||||
}
|
||||
|
||||
for k, v := range generator.Data {
|
||||
secret.Data[k] = []byte(v)
|
||||
}
|
||||
}
|
||||
|
||||
go c.createSecretAfterNamespaceIsCreated(*secret, namespace)
|
||||
return nil
|
||||
}
|
||||
|
||||
//TODO: make this generic for all resource type
|
||||
//GenerateConfigMap to generate configMap
|
||||
func (c *Client) GenerateConfigMap(generator types.Generation, namespace string) error {
|
||||
c.logger.Printf("Preparing to create configmap %s/%s", namespace, generator.Name)
|
||||
configMap := &v1.ConfigMap{}
|
||||
|
||||
// if generator.CopyFrom != nil {
|
||||
c.logger.Printf("Copying data from configmap %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
|
||||
// Get configMap resource
|
||||
unstrConfigMap, err := c.GetResource("configmaps", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// typed object
|
||||
configMap, err = convertToConfigMap(unstrConfigMap)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// }
|
||||
configMap.ObjectMeta = meta.ObjectMeta{
|
||||
Name: generator.Name,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
// Copy data from generator to the new configmap
|
||||
if generator.Data != nil {
|
||||
if configMap.Data == nil {
|
||||
configMap.Data = make(map[string]string)
|
||||
}
|
||||
|
||||
for k, v := range generator.Data {
|
||||
configMap.Data[k] = v
|
||||
}
|
||||
}
|
||||
go c.createConfigMapAfterNamespaceIsCreated(*configMap, namespace)
|
||||
return nil
|
||||
}
|
||||
|
||||
func convertToConfigMap(obj *unstructured.Unstructured) (*v1.ConfigMap, error) {
|
||||
configMap := v1.ConfigMap{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &configMap); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &configMap, nil
|
||||
}
|
||||
|
||||
func convertToSecret(obj *unstructured.Unstructured) (*v1.Secret, error) {
|
||||
secret := v1.Secret{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &secret); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &secret, nil
|
||||
}
|
||||
|
||||
func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSigningRequest, error) {
|
||||
csr := certificates.CertificateSigningRequest{}
|
||||
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &csr); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &csr, nil
|
||||
}
|
||||
|
||||
func (c *Client) createConfigMapAfterNamespaceIsCreated(configMap v1.ConfigMap, namespace string) {
|
||||
err := c.waitUntilNamespaceIsCreated(namespace)
|
||||
if err == nil {
|
||||
_, err = c.CreateResource("configmaps", namespace, configMap)
|
||||
}
|
||||
if err != nil {
|
||||
c.logger.Printf("Can't create a configmap: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (c *Client) createSecretAfterNamespaceIsCreated(secret v1.Secret, namespace string) {
|
||||
err := c.waitUntilNamespaceIsCreated(namespace)
|
||||
if err == nil {
|
||||
_, err = c.CreateResource(Secret, namespace, secret)
|
||||
}
|
||||
if err != nil {
|
||||
c.logger.Printf("Can't create a secret: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Waits until namespace is created with maximum duration maxWaitTimeForNamespaceCreation
|
||||
func (c *Client) waitUntilNamespaceIsCreated(name string) error {
|
||||
timeStart := time.Now()
|
||||
|
||||
var lastError error = nil
|
||||
for time.Now().Sub(timeStart) < namespaceCreationMaxWaitTime {
|
||||
_, lastError = c.GetResource("namespaces", "", name)
|
||||
if lastError == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(namespaceCreationWaitInterval)
|
||||
}
|
||||
return lastError
|
||||
}
|
||||
|
||||
// KindIsSupported checks if the kind is a registerd GVK
|
||||
func (c *Client) KindIsSupported(kind string) bool {
|
||||
kind = strings.ToLower(kind) + "s"
|
||||
buildGVKMapper(c.clientConfig, false)
|
||||
_, ok := getValue(kind)
|
||||
return ok
|
||||
}
|
6
client/client_test.go
Normal file
6
client/client_test.go
Normal file
|
@ -0,0 +1,6 @@
|
|||
package client
|
||||
|
||||
// GetResource
|
||||
// ListResource
|
||||
// CreateResource
|
||||
// getGroupVersionMapper (valid and invalid resources)
|
99
client/utils.go
Normal file
99
client/utils.go
Normal file
|
@ -0,0 +1,99 @@
|
|||
package client
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
"k8s.io/client-go/rest"
|
||||
)
|
||||
|
||||
const (
|
||||
CSR string = "certificatesigningrequests"
|
||||
Secret string = "secrets"
|
||||
)
|
||||
const namespaceCreationMaxWaitTime time.Duration = 30 * time.Second
|
||||
const namespaceCreationWaitInterval time.Duration = 100 * time.Millisecond
|
||||
|
||||
var groupVersionMapper map[string]schema.GroupVersionResource
|
||||
var kubeClient *kubernetes.Clientset
|
||||
|
||||
func getGrpVersionMapper(kind string, clientConfig *rest.Config, refresh bool) schema.GroupVersionResource {
|
||||
// build the GVK mapper
|
||||
buildGVKMapper(clientConfig, refresh)
|
||||
// Query mapper
|
||||
if val, ok := getValue(kind); ok {
|
||||
return *val
|
||||
}
|
||||
utilruntime.HandleError(fmt.Errorf("Resouce '%s' not registered", kind))
|
||||
return schema.GroupVersionResource{}
|
||||
}
|
||||
|
||||
func buildGVKMapper(clientConfig *rest.Config, refresh bool) {
|
||||
if groupVersionMapper == nil || refresh {
|
||||
groupVersionMapper = make(map[string]schema.GroupVersionResource)
|
||||
// refresh the mapper
|
||||
if err := refreshRegisteredResources(groupVersionMapper, clientConfig); err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func getValue(kind string) (*schema.GroupVersionResource, bool) {
|
||||
if groupVersionMapper == nil {
|
||||
utilruntime.HandleError(fmt.Errorf("GroupVersionKind mapper is not loaded"))
|
||||
return nil, false
|
||||
}
|
||||
if val, ok := groupVersionMapper[kind]; ok {
|
||||
return &val, true
|
||||
}
|
||||
return nil, false
|
||||
}
|
||||
|
||||
func refreshRegisteredResources(mapper map[string]schema.GroupVersionResource, clientConfig *rest.Config) error {
|
||||
// build kubernetes client
|
||||
client, err := newKubeClient(clientConfig)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// get registered server groups and resources
|
||||
_, resourceList, err := client.Discovery().ServerGroupsAndResources()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, apiResource := range resourceList {
|
||||
for _, resource := range apiResource.APIResources {
|
||||
grpVersion := strings.Split(apiResource.GroupVersion, "/")
|
||||
if len(grpVersion) == 2 {
|
||||
mapper[resource.Name] = schema.GroupVersionResource{
|
||||
Group: grpVersion[0],
|
||||
Version: grpVersion[1],
|
||||
Resource: resource.Name,
|
||||
}
|
||||
} else {
|
||||
// resources with only versions
|
||||
mapper[resource.Name] = schema.GroupVersionResource{
|
||||
Version: apiResource.GroupVersion,
|
||||
Resource: resource.Name,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func newKubeClient(clientConfig *rest.Config) (*kubernetes.Clientset, error) {
|
||||
var err error
|
||||
if kubeClient == nil {
|
||||
kubeClient, err = kubernetes.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return kubeClient, nil
|
||||
}
|
|
@ -29,14 +29,14 @@ spec:
|
|||
required:
|
||||
- name
|
||||
- resource
|
||||
properties:
|
||||
parameters:
|
||||
name:
|
||||
type: string
|
||||
resource:
|
||||
type: object
|
||||
required:
|
||||
- kind
|
||||
properties:
|
||||
parameters:
|
||||
kind:
|
||||
type: string
|
||||
enum:
|
||||
|
@ -115,33 +115,36 @@ spec:
|
|||
pattern:
|
||||
AnyValue: {}
|
||||
generate:
|
||||
type: object
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
properties:
|
||||
kind:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
copyFrom:
|
||||
type: object
|
||||
required:
|
||||
- namespace
|
||||
- name
|
||||
properties:
|
||||
namespace:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
data:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: array
|
||||
items:
|
||||
type: object
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
- copyFrom
|
||||
properties:
|
||||
kind:
|
||||
type: string
|
||||
labels:
|
||||
type: object
|
||||
additionalProperties:
|
||||
name:
|
||||
type: string
|
||||
copyFrom:
|
||||
type: object
|
||||
required:
|
||||
- namespace
|
||||
- name
|
||||
properties:
|
||||
namespace:
|
||||
type: string
|
||||
name:
|
||||
type: string
|
||||
data:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
labels:
|
||||
type: object
|
||||
additionalProperties:
|
||||
type: string
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
|
|
|
@ -78,7 +78,7 @@ spec :
|
|||
data: # data is optional
|
||||
status:
|
||||
events:
|
||||
# log of applied policies. We will need a way to distinguish between failed
|
||||
# log of applied policies. We will need a way to distinguish between failed
|
||||
# and succeeded operations
|
||||
|
||||
|
||||
|
|
11
init.go
11
init.go
|
@ -5,9 +5,9 @@ import (
|
|||
"log"
|
||||
"net/url"
|
||||
|
||||
"github.com/nirmata/kube-policy/kubeclient"
|
||||
client "github.com/nirmata/kube-policy/client"
|
||||
"github.com/nirmata/kube-policy/pkg/config"
|
||||
"github.com/nirmata/kube-policy/pkg/tls"
|
||||
tls "github.com/nirmata/kube-policy/pkg/tls"
|
||||
|
||||
rest "k8s.io/client-go/rest"
|
||||
clientcmd "k8s.io/client-go/tools/clientcmd"
|
||||
|
@ -22,7 +22,7 @@ func createClientConfig(kubeconfig string) (*rest.Config, error) {
|
|||
return clientcmd.BuildConfigFromFlags("", kubeconfig)
|
||||
}
|
||||
|
||||
func initTLSPemPair(certFile, keyFile string, clientConfig *rest.Config, kubeclient *kubeclient.KubeClient) (*tls.TlsPemPair, error) {
|
||||
func initTlsPemPair(certFile, keyFile string, clientConfig *rest.Config, client *client.Client) (*tls.TlsPemPair, error) {
|
||||
var tlsPair *tls.TlsPemPair
|
||||
if certFile != "" || keyFile != "" {
|
||||
tlsPair = tlsPairFromFiles(certFile, keyFile)
|
||||
|
@ -32,7 +32,7 @@ func initTLSPemPair(certFile, keyFile string, clientConfig *rest.Config, kubecli
|
|||
if tlsPair != nil {
|
||||
return tlsPair, nil
|
||||
}
|
||||
tlsPair, err = tlsPairFromCluster(clientConfig, kubeclient)
|
||||
tlsPair, err = tlsPairFromCluster(clientConfig, client)
|
||||
return tlsPair, err
|
||||
}
|
||||
|
||||
|
@ -63,7 +63,7 @@ func tlsPairFromFiles(certFile, keyFile string) *tls.TlsPemPair {
|
|||
// Loads or creates PEM private key and TLS certificate for webhook server.
|
||||
// Created pair is stored in cluster's secret.
|
||||
// Returns struct with key/certificate pair.
|
||||
func tlsPairFromCluster(configuration *rest.Config, client *kubeclient.KubeClient) (*tls.TlsPemPair, error) {
|
||||
func tlsPairFromCluster(configuration *rest.Config, client *client.Client) (*tls.TlsPemPair, error) {
|
||||
apiServerURL, err := url.Parse(configuration.Host)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -73,7 +73,6 @@ func tlsPairFromCluster(configuration *rest.Config, client *kubeclient.KubeClien
|
|||
Namespace: config.KubePolicyNamespace,
|
||||
ApiServerHost: apiServerURL.Hostname(),
|
||||
}
|
||||
|
||||
tlsPair := client.ReadTlsPair(certProps)
|
||||
if tls.IsTlsPairShouldBeUpdated(tlsPair) {
|
||||
log.Printf("Generating new key/certificate pair for TLS")
|
||||
|
|
|
@ -1,603 +0,0 @@
|
|||
package kubeclient
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
|
||||
"github.com/nirmata/kube-policy/pkg/config"
|
||||
apps "k8s.io/api/apps/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
event "k8s.io/client-go/kubernetes/typed/core/v1"
|
||||
"k8s.io/client-go/rest"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
||||
// KubeClient is the api-client for core Kubernetes objects
|
||||
type KubeClient struct {
|
||||
client *kubernetes.Clientset
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// Checks parameters and creates new instance of KubeClient
|
||||
func NewKubeClient(config *rest.Config, logger *log.Logger) (*KubeClient, error) {
|
||||
if logger == nil {
|
||||
logger = log.New(os.Stdout, "Kubernetes client: ", log.LstdFlags)
|
||||
}
|
||||
|
||||
client, err := kubernetes.NewForConfig(config)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &KubeClient{
|
||||
logger: logger,
|
||||
client: client,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (kc *KubeClient) GetEvents(namespace string) event.EventInterface {
|
||||
return kc.client.CoreV1().Events(namespace)
|
||||
}
|
||||
|
||||
func (kc *KubeClient) GetKubePolicyDeployment() (*apps.Deployment, error) {
|
||||
kubePolicyDeployment, err := kc.client.
|
||||
AppsV1().
|
||||
Deployments(config.KubePolicyNamespace).
|
||||
Get(config.KubePolicyDeploymentName, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return kubePolicyDeployment, nil
|
||||
}
|
||||
|
||||
// Generates new ConfigMap in given namespace. If the namespace does not exists yet,
|
||||
// waits until it is created for maximum namespaceCreationMaxWaitTime (see below)
|
||||
func (kc *KubeClient) GenerateConfigMap(generator types.Generation, namespace string) error {
|
||||
kc.logger.Printf("Preparing to create configmap %s/%s", namespace, generator.Name)
|
||||
configMap := &v1.ConfigMap{}
|
||||
|
||||
var err error
|
||||
|
||||
if generator.CopyFrom != nil {
|
||||
kc.logger.Printf("Copying data from configmap %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
|
||||
configMap, err = kc.client.CoreV1().ConfigMaps(generator.CopyFrom.Namespace).Get(generator.CopyFrom.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
configMap.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: generator.Name,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
// Copy data from generator to the new configmap
|
||||
if generator.Data != nil {
|
||||
if configMap.Data == nil {
|
||||
configMap.Data = make(map[string]string)
|
||||
}
|
||||
|
||||
for k, v := range generator.Data {
|
||||
configMap.Data[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
go kc.createConfigMapAfterNamespaceIsCreated(*configMap, namespace)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Generates new Secret in given namespace. If the namespace does not exists yet,
|
||||
// waits until it is created for maximum namespaceCreationMaxWaitTime (see below)
|
||||
func (kc *KubeClient) GenerateSecret(generator types.Generation, namespace string) error {
|
||||
kc.logger.Printf("Preparing to create secret %s/%s", namespace, generator.Name)
|
||||
secret := &v1.Secret{}
|
||||
|
||||
var err error
|
||||
|
||||
if generator.CopyFrom != nil {
|
||||
kc.logger.Printf("Copying data from secret %s/%s", generator.CopyFrom.Namespace, generator.CopyFrom.Name)
|
||||
secret, err = kc.client.CoreV1().Secrets(generator.CopyFrom.Namespace).Get(generator.CopyFrom.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
secret.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: generator.Name,
|
||||
Namespace: namespace,
|
||||
}
|
||||
|
||||
// Copy data from generator to the new secret
|
||||
if generator.Data != nil {
|
||||
if secret.Data == nil {
|
||||
secret.Data = make(map[string][]byte)
|
||||
}
|
||||
|
||||
for k, v := range generator.Data {
|
||||
secret.Data[k] = []byte(v)
|
||||
}
|
||||
}
|
||||
|
||||
go kc.createSecretAfterNamespaceIsCreated(*secret, namespace)
|
||||
return nil
|
||||
}
|
||||
|
||||
func defaultDeleteOptions() *metav1.DeleteOptions {
|
||||
var deletePeriod int64 = 0
|
||||
return &metav1.DeleteOptions{
|
||||
GracePeriodSeconds: &deletePeriod,
|
||||
}
|
||||
}
|
||||
|
||||
const namespaceCreationMaxWaitTime time.Duration = 30 * time.Second
|
||||
const namespaceCreationWaitInterval time.Duration = 100 * time.Millisecond
|
||||
|
||||
// Waits until namespace is created with maximum duration maxWaitTimeForNamespaceCreation
|
||||
func (kc *KubeClient) waitUntilNamespaceIsCreated(name string) error {
|
||||
timeStart := time.Now()
|
||||
|
||||
var lastError error = nil
|
||||
for time.Now().Sub(timeStart) < namespaceCreationMaxWaitTime {
|
||||
_, lastError = kc.client.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
|
||||
if lastError == nil {
|
||||
break
|
||||
}
|
||||
time.Sleep(namespaceCreationWaitInterval)
|
||||
}
|
||||
return lastError
|
||||
}
|
||||
|
||||
func (kc *KubeClient) createConfigMapAfterNamespaceIsCreated(configMap v1.ConfigMap, namespace string) {
|
||||
err := kc.waitUntilNamespaceIsCreated(namespace)
|
||||
if err == nil {
|
||||
_, err = kc.client.CoreV1().ConfigMaps(namespace).Create(&configMap)
|
||||
}
|
||||
if err != nil {
|
||||
kc.logger.Printf("Can't create a configmap: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (kc *KubeClient) createSecretAfterNamespaceIsCreated(secret v1.Secret, namespace string) {
|
||||
err := kc.waitUntilNamespaceIsCreated(namespace)
|
||||
if err == nil {
|
||||
_, err = kc.client.CoreV1().Secrets(namespace).Create(&secret)
|
||||
}
|
||||
if err != nil {
|
||||
kc.logger.Printf("Can't create a secret: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// rMapper maps a resource kind name to the function that fetches a single
// object of that kind by namespace/name. Keys here must stay in sync with
// lMapper. NOTE: "Endpoints" maps to endpointsbGetter — the stray "b" in the
// function name appears to be a typo, kept as-is.
var rMapper = map[string]getter{
	"ConfigMap": configMapGetter,
	"Pods": podsGetter,
	"Deployment": deploymentGetter,
	"CronJob": cronJobGetter,
	"Endpoints": endpointsbGetter,
	"HorizontalPodAutoscaler": horizontalPodAutoscalerGetter,
	"Ingress": ingressGetter,
	"Job": jobGetter,
	"LimitRange": limitRangeGetter,
	"Namespace": namespaceGetter,
	"NetworkPolicy": networkPolicyGetter,
	"PersistentVolumeClaim": persistentVolumeClaimGetter,
	"PodDisruptionBudget": podDisruptionBudgetGetter,
	"PodTemplate": podTemplateGetter,
	"ResourceQuota": resourceQuotaGetter,
	"Secret": secretGetter,
	"Service": serviceGetter,
	"StatefulSet": statefulSetGetter,
}
|
||||
|
||||
// lMapper maps a resource kind name to the function that lists all objects of
// that kind in a namespace. Keys here must stay in sync with rMapper.
var lMapper = map[string]lister{
	"ConfigMap": configMapLister,
	"Pods": podLister,
	"Deployment": deploymentLister,
	"CronJob": cronJobLister,
	"Endpoints": endpointsLister,
	"HorizontalPodAutoscaler": horizontalPodAutoscalerLister,
	"Ingress": ingressLister,
	"Job": jobLister,
	"LimitRange": limitRangeLister,
	"Namespace": namespaceLister,
	"NetworkPolicy": networkPolicyLister,
	"PersistentVolumeClaim": persistentVolumeClaimLister,
	"PodDisruptionBudget": podDisruptionBudgetLister,
	"PodTemplate": podTemplateLister,
	"ResourceQuota": resourceQuotaLister,
	"Secret": secretLister,
	"Service": serviceLister,
	"StatefulSet": statefulSetLister,
}
|
||||
|
||||
// getter fetches a single object of some kind, given (clientset, namespace, name).
type getter func(*kubernetes.Clientset, string, string) (runtime.Object, error)

// lister returns all objects of some kind, given (clientset, namespace).
type lister func(*kubernetes.Clientset, string) ([]runtime.Object, error)
|
||||
|
||||
//ListResource to return resource list
|
||||
func (kc *KubeClient) ListResource(kind string, namespace string) ([]runtime.Object, error) {
|
||||
return lMapper[kind](kc.client, namespace)
|
||||
}
|
||||
|
||||
//GetResource get the resource object
|
||||
func (kc *KubeClient) GetResource(kind string, resource string) (runtime.Object, error) {
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(resource)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", resource))
|
||||
return nil, err
|
||||
}
|
||||
return rMapper[kind](kc.client, namespace, name)
|
||||
}
|
||||
|
||||
//GetSupportedKinds provides list of supported types
|
||||
func GetSupportedKinds() (rTypes []string) {
|
||||
for k := range rMapper {
|
||||
rTypes = append(rTypes, k)
|
||||
}
|
||||
return rTypes
|
||||
}
|
||||
|
||||
func configMapGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().ConfigMaps(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func configMapLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().ConfigMaps(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func podsGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().Pods(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func podLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().Pods(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func deploymentGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.AppsV1().Deployments(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
func deploymentLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.AppsV1().Deployments(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func cronJobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.BatchV1beta1().CronJobs(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func cronJobLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.BatchV1beta1().CronJobs(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func endpointsbGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().Endpoints(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func endpointsLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().Endpoints(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func horizontalPodAutoscalerGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func horizontalPodAutoscalerLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.AutoscalingV1().HorizontalPodAutoscalers(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func ingressGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.ExtensionsV1beta1().Ingresses(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func ingressLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.ExtensionsV1beta1().Ingresses(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func jobGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.BatchV1().Jobs(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func jobLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.BatchV1().Jobs(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func limitRangeGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().LimitRanges(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
func limitRangeLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().LimitRanges(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func namespaceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().Namespaces().Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func namespaceLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().Namespaces().List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func networkPolicyGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.NetworkingV1().NetworkPolicies(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func networkPolicyLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.NetworkingV1().NetworkPolicies(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func persistentVolumeClaimGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().PersistentVolumeClaims(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func persistentVolumeClaimLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().PersistentVolumeClaims(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func podDisruptionBudgetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func podDisruptionBudgetLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.PolicyV1beta1().PodDisruptionBudgets(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func podTemplateGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().PodTemplates(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func podTemplateLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().PodTemplates(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func resourceQuotaGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().ResourceQuotas(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func resourceQuotaLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().ResourceQuotas(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func secretGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().Secrets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func secretLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().Secrets(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func serviceGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.CoreV1().Services(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func serviceLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.CoreV1().Services(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
||||
|
||||
func statefulSetGetter(clientSet *kubernetes.Clientset, namespace string, name string) (runtime.Object, error) {
|
||||
obj, err := clientSet.AppsV1().StatefulSets(namespace).Get(name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return obj, nil
|
||||
}
|
||||
|
||||
func statefulSetLister(clientSet *kubernetes.Clientset, namespace string) ([]runtime.Object, error) {
|
||||
list, err := clientSet.AppsV1().StatefulSets(namespace).List(metav1.ListOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
objList := []runtime.Object{}
|
||||
for _, obj := range list.Items {
|
||||
objList = append(objList, &obj)
|
||||
}
|
||||
return objList, nil
|
||||
}
|
46
main.go
46
main.go
|
@ -4,12 +4,11 @@ import (
|
|||
"flag"
|
||||
"log"
|
||||
|
||||
"github.com/nirmata/kube-policy/kubeclient"
|
||||
policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
|
||||
informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions"
|
||||
client "github.com/nirmata/kube-policy/client"
|
||||
controller "github.com/nirmata/kube-policy/pkg/controller"
|
||||
event "github.com/nirmata/kube-policy/pkg/event"
|
||||
violation "github.com/nirmata/kube-policy/pkg/violation"
|
||||
"github.com/nirmata/kube-policy/pkg/sharedinformer"
|
||||
"github.com/nirmata/kube-policy/pkg/violation"
|
||||
"github.com/nirmata/kube-policy/pkg/webhooks"
|
||||
"k8s.io/sample-controller/pkg/signals"
|
||||
)
|
||||
|
@ -23,55 +22,47 @@ var (
|
|||
func main() {
|
||||
clientConfig, err := createClientConfig(kubeconfig)
|
||||
if err != nil {
|
||||
|
||||
log.Fatalf("Error building kubeconfig: %v\n", err)
|
||||
}
|
||||
|
||||
kubeclient, err := kubeclient.NewKubeClient(clientConfig, nil)
|
||||
client, err := client.NewClient(clientConfig, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating kubeclient: %v\n", err)
|
||||
log.Fatalf("Error creating client: %v\n", err)
|
||||
}
|
||||
|
||||
policyClientset, err := policyclientset.NewForConfig(clientConfig)
|
||||
policyInformerFactory, err := sharedinformer.NewSharedInformerFactory(clientConfig)
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating policyClient: %v\n", err)
|
||||
log.Fatalf("Error creating policy sharedinformer: %v\n", err)
|
||||
}
|
||||
eventController := event.NewEventController(client, policyInformerFactory, nil)
|
||||
violationBuilder := violation.NewPolicyViolationBuilder(client, policyInformerFactory, eventController, nil)
|
||||
|
||||
//TODO wrap the policyInformer inside a factory
|
||||
policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, 0)
|
||||
policyInformer := policyInformerFactory.Kubepolicy().V1alpha1().Policies()
|
||||
|
||||
eventController := event.NewEventController(kubeclient, policyInformer.Lister(), nil)
|
||||
violationBuilder := violation.NewPolicyViolationBuilder(kubeclient, policyInformer.Lister(), policyClientset, eventController, nil)
|
||||
|
||||
policyController := controller.NewPolicyController(policyClientset,
|
||||
policyInformer,
|
||||
policyController := controller.NewPolicyController(
|
||||
client,
|
||||
policyInformerFactory,
|
||||
violationBuilder,
|
||||
eventController,
|
||||
nil,
|
||||
kubeclient)
|
||||
nil)
|
||||
|
||||
if err != nil {
|
||||
log.Fatalf("Error creating mutation webhook: %v\n", err)
|
||||
}
|
||||
|
||||
tlsPair, err := initTLSPemPair(cert, key, clientConfig, kubeclient)
|
||||
tlsPair, err := initTlsPemPair(cert, key, clientConfig, client)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err)
|
||||
}
|
||||
|
||||
server, err := webhooks.NewWebhookServer(tlsPair, policyInformer.Lister(), kubeclient, nil)
|
||||
server, err := webhooks.NewWebhookServer(client, tlsPair, policyInformerFactory, nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to create webhook server: %v\n", err)
|
||||
}
|
||||
|
||||
webhookRegistrationClient, err := webhooks.NewWebhookRegistrationClient(clientConfig, kubeclient)
|
||||
webhookRegistrationClient, err := webhooks.NewWebhookRegistrationClient(clientConfig, client)
|
||||
if err != nil {
|
||||
log.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
|
||||
}
|
||||
|
||||
stopCh := signals.SetupSignalHandler()
|
||||
|
||||
policyInformerFactory.Start(stopCh)
|
||||
policyInformerFactory.Run(stopCh)
|
||||
eventController.Run(stopCh)
|
||||
|
||||
if err = policyController.Run(stopCh); err != nil {
|
||||
|
@ -85,6 +76,7 @@ func main() {
|
|||
server.RunAsync()
|
||||
<-stopCh
|
||||
server.Stop()
|
||||
policyController.Stop()
|
||||
}
|
||||
|
||||
func init() {
|
||||
|
|
|
@ -5,6 +5,7 @@ import (
|
|||
)
|
||||
|
||||
// +genclient
|
||||
// +genclient:nonNamespaced
|
||||
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
|
||||
|
||||
// Policy contains rules to be applied to created resources
|
||||
|
|
|
@ -6,12 +6,11 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
kubeClient "github.com/nirmata/kube-policy/kubeclient"
|
||||
client "github.com/nirmata/kube-policy/client"
|
||||
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
|
||||
policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
|
||||
infomertypes "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1"
|
||||
lister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
|
||||
event "github.com/nirmata/kube-policy/pkg/event"
|
||||
"github.com/nirmata/kube-policy/pkg/sharedinformer"
|
||||
violation "github.com/nirmata/kube-policy/pkg/violation"
|
||||
"k8s.io/apimachinery/pkg/api/errors"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -23,9 +22,8 @@ import (
|
|||
|
||||
//PolicyController to manage Policy CRD
|
||||
type PolicyController struct {
|
||||
kubeClient *kubeClient.KubeClient
|
||||
client *client.Client
|
||||
policyLister lister.PolicyLister
|
||||
policyInterface policyclientset.Interface
|
||||
policySynced cache.InformerSynced
|
||||
violationBuilder violation.Generator
|
||||
eventBuilder event.Generator
|
||||
|
@ -34,29 +32,26 @@ type PolicyController struct {
|
|||
}
|
||||
|
||||
// NewPolicyController from cmd args
|
||||
func NewPolicyController(policyInterface policyclientset.Interface,
|
||||
policyInformer infomertypes.PolicyInformer,
|
||||
func NewPolicyController(client *client.Client,
|
||||
policyInformer sharedinformer.PolicyInformer,
|
||||
violationBuilder violation.Generator,
|
||||
eventController event.Generator,
|
||||
logger *log.Logger,
|
||||
kubeClient *kubeClient.KubeClient) *PolicyController {
|
||||
logger *log.Logger) *PolicyController {
|
||||
|
||||
if logger == nil {
|
||||
logger = log.New(os.Stdout, "Policy Controller: ", log.LstdFlags)
|
||||
}
|
||||
|
||||
controller := &PolicyController{
|
||||
kubeClient: kubeClient,
|
||||
policyLister: policyInformer.Lister(),
|
||||
policyInterface: policyInterface,
|
||||
policySynced: policyInformer.Informer().HasSynced,
|
||||
client: client,
|
||||
policyLister: policyInformer.GetLister(),
|
||||
policySynced: policyInformer.GetInfomer().HasSynced,
|
||||
violationBuilder: violationBuilder,
|
||||
eventBuilder: eventController,
|
||||
logger: logger,
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), policyWorkQueueName),
|
||||
}
|
||||
|
||||
policyInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
policyInformer.GetInfomer().AddEventHandler(cache.ResourceEventHandlerFuncs{
|
||||
AddFunc: controller.createPolicyHandler,
|
||||
UpdateFunc: controller.updatePolicyHandler,
|
||||
DeleteFunc: controller.deletePolicyHandler,
|
||||
|
@ -109,11 +104,14 @@ func (pc *PolicyController) Run(stopCh <-chan struct{}) error {
|
|||
for i := 0; i < policyControllerWorkerCount; i++ {
|
||||
go wait.Until(pc.runWorker, time.Second, stopCh)
|
||||
}
|
||||
pc.logger.Println("started policy controller workers")
|
||||
|
||||
pc.logger.Println("Started policy controller")
|
||||
return nil
|
||||
}
|
||||
|
||||
func (pc *PolicyController) Stop() {
|
||||
pc.logger.Println("shutting down policy controller workers")
|
||||
}
|
||||
func (pc *PolicyController) runWorker() {
|
||||
for pc.processNextWorkItem() {
|
||||
}
|
||||
|
@ -162,15 +160,14 @@ func (pc *PolicyController) syncHandler(obj interface{}) error {
|
|||
if key, ok = obj.(string); !ok {
|
||||
return fmt.Errorf("expected string in workqueue but got %#v", obj)
|
||||
}
|
||||
// convert the namespace/name string into distinct namespace and name
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
_, name, err := cache.SplitMetaNamespaceKey(key)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("invalid policy key: %s", key))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Get Policy resource with namespace/name
|
||||
policy, err := pc.policyLister.Policies(namespace).Get(name)
|
||||
policy, err := pc.policyLister.Get(name)
|
||||
if err != nil {
|
||||
if errors.IsNotFound(err) {
|
||||
utilruntime.HandleError(fmt.Errorf("policy '%s' in work queue no longer exists", key))
|
||||
|
@ -181,6 +178,7 @@ func (pc *PolicyController) syncHandler(obj interface{}) error {
|
|||
// process policy on existing resource
|
||||
// get the violations and pass to violation Builder
|
||||
// get the events and pass to event Builder
|
||||
fmt.Println(policy)
|
||||
//TODO: processPolicy
|
||||
pc.logger.Printf("process policy %s on existing resources", policy.GetName())
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -1,122 +0,0 @@
|
|||
package controller
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
|
||||
engine "github.com/nirmata/kube-policy/pkg/engine"
|
||||
event "github.com/nirmata/kube-policy/pkg/event"
|
||||
violation "github.com/nirmata/kube-policy/pkg/violation"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
)
|
||||
|
||||
func (pc *PolicyController) runForPolicy(key string) {
|
||||
|
||||
policy, err := pc.getPolicyByKey(key)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s, err: %v", key, err))
|
||||
return
|
||||
}
|
||||
|
||||
if policy == nil {
|
||||
pc.logger.Printf("Counld not find policy by key %s", key)
|
||||
return
|
||||
}
|
||||
|
||||
violations, events, err := pc.processPolicy(*policy)
|
||||
if err != nil {
|
||||
// add Error processing policy event
|
||||
}
|
||||
|
||||
pc.logger.Printf("%v, %v", violations, events)
|
||||
// TODO:
|
||||
// create violations
|
||||
// pc.violationBuilder.Add()
|
||||
// create events
|
||||
// pc.eventBuilder.Add()
|
||||
|
||||
}
|
||||
|
||||
// processPolicy process the policy to all the matched resources
|
||||
func (pc *PolicyController) processPolicy(policy types.Policy) (
|
||||
violations []violation.Info, events []event.Info, err error) {
|
||||
|
||||
for _, rule := range policy.Spec.Rules {
|
||||
resources, err := pc.filterResourceByRule(rule)
|
||||
if err != nil {
|
||||
pc.logger.Printf("Failed to filter resources by rule %s, err: %v\n", rule.Name, err)
|
||||
}
|
||||
|
||||
for _, resource := range resources {
|
||||
rawResource, err := json.Marshal(resource)
|
||||
if err != nil {
|
||||
pc.logger.Printf("Failed to marshal resources map to rule %s, err: %v\n", rule.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
violation, eventInfos, err := engine.ProcessExisting(policy, rawResource)
|
||||
if err != nil {
|
||||
pc.logger.Printf("Failed to process rule %s, err: %v\n", rule.Name, err)
|
||||
continue
|
||||
}
|
||||
|
||||
violations = append(violations, violation...)
|
||||
events = append(events, eventInfos...)
|
||||
}
|
||||
}
|
||||
return violations, events, nil
|
||||
}
|
||||
|
||||
func (pc *PolicyController) filterResourceByRule(rule types.Rule) ([]runtime.Object, error) {
|
||||
var targetResources []runtime.Object
|
||||
// TODO: make this namespace all
|
||||
var namespace = "default"
|
||||
if err := rule.Validate(); err != nil {
|
||||
return nil, fmt.Errorf("invalid rule detected: %s, err: %v", rule.Name, err)
|
||||
}
|
||||
|
||||
// Get the resource list from kind
|
||||
resources, err := pc.kubeClient.ListResource(rule.ResourceDescription.Kind, namespace)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, resource := range resources {
|
||||
// TODO:
|
||||
//rawResource, err := json.Marshal(resource)
|
||||
// objKind := resource.GetObjectKind()
|
||||
// codecFactory := serializer.NewCodecFactory(runtime.NewScheme())
|
||||
// codecFactory.EncoderForVersion()
|
||||
|
||||
if err != nil {
|
||||
pc.logger.Printf("failed to marshal object %v", resource)
|
||||
continue
|
||||
}
|
||||
|
||||
// filter the resource by name and label
|
||||
//if ok, _ := mutation.ResourceMeetsRules(rawResource, rule.ResourceDescription); ok {
|
||||
// targetResources = append(targetResources, resource)
|
||||
//}
|
||||
}
|
||||
return targetResources, nil
|
||||
}
|
||||
|
||||
func (pc *PolicyController) getPolicyByKey(key string) (*types.Policy, error) {
|
||||
// Create nil Selector to grab all the policies
|
||||
selector := labels.NewSelector()
|
||||
cachedPolicies, err := pc.policyLister.List(selector)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, elem := range cachedPolicies {
|
||||
if elem.Name == key {
|
||||
return elem, nil
|
||||
}
|
||||
}
|
||||
|
||||
return nil, nil
|
||||
}
|
|
@ -4,19 +4,13 @@ import (
|
|||
"fmt"
|
||||
"log"
|
||||
|
||||
kubeClient "github.com/nirmata/kube-policy/kubeclient"
|
||||
client "github.com/nirmata/kube-policy/client"
|
||||
kubepolicy "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
type GenerationResponse struct {
|
||||
Generator *kubepolicy.Generation
|
||||
Namespace string
|
||||
}
|
||||
|
||||
// Generate should be called to process generate rules on the resource
|
||||
// TODO: extend kubeclient(will change to dynamic client) to create resources
|
||||
func Generate(policy kubepolicy.Policy, rawResource []byte, kubeClient *kubeClient.KubeClient, gvk metav1.GroupVersionKind) {
|
||||
func Generate(client *client.Client, logger *log.Logger, policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) {
|
||||
// configMapGenerator and secretGenerator can be applied only to namespaces
|
||||
// TODO: support for any resource
|
||||
if gvk.Kind != "Namespace" {
|
||||
|
@ -27,20 +21,20 @@ func Generate(policy kubepolicy.Policy, rawResource []byte, kubeClient *kubeClie
|
|||
ok := ResourceMeetsDescription(rawResource, rule.ResourceDescription, gvk)
|
||||
|
||||
if !ok {
|
||||
log.Printf("Rule is not applicable to the request: rule name = %s in policy %s \n", rule.Name, policy.ObjectMeta.Name)
|
||||
logger.Printf("Rule is not applicable to the request: rule name = %s in policy %s \n", rule.Name, policy.ObjectMeta.Name)
|
||||
continue
|
||||
}
|
||||
|
||||
err := applyRuleGenerator(rawResource, rule.Generation, kubeClient)
|
||||
err := applyRuleGenerator(client, rawResource, rule.Generation, gvk)
|
||||
if err != nil {
|
||||
log.Printf("Failed to apply rule generator: %v", err)
|
||||
logger.Printf("Failed to apply rule generator: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Applies "configMapGenerator" and "secretGenerator" described in PolicyRule
|
||||
// TODO: plan to support all kinds of generator
|
||||
func applyRuleGenerator(rawResource []byte, generator *kubepolicy.Generation, kubeClient *kubeClient.KubeClient) error {
|
||||
func applyRuleGenerator(client *client.Client, rawResource []byte, generator *kubepolicy.Generation, gvk metav1.GroupVersionKind) error {
|
||||
if generator == nil {
|
||||
return nil
|
||||
}
|
||||
|
@ -50,9 +44,9 @@ func applyRuleGenerator(rawResource []byte, generator *kubepolicy.Generation, ku
|
|||
namespace := ParseNameFromObject(rawResource)
|
||||
switch generator.Kind {
|
||||
case "ConfigMap":
|
||||
err = kubeClient.GenerateConfigMap(*generator, namespace)
|
||||
err = client.GenerateConfigMap(*generator, namespace)
|
||||
case "Secret":
|
||||
err = kubeClient.GenerateSecret(*generator, namespace)
|
||||
err = client.GenerateSecret(*generator, namespace)
|
||||
default:
|
||||
err = fmt.Errorf("Unsupported config Kind '%s'", generator.Kind)
|
||||
}
|
||||
|
|
|
@ -6,10 +6,11 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
kubeClient "github.com/nirmata/kube-policy/kubeclient"
|
||||
client "github.com/nirmata/kube-policy/client"
|
||||
"github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme"
|
||||
policyscheme "github.com/nirmata/kube-policy/pkg/client/clientset/versioned/scheme"
|
||||
policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
|
||||
v1alpha1 "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
|
||||
"github.com/nirmata/kube-policy/pkg/sharedinformer"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
|
@ -21,8 +22,8 @@ import (
|
|||
)
|
||||
|
||||
type controller struct {
|
||||
kubeClient *kubeClient.KubeClient
|
||||
policyLister policylister.PolicyLister
|
||||
client *client.Client
|
||||
policyLister v1alpha1.PolicyLister
|
||||
queue workqueue.RateLimitingInterface
|
||||
recorder record.EventRecorder
|
||||
logger *log.Logger
|
||||
|
@ -37,36 +38,45 @@ type Generator interface {
|
|||
type Controller interface {
|
||||
Generator
|
||||
Run(stopCh <-chan struct{})
|
||||
Stop()
|
||||
}
|
||||
|
||||
//NewEventController to generate a new event controller
|
||||
func NewEventController(kubeClient *kubeClient.KubeClient,
|
||||
policyLister policylister.PolicyLister,
|
||||
func NewEventController(client *client.Client,
|
||||
shareInformer sharedinformer.PolicyInformer,
|
||||
logger *log.Logger) Controller {
|
||||
|
||||
if logger == nil {
|
||||
logger = log.New(os.Stdout, "Event Controller: ", log.LstdFlags)
|
||||
logger = log.New(os.Stdout, "Event Controller: ", log.LstdFlags)
|
||||
}
|
||||
|
||||
controller := &controller{
|
||||
kubeClient: kubeClient,
|
||||
policyLister: policyLister,
|
||||
client: client,
|
||||
policyLister: shareInformer.GetLister(),
|
||||
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName),
|
||||
recorder: initRecorder(kubeClient),
|
||||
recorder: initRecorder(client),
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
return controller
|
||||
}
|
||||
|
||||
func initRecorder(kubeClient *kubeClient.KubeClient) record.EventRecorder {
|
||||
func initRecorder(client *client.Client) record.EventRecorder {
|
||||
// Initliaze Event Broadcaster
|
||||
policyscheme.AddToScheme(scheme.Scheme)
|
||||
err := policyscheme.AddToScheme(scheme.Scheme)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return nil
|
||||
}
|
||||
eventBroadcaster := record.NewBroadcaster()
|
||||
eventBroadcaster.StartLogging(log.Printf)
|
||||
eventInterface, err := client.GetEventsInterface()
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err) // TODO: add more specific error
|
||||
return nil
|
||||
}
|
||||
eventBroadcaster.StartRecordingToSink(
|
||||
&typedcorev1.EventSinkImpl{
|
||||
Interface: kubeClient.GetEvents("")})
|
||||
Interface: eventInterface})
|
||||
recorder := eventBroadcaster.NewRecorder(
|
||||
scheme.Scheme,
|
||||
v1.EventSource{Component: eventSource})
|
||||
|
@ -84,9 +94,12 @@ func (c *controller) Run(stopCh <-chan struct{}) {
|
|||
for i := 0; i < eventWorkerThreadCount; i++ {
|
||||
go wait.Until(c.runWorker, time.Second, stopCh)
|
||||
}
|
||||
c.logger.Println("Started eventbuilder controller")
|
||||
c.logger.Println("Started eventbuilder controller workers")
|
||||
}
|
||||
|
||||
func (c *controller) Stop() {
|
||||
c.logger.Println("Shutting down eventbuilder controller workers")
|
||||
}
|
||||
func (c *controller) runWorker() {
|
||||
for c.processNextWorkItem() {
|
||||
}
|
||||
|
@ -115,7 +128,7 @@ func (c *controller) processNextWorkItem() bool {
|
|||
}(obj)
|
||||
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
c.logger.Println((err))
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
@ -123,20 +136,25 @@ func (c *controller) processNextWorkItem() bool {
|
|||
func (c *controller) SyncHandler(key Info) error {
|
||||
var resource runtime.Object
|
||||
var err error
|
||||
|
||||
switch key.Kind {
|
||||
case "Policy":
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key.Resource)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to extract namespace and name for %s", key.Resource))
|
||||
return err
|
||||
}
|
||||
resource, err = c.policyLister.Policies(namespace).Get(name)
|
||||
//TODO: policy is clustered resource so wont need namespace
|
||||
resource, err = c.policyLister.Get(key.Resource)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to create event for policy %s, will retry ", key.Resource))
|
||||
return err
|
||||
}
|
||||
default:
|
||||
resource, err = c.kubeClient.GetResource(key.Kind, key.Resource)
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(key.Resource)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", key.Resource))
|
||||
return err
|
||||
}
|
||||
resource, err = c.client.GetResource(key.Kind, namespace, name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to create event for resource %s, will retry ", key.Resource))
|
||||
return err
|
||||
|
|
55
pkg/sharedinformer/sharedinformerfactory.go
Normal file
55
pkg/sharedinformer/sharedinformerfactory.go
Normal file
|
@ -0,0 +1,55 @@
|
|||
// Package sharedinformer wires up the shared informer factory for the
// Policy custom resource and exposes its lister and index informer.
package sharedinformer

import (
	"fmt"

	policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
	informers "github.com/nirmata/kube-policy/pkg/client/informers/externalversions"
	infomertypes "github.com/nirmata/kube-policy/pkg/client/informers/externalversions/policy/v1alpha1"
	v1alpha1 "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

// PolicyInformer exposes the policy lister and the underlying shared index
// informer for the Policy custom resource.
// NOTE(review): "Infomer" is a misspelling kept for compatibility with
// existing callers of this exported interface.
type PolicyInformer interface {
	GetLister() v1alpha1.PolicyLister
	GetInfomer() cache.SharedIndexInformer
}

// SharedInfomer extends PolicyInformer with lifecycle control: Run starts
// the backing informer factory.
type SharedInfomer interface {
	PolicyInformer
	Run(stopCh <-chan struct{})
}

// sharedInfomer is the concrete SharedInfomer backed by a generated
// SharedInformerFactory for the policy clientset.
type sharedInfomer struct {
	policyInformerFactory informers.SharedInformerFactory
}

// NewSharedInformerFactory returns a SharedInfomer backed by a policy
// clientset built from the given REST config. The factory is created with a
// resync period of 0, i.e. no periodic resync.
func NewSharedInformerFactory(clientConfig *rest.Config) (SharedInfomer, error) {
	// create policy client
	policyClientset, err := policyclientset.NewForConfig(clientConfig)
	if err != nil {
		return nil, fmt.Errorf("error creating policyClient: %v", err)
	}
	//TODO: replace with NewSharedInformerFactoryWithOptions
	policyInformerFactory := informers.NewSharedInformerFactory(policyClientset, 0)
	return &sharedInfomer{
		policyInformerFactory: policyInformerFactory,
	}, nil
}

// Run starts every informer registered with the factory; they stop when
// stopCh is closed.
func (si *sharedInfomer) Run(stopCh <-chan struct{}) {
	si.policyInformerFactory.Start(stopCh)
}

// getInformer returns the typed Policy informer from the factory.
func (si *sharedInfomer) getInformer() infomertypes.PolicyInformer {
	return si.policyInformerFactory.Kubepolicy().V1alpha1().Policies()
}

// GetInfomer returns the shared index informer for policies.
func (si *sharedInfomer) GetInfomer() cache.SharedIndexInformer {
	return si.getInformer().Informer()
}

// GetLister returns the policy lister backed by the informer's cache.
func (si *sharedInfomer) GetLister() v1alpha1.PolicyLister {
	return si.getInformer().Lister()
}
|
|
@ -3,12 +3,13 @@ package violation
|
|||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"os"
|
||||
|
||||
kubeClient "github.com/nirmata/kube-policy/kubeclient"
|
||||
client "github.com/nirmata/kube-policy/client"
|
||||
types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
|
||||
policyclientset "github.com/nirmata/kube-policy/pkg/client/clientset/versioned"
|
||||
policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
|
||||
v1alpha1 "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
|
||||
event "github.com/nirmata/kube-policy/pkg/event"
|
||||
"github.com/nirmata/kube-policy/pkg/sharedinformer"
|
||||
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
|
||||
"k8s.io/client-go/tools/cache"
|
||||
)
|
||||
|
@ -19,11 +20,10 @@ type Generator interface {
|
|||
}
|
||||
|
||||
type builder struct {
|
||||
kubeClient *kubeClient.KubeClient
|
||||
policyLister policylister.PolicyLister
|
||||
policyInterface policyclientset.Interface
|
||||
eventBuilder event.Generator
|
||||
logger *log.Logger
|
||||
client *client.Client
|
||||
policyLister v1alpha1.PolicyLister
|
||||
eventBuilder event.Generator
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
//Builder is to build policy violations
|
||||
|
@ -34,19 +34,20 @@ type Builder interface {
|
|||
}
|
||||
|
||||
//NewPolicyViolationBuilder returns new violation builder
|
||||
func NewPolicyViolationBuilder(
|
||||
kubeClient *kubeClient.KubeClient,
|
||||
policyLister policylister.PolicyLister,
|
||||
policyInterface policyclientset.Interface,
|
||||
func NewPolicyViolationBuilder(client *client.Client,
|
||||
sharedInfomer sharedinformer.PolicyInformer,
|
||||
eventController event.Generator,
|
||||
logger *log.Logger) Builder {
|
||||
|
||||
if logger == nil {
|
||||
logger = log.New(os.Stdout, "Violation Builder: ", log.LstdFlags)
|
||||
}
|
||||
|
||||
builder := &builder{
|
||||
kubeClient: kubeClient,
|
||||
policyLister: policyLister,
|
||||
policyInterface: policyInterface,
|
||||
eventBuilder: eventController,
|
||||
logger: logger,
|
||||
client: client,
|
||||
policyLister: sharedInfomer.GetLister(),
|
||||
eventBuilder: eventController,
|
||||
logger: logger,
|
||||
}
|
||||
return builder
|
||||
}
|
||||
|
@ -62,7 +63,7 @@ func (b *builder) processViolation(info Info) error {
|
|||
utilruntime.HandleError(fmt.Errorf("unable to extract namespace and name for %s", info.Policy))
|
||||
return err
|
||||
}
|
||||
policy, err := b.policyLister.Policies(namespace).Get(name)
|
||||
policy, err := b.policyLister.Get(name)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(err)
|
||||
return err
|
||||
|
@ -89,7 +90,7 @@ func (b *builder) processViolation(info Info) error {
|
|||
|
||||
modifiedPolicy.Status.Violations = modifiedViolations
|
||||
// Violations are part of the status sub resource, so we can use the Update Status api instead of updating the policy object
|
||||
_, err = b.policyInterface.KubepolicyV1alpha1().Policies(namespace).UpdateStatus(modifiedPolicy)
|
||||
_, err = b.client.UpdateStatusResource("policies", namespace, modifiedPolicy)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
@ -97,8 +98,13 @@ func (b *builder) processViolation(info Info) error {
|
|||
}
|
||||
|
||||
func (b *builder) isActive(kind string, resource string) (bool, error) {
|
||||
namespace, name, err := cache.SplitMetaNamespaceKey(resource)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("invalid resource key: %s", resource))
|
||||
return false, err
|
||||
}
|
||||
// Generate Merge Patch
|
||||
_, err := b.kubeClient.GetResource(kind, resource)
|
||||
_, err = b.client.GetResource(kind, namespace, name)
|
||||
if err != nil {
|
||||
utilruntime.HandleError(fmt.Errorf("unable to get resource %s ", resource))
|
||||
return false, err
|
||||
|
|
|
@ -4,7 +4,7 @@ import (
|
|||
"errors"
|
||||
"io/ioutil"
|
||||
|
||||
kubeclient "github.com/nirmata/kube-policy/kubeclient"
|
||||
"github.com/nirmata/kube-policy/client"
|
||||
"github.com/nirmata/kube-policy/pkg/config"
|
||||
|
||||
admregapi "k8s.io/api/admissionregistration/v1beta1"
|
||||
|
@ -16,12 +16,12 @@ import (
|
|||
// WebhookRegistrationClient is client for registration webhooks on cluster
|
||||
type WebhookRegistrationClient struct {
|
||||
registrationClient *admregclient.AdmissionregistrationV1beta1Client
|
||||
kubeclient *kubeclient.KubeClient
|
||||
client *client.Client
|
||||
clientConfig *rest.Config
|
||||
}
|
||||
|
||||
// NewWebhookRegistrationClient creates new WebhookRegistrationClient instance
|
||||
func NewWebhookRegistrationClient(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient) (*WebhookRegistrationClient, error) {
|
||||
func NewWebhookRegistrationClient(clientConfig *rest.Config, client *client.Client) (*WebhookRegistrationClient, error) {
|
||||
registrationClient, err := admregclient.NewForConfig(clientConfig)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
@ -29,7 +29,7 @@ func NewWebhookRegistrationClient(clientConfig *rest.Config, kubeclient *kubecli
|
|||
|
||||
return &WebhookRegistrationClient{
|
||||
registrationClient: registrationClient,
|
||||
kubeclient: kubeclient,
|
||||
client: client,
|
||||
clientConfig: clientConfig,
|
||||
}, nil
|
||||
}
|
||||
|
@ -149,7 +149,7 @@ func constructWebhook(name, servicePath string, caData []byte) admregapi.Webhook
|
|||
}
|
||||
|
||||
func (wrc *WebhookRegistrationClient) constructOwner() meta.OwnerReference {
|
||||
kubePolicyDeployment, err := wrc.kubeclient.GetKubePolicyDeployment()
|
||||
kubePolicyDeployment, err := wrc.client.GetKubePolicyDeployment()
|
||||
|
||||
if err != nil {
|
||||
return meta.OwnerReference{}
|
||||
|
|
|
@ -12,11 +12,12 @@ import (
|
|||
"os"
|
||||
"time"
|
||||
|
||||
kubeClient "github.com/nirmata/kube-policy/kubeclient"
|
||||
policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
|
||||
"github.com/nirmata/kube-policy/client"
|
||||
"github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
|
||||
"github.com/nirmata/kube-policy/pkg/config"
|
||||
engine "github.com/nirmata/kube-policy/pkg/engine"
|
||||
"github.com/nirmata/kube-policy/pkg/engine/mutation"
|
||||
"github.com/nirmata/kube-policy/pkg/sharedinformer"
|
||||
tlsutils "github.com/nirmata/kube-policy/pkg/tls"
|
||||
v1beta1 "k8s.io/api/admission/v1beta1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -27,17 +28,17 @@ import (
|
|||
// MutationWebhook gets policies from policyController and takes control of the cluster with kubeclient.
|
||||
type WebhookServer struct {
|
||||
server http.Server
|
||||
policyLister policylister.PolicyLister
|
||||
kubeClient *kubeClient.KubeClient
|
||||
client *client.Client
|
||||
policyLister v1alpha1.PolicyLister
|
||||
logger *log.Logger
|
||||
}
|
||||
|
||||
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
|
||||
// Policy Controller and Kubernetes Client should be initialized in configuration
|
||||
func NewWebhookServer(
|
||||
client *client.Client,
|
||||
tlsPair *tlsutils.TlsPemPair,
|
||||
policyLister policylister.PolicyLister,
|
||||
kubeClient *kubeClient.KubeClient,
|
||||
shareInformer sharedinformer.PolicyInformer,
|
||||
logger *log.Logger) (*WebhookServer, error) {
|
||||
if logger == nil {
|
||||
logger = log.New(os.Stdout, "Webhook Server: ", log.LstdFlags)
|
||||
|
@ -55,8 +56,8 @@ func NewWebhookServer(
|
|||
tlsConfig.Certificates = []tls.Certificate{pair}
|
||||
|
||||
ws := &WebhookServer{
|
||||
policyLister: policyLister,
|
||||
kubeClient: kubeClient,
|
||||
client: client,
|
||||
policyLister: shareInformer.GetLister(),
|
||||
logger: logger,
|
||||
}
|
||||
|
||||
|
@ -86,8 +87,7 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
|
|||
admissionReview.Response = &v1beta1.AdmissionResponse{
|
||||
Allowed: true,
|
||||
}
|
||||
|
||||
if KindIsSupported(admissionReview.Request.Kind.Kind) {
|
||||
if ws.client.KindIsSupported(admissionReview.Request.Kind.Kind) {
|
||||
switch r.URL.Path {
|
||||
case config.MutatingWebhookServicePath:
|
||||
admissionReview.Response = ws.HandleMutation(admissionReview.Request)
|
||||
|
@ -193,7 +193,7 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest) *v1
|
|||
}
|
||||
|
||||
// generation
|
||||
engine.Generate(*policy, request.Object.Raw, ws.kubeClient, request.Kind)
|
||||
engine.Generate(ws.client, ws.logger, *policy, request.Object.Raw, request.Kind)
|
||||
}
|
||||
|
||||
ws.logger.Println("Validation is successful")
|
||||
|
|
|
@ -1,16 +0,0 @@
|
|||
package webhooks
|
||||
|
||||
import (
|
||||
kubeclient "github.com/nirmata/kube-policy/kubeclient"
|
||||
)
|
||||
|
||||
// KindIsSupported checks kind to be prensent in
|
||||
// SupportedKinds defined in config
|
||||
func KindIsSupported(kind string) bool {
|
||||
for _, k := range kubeclient.GetSupportedKinds() {
|
||||
if k == kind {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
|
@ -83,4 +83,3 @@ spec :
|
|||
data :
|
||||
foo : bar
|
||||
secretData: "data from sg"
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue