Merge pull request #36 from nirmata/validation-patterns

Validation patterns

Commit 36b5507389, 20 changed files with 503 additions and 322 deletions
config/config.go
@@ -2,22 +2,48 @@ package config
 const (
 	// These constants MUST be equal to the corresponding names in service definition in definitions/install.yaml
-	KubePolicyDeploymentName = "kube-policy-deployment"
-	KubePolicyNamespace      = "kube-system"
-	WebhookServiceName       = "kube-policy-svc"
-	WebhookConfigName        = "nirmata-kube-policy-webhook-cfg"
-	MutationWebhookName      = "webhook.nirmata.kube-policy"
+	KubePolicyNamespace = "kube-system"
+	WebhookServiceName  = "kube-policy-svc"
+
+	MutatingWebhookConfigurationName = "kube-policy-mutating-webhook-cfg"
+	MutatingWebhookName              = "nirmata.kube-policy.mutating-webhook"
+
+	ValidatingWebhookConfigurationName = "kube-policy-validating-webhook-cfg"
+	ValidatingWebhookName              = "nirmata.kube-policy.validating-webhook"

 	// Due to a kubernetes issue, we must use these literal constants instead of the deployment TypeMeta fields
-	// Pull request: https://github.com/kubernetes/kubernetes/pull/63972
-	// When pull request is closed, we should use TypeMeta struct instead of this constants
-	DeploymentKind       = "Deployment"
-	DeploymentAPIVersion = "extensions/v1beta1"
+	// Issue: https://github.com/kubernetes/kubernetes/pull/63972
+	// When the issue is closed, we should use the TypeMeta struct instead of these constants
+	DeploymentKind           = "Deployment"
+	DeploymentAPIVersion     = "extensions/v1beta1"
+	KubePolicyDeploymentName = "kube-policy-deployment"
 )

 var (
-	WebhookServicePath  = "/mutate"
-	WebhookConfigLabels = map[string]string{
+	MutatingWebhookServicePath   = "/mutate"
+	ValidatingWebhookServicePath = "/validate"
+	KubePolicyAppLabels = map[string]string{
 		"app": "kube-policy",
 	}
+
+	SupportedKinds = []string{
+		"ConfigMap",
+		"CronJob",
+		"DaemonSet",
+		"Deployment",
+		"Endpoints",
+		"HorizontalPodAutoscaler",
+		"Ingress",
+		"Job",
+		"LimitRange",
+		"Namespace",
+		"NetworkPolicy",
+		"PersistentVolumeClaim",
+		"PodDisruptionBudget",
+		"PodTemplate",
+		"ResourceQuota",
+		"Secret",
+		"Service",
+		"StatefulSet",
+	}
 )
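For reference, SupportedKinds is the list the webhook handlers consult (via KindIsSupported, shown further down) before touching a request. A minimal standalone sketch of that membership check, with the slice copied from the constants above:

package main

import "fmt"

var supportedKinds = []string{
	"ConfigMap", "CronJob", "DaemonSet", "Deployment", "Endpoints",
	"HorizontalPodAutoscaler", "Ingress", "Job", "LimitRange", "Namespace",
	"NetworkPolicy", "PersistentVolumeClaim", "PodDisruptionBudget",
	"PodTemplate", "ResourceQuota", "Secret", "Service", "StatefulSet",
}

// kindIsSupported reports whether the admission request's kind is one
// the policy engine knows how to process (linear scan, small fixed list).
func kindIsSupported(kind string) bool {
	for _, k := range supportedKinds {
		if k == kind {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(kindIsSupported("ConfigMap")) // true
	fmt.Println(kindIsSupported("Pod"))       // false: not in the list at this commit
}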
@@ -1,19 +1,20 @@
-apiVersion : policy.nirmata.io/v1alpha1
-kind : Policy
+apiVersion: kubepolicy.nirmata.io/v1alpha1
+kind: Policy
 metadata :
-  name : policy-test
-spec :
-  failurePolicy: stopOnError
+  name: policy-configmap-test
+spec:
   rules:
-    - resource:
-        kind : ConfigMap
-        name: "game-config"
-      patch:
-        - path : "/data/newKey"
-          op : add
-          value : newValue
-        - path : "/data/secretData"
-          op : remove
-        - path : "/data/secretDatatoreplace"
-          op : replace
-          value : "data is replaced"
+    - name: "Policy ConfigMap sample rule"
+      resource:
+        kind: ConfigMap
+        name: "game-config"
+      mutate:
+        patches:
+          - path: "/data/newKey"
+            op: add
+            value: newValue
+          - path: "/data/secretData"
+            op: remove
+          - path: "/data/secretDatatoreplace"
+            op: replace
+            value: "data is replaced"
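The three patch operations in this example follow RFC 6902 JSONPatch semantics. A hand-rolled standalone illustration of what they do to the ConfigMap's data map (plain map edits here, not the engine's actual patch machinery):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	data := map[string]string{
		"secretData":          "top secret",
		"secretDatatoreplace": "old value",
	}
	// op: add, path: /data/newKey
	data["newKey"] = "newValue"
	// op: remove, path: /data/secretData
	delete(data, "secretData")
	// op: replace, path: /data/secretDatatoreplace
	data["secretDatatoreplace"] = "data is replaced"

	out, _ := json.MarshalIndent(data, "", "  ")
	fmt.Println(string(out))
}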
init.go
@@ -7,7 +7,7 @@ import (
 	"github.com/nirmata/kube-policy/config"
 	"github.com/nirmata/kube-policy/kubeclient"
-	"github.com/nirmata/kube-policy/utils"
+	"github.com/nirmata/kube-policy/pkg/tls"

 	rest "k8s.io/client-go/rest"
 	clientcmd "k8s.io/client-go/tools/clientcmd"
@@ -23,27 +23,23 @@ func createClientConfig(kubeconfig string) (*rest.Config, error) {
 	}
 }

-func initTlsPemPair(certFile, keyFile string, clientConfig *rest.Config, kubeclient *kubeclient.KubeClient) (*utils.TlsPemPair, error) {
-	var tlsPair *utils.TlsPemPair
+func initTlsPemPair(certFile, keyFile string, clientConfig *rest.Config, kubeclient *kubeclient.KubeClient) (*tls.TlsPemPair, error) {
+	var tlsPair *tls.TlsPemPair
 	if certFile != "" || keyFile != "" {
 		tlsPair = tlsPairFromFiles(certFile, keyFile)
 	}

 	var err error
 	if tlsPair != nil {
 		log.Print("Using given TLS key/certificate pair")
 		return tlsPair, nil
 	} else {
 		tlsPair, err = tlsPairFromCluster(clientConfig, kubeclient)
 		if err == nil {
 			log.Printf("Using TLS key/certificate from cluster")
 		}
 		return tlsPair, err
 	}
 }

 // Loads PEM private key and TLS certificate from given files
-func tlsPairFromFiles(certFile, keyFile string) *utils.TlsPemPair {
+func tlsPairFromFiles(certFile, keyFile string) *tls.TlsPemPair {
 	if certFile == "" || keyFile == "" {
 		return nil
 	}
@@ -60,7 +56,7 @@ func tlsPairFromFiles(certFile, keyFile string) *utils.TlsPemPair {
 		return nil
 	}

-	return &utils.TlsPemPair{
+	return &tls.TlsPemPair{
 		Certificate: certContent,
 		PrivateKey:  keyContent,
 	}
@@ -69,19 +65,19 @@ func tlsPairFromFiles(certFile, keyFile string) *utils.TlsPemPair {
 // Loads or creates PEM private key and TLS certificate for webhook server.
 // Created pair is stored in cluster's secret.
 // Returns struct with key/certificate pair.
-func tlsPairFromCluster(configuration *rest.Config, client *kubeclient.KubeClient) (*utils.TlsPemPair, error) {
+func tlsPairFromCluster(configuration *rest.Config, client *kubeclient.KubeClient) (*tls.TlsPemPair, error) {
 	apiServerUrl, err := url.Parse(configuration.Host)
 	if err != nil {
 		return nil, err
 	}
-	certProps := utils.TlsCertificateProps{
+	certProps := tls.TlsCertificateProps{
 		Service:       config.WebhookServiceName,
 		Namespace:     config.KubePolicyNamespace,
 		ApiServerHost: apiServerUrl.Hostname(),
 	}

 	tlsPair := client.ReadTlsPair(certProps)
-	if utils.IsTlsPairShouldBeUpdated(tlsPair) {
+	if tls.IsTlsPairShouldBeUpdated(tlsPair) {
 		log.Printf("Generating new key/certificate pair for TLS")
 		tlsPair, err = client.GenerateTlsPemPair(certProps)
 		if err != nil {
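initTlsPemPair prefers a pair loaded from the given files and only falls back to the cluster-stored pair when no usable files are supplied. A self-contained sketch of that fallback flow, with stub loaders standing in for the real file and cluster readers:

package main

import (
	"errors"
	"fmt"
)

type tlsPemPair struct{ certificate, privateKey []byte }

// pairFromFiles returns nil when either path is missing, signalling the
// caller to fall back (mirrors tlsPairFromFiles above).
func pairFromFiles(certFile, keyFile string) *tlsPemPair {
	if certFile == "" || keyFile == "" {
		return nil
	}
	return &tlsPemPair{[]byte("cert-from-file"), []byte("key-from-file")}
}

// pairFromCluster stands in for reading (or generating) the pair stored
// in the cluster's secret.
func pairFromCluster() (*tlsPemPair, error) {
	return &tlsPemPair{[]byte("cert-from-cluster"), []byte("key-from-cluster")}, nil
}

func initPair(certFile, keyFile string) (*tlsPemPair, error) {
	if pair := pairFromFiles(certFile, keyFile); pair != nil {
		fmt.Println("Using given TLS key/certificate pair")
		return pair, nil
	}
	pair, err := pairFromCluster()
	if err != nil {
		return nil, errors.New("no TLS pair available: " + err.Error())
	}
	fmt.Println("Using TLS key/certificate from cluster")
	return pair, nil
}

func main() {
	pair, _ := initPair("", "") // no files given: falls back to the cluster pair
	fmt.Printf("%s / %s\n", pair.certificate, pair.privateKey)
}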
@@ -5,22 +5,22 @@ import (
 	"fmt"
 	"time"

-	"github.com/nirmata/kube-policy/utils"
+	tls "github.com/nirmata/kube-policy/pkg/tls"
 	certificates "k8s.io/api/certificates/v1beta1"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // Issues TLS certificate for webhook server using given PEM private key
 // Returns signed and approved TLS certificate in PEM format
-func (kc *KubeClient) GenerateTlsPemPair(props utils.TlsCertificateProps) (*utils.TlsPemPair, error) {
-	privateKey, err := utils.TlsGeneratePrivateKey()
+func (kc *KubeClient) GenerateTlsPemPair(props tls.TlsCertificateProps) (*tls.TlsPemPair, error) {
+	privateKey, err := tls.TlsGeneratePrivateKey()
 	if err != nil {
 		return nil, err
 	}

-	certRequest, err := utils.TlsCertificateGenerateRequest(privateKey, props)
+	certRequest, err := tls.TlsCertificateGenerateRequest(privateKey, props)
 	if err != nil {
 		return nil, errors.New(fmt.Sprintf("Unable to create certificate request: %v", err))
 	}
@@ -35,9 +35,9 @@ func (kc *KubeClient) GenerateTlsPemPair(props utils.TlsCertificateProps) (*util
 		return nil, errors.New(fmt.Sprintf("Unable to fetch certificate from request: %v", err))
 	}

-	return &utils.TlsPemPair{
+	return &tls.TlsPemPair{
 		Certificate: tlsCert,
-		PrivateKey:  utils.TlsPrivateKeyToPem(privateKey),
+		PrivateKey:  tls.TlsPrivateKeyToPem(privateKey),
 	}, nil
 }
@@ -111,7 +111,7 @@ const privateKeyField string = "privateKey"
 const certificateField string = "certificate"

 // Reads the pair of TLS certificate and key from the specified secret.
-func (kc *KubeClient) ReadTlsPair(props utils.TlsCertificateProps) *utils.TlsPemPair {
+func (kc *KubeClient) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair {
 	name := generateSecretName(props)
 	secret, err := kc.client.CoreV1().Secrets(props.Namespace).Get(name, metav1.GetOptions{})
 	if err != nil {
@@ -119,7 +119,7 @@ func (kc *KubeClient) ReadTlsPair(props utils.TlsCertificateProps) *utils.TlsPem
 		return nil
 	}

-	pemPair := utils.TlsPemPair{
+	pemPair := tls.TlsPemPair{
 		Certificate: secret.Data[certificateField],
 		PrivateKey:  secret.Data[privateKeyField],
 	}
@@ -136,7 +136,7 @@ func (kc *KubeClient) ReadTlsPair(props utils.TlsCertificateProps) *utils.TlsPem
 // Writes the pair of TLS certificate and key to the specified secret.
 // Updates existing secret or creates new one.
-func (kc *KubeClient) WriteTlsPair(props utils.TlsCertificateProps, pemPair *utils.TlsPemPair) error {
+func (kc *KubeClient) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPemPair) error {
 	name := generateSecretName(props)
 	secret, err := kc.client.CoreV1().Secrets(props.Namespace).Get(name, metav1.GetOptions{})
@@ -176,6 +176,6 @@ func (kc *KubeClient) WriteTlsPair(props utils.TlsCertificateProps, pemPair *uti
 	return err
 }

-func generateSecretName(props utils.TlsCertificateProps) string {
-	return utils.GenerateInClusterServiceName(props) + ".kube-policy-tls-pair"
+func generateSecretName(props tls.TlsCertificateProps) string {
+	return tls.GenerateInClusterServiceName(props) + ".kube-policy-tls-pair"
 }
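ReadTlsPair and WriteTlsPair treat the secret as a map with two well-known keys; privateKeyField and certificateField below are the same constants shown above. A standalone sketch with a plain map standing in for the Kubernetes secret's data:

package main

import "fmt"

const (
	privateKeyField  = "privateKey"
	certificateField = "certificate"
)

type pemPair struct{ certificate, privateKey []byte }

// writePair stores both halves of the pair under their fixed field names.
func writePair(secretData map[string][]byte, p pemPair) {
	secretData[certificateField] = p.certificate
	secretData[privateKeyField] = p.privateKey
}

// readPair returns nil when either field is missing, which the caller
// treats as "pair must be (re)generated".
func readPair(secretData map[string][]byte) *pemPair {
	cert, okC := secretData[certificateField]
	key, okK := secretData[privateKeyField]
	if !okC || !okK {
		return nil
	}
	return &pemPair{cert, key}
}

func main() {
	secret := map[string][]byte{}
	writePair(secret, pemPair{[]byte("CERT"), []byte("KEY")})
	fmt.Printf("%s / %s\n", readPair(secret).certificate, readPair(secret).privateKey)
}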
@@ -29,7 +29,7 @@ type KubeClient struct {
 // Checks parameters and creates new instance of KubeClient
 func NewKubeClient(config *rest.Config, logger *log.Logger) (*KubeClient, error) {
 	if logger == nil {
-		logger = log.New(os.Stdout, "Kubernetes client: ", log.LstdFlags|log.Lshortfile)
+		logger = log.New(os.Stdout, "Kubernetes client: ", log.LstdFlags)
 	}

 	client, err := kubernetes.NewForConfig(config)
main.go
@@ -68,18 +68,26 @@ func main() {
 	if err != nil {
 		log.Fatalf("Unable to create webhook server: %v\n", err)
 	}
-	server.RunAsync()

+	webhookRegistrationClient, err := webhooks.NewWebhookRegistrationClient(clientConfig, kubeclient)
+	if err != nil {
+		log.Fatalf("Unable to register admission webhooks on cluster: %v\n", err)
+	}
+
 	stopCh := signals.SetupSignalHandler()

 	policyInformerFactory.Start(stopCh)
-	if err = eventController.Run(stopCh); err != nil {
-		log.Fatalf("Error running EventController: %v\n", err)
-	}
+	eventController.Run(stopCh)

 	if err = policyController.Run(stopCh); err != nil {
 		log.Fatalf("Error running PolicyController: %v\n", err)
 	}
+
+	if err = webhookRegistrationClient.Register(); err != nil {
+		log.Fatalf("Failed registering Admission Webhooks: %v\n", err)
+	}
+
+	server.RunAsync()
 	<-stopCh
 	server.Stop()
 }
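main now blocks on the channel returned by signals.SetupSignalHandler, and the webhook server is started last, only after the controllers are running and the webhook configurations are registered. The handler's implementation is not part of this diff, but the usual shape of such a handler (assumed SIGINT/SIGTERM wiring) looks like this:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

// setupSignalHandler returns a channel that is closed on SIGINT or SIGTERM,
// so main can block on it and run a clean shutdown afterwards.
func setupSignalHandler() <-chan struct{} {
	stopCh := make(chan struct{})
	c := make(chan os.Signal, 2)
	signal.Notify(c, syscall.SIGINT, syscall.SIGTERM)
	go func() {
		<-c
		close(stopCh)
	}()
	return stopCh
}

func main() {
	stopCh := setupSignalHandler()
	fmt.Println("running; press Ctrl+C to stop")
	<-stopCh
	fmt.Println("shutting down")
}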
@@ -3,6 +3,7 @@ package event
 import (
 	"fmt"
 	"log"
+	"os"
 	"time"

 	kubeClient "github.com/nirmata/kube-policy/kubeclient"
@@ -35,13 +36,18 @@ type Generator interface {
 //Controller api
 type Controller interface {
 	Generator
-	Run(stopCh <-chan struct{}) error
+	Run(stopCh <-chan struct{})
 }

 //NewEventController to generate a new event controller
 func NewEventController(kubeClient *kubeClient.KubeClient,
 	policyLister policylister.PolicyLister,
 	logger *log.Logger) Controller {

+	if logger == nil {
+		logger = log.New(os.Stdout, "Event Controller: ", log.LstdFlags)
+	}
+
 	controller := &controller{
 		kubeClient:   kubeClient,
 		policyLister: policyLister,
@@ -49,6 +55,7 @@ func NewEventController(kubeClient *kubeClient.KubeClient,
 		recorder: initRecorder(kubeClient),
+		logger:   logger,
 	}

 	return controller
 }
@@ -70,20 +77,14 @@ func (c *controller) Add(info Info) {
 	c.queue.Add(info)
 }

-func (c *controller) Run(stopCh <-chan struct{}) error {
+func (c *controller) Run(stopCh <-chan struct{}) {
 	defer utilruntime.HandleCrash()
 	defer c.queue.ShutDown()

-	log.Println("starting eventbuilder controller")
-
-	log.Println("Starting eventbuilder controller workers")
 	for i := 0; i < eventWorkerThreadCount; i++ {
 		go wait.Until(c.runWorker, time.Second, stopCh)
 	}
-	log.Println("Started eventbuilder controller workers")
-	<-stopCh
-	log.Println("Shutting down eventbuilder controller workers")
-	return nil
+	c.logger.Println("Started eventbuilder controller")
 }

 func (c *controller) runWorker() {
@@ -102,7 +103,7 @@ func (c *controller) processNextWorkItem() bool {
 		var ok bool
 		if key, ok = obj.(Info); !ok {
 			c.queue.Forget(obj)
-			log.Printf("Expecting type info by got %v", obj)
+			c.logger.Printf("Expecting type info but got %v\n", obj)
 			return nil
 		}
 		// Run the syncHandler, passing the resource and the policy
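Run on the event controller no longer blocks on <-stopCh or returns an error: it starts the workers and returns, leaving the blocking wait to main. A standalone sketch of that pattern, with plain goroutines standing in for wait.Until:

package main

import (
	"fmt"
	"time"
)

// runWorker loops until stopCh is closed; the sleep stands in for
// processing one item from the work queue.
func runWorker(id int, stopCh <-chan struct{}) {
	for {
		select {
		case <-stopCh:
			return
		default:
			time.Sleep(100 * time.Millisecond)
		}
	}
}

// run starts the workers and returns immediately, the non-blocking
// shape the event controller's Run now has.
func run(workers int, stopCh <-chan struct{}) {
	for i := 0; i < workers; i++ {
		go runWorker(i, stopCh)
	}
	fmt.Println("Started eventbuilder controller")
}

func main() {
	stopCh := make(chan struct{})
	run(2, stopCh)
	time.Sleep(300 * time.Millisecond)
	close(stopCh) // workers observe the closed channel and exit
}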
@@ -3,10 +3,11 @@ package policyengine
 import (
 	kubepolicy "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
 	"github.com/nirmata/kube-policy/pkg/policyengine/mutation"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // Mutate performs mutation. Overlay first and then mutation patches
-func (p *policyEngine) Mutate(policy kubepolicy.Policy, rawResource []byte) []mutation.PatchBytes {
+func (p *policyEngine) Mutate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) []mutation.PatchBytes {
 	var policyPatches []mutation.PatchBytes

 	for i, rule := range policy.Spec.Rules {
@@ -22,7 +23,7 @@ func (p *policyEngine) Mutate(policy kubepolicy.Policy, rawResource []byte) []mu
 			continue
 		}

-		ok, err := mutation.IsRuleApplicableToResource(rawResource, rule.ResourceDescription)
+		ok, err := mutation.ResourceMeetsRules(rawResource, rule.ResourceDescription, gvk)
 		if err != nil {
 			p.logger.Printf("Rule has invalid data: rule number = %d, rule name = %s in policy %s, err: %v\n", i, rule.Name, policy.ObjectMeta.Name, err)
 			continue
@@ -33,6 +34,10 @@ func (p *policyEngine) Mutate(policy kubepolicy.Policy, rawResource []byte) []mu
 			continue
 		}

+		if rule.Mutation == nil {
+			continue
+		}
+
 		// Process Overlay

 		if rule.Mutation.Overlay != nil {
@@ -54,7 +59,6 @@ func (p *policyEngine) Mutate(policy kubepolicy.Policy, rawResource []byte) []mu
 				policyPatches = append(policyPatches, processedPatches...)
 			}
 		}
-
 	}

 	return policyPatches
@@ -1,44 +0,0 @@
-package mutation
-
-import (
-	"github.com/minio/minio/pkg/wildcard"
-	types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-)
-
-// kind is the type of object being manipulated
-// Checks requests kind, name and labels to fit the policy
-func IsRuleApplicableToResource(resourceRaw []byte, description types.ResourceDescription) (bool, error) {
-	kind := ParseKindFromObject(resourceRaw)
-	if description.Kind != kind {
-		return false, nil
-	}
-
-	if resourceRaw != nil {
-		meta := ParseMetadataFromObject(resourceRaw)
-		name := ParseNameFromObject(resourceRaw)
-
-		if description.Name != nil {
-			if !wildcard.Match(*description.Name, name) {
-				return false, nil
-			}
-		}
-
-		if description.Selector != nil {
-			selector, err := metav1.LabelSelectorAsSelector(description.Selector)
-			if err != nil {
-				return false, err
-			}
-
-			labelMap := ParseLabelsFromMetadata(meta)
-			if !selector.Matches(labelMap) {
-				return false, nil
-			}
-		}
-	}
-	return true, nil
-}
@@ -10,7 +10,6 @@ import (
 type PatchBytes []byte

-// Test patches on given document according to given sets.
 // Returns array from separate patches that can be applied to the document
 // Returns error ONLY in case when creation of resource should be denied.
 func ProcessPatches(patches []kubepolicy.Patch, resource []byte) ([]PatchBytes, error) {
@@ -4,6 +4,9 @@ import (
 	"encoding/json"
 	"strings"

+	"github.com/minio/minio/pkg/wildcard"
 	kubepolicy "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 )
@@ -65,3 +68,38 @@ func ParseRegexPolicyResourceName(policyResourceName string) (string, bool) {
 	}
 	return strings.Trim(regex[1], " "), true
 }
+
+// ResourceMeetsRules checks requests kind, name and labels to fit the policy
+func ResourceMeetsRules(resourceRaw []byte, description kubepolicy.ResourceDescription, gvk metav1.GroupVersionKind) (bool, error) {
+	if description.Kind != gvk.Kind {
+		return false, nil
+	}
+
+	if resourceRaw != nil {
+		meta := ParseMetadataFromObject(resourceRaw)
+		name := ParseNameFromObject(resourceRaw)
+
+		if description.Name != nil {
+			if !wildcard.Match(*description.Name, name) {
+				return false, nil
+			}
+		}
+
+		if description.Selector != nil {
+			selector, err := metav1.LabelSelectorAsSelector(description.Selector)
+			if err != nil {
+				return false, err
+			}
+
+			labelMap := ParseLabelsFromMetadata(meta)
+			if !selector.Matches(labelMap) {
+				return false, nil
+			}
+		}
+	}
+	return true, nil
+}
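ResourceMeetsRules replaces IsRuleApplicableToResource and takes the kind from the admission request's GroupVersionKind instead of parsing it out of the raw object. A simplified standalone version of its kind and name checks (path.Match from the standard library stands in for the minio wildcard matcher):

package main

import (
	"fmt"
	"path"
)

type resourceDescription struct {
	Kind string
	Name *string // optional wildcard pattern for the resource name
}

// resourceMeetsRules: kind must match exactly, then the name is matched
// against the optional wildcard pattern (label selectors omitted here).
func resourceMeetsRules(kind, name string, d resourceDescription) bool {
	if d.Kind != kind {
		return false
	}
	if d.Name != nil {
		if ok, _ := path.Match(*d.Name, name); !ok {
			return false
		}
	}
	return true
}

func main() {
	pattern := "game-*"
	d := resourceDescription{Kind: "ConfigMap", Name: &pattern}
	fmt.Println(resourceMeetsRules("ConfigMap", "game-config", d)) // true
	fmt.Println(resourceMeetsRules("Secret", "game-config", d))    // false: kind mismatch
}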
@@ -9,18 +9,18 @@ import (
 	event "github.com/nirmata/kube-policy/pkg/event"
 	"github.com/nirmata/kube-policy/pkg/policyengine/mutation"
 	policyviolation "github.com/nirmata/kube-policy/pkg/policyviolation"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 type PolicyEngine interface {
-	// ProcessMutation should be called from admission controller
+	// Mutate should be called from admission controller
 	// when there is a creation / update of the resource
-	// ProcessMutation(policy types.Policy, rawResource []byte) (patchBytes []byte, events []Events, err error)
-	Mutate(policy types.Policy, rawResource []byte) []mutation.PatchBytes
+	Mutate(policy types.Policy, rawResource []byte, gvk metav1.GroupVersionKind) []mutation.PatchBytes

-	// ProcessValidation should be called from admission controller
+	// Validate should be called from admission controller
 	// when there is a creation / update of the resource
-	// TODO: Change name to Validate
-	ProcessValidation(policy types.Policy, rawResource []byte)
+	Validate(policy types.Policy, rawResource []byte, gvk metav1.GroupVersionKind) bool

 	// ProcessExisting should be called from policy controller
 	// when there is a create / update of the policy
@@ -36,6 +36,7 @@ type policyEngine struct {
 	logger *log.Logger
 }

+// NewPolicyEngine creates new instance of policyEngine
 func NewPolicyEngine(kubeClient *kubeClient.KubeClient, logger *log.Logger) PolicyEngine {
 	return &policyEngine{
 		kubeClient: kubeClient,
@@ -54,10 +55,10 @@ func (p *policyEngine) ProcessExisting(policy types.Policy, rawResource []byte)
 			continue
 		}

-		if ok, err := mutation.IsRuleApplicableToResource(rawResource, rule.ResourceDescription); !ok {
-			p.logger.Printf("Rule %s of policy %s is not applicable to the request", rule.Name, policy.Name)
-			return nil, nil, err
-		}
+		//if ok, err := mutation.ResourceMeetsRules(rawResource, rule.ResourceDescription); !ok {
+		//	p.logger.Printf("Rule %s of policy %s is not applicable to the request", rule.Name, policy.Name)
+		//	return nil, nil, err
+		//}

 		violation, eventInfos, err := p.processRuleOnResource(policy.Name, rule, rawResource)
 		if err != nil {
@@ -1,5 +1,101 @@
 package policyengine

-import types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
-
-func (p *policyEngine) ProcessValidation(policy types.Policy, rawResource []byte) {}
+import (
+	"encoding/json"
+	"fmt"
+
+	kubepolicy "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
+	"github.com/nirmata/kube-policy/pkg/policyengine/mutation"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+func (p *policyEngine) Validate(policy kubepolicy.Policy, rawResource []byte, gvk metav1.GroupVersionKind) bool {
+	var resource interface{}
+	json.Unmarshal(rawResource, &resource)
+
+	allowed := true
+	for i, rule := range policy.Spec.Rules {
+
+		// Checks for preconditions
+		// TODO: Rework the PolicyEngine interface so that it receives not a policy but a mutation object for
+		// Mutate, a validation object for Validate, and so on. That will allow these checks to be moved out of
+		// PolicyEngine into a common part, since they are present for all of: mutation, validation, generation
+
+		err := rule.Validate()
+		if err != nil {
+			p.logger.Printf("Rule has invalid structure: rule number = %d, rule name = %s in policy %s, err: %v\n", i, rule.Name, policy.ObjectMeta.Name, err)
+			continue
+		}
+
+		ok, err := mutation.ResourceMeetsRules(rawResource, rule.ResourceDescription, gvk)
+		if err != nil {
+			p.logger.Printf("Rule has invalid data: rule number = %d, rule name = %s in policy %s, err: %v\n", i, rule.Name, policy.ObjectMeta.Name, err)
+			continue
+		}
+
+		if !ok {
+			p.logger.Printf("Rule is not applicable to the request: rule number = %d, rule name = %s in policy %s\n", i, rule.Name, policy.ObjectMeta.Name)
+			continue
+		}
+
+		if rule.Validation == nil {
+			continue
+		}
+
+		if err := traverseAndValidate(resource, rule.Validation.Pattern); err != nil {
+			p.logger.Printf("Validation with the rule %s has failed: %s, message: %s\n", rule.Name, err.Error(), *rule.Validation.Message)
+			allowed = false
+		} else {
+			p.logger.Printf("Validation rule %s is successful\n", rule.Name)
+		}
+	}
+
+	return allowed
+}
+
+func traverseAndValidate(resourcePart, patternPart interface{}) error {
+	switch pattern := patternPart.(type) {
+	case map[string]interface{}:
+		dictionary, ok := resourcePart.(map[string]interface{})
+		if !ok {
+			return fmt.Errorf("Validating error: expected %T, found %T", patternPart, resourcePart)
+		}
+
+		var err error
+		for key, value := range pattern {
+			err = traverseAndValidate(dictionary[key], value)
+		}
+		return err
+
+	case []interface{}:
+		array, ok := resourcePart.([]interface{})
+		if !ok {
+			return fmt.Errorf("Validating error: expected %T, found %T", patternPart, resourcePart)
+		}
+
+		var err error
+		for i, value := range pattern {
+			err = traverseAndValidate(array[i], value)
+		}
+		return err
+
+	case string:
+		str, ok := resourcePart.(string)
+		if !ok || !checkForWildcard(str, pattern) {
+			return fmt.Errorf("Value %v has not passed wildcard check %s", resourcePart, pattern)
+		}
+	default:
+		return fmt.Errorf("Received unknown type: %T", patternPart)
+	}
+
+	return nil
+}
+
+func checkForWildcard(value, pattern string) bool {
+	return value == pattern
+}
+
+func checkForOperator(value int, pattern string) bool {
+	return true
+}
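traverseAndValidate walks the pattern and compares each leaf against the same position in the resource; note that at this commit the map and array branches keep only the last child error, and checkForWildcard is still an exact-match stub. A standalone demo of the traversal idea (this variant stops at the first mismatch):

package main

import (
	"encoding/json"
	"fmt"
)

// traverse recursively matches pattern against resource: maps recurse by
// key, strings compare exactly (standing in for wildcard/operator checks).
func traverse(resource, pattern interface{}) error {
	switch p := pattern.(type) {
	case map[string]interface{}:
		m, ok := resource.(map[string]interface{})
		if !ok {
			return fmt.Errorf("expected %T, found %T", pattern, resource)
		}
		for k, v := range p {
			if err := traverse(m[k], v); err != nil {
				return err // stop at the first mismatch
			}
		}
	case string:
		s, _ := resource.(string)
		if s != p {
			return fmt.Errorf("value %q does not match pattern %q", s, p)
		}
	default:
		return fmt.Errorf("unknown pattern type %T", pattern)
	}
	return nil
}

func main() {
	var resource, pattern interface{}
	json.Unmarshal([]byte(`{"metadata":{"name":"game-config"}}`), &resource)
	json.Unmarshal([]byte(`{"metadata":{"name":"game-config"}}`), &pattern)
	fmt.Println(traverse(resource, pattern)) // <nil>: the resource fits the pattern
}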
@@ -1,4 +1,4 @@
-package utils
+package tls

 import (
 	"crypto/rand"
@@ -13,32 +13,48 @@ import (
 	rest "k8s.io/client-go/rest"
 )

-type MutationWebhookRegistration struct {
+// WebhookRegistrationClient is a client for registering webhooks on the cluster
+type WebhookRegistrationClient struct {
 	registrationClient *admregclient.AdmissionregistrationV1beta1Client
 	kubeclient         *kubeclient.KubeClient
 	clientConfig       *rest.Config
 }

-func NewMutationWebhookRegistration(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient) (*MutationWebhookRegistration, error) {
+// NewWebhookRegistrationClient creates a new WebhookRegistrationClient instance
+func NewWebhookRegistrationClient(clientConfig *rest.Config, kubeclient *kubeclient.KubeClient) (*WebhookRegistrationClient, error) {
 	registrationClient, err := admregclient.NewForConfig(clientConfig)
 	if err != nil {
 		return nil, err
 	}

-	return &MutationWebhookRegistration{
+	return &WebhookRegistrationClient{
 		registrationClient: registrationClient,
 		kubeclient:         kubeclient,
 		clientConfig:       clientConfig,
 	}, nil
 }

-func (mwr *MutationWebhookRegistration) Register() error {
-	webhookConfig, err := mwr.constructWebhookConfig(mwr.clientConfig)
+// Register creates admission webhook configs on the cluster
+func (wrc *WebhookRegistrationClient) Register() error {
+	// For the case when the cluster already has these configs
+	wrc.Deregister()
+
+	mutatingWebhookConfig, err := wrc.constructMutatingWebhookConfig(wrc.clientConfig)
 	if err != nil {
 		return err
 	}

-	_, err = mwr.registrationClient.MutatingWebhookConfigurations().Create(webhookConfig)
+	_, err = wrc.registrationClient.MutatingWebhookConfigurations().Create(mutatingWebhookConfig)
 	if err != nil {
 		return err
 	}
+
+	validationWebhookConfig, err := wrc.constructValidatingWebhookConfig(wrc.clientConfig)
+	if err != nil {
+		return err
+	}
+
+	_, err = wrc.registrationClient.ValidatingWebhookConfigurations().Create(validationWebhookConfig)
+	if err != nil {
+		return err
+	}
@@ -46,70 +62,109 @@ func (mwr *MutationWebhookRegistration) Register() error {
 	return nil
 }

-func (mwr *MutationWebhookRegistration) Deregister() error {
-	return mwr.registrationClient.MutatingWebhookConfigurations().Delete(config.MutationWebhookName, &meta.DeleteOptions{})
+// Deregister deletes webhook configs from the cluster
+// This function does not fail on error:
+// Register will fail if the config exists, so there is no need to fail on error here
+func (wrc *WebhookRegistrationClient) Deregister() {
+	wrc.registrationClient.MutatingWebhookConfigurations().Delete(config.MutatingWebhookConfigurationName, &meta.DeleteOptions{})
+	wrc.registrationClient.ValidatingWebhookConfigurations().Delete(config.ValidatingWebhookConfigurationName, &meta.DeleteOptions{})
 }

-func (mwr *MutationWebhookRegistration) constructWebhookConfig(configuration *rest.Config) (*admregapi.MutatingWebhookConfiguration, error) {
-	caData := ExtractCA(configuration)
+func (wrc *WebhookRegistrationClient) constructMutatingWebhookConfig(configuration *rest.Config) (*admregapi.MutatingWebhookConfiguration, error) {
+	caData := extractCA(configuration)
 	if len(caData) == 0 {
 		return nil, errors.New("Unable to extract CA data from configuration")
 	}

-	kubePolicyDeployment, err := mwr.kubeclient.GetKubePolicyDeployment()
-
-	if err != nil {
-		return nil, err
-	}
-
 	return &admregapi.MutatingWebhookConfiguration{
 		ObjectMeta: meta.ObjectMeta{
-			Name:   config.WebhookConfigName,
-			Labels: config.WebhookConfigLabels,
+			Name:   config.MutatingWebhookConfigurationName,
+			Labels: config.KubePolicyAppLabels,
 			OwnerReferences: []meta.OwnerReference{
-				meta.OwnerReference{
-					APIVersion: config.DeploymentAPIVersion,
-					Kind:       config.DeploymentKind,
-					Name:       kubePolicyDeployment.ObjectMeta.Name,
-					UID:        kubePolicyDeployment.ObjectMeta.UID,
-				},
+				wrc.constructOwner(),
 			},
 		},
 		Webhooks: []admregapi.Webhook{
-			admregapi.Webhook{
-				Name: config.MutationWebhookName,
-				ClientConfig: admregapi.WebhookClientConfig{
-					Service: &admregapi.ServiceReference{
-						Namespace: config.KubePolicyNamespace,
-						Name:      config.WebhookServiceName,
-						Path:      &config.WebhookServicePath,
-					},
-					CABundle: caData,
-				},
-				Rules: []admregapi.RuleWithOperations{
-					admregapi.RuleWithOperations{
-						Operations: []admregapi.OperationType{
-							admregapi.Create,
-						},
-						Rule: admregapi.Rule{
-							APIGroups: []string{
-								"*",
-							},
-							APIVersions: []string{
-								"*",
-							},
-							Resources: []string{
-								"*/*",
-							},
-						},
-					},
-				},
-			},
+			constructWebhook(
+				config.MutatingWebhookName,
+				config.MutatingWebhookServicePath,
+				caData),
 		},
 	}, nil
 }

-func ExtractCA(config *rest.Config) (result []byte) {
+func (wrc *WebhookRegistrationClient) constructValidatingWebhookConfig(configuration *rest.Config) (*admregapi.ValidatingWebhookConfiguration, error) {
+	caData := extractCA(configuration)
+	if len(caData) == 0 {
+		return nil, errors.New("Unable to extract CA data from configuration")
+	}
+
+	return &admregapi.ValidatingWebhookConfiguration{
+		ObjectMeta: meta.ObjectMeta{
+			Name:   config.ValidatingWebhookConfigurationName,
+			Labels: config.KubePolicyAppLabels,
+			OwnerReferences: []meta.OwnerReference{
+				wrc.constructOwner(),
+			},
+		},
+		Webhooks: []admregapi.Webhook{
+			constructWebhook(
+				config.ValidatingWebhookName,
+				config.ValidatingWebhookServicePath,
+				caData),
+		},
+	}, nil
+}
+
+func constructWebhook(name, servicePath string, caData []byte) admregapi.Webhook {
+	return admregapi.Webhook{
+		Name: name,
+		ClientConfig: admregapi.WebhookClientConfig{
+			Service: &admregapi.ServiceReference{
+				Namespace: config.KubePolicyNamespace,
+				Name:      config.WebhookServiceName,
+				Path:      &servicePath,
+			},
+			CABundle: caData,
+		},
+		Rules: []admregapi.RuleWithOperations{
+			admregapi.RuleWithOperations{
+				Operations: []admregapi.OperationType{
+					admregapi.Create,
+				},
+				Rule: admregapi.Rule{
+					APIGroups: []string{
+						"*",
+					},
+					APIVersions: []string{
+						"*",
+					},
+					Resources: []string{
+						"*/*",
+					},
+				},
+			},
+		},
+	}
+}
+
+func (wrc *WebhookRegistrationClient) constructOwner() meta.OwnerReference {
+	kubePolicyDeployment, err := wrc.kubeclient.GetKubePolicyDeployment()
+
+	if err != nil {
+		return meta.OwnerReference{}
+	}
+
+	return meta.OwnerReference{
+		APIVersion: config.DeploymentAPIVersion,
+		Kind:       config.DeploymentKind,
+		Name:       kubePolicyDeployment.ObjectMeta.Name,
+		UID:        kubePolicyDeployment.ObjectMeta.UID,
+	}
+}
+
+// extractCA extracts the CA from the passed config
+func extractCA(config *rest.Config) (result []byte) {
 	fileName := config.TLSClientConfig.CAFile

 	if fileName != "" {
@@ -120,7 +175,7 @@ func ExtractCA(config *rest.Config) (result []byte) {
 		}

 		return result
-	} else {
-		return config.TLSClientConfig.CAData
 	}
+
+	return config.TLSClientConfig.CAData
 }
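Register now calls Deregister first, so stale configurations from a previous run cannot make the Create calls fail. A toy standalone model of that delete-then-create idempotency, with a map standing in for the admissionregistration API:

package main

import (
	"errors"
	"fmt"
)

type registry map[string]string

// create fails when the named config already exists, mirroring the
// behaviour of the real Create call.
func (r registry) create(name, cfg string) error {
	if _, exists := r[name]; exists {
		return errors.New("already exists: " + name)
	}
	r[name] = cfg
	return nil
}

// delete ignores "not found", like Deregister ignoring Delete errors.
func (r registry) delete(name string) { delete(r, name) }

func register(r registry) error {
	// For the case when the registry already has these configs
	r.delete("mutating-cfg")
	r.delete("validating-cfg")

	if err := r.create("mutating-cfg", "/mutate"); err != nil {
		return err
	}
	return r.create("validating-cfg", "/validate")
}

func main() {
	r := registry{}
	fmt.Println(register(r)) // <nil>
	fmt.Println(register(r)) // still <nil>: prior configs are deleted first
}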
@@ -10,16 +10,14 @@ import (
 	"log"
 	"net/http"
 	"os"
-	"sort"
 	"time"

 	"github.com/nirmata/kube-policy/config"
 	"github.com/nirmata/kube-policy/kubeclient"
-	kubepolicy "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
 	policylister "github.com/nirmata/kube-policy/pkg/client/listers/policy/v1alpha1"
 	"github.com/nirmata/kube-policy/pkg/policyengine"
 	"github.com/nirmata/kube-policy/pkg/policyengine/mutation"
-	"github.com/nirmata/kube-policy/utils"
+	tlsutils "github.com/nirmata/kube-policy/pkg/tls"
 	v1beta1 "k8s.io/api/admission/v1beta1"
 	"k8s.io/apimachinery/pkg/labels"
 	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
@@ -37,12 +35,12 @@ type WebhookServer struct {
 // NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
 // Policy Controller and Kubernetes Client should be initialized in configuration
 func NewWebhookServer(
-	tlsPair *utils.TlsPemPair,
-	kubeclient *kubeclient.KubeClient,
+	tlsPair *tlsutils.TlsPemPair,
+	kubeClient *kubeclient.KubeClient,
 	policyLister policylister.PolicyLister,
 	logger *log.Logger) (*WebhookServer, error) {
 	if logger == nil {
-		logger = log.New(os.Stdout, "HTTPS Server: ", log.LstdFlags|log.Lshortfile)
+		logger = log.New(os.Stdout, "Webhook Server: ", log.LstdFlags)
 	}

 	if tlsPair == nil {
@@ -55,7 +53,7 @@ func NewWebhookServer(
 		return nil, err
 	}
 	tlsConfig.Certificates = []tls.Certificate{pair}
-	policyEngine := policyengine.NewPolicyEngine(kubeclient, logger)
+	policyEngine := policyengine.NewPolicyEngine(kubeClient, logger)

 	ws := &WebhookServer{
 		policyEngine: policyEngine,
@@ -64,7 +62,8 @@ func NewWebhookServer(
 	}

 	mux := http.NewServeMux()
-	mux.HandleFunc(config.WebhookServicePath, ws.serve)
+	mux.HandleFunc(config.MutatingWebhookServicePath, ws.serve)
+	mux.HandleFunc(config.ValidatingWebhookServicePath, ws.serve)

 	ws.server = http.Server{
 		Addr: ":443", // Listen on port for HTTPS requests
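Both service paths are wired to the same serve handler, which then dispatches on r.URL.Path. A minimal standalone sketch of that single-handler, two-path wiring (plain HTTP on :8080 here; the real server uses ListenAndServeTLS on :443):

package main

import (
	"fmt"
	"net/http"
)

// serve handles both registered paths and dispatches on the URL,
// just as the webhook server does below.
func serve(w http.ResponseWriter, r *http.Request) {
	switch r.URL.Path {
	case "/mutate":
		fmt.Fprintln(w, "would run mutation handler")
	case "/validate":
		fmt.Fprintln(w, "would run validation handler")
	}
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/mutate", serve)
	mux.HandleFunc("/validate", serve)
	http.ListenAndServe(":8080", mux)
}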
@@ -80,44 +79,125 @@ func NewWebhookServer(
 // Main server endpoint for all requests
 func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
-	if r.URL.Path == config.WebhookServicePath {
-		admissionReview := ws.parseAdmissionReview(r, w)
-		if admissionReview == nil {
-			return
-		}
+	admissionReview := ws.bodyToAdmissionReview(r, w)
+	if admissionReview == nil {
+		return
+	}

-		var admissionResponse *v1beta1.AdmissionResponse
-		if AdmissionIsRequired(admissionReview.Request) {
-			admissionResponse = ws.Mutate(admissionReview.Request)
-		}
+	admissionReview.Response = &v1beta1.AdmissionResponse{
+		Allowed: true,
+	}

-		if admissionResponse == nil {
-			admissionResponse = &v1beta1.AdmissionResponse{
-				Allowed: true,
-			}
-		}
+	if KindIsSupported(admissionReview.Request.Kind.Kind) {
+		switch r.URL.Path {
+		case config.MutatingWebhookServicePath:
+			admissionReview.Response = ws.HandleMutation(admissionReview.Request)
+		case config.ValidatingWebhookServicePath:
+			admissionReview.Response = ws.HandleValidation(admissionReview.Request)
+		}
+	}

-		admissionReview.Response = admissionResponse
-		admissionReview.Response.UID = admissionReview.Request.UID
+	admissionReview.Response.UID = admissionReview.Request.UID

-		responseJson, err := json.Marshal(admissionReview)
-		if err != nil {
-			http.Error(w, fmt.Sprintf("Could not encode response: %v", err), http.StatusInternalServerError)
-			return
-		}
+	responseJson, err := json.Marshal(admissionReview)
+	if err != nil {
+		http.Error(w, fmt.Sprintf("Could not encode response: %v", err), http.StatusInternalServerError)
+		return
+	}

-		ws.logger.Printf("Response body\n:%v", string(responseJson))
-		w.Header().Set("Content-Type", "application/json; charset=utf-8")
-		if _, err := w.Write(responseJson); err != nil {
-			http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
-		}
-	} else {
-		http.Error(w, fmt.Sprintf("Unexpected method path: %v", r.URL.Path), http.StatusNotFound)
-	}
+	ws.logger.Printf("Response body:\n%v", string(responseJson))
+	w.Header().Set("Content-Type", "application/json; charset=utf-8")
+	if _, err := w.Write(responseJson); err != nil {
+		http.Error(w, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
+	}
 }

+// RunAsync runs the TLS server in a separate thread and returns control immediately
+func (ws *WebhookServer) RunAsync() {
+	go func(ws *WebhookServer) {
+		err := ws.server.ListenAndServeTLS("", "")
+		if err != nil {
+			ws.logger.Fatal(err)
+		}
+	}(ws)
+
+	ws.logger.Printf("Started Webhook Server")
+}
+
+// Stop stops the TLS server and returns control after the server is shut down
+func (ws *WebhookServer) Stop() {
+	err := ws.server.Shutdown(context.Background())
+	if err != nil {
+		// Error from closing listeners, or context timeout:
+		ws.logger.Printf("Server Shutdown error: %v", err)
+		ws.server.Close()
+	}
+}
+
+// HandleMutation handles mutating webhook admission request
+func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+	ws.logger.Printf("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
+		request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)
+
+	policies, err := ws.policyLister.List(labels.NewSelector())
+	if err != nil {
+		utilruntime.HandleError(err)
+		return nil
+	}
+
+	var allPatches []mutation.PatchBytes
+	for _, policy := range policies {
+		ws.logger.Printf("Applying policy %s with %d rules\n", policy.ObjectMeta.Name, len(policy.Spec.Rules))
+
+		policyPatches := ws.policyEngine.Mutate(*policy, request.Object.Raw, request.Kind)
+		allPatches = append(allPatches, policyPatches...)
+
+		if len(policyPatches) > 0 {
+			namespace := mutation.ParseNamespaceFromObject(request.Object.Raw)
+			name := mutation.ParseNameFromObject(request.Object.Raw)
+			ws.logger.Printf("Policy %s applied to %s %s/%s", policy.Name, request.Kind.Kind, namespace, name)
+		}
+	}
+
+	patchType := v1beta1.PatchTypeJSONPatch
+	return &v1beta1.AdmissionResponse{
+		Allowed:   true,
+		Patch:     mutation.JoinPatches(allPatches),
+		PatchType: &patchType,
+	}
+}
+
+// HandleValidation handles validating webhook admission request
+func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+	ws.logger.Printf("Handling validation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
+		request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)
+
+	policies, err := ws.policyLister.List(labels.NewSelector())
+	if err != nil {
+		utilruntime.HandleError(err)
+		return nil
+	}
+
+	allowed := true
+	for _, policy := range policies {
+		ws.logger.Printf("Validating resource with policy %s with %d rules", policy.ObjectMeta.Name, len(policy.Spec.Rules))
+
+		if ok := ws.policyEngine.Validate(*policy, request.Object.Raw, request.Kind); !ok {
+			ws.logger.Printf("Validation of policy %s has failed\n", policy.ObjectMeta.Name)
+			allowed = false
+		} else {
+			ws.logger.Println("Validation is successful")
+		}
+	}
+
+	return &v1beta1.AdmissionResponse{
+		Allowed: allowed,
+	}
+}
+
 // bodyToAdmissionReview creates AdmissionReview object from request body
 // Answers to the http.ResponseWriter if request is not valid
-func (ws *WebhookServer) parseAdmissionReview(request *http.Request, writer http.ResponseWriter) *v1beta1.AdmissionReview {
+func (ws *WebhookServer) bodyToAdmissionReview(request *http.Request, writer http.ResponseWriter) *v1beta1.AdmissionReview {
 	var body []byte
 	if request.Body != nil {
 		if data, err := ioutil.ReadAll(request.Body); err == nil {
@@ -143,81 +223,6 @@ func (ws *WebhookServer) parseAdmissionReview(request *http.Request, writer http
 		http.Error(writer, "Can't decode body as AdmissionReview", http.StatusExpectationFailed)
 		return nil
 	} else {
 		ws.logger.Printf("Request body:\n%v", string(body))
 		return admissionReview
 	}
 }
-
-// Runs TLS server in separate thread and returns control immediately
-func (ws *WebhookServer) RunAsync() {
-	go func(ws *WebhookServer) {
-		err := ws.server.ListenAndServeTLS("", "")
-		if err != nil {
-			ws.logger.Fatal(err)
-		}
-	}(ws)
-}
-
-// Stops TLS server and returns control after the server is shut down
-func (ws *WebhookServer) Stop() {
-	err := ws.server.Shutdown(context.Background())
-	if err != nil {
-		// Error from closing listeners, or context timeout:
-		ws.logger.Printf("Server Shutdown error: %v", err)
-		ws.server.Close()
-	}
-}
-
-func (ws *WebhookServer) Mutate(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
-	ws.logger.Printf("AdmissionReview for Kind=%v, Namespace=%v Name=%v UID=%v patchOperation=%v UserInfo=%v",
-		request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation, request.UserInfo)
-
-	policies, err := ws.getPolicies()
-	if err != nil {
-		utilruntime.HandleError(err)
-		return nil
-	}
-	if len(policies) == 0 {
-		return nil
-	}
-
-	var allPatches []mutation.PatchBytes
-	for _, policy := range policies {
-		ws.logger.Printf("Applying policy %s with %d rules", policy.ObjectMeta.Name, len(policy.Spec.Rules))
-
-		policyPatches := ws.policyEngine.Mutate(policy, request.Object.Raw)
-		allPatches = append(allPatches, policyPatches...)
-
-		if len(policyPatches) > 0 {
-			namespace := mutation.ParseNamespaceFromObject(request.Object.Raw)
-			name := mutation.ParseNameFromObject(request.Object.Raw)
-			ws.logger.Printf("Policy %s applied to %s %s/%s", policy.Name, request.Kind.Kind, namespace, name)
-		}
-	}
-
-	patchType := v1beta1.PatchTypeJSONPatch
-	return &v1beta1.AdmissionResponse{
-		Allowed:   true,
-		Patch:     mutation.JoinPatches(allPatches),
-		PatchType: &patchType,
-	}
-}
-
-func (ws *WebhookServer) getPolicies() ([]kubepolicy.Policy, error) {
-	selector := labels.NewSelector()
-	cachedPolicies, err := ws.policyLister.List(selector)
-	if err != nil {
-		ws.logger.Printf("Error: %v", err)
-		return nil, err
-	}
-
-	var policies []kubepolicy.Policy
-	for _, elem := range cachedPolicies {
-		policies = append(policies, *elem.DeepCopy())
-	}
-
-	sort.Slice(policies, func(i, j int) bool {
-		return policies[i].CreationTimestamp.Time.Before(policies[j].CreationTimestamp.Time)
-	})
-	return policies, nil
-}
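The serve handler pre-fills the response with Allowed: true and only lets a handler overwrite it when the kind is supported, so unsupported kinds always pass through and only validation can reject a resource. A standalone sketch of that decision flow:

package main

import "fmt"

type admissionResponse struct{ Allowed bool }

// decide mirrors the serve logic: default-allow, and only a supported
// kind hitting the validation path can flip the answer.
func decide(path string, kindSupported, validationPassed bool) admissionResponse {
	resp := admissionResponse{Allowed: true} // default: do not block the request
	if kindSupported && path == "/validate" {
		resp.Allowed = validationPassed
	}
	return resp
}

func main() {
	fmt.Println(decide("/validate", true, false))  // {false}: supported kind, validation rejected
	fmt.Println(decide("/validate", false, false)) // {true}: unsupported kinds always pass
	fmt.Println(decide("/mutate", true, false))    // {true}: mutation responses stay allowed
}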
@@ -2,12 +2,11 @@ package webhooks

 import (
 	kubeclient "github.com/nirmata/kube-policy/kubeclient"
-	types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
-	mutation "github.com/nirmata/kube-policy/pkg/policyengine/mutation"
-	"k8s.io/api/admission/v1beta1"
 )

-func kindIsSupported(kind string) bool {
+// KindIsSupported checks that the kind is present in
+// the SupportedKinds defined in config
+func KindIsSupported(kind string) bool {
 	for _, k := range kubeclient.GetSupportedKinds() {
 		if k == kind {
 			return true
@@ -15,14 +14,3 @@ func kindIsSupported(kind string) bool {
 	}
 	return false
 }
-
-// Checks for admission if kind is supported
-func AdmissionIsRequired(request *v1beta1.AdmissionRequest) bool {
-	// Here you can make additional hardcoded checks
-	return kindIsSupported(request.Kind.Kind)
-}
-
-// Checks requests kind, name and labels to fit the policy
-func IsRuleApplicableToRequest(policyResource types.ResourceDescription, request *v1beta1.AdmissionRequest) (bool, error) {
-	return mutation.IsRuleApplicableToResource(request.Object.Raw, policyResource)
-}
@@ -3,6 +3,7 @@ package policycontroller
 import (
 	"fmt"
 	"log"
+	"os"
 	"time"

 	kubeClient "github.com/nirmata/kube-policy/kubeclient"
@@ -43,6 +44,10 @@ func NewPolicyController(policyInterface policyclientset.Interface,
 	logger *log.Logger,
 	kubeClient *kubeClient.KubeClient) *PolicyController {

+	if logger == nil {
+		logger = log.New(os.Stdout, "Policy Controller: ", log.LstdFlags)
+	}
+
 	controller := &PolicyController{
 		kubeClient:   kubeClient,
 		policyLister: policyInformer.Lister(),
@@ -101,21 +106,15 @@ func (pc *PolicyController) Run(stopCh <-chan struct{}) error {
 	defer utilruntime.HandleCrash()
 	defer pc.queue.ShutDown()

-	pc.logger.Printf("starting policy controller")
-
-	pc.logger.Printf("waiting for informer caches to sync")
 	if ok := cache.WaitForCacheSync(stopCh, pc.policySynced); !ok {
 		return fmt.Errorf("failed to wait for caches to sync")
 	}

-	pc.logger.Println("starting policy controller workers")
 	for i := 0; i < policyControllerWorkerCount; i++ {
 		go wait.Until(pc.runWorker, time.Second, stopCh)
 	}
-
-	pc.logger.Println("started policy controller workers")
-	<-stopCh
-	pc.logger.Println("shutting down policy controller workers")
+	pc.logger.Println("Started policy controller")

 	return nil
 }
@@ -6,7 +6,6 @@ import (
 	types "github.com/nirmata/kube-policy/pkg/apis/policy/v1alpha1"
 	event "github.com/nirmata/kube-policy/pkg/event"
-	"github.com/nirmata/kube-policy/pkg/policyengine/mutation"
 	policyviolation "github.com/nirmata/kube-policy/pkg/policyviolation"
 	"k8s.io/apimachinery/pkg/labels"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -86,7 +85,7 @@ func (pc *PolicyController) filterResourceByRule(rule types.Rule) ([]runtime.Obj
 	for _, resource := range resources {
 		// TODO:
-		rawResource, err := json.Marshal(resource)
+		//rawResource, err := json.Marshal(resource)
 		// objKind := resource.GetObjectKind()
 		// codecFactory := serializer.NewCodecFactory(runtime.NewScheme())
 		// codecFactory.EncoderForVersion()
@@ -97,9 +96,9 @@ func (pc *PolicyController) filterResourceByRule(rule types.Rule) ([]runtime.Obj
 		}

 		// filter the resource by name and label
-		if ok, _ := mutation.IsRuleApplicableToResource(rawResource, rule.ResourceDescription); ok {
-			targetResources = append(targetResources, resource)
-		}
+		//if ok, _ := mutation.ResourceMeetsRules(rawResource, rule.ResourceDescription); ok {
+		//	targetResources = append(targetResources, resource)
+		//}
 	}
 	return targetResources, nil
 }
@@ -1,7 +1,16 @@
 #!/bin/bash

+default_version="dev-testing"
+version=$1
+
+if [[ -z "$1" ]]
+then
+    echo "Using default version: ${default_version}"
+    version="${default_version}"
+fi
+
 hub_user_name="nirmata"
 project_name="kube-policy"
-version="latest"

 echo "# Ensuring Go dependencies..."
 dep ensure || exit 2