
Merge branch 'master' into 345_support_usergroup_info

# Conflicts:
#	pkg/engine/validation_test.go
#	pkg/webhooks/annotations.go
#	pkg/webhooks/annotations_test.go
#	pkg/webhooks/mutation.go
#	pkg/webhooks/server.go
#	pkg/webhooks/validation.go
Shuting Zhao, 2019-11-11 19:19:08 -08:00
commit 5a3ed62b13
77 changed files with 1173 additions and 379 deletions

main.go

@ -31,9 +31,6 @@ var (
filterK8Resources string
)
// TODO: tune resync time differently for each informer
const defaultReSyncTime = 10 * time.Second
func main() {
defer glog.Flush()
printVersionInfo()
@ -155,16 +152,19 @@ func main() {
// Start the components
pInformer.Start(stopCh)
kubeInformer.Start(stopCh)
if err := configData.Run(kubeInformer.Core().V1().ConfigMaps(), stopCh); err != nil {
glog.Fatalf("Unable loading dynamic configuration: %v\n", err)
if err := configData.Run(stopCh); err != nil {
glog.Fatalf("Unable to load dynamic configuration: %v\n", err)
}
go pc.Run(1, stopCh)
go pvc.Run(1, stopCh)
go egen.Run(1, stopCh)
go nsc.Run(1, stopCh)
//TODO: add a WaitGroup for the goroutines?
server.RunAsync()
// verifies if the admission control is enabled and active
// resync: 60 seconds
// deadline: 60 seconds (send request)
// max deadline: deadline*3 (set the deployment annotation as false)
server.RunAsync(stopCh)
<-stopCh
disableProfiling(prof)


@ -62,3 +62,13 @@ func (rs ResourceSpec) ToKey() string {
}
return rs.Kind + "." + rs.Namespace + "." + rs.Name
}
//BuildResourceKey builds the resource key from kind, namespace and name
func BuildResourceKey(kind, namespace, name string) string {
resource := ResourceSpec{
Kind: kind,
Namespace: namespace,
Name: name,
}
return resource.ToKey()
}
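
For reference, a runnable sketch of the key format these helpers produce; the namespace-empty branch is assumed from the surrounding context (it matches the pvResourceOwner.toKey logic further down in this commit):

package main

import "fmt"

// ResourceSpec mirrors the struct used above (fields assumed from context).
type ResourceSpec struct {
	Kind      string
	Namespace string
	Name      string
}

func (rs ResourceSpec) ToKey() string {
	if rs.Namespace == "" {
		return rs.Kind + "." + rs.Name // cluster-scoped resources
	}
	return rs.Kind + "." + rs.Namespace + "." + rs.Name
}

func main() {
	fmt.Println(ResourceSpec{Kind: "Deployment", Namespace: "kyverno", Name: "kyverno"}.ToKey())
	// Deployment.kyverno.kyverno
	fmt.Println(ResourceSpec{Kind: "ClusterRole", Name: "admin"}.ToKey())
	// ClusterRole.admin
}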

pkg/checker/checker.go (new file)

@ -0,0 +1,114 @@
package checker
import (
"sync"
"time"
"github.com/golang/glog"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"k8s.io/apimachinery/pkg/labels"
)
//MaxRetryCount defines the max deadline count
const MaxRetryCount int = 3
// LastReqTime records the time of the last admission request, guarded by a mutex
type LastReqTime struct {
t time.Time
mu sync.RWMutex
}
func (t *LastReqTime) Time() time.Time {
t.mu.RLock()
defer t.mu.RUnlock()
return t.t
}
func (t *LastReqTime) SetTime(tm time.Time) {
t.mu.Lock()
defer t.mu.Unlock()
glog.V(4).Info("updating last request time")
t.t = tm
}
func NewLastReqTime() *LastReqTime {
return &LastReqTime{
t: time.Now(),
}
}
func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolicyLister) bool {
policies, err := pLister.ListResources(labels.NewSelector())
if err != nil {
glog.Error(err)
}
for _, policy := range policies {
if policy.HasMutateOrValidate() {
// at least one policy has a mutate or validate rule,
// so there must be a webhook configuration on the resource
return true
}
}
return false
}
//Run runs the checker and verifies the resource updates
func (t *LastReqTime) Run(pLister kyvernolister.ClusterPolicyLister, client *dclient.Client, defaultResync time.Duration, deadline time.Duration, stopCh <-chan struct{}) {
glog.V(2).Infof("starting default resync for webhook checker with resync time %d", defaultResync)
maxDeadline := deadline * time.Duration(MaxRetryCount)
ticker := time.NewTicker(defaultResync)
var statuscontrol StatusInterface
// interface to update and increment the kyverno webhook status via annotations
statuscontrol = NewVerifyControl(client)
// send the initial update status
if checkIfPolicyWithMutateAndGenerateExists(pLister) {
if err := statuscontrol.SuccessStatus(); err != nil {
glog.Error(err)
}
}
defer ticker.Stop()
// - has received request -> set webhook status to "True"
// - no requests received
// -> if greater than deadline, send update request
// -> if greater than maxDeadline, send failed status update
for {
select {
case <-ticker.C:
// if there are no policies, then we don't have a webhook on the resource;
// the presence of such policies indirectly tells us the webhook configuration exists
if !checkIfPolicyWithMutateAndGenerateExists(pLister) {
continue
}
// time elapsed since the last admission request
timeDiff := time.Since(t.Time())
if timeDiff > maxDeadline {
glog.Infof("failed to recieve any request for more than %v ", maxDeadline)
glog.Info("Admission Control failing: Webhook is not recieving requests forwarded by api-server as per webhook configurations")
// set the status unavailable
if err := statuscontrol.FailedStatus(); err != nil {
glog.Error(err)
}
continue
}
if timeDiff > deadline {
glog.Info("Admission Control failing: Webhook is not recieving requests forwarded by api-server as per webhook configurations")
// send request to update the kyverno deployment
if err := statuscontrol.IncrementAnnotation(); err != nil {
glog.Error(err)
}
continue
}
// if the status was false before then we update it to true
// send request to update the kyverno deployment
if err := statuscontrol.SuccessStatus(); err != nil {
glog.Error(err)
}
case <-stopCh:
// handle termination signal
glog.V(2).Infof("stopping default resync for webhook checker")
return
}
}
}
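
The escalation logic in the loop above can be condensed as follows. The handler-side SetTime call is an assumption about how the webhook server feeds this checker, and the 60-second values mirror the comments in main.go; a minimal, self-contained sketch:

package main

import (
	"fmt"
	"time"
)

// Sketch of one tick of the checker's decision ladder. In the real flow the
// webhook handler would call LastReqTime.SetTime on every admission request,
// so a healthy webhook keeps timeDiff below the deadline.
func main() {
	deadline := 60 * time.Second
	maxDeadline := deadline * 3 // deadline * MaxRetryCount

	lastRequest := time.Now().Add(-2 * time.Minute) // pretend the webhook went quiet

	switch timeDiff := time.Since(lastRequest); {
	case timeDiff > maxDeadline:
		fmt.Println("set kyverno.io/webhookActive=false") // FailedStatus
	case timeDiff > deadline:
		fmt.Println("bump kyverno.io/generationCounter") // IncrementAnnotation
	default:
		fmt.Println("set kyverno.io/webhookActive=true") // SuccessStatus
	}
}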

pkg/checker/status.go (new file)

@ -0,0 +1,114 @@
package checker
import (
"strconv"
"github.com/golang/glog"
dclient "github.com/nirmata/kyverno/pkg/dclient"
)
const deployName string = "kyverno"
const deployNamespace string = "kyverno"
const annCounter string = "kyverno.io/generationCounter"
const annWebhookStats string = "kyverno.io/webhookActive"
//StatusInterface provides api to update webhook active annotations on kyverno deployments
type StatusInterface interface {
// Increments generation counter annotation
IncrementAnnotation() error
// update annotation to inform webhook is active
SuccessStatus() error
// update annotation to inform webhook is inactive
FailedStatus() error
}
//StatusControl controls the webhook status
type StatusControl struct {
client *dclient.Client
}
//SuccessStatus updates the annotation to mark the webhook as active
func (vc StatusControl) SuccessStatus() error {
return vc.setStatus("true")
}
//FailedStatus updates the annotation to mark the webhook as inactive
func (vc StatusControl) FailedStatus() error {
return vc.setStatus("false")
}
// NewVerifyControl returns a new StatusControl
func NewVerifyControl(client *dclient.Client) *StatusControl {
return &StatusControl{
client: client,
}
}
func (vc StatusControl) setStatus(status string) error {
glog.Infof("setting deployment %s in ns %s annotation %s to %s", deployName, deployNamespace, annWebhookStats, status)
var ann map[string]string
var err error
deploy, err := vc.client.GetResource("Deployment", deployNamespace, deployName)
if err != nil {
glog.V(4).Infof("failed to get deployment %s in namespace %s: %v", deployName, deployNamespace, err)
return err
}
ann = deploy.GetAnnotations()
if ann == nil {
ann = map[string]string{}
ann[annWebhookStats] = status
}
webhookAction, ok := ann[annWebhookStats]
if ok {
// annotation is present
if webhookAction == status {
glog.V(4).Infof("annotation %s already set to '%s'", annWebhookStats, status)
return nil
}
}
// set the status
ann[annWebhookStats] = status
deploy.SetAnnotations(ann)
// persist the new annotation on the deployment
_, err = vc.client.UpdateResource("Deployment", deployNamespace, deploy, false)
if err != nil {
glog.V(4).Infof("failed to update annotation %s for deployment %s in namespace %s: %v", annWebhookStats, deployName, deployNamespace, err)
return err
}
return nil
}
//IncrementAnnotation increments the generation counter annotation on the kyverno deployment
func (vc StatusControl) IncrementAnnotation() error {
glog.Infof("setting deployment %s in ns %s annotation %s", deployName, deployNamespace, annCounter)
var ann map[string]string
var err error
deploy, err := vc.client.GetResource("Deployment", deployNamespace, deployName)
if err != nil {
glog.V(4).Infof("failed to get deployment %s in namespace %s: %v", deployName, deployNamespace, err)
return err
}
ann = deploy.GetAnnotations()
if ann == nil {
ann = map[string]string{}
ann[annCounter] = "0"
}
counter, err := strconv.Atoi(ann[annCounter])
if err != nil {
glog.V(4).Infof("failed to parse string: %v", err)
return err
}
// increment counter
counter++
ann[annCounter] = strconv.Itoa(counter)
glog.Infof("incrementing annotation %s counter to %d", annCounter, counter)
deploy.SetAnnotations(ann)
// update counter
_, err = vc.client.UpdateResource("Deployment", deployNamespace, deploy, false)
if err != nil {
glog.V(4).Infof("failed to update annotation %s for deployment %s in namespace %s: %v", annCounter, deployName, deployNamespace, err)
return err
}
return nil
}
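
IncrementAnnotation presumably exists to force traffic through the webhook: updating the deployment's annotation generates an admission request that the verify webhook should observe. The counter round-trip itself is plain string arithmetic; a self-contained sketch:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	// the annotation map as it would come back from GetAnnotations()
	ann := map[string]string{"kyverno.io/generationCounter": "0"}

	counter, err := strconv.Atoi(ann["kyverno.io/generationCounter"])
	if err != nil {
		panic(err) // the real code logs and returns the error instead
	}
	counter++
	ann["kyverno.io/generationCounter"] = strconv.Itoa(counter)

	fmt.Println(ann) // map[kyverno.io/generationCounter:1]
}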


@ -15,6 +15,10 @@ const (
// ValidatingWebhookConfigurationDebug = "kyverno-validating-webhook-cfg-debug"
// ValidatingWebhookName = "nirmata.kyverno.policy-validating-webhook"
VerifyMutatingWebhookConfigurationName = "kyverno-verify-mutating-webhook-cfg"
VerifyMutatingWebhookConfigurationDebugName = "kyverno-verify-mutating-webhook-cfg-debug"
VerifyMutatingWebhookName = "nirmata.kyverno.verify-mutating-webhook"
PolicyValidatingWebhookConfigurationName = "kyverno-policy-validating-webhook-cfg"
PolicyValidatingWebhookConfigurationDebugName = "kyverno-policy-validating-webhook-cfg-debug"
PolicyValidatingWebhookName = "nirmata.kyverno.policy-validating-webhook"
@ -36,6 +40,7 @@ var (
ValidatingWebhookServicePath = "/validate"
PolicyValidatingWebhookServicePath = "/policyvalidate"
PolicyMutatingWebhookServicePath = "/policymutate"
VerifyMutatingWebhookServicePath = "/verifymutate"
SupportedKinds = []string{
"ConfigMap",


@ -29,6 +29,8 @@ type ConfigData struct {
mux sync.RWMutex
// configuration data
filters []k8Resource
// hasSynced from the ConfigMap informer
cmListerSycned cache.InformerSynced
}
// ToFilter checks if the given resource is set to be filtered in the configuration
@ -55,8 +57,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration")
}
cd := ConfigData{
client: rclient,
cmName: os.Getenv(cmNameEnv),
client: rclient,
cmName: os.Getenv(cmNameEnv),
cmListerSycned: cmInformer.Informer().HasSynced,
}
//TODO: this has been added to backward support command line arguments
// will be removed in future and the configuration will be set only via configmaps
@ -73,9 +76,9 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI
return &cd
}
func (cd *ConfigData) Run(cmInformer informers.ConfigMapInformer, stopCh <-chan struct{}) error {
func (cd *ConfigData) Run(stopCh <-chan struct{}) error {
// wait for cache to populate first time
if !cache.WaitForCacheSync(stopCh, cmInformer.Informer().HasSynced) {
if !cache.WaitForCacheSync(stopCh, cd.cmListerSycned) {
return fmt.Errorf("Configuration: Failed to sync informer cache")
}
return nil
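
The refactor above narrows Run's signature by capturing the informer's HasSynced function when ConfigData is constructed. A compact sketch of that pattern, with the kyverno-specific fields omitted:

package config

import (
	"fmt"

	informers "k8s.io/client-go/informers/core/v1"
	"k8s.io/client-go/tools/cache"
)

// ConfigData keeps only the sync check, not the informer itself.
type ConfigData struct {
	cmListerSycned cache.InformerSynced
}

func NewConfigData(cmInformer informers.ConfigMapInformer) *ConfigData {
	return &ConfigData{
		// capture HasSynced once; Run no longer needs the informer
		cmListerSycned: cmInformer.Informer().HasSynced,
	}
}

func (cd *ConfigData) Run(stopCh <-chan struct{}) error {
	// wait for the cache to populate the first time
	if !cache.WaitForCacheSync(stopCh, cd.cmListerSycned) {
		return fmt.Errorf("Configuration: Failed to sync informer cache")
	}
	return nil
}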


@ -1,7 +1,6 @@
package engine
import (
"reflect"
"time"
"github.com/golang/glog"
@ -61,8 +60,9 @@ func Mutate(policyContext PolicyContext) (response EngineResponse) {
if rule.Mutation.Overlay != nil {
var ruleResponse RuleResponse
ruleResponse, patchedResource = processOverlay(rule, resource)
if reflect.DeepEqual(ruleResponse, (RuleResponse{})) {
if ruleResponse.Success && ruleResponse.Patches == nil {
// overlay pattern does not match the resource conditions
glog.Infof(ruleResponse.Message)
continue
}
response.PolicyResponse.Rules = append(response.PolicyResponse.Rules, ruleResponse)


@ -6,7 +6,6 @@ import (
"fmt"
"reflect"
"strconv"
"strings"
"time"
"github.com/golang/glog"
@ -28,20 +27,39 @@ func processOverlay(rule kyverno.Rule, resource unstructured.Unstructured) (resp
glog.V(4).Infof("finished applying overlay rule %q (%v)", response.Name, response.RuleStats.ProcessingTime)
}()
patches, err := processOverlayPatches(resource.UnstructuredContent(), rule.Mutation.Overlay)
// resource does not satisfy the overlay pattern, we dont apply this rule
if err != nil && strings.Contains(err.Error(), "Conditions are not met") {
glog.Errorf("Resource %s/%s/%s does not meet the conditions in the rule %s with overlay pattern %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), rule.Name, rule.Mutation.Overlay)
//TODO: send zero response and not consider this as applied?
return RuleResponse{}, resource
patches, overlayerr := processOverlayPatches(resource.UnstructuredContent(), rule.Mutation.Overlay)
// resource does not satisfy the overlay pattern, we don't apply this rule
if !reflect.DeepEqual(overlayerr, overlayError{}) {
switch overlayerr.statusCode {
// condition key is not present in the resource, don't apply this rule
// consider as success
case conditionNotPresent:
glog.Infof("Resource %s/%s/%s: %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg())
response.Success = true
response.Message = overlayerr.ErrorMsg()
return response, resource
// conditions are not met, don't apply this rule
// consider as failure
case conditionFailure:
glog.Errorf("Resource %s/%s/%s does not meet the conditions in the rule %s with overlay pattern %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), rule.Name, rule.Mutation.Overlay)
//TODO: send zero response and not consider this as applied?
response.Success = false
response.Message = overlayerr.ErrorMsg()
return response, resource
// rule application failed
case overlayFailure:
glog.Errorf("Resource %s/%s/%s: failed to process overlay: %v in the rule %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg(), rule.Name)
response.Success = false
response.Message = fmt.Sprintf("failed to process overlay: %v", overlayerr.ErrorMsg())
return response, resource
default:
glog.Errorf("Resource %s/%s/%s: Unknown type of error: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.Error())
response.Success = false
response.Message = fmt.Sprintf("Unknown type of error: %v", overlayerr.Error())
return response, resource
}
}
if err != nil {
// rule application failed
response.Success = false
response.Message = fmt.Sprintf("failed to process overlay: %v", err)
return response, resource
}
// convert to RAW
resourceRaw, err := resource.MarshalJSON()
if err != nil {
@ -75,14 +93,26 @@ func processOverlay(rule kyverno.Rule, resource unstructured.Unstructured) (resp
return response, patchedResource
}
func processOverlayPatches(resource, overlay interface{}) ([][]byte, error) {
if path, err := meetConditions(resource, overlay); err != nil {
glog.V(4).Infof("Mutate rule: failed to validate condition at %s, err: %v", path, err)
return nil, fmt.Errorf("Conditions are not met at %s, %v", path, err)
func processOverlayPatches(resource, overlay interface{}) ([][]byte, overlayError) {
if path, overlayerr := meetConditions(resource, overlay); !reflect.DeepEqual(overlayerr, overlayError{}) {
switch overlayerr.statusCode {
// anchor key does not exist in the resource, skip applying policy
case conditionNotPresent:
glog.V(4).Infof("Mutate rule: policy not applied: %v at %s", overlayerr, path)
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("policy not applied: %v at %s", overlayerr.ErrorMsg(), path))
// anchor key is not satisfied in the resource, skip applying policy
case conditionFailure:
glog.V(4).Infof("Mutate rule: failed to validate condition at %s, err: %v", path, overlayerr)
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("Conditions are not met at %s, %v", path, overlayerr))
}
}
return mutateResourceWithOverlay(resource, overlay)
patchBytes, err := mutateResourceWithOverlay(resource, overlay)
if err != nil {
return patchBytes, newOverlayError(overlayFailure, err.Error())
}
return patchBytes, overlayError{}
}
// mutateResourceWithOverlay is a start of overlaying process


@ -9,15 +9,15 @@ import (
"github.com/nirmata/kyverno/pkg/engine/anchor"
)
func meetConditions(resource, overlay interface{}) (string, error) {
func meetConditions(resource, overlay interface{}) (string, overlayError) {
return checkConditions(resource, overlay, "/")
}
// resource and overlay should be the same type
func checkConditions(resource, overlay interface{}, path string) (string, error) {
func checkConditions(resource, overlay interface{}, path string) (string, overlayError) {
// overlay has no anchor, return true
if !hasNestedAnchors(overlay) {
return "", nil
return "", overlayError{}
}
// resource item exists but has different type
@ -26,10 +26,11 @@ func checkConditions(resource, overlay interface{}, path string) (string, error)
if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
if hasNestedAnchors(overlay) {
glog.V(4).Infof("Found anchor on different types of element at path %s: overlay %T, resource %T", path, overlay, resource)
return path, fmt.Errorf("Found anchor on different types of element at path %s: overlay %T %v, resource %T %v", path, overlay, overlay, resource, resource)
return path, newOverlayError(conditionFailure,
fmt.Sprintf("Found anchor on different types of element at path %s: overlay %T %v, resource %T %v", path, overlay, overlay, resource, resource))
}
return "", nil
return "", overlayError{}
}
switch typedOverlay := overlay.(type) {
@ -43,42 +44,43 @@ func checkConditions(resource, overlay interface{}, path string) (string, error)
// anchor on non map/array is invalid:
// - anchor defined on values
glog.Warningln("Found invalid conditional anchor: anchor defined on values")
return "", nil
return "", overlayError{}
}
}
func checkConditionOnMap(resourceMap, overlayMap map[string]interface{}, path string) (string, error) {
func checkConditionOnMap(resourceMap, overlayMap map[string]interface{}, path string) (string, overlayError) {
anchors, overlayWithoutAnchor := getAnchorAndElementsFromMap(overlayMap)
// validate resource with conditions
if newPath, err := validateConditionAnchorMap(resourceMap, anchors, path); err != nil {
if newPath, err := validateConditionAnchorMap(resourceMap, anchors, path); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err
}
// traverse overlay pattern to further validate conditions
if newPath, err := validateNonAnchorOverlayMap(resourceMap, overlayWithoutAnchor, path); err != nil {
if newPath, err := validateNonAnchorOverlayMap(resourceMap, overlayWithoutAnchor, path); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err
}
// empty overlayMap
return "", nil
return "", overlayError{}
}
func checkConditionOnArray(resource, overlay []interface{}, path string) (string, error) {
func checkConditionOnArray(resource, overlay []interface{}, path string) (string, overlayError) {
if len(overlay) == 0 {
glog.Infof("Mutate overlay pattern is empty, path %s", path)
return "", nil
return "", overlayError{}
}
if reflect.TypeOf(resource[0]) != reflect.TypeOf(overlay[0]) {
glog.V(4).Infof("Overlay array and resource array have elements of different types: %T and %T", overlay[0], resource[0])
return path, fmt.Errorf("Overlay array and resource array have elements of different types: %T and %T", overlay[0], resource[0])
return path, newOverlayError(conditionFailure,
fmt.Sprintf("Overlay array and resource array have elements of different types: %T and %T", overlay[0], resource[0]))
}
return checkConditionsOnArrayOfSameTypes(resource, overlay, path)
}
func validateConditionAnchorMap(resourceMap, anchors map[string]interface{}, path string) (string, error) {
func validateConditionAnchorMap(resourceMap, anchors map[string]interface{}, path string) (string, overlayError) {
for key, overlayValue := range anchors {
// skip if key does not have condition anchor
if !anchor.IsConditionAnchor(key) {
@ -91,25 +93,25 @@ func validateConditionAnchorMap(resourceMap, anchors map[string]interface{}, pat
if resourceValue, ok := resourceMap[noAnchorKey]; ok {
// compare entire resourceValue block
// return immediately on err since condition fails on this block
if newPath, err := compareOverlay(resourceValue, overlayValue, curPath); err != nil {
if newPath, err := compareOverlay(resourceValue, overlayValue, curPath); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err
}
} else {
// noAnchorKey doesn't exist in resource
return curPath, fmt.Errorf("resource field %s is not present", noAnchorKey)
return curPath, newOverlayError(conditionNotPresent, fmt.Sprintf("resource field is not present %s", noAnchorKey))
}
}
return "", nil
return "", overlayError{}
}
// compareOverlay compare values in anchormap and resourcemap
// i.e. check if B1 == B2
// overlay - (A): B1
// resource - A: B2
func compareOverlay(resource, overlay interface{}, path string) (string, error) {
func compareOverlay(resource, overlay interface{}, path string) (string, overlayError) {
if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
glog.Errorf("Found anchor on different types of element: overlay %T, resource %T\nSkip processing overlay.", overlay, resource)
return path, fmt.Errorf("")
glog.V(4).Infof("Found anchor on different types of element: overlay %T, resource %T\nSkip processing overlay.", overlay, resource)
return path, newOverlayError(conditionFailure, fmt.Sprintf("Found anchor on different types of element: overlay %T, resource %T\nSkip processing overlay.", overlay, resource))
}
switch typedOverlay := overlay.(type) {
@ -120,9 +122,9 @@ func compareOverlay(resource, overlay interface{}, path string) (string, error)
curPath := path + noAnchorKey + "/"
resourceVal, ok := typedResource[noAnchorKey]
if !ok {
return curPath, fmt.Errorf("Field %s is not present", noAnchorKey)
return curPath, newOverlayError(conditionFailure, fmt.Sprintf("field %s is not present", noAnchorKey))
}
if newPath, err := compareOverlay(resourceVal, overlayVal, curPath); err != nil {
if newPath, err := compareOverlay(resourceVal, overlayVal, curPath); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err
}
}
@ -130,7 +132,7 @@ func compareOverlay(resource, overlay interface{}, path string) (string, error)
typedResource := resource.([]interface{})
for _, overlayElement := range typedOverlay {
for _, resourceElement := range typedResource {
if newPath, err := compareOverlay(resourceElement, overlayElement, path); err != nil {
if newPath, err := compareOverlay(resourceElement, overlayElement, path); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err
}
}
@ -138,17 +140,17 @@ func compareOverlay(resource, overlay interface{}, path string) (string, error)
case string, float64, int, int64, bool, nil:
if !ValidateValueWithPattern(resource, overlay) {
glog.V(4).Infof("Mutate rule: failed validating value %v with overlay %v", resource, overlay)
return path, fmt.Errorf("failed validating value %v with overlay %v", resource, overlay)
return path, newOverlayError(conditionFailure, fmt.Sprintf("failed validating value %v with overlay %v", resource, overlay))
}
default:
return path, fmt.Errorf("overlay has unknown type %T, value %v", overlay, overlay)
return path, newOverlayError(conditionFailure, fmt.Sprintf("overlay has unknown type %T, value %v", overlay, overlay))
}
return "", nil
return "", overlayError{}
}
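
compareOverlay implements the "(key)" condition-anchor convention sketched in the comment above (overlay `(A): B1` against resource `A: B2`). A toy, dependency-free illustration of that convention; the real code also walks nested maps and arrays and supports wildcard matching via ValidateValueWithPattern:

package main

import (
	"fmt"
	"strings"
)

func isConditionAnchor(key string) bool {
	return strings.HasPrefix(key, "(") && strings.HasSuffix(key, ")")
}

func main() {
	// overlay - (port): 443 is a condition; name is a mutation
	overlay := map[string]interface{}{"(port)": 443.0, "name": "https"}
	// resource - port: 443
	resource := map[string]interface{}{"port": 443.0}

	for key, want := range overlay {
		if !isConditionAnchor(key) {
			continue // non-anchor keys are mutations, not conditions
		}
		bare := strings.Trim(key, "()") // noAnchorKey
		got, ok := resource[bare]
		if !ok {
			fmt.Printf("resource field %s is not present\n", bare) // conditionNotPresent
			continue
		}
		fmt.Printf("condition on %q met: %v\n", bare, got == want) // false -> conditionFailure
	}
}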
// validateNonAnchorOverlayMap validate anchor condition in overlay block without anchor
func validateNonAnchorOverlayMap(resourceMap, overlayWithoutAnchor map[string]interface{}, path string) (string, error) {
func validateNonAnchorOverlayMap(resourceMap, overlayWithoutAnchor map[string]interface{}, path string) (string, overlayError) {
// validate resource map (anchors could exist in resource)
for key, overlayValue := range overlayWithoutAnchor {
curPath := path + key + "/"
@ -160,14 +162,14 @@ func validateNonAnchorOverlayMap(resourceMap, overlayWithoutAnchor map[string]in
// the above case should be allowed
continue
}
if newPath, err := checkConditions(resourceValue, overlayValue, curPath); err != nil {
if newPath, err := checkConditions(resourceValue, overlayValue, curPath); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err
}
}
return "", nil
return "", overlayError{}
}
func checkConditionsOnArrayOfSameTypes(resource, overlay []interface{}, path string) (string, error) {
func checkConditionsOnArrayOfSameTypes(resource, overlay []interface{}, path string) (string, overlayError) {
switch overlay[0].(type) {
case map[string]interface{}:
return checkConditionsOnArrayOfMaps(resource, overlay, path)
@ -175,17 +177,17 @@ func checkConditionsOnArrayOfSameTypes(resource, overlay []interface{}, path str
for i, overlayElement := range overlay {
curPath := path + strconv.Itoa(i) + "/"
path, err := checkConditions(resource[i], overlayElement, curPath)
if err != nil {
if !reflect.DeepEqual(err, overlayError{}) {
return path, err
}
}
}
return "", nil
return "", overlayError{}
}
func checkConditionsOnArrayOfMaps(resource, overlay []interface{}, path string) (string, error) {
func checkConditionsOnArrayOfMaps(resource, overlay []interface{}, path string) (string, overlayError) {
var newPath string
var err error
var err overlayError
for i, overlayElement := range overlay {
for _, resourceMap := range resource {
@ -194,8 +196,8 @@ func checkConditionsOnArrayOfMaps(resource, overlay []interface{}, path string)
// when resource has multiple same blocks of the overlay block
// return true if there is one resource block meet the overlay pattern
// reference: TestMeetConditions_AtleastOneExist
if err == nil {
return "", nil
if reflect.DeepEqual(err, overlayError{}) {
return "", overlayError{}
}
}
}


@ -2,6 +2,7 @@ package engine
import (
"encoding/json"
"reflect"
"strings"
"testing"
@ -28,7 +29,7 @@ func TestMeetConditions_NoAnchor(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(nil, overlay)
assert.Assert(t, err == nil)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
func TestMeetConditions_conditionalAnchorOnMap(t *testing.T) {
@ -81,7 +82,7 @@ func TestMeetConditions_conditionalAnchorOnMap(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
assert.Assert(t, err != nil)
assert.Assert(t, !reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`
{
@ -100,8 +101,8 @@ func TestMeetConditions_conditionalAnchorOnMap(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
assert.NilError(t, err)
_, overlayerr := meetConditions(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
}
func TestMeetConditions_DifferentTypes(t *testing.T) {
@ -193,7 +194,7 @@ func TestMeetConditions_anchosInSameObject(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
assert.Error(t, err, "failed validating value 443 with overlay 444")
assert.Error(t, err, "[overlayError:0] failed validating value 443 with overlay 444")
}
func TestMeetConditions_anchorOnPeer(t *testing.T) {
@ -251,7 +252,7 @@ func TestMeetConditions_anchorOnPeer(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
func TestMeetConditions_anchorsOnMetaAndSpec(t *testing.T) {
@ -328,7 +329,7 @@ func TestMeetConditions_anchorsOnMetaAndSpec(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
var resourceRawAnchorOnPeers = []byte(`{
@ -409,7 +410,7 @@ func TestMeetConditions_anchorsOnPeer_single(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
@ -443,7 +444,7 @@ func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
assert.Error(t, err, "failed validating value true with overlay false")
assert.Error(t, err, "[overlayError:0] failed validating value true with overlay false")
overlayRaw = []byte(`{
"spec": {
@ -472,7 +473,7 @@ func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`{
"spec": {
@ -501,7 +502,7 @@ func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
@ -535,7 +536,7 @@ func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`{
"spec": {
@ -564,7 +565,7 @@ func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`{
"spec": {
@ -593,7 +594,7 @@ func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
assert.Error(t, err, "failed validating value ENV_VALUE with overlay ENV_VALUE1")
assert.Error(t, err, "[overlayError:0] failed validating value ENV_VALUE with overlay ENV_VALUE1")
}
func TestMeetConditions_AtleastOneExist(t *testing.T) {
@ -652,6 +653,6 @@ func TestMeetConditions_AtleastOneExist(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
path, err := meetConditions(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
assert.Assert(t, len(path) == 0)
}


@ -0,0 +1,36 @@
package engine
import "fmt"
type codeKey int
const (
conditionFailure codeKey = iota
conditionNotPresent
overlayFailure
)
type overlayError struct {
statusCode codeKey
errorMsg string
}
// newOverlayError returns an overlay error using the statusCode and errorMsg
func newOverlayError(code codeKey, msg string) overlayError {
return overlayError{statusCode: code, errorMsg: msg}
}
// StatusCode returns the codeKey wrapped with status code of the overlay error
func (e overlayError) StatusCode() codeKey {
return e.statusCode
}
// ErrorMsg returns the string representation of the overlay error message
func (e overlayError) ErrorMsg() string {
return e.errorMsg
}
// Error returns the string representation of the overlay error
func (e overlayError) Error() string {
return fmt.Sprintf("[overlayError:%v] %v", e.statusCode, e.errorMsg)
}
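
Since both fields of overlayError are comparable, callers could test err != (overlayError{}) directly; the surrounding code standardizes on reflect.DeepEqual for the same check. A small usage sketch of the zero-value-as-no-error convention:

package main

import "fmt"

type codeKey int

const (
	conditionFailure codeKey = iota
	conditionNotPresent
	overlayFailure
)

type overlayError struct {
	statusCode codeKey
	errorMsg   string
}

func (e overlayError) Error() string {
	return fmt.Sprintf("[overlayError:%v] %v", e.statusCode, e.errorMsg)
}

func main() {
	err := overlayError{statusCode: conditionNotPresent, errorMsg: "resource field is not present metadata"}

	// the zero value means "no error", mirroring reflect.DeepEqual(err, overlayError{})
	if err != (overlayError{}) {
		switch err.statusCode {
		case conditionNotPresent:
			fmt.Println("skip rule, treat as success:", err)
		case conditionFailure, overlayFailure:
			fmt.Println("rule failed:", err)
		}
	}
}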


@ -1,25 +0,0 @@
package engine
import (
"encoding/json"
"strings"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)
func patchOverlay(rule kyverno.Rule, rawResource []byte) ([][]byte, error) {
var resource interface{}
if err := json.Unmarshal(rawResource, &resource); err != nil {
return nil, err
}
//TODO: evaluate, Unmarshall called thrice
resourceInfo := ParseResourceInfoFromObject(rawResource)
patches, err := processOverlayPatches(resource, rule.Mutation.Overlay)
if err != nil && strings.Contains(err.Error(), "Conditions are not met") {
glog.Infof("Resource does not meet conditions in overlay pattern, resource=%s, rule=%s\n", resourceInfo, rule.Name)
return nil, nil
}
return patches, err
}


@ -65,8 +65,8 @@ func TestProcessOverlayPatches_NestedListWithAnchor(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, patches != nil)
patch := JoinPatches(patches)
@ -165,8 +165,8 @@ func TestProcessOverlayPatches_InsertIntoArray(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, patches != nil)
patch := JoinPatches(patches)
@ -286,8 +286,8 @@ func TestProcessOverlayPatches_TestInsertToArray(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, patches != nil)
patch := JoinPatches(patches)
@ -369,8 +369,8 @@ func TestProcessOverlayPatches_ImagePullPolicy(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err := ApplyPatches(resourceRaw, patches)
@ -458,7 +458,7 @@ func TestProcessOverlayPatches_ImagePullPolicy(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err = ApplyPatches(resourceRaw, patches)
@ -494,7 +494,7 @@ func TestProcessOverlayPatches_ImagePullPolicy(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
assert.Error(t, err, "Conditions are not met at /spec/template/metadata/labels/app/, failed validating value nginx with overlay nginx1")
assert.Error(t, err, "[overlayError:0] Conditions are not met at /spec/template/metadata/labels/app/, [overlayError:0] failed validating value nginx with overlay nginx1")
assert.Assert(t, len(patches) == 0)
}
@ -522,8 +522,8 @@ func TestProcessOverlayPatches_AddingAnchor(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err := ApplyPatches(resourceRaw, patches)
@ -607,8 +607,8 @@ func TestProcessOverlayPatches_AddingAnchorInsideListElement(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err := ApplyPatches(resourceRaw, patches)
@ -686,7 +686,7 @@ func TestProcessOverlayPatches_AddingAnchorInsideListElement(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
assert.NilError(t, err)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err = ApplyPatches(resourceRaw, patches)
@ -749,8 +749,8 @@ func TestProcessOverlayPatches_anchorOnPeer(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err := ApplyPatches(resourceRaw, patches)
@ -807,7 +807,7 @@ func TestProcessOverlayPatches_anchorOnPeer(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
assert.Error(t, err, "Conditions are not met at /subsets/0/ports/0/port/, failed validating value 443 with overlay 444")
assert.Error(t, err, "[overlayError:0] Conditions are not met at /subsets/0/ports/0/port/, [overlayError:0] failed validating value 443 with overlay 444")
assert.Assert(t, len(patches) == 0)
}
@ -888,8 +888,8 @@ func TestProcessOverlayPatches_insertWithCondition(t *testing.T) {
json.Unmarshal(resourceRawAnchorOnPeers, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err := ApplyPatches(resourceRaw, patches)
@ -999,8 +999,8 @@ func TestProcessOverlayPatches_InsertIfNotPresentWithConditions(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, err := processOverlayPatches(resource, overlay)
assert.NilError(t, err)
patches, overlayerr := processOverlayPatches(resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
doc, err := ApplyPatches(resourceRaw, patches)


@ -90,13 +90,14 @@ func validatePatterns(resource unstructured.Unstructured, rule kyverno.Rule) (re
// rule application failed
glog.V(4).Infof("Validation rule '%s' failed at '%s' for resource %s/%s/%s. %s: %v", rule.Name, path, resource.GetKind(), resource.GetNamespace(), resource.GetName(), rule.Validation.Message, err)
response.Success = false
response.Message = fmt.Sprintf("Validation rule '%s' failed at '%s' for resource %s/%s/%s. %s.", rule.Name, path, resource.GetKind(), resource.GetNamespace(), resource.GetName(), rule.Validation.Message)
response.Message = fmt.Sprintf("Validation error: %s\nValidation rule '%s' failed at path '%s'.",
rule.Validation.Message, rule.Name, path)
return response
}
// rule application successful
glog.V(4).Infof("rule %s pattern validated successfully on resource %s/%s/%s", rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName())
response.Success = true
response.Message = fmt.Sprintf("Validation rule '%s' succesfully validated", rule.Name)
response.Message = fmt.Sprintf("Validation rule '%s' succeeded.", rule.Name)
return response
}
@ -110,11 +111,12 @@ func validatePatterns(resource unstructured.Unstructured, rule kyverno.Rule) (re
// this pattern was successfully validated
glog.V(4).Infof("anyPattern %v successfully validated on resource %s/%s/%s", pattern, resource.GetKind(), resource.GetNamespace(), resource.GetName())
response.Success = true
response.Message = fmt.Sprintf("Validation rule '%s' anyPattern[%d] succesfully validated", rule.Name, index)
response.Message = fmt.Sprintf("Validation rule '%s' anyPattern[%d] succeeded.", rule.Name, index)
return response
}
if err != nil {
glog.V(4).Infof("anyPattern %v, failed to validate on resource %s/%s/%s at path %s: %v", pattern, resource.GetKind(), resource.GetNamespace(), resource.GetName(), path, err)
glog.V(4).Infof("Validation error: %s\nValidation rule %s anyPattern[%d] failed at path %s for %s/%s/%s",
rule.Validation.Message, rule.Name, index, path, resource.GetKind(), resource.GetNamespace(), resource.GetName())
errs = append(errs, err)
failedPaths = append(failedPaths, path)
}
@ -125,13 +127,13 @@ func validatePatterns(resource unstructured.Unstructured, rule kyverno.Rule) (re
response.Success = false
var errorStr []string
errorStr = append(errorStr, fmt.Sprintf("Validation rule '%s' failed to validate patterns defined in anyPattern. %s.", rule.Name, rule.Validation.Message))
for index, err := range errs {
glog.V(4).Infof("anyPattern[%d] failed at path %s: %v", index, failedPaths[index], err)
str := fmt.Sprintf("anyPattern[%d] failed at path %s", index, failedPaths[index])
str := fmt.Sprintf("Validation rule %s anyPattern[%d] failed at path %s.", rule.Name, index, failedPaths[index])
errorStr = append(errorStr, str)
}
response.Message = strings.Join(errorStr, "; ")
response.Message = fmt.Sprintf("Validation error: %s\n%s", rule.Validation.Message, strings.Join(errorStr, "\n"))
return response
}
}


@ -1818,8 +1818,8 @@ func TestValidate_image_tag_fail(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
msgs := []string{
"Validation rule 'validate-tag' succesfully validated",
"Validation rule 'validate-latest' failed at '/spec/containers/0/imagePullPolicy/' for resource Pod//myapp-pod. imagePullPolicy 'Always' required with tag 'latest'.",
"Validation rule 'validate-tag' succeeded.",
"Validation error: imagePullPolicy 'Always' required with tag 'latest'\nValidation rule 'validate-latest' failed at path '/spec/containers/0/imagePullPolicy/'.",
}
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
for index, r := range er.PolicyResponse.Rules {
@ -1916,8 +1916,8 @@ func TestValidate_image_tag_pass(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
msgs := []string{
"Validation rule 'validate-tag' succesfully validated",
"Validation rule 'validate-latest' succesfully validated",
"Validation rule 'validate-tag' succeeded.",
"Validation rule 'validate-latest' succeeded.",
}
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
for index, r := range er.PolicyResponse.Rules {
@ -1993,7 +1993,7 @@ func TestValidate_Fail_anyPattern(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'check-default-namespace' failed to validate patterns defined in anyPattern. A namespace is required.; anyPattern[0] failed at path /metadata/namespace/; anyPattern[1] failed at path /metadata/namespace/"}
msgs := []string{"Validation error: A namespace is required\nValidation rule check-default-namespace anyPattern[0] failed at path /metadata/namespace/.\nValidation rule check-default-namespace anyPattern[1] failed at path /metadata/namespace/."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
}
@ -2074,7 +2074,7 @@ func TestValidate_host_network_port(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'validate-host-network-port' failed at '/spec/containers/0/ports/0/hostPort/' for resource Pod//nginx-host-network. Host network and port are not allowed."}
msgs := []string{"Validation error: Host network and port are not allowed\nValidation rule 'validate-host-network-port' failed at path '/spec/containers/0/ports/0/hostPort/'."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2163,7 +2163,7 @@ func TestValidate_anchor_arraymap_pass(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'validate-host-path' succesfully validated"}
msgs := []string{"Validation rule 'validate-host-path' succeeded."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2251,7 +2251,7 @@ func TestValidate_anchor_arraymap_fail(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'validate-host-path' failed at '/spec/volumes/0/hostPath/path/' for resource Pod//image-with-hostpath. Host path '/var/lib/' is not allowed."}
msgs := []string{"Validation error: Host path '/var/lib/' is not allowed\nValidation rule 'validate-host-path' failed at path '/spec/volumes/0/hostPath/path/'."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2320,7 +2320,7 @@ func TestValidate_anchor_map_notfound(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'pod rule 2' succesfully validated"}
msgs := []string{"Validation rule 'pod rule 2' succeeded."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2392,7 +2392,7 @@ func TestValidate_anchor_map_found_valid(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'pod rule 2' succesfully validated"}
msgs := []string{"Validation rule 'pod rule 2' succeeded."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2464,7 +2464,7 @@ func TestValidate_anchor_map_found_invalid(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'pod rule 2' failed at '/spec/securityContext/runAsNonRoot/' for resource Pod//myapp-pod. pod: validate run as non root user."}
msgs := []string{"Validation error: pod: validate run as non root user\nValidation rule 'pod rule 2' failed at path '/spec/securityContext/runAsNonRoot/'."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2538,7 +2538,7 @@ func TestValidate_AnchorList_pass(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'pod image rule' succesfully validated"}
msgs := []string{"Validation rule 'pod image rule' succeeded."}
for index, r := range er.PolicyResponse.Rules {
t.Log(r.Message)
@ -2761,7 +2761,7 @@ func TestValidate_existenceAnchor_pass(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'pod image rule' succesfully validated"}
msgs := []string{"Validation rule 'pod image rule' succeeded."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2848,7 +2848,7 @@ func TestValidate_negationAnchor_deny(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'validate-host-path' failed at '/spec/volumes/0/hostPath/' for resource Pod//image-with-hostpath. Host path is not allowed."}
msgs := []string{"Validation error: Host path is not allowed\nValidation rule 'validate-host-path' failed at path '/spec/volumes/0/hostPath/'."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])
@ -2934,7 +2934,7 @@ func TestValidate_negationAnchor_pass(t *testing.T) {
resourceUnstructured, err := ConvertToUnstructured(rawResource)
assert.NilError(t, err)
er := Validate(PolicyContext{Policy: policy, Resource: *resourceUnstructured})
msgs := []string{"Validation rule 'validate-host-path' succesfully validated"}
msgs := []string{"Validation rule 'validate-host-path' succeeded."}
for index, r := range er.PolicyResponse.Rules {
assert.Equal(t, r.Message, msgs[index])

pkg/policy/cleanup.go (new file)

@ -0,0 +1,127 @@
package policy
import (
"fmt"
"reflect"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/policyviolation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
)
func (pc *PolicyController) cleanUpPolicyViolation(pResponse engine.PolicyResponse) {
// 1- check if there is a violation on the resource (via label selector)
// 2- check if there is a violation on the owner
//    - recursively get the owner by querying the API server for owner information of the resource
// there can be multiple violations as a resource can have multiple owners
pvs, err := getPv(pc.pvLister, pc.client, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Namespace, pResponse.Resource.Name)
if err != nil {
glog.Errorf("failed to cleanUp violations: %v", err)
}
for _, pv := range pvs {
if reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) {
continue
}
glog.V(4).Infof("cleanup violations %s, on %s/%s/%s", pv.Name, pv.Spec.Kind, pv.Spec.Namespace, pv.Spec.Name)
if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {
glog.Errorf("failed to delete policy violation: %v", err)
continue
}
}
}
func getPv(pvLister kyvernolister.ClusterPolicyViolationLister, client *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.ClusterPolicyViolation, error) {
var pvs []kyverno.ClusterPolicyViolation
var err error
// Check Violation on resource
pv, err := getPVOnResource(pvLister, policyName, kind, namespace, name)
if err != nil {
glog.V(4).Infof("error while fetching pv: %v", err)
return []kyverno.ClusterPolicyViolation{}, err
}
if !reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) {
// found a pv on resource
pvs = append(pvs, pv)
return pvs, nil
}
// Check Violations on owner
pvs, err = getPVonOwnerRef(pvLister, client, policyName, kind, namespace, name)
if err != nil {
glog.V(4).Infof("error while fetching pv: %v", err)
return []kyverno.ClusterPolicyViolation{}, err
}
return pvs, nil
}
func getPVonOwnerRef(pvLister kyvernolister.ClusterPolicyViolationLister, dclient *dclient.Client, policyName, kind, namespace, name string) ([]kyverno.ClusterPolicyViolation, error) {
var pvs []kyverno.ClusterPolicyViolation
// get resource
resource, err := dclient.GetResource(kind, namespace, name)
if err != nil {
glog.V(4).Infof("error while fetching the resource: %v", err)
return pvs, err
}
// get owners
// getOwners returns nil if there is any error
owners := policyviolation.GetOwners(dclient, *resource)
// as we can have multiple top level owners to a resource
// check if pv exists on each one
// does not check for cycles
for _, owner := range owners {
pv, err := getPVOnResource(pvLister, policyName, owner.Kind, owner.Namespace, owner.Name)
if err != nil {
glog.Errorf("error while fetching resource owners: %v", err)
continue
}
pvs = append(pvs, pv)
}
return pvs, nil
}
// Won't claim objects; just looks them up based on selectors and owner references
func getPVOnResource(pvLister kyvernolister.ClusterPolicyViolationLister, policyName, kind, namespace, name string) (kyverno.ClusterPolicyViolation, error) {
resourceKey := kyverno.BuildResourceKey(kind, namespace, name)
labelMap := map[string]string{"policy": policyName, "resource": resourceKey}
pvSelector, err := converLabelToSelector(labelMap)
if err != nil {
glog.Errorf("failed to generate label sector for policy %s and resourcr %s", policyName, resourceKey)
return kyverno.ClusterPolicyViolation{}, fmt.Errorf("failed to generate label sector for policy %s and resourcr %s", policyName, resourceKey)
}
pvs, err := pvLister.List(pvSelector)
if err != nil {
glog.Errorf("unable to list policy violations with label selector %v: %v", pvSelector, err)
return kyverno.ClusterPolicyViolation{}, err
}
if len(pvs) > 1 {
glog.Errorf("more than one policy violation exists with labels %v", labelMap)
return kyverno.ClusterPolicyViolation{}, fmt.Errorf("more than one policy violation exists with labels %v", labelMap)
}
if len(pvs) == 0 {
glog.V(4).Infof("policy violation does not exist with labels %v", labelMap)
return kyverno.ClusterPolicyViolation{}, nil
}
// return a copy of pv
return *pvs[0], nil
}
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
ls := &metav1.LabelSelector{}
err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
if err != nil {
return nil, err
}
policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
return nil, fmt.Errorf("invalid label selector: %v", err)
}
return policyViolationSelector, nil
}
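
getPVOnResource finds violations through the policy/resource label pair. A sketch of the selector it ends up with, using MatchLabels directly instead of the Convert_Map_string_To_string_To_v1_LabelSelector helper (equivalent for a plain string map); the label values are illustrative:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	labelMap := map[string]string{
		"policy":   "check-host-path",       // pResponse.Policy
		"resource": "Pod.default.myapp-pod", // BuildResourceKey(kind, namespace, name)
	}
	selector, err := metav1.LabelSelectorAsSelector(&metav1.LabelSelector{MatchLabels: labelMap})
	if err != nil {
		panic(err)
	}
	fmt.Println(selector) // policy=check-host-path,resource=Pod.default.myapp-pod
}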


@ -387,7 +387,6 @@ func (pc *PolicyController) processNextWorkItem() bool {
return false
}
defer pc.queue.Done(key)
err := pc.syncHandler(key.(string))
pc.handleErr(err, key)
@ -451,11 +450,10 @@ func (pc *PolicyController) syncPolicy(key string) error {
return err
}
// process policies on existing resources
policyInfos := pc.processExistingResources(*p)
engineResponses := pc.processExistingResources(*p)
// report errors
pc.report(policyInfos)
pc.cleanupAndReport(engineResponses)
// fetch the policy again via the aggregator to remain consistent
// return pc.statusAggregator.UpdateViolationCount(p.Name, pvList)
return pc.syncStatusOnly(p, pvList)
}


@ -9,20 +9,24 @@ import (
"github.com/nirmata/kyverno/pkg/policyviolation"
)
func (pc *PolicyController) report(engineResponses []engine.EngineResponse) {
// generate events
// generate policy violations
for _, policyInfo := range engineResponses {
// events
// success - policy applied on resource
// failure - policy/rule failed to apply on the resource
reportEvents(policyInfo, pc.eventGen)
// policy violations
// failure - policy/rule failed to apply on the resource
// for each policy-resource response
// - has violation -> report
// - no violation -> cleanup policy violations(resource or resource owner)
func (pc *PolicyController) cleanupAndReport(engineResponses []engine.EngineResponse) {
for _, eResponse := range engineResponses {
if !eResponse.IsSuccesful() {
// failure - policy/rule failed to apply on the resource
reportEvents(eResponse, pc.eventGen)
// generate policy violation
// Only created on resource, not resource owners
policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
} else {
// cleanup existing violations if any
// if there is any error in clean up, we don't re-queue the resource
// it will be re-tried in the next controller cache resync
pc.cleanUpPolicyViolation(eResponse.PolicyResponse)
}
}
// generate policy violation
policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
}
//reportEvents generates events for the failed resources


@ -269,7 +269,7 @@ func (pvc *PolicyViolationController) syncBlockedResource(curPv *kyverno.Cluster
for _, resource := range resources.Items {
glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
owners := getOwners(pvc.client, resource)
owners := GetOwners(pvc.client, resource)
// owner of resource matches violation resourceSpec
// remove policy violation as the blocked request got created
if containsOwner(owners, curPv) {


@ -133,7 +133,7 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
violatedRules := newViolatedRules(er, msg)
// create violation on resource owner (if exist) when action is set to enforce
owners := getOwners(dclient, er.PatchedResource)
owners := GetOwners(dclient, er.PatchedResource)
// standaloneresource, set pvResourceSpec with resource itself
if len(owners) == 0 {
@ -146,13 +146,7 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
}
for _, owner := range owners {
// resource has owner, set pvResourceSpec with owner info
pvResourceSpec := kyverno.ResourceSpec{
Namespace: owner.namespace,
Kind: owner.kind,
Name: owner.name,
}
pvs = append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, pvResourceSpec, violatedRules))
pvs = append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, owner, violatedRules))
}
return
}
@ -208,32 +202,18 @@ func converLabelToSelector(labelMap map[string]string) (labels.Selector, error)
return policyViolationSelector, nil
}
type pvResourceOwner struct {
kind string
namespace string
name string
}
func (o pvResourceOwner) toKey() string {
if o.namespace == "" {
return o.kind + "." + o.name
}
return o.kind + "." + o.namespace + "." + o.name
}
// pass in unstr rather than using the client to get the unstr
//GetOwners takes the unstructured object directly rather than fetching it via the client,
// because GetResource panics (it returns a list) when the name is empty
func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvResourceOwner {
func GetOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []kyverno.ResourceSpec {
resourceOwners := unstr.GetOwnerReferences()
if len(resourceOwners) == 0 {
return []pvResourceOwner{pvResourceOwner{
kind: unstr.GetKind(),
namespace: unstr.GetNamespace(),
name: unstr.GetName(),
return []kyverno.ResourceSpec{kyverno.ResourceSpec{
Kind: unstr.GetKind(),
Namespace: unstr.GetNamespace(),
Name: unstr.GetName(),
}}
}
var owners []pvResourceOwner
var owners []kyverno.ResourceSpec
for _, resourceOwner := range resourceOwners {
unstrParent, err := dclient.GetResource(resourceOwner.Kind, unstr.GetNamespace(), resourceOwner.Name)
if err != nil {
@ -241,7 +221,7 @@ func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvRes
return nil
}
owners = append(owners, getOwners(dclient, *unstrParent)...)
owners = append(owners, GetOwners(dclient, *unstrParent)...)
}
return owners
}
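Since GetOwners now returns kyverno.ResourceSpec values, callers can build violation lookup keys directly. A minimal sketch, assuming the v1alpha1 API import path; the sample owner values are hypothetical:

````go
package main

import (
	"fmt"

	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
)

func main() {
	// Stand-in for the slice returned by GetOwners (values are illustrative).
	owners := []kyverno.ResourceSpec{
		{Kind: "Deployment", Namespace: "default", Name: "nginx"},
		{Kind: "ClusterRole", Name: "view"}, // cluster-scoped: no namespace
	}
	for _, owner := range owners {
		// ToKey yields "kind.namespace.name", or "kind.name" when the
		// namespace is empty, matching the toKey format removed above.
		fmt.Println(owner.ToKey())
	}
}
````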
@ -274,11 +254,11 @@ func newViolatedRules(er engine.EngineResponse, msg string) (violatedRules []kyv
return
}
func containsOwner(owners []pvResourceOwner, pv *kyverno.ClusterPolicyViolation) bool {
curOwner := pvResourceOwner{
kind: pv.Spec.ResourceSpec.Kind,
name: pv.Spec.ResourceSpec.Name,
namespace: pv.Spec.ResourceSpec.Namespace,
func containsOwner(owners []kyverno.ResourceSpec, pv *kyverno.ClusterPolicyViolation) bool {
curOwner := kyverno.ResourceSpec{
Kind: pv.Spec.ResourceSpec.Kind,
Namespace: pv.Spec.ResourceSpec.Namespace,
Name: pv.Spec.ResourceSpec.Name,
}
for _, targetOwner := range owners {
@ -301,26 +281,26 @@ func validDependantForDeployment(client appsv1.AppsV1Interface, curPv kyverno.Cl
return false
}
owner := pvResourceOwner{
kind: curPv.Spec.ResourceSpec.Kind,
namespace: curPv.Spec.ResourceSpec.Namespace,
name: curPv.Spec.ResourceSpec.Name,
owner := kyverno.ResourceSpec{
Kind: curPv.Spec.ResourceSpec.Kind,
Namespace: curPv.Spec.ResourceSpec.Namespace,
Name: curPv.Spec.ResourceSpec.Name,
}
deploy, err := client.Deployments(owner.namespace).Get(owner.name, metav1.GetOptions{})
deploy, err := client.Deployments(owner.Namespace).Get(owner.Name, metav1.GetOptions{})
if err != nil {
glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
return false
}
expectReplicaset, err := deployutil.GetNewReplicaSet(deploy, client)
if err != nil {
glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.Kind, owner.Namespace, owner.Name, err)
return false
}
if reflect.DeepEqual(expectReplicaset, v1.ReplicaSet{}) {
glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.namespace, owner.kind, owner.name)
glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.Namespace, owner.Kind, owner.Name)
return false
}
var actualReplicaset *v1.ReplicaSet

View file

@ -3,6 +3,7 @@ package testrunner
import (
"bytes"
"encoding/json"
"flag"
"io/ioutil"
"os"
ospath "path"
@ -133,25 +134,21 @@ func runScenario(t *testing.T, s *scenarioT) bool {
}
func runTestCase(t *testing.T, tc scaseT) bool {
// apply policy
// convert policy -> kyverno.Policy
policy := loadPolicy(t, tc.Input.Policy)
if policy == nil {
t.Error("Policy no loaded")
t.Error("Policy not loaded")
t.FailNow()
}
// convert resource -> unstructured.Unstructured
resource := loadPolicyResource(t, tc.Input.Resource)
if resource == nil {
t.Error("Resources no loaded")
t.Error("Resources not loaded")
t.FailNow()
}
var er engine.EngineResponse
// Mutation
er = engine.Mutate(*policy, *resource)
// validate te response
er = engine.Mutate(engine.PolicyContext{Policy: *policy, Resource: *resource})
t.Log("---Mutation---")
validateResource(t, er.PatchedResource, tc.Expected.Mutation.PatchedResource)
validateResponse(t, er.PolicyResponse, tc.Expected.Mutation.PolicyResponse)
@ -161,9 +158,7 @@ func runTestCase(t *testing.T, tc scaseT) bool {
resource = &er.PatchedResource
}
// Validation
er = engine.Validate(*policy, *resource)
// validate the response
er = engine.Validate(engine.PolicyContext{Policy: *policy, Resource: *resource})
t.Log("---Validation---")
validateResponse(t, er.PolicyResponse, tc.Expected.Validation.PolicyResponse)
@ -201,8 +196,8 @@ func validateGeneratedResources(t *testing.T, client *client.Client, policy kyve
}
func validateResource(t *testing.T, responseResource unstructured.Unstructured, expectedResourceFile string) {
resourcePrint := func(obj unstructured.Unstructured) {
t.Log("-----patched resource----")
resourcePrint := func(obj unstructured.Unstructured, msg string) {
t.Logf("-----%s----", msg)
if data, err := obj.MarshalJSON(); err == nil {
t.Log(string(data))
}
@ -218,8 +213,8 @@ func validateResource(t *testing.T, responseResource unstructured.Unstructured,
return
}
resourcePrint(responseResource)
resourcePrint(*expectedResource)
resourcePrint(responseResource, "response resource")
resourcePrint(*expectedResource, "expected resource")
// compare the resources
if !reflect.DeepEqual(responseResource, *expectedResource) {
t.Error("failed: response resource returned does not match expected resource")
@ -291,7 +286,8 @@ func compareRules(t *testing.T, rule engine.RuleResponse, expectedRule engine.Ru
t.Errorf("rule type: expected %s, recieved %s", expectedRule.Type, rule.Type)
}
// message
if rule.Message != expectedRule.Message {
// compare messages if expected rule message is not empty
if expectedRule.Message != "" && rule.Message != expectedRule.Message {
t.Errorf("rule message: expected %s, recieved %s", expectedRule.Message, rule.Message)
}
// //TODO patches
@ -442,6 +438,9 @@ func loadPolicy(t *testing.T, path string) *kyverno.ClusterPolicy {
}
func testScenario(t *testing.T, path string) {
flag.Set("logtostderr", "true")
// flag.Set("v", "8")
scenario, err := loadScenario(t, path)
if err != nil {
t.Error(err)

View file

@ -22,10 +22,6 @@ func Test_validate_healthChecks(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_validate_healthChecks.yaml")
}
func Test_validate_nonRootUsers(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/scenario_validate_nonRootUser.yaml")
}
func Test_generate_networkPolicy(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/scenario_generate_networkPolicy.yaml")
}
@ -127,6 +123,18 @@ func Test_validate_disallow_helm_tiller(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_helm_tiller.yaml")
}
func Test_mutate_add_safe_to_evict_annotation(t *testing.T) {
func Test_add_safe_to_evict_annotation(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_mutate_safe-to-evict.yaml")
}
func Test_add_safe_to_evict_annotation2(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_mutate_safe-to-evict2.yaml")
}
func Test_known_ingress(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_known_ingress_class.yaml")
}
func Test_unknown_ingress(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_unknown_ingress_class.yaml")
}

View file

@ -0,0 +1,78 @@
package webhookconfig
import (
"fmt"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
admregapi "k8s.io/api/admissionregistration/v1beta1"
errorsapi "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (wrc *WebhookRegistrationClient) constructVerifyMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
return &admregapi.MutatingWebhookConfiguration{
ObjectMeta: v1.ObjectMeta{
Name: config.VerifyMutatingWebhookConfigurationName,
OwnerReferences: []v1.OwnerReference{
wrc.constructOwner(),
},
},
Webhooks: []admregapi.Webhook{
generateWebhook(
config.VerifyMutatingWebhookName,
config.VerifyMutatingWebhookServicePath,
caData,
true,
wrc.timeoutSeconds,
"deployments/*",
"apps",
"v1",
[]admregapi.OperationType{admregapi.Update},
),
},
}
}
func (wrc *WebhookRegistrationClient) constructDebugVerifyMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.VerifyMutatingWebhookServicePath)
glog.V(4).Infof("Debug VerifyMutatingWebhookConfig is registered with url %s\n", url)
return &admregapi.MutatingWebhookConfiguration{
ObjectMeta: v1.ObjectMeta{
Name: config.VerifyMutatingWebhookConfigurationDebugName,
},
Webhooks: []admregapi.Webhook{
generateDebugWebhook(
config.VerifyMutatingWebhookName,
url,
caData,
true,
wrc.timeoutSeconds,
"deployments/*",
"apps",
"v1",
[]admregapi.OperationType{admregapi.Update},
),
},
}
}
func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig() {
// Mutating webhook configuration
var err error
var mutatingConfig string
if wrc.serverIP != "" {
mutatingConfig = config.VerifyMutatingWebhookConfigurationDebugName
} else {
mutatingConfig = config.VerifyMutatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
err = wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("verify webhook configuration %s, does not exits. not deleting", mutatingConfig)
} else if err != nil {
glog.Errorf("failed to delete verify webhook configuration %s: %v", mutatingConfig, err)
} else {
glog.V(4).Infof("succesfully deleted verify webhook configuration %s", mutatingConfig)
}
}

View file

@ -6,6 +6,7 @@ import (
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
admregapi "k8s.io/api/admissionregistration/v1beta1"
errorsapi "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
@ -104,3 +105,43 @@ func (wrc *WebhookRegistrationClient) contructDebugPolicyMutatingWebhookConfig(c
},
}
}
// removePolicyWebhookConfigurations removes mutating and validating webhook configurations, if already present
// webhookConfigurations are re-created later
func (wrc *WebhookRegistrationClient) removePolicyWebhookConfigurations() {
// Validating webhook configuration
var err error
var validatingConfig string
if wrc.serverIP != "" {
validatingConfig = config.PolicyValidatingWebhookConfigurationDebugName
} else {
validatingConfig = config.PolicyValidatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
err = wrc.registrationClient.ValidatingWebhookConfigurations().Delete(validatingConfig, &v1.DeleteOptions{})
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", validatingConfig, err)
} else {
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", validatingConfig)
}
// Mutating webhook configuration
var mutatingConfig string
if wrc.serverIP != "" {
mutatingConfig = config.PolicyMutatingWebhookConfigurationDebugName
} else {
mutatingConfig = config.PolicyMutatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
err = wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", mutatingConfig, err)
} else {
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", mutatingConfig)
}
}

View file

@ -64,6 +64,13 @@ func (wrc *WebhookRegistrationClient) Register() error {
if err := wrc.createPolicyMutatingWebhookConfiguration(); err != nil {
return err
}
// create Verify mutating webhook configuration resource
// that is used to check if admission control is enabled or not
if err := wrc.createVerifyMutatingWebhookConfiguration(); err != nil {
return err
}
return nil
}
@ -184,6 +191,36 @@ func (wrc *WebhookRegistrationClient) createPolicyMutatingWebhookConfiguration()
return nil
}
func (wrc *WebhookRegistrationClient) createVerifyMutatingWebhookConfiguration() error {
var caData []byte
var config *admregapi.MutatingWebhookConfiguration
// read CA data from
// 1) secret(config)
// 2) kubeconfig
if caData = wrc.readCaData(); caData == nil {
return errors.New("Unable to extract CA data from configuration")
}
// if serverIP is specified we assume it's debug mode
if wrc.serverIP != "" {
// debug mode
// clientConfig - URL
config = wrc.constructDebugVerifyMutatingWebhookConfig(caData)
} else {
// clientConfig - service
config = wrc.constructVerifyMutatingWebhookConfig(caData)
}
// create mutating webhook configuration resource
if _, err := wrc.registrationClient.MutatingWebhookConfigurations().Create(config); err != nil {
return err
}
glog.V(4).Infof("created Mutating Webhook Configuration %s ", config.Name)
return nil
}
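The readCaData fallback named in the comment above (secret first, then kubeconfig) is not part of this hunk; a rough sketch of that lookup order using client-go's rest.Config, where the secret bytes are a hypothetical input:

````go
package webhookconfig

import (
	"io/ioutil"

	"k8s.io/client-go/rest"
)

// caDataSketch prefers CA bytes from a configured secret and falls back to
// the CA carried by the client (kube)config. Illustrative only; the actual
// readCaData implementation may differ.
func caDataSketch(cfg *rest.Config, secretCA []byte) []byte {
	if len(secretCA) > 0 {
		return secretCA
	}
	if len(cfg.TLSClientConfig.CAData) > 0 {
		return cfg.TLSClientConfig.CAData
	}
	if b, err := ioutil.ReadFile(cfg.TLSClientConfig.CAFile); err == nil {
		return b
	}
	return nil
}
````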
// DeregisterAll deletes webhook configs from cluster
// This function does not fail on error:
// Register will fail if the config exists, so there is no need to fail on error
@ -198,44 +235,7 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
// mutating and validating webhook configurations for the Policy CRD resource
wrc.removePolicyWebhookConfigurations()
}
// removePolicyWebhookConfigurations removes mutating and validating webhook configurations, if already present
// webhookConfigurations are re-created later
func (wrc *WebhookRegistrationClient) removePolicyWebhookConfigurations() {
// Validating webhook configuration
var err error
var validatingConfig string
if wrc.serverIP != "" {
validatingConfig = config.PolicyValidatingWebhookConfigurationDebugName
} else {
validatingConfig = config.PolicyValidatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
err = wrc.registrationClient.ValidatingWebhookConfigurations().Delete(validatingConfig, &v1.DeleteOptions{})
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", validatingConfig, err)
} else {
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", validatingConfig)
}
// Mutating webhook configuration
var mutatingConfig string
if wrc.serverIP != "" {
mutatingConfig = config.PolicyMutatingWebhookConfigurationDebugName
} else {
mutatingConfig = config.PolicyMutatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
err = wrc.registrationClient.MutatingWebhookConfigurations().Delete(mutatingConfig, &v1.DeleteOptions{})
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", mutatingConfig, err)
} else {
glog.V(4).Infof("succesfully deleted policy webhook configuration %s", mutatingConfig)
}
// mutating webhook configuration used to verify that the admission control flow is working
wrc.removeVerifyWebhookMutatingWebhookConfig()
}

View file

@ -3,10 +3,9 @@ package webhooks
import (
"encoding/json"
"github.com/nirmata/kyverno/pkg/engine"
jsonpatch "github.com/evanphx/json-patch"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/engine"
)
const (

14
pkg/webhooks/checker.go Normal file
View file

@ -0,0 +1,14 @@
package webhooks
import (
"github.com/golang/glog"
"k8s.io/api/admission/v1beta1"
)
func (ws *WebhookServer) handleVerifyRequest(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
glog.V(4).Infof("Receive request in mutating webhook '/verify': Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)
return &v1beta1.AdmissionResponse{
Allowed: true,
}
}
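Read together with the verify webhook configuration added earlier in this commit (registered for Update on deployments/* in the apps group), the flow appears to be: a periodic update to the kyverno deployment makes the API server call '/verify', and serve() refreshes lastReqTime. A sketch of such a probe with client-go; the annotation key and helper are assumptions, not the actual checker code:

````go
package webhooks

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// probeDeployment patches a dummy annotation onto the kyverno deployment so
// the verify mutating webhook receives an Update admission request.
// Sketch only: the annotation key and function name are hypothetical.
func probeDeployment(kubeClient kubernetes.Interface, namespace, name string) error {
	patch := []byte(fmt.Sprintf(
		`{"metadata":{"annotations":{"kyverno.io/last-probe":%q}}}`,
		time.Now().Format(time.RFC3339)))
	_, err := kubeClient.AppsV1().Deployments(namespace).Patch(
		name, types.StrategicMergePatchType, patch)
	return err
}
````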

View file

@ -27,7 +27,6 @@ func (ws *WebhookServer) handlePolicyValidation(request *v1beta1.AdmissionReques
Message: fmt.Sprintf("Failed to unmarshal policy admission request err %v", err),
}}
}
if err := policyvalidate.Validate(*policy); err != nil {
admissionResp = &v1beta1.AdmissionResponse{
Allowed: false,

View file

@ -11,6 +11,7 @@ import (
"time"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/checker"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1alpha1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
@ -49,6 +50,8 @@ type WebhookServer struct {
configHandler config.Interface
// channel for cleanup notification
cleanUp chan<- struct{}
// last request time
lastReqTime *checker.LastReqTime
}
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
@ -93,6 +96,7 @@ func NewWebhookServer(
rbLister: rbInformer.Lister(),
crbLister: crbInformer.Lister(),
cleanUp: cleanUp,
lastReqTime: checker.NewLastReqTime(),
}
mux := http.NewServeMux()
mux.HandleFunc(config.MutatingWebhookServicePath, ws.serve)
@ -113,6 +117,9 @@ func NewWebhookServer(
// Main server endpoint for all requests
func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
// for every request received on the endpoint, update the last request time;
// this is used to verify that admission control is active
ws.lastReqTime.SetTime(time.Now())
admissionReview := ws.bodyToAdmissionReview(r, w)
if admissionReview == nil {
return
@ -124,19 +131,24 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
// Do not process the admission requests for kinds that are in filterKinds for filtering
request := admissionReview.Request
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
// Resource CREATE
// Resource UPDATE
switch r.URL.Path {
case config.MutatingWebhookServicePath:
switch r.URL.Path {
case config.VerifyMutatingWebhookServicePath:
// we do not apply filters as this endpoint is used explicitly
// to watch the kyverno deployment and verify that admission control is enabled
admissionReview.Response = ws.handleVerifyRequest(request)
case config.MutatingWebhookServicePath:
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
admissionReview.Response = ws.handleAdmissionRequest(request)
case config.PolicyValidatingWebhookServicePath:
}
case config.PolicyValidatingWebhookServicePath:
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
admissionReview.Response = ws.handlePolicyValidation(request)
case config.PolicyMutatingWebhookServicePath:
}
case config.PolicyMutatingWebhookServicePath:
if !ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
admissionReview.Response = ws.handlePolicyMutation(request)
}
}
admissionReview.Response.UID = request.UID
responseJSON, err := json.Marshal(admissionReview)
@ -216,7 +228,7 @@ func (ws *WebhookServer) handleAdmissionRequest(request *v1beta1.AdmissionReques
}
// RunAsync TLS server in separate thread and returns control immediately
func (ws *WebhookServer) RunAsync() {
func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
go func(ws *WebhookServer) {
glog.V(3).Infof("serving on %s\n", ws.server.Addr)
if err := ws.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
@ -224,6 +236,11 @@ func (ws *WebhookServer) RunAsync() {
}
}(ws)
glog.Info("Started Webhook Server")
// verifies that admission control is enabled and active
// resync: 60 seconds
// deadline: 60 seconds (send request)
// max deadline: deadline*3 (set the deployment annotation as false)
go ws.lastReqTime.Run(ws.pLister, ws.client, 60*time.Second, 60*time.Second, stopCh)
}
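The checker's Run loop itself is outside this hunk; a sketch of the resync/deadline behavior the comments above describe, with placeholder actions where the real code drives the status control:

````go
package webhooks

import (
	"time"

	"github.com/nirmata/kyverno/pkg/checker"
)

// verifySketch compares the age of the last admission request against the
// deadline and the max deadline (deadline * MaxRetryCount) on every resync
// tick. Illustrative only; the case bodies are placeholders.
func verifySketch(last *checker.LastReqTime, resync, deadline time.Duration, stopCh <-chan struct{}) {
	ticker := time.NewTicker(resync)
	defer ticker.Stop()
	maxDeadline := deadline * time.Duration(checker.MaxRetryCount)
	for {
		select {
		case <-ticker.C:
			elapsed := time.Since(last.Time())
			switch {
			case elapsed > maxDeadline:
				// no request within deadline*3: set the deployment annotation to false
			case elapsed > deadline:
				// send a probe request so '/verify' refreshes the timestamp
			}
		case <-stopCh:
			return
		}
	}
}
````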
// Stop TLS server and returns control after the server is shut down

View file

@ -33,8 +33,12 @@ func toBlockResource(engineReponses []engine.EngineResponse) bool {
func getErrorMsg(engineReponses []engine.EngineResponse) string {
var str []string
var resourceInfo string
for _, er := range engineReponses {
if !er.IsSuccesful() {
// the resource is identical across engineReponses as this is called once per admission request
resourceInfo = fmt.Sprintf("%s/%s/%s", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
str = append(str, fmt.Sprintf("failed policy %s", er.PolicyResponse.Policy))
for _, rule := range er.PolicyResponse.Rules {
if !rule.Success {
@ -43,7 +47,7 @@ func getErrorMsg(engineReponses []engine.EngineResponse) string {
}
}
}
return strings.Join(str, "\n")
return fmt.Sprintf("Resource %s: %s", resourceInfo, strings.Join(str, "\n"))
}
//ArrayFlags to store filterkinds

View file

@ -11,7 +11,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
)
// HandleValidation handles validating webhook admission request
// handleValidation handles validating webhook admission request
// If there are no errors in the validation rules we apply the generation rules
// patchedResource is the (resource + patches) after applying mutation rules
func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest,

View file

@ -12,18 +12,25 @@ To avoid pod container from having visibility to host process space, validate th
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-hostpid-hostipc
name: validate-host-pid-ipc
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: Sharing the host's PID namespace allows visibility of processes
on the host, potentially exposing process information. Sharing the host's IPC namespace allows
the container process to communicate with processes on the host. To prevent pod containers from
having visibility into the host process space, validate that 'hostPID' and 'hostIPC' are set to 'false'.
spec:
validationFailureAction: enforce
rules:
- name: validate-hostpid-hostipc
- name: validate-host-pid-ipc
match:
resources:
kinds:
- Pod
validate:
message: "Disallow use of host's pid namespace and host's ipc namespace"
message: "Use of host PID and IPC namespaces is not allowed"
pattern:
spec:
(hostPID): "!true"
hostIPC: false
=(hostPID): "false"
=(hostIPC): "false"
````

View file

@ -21,10 +21,9 @@ spec:
kinds:
- Pod
validate:
message: "Deny untrusted registries"
message: "Unknown image registry"
pattern:
spec:
containers:
- image: "k8s.gcr.io/* | gcr.io/*"
````

View file

@ -0,0 +1,32 @@
# Require a known ingress class
It can be useful to restrict Ingress resources to a set of known ingress classes that are allowed in the cluster.
You can customize this policy to allow ingress classes that are configured in the cluster.
## Policy YAML
[known_ingress.yaml](best_practices/known_ingress.yaml)
````yaml
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: known-ingress
annotations:
policies.kyverno.io/category: Ingress
policies.kyverno.io/description:
spec:
rules:
- name: known-ingress
match:
resources:
kinds:
- Ingress
validate:
message: "Unknown ingress class"
pattern:
metadata:
annotations:
kubernetes.io/ingress.class: "F5 | nginx"
````

View file

@ -0,0 +1,51 @@
# Mutate pods with `emptyDir` and `hostPath` with `safe-to-evict`
The Kubernetes cluster autoscaler does not evict pods that use `hostPath` or `emptyDir` volumes. To allow eviction of these pods, the following annotation must be added to the pods:
````yaml
cluster-autoscaler.kubernetes.io/safe-to-evict: "true"
````
This policy matches and mutates pods with `emptyDir` and `hostPath` volumes, to add the `safe-to-evict` annotation if it is not specified.
## Policy YAML
[add_safe_to_evict_annotation.yaml](best_practices/add_safe-to-evict_annotation.yaml)
````yaml
apiVersion: "kyverno.io/v1alpha1"
kind: "ClusterPolicy"
metadata:
name: "annotate-emptydir-hostpath"
spec:
rules:
- name: "empty-dir-add-safe-to-evict"
match:
resources:
kinds:
- "Pod"
mutate:
overlay:
metadata:
annotations:
+(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
spec:
volumes:
- (emptyDir): {}
- name: "host-path-add-safe-to-evict"
match:
resources:
kinds:
- "Pod"
mutate:
overlay:
metadata:
annotations:
+(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
spec:
volumes:
- (hostPath):
path: "*"
````

View file

@ -39,7 +39,7 @@ These policies are highly recommended.
1. [Run as non-root user](RunAsNonRootUser.md)
2. [Disable privileged containers and disallow privilege escalation](DisablePrivilegedContainers.md)
3. [Disallow new capabilities](DisallowNewCapabilities.md)
4. [Require Read-only root filesystem](RequireReadOnlyFS.md)
4. [Require read-only root filesystem](RequireReadOnlyFS.md)
5. [Disallow use of bind mounts (`hostPath` volumes)](DisallowHostFS.md)
6. [Disallow docker socket bind mount](DisallowDockerSockMount.md)
7. [Disallow `hostNetwork` and `hostPort`](DisallowHostNetworkPort.md)
@ -52,15 +52,14 @@ These policies are highly recommended.
14. [Require pod `livenessProbe` and `readinessProbe`](RequirePodProbes.md)
15. [Default deny all ingress traffic](DefaultDenyAllIngress.md)
16. [Disallow Helm Tiller](DisallowHelmTiller.md)
17. [Add `safe-to-evict` for pods with `emptyDir` and `hostPath` volumes](MutateSafeToEvict.md)
## Additional Policies
These policies provide additional best practices and are worthy of close consideration. They may require workload-specific changes.
16. [Limit use of `NodePort` services](LimitNodePort.md)
17. [Limit automount of Service Account credentials](DisallowAutomountSACredentials.md)
18. [Configure Linux Capabilities](AssignLinuxCapabilities.md)
19. [Limit Kernel parameter access](ConfigureKernelParmeters.md)
18. [Limit use of `NodePort` services](LimitNodePort.md)
19. [Limit automount of Service Account credentials](DisallowAutomountSACredentials.md)
20. [Configure Linux Capabilities](AssignLinuxCapabilities.md)
21. [Limit Kernel parameter access](ConfigureKernelParmeters.md)
22. [Restrict ingress class](KnownIngressClass.md)

View file

@ -1,13 +1,15 @@
apiVersion: "kyverno.io/v1alpha1"
kind: "ClusterPolicy"
metadata:
name: "annotate-emptydir"
name: "annotate-emptydir-hostpath"
annotations:
policies.kyverno.io/category: AutoScaling
policies.kyverno.io/description:
policies.kyverno.io/description: The Kubernetes cluster autoscaler does not evict pods that
use hostPath or emptyDir volumes. To allow eviction of these pods, the annotation
cluster-autoscaler.kubernetes.io/safe-to-evict=true must be added to the pods.
spec:
rules:
- name: "add-safe-to-evict-annotation"
- name: "empty-dir-add-safe-to-evict"
match:
resources:
kinds:
@ -19,4 +21,18 @@ spec:
+(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
spec:
volumes:
- (emptyDir): {}
- (emptyDir): {}
- name: "host-path-add-safe-to-evict"
match:
resources:
kinds:
- "Pod"
mutate:
overlay:
metadata:
annotations:
+(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
spec:
volumes:
- (hostPath):
path: "*"

View file

@ -1,24 +1,33 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-host-network-hostport
name: host-network-port
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: Using 'hostPort' and 'hostNetwork' allows pods to share
the host network stack, allowing potential snooping of network traffic from an application pod.
spec:
rules:
- name: validate-host-network-hostport
- name: validate-host-network
match:
resources:
kinds:
- Pod
validate:
message: "Defining hostNetwork and hostPort are not allowed"
message: "Use of hostNetwork is not allowed"
pattern:
spec:
=(hostNetwork): false
- name: validate-host-port
match:
resources:
kinds:
- Pod
validate:
message: "Use of hostPort is not allowed"
pattern:
spec:
(hostNetwork): false
containers:
- name: "*"
ports:
- hostPort: null
=(ports):
- X(hostPort): null

View file

@ -1,7 +1,7 @@
apiVersion: kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: validate-hostpid-hostipc
name: validate-host-pid-ipc
annotations:
policies.kyverno.io/category: Security
policies.kyverno.io/description: Sharing the host's PID namespace allows visibility of processes
@ -9,15 +9,16 @@ metadata:
the container process to communicate with processes on the host. To prevent pod containers from
having visibility into the host process space, validate that 'hostPID' and 'hostIPC' are set to 'false'.
spec:
validationFailureAction: audit
rules:
- name: validate-hostpid-hostipc
- name: validate-host-pid-ipc
match:
resources:
kinds:
- Pod
validate:
message: "Disallow use of host's pid namespace and host's ipc namespace"
message: "Use of host PID and IPC namespaces is not allowed"
pattern:
spec:
(hostPID): "!true"
hostIPC: false
=(hostPID): "false"
=(hostIPC): "false"

View file

@ -0,0 +1,20 @@
apiVersion : kyverno.io/v1alpha1
kind: ClusterPolicy
metadata:
name: known-ingress
annotations:
policies.kyverno.io/category: Ingress
policies.kyverno.io/description:
spec:
rules:
- name: known-ingress
match:
resources:
kinds:
- Ingress
validate:
message: "Unknown ingress class"
pattern:
metadata:
annotations:
kubernetes.io/ingress.class: "F5 | nginx"

View file

@ -2,8 +2,9 @@ apiVersion: v1
kind: Pod
metadata:
name: pod-with-emptydir
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: true
metadata:
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: true
spec:
containers:
- image: k8s.gcr.io/test-webserver

View file

@ -0,0 +1,17 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-with-hostpath
annotations:
cluster-autoscaler.kubernetes.io/safe-to-evict: true
spec:
containers:
- image: k8s.gcr.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /tmp/foo
name: host-volume
volumes:
- name: host-volume
hostPath:
path: "/tmp/foo"

View file

@ -0,0 +1,14 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: test-ingress
annotations:
kubernetes.io/ingress.class: haproxy
spec:
rules:
- http:
paths:
- path: /testpath
backend:
serviceName: test
servicePort: 80

View file

@ -0,0 +1,15 @@
apiVersion: networking.k8s.io/v1beta1
kind: Ingress
metadata:
name: test-ingress
annotations:
kubernetes.io/ingress.class: nginx
nginx.ingress.kubernetes.io/rewrite-target: /
spec:
rules:
- http:
paths:
- path: /testpath
backend:
serviceName: test
servicePort: 80

View file

@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: pod-with-hostpath
spec:
containers:
- image: k8s.gcr.io/test-webserver
name: test-container
volumeMounts:
- mountPath: /tmp/foo
name: host-volume
volumes:
- name: host-volume
hostPath:
path: "/tmp/foo"

View file

@ -28,5 +28,5 @@ expected:
rules:
- name: check-cpu-memory-limits
type: Validation
message: Validation rule 'check-cpu-memory-limits' succesfully validated
message: Validation rule 'check-cpu-memory-limits' succeeded.
success: true

View file

@ -15,5 +15,5 @@ expected:
rules:
- name: validate-default-proc-mount
type: Validation
message: "Validation rule 'validate-default-proc-mount' succesfully validated"
message: "Validation rule 'validate-default-proc-mount' succeeded."
success: true

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: prevent-mounting-default-serviceaccount
type: Validation
message: Validation rule 'prevent-mounting-default-serviceaccount' failed at '/spec/serviceAccountName/' for resource Pod//pod-with-default-sa. Prevent mounting of default service account.
message: "Validation error: Prevent mounting of default service account\nValidation rule 'prevent-mounting-default-serviceaccount' failed at path '/spec/serviceAccountName/'."
success: false

View file

@ -14,9 +14,9 @@ expected:
rules:
- name: check-readinessProbe-exists
type: Validation
message: Validation rule 'check-readinessProbe-exists' succesfully validated
message: Validation rule 'check-readinessProbe-exists' succeeded.
success: true
- name: check-livenessProbe-exists
type: Validation
message: Validation rule 'check-livenessProbe-exists' succesfully validated
message: Validation rule 'check-livenessProbe-exists' succeeded.
success: true

View file

@ -15,5 +15,5 @@ expected:
rules:
- name: validate-selinux-options
type: Validation
message: "Validation rule 'validate-selinux-options' failed at '/spec/containers/0/securityContext/seLinuxOptions/' for resource Pod/default/busybox-selinux. SELinux level is required."
message: "Validation error: SELinux level is required\nValidation rule 'validate-selinux-options' failed at path '/spec/containers/0/securityContext/seLinuxOptions/'."
success: false

View file

@ -15,5 +15,5 @@ expected:
rules:
- name: validate-volumes-whitelist
type: Validation
message: "Validation rule 'validate-volumes-whitelist' anyPattern[2] succesfully validated"
message: "Validation rule 'validate-volumes-whitelist' anyPattern[2] succeeded."
success: true

View file

@ -6,14 +6,14 @@ expected:
mutation:
patchedresource: test/output/pod-with-emptydir.yaml
policyresponse:
policy: annotate-emptydir
policy: annotate-emptydir-hostpath
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: pod-with-emptydir
rules:
- name: add-safe-to-evict-annotation
- name: empty-dir-add-safe-to-evict
type: Mutation
success: true
message: "successfully processed overlay"

View file

@ -0,0 +1,19 @@
# file path is relative to project root
input:
policy: samples/best_practices/add_safe-to-evict_annotation.yaml
resource: test/resources/pod-with-hostpath.yaml
expected:
mutation:
patchedresource: test/output/pod-with-hostpath.yaml
policyresponse:
policy: annotate-emptydir-hostpath
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: pod-with-hostpath
rules:
- name: host-path-add-safe-to-evict
type: Mutation
success: true
message: "successfully processed overlay"

View file

@ -14,9 +14,9 @@ expected:
rules:
- name: image-tag-notspecified
type: Validation
message: "Validation rule 'image-tag-notspecified' succesfully validated"
message: "Validation rule 'image-tag-notspecified' succeeded."
success: true
- name: image-tag-not-latest
type: Validation
message: "Validation rule 'image-tag-not-latest' failed at '/spec/containers/0/image/' for resource Pod//myapp-pod. Using 'latest' image tag is restricted. Set image tag to a specific version."
message: "Validation error: Using 'latest' image tag is restricted. Set image tag to a specific version\nValidation rule 'image-tag-not-latest' failed at path '/spec/containers/0/image/'."
success: false

View file

@ -14,9 +14,9 @@ expected:
rules:
- name: image-tag-notspecified
type: Validation
message: "Validation rule 'image-tag-notspecified' succesfully validated"
message: "Validation rule 'image-tag-notspecified' succeeded."
success: true
- name: image-tag-not-latest
type: Validation
message: "Validation rule 'image-tag-not-latest' succesfully validated"
message: "Validation rule 'image-tag-not-latest' succeeded."
success: true

View file

@ -14,6 +14,6 @@ expected:
rules:
- name: deny-runasrootuser
type: Validation
message: "Validation rule 'deny-runasrootuser' anyPattern[1] succesfully validated"
message: "Validation rule 'deny-runasrootuser' anyPattern[1] succeeded."
success: true

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: disallow-automoutingapicred
type: Validation
message: Validation rule 'disallow-automoutingapicred' succesfully validated
message: Validation rule 'disallow-automoutingapicred' succeeded.
success: true

View file

@ -16,10 +16,10 @@ expected:
rules:
- name: check-default-namespace
type: Validation
message: "Validation rule 'check-default-namespace' failed at '/metadata/namespace/' for resource Pod/default/myapp-pod. Using 'default' namespace is restricted."
message: "Validation error: Using 'default' namespace is restricted\nValidation rule 'check-default-namespace' failed at path '/metadata/namespace/'."
success: false
- name: check-namespace-exist
type: Validation
message: "Validation rule 'check-namespace-exist' succesfully validated"
message: "Validation rule 'check-namespace-exist' succeeded."
success: true

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: validate-docker-sock-mount
type: Validation
message: Validation rule 'validate-docker-sock-mount' failed at '/spec/volumes/' for resource Pod//pod-with-docker-sock-mount. Use of the Docker Unix socket is not allowed.
message: "Validation error: Use of the Docker Unix socket is not allowed\nValidation rule 'validate-docker-sock-mount' failed at path '/spec/volumes/'."
success: false

View file

@ -12,5 +12,5 @@ expected:
rules:
- name: validate-helm-tiller
type: Validation
message: "Validation rule 'validate-helm-tiller' failed at '/spec/containers/0/image/' for resource Pod//pod-helm-tiller. Helm Tiller is not allowed."
message: "Validation error: Helm Tiller is not allowed\nValidation rule 'validate-helm-tiller' failed at path '/spec/containers/0/image/'."
success: false

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: deny-use-of-host-fs
type: Validation
message: Validation rule 'deny-use-of-host-fs' failed at '/spec/volumes/0/hostPath/' for resource Pod//image-with-hostpath. Host path is not allowed.
message: "Validation error: Host path is not allowed\nValidation rule 'deny-use-of-host-fs' failed at path '/spec/volumes/0/hostPath/'."
success: false

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: deny-use-of-host-fs
type: Validation
message: Validation rule 'deny-use-of-host-fs' succesfully validated
message: Validation rule 'deny-use-of-host-fs' succeeded.
success: true

View file

@ -5,14 +5,16 @@ input:
expected:
validation:
policyresponse:
policy: validate-host-network-hostport
policy: host-network-port
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: "nginx-host-network"
rules:
- name: validate-host-network-hostport
- name: validate-host-network
type: Validation
message: "Validation rule 'validate-host-network-hostport' failed at '/spec/containers/0/ports/0/hostPort/' for resource Pod//nginx-host-network. Defining hostNetwork and hostPort are not allowed."
success: false
success: true
- name: validate-host-port
type: Validation
success: false

View file

@ -5,14 +5,13 @@ input:
expected:
validation:
policyresponse:
policy: validate-hostpid-hostipc
policy: validate-host-pid-ipc
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: "nginx-with-hostpid"
rules:
- name: validate-hostpid-hostipc
- name: validate-host-pid-ipc
type: Validation
message: Validation rule 'validate-hostpid-hostipc' failed at '/spec/hostIPC/' for resource Pod//nginx-with-hostpid. Disallow use of host's pid namespace and host's ipc namespace.
success: false

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: deny-new-capabilities
type: Validation
message: Validation rule 'deny-new-capabilities' failed to validate patterns defined in anyPattern. Capabilities cannot be added.; anyPattern[0] failed at path /spec/; anyPattern[1] failed at path /spec/containers/0/securityContext/capabilities/add/
message: "Validation error: Capabilities cannot be added\nValidation rule deny-new-capabilities anyPattern[0] failed at path /spec/.\nValidation rule deny-new-capabilities anyPattern[1] failed at path /spec/containers/0/securityContext/capabilities/add/."
success: false

View file

@ -13,5 +13,5 @@ expected:
rules:
- name: disallow-node-port
type: Validation
message: Validation rule 'disallow-node-port' failed at '/spec/type/' for resource Service//my-service. Disallow service of type NodePort.
message: "Validation error: Disallow service of type NodePort\nValidation rule 'disallow-node-port' failed at path '/spec/type/'."
success: false

View file

@ -14,6 +14,6 @@ expected:
rules:
- name: deny-privileged-priviligedescalation
type: Validation
message: "Validation rule 'deny-privileged-priviligedescalation' failed to validate patterns defined in anyPattern. Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false.; anyPattern[0] failed at path /spec/securityContext/; anyPattern[1] failed at path /spec/containers/0/securityContext/allowPrivilegeEscalation/"
message: "Validation error: Privileged mode is not allowed. Set allowPrivilegeEscalation and privileged to false\nValidation rule deny-privileged-priviligedescalation anyPattern[0] failed at path /spec/securityContext/.\nValidation rule deny-privileged-priviligedescalation anyPattern[1] failed at path /spec/containers/0/securityContext/allowPrivilegeEscalation/."
success: false

View file

@ -0,0 +1,18 @@
# file path relative to project root
input:
policy: samples/best_practices/known_ingress.yaml
resource: test/resources/ingress-nginx.yaml
expected:
validation:
policyresponse:
policy: known-ingress
resource:
kind: Ingress
apiVersion: v1
namespace: ''
name: test-ingress
rules:
- name: known-ingress
type: Validation
message: Validation rule 'known-ingress' succeeded.
success: true

View file

@ -1,18 +0,0 @@
# file path relative to project root
input:
policy: samples/best_practices/deny_runasrootuser.yaml
resource: test/resources/resource_validate_nonRootUser.yaml
expected:
validation:
policyresponse:
policy: validate-deny-runasrootuser
resource:
kind: Pod
apiVersion: v1
namespace: ''
name: sec-ctx-unprivileged
rules:
- name: deny-runasrootuser
type: Validation
message: Validation rule 'deny-runasrootuser' failed to validate patterns defined in anyPattern. Root user is not allowed. Set runAsNonRoot to true.; anyPattern[0] failed at path /spec/securityContext/; anyPattern[1] failed at path /spec/containers/0/securityContext/
success: false

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: check-probes
type: Validation
message: Validation rule 'check-probes' failed at '/spec/containers/0/livenessProbe/' for resource Pod//myapp-pod. Liveness and readiness probes are required.
message: "Validation error: Liveness and readiness probes are required\nValidation rule 'check-probes' failed at path '/spec/containers/0/livenessProbe/'."
success: false

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: check-resource-request-limit
type: Validation
message: Validation rule 'check-resource-request-limit' failed at '/spec/containers/0/resources/limits/cpu/' for resource Pod//myapp-pod. CPU and memory resource requests and limits are required.
message: "Validation error: CPU and memory resource requests and limits are required\nValidation rule 'check-resource-request-limit' failed at path '/spec/containers/0/resources/limits/cpu/'."
success: false

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: validate-readonly-rootfilesystem
type: Validation
message: Validation rule 'validate-readonly-rootfilesystem' failed at '/spec/containers/0/securityContext/readOnlyRootFilesystem/' for resource Pod//ghost-with-readonly-rootfilesystem. Container require read-only rootfilesystem.
message: "Validation error: Container require read-only rootfilesystem\nValidation rule 'validate-readonly-rootfilesystem' failed at path '/spec/containers/0/securityContext/readOnlyRootFilesystem/'."
success: false

View file

@ -14,5 +14,5 @@ expected:
rules:
- name: trusted-registries
type: Validation
message: Validation rule 'trusted-registries' succesfully validated
message: Validation rule 'trusted-registries' succeeded.
success: true

View file

@ -0,0 +1,18 @@
# file path relative to project root
input:
policy: samples/best_practices/known_ingress.yaml
resource: test/resources/ingress-haproxy.yaml
expected:
validation:
policyresponse:
policy: known-ingress
resource:
kind: Ingress
apiVersion: v1
namespace: ''
name: test-ingress
rules:
- name: known-ingress
type: Validation
message: "Validation error: Unknown ingress class\nValidation rule 'known-ingress' failed at path '/metadata/annotations/kubernetes.io/ingress.class/'."
success: false

View file

@ -15,5 +15,5 @@ expected:
rules:
- name: validate-container-capablities
type: Validation
message: "Validation rule 'validate-container-capablities' failed at '/spec/containers/0/securityContext/capabilities/add/0/' for resource Pod//add-capabilities. Allow certain linux capability."
message: "Validation error: Allow certain linux capability\nValidation rule 'validate-container-capablities' failed at path '/spec/containers/0/securityContext/capabilities/add/0/'."
success: false

View file

@ -15,13 +15,13 @@ expected:
rules:
- name: validate-userid
type: Validation
message: Validation rule 'validate-userid' succesfully validated
message: Validation rule 'validate-userid' succeeded.
success: true
- name: validate-groupid
type: Validation
message: Validation rule 'validate-groupid' succesfully validated
message: Validation rule 'validate-groupid' succeeded.
success: true
- name: validate-fsgroup
type: Validation
message: Validation rule 'validate-fsgroup' succesfully validated
message: Validation rule 'validate-fsgroup' succeeded.
success: true

View file

@ -15,5 +15,5 @@ expected:
rules:
- name: allow-portrange-with-sysctl
type: Validation
message: "Validation rule 'allow-portrange-with-sysctl' failed at '/spec/securityContext/sysctls/0/value/' for resource Pod//nginx. Allowed port range is from 1024 to 65535."
message: "Validation error: Allowed port range is from 1024 to 65535\nValidation rule 'allow-portrange-with-sysctl' failed at path '/spec/securityContext/sysctls/0/value/'."
success: false