mirror of https://github.com/kyverno/kyverno.git synced 2024-12-14 11:57:48 +00:00

Bugfix/659 support wildcards for namespaces (#871)

* support wildcards for namespaces (see the matching sketch after this message)

* do not annotate the resource unless the policy is an autogen policy

* close HTTP body

* improve messages

* remove policy store

The policy store was not fully implemented; it simply provided a way to list all
policies and get a policy by name, which can be done via the standard client-go
interfaces (see the lister sketch after this message).

We need to revisit and design a better PolicyStore that provides fast
lookups for matching policies based on names, namespaces, etc.

* handle wildcard namespaces in background processing

* fix unit tests: 1) remove platform-dependent path usage 2) remove policy store

* add test case for mutate with wildcard namespaces
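
The wildcard support means each entry in a rule's namespaces list is treated as a pattern rather than an exact string: the engine's checkNameSpace now calls wildcard.Match, and the background controller expands patterns such as test-foo-* against the namespaces known to its informer. The snippet below is a minimal sketch of that matching behaviour, assuming the github.com/minio/minio/pkg/wildcard package the repository already depends on; hasWildcard mirrors the helper added in this commit, while matchNamespace is an illustrative stand-in for the engine's checkNameSpace.

package main

import (
    "fmt"
    "strings"

    "github.com/minio/minio/pkg/wildcard"
)

// hasWildcard reports whether a namespace entry is a pattern,
// i.e. whether it contains '*' or '?'.
func hasWildcard(s string) bool {
    return strings.Contains(s, "*") || strings.Contains(s, "?")
}

// matchNamespace checks a resource namespace against the rule's namespace
// list, treating every entry as a wildcard pattern. Entries without
// wildcard characters still behave as exact matches.
func matchNamespace(ruleNamespaces []string, resourceNamespace string) bool {
    for _, pattern := range ruleNamespaces {
        if wildcard.Match(pattern, resourceNamespace) {
            return true
        }
    }
    return false
}

func main() {
    // "test-foo-*" is the pattern used by the new mutate test scenario.
    fmt.Println(matchNamespace([]string{"test-foo-*"}, "test-foo-aaaaaaaaa-bbbbbbbb")) // true
    fmt.Println(matchNamespace([]string{"test-foo-*"}, "default"))                     // false
    fmt.Println(hasWildcard("test-foo-*"), hasWildcard("default"))                     // true false
}
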
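With the policy store removed, the list-all and get-by-name lookups come from the generated ClusterPolicy lister of the shared informer, which is what the status sync and webhook server now receive directly in this diff. Below is a rough sketch of those two lookups against the lister interface; informer wiring and callers are omitted, and listAndGet is an illustrative helper rather than a function from this commit.

package example

import (
    "fmt"

    kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
    "k8s.io/apimachinery/pkg/labels"
)

// listAndGet shows the two operations the removed policy store offered,
// expressed against the standard shared-informer lister instead.
func listAndGet(lister kyvernolister.ClusterPolicyLister, name string) error {
    // policyMetaStore.ListAll() becomes lister.List(...) (or ListResources in the webhook).
    policies, err := lister.List(labels.NewSelector())
    if err != nil {
        return err
    }
    fmt.Println("cluster policies in cache:", len(policies))

    // policyMetaStore.Get(name) becomes lister.Get(name).
    policy, err := lister.Get(name)
    if err != nil {
        return err
    }
    fmt.Println("found policy:", policy.Name)
    return nil
}
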
Author: Jim Bugwadia, 2020-05-26 10:36:56 -07:00 (committed by GitHub)
Parent: 2bf5149415
Commit: 838d02c475
27 changed files with 356 additions and 344 deletions


@ -55,7 +55,8 @@ KYVERNO_PATH := cmd/kyverno
KYVERNO_IMAGE := kyverno
local:
go build -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/
go build -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)
go build -ldflags=$(LD_FLAGS) $(PWD)/$(CLI_PATH)
kyverno:
GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go


@ -19,7 +19,6 @@ import (
generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/signal"
"github.com/nirmata/kyverno/pkg/utils"
@ -74,7 +73,7 @@ func main() {
os.Exit(1)
}
// KYVENO CRD CLIENT
// KYVERNO CRD CLIENT
// access CRD resources
// - Policy
// - PolicyViolation
@ -95,7 +94,7 @@ func main() {
// CRD CHECK
// - verify if the CRD for Policy & PolicyViolation are available
if !utils.CRDInstalled(client.DiscoveryClient, log.Log) {
setupLog.Error(fmt.Errorf("pre-requisite CRDs not installed"), "Failed to create watch on kyverno CRDs")
setupLog.Error(fmt.Errorf("CRDs not installed"), "Failed to access Kyverno CRDs")
os.Exit(1)
}
@ -146,20 +145,18 @@ func main() {
log.Log.WithName("ConfigData"),
)
// Policy meta-data store
policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies(), log.Log.WithName("PolicyStore"))
// EVENT GENERATOR
// - generate event with retry mechanism
egen := event.NewEventGenerator(
eventGenerator := event.NewEventGenerator(
client,
pInformer.Kyverno().V1().ClusterPolicies(),
log.Log.WithName("EventGenerator"))
// Policy Status Handler - deals with all logic related to policy status
statusSync := policystatus.NewSync(
pclient,
policyMetaStore)
pInformer.Kyverno().V1().ClusterPolicies().Lister())
// POLICY VIOLATION GENERATOR
// -- generate policy violation
@ -181,10 +178,10 @@ func main() {
pInformer.Kyverno().V1().ClusterPolicyViolations(),
pInformer.Kyverno().V1().PolicyViolations(),
configData,
egen,
eventGenerator,
pvgen,
policyMetaStore,
rWebhookWatcher,
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("PolicyController"),
)
@ -203,7 +200,7 @@ func main() {
client,
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().GenerateRequests(),
egen,
eventGenerator,
pvgen,
kubedynamicInformer,
statusSync.Listener,
@ -247,7 +244,7 @@ func main() {
// Sync openAPI definitions of resources
openAPISync := openapi.NewCRDSync(client, openAPIController)
// WEBHOOOK
// WEBHOOK
// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
// - reports the results based on the response from the policy engine:
// -- annotations on resources with update details on mutation JSON patches
@ -260,11 +257,10 @@ func main() {
pInformer.Kyverno().V1().ClusterPolicies(),
kubeInformer.Rbac().V1().RoleBindings(),
kubeInformer.Rbac().V1().ClusterRoleBindings(),
egen,
eventGenerator,
webhookRegistrationClient,
statusSync.Listener,
configData,
policyMetaStore,
pvgen,
grgen,
rWebhookWatcher,
@ -285,9 +281,8 @@ func main() {
go grgen.Run(1)
go rWebhookWatcher.Run(stopCh)
go configData.Run(stopCh)
go policyMetaStore.Run(stopCh)
go policyCtrl.Run(3, stopCh)
go egen.Run(1, stopCh)
go eventGenerator.Run(1, stopCh)
go grc.Run(1, stopCh)
go grcc.Run(1, stopCh)
go pvgen.Run(1, stopCh)

go.mod

@ -14,6 +14,7 @@ require (
github.com/json-iterator/go v1.1.9 // indirect
github.com/julienschmidt/httprouter v1.3.0
github.com/minio/minio v0.0.0-20200114012931-30922148fbb5
github.com/ory/go-acc v0.2.1 // indirect
github.com/rogpeppe/godef v1.1.2 // indirect
github.com/spf13/cobra v0.0.5
github.com/tevino/abool v0.0.0-20170917061928-9b9efcf221b5

go.sum

@ -426,6 +426,7 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.0 h1:Jf4mxPC/ziBnoPIdpQdPJ9OeiomAUHLvxmPRSPH9m4s=
github.com/google/uuid v1.1.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@ -680,6 +681,8 @@ github.com/ory/fosite v0.29.0/go.mod h1:0atSZmXO7CAcs6NPMI/Qtot8tmZYj04Nddoold4S
github.com/ory/go-acc v0.0.0-20181118080137-ddc355013f90/go.mod h1:sxnvPCxChFuSmTJGj8FdMupeq1BezCiEpDjTUXQ4hf4=
github.com/ory/go-acc v0.1.0 h1:ibxdv3n2k4cqh0VXWa9RHARtbR7tcT7jsnSnmGZkDIM=
github.com/ory/go-acc v0.1.0/go.mod h1:0omgy2aa3nDBJ45VAKeLHH8ccPBudxLeic4xiDRtug0=
github.com/ory/go-acc v0.2.1 h1:Pwcmwd/cSnwJsYN76+w3HU7oXeWFTkwj/KUj1qGDrVw=
github.com/ory/go-acc v0.2.1/go.mod h1:0omgy2aa3nDBJ45VAKeLHH8ccPBudxLeic4xiDRtug0=
github.com/ory/go-convenience v0.1.0/go.mod h1:uEY/a60PL5c12nYz4V5cHY03IBmwIAEm8TWB0yn9KNs=
github.com/ory/gojsonreference v0.0.0-20190720135523-6b606c2d8ee8/go.mod h1:wsH1C4nIeeQClDtD5AH7kF1uTS6zWyqfjVDTmB0Em7A=
github.com/ory/gojsonschema v1.1.1-0.20190919112458-f254ca73d5e9/go.mod h1:BNZpdJgB74KOLSsWFvzw6roXg1I6O51WO8roMmW+T7Y=
@ -1140,6 +1143,7 @@ k8s.io/apimachinery v0.0.0-20190612125636-6a5db36e93ad/go.mod h1:I4A+glKBHiTgiEj
k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.17.4 h1:UzM+38cPUJnzqSQ+E1PY4YxMHIzQyCg29LOoGfo79Zw=
k8s.io/apimachinery v0.17.4/go.mod h1:gxLnyZcGNdZTCLnq3fgzyg2A5BVCHTNDFrw8AmuJ+0g=
k8s.io/apimachinery v0.18.3 h1:pOGcbVAhxADgUYnjS08EFXs9QMl8qaH5U4fr5LGUrSk=
k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
k8s.io/cli-runtime v0.0.0-20191004110135-b9eb767d2e1a h1:REMzGxu+NpG9dPRsE9my/fw9iYIecz1S8UFFl6hbe18=
k8s.io/cli-runtime v0.0.0-20191004110135-b9eb767d2e1a/go.mod h1:qWnH3/b8sp/l7EvlDh7ulDU3UWA4P4N1NFbEEP791tM=
@ -1148,6 +1152,8 @@ k8s.io/cli-runtime v0.17.4/go.mod h1:IVW4zrKKx/8gBgNNkhiUIc7nZbVVNhc1+HcQh+PiNHc
k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
k8s.io/client-go v0.17.4 h1:VVdVbpTY70jiNHS1eiFkUt7ZIJX3txd29nDxxXH4en8=
k8s.io/client-go v0.17.4/go.mod h1:ouF6o5pz3is8qU0/qYL2RnoxOPqgfuidYLowytyLJmc=
k8s.io/client-go v1.5.1 h1:XaX/lo2/u3/pmFau8HN+sB5C/b4dc4Dmm2eXjBH4p1E=
k8s.io/client-go v11.0.0+incompatible h1:LBbX2+lOwY9flffWlJM7f1Ct8V2SRNiMRDFeiwnJo9o=
k8s.io/client-go v11.0.1-0.20190516230509-ae8359b20417+incompatible h1:bK03DJulJi9j05gwnXUufcs2j7h4M85YFvJ0dIlQ9k4=
k8s.io/client-go v11.0.1-0.20190516230509-ae8359b20417+incompatible/go.mod h1:7vJpHMYJwNQCWgzmNV+VYUl1zCObLyodBc8nIyt8L5s=
k8s.io/code-generator v0.0.0-20200306081859-6a048a382944/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=


@ -3,7 +3,7 @@ package v1
import "reflect"
//HasMutateOrValidateOrGenerate checks for rule types
func (p ClusterPolicy) HasMutateOrValidateOrGenerate() bool {
func (p *ClusterPolicy) HasMutateOrValidateOrGenerate() bool {
for _, rule := range p.Spec.Rules {
if rule.HasMutate() || rule.HasValidate() || rule.HasGenerate() {
return true
@ -12,6 +12,14 @@ func (p ClusterPolicy) HasMutateOrValidateOrGenerate() bool {
return false
}
func (p *ClusterPolicy) BackgroundProcessingEnabled() bool {
if p.Spec.Background == nil {
return true
}
return *p.Spec.Background
}
//HasMutate checks for mutate rule
func (r Rule) HasMutate() bool {
return !reflect.DeepEqual(r.Mutation, Mutation{})


@ -24,7 +24,7 @@ import (
func ProcessOverlay(log logr.Logger, ruleName string, overlay interface{}, resource unstructured.Unstructured) (resp response.RuleResponse, patchedResource unstructured.Unstructured) {
startTime := time.Now()
logger := log.WithValues("rule", ruleName)
logger.V(4).Info("started applying overlay rule ", "startTime", startTime)
logger.V(4).Info("started applying overlay rule", "startTime", startTime)
resp.Name = ruleName
resp.Type = utils.Mutation.String()
defer func() {
@ -33,28 +33,27 @@ func ProcessOverlay(log logr.Logger, ruleName string, overlay interface{}, resou
}()
patches, overlayerr := processOverlayPatches(logger, resource.UnstructuredContent(), overlay)
// resource does not satisfy the overlay pattern, we don't apply this rule
if !reflect.DeepEqual(overlayerr, overlayError{}) {
switch overlayerr.statusCode {
// condition key is not present in the resource, don't apply this rule
// consider as success
case conditionNotPresent:
logger.V(3).Info("skip applying rule")
logger.V(3).Info("skip applying rule", "reason", "conditionNotPresent")
resp.Success = true
return resp, resource
// conditions are not met, don't apply this rule
case conditionFailure:
logger.V(3).Info("skip applying rule")
logger.V(3).Info("skip applying rule", "reason", "conditionFailure")
//TODO: send zero response and not consider this as applied?
resp.Success = true
resp.Message = overlayerr.ErrorMsg()
return resp, resource
// rule application failed
case overlayFailure:
logger.Info("failed to process overlay")
resp.Success = false
resp.Message = fmt.Sprintf("failed to process overlay: %v", overlayerr.ErrorMsg())
return resp, resource
default:
logger.Info("failed to process overlay")
resp.Success = false
@ -63,6 +62,7 @@ func ProcessOverlay(log logr.Logger, ruleName string, overlay interface{}, resou
}
}
logger.V(4).Info("processing overlay rule", "patches", len(patches))
if len(patches) == 0 {
resp.Success = true
return resp, resource
@ -78,15 +78,16 @@ func ProcessOverlay(log logr.Logger, ruleName string, overlay interface{}, resou
}
var patchResource []byte
logger.V(5).Info("applying overlay patches", "patches", string(utils.JoinPatches(patches)))
patchResource, err = utils.ApplyPatches(resourceRaw, patches)
if err != nil {
msg := fmt.Sprintf("failed to apply JSON patches: %v", err)
logger.V(2).Info("applying patches", "patches", string(utils.JoinPatches(patches)))
resp.Success = false
resp.Message = msg
return resp, resource
}
logger.V(5).Info("patched resource", "patches", string(patchResource))
err = patchedResource.UnmarshalJSON(patchResource)
if err != nil {
logger.Error(err, "failed to unmarshal resource")


@ -30,7 +30,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
resource := policyContext.NewResource
ctx := policyContext.Context
logger := log.Log.WithName("Mutate").WithValues("policy", policy.Name, "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName())
logger.V(4).Info("start processing", "startTime", startTime)
logger.V(4).Info("start policy processing", "startTime", startTime)
startMutateResultResponse(&resp, policy, resource)
defer endMutateResultResponse(logger, &resp, startTime)
@ -47,7 +47,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
//TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
// dont satisfy a policy rule resource description
if err := MatchesResourceDescription(resource, rule, policyContext.AdmissionInfo); err != nil {
logger.V(4).Info("resource fails the match description", "reason", err.Error())
logger.V(3).Info("resource not matched", "reason", err.Error())
continue
}
@ -56,7 +56,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
// evaluate pre-conditions
// - handle variable subsitutions
if !variables.EvaluateConditions(logger, ctx, copyConditions) {
logger.V(4).Info("resource fails the preconditions")
logger.V(3).Info("resource fails the preconditions")
continue
}
@ -97,9 +97,9 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
}
}
pocliyAnnotation := policy.GetAnnotations()
givenPodControllers := pocliyAnnotation[PodControllersAnnotation]
if strings.Contains(givenPodControllers, resource.GetKind()) {
// insert annotation to podtemplate if resource is pod controller
// skip inserting on existing resource
if autoGenPolicy(&policy) && strings.Contains(PodControllers, resource.GetKind()) {
if !patchedResourceHasPodControllerAnnotation(patchedResource) {
var ruleResponse response.RuleResponse
ruleResponse, patchedResource = mutate.ProcessOverlay(logger, "podControllerAnnotation", podTemplateRule.Mutation.Overlay, patchedResource)
@ -113,11 +113,18 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
}
}
}
// send the patched resource
resp.PatchedResource = patchedResource
return resp
}
func autoGenPolicy(policy *kyverno.ClusterPolicy) bool {
annotations := policy.GetObjectMeta().GetAnnotations()
_, ok := annotations[PodControllersAnnotation]
return ok
}
func patchedResourceHasPodControllerAnnotation(resource unstructured.Unstructured) bool {
var podController struct {
Spec struct {
@ -132,7 +139,10 @@ func patchedResourceHasPodControllerAnnotation(resource unstructured.Unstructure
resourceRaw, _ := json.Marshal(resource.Object)
_ = json.Unmarshal(resourceRaw, &podController)
_, ok := podController.Spec.Template.Metadata.Annotations[PodTemplateAnnotation]
val, ok := podController.Spec.Template.Metadata.Annotations[PodTemplateAnnotation]
log.Log.Info("patchedResourceHasPodControllerAnnotation", "resourceRaw", string(resourceRaw), "val", val, "ok", ok)
return ok
}
func incrementAppliedRuleCount(resp *response.EngineResponse) {
@ -152,7 +162,7 @@ func startMutateResultResponse(resp *response.EngineResponse, policy kyverno.Clu
func endMutateResultResponse(logger logr.Logger, resp *response.EngineResponse, startTime time.Time) {
resp.PolicyResponse.ProcessingTime = time.Since(startTime)
logger.V(4).Info("finshed processing", "processingTime", resp.PolicyResponse.ProcessingTime, "mutationRulesApplied", resp.PolicyResponse.RulesAppliedCount)
logger.V(4).Info("finished processing policy", "processingTime", resp.PolicyResponse.ProcessingTime, "mutationRulesApplied", resp.PolicyResponse.RulesAppliedCount)
}
// podTemplateRule mutate pod template with annotation


@ -44,7 +44,7 @@ func checkName(name, resourceName string) bool {
func checkNameSpace(namespaces []string, resourceNameSpace string) bool {
for _, namespace := range namespaces {
if resourceNameSpace == namespace {
if wildcard.Match(namespace, resourceNameSpace) {
return true
}
}
@ -69,26 +69,26 @@ func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription,
var errs []error
if len(conditionBlock.Kinds) > 0 {
if !checkKind(conditionBlock.Kinds, resource.GetKind()) {
errs = append(errs, fmt.Errorf("resource kind does not match conditionBlock"))
errs = append(errs, fmt.Errorf("kind does not match"))
}
}
if conditionBlock.Name != "" {
if !checkName(conditionBlock.Name, resource.GetName()) {
errs = append(errs, fmt.Errorf("resource name does not match conditionBlock"))
errs = append(errs, fmt.Errorf("name does not match"))
}
}
if len(conditionBlock.Namespaces) > 0 {
if !checkNameSpace(conditionBlock.Namespaces, resource.GetNamespace()) {
errs = append(errs, fmt.Errorf("resource namespace does not match conditionBlock"))
errs = append(errs, fmt.Errorf("namespace does not match"))
}
}
if conditionBlock.Selector != nil {
hasPassed, err := checkSelector(conditionBlock.Selector, resource.GetLabels())
if err != nil {
errs = append(errs, fmt.Errorf("could not parse selector block of the policy in conditionBlock: %v", err))
errs = append(errs, fmt.Errorf("failed to parse selector: %v", err))
} else {
if !hasPassed {
errs = append(errs, fmt.Errorf("resource does not match selector of given conditionBlock"))
errs = append(errs, fmt.Errorf("selector does not match"))
}
}
}
@ -167,7 +167,7 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
matchErrs := doesResourceMatchConditionBlock(rule.MatchResources.ResourceDescription, rule.MatchResources.UserInfo, admissionInfo, resource)
reasonsForFailure = append(reasonsForFailure, matchErrs...)
} else {
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("match block in rule cannot be empty"))
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("match cannot be empty"))
}
// checking if resource has been excluded
@ -175,15 +175,15 @@ func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef k
!reflect.DeepEqual(rule.ExcludeResources.UserInfo, kyverno.UserInfo{}) {
excludeErrs := doesResourceMatchConditionBlock(rule.ExcludeResources.ResourceDescription, rule.ExcludeResources.UserInfo, admissionInfo, resource)
if excludeErrs == nil {
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("resource has been excluded since it matches the exclude block"))
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("resource excluded"))
}
}
// creating final error
var errorMessage = "rule has failed to match resource for the following reasons:"
var errorMessage = "rule not matched:"
for i, reasonForFailure := range reasonsForFailure {
if reasonForFailure != nil {
errorMessage += "\n" + fmt.Sprint(i+1) + ". " + reasonForFailure.Error()
errorMessage += "\n " + fmt.Sprint(i+1) + ". " + reasonForFailure.Error()
}
}


@ -30,7 +30,7 @@ func (ri RuleType) String() string {
}
// ApplyPatches patches given resource with given patches and returns patched document
// return origin resource if any error occurs
// return original resource if any error occurs
func ApplyPatches(resource []byte, patches [][]byte) ([]byte, error) {
joinedPatches := JoinPatches(patches)
patch, err := jsonpatch.DecodePatch(joinedPatches)
@ -42,6 +42,7 @@ func ApplyPatches(resource []byte, patches [][]byte) ([]byte, error) {
if err != nil {
return resource, err
}
return patchedDocument, err
}
@ -56,6 +57,7 @@ func ApplyPatchNew(resource, patch []byte) ([]byte, error) {
if err != nil {
return nil, err
}
return patchedResource, err
}


@ -3,7 +3,7 @@ package validate
import (
"errors"
"fmt"
"path/filepath"
"path"
"reflect"
"strconv"
"strings"
@ -188,11 +188,11 @@ func valFromReferenceToString(value interface{}, operator string) (string, error
// returns absolute path
func formAbsolutePath(referencePath, absolutePath string) string {
if filepath.IsAbs(referencePath) {
if path.IsAbs(referencePath) {
return referencePath
}
return filepath.Join(absolutePath, referencePath)
return path.Join(absolutePath, referencePath)
}
//Prepares original pattern, path to value, and call traverse function


@ -221,8 +221,9 @@ func validatePatterns(log logr.Logger, ctx context.EvalInterface, resource unstr
resp.Type = utils.Validation.String()
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
logger.V(4).Info("finshed processing", "processingTime", resp.RuleStats.ProcessingTime)
logger.V(4).Info("finished processing rule", "processingTime", resp.RuleStats.ProcessingTime)
}()
// work on a copy of validation rule
validationRule := rule.Validation.DeepCopy()


@ -1,6 +1,7 @@
package policy
import (
informers "k8s.io/client-go/informers/core/v1"
"time"
"github.com/go-logr/logr"
@ -13,7 +14,6 @@ import (
"github.com/nirmata/kyverno/pkg/constant"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"github.com/nirmata/kyverno/pkg/webhookconfig"
v1 "k8s.io/api/core/v1"
@ -48,30 +48,42 @@ type PolicyController struct {
//pvControl is used for adoptin/releasing policy violation
pvControl PVControlInterface
// Policys that need to be synced
queue workqueue.RateLimitingInterface
// pLister can list/get policy from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// pvLister can list/get policy violation from the shared informer's store
cpvLister kyvernolister.ClusterPolicyViolationLister
// nspvLister can list/get namespaced policy violation from the shared informer's store
nspvLister kyvernolister.PolicyViolationLister
// pListerSynced returns true if the Policy store has been synced at least once
pListerSynced cache.InformerSynced
// pvListerSynced returns true if the Policy store has been synced at least once
cpvListerSynced cache.InformerSynced
// pvListerSynced returns true if the Policy Violation store has been synced at least once
nspvListerSynced cache.InformerSynced
nsInformer informers.NamespaceInformer
// Resource manager, manages the mapping for already processed resource
rm resourceManager
// helpers to validate against current loaded configuration
configHandler config.Interface
// store to hold policy meta data for faster lookup
pMetaStore policystore.UpdateInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
// resourceWebhookWatcher queues the webhook creation request, creates the webhook
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
log logr.Logger
}
@ -81,12 +93,12 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
pInformer kyvernoinformer.ClusterPolicyInformer,
cpvInformer kyvernoinformer.ClusterPolicyViolationInformer,
nspvInformer kyvernoinformer.PolicyViolationInformer,
configHandler config.Interface,
eventGen event.Interface,
configHandler config.Interface, eventGen event.Interface,
pvGenerator policyviolation.GeneratorInterface,
pMetaStore policystore.UpdateInterface,
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
namespaces informers.NamespaceInformer,
log logr.Logger) (*PolicyController, error) {
// Event broad caster
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartLogging(log.V(5).Info)
@ -103,9 +115,9 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
eventRecorder: eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: "policy_controller"}),
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "policy"),
configHandler: configHandler,
pMetaStore: pMetaStore,
pvGenerator: pvGenerator,
resourceWebhookWatcher: resourceWebhookWatcher,
nsInformer: namespaces,
log: log,
}
@ -147,32 +159,30 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
return &pc, nil
}
func (pc *PolicyController) canBackgroundProcess(p *kyverno.ClusterPolicy) bool {
logger := pc.log.WithValues("policy", p.Name)
if !p.BackgroundProcessingEnabled() {
logger.V(4).Info("background processed is disabled")
return false
}
if err := ContainsVariablesOtherThanObject(*p); err != nil {
logger.V(4).Info("policy cannot be processed in the background")
return false
}
return true
}
func (pc *PolicyController) addPolicy(obj interface{}) {
logger := pc.log
p := obj.(*kyverno.ClusterPolicy)
// Only process policies that are enabled for "background" execution
// policy.spec.background -> "True"
// register with policy meta-store
pc.pMetaStore.Register(*p)
// TODO: code might seem vague, awaiting resolution of issue https://github.com/nirmata/kyverno/issues/598
if p.Spec.Background == nil {
// if userInfo is not defined in policy we process the policy
if err := ContainsVariablesOtherThanObject(*p); err != nil {
return
}
} else {
if !*p.Spec.Background {
return
}
// If userInfo is used then skip the policy
// ideally this should be handled by background flag only
if err := ContainsVariablesOtherThanObject(*p); err != nil {
// contains userInfo used in policy
return
}
if !pc.canBackgroundProcess(p) {
return
}
logger.V(4).Info("adding policy", "name", p.Name)
logger.V(4).Info("queuing policy for background processing", "name", p.Name)
pc.enqueuePolicy(p)
}
@ -180,32 +190,9 @@ func (pc *PolicyController) updatePolicy(old, cur interface{}) {
logger := pc.log
oldP := old.(*kyverno.ClusterPolicy)
curP := cur.(*kyverno.ClusterPolicy)
// TODO: optimize this : policy meta-store
// Update policy-> (remove,add)
err := pc.pMetaStore.UnRegister(*oldP)
if err != nil {
logger.Error(err, "failed to unregister policy", "name", oldP.Name)
}
pc.pMetaStore.Register(*curP)
// Only process policies that are enabled for "background" execution
// policy.spec.background -> "True"
// TODO: code might seem vague, awaiting resolution of issue https://github.com/nirmata/kyverno/issues/598
if curP.Spec.Background == nil {
// if userInfo is not defined in policy we process the policy
if err := ContainsVariablesOtherThanObject(*curP); err != nil {
return
}
} else {
if !*curP.Spec.Background {
return
}
// If userInfo is used then skip the policy
// ideally this should be handled by background flag only
if err := ContainsVariablesOtherThanObject(*curP); err != nil {
// contains userInfo used in policy
return
}
if !pc.canBackgroundProcess(curP) {
return
}
logger.V(4).Info("updating policy", "name", oldP.Name)
@ -221,6 +208,7 @@ func (pc *PolicyController) deletePolicy(obj interface{}) {
logger.Info("couldnt get object from tomstone", "obj", obj)
return
}
p, ok = tombstone.Obj.(*kyverno.ClusterPolicy)
if !ok {
logger.Info("tombstone container object that is not a policy", "obj", obj)
@ -229,10 +217,6 @@ func (pc *PolicyController) deletePolicy(obj interface{}) {
}
logger.V(4).Info("deleting policy", "name", p.Name)
// Unregister from policy meta-store
if err := pc.pMetaStore.UnRegister(*p); err != nil {
logger.Error(err, "failed to unregister policy", "name", p.Name)
}
// we process policies that are not set of background processing as we need to perform policy violation
// cleanup when a policy is deleted.


@ -1,7 +1,9 @@
package policy
import (
informers "k8s.io/client-go/informers/core/v1"
"reflect"
"strings"
"sync"
"time"
@ -25,7 +27,7 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
pc.rm.Drop()
var engineResponses []response.EngineResponse
// get resource that are satisfy the resource description defined in the rules
resourceMap := listResources(pc.client, policy, pc.configHandler, logger)
resourceMap := pc.listResources(policy)
for _, resource := range resourceMap {
// pre-processing, check if the policy and resource version has been processed before
if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {
@ -53,35 +55,27 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic
return engineResponses
}
func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHandler config.Interface, log logr.Logger) map[string]unstructured.Unstructured {
func (pc *PolicyController) listResources(policy kyverno.ClusterPolicy) map[string]unstructured.Unstructured {
// key uid
resourceMap := map[string]unstructured.Unstructured{}
for _, rule := range policy.Spec.Rules {
for _, k := range rule.MatchResources.Kinds {
resourceSchema, _, err := client.DiscoveryClient.FindResource(k)
resourceSchema, _, err := pc.client.DiscoveryClient.FindResource(k)
if err != nil {
log.Error(err, "failed to find resource", "kind", k)
pc.log.Error(err, "failed to find resource", "kind", k)
continue
}
if !resourceSchema.Namespaced {
rMap := getResourcesPerNamespace(k, client, "", rule, configHandler, log)
mergeresources(resourceMap, rMap)
rMap := getResourcesPerNamespace(k, pc.client, "", rule, pc.configHandler, pc.log)
mergeResources(resourceMap, rMap)
} else {
var namespaces []string
if len(rule.MatchResources.Namespaces) > 0 {
log.V(4).Info("namespaces included", "namespaces", rule.MatchResources.Namespaces)
namespaces = append(namespaces, rule.MatchResources.Namespaces...)
} else {
log.V(4).Info("processing all namespaces", "rule", rule.Name)
namespaces = getAllNamespaces(client, log)
}
namespaces := getNamespacesForRule(&rule, pc.nsInformer, pc.log)
for _, ns := range namespaces {
rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler, log)
mergeresources(resourceMap, rMap)
rMap := getResourcesPerNamespace(k, pc.client, ns, rule, pc.configHandler, pc.log)
mergeResources(resourceMap, rMap)
}
}
}
@ -90,6 +84,70 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHa
return resourceMap
}
func getNamespacesForRule(rule *kyverno.Rule, nsInformer informers.NamespaceInformer, log logr.Logger) []string {
if len(rule.MatchResources.Namespaces) == 0 {
return getAllNamespaces(nsInformer, log)
}
var wildcards []string
var results []string
for _, nsName := range rule.MatchResources.Namespaces {
if hasWildcard(nsName) {
wildcards = append(wildcards, nsName)
}
results = append(results, nsName)
}
if len(wildcards) > 0 {
wildcardMatches := getMatchingNamespaces(wildcards, nsInformer, log)
results = append(results, wildcardMatches...)
}
return results
}
func hasWildcard(s string) bool {
if s == "" {
return false
}
return strings.Contains(s, "*") || strings.Contains(s, "?")
}
func getMatchingNamespaces(wildcards []string, nsInformer informers.NamespaceInformer, log logr.Logger) []string {
all := getAllNamespaces(nsInformer, log)
if len(all) == 0 {
return all
}
var results []string
for _, wc := range wildcards {
for _, ns := range all {
if wildcard.Match(wc, ns) {
results = append(results, ns)
}
}
}
return results
}
func getAllNamespaces(nsInformer informers.NamespaceInformer, log logr.Logger) []string {
var results []string
namespaces, err := nsInformer.Lister().List(labels.NewSelector())
if err != nil {
log.Error(err, "Failed to list namespaces")
}
for _, n := range namespaces {
name := n.GetName()
results = append(results, name)
}
return results
}
func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, configHandler config.Interface, log logr.Logger) map[string]unstructured.Unstructured {
resourceMap := map[string]unstructured.Unstructured{}
// merge include and exclude label selector values
@ -243,27 +301,12 @@ const (
)
// merge b into a map
func mergeresources(a, b map[string]unstructured.Unstructured) {
func mergeResources(a, b map[string]unstructured.Unstructured) {
for k, v := range b {
a[k] = v
}
}
func getAllNamespaces(client *client.Client, log logr.Logger) []string {
var namespaces []string
// get all namespaces
nsList, err := client.ListResource("Namespace", "", nil)
if err != nil {
log.Error(err, "failed to list namespaces")
return namespaces
}
for _, ns := range nsList.Items {
namespaces = append(namespaces, ns.GetName())
}
return namespaces
}
//NewResourceManager returns a new ResourceManager
func NewResourceManager(rebuildTime int64) *ResourceManager {
rm := ResourceManager{


@ -3,6 +3,7 @@ package policystatus
import (
"encoding/json"
"fmt"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
"sync"
"time"
@ -35,10 +36,6 @@ type statusUpdater interface {
UpdateStatus(status v1.PolicyStatus) v1.PolicyStatus
}
type policyStore interface {
Get(policyName string) (*v1.ClusterPolicy, error)
}
type Listener chan statusUpdater
func (l Listener) Send(s statusUpdater) {
@ -53,7 +50,7 @@ type Sync struct {
cache *cache
Listener Listener
client *versioned.Clientset
policyStore policyStore
lister kyvernolister.ClusterPolicyLister
}
type cache struct {
@ -62,7 +59,7 @@ type cache struct {
keyToMutex *keyToMutex
}
func NewSync(c *versioned.Clientset, p policyStore) *Sync {
func NewSync(c *versioned.Clientset, lister kyvernolister.ClusterPolicyLister) *Sync {
return &Sync{
cache: &cache{
dataMu: sync.RWMutex{},
@ -70,7 +67,7 @@ func NewSync(c *versioned.Clientset, p policyStore) *Sync {
keyToMutex: newKeyToMutex(),
},
client: c,
policyStore: p,
lister: lister,
Listener: make(chan statusUpdater, 20),
}
}
@ -96,7 +93,7 @@ func (s *Sync) updateStatusCache(stopCh <-chan struct{}) {
status, exist := s.cache.data[statusUpdater.PolicyName()]
s.cache.dataMu.RUnlock()
if !exist {
policy, _ := s.policyStore.Get(statusUpdater.PolicyName())
policy, _ := s.lister.Get(statusUpdater.PolicyName())
if policy != nil {
status = policy.Status
}
@ -129,10 +126,11 @@ func (s *Sync) updatePolicyStatus() {
s.cache.dataMu.Unlock()
for policyName, status := range nameToStatus {
policy, err := s.policyStore.Get(policyName)
policy, err := s.lister.Get(policyName)
if err != nil {
continue
}
policy.Status = status
_, err = s.client.KyvernoV1().ClusterPolicies().UpdateStatus(policy)
if err != nil {


@ -2,6 +2,8 @@ package policystatus
import (
"encoding/json"
"fmt"
"k8s.io/apimachinery/pkg/labels"
"testing"
"time"
@ -27,11 +29,35 @@ func (d dummyStatusUpdater) PolicyName() string {
return "policy1"
}
type dummyLister struct {
}
func (dl dummyLister) List(selector labels.Selector) (ret []*v1.ClusterPolicy, err error) {
return nil, fmt.Errorf("not implemented")
}
func (dl dummyLister) Get(name string) (*v1.ClusterPolicy, error) {
return nil, fmt.Errorf("not implemented")
}
func (dl dummyLister) GetPolicyForPolicyViolation(pv *v1.ClusterPolicyViolation) ([]*v1.ClusterPolicy, error) {
return nil, fmt.Errorf("not implemented")
}
func (dl dummyLister) GetPolicyForNamespacedPolicyViolation(pv *v1.PolicyViolation) ([]*v1.ClusterPolicy, error) {
return nil, fmt.Errorf("not implemented")
}
func (dl dummyLister) ListResources(selector labels.Selector) (ret []*v1.ClusterPolicy, err error) {
return nil, fmt.Errorf("not implemented")
}
func TestKeyToMutex(t *testing.T) {
expectedCache := `{"policy1":{"rulesAppliedCount":100}}`
stopCh := make(chan struct{})
s := NewSync(nil, dummyStore{})
s := NewSync(nil, dummyLister{})
for i := 0; i < 100; i++ {
go s.updateStatusCache(stopCh)
}


@ -1,168 +0,0 @@
package policystore
import (
"sync"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
)
type policyMap map[string]interface{}
type namespaceMap map[string]policyMap
type kindMap map[string]namespaceMap
//PolicyStore Store the meta-data information to faster lookup policies
type PolicyStore struct {
data map[string]namespaceMap
mu sync.RWMutex
// list/get cluster policy
pLister kyvernolister.ClusterPolicyLister
// returns true if the cluster policy store has been synced at least once
pSynched cache.InformerSynced
log logr.Logger
}
//UpdateInterface provides api to update policies
type UpdateInterface interface {
// Register a new policy
Register(policy kyverno.ClusterPolicy)
// Remove policy information
UnRegister(policy kyverno.ClusterPolicy) error
}
//LookupInterface provides api to lookup policies
type LookupInterface interface {
ListAll() ([]kyverno.ClusterPolicy, error)
}
// NewPolicyStore returns a new policy store
func NewPolicyStore(pInformer kyvernoinformer.ClusterPolicyInformer,
log logr.Logger) *PolicyStore {
ps := PolicyStore{
data: make(kindMap),
pLister: pInformer.Lister(),
pSynched: pInformer.Informer().HasSynced,
log: log,
}
return &ps
}
//Run checks syncing
func (ps *PolicyStore) Run(stopCh <-chan struct{}) {
logger := ps.log
if !cache.WaitForCacheSync(stopCh, ps.pSynched) {
logger.Info("failed to sync informer cache")
}
}
//Register a new policy
func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) {
logger := ps.log
logger.V(4).Info("adding policy", "name", policy.Name)
ps.mu.Lock()
defer ps.mu.Unlock()
var pmap policyMap
// add an entry for each rule in policy
for _, rule := range policy.Spec.Rules {
// rule.MatchResources.Kinds - List - mandatory - atleast on entry
for _, kind := range rule.MatchResources.Kinds {
kindMap := ps.addKind(kind)
// namespaces
if len(rule.MatchResources.Namespaces) == 0 {
// all namespaces - *
pmap = addNamespace(kindMap, "*")
} else {
for _, ns := range rule.MatchResources.Namespaces {
pmap = addNamespace(kindMap, ns)
}
}
// add policy to the pmap
addPolicyElement(pmap, policy.Name)
}
}
}
func (ps *PolicyStore) ListAll() ([]kyverno.ClusterPolicy, error) {
policyPointers, err := ps.pLister.List(labels.NewSelector())
if err != nil {
return nil, err
}
var policies = make([]kyverno.ClusterPolicy, 0, len(policyPointers))
for _, policy := range policyPointers {
policies = append(policies, *policy)
}
return policies, nil
}
func (ps *PolicyStore) Get(policyName string) (*kyverno.ClusterPolicy, error) {
return ps.pLister.Get(policyName)
}
//UnRegister Remove policy information
func (ps *PolicyStore) UnRegister(policy kyverno.ClusterPolicy) error {
ps.mu.Lock()
defer ps.mu.Unlock()
for _, rule := range policy.Spec.Rules {
for _, kind := range rule.MatchResources.Kinds {
// get kind Map
kindMap := ps.getKind(kind)
if kindMap == nil {
// kind does not exist
return nil
}
if len(rule.MatchResources.Namespaces) == 0 {
namespace := "*"
pmap := getNamespace(kindMap, namespace)
// remove element
delete(pmap, policy.Name)
} else {
for _, ns := range rule.MatchResources.Namespaces {
pmap := getNamespace(kindMap, ns)
// remove element
delete(pmap, policy.Name)
}
}
}
}
return nil
}
func (ps *PolicyStore) addKind(kind string) namespaceMap {
val, ok := ps.data[kind]
if ok {
return val
}
ps.data[kind] = make(namespaceMap)
return ps.data[kind]
}
func (ps *PolicyStore) getKind(kind string) namespaceMap {
return ps.data[kind]
}
func addNamespace(kindMap map[string]policyMap, namespace string) policyMap {
val, ok := kindMap[namespace]
if ok {
return val
}
kindMap[namespace] = make(policyMap)
return kindMap[namespace]
}
func getNamespace(kindMap map[string]policyMap, namespace string) policyMap {
return kindMap[namespace]
}
func addPolicyElement(pmap policyMap, name string) {
var emptyInterface interface{}
if _, ok := pmap[name]; !ok {
pmap[name] = emptyInterface
}
}


@ -161,7 +161,7 @@ func runTestCase(t *testing.T, tc scaseT) bool {
er = engine.Generate(policyContext)
t.Log(("---Generation---"))
validateResponse(t, er.PolicyResponse, tc.Expected.Generation.PolicyResponse)
// Expected generate resource will be in same namesapces as resource
// Expected generate resource will be in same namespaces as resource
validateGeneratedResources(t, client, *policy, resource.GetName(), tc.Expected.Generation.GeneratedResources)
}
}
@ -196,7 +196,7 @@ func validateResource(t *testing.T, responseResource unstructured.Unstructured,
// load expected resource
expectedResource := loadPolicyResource(t, expectedResourceFile)
if expectedResource == nil {
t.Log("failed to get the expected resource")
t.Logf("failed to get the expected resource: %s", expectedResourceFile)
return
}


@ -137,3 +137,7 @@ func Test_known_ingress(t *testing.T) {
func Test_unknown_ingress(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/unknown_ingress_class.yaml")
}
func Test_mutate_pod_spec(t *testing.T) {
testScenario(t, "test/scenarios/other/scenario_mutate_pod_spec.yaml")
}


@ -115,7 +115,7 @@ func processResourceWithPatches(patch []byte, resource []byte, log logr.Logger)
return resource
}
func containRBACinfo(policies []kyverno.ClusterPolicy) bool {
func containRBACinfo(policies []*kyverno.ClusterPolicy) bool {
for _, policy := range policies {
for _, rule := range policy.Spec.Rules {
if len(rule.MatchResources.Roles) > 0 || len(rule.MatchResources.ClusterRoles) > 0 || len(rule.ExcludeResources.Roles) > 0 || len(rule.ExcludeResources.ClusterRoles) > 0 {


@ -16,7 +16,7 @@ import (
)
//HandleGenerate handles admission-requests for policies with generate rules
func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo) (bool, string) {
func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo) (bool, string) {
logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
logger.V(4).Info("incoming request")
var engineResponses []response.EngineResponse
@ -39,7 +39,7 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
// engine.Generate returns a list of rules that are applicable on this resource
for _, policy := range policies {
policyContext.Policy = policy
policyContext.Policy = *policy
engineResponse := engine.Generate(policyContext)
if len(engineResponse.PolicyResponse.Rules) > 0 {
// some generate rules do apply to the resource


@ -21,7 +21,7 @@ import (
func (ws *WebhookServer) HandleMutation(
request *v1beta1.AdmissionRequest,
resource unstructured.Unstructured,
policies []kyverno.ClusterPolicy,
policies []*kyverno.ClusterPolicy,
ctx *context.Context,
userRequestInfo kyverno.RequestInfo) []byte {
@ -43,7 +43,7 @@ func (ws *WebhookServer) HandleMutation(
for _, policy := range policies {
logger.V(3).Info("evaluating policy", "policy", policy.Name)
policyContext.Policy = policy
policyContext.Policy = *policy
engineResponse := engine.Mutate(policyContext)
engineResponses = append(engineResponses, engineResponse)


@ -7,6 +7,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"k8s.io/apimachinery/pkg/labels"
"net/http"
"time"
@ -23,7 +24,6 @@ import (
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/openapi"
"github.com/nirmata/kyverno/pkg/policystatus"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
tlsutils "github.com/nirmata/kyverno/pkg/tls"
userinfo "github.com/nirmata/kyverno/pkg/userinfo"
@ -42,36 +42,49 @@ type WebhookServer struct {
server http.Server
client *client.Client
kyvernoClient *kyvernoclient.Clientset
// list/get cluster policy resource
pLister kyvernolister.ClusterPolicyLister
// returns true if the cluster policy store has synced atleast
pSynced cache.InformerSynced
// list/get role binding resource
rbLister rbaclister.RoleBindingLister
// return true if role bining store has synced atleast once
rbSynced cache.InformerSynced
// list/get cluster role binding resource
crbLister rbaclister.ClusterRoleBindingLister
// return true if cluster role binding store has synced atleast once
crbSynced cache.InformerSynced
// generate events
eventGen event.Interface
// webhook registration client
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient
// API to send policy stats for aggregation
statusListener policystatus.Listener
// helpers to validate against current loaded configuration
configHandler config.Interface
// channel for cleanup notification
cleanUp chan<- struct{}
// last request time
lastReqTime *checker.LastReqTime
// store to hold policy meta data for faster lookup
pMetaStore policystore.LookupInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
// generate request generator
grGenerator *generate.Generator
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
log logr.Logger
openAPIController *openapi.Controller
@ -90,7 +103,6 @@ func NewWebhookServer(
webhookRegistrationClient *webhookconfig.WebhookRegistrationClient,
statusSync policystatus.Listener,
configHandler config.Interface,
pMetaStore policystore.LookupInterface,
pvGenerator policyviolation.GeneratorInterface,
grGenerator *generate.Generator,
resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
@ -126,7 +138,6 @@ func NewWebhookServer(
cleanUp: cleanUp,
lastReqTime: resourceWebhookWatcher.LastReqTime,
pvGenerator: pvGenerator,
pMetaStore: pMetaStore,
grGenerator: grGenerator,
resourceWebhookWatcher: resourceWebhookWatcher,
log: log,
@ -208,7 +219,7 @@ func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1
}
logger := ws.log.WithName("resourceMutation").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
policies, err := ws.pMetaStore.ListAll()
policies, err := ws.pLister.ListResources(labels.NewSelector())
if err != nil {
// Unable to connect to policy Lister to access policies
logger.Error(err, "failed to list policies. Policies are NOT being applied")
@ -355,7 +366,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
}
}
policies, err := ws.pMetaStore.ListAll()
policies, err := ws.pLister.ListResources(labels.NewSelector())
if err != nil {
// Unable to connect to policy Lister to access policies
logger.Error(err, "failed to list policies. Policies are NOT being applied")
@ -369,7 +380,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request)
if err != nil {
// TODO(shuting): continue apply policy if error getting roleRef?
logger.Error(err, "failed to get RBAC infromation for request")
logger.Error(err, "failed to get RBAC information for request")
}
}
@ -482,18 +493,19 @@ func (ws *WebhookServer) Stop(ctx context.Context) {
// Answers to the http.ResponseWriter if request is not valid
func (ws *WebhookServer) bodyToAdmissionReview(request *http.Request, writer http.ResponseWriter) *v1beta1.AdmissionReview {
logger := ws.log
var body []byte
if request.Body != nil {
if data, err := ioutil.ReadAll(request.Body); err == nil {
body = data
}
}
if len(body) == 0 {
logger.Info("empty body")
if request.Body == nil {
logger.Info("empty body", "req", request.URL.String())
http.Error(writer, "empty body", http.StatusBadRequest)
return nil
}
defer request.Body.Close()
body, err := ioutil.ReadAll(request.Body)
if err != nil {
logger.Info("failed to read HTTP body", "req", request.URL.String())
http.Error(writer, "failed to read HTTP body", http.StatusBadRequest)
}
contentType := request.Header.Get("Content-Type")
if contentType != "application/json" {
logger.Info("invalid Content-Type", "contextType", contentType)


@ -23,7 +23,7 @@ import (
// patchedResource is the (resource + patches) after applying mutation rules
func (ws *WebhookServer) HandleValidation(
request *v1beta1.AdmissionRequest,
policies []kyverno.ClusterPolicy,
policies []*kyverno.ClusterPolicy,
patchedResource []byte,
ctx *context.Context,
userRequestInfo kyverno.RequestInfo) (bool, string) {
@ -64,7 +64,7 @@ func (ws *WebhookServer) HandleValidation(
var engineResponses []response.EngineResponse
for _, policy := range policies {
logger.V(3).Info("evaluating policy", "policy", policy.Name)
policyContext.Policy = policy
policyContext.Policy = *policy
engineResponse := engine.Validate(policyContext)
if reflect.DeepEqual(engineResponse, response.EngineResponse{}) {
// we get an empty response if old and new resources created the same response


@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
  namespace: test-foo-aaaaaaaaa-bbbbbbbb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      enableServiceLinks: false
      automountServiceAccountToken: false
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80


@ -0,0 +1,23 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: mutate-pods-spec
spec:
  rules:
  - name: "disable-servicelink-and-token"
    match:
      resources:
        kinds:
        - DaemonSet
        - Deployment
        - Job
        - StatefulSet
        namespaces:
        - test-foo-*
    mutate:
      overlay:
        spec:
          template:
            spec:
              automountServiceAccountToken: false
              enableServiceLinks: false


@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
  namespace: test-foo-aaaaaaaaa-bbbbbbbb
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - name: nginx
        image: nginx:1.14.2
        ports:
        - containerPort: 80


@ -0,0 +1,19 @@
# file path relative to project root
input:
  policy: test/policy/mutate/policy_mutate_pod_spec.yaml
  resource: test/resources/resource_mutate_pod_spec.yaml
expected:
  mutation:
    patchedresource: test/output/output_mutate_pod_spec.yaml
    policyresponse:
      policy: mutate-pods-spec
      resource:
        kind: Deployment
        apiVersion: apps/v1
        namespace: test-foo-aaaaaaaaa-bbbbbbbb
        name: nginx-deployment
      rules:
      - name: disable-servicelink-and-token
        type: Mutation
        success: true
        message: successfully processed overlay