
Merge pull request #662 from shravanshetty1/644_match-exclude

#644 - Policy Rule Exclude conditions should be processed as a logical AND instead of a logical OR
shuting 2020-02-19 18:26:06 -08:00 committed by GitHub
commit 8725c5a424
15 changed files with 280 additions and 1304 deletions
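The heart of the change: a rule's exclude conditions are now combined with a logical AND, so a resource is excluded only when every condition specified in the exclude block matches; previously any single matching condition was enough to exclude it. A minimal sketch of the difference, using illustrative helpers that are not part of the Kyverno code:

package main

import "fmt"

// excludedOR mirrors the old behavior: any matching exclude condition
// excludes the resource.
func excludedOR(conditionsMatched []bool) bool {
	for _, matched := range conditionsMatched {
		if matched {
			return true
		}
	}
	return false
}

// excludedAND mirrors the new behavior (#644): the resource is excluded
// only if every specified exclude condition matches.
func excludedAND(conditionsMatched []bool) bool {
	for _, matched := range conditionsMatched {
		if !matched {
			return false
		}
	}
	return len(conditionsMatched) > 0
}

func main() {
	// e.g. the exclude block's name matched, but its clusterRole did not
	conds := []bool{true, false}
	fmt.Println(excludedOR(conds))  // true: old semantics excluded the resource
	fmt.Println(excludedAND(conds)) // false: new semantics still apply the rule
}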


@@ -4,7 +4,6 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/variables"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -26,10 +25,8 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission
if !rule.HasGenerate() {
return nil
}
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
return nil
}
if !MatchesResourceDescription(resource, rule) {
if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil {
glog.V(4).Infof(err.Error())
return nil
}
// operate on the copy of the conditions, as we perform variable substitution


@@ -8,7 +8,6 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/mutate"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/variables"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@@ -42,19 +41,13 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
continue
}
startTime := time.Now()
if !rbac.MatchAdmissionInfo(rule, policyContext.AdmissionInfo) {
glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), policyContext.AdmissionInfo)
continue
}
glog.V(4).Infof("Time: Mutate matchAdmissionInfo %v", time.Since(startTime))
// check if the resource satisfies the filter conditions defined in the rule
//TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
// don't satisfy a policy rule resource description
ok := MatchesResourceDescription(resource, rule)
if !ok {
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
if err := MatchesResourceDescription(resource, rule, policyContext.AdmissionInfo); err != nil {
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule:\n%s", resource.GetNamespace(), resource.GetName(), err.Error())
continue
}


@@ -1,117 +0,0 @@
package rbac
import (
"reflect"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
utils "github.com/nirmata/kyverno/pkg/utils"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
)
const (
//SaPrefix defines the prefix for service accounts
SaPrefix = "system:serviceaccount:"
)
// MatchAdmissionInfo returns true if the rule can be applied to the request
func MatchAdmissionInfo(rule kyverno.Rule, requestInfo kyverno.RequestInfo) bool {
// when processing an existing resource, the request does not contain requestInfo
// skip permission checking
if reflect.DeepEqual(requestInfo, kyverno.RequestInfo{}) {
return true
}
if !validateMatch(rule.MatchResources, requestInfo) {
return false
}
return validateExclude(rule.ExcludeResources, requestInfo)
}
// match:
// roles: role1, role2
// clusterRoles: clusterRole1,clusterRole2
// subjects: subject1, subject2
// validateMatch returns true if (role1 || role2) and (clusterRole1 || clusterRole2)
// and (subject1 || subject2) are found in requestInfo; OR operation within each list
func validateMatch(match kyverno.MatchResources, requestInfo kyverno.RequestInfo) bool {
if len(match.Roles) > 0 {
if !matchRoleRefs(match.Roles, requestInfo.Roles) {
return false
}
}
if len(match.ClusterRoles) > 0 {
if !matchRoleRefs(match.ClusterRoles, requestInfo.ClusterRoles) {
return false
}
}
if len(match.Subjects) > 0 {
if !matchSubjects(match.Subjects, requestInfo.AdmissionUserInfo) {
return false
}
}
return true
}
// exclude:
// roles: role1, role2
// clusterRoles: clusterRole1,clusterRole2
// subjects: subject1, subject2
// validateExclude returns true if none of the above are found in requestInfo;
// otherwise it returns false immediately, meaning the rule should not be applied
func validateExclude(exclude kyverno.ExcludeResources, requestInfo kyverno.RequestInfo) bool {
if len(exclude.Roles) > 0 {
if matchRoleRefs(exclude.Roles, requestInfo.Roles) {
return false
}
}
if len(exclude.ClusterRoles) > 0 {
if matchRoleRefs(exclude.ClusterRoles, requestInfo.ClusterRoles) {
return false
}
}
if len(exclude.Subjects) > 0 {
if matchSubjects(exclude.Subjects, requestInfo.AdmissionUserInfo) {
return false
}
}
return true
}
// matchRoleRefs returns true if one of ruleRoleRefs exists in resourceRoleRefs
func matchRoleRefs(ruleRoleRefs, resourceRoleRefs []string) bool {
for _, ruleRoleRef := range ruleRoleRefs {
if utils.ContainsString(resourceRoleRefs, ruleRoleRef) {
return true
}
}
return false
}
// matchSubjects returns true if one of ruleSubjects exists in userInfo
func matchSubjects(ruleSubjects []rbacv1.Subject, userInfo authenticationv1.UserInfo) bool {
userGroups := append(userInfo.Groups, userInfo.Username)
for _, subject := range ruleSubjects {
switch subject.Kind {
case "ServiceAccount":
if len(userInfo.Username) <= len(SaPrefix) {
continue
}
subjectServiceAccount := subject.Namespace + ":" + subject.Name
if userInfo.Username[len(SaPrefix):] == subjectServiceAccount {
return true
}
case "User", "Group":
if utils.ContainsString(userGroups, subject.Name) {
return true
}
}
}
return false
}


@@ -1,305 +0,0 @@
package rbac
import (
"flag"
"testing"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"gotest.tools/assert"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
)
func Test_matchAdmissionInfo(t *testing.T) {
flag.Parse()
flag.Set("logtostderr", "true")
flag.Set("v", "3")
tests := []struct {
rule kyverno.Rule
info kyverno.RequestInfo
expected bool
}{
{
rule: kyverno.Rule{
MatchResources: kyverno.MatchResources{},
},
info: kyverno.RequestInfo{},
expected: true,
},
{
rule: kyverno.Rule{
MatchResources: kyverno.MatchResources{
UserInfo: kyverno.UserInfo{
Roles: []string{"ns-a:role-a"},
},
},
},
info: kyverno.RequestInfo{
Roles: []string{"ns-a:role-a"},
},
expected: true,
},
{
rule: kyverno.Rule{
MatchResources: kyverno.MatchResources{
UserInfo: kyverno.UserInfo{
Roles: []string{"ns-a:role-a"},
},
},
},
info: kyverno.RequestInfo{
Roles: []string{"ns-a:role"},
},
expected: false,
},
{
rule: kyverno.Rule{
MatchResources: kyverno.MatchResources{
UserInfo: kyverno.UserInfo{
Subjects: testSubjects(),
},
},
},
info: kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
Username: "serviceaccount:mynamespace:mysa",
},
},
expected: false,
},
{
rule: kyverno.Rule{
ExcludeResources: kyverno.ExcludeResources{
UserInfo: kyverno.UserInfo{
Subjects: testSubjects(),
},
},
},
info: kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
UID: "1",
},
},
expected: true,
},
{
rule: kyverno.Rule{
ExcludeResources: kyverno.ExcludeResources{
UserInfo: kyverno.UserInfo{
Subjects: testSubjects(),
},
},
},
info: kyverno.RequestInfo{
AdmissionUserInfo: authenticationv1.UserInfo{
Username: "kubernetes-admin",
Groups: []string{"system:masters", "system:authenticated"},
},
},
expected: false,
},
}
for _, test := range tests {
assert.Assert(t, test.expected == MatchAdmissionInfo(test.rule, test.info))
}
}
func Test_validateMatch(t *testing.T) {
requestInfo := []struct {
info kyverno.RequestInfo
expected bool
}{
{
info: kyverno.RequestInfo{
Roles: []string{},
},
expected: false,
},
{
info: kyverno.RequestInfo{
Roles: []string{"ns-b:role-b"},
},
expected: true,
},
{
info: kyverno.RequestInfo{
Roles: []string{"ns:role"},
},
expected: false,
},
}
matchRoles := kyverno.MatchResources{
UserInfo: kyverno.UserInfo{
Roles: []string{"ns-a:role-a", "ns-b:role-b"},
},
}
for _, info := range requestInfo {
assert.Assert(t, info.expected == validateMatch(matchRoles, info.info))
}
requestInfo = []struct {
info kyverno.RequestInfo
expected bool
}{
{
info: kyverno.RequestInfo{
ClusterRoles: []string{},
},
expected: false,
},
{
info: kyverno.RequestInfo{
ClusterRoles: []string{"role-b"},
},
expected: false,
},
{
info: kyverno.RequestInfo{
ClusterRoles: []string{"clusterrole-b"},
},
expected: true,
},
{
info: kyverno.RequestInfo{
ClusterRoles: []string{"clusterrole-a", "clusterrole-b"},
},
expected: true,
},
{
info: kyverno.RequestInfo{
ClusterRoles: []string{"fake-a", "fake-b"},
},
expected: false,
},
}
matchClusterRoles := kyverno.MatchResources{
UserInfo: kyverno.UserInfo{
ClusterRoles: []string{"clusterrole-a", "clusterrole-b"},
},
}
for _, info := range requestInfo {
assert.Assert(t, info.expected == validateMatch(matchClusterRoles, info.info))
}
}
func Test_validateExclude(t *testing.T) {
requestInfo := []struct {
info kyverno.RequestInfo
expected bool
}{
{
info: kyverno.RequestInfo{
Roles: []string{},
},
expected: true,
},
{
info: kyverno.RequestInfo{
Roles: []string{"ns-b:role-b"},
},
expected: false,
},
{
info: kyverno.RequestInfo{
Roles: []string{"ns:role"},
},
expected: true,
},
}
excludeRoles := kyverno.ExcludeResources{
UserInfo: kyverno.UserInfo{
Roles: []string{"ns-a:role-a", "ns-b:role-b"},
},
}
for _, info := range requestInfo {
assert.Assert(t, info.expected == validateExclude(excludeRoles, info.info))
}
requestInfo = []struct {
info kyverno.RequestInfo
expected bool
}{
{
info: kyverno.RequestInfo{
ClusterRoles: []string{},
},
expected: true,
},
{
info: kyverno.RequestInfo{
ClusterRoles: []string{"role-b"},
},
expected: true,
},
{
info: kyverno.RequestInfo{
ClusterRoles: []string{"clusterrole-b"},
},
expected: false,
},
{
info: kyverno.RequestInfo{
ClusterRoles: []string{"fake-a", "fake-b"},
},
expected: true,
},
}
excludeClusterRoles := kyverno.ExcludeResources{
UserInfo: kyverno.UserInfo{
ClusterRoles: []string{"clusterrole-a", "clusterrole-b"},
},
}
for _, info := range requestInfo {
assert.Assert(t, info.expected == validateExclude(excludeClusterRoles, info.info))
}
}
func Test_matchSubjects(t *testing.T) {
group := authenticationv1.UserInfo{
Username: "kubernetes-admin",
Groups: []string{"system:masters", "system:authenticated"},
}
sa := authenticationv1.UserInfo{
Username: "system:serviceaccount:mynamespace:mysa",
Groups: []string{"system:serviceaccounts", "system:serviceaccounts:mynamespace", "system:authenticated"},
}
user := authenticationv1.UserInfo{
Username: "system:kube-scheduler",
Groups: []string{"system:authenticated"},
}
subjects := testSubjects()
assert.Assert(t, matchSubjects(subjects, sa))
assert.Assert(t, !matchSubjects(subjects, user))
assert.Assert(t, matchSubjects(subjects, group))
}
func testSubjects() []rbacv1.Subject {
return []rbacv1.Subject{
{
Kind: "User",
Name: "kube-scheduler",
},
{
Kind: "Group",
Name: "system:masters",
},
{
Kind: "ServiceAccount",
Name: "mysa",
Namespace: "mynamespace",
},
}
}


@@ -1,15 +1,19 @@
package engine
import (
"encoding/json"
"strings"
"errors"
"fmt"
"reflect"
"time"
"github.com/nirmata/kyverno/pkg/utils"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
"github.com/golang/glog"
"github.com/minio/minio/pkg/wildcard"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/utils"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
@@ -23,204 +27,172 @@ type EngineStats struct {
RulesAppliedCount int
}
//MatchesResourceDescription checks if the resource matches resource description of the rule or not
func MatchesResourceDescription(resource unstructured.Unstructured, rule kyverno.Rule) bool {
matches := rule.MatchResources.ResourceDescription
exclude := rule.ExcludeResources.ResourceDescription
if len(matches.Kinds) > 0 {
if !findKind(matches.Kinds, resource.GetKind()) {
return false
}
}
name := resource.GetName()
namespace := resource.GetNamespace()
if matches.Name != "" {
// Matches
if !wildcard.Match(matches.Name, name) {
return false
}
}
// Matches
// check if the resource namespace is defined in the list of namespace pattern
if len(matches.Namespaces) > 0 && !utils.ContainsNamepace(matches.Namespaces, namespace) {
return false
}
// Matches
if matches.Selector != nil {
selector, err := metav1.LabelSelectorAsSelector(matches.Selector)
if err != nil {
glog.Error(err)
return false
}
if !selector.Matches(labels.Set(resource.GetLabels())) {
return false
}
}
excludeName := func(name string) Condition {
if exclude.Name == "" {
return NotEvaluate
}
if wildcard.Match(exclude.Name, name) {
return Skip
}
return Process
}
excludeNamespace := func(namespace string) Condition {
if len(exclude.Namespaces) == 0 {
return NotEvaluate
}
if utils.ContainsNamepace(exclude.Namespaces, namespace) {
return Skip
}
return Process
}
excludeSelector := func(labelsMap map[string]string) Condition {
if exclude.Selector == nil {
return NotEvaluate
}
selector, err := metav1.LabelSelectorAsSelector(exclude.Selector)
// if the label selector is invalid, the resource is skipped
if err != nil {
glog.Error(err)
return Skip
}
if selector.Matches(labels.Set(labelsMap)) {
return Skip
}
return Process
}
excludeKind := func(kind string) Condition {
if len(exclude.Kinds) == 0 {
return NotEvaluate
}
if findKind(exclude.Kinds, kind) {
return Skip
}
return Process
}
// 0 -> don't check
// 1 -> not to be excluded
// 2 -> to be excluded
excludeEval := []Condition{}
if ret := excludeName(resource.GetName()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
if ret := excludeNamespace(resource.GetNamespace()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
if ret := excludeSelector(resource.GetLabels()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
if ret := excludeKind(resource.GetKind()); ret != NotEvaluate {
excludeEval = append(excludeEval, ret)
}
// Filtered NotEvaluate
if len(excludeEval) == 0 {
// nothing to exclude
return true
}
return func() bool {
for _, ret := range excludeEval {
if ret == Process {
return true
}
}
return false
}()
}
//Condition type for conditions
type Condition int
const (
// NotEvaluate to not-evaluate to condition
NotEvaluate Condition = 0
// Process to process the condition
Process Condition = 1
// Skip to skip the condition
Skip Condition = 2
)
// ParseResourceInfoFromObject gets kind/namespace/name from resource
func ParseResourceInfoFromObject(rawResource []byte) string {
kind := ParseKindFromObject(rawResource)
namespace := ParseNamespaceFromObject(rawResource)
name := ParseNameFromObject(rawResource)
return strings.Join([]string{kind, namespace, name}, "/")
}
//ParseKindFromObject get kind from resource
func ParseKindFromObject(bytes []byte) string {
var objectJSON map[string]interface{}
json.Unmarshal(bytes, &objectJSON)
return objectJSON["kind"].(string)
}
//ParseNameFromObject extracts resource name from JSON obj
func ParseNameFromObject(bytes []byte) string {
var objectJSON map[string]interface{}
json.Unmarshal(bytes, &objectJSON)
meta, ok := objectJSON["metadata"]
if !ok {
return ""
}
metaMap, ok := meta.(map[string]interface{})
if !ok {
return ""
}
if name, ok := metaMap["name"].(string); ok {
return name
}
return ""
}
// ParseNamespaceFromObject extracts the namespace from the JSON obj
func ParseNamespaceFromObject(bytes []byte) string {
var objectJSON map[string]interface{}
json.Unmarshal(bytes, &objectJSON)
meta, ok := objectJSON["metadata"]
if !ok {
return ""
}
metaMap, ok := meta.(map[string]interface{})
if !ok {
return ""
}
if name, ok := metaMap["namespace"].(string); ok {
return name
}
return ""
}
func findKind(kinds []string, kindGVK string) bool {
func checkKind(kinds []string, resourceKind string) bool {
for _, kind := range kinds {
if kind == kindGVK {
if resourceKind == kind {
return true
}
}
return false
}
func checkName(name, resourceName string) bool {
return wildcard.Match(name, resourceName)
}
func checkNameSpace(namespaces []string, resourceNameSpace string) bool {
for _, namespace := range namespaces {
if resourceNameSpace == namespace {
return true
}
}
return false
}
func checkSelector(labelSelector *metav1.LabelSelector, resourceLabels map[string]string) (bool, error) {
selector, err := metav1.LabelSelectorAsSelector(labelSelector)
if err != nil {
glog.Error(err)
return false, err
}
if selector.Matches(labels.Set(resourceLabels)) {
return true, nil
}
return false, nil
}
func doesResourceMatchConditionBlock(conditionBlock kyverno.ResourceDescription, userInfo kyverno.UserInfo, admissionInfo kyverno.RequestInfo, resource unstructured.Unstructured) []error {
var errs []error
if len(conditionBlock.Kinds) > 0 {
if !checkKind(conditionBlock.Kinds, resource.GetKind()) {
errs = append(errs, fmt.Errorf("resource kind does not match conditionBlock"))
}
}
if conditionBlock.Name != "" {
if !checkName(conditionBlock.Name, resource.GetName()) {
errs = append(errs, fmt.Errorf("resource name does not match conditionBlock"))
}
}
if len(conditionBlock.Namespaces) > 0 {
if !checkNameSpace(conditionBlock.Namespaces, resource.GetNamespace()) {
errs = append(errs, fmt.Errorf("resource namespace does not match conditionBlock"))
}
}
if conditionBlock.Selector != nil {
hasPassed, err := checkSelector(conditionBlock.Selector, resource.GetLabels())
if err != nil {
errs = append(errs, fmt.Errorf("could not parse selector block of the policy in conditionBlock: %v", err))
} else {
if !hasPassed {
errs = append(errs, fmt.Errorf("resource does not match selector of given conditionBlock"))
}
}
}
if len(userInfo.Roles) > 0 {
if !doesSliceContainsAnyOfTheseValues(userInfo.Roles, admissionInfo.Roles...) {
errs = append(errs, fmt.Errorf("user info does not match roles for the given conditionBlock"))
}
}
if len(userInfo.ClusterRoles) > 0 {
if !doesSliceContainsAnyOfTheseValues(userInfo.ClusterRoles, admissionInfo.ClusterRoles...) {
errs = append(errs, fmt.Errorf("user info does not match clusterRoles for the given conditionBlock"))
}
}
if len(userInfo.Subjects) > 0 {
if !matchSubjects(userInfo.Subjects, admissionInfo.AdmissionUserInfo) {
errs = append(errs, fmt.Errorf("user info does not match subject for the given conditionBlock"))
}
}
return errs
}
// matchSubjects returns true if one of ruleSubjects exists in userInfo
func matchSubjects(ruleSubjects []rbacv1.Subject, userInfo authenticationv1.UserInfo) bool {
const SaPrefix = "system:serviceaccount:"
userGroups := append(userInfo.Groups, userInfo.Username)
for _, subject := range ruleSubjects {
switch subject.Kind {
case "ServiceAccount":
if len(userInfo.Username) <= len(SaPrefix) {
continue
}
subjectServiceAccount := subject.Namespace + ":" + subject.Name
if userInfo.Username[len(SaPrefix):] == subjectServiceAccount {
return true
}
case "User", "Group":
if utils.ContainsString(userGroups, subject.Name) {
return true
}
}
}
return false
}
func doesSliceContainsAnyOfTheseValues(slice []string, values ...string) bool {
var sliceElementsMap = make(map[string]bool, len(slice))
for _, sliceElement := range slice {
sliceElementsMap[sliceElement] = true
}
for _, value := range values {
if sliceElementsMap[value] {
return true
}
}
return false
}
//MatchesResourceDescription checks if the resource matches resource description of the rule or not
func MatchesResourceDescription(resourceRef unstructured.Unstructured, ruleRef kyverno.Rule, admissionInfoRef kyverno.RequestInfo) error {
rule := *ruleRef.DeepCopy()
resource := *resourceRef.DeepCopy()
admissionInfo := *admissionInfoRef.DeepCopy()
var reasonsForFailure []error
if reflect.DeepEqual(admissionInfo, kyverno.RequestInfo{}) {
rule.MatchResources.UserInfo = kyverno.UserInfo{}
}
// checking if resource matches the rule
if !reflect.DeepEqual(rule.MatchResources.ResourceDescription, kyverno.ResourceDescription{}) {
matchErrs := doesResourceMatchConditionBlock(rule.MatchResources.ResourceDescription, rule.MatchResources.UserInfo, admissionInfo, resource)
reasonsForFailure = append(reasonsForFailure, matchErrs...)
} else {
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("match block in rule cannot be empty"))
}
// checking if resource has been excluded
if !reflect.DeepEqual(rule.ExcludeResources.ResourceDescription, kyverno.ResourceDescription{}) {
excludeErrs := doesResourceMatchConditionBlock(rule.ExcludeResources.ResourceDescription, rule.ExcludeResources.UserInfo, admissionInfo, resource)
if excludeErrs == nil {
reasonsForFailure = append(reasonsForFailure, fmt.Errorf("resource has been excluded since it matches the exclude block"))
}
}
// creating final error
var errorMessage = "rule has failed to match resource for the following reasons:"
for i, reasonForFailure := range reasonsForFailure {
if reasonForFailure != nil {
errorMessage += "\n" + fmt.Sprint(i+1) + ". " + reasonForFailure.Error()
}
}
if len(reasonsForFailure) > 0 {
return errors.New(errorMessage)
}
return nil
}
func copyConditions(original []kyverno.Condition) []kyverno.Condition {
var copy []kyverno.Condition
for _, condition := range original {

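Beyond the AND semantics, the rewritten matcher reports failures differently: instead of returning a bare bool, it collects one error per failed condition and folds them into a single numbered message. A standalone sketch of that aggregation pattern, with hypothetical checks standing in for the real condition block:

package main

import (
	"errors"
	"fmt"
)

// matchResource mimics how reasonsForFailure is built above: every failed
// condition adds a reason, and all reasons surface in one error.
func matchResource(kindOK, nameOK bool) error {
	var reasons []error
	if !kindOK {
		reasons = append(reasons, fmt.Errorf("resource kind does not match"))
	}
	if !nameOK {
		reasons = append(reasons, fmt.Errorf("resource name does not match"))
	}
	if len(reasons) == 0 {
		return nil
	}
	msg := "rule has failed to match resource for the following reasons:"
	for i, r := range reasons {
		msg += fmt.Sprintf("\n%d. %s", i+1, r.Error())
	}
	return errors.New(msg)
}

func main() {
	fmt.Println(matchResource(false, true))
}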

@@ -1,14 +1,89 @@
package engine
import (
"encoding/json"
"testing"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/utils"
"gotest.tools/assert"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func TestMatchesResourceDescription(t *testing.T) {
tcs := []struct {
Description string
AdmissionInfo kyverno.RequestInfo
Resource []byte
Policy []byte
areErrorsExpected bool
}{
{
Description: "Should match pod and not exclude it",
AdmissionInfo: kyverno.RequestInfo{
ClusterRoles: []string{"admin"},
},
Resource: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"hello-world","labels":{"name":"hello-world"}},"spec":{"containers":[{"name":"hello-world","image":"hello-world","ports":[{"containerPort":81}],"resources":{"limits":{"memory":"30Mi","cpu":"0.2"},"requests":{"memory":"20Mi","cpu":"0.1"}}}]}}`),
Policy: []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"hello-world-policy"},"spec":{"background":false,"rules":[{"name":"hello-world-policy","match":{"resources":{"kinds":["Pod"]}},"exclude":{"resources":{"name":"hello-world"},"clusterRoles":["system:node"]},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","imagePullPolicy":"IfNotPresent"}]}}}}]}}`),
areErrorsExpected: false,
},
{
Description: "Should exclude resource since it matches the exclude block",
AdmissionInfo: kyverno.RequestInfo{
ClusterRoles: []string{"system:node"},
},
Resource: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"hello-world","labels":{"name":"hello-world"}},"spec":{"containers":[{"name":"hello-world","image":"hello-world","ports":[{"containerPort":81}],"resources":{"limits":{"memory":"30Mi","cpu":"0.2"},"requests":{"memory":"20Mi","cpu":"0.1"}}}]}}`),
Policy: []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"hello-world-policy"},"spec":{"background":false,"rules":[{"name":"hello-world-policy","match":{"resources":{"kinds":["Pod"]}},"exclude":{"resources":{"name":"hello-world"},"clusterRoles":["system:node"]},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","imagePullPolicy":"IfNotPresent"}]}}}}]}}`),
areErrorsExpected: true,
},
{
Description: "Should not fail in sync mode; if admission info is empty it should still match resources with specific clusterRoles",
Resource: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"hello-world","labels":{"name":"hello-world"}},"spec":{"containers":[{"name":"hello-world","image":"hello-world","ports":[{"containerPort":81}],"resources":{"limits":{"memory":"30Mi","cpu":"0.2"},"requests":{"memory":"20Mi","cpu":"0.1"}}}]}}`),
Policy: []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"hello-world-policy"},"spec":{"background":false,"rules":[{"name":"hello-world-policy","match":{"resources":{"kinds":["Pod"]}},"exclude":{"resources":{"name":"hello-world"},"clusterRoles":["system:node"]},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","imagePullPolicy":"IfNotPresent"}]}}}}]}}`),
areErrorsExpected: false,
},
{
Description: "Should fail since resource does not match policy",
AdmissionInfo: kyverno.RequestInfo{
ClusterRoles: []string{"admin"},
},
Resource: []byte(`{"apiVersion":"v1","kind":"Service","metadata":{"name":"hello-world","labels":{"name":"hello-world"}},"spec":{"containers":[{"name":"hello-world","image":"hello-world","ports":[{"containerPort":81}],"resources":{"limits":{"memory":"30Mi","cpu":"0.2"},"requests":{"memory":"20Mi","cpu":"0.1"}}}]}}`),
Policy: []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"hello-world-policy"},"spec":{"background":false,"rules":[{"name":"hello-world-policy","match":{"resources":{"kinds":["Pod"]}},"exclude":{"resources":{"name":"hello-world"},"clusterRoles":["system:node"]},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","imagePullPolicy":"IfNotPresent"}]}}}}]}}`),
areErrorsExpected: true,
},
{
Description: "Should not fail since resource does not match exclude block",
AdmissionInfo: kyverno.RequestInfo{
ClusterRoles: []string{"system:node"},
},
Resource: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"hello-world2","labels":{"name":"hello-world"}},"spec":{"containers":[{"name":"hello-world","image":"hello-world","ports":[{"containerPort":81}],"resources":{"limits":{"memory":"30Mi","cpu":"0.2"},"requests":{"memory":"20Mi","cpu":"0.1"}}}]}}`),
Policy: []byte(`{"apiVersion":"kyverno.io/v1","kind":"ClusterPolicy","metadata":{"name":"hello-world-policy"},"spec":{"background":false,"rules":[{"name":"hello-world-policy","match":{"resources":{"kinds":["Pod"]}},"exclude":{"resources":{"name":"hello-world"},"clusterRoles":["system:node"]},"mutate":{"overlay":{"spec":{"containers":[{"(image)":"*","imagePullPolicy":"IfNotPresent"}]}}}}]}}`),
areErrorsExpected: false,
},
}
for i, tc := range tcs {
var policy kyverno.Policy
err := json.Unmarshal(tc.Policy, &policy)
if err != nil {
t.Errorf("Testcase %d invalid policy raw", i+1)
}
resource, _ := utils.ConvertToUnstructured(tc.Resource)
for _, rule := range policy.Spec.Rules {
err := MatchesResourceDescription(*resource, rule, tc.AdmissionInfo)
if err != nil {
if !tc.areErrorsExpected {
t.Errorf("Testcase %d Unexpected error: %v", i+1, err)
}
} else {
if tc.areErrorsExpected {
t.Errorf("Testcase %d Expected error but received no error", i+1)
}
}
}
}
}
// Match multiple kinds
func TestResourceDescriptionMatch_MultipleKind(t *testing.T) {
rawResource := []byte(`{
@@ -63,7 +138,10 @@ func TestResourceDescriptionMatch_MultipleKind(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
// Match resource name
@@ -121,7 +199,9 @@ func TestResourceDescriptionMatch_Name(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
// Match resource regex
@@ -179,7 +259,9 @@ func TestResourceDescriptionMatch_Name_Regex(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
// Match expressions for labels to not match
@@ -245,7 +327,9 @@ func TestResourceDescriptionMatch_Label_Expression_NotMatch(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
// Match label expression in matching set
@@ -312,7 +396,9 @@ func TestResourceDescriptionMatch_Label_Expression_Match(t *testing.T) {
}
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription}}
assert.Assert(t, MatchesResourceDescription(*resource, rule))
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err != nil {
t.Errorf("Testcase has failed due to the following:%v", err)
}
}
// check for exclude conditions
@@ -390,5 +476,7 @@ func TestResourceDescriptionExclude_Label_Expression_Match(t *testing.T) {
rule := kyverno.Rule{MatchResources: kyverno.MatchResources{ResourceDescription: resourceDescription},
ExcludeResources: kyverno.ExcludeResources{ResourceDescription: resourceDescriptionExclude}}
assert.Assert(t, !MatchesResourceDescription(*resource, rule))
if err := MatchesResourceDescription(*resource, rule, kyverno.RequestInfo{}); err == nil {
t.Errorf("Testcase has failed due to the following:\n Function has returned no error, even though it was supposed to fail")
}
}


@@ -8,7 +8,6 @@ import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/rbac"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/utils"
"github.com/nirmata/kyverno/pkg/engine/validate"
@@ -92,20 +91,13 @@ func validateResource(ctx context.EvalInterface, policy kyverno.ClusterPolicy, r
continue
}
startTime := time.Now()
if !rbac.MatchAdmissionInfo(rule, admissionInfo) {
glog.V(3).Infof("rule '%s' cannot be applied on %s/%s/%s, admission permission: %v",
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), admissionInfo)
continue
}
glog.V(4).Infof("Time: Validate matchAdmissionInfo %v", time.Since(startTime))
// check if the resource satisfies the filter conditions defined in the rule
// TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
// don't satisfy a policy rule resource description
ok := MatchesResourceDescription(resource, rule)
if !ok {
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule ", resource.GetNamespace(), resource.GetName())
if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil {
glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule:\n%s", resource.GetNamespace(), resource.GetName(), err.Error())
continue
}


@@ -16,7 +16,6 @@ const variableRegex = `\{\{([^{}]*)\}\}`
//SubstituteVars replaces the variables with the values defined in the context
// - if any variable is invalid or has a nil value, it is considered a failed variable substitution
func SubstituteVars(ctx context.EvalInterface, pattern interface{}) (interface{}, error) {
println(&pattern)
errs := []error{}
pattern = subVars(ctx, pattern, "", &errs)
if len(errs) == 0 {


@@ -1,250 +0,0 @@
package namespace
import (
"time"
"k8s.io/apimachinery/pkg/util/wait"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
"k8s.io/apimachinery/pkg/api/errors"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
v1 "k8s.io/api/core/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
v1Informer "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const (
// maxRetries is the number of times a Namespace will be processed for a policy before it's dropped from the queue
maxRetries = 15
)
//NamespaceController watches the 'Namespace' resource creation/update and applies the generation rules on them
type NamespaceController struct {
client *client.Client
kyvernoClient *kyvernoclient.Clientset
syncHandler func(nsKey string) error
enqueueNs func(ns *v1.Namespace)
//nsLister provides expansion to the namespace lister to inject GVK for the resource
nsLister NamespaceListerExpansion
// nsSynced returns true if the Namespace store has been synced at least once
nsSynced cache.InformerSynced
// pLister can list/get cluster policies from the shared informer's store
pLister kyvernolister.ClusterPolicyLister
// pSynced returns true if the Policy store has been synced at least once
pSynced cache.InformerSynced
// API to send policy stats for aggregation
policyStatus policy.PolicyStatusInterface
// eventGen provides an interface to generate events
eventGen event.Interface
// Namespaces that need to be synced
queue workqueue.RateLimitingInterface
// Resource manager, manages the mapping for already processed resource
rm resourceManager
// helpers to validate against current loaded configuration
configHandler config.Interface
// store to hold policy meta data for faster lookup
pMetaStore policystore.LookupInterface
// policy violation generator
pvGenerator policyviolation.GeneratorInterface
}
//NewNamespaceController returns a new Controller to manage generation rules
func NewNamespaceController(kyvernoClient *kyvernoclient.Clientset,
client *client.Client,
nsInformer v1Informer.NamespaceInformer,
pInformer kyvernoinformer.ClusterPolicyInformer,
policyStatus policy.PolicyStatusInterface,
eventGen event.Interface,
configHandler config.Interface,
pvGenerator policyviolation.GeneratorInterface,
pMetaStore policystore.LookupInterface) *NamespaceController {
//TODO: do we need an event recorder for this controller?
// create the controller
nsc := &NamespaceController{
client: client,
kyvernoClient: kyvernoClient,
eventGen: eventGen,
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), "namespace"),
configHandler: configHandler,
pMetaStore: pMetaStore,
pvGenerator: pvGenerator,
}
nsInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nsc.addNamespace,
UpdateFunc: nsc.updateNamespace,
DeleteFunc: nsc.deleteNamespace,
})
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: nsc.addPolicy,
UpdateFunc: nsc.updatePolicy,
})
nsc.enqueueNs = nsc.enqueue
nsc.syncHandler = nsc.syncNamespace
nsc.nsLister = NewNamespaceLister(nsInformer.Lister())
nsc.nsSynced = nsInformer.Informer().HasSynced
nsc.pLister = pInformer.Lister()
nsc.pSynced = pInformer.Informer().HasSynced
nsc.policyStatus = policyStatus
// resource manager
// rebuild after 300 seconds/ 5 mins
nsc.rm = NewResourceManager(300)
return nsc
}
func (nsc *NamespaceController) addPolicy(obj interface{}) {
p := obj.(*kyverno.ClusterPolicy)
// check if the policy has generate rule
if generateRuleExists(p) {
// process policy
nsc.processPolicy(p)
}
}
func (nsc *NamespaceController) updatePolicy(old, cur interface{}) {
curP := cur.(*kyverno.ClusterPolicy)
// check if the policy has generate rule
if generateRuleExists(curP) {
// process policy
nsc.processPolicy(curP)
}
}
func (nsc *NamespaceController) addNamespace(obj interface{}) {
ns := obj.(*v1.Namespace)
glog.V(4).Infof("Adding Namespace %s", ns.Name)
nsc.enqueueNs(ns)
}
func (nsc *NamespaceController) updateNamespace(old, cur interface{}) {
oldNs := old.(*v1.Namespace)
curNs := cur.(*v1.Namespace)
if curNs.ResourceVersion == oldNs.ResourceVersion {
// Periodic resync will send update events for all known Namespaces.
// Two different versions of the same namespace will always have different RVs.
return
}
glog.V(4).Infof("Updating Namespace %s", curNs.Name)
//TODO: anything to be done here?
}
func (nsc *NamespaceController) deleteNamespace(obj interface{}) {
ns, _ := obj.(*v1.Namespace)
glog.V(4).Infof("Deleting Namespace %s", ns.Name)
//TODO: anything to be done here?
}
func (nsc *NamespaceController) enqueue(ns *v1.Namespace) {
key, err := cache.MetaNamespaceKeyFunc(ns)
if err != nil {
glog.Error(err)
return
}
nsc.queue.Add(key)
}
//Run to run the controller
func (nsc *NamespaceController) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer nsc.queue.ShutDown()
glog.Info("Starting namespace controller")
defer glog.Info("Shutting down namespace controller")
if ok := cache.WaitForCacheSync(stopCh, nsc.nsSynced, nsc.pSynced); !ok {
glog.Error("namespace generator: failed to sync cache")
return
}
for i := 0; i < workers; i++ {
go wait.Until(nsc.worker, time.Second, stopCh)
}
<-stopCh
}
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (nsc *NamespaceController) worker() {
for nsc.processNextWorkItem() {
}
}
func (nsc *NamespaceController) processNextWorkItem() bool {
key, quit := nsc.queue.Get()
if quit {
return false
}
defer nsc.queue.Done(key)
err := nsc.syncHandler(key.(string))
nsc.handleErr(err, key)
return true
}
func (nsc *NamespaceController) handleErr(err error, key interface{}) {
if err == nil {
nsc.queue.Forget(key)
return
}
if nsc.queue.NumRequeues(key) < maxRetries {
glog.V(2).Infof("Error syncing namespace %v: %v", key, err)
nsc.queue.AddRateLimited(key)
return
}
utilruntime.HandleError(err)
glog.V(2).Infof("Dropping namespace %q out of the queue: %v", key, err)
nsc.queue.Forget(key)
}
func (nsc *NamespaceController) syncNamespace(key string) error {
startTime := time.Now()
glog.V(4).Infof("Started syncing namespace %q (%v)", key, startTime)
defer func() {
glog.V(4).Infof("Finished syncing namespace %q (%v)", key, time.Since(startTime))
}()
namespace, err := nsc.nsLister.GetResource(key)
if errors.IsNotFound(err) {
glog.V(2).Infof("namespace %v has been deleted", key)
return nil
}
if err != nil {
return err
}
// Deep-copy otherwise we are mutating our cache.
// TODO: Deep-copy only when needed.
n := namespace.DeepCopy()
// skip processing namespace if it's been filtered
// exclude the filtered resources
if nsc.configHandler.ToFilter("", namespace.Name, "") {
//TODO: improve the text
glog.V(4).Infof("excluding namespace %s as it's a filtered resource", namespace.Name)
return nil
}
// process generate rules
engineResponses := nsc.processNamespace(*n)
// report errors
nsc.report(engineResponses)
return nil
}


@@ -1,50 +0,0 @@
package namespace
import (
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/labels"
v1CoreLister "k8s.io/client-go/listers/core/v1"
)
//NamespaceListerExpansion ...
type NamespaceListerExpansion interface {
v1CoreLister.NamespaceLister
// List lists all Namespaces in the indexer.
ListResources(selector labels.Selector) (ret []*v1.Namespace, err error)
// GetResource gets the resource and injects the GVK
GetResource(name string) (*v1.Namespace, error)
}
//NamespaceLister ...
type NamespaceLister struct {
v1CoreLister.NamespaceLister
}
//NewNamespaceLister returns a new NamespaceLister
func NewNamespaceLister(nsLister v1CoreLister.NamespaceLister) NamespaceListerExpansion {
nsl := NamespaceLister{
nsLister,
}
return &nsl
}
//ListResources is a wrapper to List and adds the resource kind information
// as the lister is specific to a gvk we can hardcode the values here
func (nsl *NamespaceLister) ListResources(selector labels.Selector) (ret []*v1.Namespace, err error) {
namespaces, err := nsl.List(selector)
for index := range namespaces {
namespaces[index].SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Namespace"))
}
return namespaces, err
}
//GetResource is a wrapper to get the resource and inject the GVK
func (nsl *NamespaceLister) GetResource(name string) (*v1.Namespace, error) {
namespace, err := nsl.Get(name)
if err != nil {
return nil, err
}
namespace.SetGroupVersionKind(v1.SchemeGroupVersion.WithKind("Namespace"))
return namespace, err
}


@@ -1,252 +0,0 @@
package namespace
import (
"sync"
"time"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/response"
policyctr "github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/utils"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
)
type resourceManager interface {
ProcessResource(policy, pv, kind, ns, name, rv string) bool
//TODO removeResource(kind, ns, name string) error
RegisterResource(policy, pv, kind, ns, name, rv string)
// reload
Drop()
}
// ResourceManager stores the details on already processed resources for caching
type ResourceManager struct {
// we drop and re-build the cache
// based on the memory consumed by the map
data map[string]interface{}
mux sync.RWMutex
time time.Time
rebuildTime int64 // after how many seconds should we rebuild the cache
}
//NewResourceManager returns a new ResourceManager
func NewResourceManager(rebuildTime int64) *ResourceManager {
rm := ResourceManager{
data: make(map[string]interface{}),
time: time.Now(),
rebuildTime: rebuildTime,
}
// set time it was built
return &rm
}
var empty struct{}
//RegisterResource stores if the policy is processed on this resource version
func (rm *ResourceManager) RegisterResource(policy, pv, kind, ns, name, rv string) {
rm.mux.Lock()
defer rm.mux.Unlock()
// add the resource
key := buildKey(policy, pv, kind, ns, name, rv)
rm.data[key] = empty
}
//ProcessResource returns true if the policy was not applied on the resource
func (rm *ResourceManager) ProcessResource(policy, pv, kind, ns, name, rv string) bool {
rm.mux.RLock()
defer rm.mux.RUnlock()
key := buildKey(policy, pv, kind, ns, name, rv)
_, ok := rm.data[key]
return !ok
}
//Drop drops the cache after every rebuild interval
//TODO: or drop based on the size
func (rm *ResourceManager) Drop() {
timeSince := time.Since(rm.time)
glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince)
glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second)
if timeSince > time.Duration(rm.rebuildTime)*time.Second {
rm.mux.Lock()
defer rm.mux.Unlock()
rm.data = map[string]interface{}{}
rm.time = time.Now()
glog.V(4).Infof("dropping cache at time %v", rm.time)
}
}
func buildKey(policy, pv, kind, ns, name, rv string) string {
return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv
}
func (nsc *NamespaceController) processNamespace(namespace corev1.Namespace) []response.EngineResponse {
// convert to unstructured
unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&namespace)
if err != nil {
glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
return nil
}
nsc.rm.Drop()
ns := unstructured.Unstructured{Object: unstr}
// get all the policies that have a generate rule and resource description satisfies the namespace
// apply policy on resource
policies := listpolicies(ns, nsc.pMetaStore)
var engineResponses []response.EngineResponse
for _, policy := range policies {
// pre-processing, check if the policy and resource version has been processed before
if !nsc.rm.ProcessResource(policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion()) {
glog.V(4).Infof("policy %s with resource version %s already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
continue
}
engineResponse := applyPolicy(nsc.client, ns, policy, nsc.policyStatus)
engineResponses = append(engineResponses, engineResponse)
// post-processing, register the resource as processed
nsc.rm.RegisterResource(policy.GetName(), policy.GetResourceVersion(), ns.GetKind(), ns.GetNamespace(), ns.GetName(), ns.GetResourceVersion())
}
return engineResponses
}
func generateRuleExists(policy *kyverno.ClusterPolicy) bool {
for _, rule := range policy.Spec.Rules {
if rule.Generation != (kyverno.Generation{}) {
return true
}
}
return false
}
func (nsc *NamespaceController) processPolicy(policy *kyverno.ClusterPolicy) {
filteredNamespaces := []*corev1.Namespace{}
// get namespaces that policy applies on
namespaces, err := nsc.nsLister.ListResources(labels.NewSelector())
if err != nil {
glog.Errorf("failed to list namespaces: %v", err)
return
}
for _, namespace := range namespaces {
// convert to unstructured
unstr, err := runtime.DefaultUnstructuredConverter.ToUnstructured(namespace)
if err != nil {
glog.Infof("unable to convert to unstructured, not processing any policies: %v", err)
continue
}
ns := unstructured.Unstructured{Object: unstr}
for _, rule := range policy.Spec.Rules {
if rule.Generation == (kyverno.Generation{}) {
continue
}
ok := engine.MatchesResourceDescription(ns, rule)
if !ok {
glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
continue
}
glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
filteredNamespaces = append(filteredNamespaces, namespace)
}
}
// list of namespaces that the policy applies on
for _, ns := range filteredNamespaces {
glog.V(4).Infof("policy %s with generate rule: namespace %s to be processed ", policy.Name, ns.Name)
nsc.addNamespace(ns)
}
}
func listpolicies(ns unstructured.Unstructured, pMetaStore policystore.LookupInterface) []kyverno.ClusterPolicy {
var filteredpolicies []kyverno.ClusterPolicy
glog.V(4).Infof("listing policies for namespace %s", ns.GetName())
policies, err := pMetaStore.ListAll()
if err != nil {
glog.Errorf("failed to list policies: %v", err)
return nil
}
for _, policy := range policies {
for _, rule := range policy.Spec.Rules {
if rule.Generation == (kyverno.Generation{}) {
continue
}
ok := engine.MatchesResourceDescription(ns, rule)
if !ok {
glog.V(4).Infof("namespace %s does not satisfy the resource description for the policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
continue
}
glog.V(4).Infof("namespace %s satisfies resource description for policy %s rule %s", ns.GetName(), policy.Name, rule.Name)
filteredpolicies = append(filteredpolicies, policy)
}
}
return filteredpolicies
}
func applyPolicy(client *client.Client, resource unstructured.Unstructured, p kyverno.ClusterPolicy, policyStatus policyctr.PolicyStatusInterface) response.EngineResponse {
var policyStats []policyctr.PolicyStat
// gather stats from the engine response
gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
ps := policyctr.PolicyStat{}
ps.PolicyName = policyName
ps.Stats.GenerationExecutionTime = policyResponse.ProcessingTime
ps.Stats.RulesAppliedCount = policyResponse.RulesAppliedCount
// capture rule level stats
for _, rule := range policyResponse.Rules {
rs := policyctr.RuleStatinfo{}
rs.RuleName = rule.Name
rs.ExecutionTime = rule.RuleStats.ProcessingTime
if rule.Success {
rs.RuleAppliedCount++
} else {
rs.RulesFailedCount++
}
ps.Stats.Rules = append(ps.Stats.Rules, rs)
}
policyStats = append(policyStats, ps)
}
// send stats for aggregation
sendStat := func(blocked bool) {
for _, stat := range policyStats {
stat.Stats.ResourceBlocked = utils.Btoi(blocked)
//SEND
policyStatus.SendStat(stat)
}
}
startTime := time.Now()
glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime)
defer func() {
glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", p.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime))
}()
// build context
ctx := context.NewContext()
ctx.AddResource(transformResource(resource))
policyContext := engine.PolicyContext{
NewResource: resource,
Policy: p,
Client: client,
Context: ctx,
}
engineResponse := engine.Generate(policyContext)
// gather stats
gatherStat(p.Name, engineResponse.PolicyResponse)
//send stats
sendStat(false)
return engineResponse
}
func transformResource(resource unstructured.Unstructured) []byte {
data, err := resource.MarshalJSON()
if err != nil {
glog.Errorf("failed to marshal resource %v: %v", resource, err)
return nil
}
return data
}


@@ -1,63 +0,0 @@
package namespace
import (
"fmt"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyviolation"
)
func (nsc *NamespaceController) report(engineResponses []response.EngineResponse) {
// generate events
eventInfos := generateEvents(engineResponses)
nsc.eventGen.Add(eventInfos...)
// generate policy violations
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
nsc.pvGenerator.Add(pvInfos...)
}
func generateEvents(ers []response.EngineResponse) []event.Info {
var eventInfos []event.Info
for _, er := range ers {
if er.IsSuccesful() {
continue
}
eventInfos = append(eventInfos, generateEventsPerEr(er)...)
}
return eventInfos
}
func generateEventsPerEr(er response.EngineResponse) []event.Info {
var eventInfos []event.Info
glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
for _, rule := range er.PolicyResponse.Rules {
if rule.Success {
continue
}
// generate event on resource for each failed rule
glog.V(4).Infof("generation event on resource '%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = er.PolicyResponse.Resource.Kind
e.Namespace = "" // event generated on namespace resource
e.Name = er.PolicyResponse.Resource.Name
e.Reason = "Failure"
e.Source = event.GeneratePolicyController
e.Message = fmt.Sprintf("policy '%s' (%s) rule '%s' not satisfied. %v", er.PolicyResponse.Policy, rule.Type, rule.Name, rule.Message)
eventInfos = append(eventInfos, e)
}
if er.IsSuccesful() {
return eventInfos
}
// generate an event on the policy for all failed rules
glog.V(4).Infof("generation event on policy '%s'", er.PolicyResponse.Policy)
e := event.Info{}
e.Kind = "ClusterPolicy"
e.Namespace = ""
e.Name = er.PolicyResponse.Policy
e.Reason = "Failure"
e.Source = event.GeneratePolicyController
e.Message = fmt.Sprintf("policy '%s' rules '%v' on resource '%s/%s/%s' not satisfied", er.PolicyResponse.Policy, er.GetFailedRules(), er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
return eventInfos
}


@@ -238,23 +238,23 @@ func generateEmptyResource(kindSchema *openapi_v2.Schema) interface{} {
case "integer":
if kindSchema.GetDefault() != nil {
val, _ := strconv.Atoi(string(kindSchema.GetDefault().Value.Value))
return val
return int64(val)
}
if kindSchema.GetExample() != nil {
val, _ := strconv.Atoi(string(kindSchema.GetExample().GetValue().Value))
return val
return int64(val)
}
return 0
return int64(0)
case "number":
if kindSchema.GetDefault() != nil {
val, _ := strconv.Atoi(string(kindSchema.GetDefault().Value.Value))
return val
return int64(val)
}
if kindSchema.GetExample() != nil {
val, _ := strconv.Atoi(string(kindSchema.GetExample().GetValue().Value))
return val
return int64(val)
}
return 0
return int64(0)
case "boolean":
if kindSchema.GetDefault() != nil {
return string(kindSchema.GetDefault().Value.Value) == "true"


@@ -5,7 +5,6 @@ import (
"strings"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/engine/rbac"
v1beta1 "k8s.io/api/admission/v1beta1"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
@@ -16,6 +15,7 @@ import (
const (
clusterrolekind = "ClusterRole"
rolekind = "Role"
SaPrefix = "system:serviceaccount:"
)
//GetRoleRef gets the list of roles and cluster roles for the incoming api-request
@@ -88,27 +88,20 @@ func getRoleRefByClusterRoleBindings(clusterroleBindings []*rbacv1.ClusterRoleBi
// subject.kind can only be ServiceAccount, User and Group
func matchSubjectsMap(subject rbacv1.Subject, userInfo authenticationv1.UserInfo) bool {
// ServiceAccount
if isServiceaccountUserInfo(userInfo.Username) {
if strings.Contains(userInfo.Username, SaPrefix) {
return matchServiceAccount(subject, userInfo)
} else {
// User or Group
return matchUserOrGroup(subject, userInfo)
}
// User or Group
return matchUserOrGroup(subject, userInfo)
}
func isServiceaccountUserInfo(username string) bool {
if strings.Contains(username, rbac.SaPrefix) {
return true
}
return false
}
// matchServiceAccount checks if the userInfo service account matches the subject service account
// a service account is represented as saPrefix:namespace:name in userInfo
func matchServiceAccount(subject rbacv1.Subject, userInfo authenticationv1.UserInfo) bool {
subjectServiceAccount := subject.Namespace + ":" + subject.Name
if userInfo.Username[len(rbac.SaPrefix):] != subjectServiceAccount {
glog.V(3).Infof("service account not match, expect %s, got %s", subjectServiceAccount, userInfo.Username[len(rbac.SaPrefix):])
if userInfo.Username[len(SaPrefix):] != subjectServiceAccount {
glog.V(3).Infof("service account not match, expect %s, got %s", subjectServiceAccount, userInfo.Username[len(SaPrefix):])
return false
}

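Both matchSubjects in the engine and matchServiceAccount above lean on the fixed format of service-account usernames in admission requests. A tiny illustration with made-up names:

package main

import (
	"fmt"
	"strings"
)

const saPrefix = "system:serviceaccount:"

func main() {
	// Admission requests report service accounts as
	// "system:serviceaccount:<namespace>:<name>".
	username := "system:serviceaccount:mynamespace:mysa"
	if strings.HasPrefix(username, saPrefix) {
		// Strip the prefix to get "<namespace>:<name>", the form compared
		// against an rbacv1.Subject's Namespace and Name.
		fmt.Println(username[len(saPrefix):]) // mynamespace:mysa
	}
}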

@@ -11,27 +11,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func Test_isServiceaccountUserInfo(t *testing.T) {
tests := []struct {
username string
expected bool
}{
{
username: "system:serviceaccount:default:saconfig",
expected: true,
},
{
username: "serviceaccount:default:saconfig",
expected: false,
},
}
for _, test := range tests {
res := isServiceaccountUserInfo(test.username)
assert.Assert(t, test.expected == res)
}
}
func Test_matchServiceAccount_subject_variants(t *testing.T) {
userInfo := authenticationv1.UserInfo{
Username: "system:serviceaccount:default:saconfig",