1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-31 03:45:17 +00:00

Clean up PV with dependant when a blocked admission request passes

This commit is contained in:
Shuting Zhao 2019-10-23 23:18:58 -07:00
parent 1db901cca6
commit 6e69c8b69b
13 changed files with 238 additions and 30 deletions

View file

@ -1,5 +1,9 @@
required = ["k8s.io/code-generator/cmd/client-gen"]
[[override]]
name = "k8s.io/kubernetes"
branch = "release-1.14"
[[constraint]]
name = "k8s.io/code-generator"
version = "kubernetes-1.14.1"
@ -12,14 +16,14 @@ required = ["k8s.io/code-generator/cmd/client-gen"]
name = "k8s.io/api"
version = "kubernetes-1.14.1"
[[constraint]]
name = "k8s.io/client-go"
version = "kubernetes-1.14.0"
[[constraint]]
name = "github.com/minio/minio"
branch = "master"
[[override]]
name = "k8s.io/client-go"
version = "kubernetes-1.14.2"
[[override]]
name = "github.com/gotestyourself/gotest.tools"
branch = "master"
@ -35,3 +39,11 @@ required = ["k8s.io/code-generator/cmd/client-gen"]
[[constraint]]
name = "gopkg.in/yaml.v2"
version = "2.2.2"
[[override]]
name = "k8s.io/apiserver"
branch = "release-1.14"
[[override]]
name = "k8s.io/apiextensions-apiserver"
branch = "release-1.14"

View file

@ -230,6 +230,17 @@ spec:
type: string
message:
type: string
dependant:
type: object
required:
- kind
properties:
kind:
type: string
namespace:
type: string
creationBlocked:
# OpenAPI v3 schemas have no "bool" type; the valid primitive type name is "boolean".
type: boolean
---
kind: Namespace
apiVersion: v1

View file

@ -173,9 +173,16 @@ type ResourceSpec struct {
// ViolatedRule stores the information regarding the rule
type ViolatedRule struct {
Name string `json:"name"`
Type string `json:"type"`
Message string `json:"message"`
Name string `json:"name"`
Type string `json:"type"`
Message string `json:"message"`
Dependant `json:"dependant,omitempty"`
}
// Dependant describes a resource associated with a violated rule; when
// CreationBlocked is set, the admission request for that resource was denied.
// NOTE(review): semantics inferred from newViolatedRules, which fills this
// from the patched resource when the message contains "Request Blocked" — confirm.
type Dependant struct {
	Kind            string `json:"kind,omitempty"`
	Namespace       string `json:"namespace,omitempty"`
	CreationBlocked bool   `json:"creationBlocked,omitempty"`
}
//PolicyViolationStatus provides information regarding policyviolation status

View file

@ -163,6 +163,22 @@ func (in *ClusterPolicyViolationList) DeepCopyObject() runtime.Object {
return nil
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
// Dependant holds only string/bool value fields, so the shallow struct copy below is a complete deep copy.
func (in *Dependant) DeepCopyInto(out *Dependant) {
	*out = *in
	return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Dependant.
func (in *Dependant) DeepCopy() *Dependant {
	// Preserve nil: the deep copy of a nil receiver is nil.
	if in == nil {
		return nil
	}
	out := new(Dependant)
	in.DeepCopyInto(out)
	return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ExcludeResources) DeepCopyInto(out *ExcludeResources) {
*out = *in
@ -437,6 +453,7 @@ func (in *Validation) DeepCopy() *Validation {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ViolatedRule) DeepCopyInto(out *ViolatedRule) {
*out = *in
out.Dependant = in.Dependant
return
}

View file

@ -140,7 +140,7 @@ func (c *Client) ReadRootCASecret() (result []byte) {
glog.Warningf("root CA certificate not found in secret %s/%s", certProps.Namespace, tlsca.Name)
return result
}
glog.Infof("using CA bundle defined in secret %s/%s to validate the webhook's server certificate", certProps.Namespace, tlsca.Name)
glog.V(4).Infof("using CA bundle defined in secret %s/%s to validate the webhook's server certificate", certProps.Namespace, tlsca.Name)
return result
}

View file

@ -21,6 +21,7 @@ import (
"k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/kubernetes"
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
csrtype "k8s.io/client-go/kubernetes/typed/certificates/v1beta1"
event "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/rest"
@ -71,6 +72,10 @@ func (c *Client) GetKubePolicyDeployment() (*apps.Deployment, error) {
return &deploy, nil
}
//GetAppsV1Interface provides typed interface for the apps/v1 API group
func (c *Client) GetAppsV1Interface() appsv1.AppsV1Interface {
	return c.kclient.AppsV1()
}
//GetEventsInterface provides typed interface for events
//TODO: can we use dynamic client to fetch the typed interface
// or generate a kube client value to access the interface

View file

@ -37,7 +37,7 @@ func ValidateValueWithPattern(value, pattern interface{}) bool {
case bool:
typedValue, ok := value.(bool)
if !ok {
glog.Warningf("Expected bool, found %T", value)
glog.V(4).Infof("Expected bool, found %T", value)
return false
}
return typedPattern == typedValue

View file

@ -198,7 +198,6 @@ func (pc *PolicyController) addPolicyViolation(obj interface{}) {
// them to see if anyone wants to adopt it.
ps := pc.getPolicyForPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", pv.Name)
if err := pc.pvControl.DeletePolicyViolation(pv.Name); err != nil {

View file

@ -3,6 +3,7 @@ package policyviolation
import (
"fmt"
"reflect"
"strings"
"time"
"github.com/golang/glog"
@ -220,9 +221,53 @@ func (pvc *PolicyViolationController) syncPolicyViolation(key string) error {
return err
}
if err := pvc.syncBlockedResource(pv); err != nil {
glog.V(4).Infof("not syncing policy violation status")
return err
}
return pvc.syncStatusOnly(pv)
}
// syncBlockedResource removes an inactive policy violation once the
// previously blocked (rejected) resource has been created in the cluster.
// Rules without a Dependant are skipped; for each dependant kind, every
// matching resource's owners are compared against the violation's resourceSpec.
func (pvc *PolicyViolationController) syncBlockedResource(curPv *kyverno.ClusterPolicyViolation) error {
	for _, violatedRule := range curPv.Spec.ViolatedRules {
		if reflect.DeepEqual(violatedRule.Dependant, kyverno.Dependant{}) {
			continue
		}
		// get resource
		blockedResource := violatedRule.Dependant
		resources, err := pvc.client.ListResource(blockedResource.Kind, blockedResource.Namespace, nil)
		if err != nil {
			// BUG FIX: the error was previously discarded (resources, _ :=),
			// which would panic on resources.Items when the list call failed.
			glog.V(4).Infof("failed to list %s in namespace %q: %v", blockedResource.Kind, blockedResource.Namespace, err)
			continue
		}
		for _, resource := range resources.Items {
			glog.V(4).Infof("getting owners for %s/%s/%s\n", resource.GetKind(), resource.GetNamespace(), resource.GetName())
			owners := getOwners(pvc.client, resource)
			// owner of resource matches violation resourceSpec
			// remove policy violation as the blocked request got created
			if containsOwner(owners, curPv) {
				// pod -> replicaset1; deploy -> replicaset2
				// if replicaset1 == replicaset2, the pod is
				// no longer an active child of deploy, skip removing pv
				if !validDependantForDeployment(pvc.client.GetAppsV1Interface(), *curPv, resource) {
					// BUG FIX: was glog.V(4).Infof("") — an empty log line; say why we skip.
					glog.V(4).Infof("resource %s/%s/%s is not an active dependant of %s, skipping pv removal",
						resource.GetKind(), resource.GetNamespace(), resource.GetName(), curPv.Spec.ResourceSpec.ToKey())
					continue
				}
				// resource created, remove policy violation
				if err := pvc.pvControl.RemovePolicyViolation(curPv.Name); err != nil {
					glog.Infof("unable to delete the policy violation %s: %v", curPv.Name, err)
					return err
				}
				glog.V(4).Infof("removed policy violation %s as the blocked resource %s/%s successfully created, owner: %s",
					curPv.Name, blockedResource.Kind, blockedResource.Namespace, strings.ReplaceAll(curPv.Spec.ResourceSpec.ToKey(), ".", "/"))
			}
		}
	}
	return nil
}
func (pvc *PolicyViolationController) syncActiveResource(curPv *kyverno.ClusterPolicyViolation) error {
// check if the resource is active or not ?
rspec := curPv.Spec.ResourceSpec

View file

@ -3,6 +3,7 @@ package policyviolation
import (
"fmt"
"reflect"
"strings"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
@ -10,17 +11,15 @@ import (
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/engine"
v1 "k8s.io/api/apps/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
"k8s.io/client-go/tools/cache"
deployutil "k8s.io/kubernetes/pkg/controller/deployment/util"
)
type pvResourceOwner struct {
kind string
namespace string
name string
}
//BuildPolicyViolation returns an value of type PolicyViolation
func BuildPolicyViolation(policy string, resource kyverno.ResourceSpec, fRules []kyverno.ViolatedRule) kyverno.ClusterPolicyViolation {
pv := kyverno.ClusterPolicyViolation{
@ -48,6 +47,8 @@ func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyver
glog.V(4).Infof("Building policy violation for denied admission request, engineResponse: %v", er)
if pvList := buildPVWithOwner(dclient, er); len(pvList) != 0 {
pvs = append(pvs, pvList...)
glog.V(3).Infof("Built policy violation for denied admission request %s/%s/%s",
er.PatchedResource.GetKind(), er.PatchedResource.GetNamespace(), er.PatchedResource.GetName())
}
continue
}
@ -84,13 +85,15 @@ func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyver
_, err := client.KyvernoV1alpha1().ClusterPolicyViolations().Create(&newPv)
if err != nil {
glog.Error(err)
} else {
glog.Infof("policy violation created for resource %s", newPv.Spec.ResourceSpec.ToKey())
}
continue
}
// compare the policyviolation spec for existing resource if present else
if reflect.DeepEqual(curPv.Spec, newPv.Spec) {
// if they are equal there has been no change so dont update the polivy violation
glog.Infof("policy violation '%s/%s/%s' spec did not change so not updating it", newPv.Spec.Kind, newPv.Spec.Namespace, newPv.Spec.Name)
glog.V(3).Infof("policy violation '%s/%s/%s' spec did not change so not updating it", newPv.Spec.Kind, newPv.Spec.Namespace, newPv.Spec.Name)
glog.V(4).Infof("policy violation spec %v did not change so not updating it", newPv.Spec)
continue
}
@ -103,6 +106,7 @@ func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyver
glog.Error(err)
continue
}
glog.Infof("policy violation updated for resource %s", newPv.Spec.ResourceSpec.ToKey())
}
}
@ -119,7 +123,7 @@ func buildPVForPolicy(er engine.EngineResponse) kyverno.ClusterPolicyViolation {
}
func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []kyverno.ClusterPolicyViolation) {
msg := fmt.Sprintf("Request Blocked for resource %s/%s; ", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Name)
msg := fmt.Sprintf("Request Blocked for resource %s/%s; ", er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Kind)
violatedRules := newViolatedRules(er, msg)
// create violation on resource owner (if exist) when action is set to enforce
@ -152,16 +156,9 @@ func getExistingPolicyViolationIfAny(pvListerSynced cache.InformerSynced, pvList
// TODO: check for existing ov using label selectors on resource and policy
// TODO: there can be duplicates, as the labels have not been assigned to the policy violation yet
labelMap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
ls := &metav1.LabelSelector{}
err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
policyViolationSelector, err := converLabelToSelector(labelMap)
if err != nil {
glog.Errorf("failed to generate label sector of Policy name %s: %v", newPv.Spec.Policy, err)
return nil, err
}
policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
if err != nil {
glog.Errorf("invalid label selector: %v", err)
return nil, err
return nil, fmt.Errorf("failed to generate label sector of Policy name %s: %v", newPv.Spec.Policy, err)
}
//TODO: sync the cache before reading from it ?
@ -190,6 +187,34 @@ func getExistingPolicyViolationIfAny(pvListerSynced cache.InformerSynced, pvList
return pvs[0], nil
}
// converLabelToSelector converts a plain label map into a labels.Selector,
// going through the metav1.LabelSelector intermediate form.
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
	labelSelector := &metav1.LabelSelector{}
	if err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, labelSelector, nil); err != nil {
		return nil, err
	}
	selector, err := metav1.LabelSelectorAsSelector(labelSelector)
	if err != nil {
		return nil, fmt.Errorf("invalid label selector: %v", err)
	}
	return selector, nil
}
// pvResourceOwner identifies the owner (kind/namespace/name) of a resource a
// policy violation was raised for.
type pvResourceOwner struct {
	kind      string
	namespace string
	name      string
}

// toKey renders the owner as a dot-separated key; the namespace segment is
// omitted when the owner has no namespace.
func (o pvResourceOwner) toKey() string {
	if o.namespace != "" {
		return o.kind + "." + o.namespace + "." + o.name
	}
	return o.kind + "." + o.name
}
// pass in unstr rather than using the client to get the unstr
// as if name is empty then GetResource panic as it returns a list
func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvResourceOwner {
@ -216,6 +241,13 @@ func getOwners(dclient *dclient.Client, unstr unstructured.Unstructured) []pvRes
}
func newViolatedRules(er engine.EngineResponse, msg string) (violatedRules []kyverno.ViolatedRule) {
unstr := er.PatchedResource
dependant := kyverno.Dependant{
Kind: unstr.GetKind(),
Namespace: unstr.GetNamespace(),
CreationBlocked: true,
}
for _, r := range er.PolicyResponse.Rules {
// filter failed/violated rules
if !r.Success {
@ -224,8 +256,88 @@ func newViolatedRules(er engine.EngineResponse, msg string) (violatedRules []kyv
Type: r.Type,
Message: msg + r.Message,
}
// resource creation blocked
// set resource itself as dependant
if strings.Contains(msg, "Request Blocked") {
vrule.Dependant = dependant
}
violatedRules = append(violatedRules, vrule)
}
}
return
}
// containsOwner reports whether the policy violation's resourceSpec matches
// any of the given owners.
func containsOwner(owners []pvResourceOwner, pv *kyverno.ClusterPolicyViolation) bool {
	curOwner := pvResourceOwner{
		kind:      pv.Spec.ResourceSpec.Kind,
		name:      pv.Spec.ResourceSpec.Name,
		namespace: pv.Spec.ResourceSpec.Namespace,
	}
	for _, targetOwner := range owners {
		// pvResourceOwner has only string fields, so direct equality is
		// equivalent to reflect.DeepEqual and avoids reflection overhead.
		if curOwner == targetOwner {
			return true
		}
	}
	return false
}
// validDependantForDeployment checks if resource (pod) matches the intent of the given deployment
// explicitly handles the deployment-replicaset-pod relationship:
// the pod is valid only if its owning ReplicaSet is the deployment's current (new) ReplicaSet.
func validDependantForDeployment(client appsv1.AppsV1Interface, curPv kyverno.ClusterPolicyViolation, resource unstructured.Unstructured) bool {
	if resource.GetKind() != "Pod" {
		return false
	}
	// only handles deployment-replicaset-pod relationship
	if curPv.Spec.ResourceSpec.Kind != "Deployment" {
		return false
	}
	owner := pvResourceOwner{
		kind:      curPv.Spec.ResourceSpec.Kind,
		namespace: curPv.Spec.ResourceSpec.Namespace,
		name:      curPv.Spec.ResourceSpec.Name,
	}
	deploy, err := client.Deployments(owner.namespace).Get(owner.name, metav1.GetOptions{})
	if err != nil {
		glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
		return false
	}
	expectReplicaset, err := deployutil.GetNewReplicaSet(deploy, client)
	if err != nil {
		glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
		return false
	}
	// BUG FIX: GetNewReplicaSet returns a *v1.ReplicaSet; comparing that pointer
	// to a v1.ReplicaSet value with reflect.DeepEqual was always false, so a nil
	// result fell through and panicked at expectReplicaset.Name. Check nil directly.
	if expectReplicaset == nil {
		glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.namespace, owner.kind, owner.name)
		return false
	}
	var actualReplicaset *v1.ReplicaSet
	for _, podOwner := range resource.GetOwnerReferences() {
		if podOwner.Kind != "ReplicaSet" {
			continue
		}
		actualReplicaset, err = client.ReplicaSets(resource.GetNamespace()).Get(podOwner.Name, metav1.GetOptions{})
		if err != nil {
			glog.Errorf("failed to get replicaset from %s/%s/%s: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
			return false
		}
		// BUG FIX: same pointer-vs-value DeepEqual issue as above.
		if actualReplicaset == nil {
			glog.V(2).Infof("no replicaset found for Pod/%s/%s", resource.GetNamespace(), podOwner.Name)
			return false
		}
		if expectReplicaset.Name == actualReplicaset.Name {
			return true
		}
	}
	return false
}

View file

@ -12,7 +12,7 @@ import (
func (wrc *WebhookRegistrationClient) contructDebugMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.MutatingWebhookServicePath)
glog.V(3).Infof("Debug MutatingWebhookConfig is registered with url %s\n", url)
glog.V(4).Infof("Debug MutatingWebhookConfig is registered with url %s\n", url)
return &admregapi.MutatingWebhookConfiguration{
ObjectMeta: v1.ObjectMeta{

View file

@ -78,7 +78,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest) (bool
continue
}
glog.V(4).Infof("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
glog.V(2).Infof("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation)
// TODO: this can be
engineResponse := engine.Mutate(*policy, *resource)

View file

@ -87,7 +87,7 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pat
continue
}
glog.V(4).Infof("Handling validation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
glog.V(2).Infof("Handling validation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation)
// glog.V(4).Infof("Validating resource %s/%s/%s with policy %s with %d rules\n", resource.GetKind(), resource.GetNamespace(), resource.GetName(), policy.ObjectMeta.Name, len(policy.Spec.Rules))