2019-08-09 11:08:02 -07:00
package policyviolation
import (
2019-08-13 13:15:04 -07:00
"fmt"
"reflect"
2019-10-23 23:18:58 -07:00
"strings"
2019-08-13 13:15:04 -07:00
"github.com/golang/glog"
2019-08-09 11:08:02 -07:00
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1alpha1"
2019-08-17 09:58:14 -07:00
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
2019-10-15 20:56:41 -07:00
dclient "github.com/nirmata/kyverno/pkg/dclient"
2019-08-26 13:34:42 -07:00
"github.com/nirmata/kyverno/pkg/engine"
2019-10-23 23:18:58 -07:00
v1 "k8s.io/api/apps/v1"
2019-08-13 13:15:04 -07:00
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2019-10-15 20:56:41 -07:00
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
2019-10-23 23:18:58 -07:00
"k8s.io/apimachinery/pkg/labels"
appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1"
2019-08-13 13:15:04 -07:00
"k8s.io/client-go/tools/cache"
2019-10-23 23:18:58 -07:00
deployutil "k8s.io/kubernetes/pkg/controller/deployment/util"
2019-08-09 11:08:02 -07:00
)
//BuildPolicyViolation returns an value of type PolicyViolation
2019-09-03 14:51:51 -07:00
func BuildPolicyViolation ( policy string , resource kyverno . ResourceSpec , fRules [ ] kyverno . ViolatedRule ) kyverno . ClusterPolicyViolation {
pv := kyverno . ClusterPolicyViolation {
2019-08-09 11:08:02 -07:00
Spec : kyverno . PolicyViolationSpec {
Policy : policy ,
ResourceSpec : resource ,
ViolatedRules : fRules ,
} ,
}
2019-08-09 19:12:50 -07:00
//TODO: check if this can be removed or use unstructured?
// pv.Kind = "PolicyViolation"
2019-08-12 10:02:07 -07:00
pv . SetGenerateName ( "pv-" )
2019-08-09 11:08:02 -07:00
return pv
}
2019-08-13 13:15:04 -07:00
2019-08-26 13:34:42 -07:00
//CreatePV creates policy violation resource based on the engine responses
2019-10-24 15:50:11 -07:00
func CreatePV ( pvLister kyvernolister . ClusterPolicyViolationLister , client * kyvernoclient . Clientset , engineResponses [ ] engine . EngineResponse ) {
2019-09-03 14:51:51 -07:00
var pvs [ ] kyverno . ClusterPolicyViolation
2019-08-26 13:34:42 -07:00
for _ , er := range engineResponses {
2019-09-04 15:30:09 -07:00
// ignore creation of PV for resoruces that are yet to be assigned a name
if er . PolicyResponse . Resource . Name == "" {
2019-10-23 09:58:42 -07:00
glog . V ( 4 ) . Infof ( "resource %v, has not been assigned a name, not creating a policy violation for it" , er . PolicyResponse . Resource )
2019-09-04 15:30:09 -07:00
continue
}
2019-10-15 20:56:41 -07:00
2019-08-26 13:34:42 -07:00
if ! er . IsSuccesful ( ) {
2019-10-15 20:56:41 -07:00
glog . V ( 4 ) . Infof ( "Building policy violation for engine response %v" , er )
2019-09-03 14:51:51 -07:00
if pv := buildPVForPolicy ( er ) ; ! reflect . DeepEqual ( pv , kyverno . ClusterPolicyViolation { } ) {
2019-08-26 13:34:42 -07:00
pvs = append ( pvs , pv )
}
}
}
2019-10-15 20:56:41 -07:00
2019-10-24 15:50:11 -07:00
createPV ( pvLister , client , pvs )
}
// CreatePVWhenBlocked creates pv on resource owner only when admission request is denied.
// The blocked child resource never got a name, so the violation is attached to
// its owner(s) via buildPVWithOwner.
func CreatePVWhenBlocked(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset,
	dclient *dclient.Client, engineResponses []engine.EngineResponse) {
	var pvs []kyverno.ClusterPolicyViolation
	for _, er := range engineResponses {
		// child resource is not created in this case thus it won't have a name
		glog.V(4).Infof("Building policy violation for denied admission request, engineResponse: %v", er)
		pvList := buildPVWithOwner(dclient, er)
		if len(pvList) == 0 {
			continue
		}
		pvs = append(pvs, pvList...)
		glog.V(3).Infof("Built policy violation for denied admission request %s/%s/%s",
			er.PatchedResource.GetKind(), er.PatchedResource.GetNamespace(), er.PatchedResource.GetName())
	}
	createPV(pvLister, client, pvs)
}
func createPV ( pvLister kyvernolister . ClusterPolicyViolationLister , client * kyvernoclient . Clientset , pvs [ ] kyverno . ClusterPolicyViolation ) {
2019-08-26 13:34:42 -07:00
if len ( pvs ) == 0 {
return
}
2019-10-15 20:56:41 -07:00
2019-08-26 13:34:42 -07:00
for _ , newPv := range pvs {
glog . V ( 4 ) . Infof ( "creating policyViolation resource for policy %s and resource %s/%s/%s" , newPv . Spec . Policy , newPv . Spec . Kind , newPv . Spec . Namespace , newPv . Spec . Name )
// check if there was a previous policy voilation for policy & resource combination
curPv , err := getExistingPolicyViolationIfAny ( nil , pvLister , newPv )
if err != nil {
glog . Error ( err )
continue
}
if curPv == nil {
2019-08-26 16:10:19 -07:00
glog . V ( 4 ) . Infof ( "creating new policy violation for policy %s & resource %s/%s/%s" , newPv . Spec . Policy , newPv . Spec . ResourceSpec . Kind , newPv . Spec . ResourceSpec . Namespace , newPv . Spec . ResourceSpec . Name )
2019-08-26 13:34:42 -07:00
// no existing policy violation, create a new one
2019-09-03 14:51:51 -07:00
_ , err := client . KyvernoV1alpha1 ( ) . ClusterPolicyViolations ( ) . Create ( & newPv )
2019-08-26 13:34:42 -07:00
if err != nil {
glog . Error ( err )
2019-10-23 23:18:58 -07:00
} else {
glog . Infof ( "policy violation created for resource %s" , newPv . Spec . ResourceSpec . ToKey ( ) )
2019-08-26 13:34:42 -07:00
}
continue
}
// compare the policyviolation spec for existing resource if present else
if reflect . DeepEqual ( curPv . Spec , newPv . Spec ) {
// if they are equal there has been no change so dont update the polivy violation
2019-10-23 23:18:58 -07:00
glog . V ( 3 ) . Infof ( "policy violation '%s/%s/%s' spec did not change so not updating it" , newPv . Spec . Kind , newPv . Spec . Namespace , newPv . Spec . Name )
2019-10-15 20:56:41 -07:00
glog . V ( 4 ) . Infof ( "policy violation spec %v did not change so not updating it" , newPv . Spec )
2019-08-26 13:34:42 -07:00
continue
}
// spec changed so update the policyviolation
glog . V ( 4 ) . Infof ( "creating new policy violation for policy %s & resource %s/%s/%s" , curPv . Spec . Policy , curPv . Spec . ResourceSpec . Kind , curPv . Spec . ResourceSpec . Namespace , curPv . Spec . ResourceSpec . Name )
//TODO: using a generic name, but would it be helpful to have naming convention for policy violations
// as we can only have one policy violation for each (policy + resource) combination
2019-09-03 14:51:51 -07:00
_ , err = client . KyvernoV1alpha1 ( ) . ClusterPolicyViolations ( ) . Update ( & newPv )
2019-08-26 13:34:42 -07:00
if err != nil {
glog . Error ( err )
continue
}
2019-10-23 23:18:58 -07:00
glog . Infof ( "policy violation updated for resource %s" , newPv . Spec . ResourceSpec . ToKey ( ) )
2019-08-26 13:34:42 -07:00
}
}
2019-10-15 20:56:41 -07:00
// buildPVForPolicy translates a failed engine response into a cluster policy
// violation targeting the resource the policy was evaluated against.
func buildPVForPolicy(er engine.EngineResponse) kyverno.ClusterPolicyViolation {
	target := kyverno.ResourceSpec{
		Kind:      er.PolicyResponse.Resource.Kind,
		Namespace: er.PolicyResponse.Resource.Namespace,
		Name:      er.PolicyResponse.Resource.Name,
	}
	return BuildPolicyViolation(er.PolicyResponse.Policy, target, newViolatedRules(er, ""))
}
func buildPVWithOwner ( dclient * dclient . Client , er engine . EngineResponse ) ( pvs [ ] kyverno . ClusterPolicyViolation ) {
2019-10-15 20:56:41 -07:00
msg := fmt . Sprintf ( "Request Blocked for resource %s/%s; " , er . PolicyResponse . Resource . Kind , er . PolicyResponse . Resource . Name )
2019-10-15 20:56:41 -07:00
violatedRules := newViolatedRules ( er , msg )
// create violation on resource owner (if exist) when action is set to enforce
owners := getOwners ( dclient , er . PatchedResource )
// standaloneresource, set pvResourceSpec with resource itself
if len ( owners ) == 0 {
pvResourceSpec := kyverno . ResourceSpec {
Namespace : er . PolicyResponse . Resource . Namespace ,
Kind : er . PolicyResponse . Resource . Kind ,
Name : er . PolicyResponse . Resource . Name ,
}
return append ( pvs , BuildPolicyViolation ( er . PolicyResponse . Policy , pvResourceSpec , violatedRules ) )
}
for _ , owner := range owners {
// resource has owner, set pvResourceSpec with owner info
pvResourceSpec := kyverno . ResourceSpec {
Namespace : owner . namespace ,
Kind : owner . kind ,
Name : owner . name ,
}
pvs = append ( pvs , BuildPolicyViolation ( er . PolicyResponse . Policy , pvResourceSpec , violatedRules ) )
}
return
}
2019-08-13 13:15:04 -07:00
//TODO: change the name
2019-09-03 14:51:51 -07:00
func getExistingPolicyViolationIfAny ( pvListerSynced cache . InformerSynced , pvLister kyvernolister . ClusterPolicyViolationLister , newPv kyverno . ClusterPolicyViolation ) ( * kyverno . ClusterPolicyViolation , error ) {
2019-08-13 13:15:04 -07:00
// TODO: check for existing ov using label selectors on resource and policy
2019-08-26 13:34:42 -07:00
// TODO: there can be duplicates, as the labels have not been assigned to the policy violation yet
2019-08-13 13:15:04 -07:00
labelMap := map [ string ] string { "policy" : newPv . Spec . Policy , "resource" : newPv . Spec . ResourceSpec . ToKey ( ) }
2019-10-23 23:18:58 -07:00
policyViolationSelector , err := converLabelToSelector ( labelMap )
2019-08-13 13:15:04 -07:00
if err != nil {
2019-10-23 23:18:58 -07:00
return nil , fmt . Errorf ( "failed to generate label sector of Policy name %s: %v" , newPv . Spec . Policy , err )
2019-08-13 13:15:04 -07:00
}
//TODO: sync the cache before reading from it ?
// check is this is needed ?
// stopCh := make(chan struct{}, 0)
// if !cache.WaitForCacheSync(stopCh, pvListerSynced) {
// //TODO: can this be handled or avoided ?
// glog.Info("unable to sync policy violation shared informer cache, might be out of sync")
// }
pvs , err := pvLister . List ( policyViolationSelector )
if err != nil {
glog . Errorf ( "unable to list policy violations with label selector %v: %v" , policyViolationSelector , err )
return nil , err
}
//TODO: ideally there should be only one policy violation returned
if len ( pvs ) > 1 {
glog . Errorf ( "more than one policy violation exists with labels %v" , labelMap )
return nil , fmt . Errorf ( "more than one policy violation exists with labels %v" , labelMap )
}
if len ( pvs ) == 0 {
glog . Infof ( "policy violation does not exist with labels %v" , labelMap )
return nil , nil
}
return pvs [ 0 ] , nil
}
2019-10-15 20:56:41 -07:00
2019-10-23 23:18:58 -07:00
// converLabelToSelector turns a plain label map into a labels.Selector usable
// with listers.
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
	labelSelector := &metav1.LabelSelector{}
	if err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, labelSelector, nil); err != nil {
		return nil, err
	}

	selector, err := metav1.LabelSelectorAsSelector(labelSelector)
	if err != nil {
		return nil, fmt.Errorf("invalid label selector: %v", err)
	}
	return selector, nil
}
// pvResourceOwner identifies the owner of a resource a policy violation is
// attached to.
type pvResourceOwner struct {
	kind      string
	namespace string
	name      string
}

// toKey renders the owner as a dot-separated key; the namespace segment is
// omitted when the owner has no namespace.
func (o pvResourceOwner) toKey() string {
	parts := []string{o.kind, o.namespace, o.name}
	if o.namespace == "" {
		parts = []string{o.kind, o.name}
	}
	return strings.Join(parts, ".")
}
// pass in unstr rather than using the client to get the unstr
// as if name is empty then GetResource panic as it returns a list
2019-10-15 20:56:41 -07:00
func getOwners ( dclient * dclient . Client , unstr unstructured . Unstructured ) [ ] pvResourceOwner {
resourceOwners := unstr . GetOwnerReferences ( )
if len ( resourceOwners ) == 0 {
return [ ] pvResourceOwner { pvResourceOwner {
kind : unstr . GetKind ( ) ,
namespace : unstr . GetNamespace ( ) ,
name : unstr . GetName ( ) ,
} }
}
var owners [ ] pvResourceOwner
for _ , resourceOwner := range resourceOwners {
unstrParent , err := dclient . GetResource ( resourceOwner . Kind , unstr . GetNamespace ( ) , resourceOwner . Name )
if err != nil {
glog . Errorf ( "Failed to get resource owner for %s/%s/%s, err: %v" , resourceOwner . Kind , unstr . GetNamespace ( ) , resourceOwner . Name , err )
return nil
}
owners = append ( owners , getOwners ( dclient , * unstrParent ) ... )
}
return owners
}
func newViolatedRules ( er engine . EngineResponse , msg string ) ( violatedRules [ ] kyverno . ViolatedRule ) {
2019-10-23 23:18:58 -07:00
unstr := er . PatchedResource
2019-10-28 11:44:48 -07:00
dependant := kyverno . ManagedResourceSpec {
2019-10-23 23:18:58 -07:00
Kind : unstr . GetKind ( ) ,
Namespace : unstr . GetNamespace ( ) ,
CreationBlocked : true ,
}
2019-10-15 20:56:41 -07:00
for _ , r := range er . PolicyResponse . Rules {
// filter failed/violated rules
if ! r . Success {
vrule := kyverno . ViolatedRule {
Name : r . Name ,
Type : r . Type ,
Message : msg + r . Message ,
}
2019-10-23 23:18:58 -07:00
// resource creation blocked
// set resource itself as dependant
if strings . Contains ( msg , "Request Blocked" ) {
2019-10-24 15:50:11 -07:00
vrule . ManagedResource = dependant
2019-10-23 23:18:58 -07:00
}
2019-10-15 20:56:41 -07:00
violatedRules = append ( violatedRules , vrule )
}
}
return
}
2019-10-23 23:18:58 -07:00
// containsOwner reports whether the resource the policy violation refers to
// appears in the given owner list.
func containsOwner(owners []pvResourceOwner, pv *kyverno.ClusterPolicyViolation) bool {
	// pvResourceOwner holds only strings, so direct comparison is equivalent
	// to a deep equality check.
	target := pvResourceOwner{
		kind:      pv.Spec.ResourceSpec.Kind,
		name:      pv.Spec.ResourceSpec.Name,
		namespace: pv.Spec.ResourceSpec.Namespace,
	}
	for _, owner := range owners {
		if owner == target {
			return true
		}
	}
	return false
}
// validDependantForDeployment checks if resource (pod) matches the intent of the given deployment
// explicitly handles deployment-replicaset-pod relationship: the pod is valid
// iff one of its ReplicaSet owners is the deployment's current new ReplicaSet.
func validDependantForDeployment(client appsv1.AppsV1Interface, curPv kyverno.ClusterPolicyViolation, resource unstructured.Unstructured) bool {
	if resource.GetKind() != "Pod" {
		return false
	}
	// only handles deployment-replicaset-pod relationship
	if curPv.Spec.ResourceSpec.Kind != "Deployment" {
		return false
	}

	owner := pvResourceOwner{
		kind:      curPv.Spec.ResourceSpec.Kind,
		namespace: curPv.Spec.ResourceSpec.Namespace,
		name:      curPv.Spec.ResourceSpec.Name,
	}

	deploy, err := client.Deployments(owner.namespace).Get(owner.name, metav1.GetOptions{})
	if err != nil {
		glog.Errorf("failed to get resourceOwner deployment %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
		return false
	}

	expectReplicaset, err := deployutil.GetNewReplicaSet(deploy, client)
	if err != nil {
		glog.Errorf("failed to get replicaset owned by %s/%s/%s: %v", owner.kind, owner.namespace, owner.name, err)
		return false
	}
	// BUG FIX: the old code compared the *v1.ReplicaSet pointer against a
	// v1.ReplicaSet value with reflect.DeepEqual — mismatched types are never
	// deeply equal, so the guard never fired and a nil result from
	// GetNewReplicaSet was dereferenced below. Check for nil explicitly.
	if expectReplicaset == nil {
		glog.V(2).Infof("no replicaset found for deploy %s/%s/%s", owner.namespace, owner.kind, owner.name)
		return false
	}

	var actualReplicaset *v1.ReplicaSet
	for _, podOwner := range resource.GetOwnerReferences() {
		if podOwner.Kind != "ReplicaSet" {
			continue
		}
		actualReplicaset, err = client.ReplicaSets(resource.GetNamespace()).Get(podOwner.Name, metav1.GetOptions{})
		if err != nil {
			glog.Errorf("failed to get replicaset from %s/%s/%s: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), err)
			return false
		}
		// BUG FIX: same pointer-vs-value DeepEqual problem as above.
		if actualReplicaset == nil {
			glog.V(2).Infof("no replicaset found for Pod/%s/%s", resource.GetNamespace(), podOwner.Name)
			return false
		}
		if expectReplicaset.Name == actualReplicaset.Name {
			return true
		}
	}
	return false
}