
rename policyviolation-related package and functions to clusterpolicyviolation

Shuting Zhao 2019-11-12 11:22:06 -08:00
parent 1f2b71ace8
commit 14769936a2
7 changed files with 39 additions and 37 deletions
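The rename is mechanical: the pkg/policyviolation package becomes pkg/clusterpolicyviolation and its entry points pick up a Cluster prefix (CreatePV → CreateClusterPV, CreatePVWhenBlocked → CreateClusterPVWhenBlocked, BuildPolicyViolation → the unexported buildClusterPolicyViolation). A minimal sketch of how a call site migrates, using the clusterpv import alias from this commit; the report function below is a hypothetical caller, not code from the repository:

// Hypothetical caller, adapted from the call sites changed in this commit.
package example

import (
	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
	clusterpv "github.com/nirmata/kyverno/pkg/clusterpolicyviolation"
	"github.com/nirmata/kyverno/pkg/engine"
)

// report creates cluster policy violations for the given engine responses.
func report(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset, engineResponses []engine.EngineResponse) {
	// before this commit: policyviolation.CreatePV(pvLister, client, engineResponses)
	clusterpv.CreateClusterPV(pvLister, client, engineResponses)
}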

View file

@@ -7,12 +7,12 @@ import (
 	"github.com/golang/glog"
 	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
 	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
+	clusterpv "github.com/nirmata/kyverno/pkg/clusterpolicyviolation"
 	"github.com/nirmata/kyverno/pkg/config"
 	client "github.com/nirmata/kyverno/pkg/dclient"
 	event "github.com/nirmata/kyverno/pkg/event"
 	"github.com/nirmata/kyverno/pkg/namespace"
 	"github.com/nirmata/kyverno/pkg/policy"
-	"github.com/nirmata/kyverno/pkg/policyviolation"
 	"github.com/nirmata/kyverno/pkg/utils"
 	"github.com/nirmata/kyverno/pkg/webhookconfig"
 	"github.com/nirmata/kyverno/pkg/webhooks"
@@ -114,7 +114,7 @@ func main() {
 	// POLICY VIOLATION CONTROLLER
 	// policy violation cleanup if the corresponding resource is deleted
 	// status: lastUpdatTime
-	pvc, err := policyviolation.NewPolicyViolationController(client, pclient, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations())
+	pvc, err := clusterpv.NewPolicyViolationController(client, pclient, pInformer.Kyverno().V1alpha1().ClusterPolicies(), pInformer.Kyverno().V1alpha1().ClusterPolicyViolations())
 	if err != nil {
 		glog.Fatalf("error creating policy violation controller: %v\n", err)
 	}

View file

@@ -1,4 +1,4 @@
-package policyviolation
+package clusterpolicyviolation
 import (
 	"fmt"

View file

@@ -1,4 +1,4 @@
-package policyviolation
+package clusterpolicyviolation
 import (
 	"fmt"
@@ -20,23 +20,8 @@ import (
 	deployutil "k8s.io/kubernetes/pkg/controller/deployment/util"
 )
-//BuildPolicyViolation returns an value of type PolicyViolation
-func BuildPolicyViolation(policy string, resource kyverno.ResourceSpec, fRules []kyverno.ViolatedRule) kyverno.ClusterPolicyViolation {
-	pv := kyverno.ClusterPolicyViolation{
-		Spec: kyverno.PolicyViolationSpec{
-			Policy: policy,
-			ResourceSpec: resource,
-			ViolatedRules: fRules,
-		},
-	}
-	//TODO: check if this can be removed or use unstructured?
-	// pv.Kind = "PolicyViolation"
-	pv.SetGenerateName("pv-")
-	return pv
-}
-//CreatePV creates policy violation resource based on the engine responses
-func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset, engineResponses []engine.EngineResponse) {
+//CreateClusterPV creates policy violation resource based on the engine responses
+func CreateClusterPV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset, engineResponses []engine.EngineResponse) {
 	var pvs []kyverno.ClusterPolicyViolation
 	for _, er := range engineResponses {
 		// ignore creation of PV for resoruces that are yet to be assigned a name
@@ -53,11 +38,11 @@ func CreatePV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyver
 		}
 	}
-	createPV(pvLister, client, pvs)
+	createClusterPV(pvLister, client, pvs)
 }
 // CreatePVWhenBlocked creates pv on resource owner only when admission request is denied
-func CreatePVWhenBlocked(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset,
+func CreateClusterPVWhenBlocked(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset,
 	dclient *dclient.Client, engineResponses []engine.EngineResponse) {
 	var pvs []kyverno.ClusterPolicyViolation
 	for _, er := range engineResponses {
@@ -69,10 +54,10 @@ func CreatePVWhenBlocked(pvLister kyvernolister.ClusterPolicyViolationLister, cl
 			er.PatchedResource.GetKind(), er.PatchedResource.GetNamespace(), er.PatchedResource.GetName())
 		}
 	}
-	createPV(pvLister, client, pvs)
+	createClusterPV(pvLister, client, pvs)
 }
-func createPV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset, pvs []kyverno.ClusterPolicyViolation) {
+func createClusterPV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyvernoclient.Clientset, pvs []kyverno.ClusterPolicyViolation) {
 	if len(pvs) == 0 {
 		return
 	}
@@ -116,6 +101,21 @@ func createPV(pvLister kyvernolister.ClusterPolicyViolationLister, client *kyver
 	}
 }
+//buildClusterPolicyViolation returns an value of type PolicyViolation
+func buildClusterPolicyViolation(policy string, resource kyverno.ResourceSpec, fRules []kyverno.ViolatedRule) kyverno.ClusterPolicyViolation {
+	pv := kyverno.ClusterPolicyViolation{
+		Spec: kyverno.PolicyViolationSpec{
+			Policy: policy,
+			ResourceSpec: resource,
+			ViolatedRules: fRules,
+		},
+	}
+	//TODO: check if this can be removed or use unstructured?
+	// pv.Kind = "PolicyViolation"
+	pv.SetGenerateName("pv-")
+	return pv
+}
 func buildPVForPolicy(er engine.EngineResponse) kyverno.ClusterPolicyViolation {
 	pvResourceSpec := kyverno.ResourceSpec{
 		Kind: er.PolicyResponse.Resource.Kind,
@@ -125,7 +125,7 @@ func buildPVForPolicy(er engine.EngineResponse) kyverno.ClusterPolicyViolation {
 	violatedRules := newViolatedRules(er, "")
-	return BuildPolicyViolation(er.PolicyResponse.Policy, pvResourceSpec, violatedRules)
+	return buildClusterPolicyViolation(er.PolicyResponse.Policy, pvResourceSpec, violatedRules)
 }
 func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []kyverno.ClusterPolicyViolation) {
@@ -142,11 +142,11 @@ func buildPVWithOwner(dclient *dclient.Client, er engine.EngineResponse) (pvs []
 			Kind: er.PolicyResponse.Resource.Kind,
 			Name: er.PolicyResponse.Resource.Name,
 		}
-		return append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, pvResourceSpec, violatedRules))
+		return append(pvs, buildClusterPolicyViolation(er.PolicyResponse.Policy, pvResourceSpec, violatedRules))
 	}
 	for _, owner := range owners {
-		pvs = append(pvs, BuildPolicyViolation(er.PolicyResponse.Policy, owner, violatedRules))
+		pvs = append(pvs, buildClusterPolicyViolation(er.PolicyResponse.Policy, owner, violatedRules))
 	}
 	return
 }

View file

@@ -6,7 +6,7 @@ import (
 	"github.com/golang/glog"
 	"github.com/nirmata/kyverno/pkg/engine"
 	"github.com/nirmata/kyverno/pkg/event"
-	"github.com/nirmata/kyverno/pkg/policyviolation"
+	clusterpv "github.com/nirmata/kyverno/pkg/clusterpolicyviolation"
 )
 func (nsc *NamespaceController) report(engineResponses []engine.EngineResponse) {
@@ -21,7 +21,7 @@ func (nsc *NamespaceController) report(engineResponses []engine.EngineResponse)
 		// failure - policy/rule failed to apply on the resource
 	}
 	// generate policy violation
-	policyviolation.CreatePV(nsc.pvLister, nsc.kyvernoClient, engineResponses)
+	clusterpv.CreateClusterPV(nsc.pvLister, nsc.kyvernoClient, engineResponses)
 }
 //reportEvents generates events for the failed resources

View file

@@ -9,7 +9,7 @@ import (
 	kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1alpha1"
 	dclient "github.com/nirmata/kyverno/pkg/dclient"
 	"github.com/nirmata/kyverno/pkg/engine"
-	"github.com/nirmata/kyverno/pkg/policyviolation"
+	clusterpv "github.com/nirmata/kyverno/pkg/clusterpolicyviolation"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 )
@@ -69,7 +69,7 @@ func getPVonOwnerRef(pvLister kyvernolister.ClusterPolicyViolationLister, dclien
 	}
 	// get owners
 	// getOwners returns nil if there is any error
-	owners := policyviolation.GetOwners(dclient, *resource)
+	owners := clusterpv.GetOwners(dclient, *resource)
 	// as we can have multiple top level owners to a resource
 	// check if pv exists on each one
 	// does not check for cycles

View file

@@ -6,7 +6,7 @@ import (
 	"github.com/golang/glog"
 	"github.com/nirmata/kyverno/pkg/engine"
 	"github.com/nirmata/kyverno/pkg/event"
-	"github.com/nirmata/kyverno/pkg/policyviolation"
+	clusterpv "github.com/nirmata/kyverno/pkg/clusterpolicyviolation"
 )
 // for each policy-resource response
@@ -19,7 +19,7 @@ func (pc *PolicyController) cleanupAndReport(engineResponses []engine.EngineResp
 			reportEvents(eResponse, pc.eventGen)
 			// generate policy violation
 			// Only created on resource, not resource owners
-			policyviolation.CreatePV(pc.pvLister, pc.kyvernoClient, engineResponses)
+			clusterpv.CreateClusterPV(pc.pvLister, pc.kyvernoClient, engineResponses)
 		} else {
 			// cleanup existing violations if any
 			// if there is any error in clean up, we dont re-queue the resource

View file

@@ -4,7 +4,7 @@ import (
 	"github.com/golang/glog"
 	engine "github.com/nirmata/kyverno/pkg/engine"
 	policyctr "github.com/nirmata/kyverno/pkg/policy"
-	"github.com/nirmata/kyverno/pkg/policyviolation"
+	clusterpv "github.com/nirmata/kyverno/pkg/clusterpolicyviolation"
 	"github.com/nirmata/kyverno/pkg/utils"
 	v1beta1 "k8s.io/api/admission/v1beta1"
 	"k8s.io/apimachinery/pkg/labels"
@@ -108,14 +108,16 @@ func (ws *WebhookServer) handleValidation(request *v1beta1.AdmissionRequest, pat
 	// and if there are any then we dont block the resource creation
 	// Even if one the policy being applied
 	if !isResponseSuccesful(engineResponses) && toBlockResource(engineResponses) {
-		policyviolation.CreatePVWhenBlocked(ws.pvLister, ws.kyvernoClient, ws.client, engineResponses)
+		clusterpv.CreateClusterPVWhenBlocked(ws.pvLister, ws.kyvernoClient, ws.client, engineResponses)
 		sendStat(true)
 		return false, getErrorMsg(engineResponses)
 	}
 	// ADD POLICY VIOLATIONS
 	// violations are created with resource on "audit"
-	policyviolation.CreatePV(ws.pvLister, ws.kyvernoClient, engineResponses)
+	if resource.GetNamespace() == "" {
+		clusterpv.CreateClusterPV(ws.pvLister, ws.kyvernoClient, engineResponses)
+	}
 	sendStat(false)
 	return true, ""
 }
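One behavioral change rides along with the rename in the last hunk: on the audit path the webhook now creates a ClusterPolicyViolation only when the admitted resource has an empty namespace, i.e. when it is cluster scoped. A condensed reading of the new handleValidation flow, using the names from the diff (sendStat calls omitted); this is an illustration, not the full function:

// Illustration of the reporting flow after this change; ws, resource and
// engineResponses are the surrounding webhook server state from the diff.
if !isResponseSuccesful(engineResponses) && toBlockResource(engineResponses) {
	// enforce: record the violation on the resource owner and deny the request
	clusterpv.CreateClusterPVWhenBlocked(ws.pvLister, ws.kyvernoClient, ws.client, engineResponses)
	return false, getErrorMsg(engineResponses)
}
// audit: only cluster-scoped resources get a ClusterPolicyViolation now
if resource.GetNamespace() == "" {
	clusterpv.CreateClusterPV(ws.pvLister, ws.kyvernoClient, engineResponses)
}
return true, ""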