1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-04-08 18:15:48 +00:00

policy report added

This commit is contained in:
Yuvraj 2020-08-26 18:50:38 +05:30
parent 875f9716e8
commit 251521a77a
95 changed files with 2405 additions and 8 deletions

22
cmd/kyverno/main.go Normal file → Executable file
View file

@ -15,6 +15,7 @@ import (
"github.com/nirmata/kyverno/pkg/checker"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
policyreportinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
"github.com/nirmata/kyverno/pkg/config"
dclient "github.com/nirmata/kyverno/pkg/dclient"
event "github.com/nirmata/kyverno/pkg/event"
@ -50,8 +51,9 @@ var (
excludeGroupRole string
excludeUsername string
// User FQDN as CSR CN
fqdncn bool
setupLog = log.Log.WithName("setup")
fqdncn bool
policyReport string
setupLog = log.Log.WithName("setup")
)
func main() {
@ -65,6 +67,7 @@ func main() {
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.StringVar(&runValidationInMutatingWebhook, "runValidationInMutatingWebhook", "", "Validation will also be done using the mutation webhook, set to 'true' to enable. Older kubernetes versions do not work properly when a validation webhook is registered.")
flag.BoolVar(&profile, "profile", false, "Set this flag to 'true', to enable profiling.")
flag.StringVar(&policyReport, "policyreport", "policyviolation", "Report Type")
if err := flag.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level")
os.Exit(1)
@ -77,7 +80,11 @@ func main() {
if profile {
go http.ListenAndServe("localhost:6060", nil)
}
os.Setenv("POLICY-TYPE", "POLICYVIOLATION")
if policyReport == "policyreport" {
os.Setenv("POLICY-TYPE", "POLICYREPORT")
}
setupLog.Info(os.Getenv("POLICY-TYPE"))
version.PrintVersionInfo(log.Log)
cleanUp := make(chan struct{})
stopCh := signal.SetupSignalHandler()
@ -97,6 +104,11 @@ func main() {
os.Exit(1)
}
// Policy Report CRD INFORMER
// watches CRD resources:
// - PolicyReport
prInformer := policyreportinformer.NewSharedInformerFactoryWithOptions(pclient, resyncPeriod)
// DYNAMIC CLIENT
// - client for all registered resources
client, err := dclient.NewClient(clientConfig, 5*time.Minute, stopCh, log.Log)
@ -179,8 +191,11 @@ func main() {
client,
pInformer.Kyverno().V1().ClusterPolicyViolations(),
pInformer.Kyverno().V1().PolicyViolations(),
pInformer.Policy().V1alpha1().ClusterPolicyReports(),
pInformer.Policy().V1alpha1().PolicyReports(),
statusSync.Listener,
log.Log.WithName("PolicyViolationGenerator"),
stopCh,
)
// POLICY CONTROLLER
@ -189,6 +204,7 @@ func main() {
// - status aggregator: receives stats when a policy is applied & updates the policy status
policyCtrl, err := policy.NewPolicyController(pclient,
client,
prInformer.Policy().V1alpha1(),
pInformer.Kyverno().V1().ClusterPolicies(),
pInformer.Kyverno().V1().Policies(),
pInformer.Kyverno().V1().ClusterPolicyViolations(),

0
definitions/crds/crds.yaml Normal file → Executable file
View file

4
definitions/crds/kustomization.yaml Normal file → Executable file
View file

@ -2,4 +2,6 @@ apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- ./crds.yaml
- ./crds.yaml
- ./policy.kubernetes.io_clusterpolicyreports.yaml
- ./policy.kubernetes.io_policyreports.yaml

View file

View file

0
definitions/debug/kustomization.yaml Normal file → Executable file
View file

0
definitions/github/rbac.yaml Normal file → Executable file
View file

415
definitions/install.yaml Normal file → Executable file
View file

@ -278,6 +278,179 @@ spec:
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.5
creationTimestamp: null
name: clusterpolicyreports.policy.kubernetes.io
spec:
additionalPrinterColumns:
- JSONPath: .scope.kind
name: Kind
priority: 1
type: string
- JSONPath: .scope.name
name: Name
priority: 1
type: string
- JSONPath: .summary.pass
name: Pass
type: integer
- JSONPath: .summary.fail
name: Fail
type: integer
- JSONPath: .summary.warn
name: Warn
type: integer
- JSONPath: .summary.error
name: Error
type: integer
- JSONPath: .summary.skip
name: Skip
type: integer
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: policy.kubernetes.io
names:
kind: ClusterPolicyReport
listKind: ClusterPolicyReportList
plural: clusterpolicyreports
singular: clusterpolicyreport
scope: Namespaced
subresources: {}
validation:
openAPIV3Schema:
description: ClusterPolicyReport is the Schema for the clusterpolicyreports API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
results:
description: PolicyReportResult provides result details
items:
description: PolicyReportResult provides the result for an individual policy or rule
properties:
data:
additionalProperties:
type: string
description: Data provides additional information for the policy rule
type: object
message:
description: Message is a short user friendly description of the policy rule
type: string
policy:
description: Policy is the name of the policy
type: string
resource:
description: Resource is an optional reference to the resource checked by the policy rule
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
rule:
description: Rule is the name of the policy rule
type: string
scored:
description: Scored indicates if this policy rule is scored
type: boolean
status:
description: Status indicates the result of the policy rule check
enum:
- Pass
- Fail
- Warn
- Error
- Skip
type: string
required:
- policy
type: object
type: array
scope:
description: Scope is an optional reference to the report scope (e.g. a Deployment, Namespace, or Node)
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
summary:
description: PolicyReportSummary provides a summary of results
properties:
error:
type: integer
fail:
type: integer
pass:
type: integer
skip:
type: integer
warn:
type: integer
required:
- error
- fail
- pass
- skip
- warn
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterpolicyviolations.kyverno.io
spec:
@ -679,6 +852,179 @@ spec:
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.5
creationTimestamp: null
name: policyreports.policy.kubernetes.io
spec:
additionalPrinterColumns:
- JSONPath: .scope.kind
name: Kind
priority: 1
type: string
- JSONPath: .scope.name
name: Name
priority: 1
type: string
- JSONPath: .summary.pass
name: Pass
type: integer
- JSONPath: .summary.fail
name: Fail
type: integer
- JSONPath: .summary.warn
name: Warn
type: integer
- JSONPath: .summary.error
name: Error
type: integer
- JSONPath: .summary.skip
name: Skip
type: integer
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: policy.kubernetes.io
names:
kind: PolicyReport
listKind: PolicyReportList
plural: policyreports
singular: policyreport
scope: Namespaced
subresources: {}
validation:
openAPIV3Schema:
description: PolicyReport is the Schema for the policyreports API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
results:
description: PolicyReportResult provides result details
items:
description: PolicyReportResult provides the result for an individual policy or rule
properties:
data:
additionalProperties:
type: string
description: Data provides additional information for the policy rule
type: object
message:
description: Message is a short user friendly description of the policy rule
type: string
policy:
description: Policy is the name of the policy
type: string
resource:
description: Resource is an optional reference to the resource checked by the policy rule
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
rule:
description: Rule is the name of the policy rule
type: string
scored:
description: Scored indicates if this policy rule is scored
type: boolean
status:
description: Status indicates the result of the policy rule check
enum:
- Pass
- Fail
- Warn
- Error
- Skip
type: string
required:
- policy
type: object
type: array
scope:
description: Scope is an optional reference to the report scope (e.g. a Deployment, Namespace, or Node)
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
summary:
description: PolicyReportSummary provides a summary of results
properties:
error:
type: integer
fail:
type: integer
pass:
type: integer
skip:
type: integer
warn:
type: integer
required:
- error
- fail
- pass
- skip
- warn
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policyviolations.kyverno.io
spec:
@ -767,6 +1113,10 @@ rules:
- policies/status
- clusterpolicies
- clusterpolicies/status
- policyreport
- policyreport/status
- clusterpolicyreport
- clusterpolicyreport/status
- clusterpolicyviolations
- clusterpolicyviolations/status
- policyviolations
@ -901,6 +1251,24 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
name: kyverno:edit-policies-policyreports
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- policyreports
- clusterpolicyreports
- policies
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
@ -918,6 +1286,21 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyreport
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- policyreport
- clusterpolicyreport
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyviolations
rules:
@ -932,6 +1315,22 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:view-clusterpolicyreports
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- clusterpolicyreports
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
@ -963,6 +1362,22 @@ rules:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: kyverno:view-policyreports
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- policyreports
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:

415
definitions/install_debug.yaml Normal file → Executable file
View file

@ -278,6 +278,179 @@ spec:
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.5
creationTimestamp: null
name: clusterpolicyreports.policy.kubernetes.io
spec:
additionalPrinterColumns:
- JSONPath: .scope.kind
name: Kind
priority: 1
type: string
- JSONPath: .scope.name
name: Name
priority: 1
type: string
- JSONPath: .summary.pass
name: Pass
type: integer
- JSONPath: .summary.fail
name: Fail
type: integer
- JSONPath: .summary.warn
name: Warn
type: integer
- JSONPath: .summary.error
name: Error
type: integer
- JSONPath: .summary.skip
name: Skip
type: integer
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: policy.kubernetes.io
names:
kind: ClusterPolicyReport
listKind: ClusterPolicyReportList
plural: clusterpolicyreports
singular: clusterpolicyreport
scope: Namespaced
subresources: {}
validation:
openAPIV3Schema:
description: ClusterPolicyReport is the Schema for the clusterpolicyreports API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
results:
description: PolicyReportResult provides result details
items:
description: PolicyReportResult provides the result for an individual policy or rule
properties:
data:
additionalProperties:
type: string
description: Data provides additional information for the policy rule
type: object
message:
description: Message is a short user friendly description of the policy rule
type: string
policy:
description: Policy is the name of the policy
type: string
resource:
description: Resource is an optional reference to the resource checked by the policy rule
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
rule:
description: Rule is the name of the policy rule
type: string
scored:
description: Scored indicates if this policy rule is scored
type: boolean
status:
description: Status indicates the result of the policy rule check
enum:
- Pass
- Fail
- Warn
- Error
- Skip
type: string
required:
- policy
type: object
type: array
scope:
description: Scope is an optional reference to the report scope (e.g. a Deployment, Namespace, or Node)
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
summary:
description: PolicyReportSummary provides a summary of results
properties:
error:
type: integer
fail:
type: integer
pass:
type: integer
skip:
type: integer
warn:
type: integer
required:
- error
- fail
- pass
- skip
- warn
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: clusterpolicyviolations.kyverno.io
spec:
@ -679,6 +852,179 @@ spec:
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
annotations:
controller-gen.kubebuilder.io/version: v0.2.5
creationTimestamp: null
name: policyreports.policy.kubernetes.io
spec:
additionalPrinterColumns:
- JSONPath: .scope.kind
name: Kind
priority: 1
type: string
- JSONPath: .scope.name
name: Name
priority: 1
type: string
- JSONPath: .summary.pass
name: Pass
type: integer
- JSONPath: .summary.fail
name: Fail
type: integer
- JSONPath: .summary.warn
name: Warn
type: integer
- JSONPath: .summary.error
name: Error
type: integer
- JSONPath: .summary.skip
name: Skip
type: integer
- JSONPath: .metadata.creationTimestamp
name: Age
type: date
group: policy.kubernetes.io
names:
kind: PolicyReport
listKind: PolicyReportList
plural: policyreports
singular: policyreport
scope: Namespaced
subresources: {}
validation:
openAPIV3Schema:
description: PolicyReport is the Schema for the policyreports API
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
metadata:
type: object
results:
description: PolicyReportResult provides result details
items:
description: PolicyReportResult provides the result for an individual policy or rule
properties:
data:
additionalProperties:
type: string
description: Data provides additional information for the policy rule
type: object
message:
description: Message is a short user friendly description of the policy rule
type: string
policy:
description: Policy is the name of the policy
type: string
resource:
description: Resource is an optional reference to the resource checked by the policy rule
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
rule:
description: Rule is the name of the policy rule
type: string
scored:
description: Scored indicates if this policy rule is scored
type: boolean
status:
description: Status indicates the result of the policy rule check
enum:
- Pass
- Fail
- Warn
- Error
- Skip
type: string
required:
- policy
type: object
type: array
scope:
description: Scope is an optional reference to the report scope (e.g. a Deployment, Namespace, or Node)
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an entire object, this string should contain a valid JSON/Go field access statement, such as desiredState.manifest.containers[2]. For example, if the object reference is to a container within a pod, this would take on a value like: "spec.containers{name}" (where "name" refers to the name of the container that triggered the event) or if no container name is specified "spec.containers[2]" (container with index 2 in this pod). This syntax is chosen only to have some well-defined way of referencing a part of an object. TODO: this design is not final and this field is subject to change in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is made, if any. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
summary:
description: PolicyReportSummary provides a summary of results
properties:
error:
type: integer
fail:
type: integer
pass:
type: integer
skip:
type: integer
warn:
type: integer
required:
- error
- fail
- pass
- skip
- warn
type: object
type: object
version: v1alpha1
versions:
- name: v1alpha1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: policyviolations.kyverno.io
spec:
@ -767,6 +1113,10 @@ rules:
- policies/status
- clusterpolicies
- clusterpolicies/status
- policyreport
- policyreport/status
- clusterpolicyreport
- clusterpolicyreport/status
- clusterpolicyviolations
- clusterpolicyviolations/status
- policyviolations
@ -901,6 +1251,24 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
name: kyverno:edit-policies-policyreports
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- policyreports
- clusterpolicyreports
- policies
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
@ -918,6 +1286,21 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyreport
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- policyreport
- clusterpolicyreport
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyviolations
rules:
@ -932,6 +1315,22 @@ rules:
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: kyverno:view-clusterpolicyreports
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- clusterpolicyreports
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
@ -963,6 +1362,22 @@ rules:
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
name: kyverno:view-policyreports
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- policyreports
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:

0
definitions/k8s-resource/configmap.yaml Normal file → Executable file
View file

0
definitions/k8s-resource/kustomization.yaml Normal file → Executable file
View file

58
definitions/k8s-resource/rbac.yaml Normal file → Executable file
View file

@ -174,6 +174,10 @@ rules:
- policies/status
- clusterpolicies
- clusterpolicies/status
- policyreport
- policyreport/status
- clusterpolicyreport
- clusterpolicyreport/status
- clusterpolicyviolations
- clusterpolicyviolations/status
- policyviolations
@ -291,3 +295,57 @@ rules:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:policyreport
rules:
- apiGroups: ["policy.kubernetes.io"]
resources:
- policyreport
- clusterpolicyreport
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:view-policyreports
labels:
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: [ "policy.kubernetes.io" ]
resources:
- policyreports
verbs: [ "get", "list", "watch" ]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: kyverno:view-clusterpolicyreports
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rules:
- apiGroups: ["policy.kubernetes.io"]
resources:
- clusterpolicyreports
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-edit: "true"
name: kyverno:edit-policies-policyreports
rules:
- apiGroups:
- policy.kubernetes.io
resources:
- policyreports
- clusterpolicyreports
- policies
verbs:
- get
- list
- watch

0
definitions/kustomization.yaml Normal file → Executable file
View file

0
definitions/manifest/deployment.yaml Normal file → Executable file
View file

0
definitions/manifest/kustomization.yaml Normal file → Executable file
View file

0
definitions/release/install.yaml Normal file → Executable file
View file

0
pkg/api/kyverno/register.go Normal file → Executable file
View file

0
pkg/api/kyverno/v1/doc.go Normal file → Executable file
View file

0
pkg/api/kyverno/v1/register.go Normal file → Executable file
View file

2
pkg/api/kyverno/v1/types.go Normal file → Executable file
View file

@ -394,6 +394,8 @@ type ViolatedRule struct {
Type string `json:"type" yaml:"type"`
// Specifies violation message
Message string `json:"message" yaml:"message"`
// +optional
Check string `json:"check" yaml:"check"`
}
//PolicyViolationStatus provides information regarding policyviolation status

0
pkg/api/kyverno/v1/utils.go Normal file → Executable file
View file

0
pkg/api/kyverno/v1/zz_generated.deepcopy.go Normal file → Executable file
View file

View file

0
pkg/api/policyreport/v1alpha1/doc.go Normal file → Executable file
View file

0
pkg/api/policyreport/v1alpha1/groupversion_info.go Normal file → Executable file
View file

0
pkg/api/policyreport/v1alpha1/policyreport_types.go Normal file → Executable file
View file

0
pkg/api/policyreport/v1alpha1/zz_generated.deepcopy.go Normal file → Executable file
View file

0
pkg/client/clientset/versioned/clientset.go Normal file → Executable file
View file

0
pkg/client/clientset/versioned/doc.go Normal file → Executable file
View file

View file

0
pkg/client/clientset/versioned/fake/doc.go Normal file → Executable file
View file

0
pkg/client/clientset/versioned/fake/register.go Normal file → Executable file
View file

0
pkg/client/clientset/versioned/scheme/doc.go Normal file → Executable file
View file

0
pkg/client/clientset/versioned/scheme/register.go Normal file → Executable file
View file

View file

0
pkg/client/clientset/versioned/typed/kyverno/v1/doc.go Normal file → Executable file
View file

View file

View file

View file

View file

View file

View file

View file

View file

View file

0
pkg/client/informers/externalversions/factory.go Normal file → Executable file
View file

0
pkg/client/informers/externalversions/generic.go Normal file → Executable file
View file

View file

View file

View file

View file

View file

View file

View file

0
pkg/client/listers/kyverno/v1/clusterpolicy.go Normal file → Executable file
View file

View file

0
pkg/client/listers/kyverno/v1/expansion_generated.go Normal file → Executable file
View file

0
pkg/client/listers/kyverno/v1/generaterequest.go Normal file → Executable file
View file

0
pkg/client/listers/kyverno/v1/policy.go Normal file → Executable file
View file

0
pkg/client/listers/kyverno/v1/policyviolation.go Normal file → Executable file
View file

View file

View file

View file

103
pkg/policyreport/builder.go Executable file
View file

@ -0,0 +1,103 @@
package policyreport
import (
"fmt"
"os"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/response"
)
//GeneratePRsFromEngineResponse generate Violations from engine responses
func GeneratePRsFromEngineResponse(ers []response.EngineResponse, log logr.Logger) (pvInfos []Info) {
	for _, er := range ers {
		// Skip resources that have not been assigned a name yet (e.g. a
		// generateName resource at admission time): a violation cannot
		// reference a nameless resource.
		if er.PolicyResponse.Resource.Name == "" {
			log.V(4).Info("resource does not have a name assigned yet, not creating a policy violation", "resource", er.PolicyResponse.Resource)
			continue
		}

		// In policy-violation mode only failed responses are recorded; in
		// policy-report mode (POLICY-TYPE == "POLICYREPORT") successful
		// responses are kept too, so pass results can be reported.
		if os.Getenv("POLICY-TYPE") != "POLICYREPORT" {
			if er.IsSuccessful() {
				continue
			}
		}

		// build policy violation info
		pvInfos = append(pvInfos, buildPVInfo(er))
	}

	return pvInfos
}
// Builder builds Policy Violation struct
// this is base type of namespaced and cluster policy violation
type Builder interface {
	// generate builds a violation template from a create request
	generate(info Info) kyverno.PolicyViolationTemplate
	// build assembles the template from its individual fields
	build(policy, kind, namespace, name string, rules []kyverno.ViolatedRule) *kyverno.PolicyViolationTemplate
}

// pvBuilder is the default, stateless Builder implementation
type pvBuilder struct{}

// newPrBuilder returns a policy report/violation builder
func newPrBuilder() *pvBuilder {
	return &pvBuilder{}
}

// generate builds the violation template for the given request info
func (pvb *pvBuilder) generate(info Info) kyverno.PolicyViolationTemplate {
	pv := pvb.build(info.PolicyName, info.Resource.GetKind(), info.Resource.GetNamespace(), info.Resource.GetName(), info.Rules)
	return *pv
}
// build assembles a PolicyViolationTemplate for the given policy/resource
// pair, labels it for lookup by policy name and resource key, and sets a
// generate-name prefix derived from the policy name.
func (pvb *pvBuilder) build(policy, kind, namespace, name string, rules []kyverno.ViolatedRule) *kyverno.PolicyViolationTemplate {
	template := &kyverno.PolicyViolationTemplate{
		Spec: kyverno.PolicyViolationSpec{
			Policy: policy,
			ResourceSpec: kyverno.ResourceSpec{
				Kind:      kind,
				Name:      name,
				Namespace: namespace,
			},
			ViolatedRules: rules,
		},
	}

	template.SetLabels(map[string]string{
		"policy":   template.Spec.Policy,
		"resource": template.Spec.ToKey(),
	})

	// a namespaced resource produces a namespaced violation
	if namespace != "" {
		template.SetNamespace(namespace)
	}

	template.SetGenerateName(fmt.Sprintf("%s-", policy))
	return template
}
// buildPVInfo converts an engine response into the Info request used to
// create a policy violation / report entry.
func buildPVInfo(er response.EngineResponse) Info {
	return Info{
		PolicyName: er.PolicyResponse.Policy,
		Resource:   er.PatchedResource,
		Rules:      buildViolatedRules(er),
	}
}
// buildViolatedRules converts the failed rules of an engine response into
// ViolatedRule entries. Successful rules are skipped, so every returned
// entry carries Check == "Fail".
func buildViolatedRules(er response.EngineResponse) []kyverno.ViolatedRule {
	var violatedRules []kyverno.ViolatedRule
	for _, rule := range er.PolicyResponse.Rules {
		if rule.Success {
			continue
		}

		violatedRules = append(violatedRules, kyverno.ViolatedRule{
			Name:    rule.Name,
			Type:    rule.Type,
			Message: rule.Message,
			// rule.Success is always false past the guard above, so the
			// previous `if rule.Success { Check = "Pass" }` branch was
			// unreachable and has been removed.
			Check: "Fail",
		})
	}
	return violatedRules
}

223
pkg/policyreport/clusterpr.go Executable file
View file

@ -0,0 +1,223 @@
package policyreport
import (
"errors"
"fmt"
policyreportv1alpha12 "github.com/nirmata/kyverno/pkg/api/policyreport/v1alpha1"
"github.com/nirmata/kyverno/pkg/constant"
corev1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"reflect"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
policyreportv1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policyreport/v1alpha1"
policyreportlister "github.com/nirmata/kyverno/pkg/client/listers/policyreport/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/policystatus"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const clusterWorkQueueName = "policy-report-cluster"
const clusterWorkQueueRetryLimit = 3

//clusterPR generates cluster-scoped policy reports from violation requests
type clusterPR struct {
	// dynamic client
	dclient *client.Client
	// get/list cluster policy report
	cprLister policyreportlister.ClusterPolicyReportLister
	// policy violation interface
	policyreportInterface policyreportv1alpha1.PolicyV1alpha1Interface
	// logger
	log logr.Logger
	// update policy stats with violationCount
	policyStatusListener policystatus.Listener
	// in-memory payload store keyed by the queued key hash
	dataStore *dataStore
	// rate-limited work queue of Info key hashes
	queue workqueue.RateLimitingInterface
}

// newClusterPR constructs a cluster policy report generator with its own
// work queue and payload store.
func newClusterPR(log logr.Logger, dclient *client.Client,
	cprLister policyreportlister.ClusterPolicyReportLister,
	policyreportInterface policyreportv1alpha1.PolicyV1alpha1Interface,
	policyStatus policystatus.Listener,
) *clusterPR {
	cpv := clusterPR{
		dclient:               dclient,
		cprLister:             cprLister,
		policyreportInterface: policyreportInterface,
		log:                   log,
		policyStatusListener:  policyStatus,
		dataStore:             newDataStore(),
		queue:                 workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), clusterWorkQueueName),
	}
	return &cpv
}
// enqueue stores the request payload in the data store and adds its key
// hash to the work queue; only the hash travels through the queue.
func (cpr *clusterPR) enqueue(info Info) {
	// add to data map
	keyHash := info.toKey()
	// queue the key hash; workers look the payload up by this key
	cpr.dataStore.add(keyHash, info)
	cpr.queue.Add(keyHash)
}

//Add queues a policy violation create request
func (cpr *clusterPR) Add(infos ...Info) {
	for _, info := range infos {
		cpr.enqueue(info)
	}
}

// Run starts the workers and blocks until stopCh is closed
func (cpr *clusterPR) Run(workers int, stopCh <-chan struct{}) {
	logger := cpr.log
	defer utilruntime.HandleCrash()
	logger.Info("start")
	defer logger.Info("shutting down")

	for i := 0; i < workers; i++ {
		go wait.Until(cpr.runWorker, constant.PolicyViolationControllerResync, stopCh)
	}
	<-stopCh
}

// runWorker drains the queue until it is shut down
func (cpr *clusterPR) runWorker() {
	for cpr.processNextWorkItem() {
	}
}

// handleErr re-queues a failed key up to clusterWorkQueueRetryLimit times,
// then drops it and removes its payload from the data store.
func (cpr *clusterPR) handleErr(err error, key interface{}) {
	logger := cpr.log
	if err == nil {
		cpr.queue.Forget(key)
		return
	}

	// retries requests if there is an error
	if cpr.queue.NumRequeues(key) < clusterWorkQueueRetryLimit {
		logger.Error(err, "failed to sync policy violation", "key", key)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		cpr.queue.AddRateLimited(key)
		return
	}
	cpr.queue.Forget(key)
	// remove from data store
	if keyHash, ok := key.(string); ok {
		cpr.dataStore.delete(keyHash)
	}
	logger.Error(err, "dropping key out of the queue", "key", key)
}

// processNextWorkItem handles one queued key; it returns false only when
// the queue has been shut down.
func (cpr *clusterPR) processNextWorkItem() bool {
	logger := cpr.log
	obj, shutdown := cpr.queue.Get()
	if shutdown {
		return false
	}

	err := func(obj interface{}) error {
		defer cpr.queue.Done(obj)
		var keyHash string
		var ok bool

		if keyHash, ok = obj.(string); !ok {
			cpr.queue.Forget(obj)
			logger.Info("incorrect type; expecting type 'string'", "obj", obj)
			return nil
		}

		// lookup data store; a zero Info means the payload was already
		// dropped, so the key is discarded
		info := cpr.dataStore.lookup(keyHash)
		if reflect.DeepEqual(info, Info{}) {
			// empty key
			cpr.queue.Forget(obj)
			logger.Info("empty key")
			return nil
		}

		err := cpr.syncHandler(info)
		cpr.handleErr(err, obj)
		return nil
	}(obj)

	// NOTE(review): the closure above always returns nil, so this branch is
	// unreachable; sync errors are routed through handleErr instead.
	if err != nil {
		logger.Error(err, "failed to process item")
		return true
	}

	return true
}
// syncHandler builds a violation template for the request and writes it
// into the cluster policy report; any failure returns an error so the key
// is re-queued.
func (cpr *clusterPR) syncHandler(info Info) error {
	logger := cpr.log

	pv := newPrBuilder().generate(info)
	if info.FromSync {
		pv.Annotations = map[string]string{"fromSync": "true"}
	}

	// persist into the cluster policy report
	logger.V(4).Info("creating policy violation", "key", info.toKey())
	if err := cpr.create(pv, ""); err != nil {
		logger.Error(err, "failed to create policy violation")
		// even a single failure re-queues the whole request
		return errors.New("Failed to process some policy violations, re-queuing")
	}
	return nil
}
// create merges the violation into the cluster policy report for its
// policy, creating the report if it does not exist yet. appName is unused
// for cluster-scoped reports; it is kept to satisfy the prGenerator
// interface.
func (cpr *clusterPR) create(pv kyverno.PolicyViolationTemplate, appName string) error {
	reportName := fmt.Sprintf("kyverno-clusterpolicyreport-%s", pv.Spec.Policy)
	clusterpr, err := cpr.policyreportInterface.ClusterPolicyReports().Get(reportName, v1.GetOptions{})
	if err != nil {
		if !k8serror.IsNotFound(err) {
			return err
		}

		// Report not found: initialize an empty cluster-scoped report.
		clusterpr = &policyreportv1alpha12.ClusterPolicyReport{
			Scope: &corev1.ObjectReference{
				Kind: "Cluster",
			},
			Summary: policyreportv1alpha12.PolicyReportSummary{},
			Results: []*policyreportv1alpha12.PolicyReportResult{},
		}
		clusterpr.SetLabels(map[string]string{
			"policy-scope": "cluster",
		})
		clusterpr.ObjectMeta.Name = reportName

		prObj := NewPolicyReport(nil, clusterpr, &pv, cpr.dclient)
		// fix: assign with '=' instead of ':=' so the merged report is not
		// a shadowed inner variable
		clusterpr = prObj.CreateClusterPolicyViolationsToClusterPolicyReport()
		_, err = cpr.policyreportInterface.ClusterPolicyReports().Create(clusterpr)
		return err
	}

	// Report exists: merge the violation in and update it.
	prObj := NewPolicyReport(nil, clusterpr, &pv, cpr.dclient)
	clusterpr = prObj.CreateClusterPolicyViolationsToClusterPolicyReport()
	_, err = cpr.policyreportInterface.ClusterPolicyReports().Update(clusterpr)
	return err
}

130
pkg/policyreport/common.go Executable file
View file

@ -0,0 +1,130 @@
package policyreport
import (
"fmt"
"reflect"
"time"
backoff "github.com/cenkalti/backoff"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// createOwnerReference builds a controller owner reference for resource.
// It reports false when any identifying field (apiVersion, kind, name, uid)
// is empty, since such a reference would be invalid.
func createOwnerReference(resource *unstructured.Unstructured) (metav1.OwnerReference, bool) {
	apiVersion := resource.GetAPIVersion()
	kind := resource.GetKind()
	name := resource.GetName()
	uid := resource.GetUID()
	if apiVersion == "" || kind == "" || name == "" || uid == "" {
		return metav1.OwnerReference{}, false
	}

	isController := true
	blockOwnerDeletion := true
	return metav1.OwnerReference{
		APIVersion:         apiVersion,
		Kind:               kind,
		Name:               name,
		UID:                uid,
		Controller:         &isController,
		BlockOwnerDeletion: &blockOwnerDeletion,
	}, true
}
// retryGetResource fetches the resource described by rspec, retrying with
// exponential backoff (500ms initial, 1s max interval, ~3s total elapsed).
func retryGetResource(client *client.Client, rspec kyverno.ResourceSpec) (*unstructured.Unstructured, error) {
	var i int
	var obj *unstructured.Unstructured
	var err error
	getResource := func() error {
		// apiVersion is left empty so the dynamic client resolves it
		obj, err = client.GetResource("", rspec.Kind, rspec.Namespace, rspec.Name)
		log.Log.V(4).Info(fmt.Sprintf("retry %v getting %s/%s/%s", i, rspec.Kind, rspec.Namespace, rspec.Name))
		i++
		return err
	}

	exbackoff := &backoff.ExponentialBackOff{
		InitialInterval:     500 * time.Millisecond,
		RandomizationFactor: 0.5,
		Multiplier:          1.5,
		MaxInterval:         time.Second,
		MaxElapsedTime:      3 * time.Second,
		Clock:               backoff.SystemClock,
	}

	exbackoff.Reset()
	err = backoff.Retry(getResource, exbackoff)
	if err != nil {
		return nil, err
	}

	return obj, nil
}
// converLabelToSelector converts a plain label map into a labels.Selector.
// NOTE(review): the name has a typo ("conver"); left unchanged because
// callers outside this file may reference it.
func converLabelToSelector(labelMap map[string]string) (labels.Selector, error) {
	ls := &metav1.LabelSelector{}
	err := metav1.Convert_Map_string_To_string_To_v1_LabelSelector(&labelMap, ls, nil)
	if err != nil {
		return nil, err
	}

	policyViolationSelector, err := metav1.LabelSelectorAsSelector(ls)
	if err != nil {
		return nil, fmt.Errorf("invalid label selector: %v", err)
	}

	return policyViolationSelector, nil
}
// violationCount carries the violated rules of one policy so the policy's
// status can be updated with per-rule violation counts.
type violationCount struct {
	// name of the policy whose status is updated
	policyName string
	// rules that were violated for that policy
	violatedRules []v1.ViolatedRule
}

// PolicyName identifies the policy this status update targets
func (vc violationCount) PolicyName() string {
	return vc.policyName
}

// UpdateStatus increments the policy-level and per-rule violation counters
// for every violated rule recorded in vc, and returns the updated status.
func (vc violationCount) UpdateStatus(status kyverno.PolicyStatus) kyverno.PolicyStatus {
	var ruleNameToViolations = make(map[string]int)
	for _, rule := range vc.violatedRules {
		ruleNameToViolations[rule.Name]++
	}

	for i := range status.Rules {
		status.ViolationCount += ruleNameToViolations[status.Rules[i].Name]
		status.Rules[i].ViolationCount += ruleNameToViolations[status.Rules[i].Name]
	}

	return status
}
// hasViolationSpecChanged returns true if newSpec and oldSpec differ,
// ignoring the Message field of violated rules.
//
// Fix: the previous implementation blanked Message directly on the callers'
// specs as a side effect; the comparison now works on copies with cloned
// rule slices so the arguments are never mutated. Parameters are also
// renamed to avoid shadowing the builtin `new`.
func hasViolationSpecChanged(newSpec, oldSpec *kyverno.PolicyViolationSpec) bool {
	if newSpec.Policy != oldSpec.Policy {
		return true
	}
	if newSpec.ResourceSpec.ToKey() != oldSpec.ResourceSpec.ToKey() {
		return true
	}

	// compare copies with messages cleared so callers' specs stay untouched
	newCopy := *newSpec
	oldCopy := *oldSpec
	newCopy.ViolatedRules = append([]kyverno.ViolatedRule(nil), newSpec.ViolatedRules...)
	oldCopy.ViolatedRules = append([]kyverno.ViolatedRule(nil), oldSpec.ViolatedRules...)
	for i := range newCopy.ViolatedRules {
		newCopy.ViolatedRules[i].Message = ""
	}
	for i := range oldCopy.ViolatedRules {
		oldCopy.ViolatedRules[i].Message = ""
	}

	return !reflect.DeepEqual(newCopy, oldCopy)
}

280
pkg/policyreport/generator.go Executable file
View file

@ -0,0 +1,280 @@
package policyreport
import (
"errors"
"fmt"
"reflect"
"strconv"
"strings"
"sync"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
policyreportclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
policyreportv1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policyreport/v1alpha1"
policyreportinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/policyreport/v1alpha1"
policyreportlister "github.com/nirmata/kyverno/pkg/client/listers/policyreport/v1alpha1"
"github.com/nirmata/kyverno/pkg/constant"
"github.com/nirmata/kyverno/pkg/policystatus"
dclient "github.com/nirmata/kyverno/pkg/dclient"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
)
const workQueueName = "policy-violation-controller"
const workQueueRetryLimit = 3

//Generator creates PV
type Generator struct {
	dclient               *dclient.Client
	policyreportInterface policyreportv1alpha1.PolicyV1alpha1Interface
	// get/list cluster policy report
	cprLister policyreportlister.ClusterPolicyReportLister
	// get/list namespaced policy report
	nsprLister policyreportlister.PolicyReportLister
	// returns true if the cluster policy store has been synced at least once
	prSynced cache.InformerSynced
	// logger
	log logr.Logger
	// returns true if the namespaced policy report store has been synced at least once
	nsprSynced cache.InformerSynced
	// rate-limited work queue of Info key hashes
	queue workqueue.RateLimitingInterface
	// payload store keyed by the queued hash
	dataStore            *dataStore
	policyStatusListener policystatus.Listener
}
//newDataStore returns an instance of data store
func newDataStore() *dataStore {
	ds := dataStore{
		data: make(map[string]Info),
	}
	return &ds
}

// dataStore is a mutex-guarded map from queue key hash to its Info payload
type dataStore struct {
	data map[string]Info
	mu   sync.RWMutex
}

// add stores (or overwrites) the payload for keyHash
func (ds *dataStore) add(keyHash string, info Info) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	ds.data[keyHash] = info
}

// lookup returns the payload for keyHash (zero Info when absent)
func (ds *dataStore) lookup(keyHash string) Info {
	ds.mu.RLock()
	defer ds.mu.RUnlock()
	return ds.data[keyHash]
}

// delete removes the payload for keyHash
func (ds *dataStore) delete(keyHash string) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	delete(ds.data, keyHash)
}
//Info is a request to create PV
type Info struct {
	PolicyName string
	Resource   unstructured.Unstructured
	Rules      []kyverno.ViolatedRule
	FromSync   bool
}

// toKey renders the request as a hashable queue key of the form
// policy/kind/namespace/name/ruleCount.
func (i Info) toKey() string {
	return strings.Join([]string{
		i.PolicyName,
		i.Resource.GetKind(),
		i.Resource.GetNamespace(),
		i.Resource.GetName(),
		strconv.Itoa(len(i.Rules)),
	}, "/")
}
// toKey above makes the Info struct hashable for use as a work-queue key

//GeneratorInterface provides API to create PVs
type GeneratorInterface interface {
	Add(infos ...Info)
}

// NewPRGenerator returns a new instance of policy violation generator
func NewPRGenerator(client *policyreportclient.Clientset,
	dclient *dclient.Client,
	prInformer policyreportinformer.ClusterPolicyReportInformer,
	nsprInformer policyreportinformer.PolicyReportInformer,
	policyStatus policystatus.Listener,
	log logr.Logger) *Generator {
	gen := Generator{
		policyreportInterface: client.PolicyV1alpha1(),
		dclient:               dclient,
		cprLister:             prInformer.Lister(),
		prSynced:              prInformer.Informer().HasSynced,
		nsprLister:            nsprInformer.Lister(),
		nsprSynced:            nsprInformer.Informer().HasSynced,
		queue:                 workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
		dataStore:             newDataStore(),
		log:                   log,
		policyStatusListener:  policyStatus,
	}
	return &gen
}
// enqueue stores the request payload in the data store and adds its key
// hash to the work queue; only the hash travels through the queue.
func (gen *Generator) enqueue(info Info) {
	// add to data map
	keyHash := info.toKey()
	// queue the key hash; workers look the payload up by this key
	gen.dataStore.add(keyHash, info)
	gen.queue.Add(keyHash)
}

//Add queues a policy violation create request
func (gen *Generator) Add(infos ...Info) {
	for _, info := range infos {
		gen.enqueue(info)
	}
}

// Run waits for the informer caches, starts the workers, and blocks until
// stopCh is closed.
func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
	logger := gen.log
	defer utilruntime.HandleCrash()
	logger.Info("start")
	defer logger.Info("shutting down")

	if !cache.WaitForCacheSync(stopCh, gen.prSynced, gen.nsprSynced) {
		logger.Info("failed to sync informer cache")
	}

	for i := 0; i < workers; i++ {
		go wait.Until(gen.runWorker, constant.PolicyViolationControllerResync, stopCh)
	}
	<-stopCh
}

// runWorker drains the queue until it is shut down
func (gen *Generator) runWorker() {
	for gen.processNextWorkItem() {
	}
}

// handleErr re-queues a failed key up to workQueueRetryLimit times, then
// drops it and removes its payload from the data store.
func (gen *Generator) handleErr(err error, key interface{}) {
	logger := gen.log
	if err == nil {
		gen.queue.Forget(key)
		return
	}

	// retries requests if there is an error
	if gen.queue.NumRequeues(key) < workQueueRetryLimit {
		logger.Error(err, "failed to sync policy violation", "key", key)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		gen.queue.AddRateLimited(key)
		return
	}
	gen.queue.Forget(key)
	// remove from data store
	if keyHash, ok := key.(string); ok {
		gen.dataStore.delete(keyHash)
	}
	logger.Error(err, "dropping key out of the queue", "key", key)
}

// processNextWorkItem handles one queued key; it returns false only when
// the queue has been shut down.
func (gen *Generator) processNextWorkItem() bool {
	logger := gen.log
	obj, shutdown := gen.queue.Get()
	if shutdown {
		return false
	}

	err := func(obj interface{}) error {
		defer gen.queue.Done(obj)
		var keyHash string
		var ok bool

		if keyHash, ok = obj.(string); !ok {
			gen.queue.Forget(obj)
			logger.Info("incorrect type; expecting type 'string'", "obj", obj)
			return nil
		}

		// lookup data store; a zero Info means the payload was already
		// dropped, so the key is discarded
		info := gen.dataStore.lookup(keyHash)
		if reflect.DeepEqual(info, Info{}) {
			// empty key
			gen.queue.Forget(obj)
			logger.Info("empty key")
			return nil
		}

		err := gen.syncHandler(info)
		gen.handleErr(err, obj)
		return nil
	}(obj)

	// NOTE(review): the closure above always returns nil, so this branch is
	// unreachable; sync errors are routed through handleErr instead.
	if err != nil {
		logger.Error(err, "failed to process item")
		return true
	}

	return true
}
// syncHandler routes the request to a helm-, cluster- or namespace-scoped
// report generator based on the live resource, builds the violation
// template, and asks the chosen handler to persist it.
func (gen *Generator) syncHandler(info Info) error {
	logger := gen.log
	var handler prGenerator
	// fetch the live resource to inspect its labels
	resource, err := gen.dclient.GetResource(info.Resource.GetAPIVersion(), info.Resource.GetKind(), info.Resource.GetNamespace(), info.Resource.GetName())
	if err != nil {
		logger.Error(err, "failed to get resource")
		return err
	}
	labels := resource.GetLabels()
	_, okChart := labels["app"]
	_, okRelease := labels["release"]

	var appName string
	if okChart && okRelease {
		// resources carrying helm chart labels ("app" and "release") are
		// grouped into a per-application report
		appName = fmt.Sprintf("%s-%s", labels["app"], info.Resource.GetNamespace())
		handler = newHelmPR(gen.log.WithName("HelmPR"), gen.dclient, gen.nsprLister, gen.policyreportInterface, gen.policyStatusListener)
	} else if info.Resource.GetNamespace() == "" {
		// cluster scope resources go into the cluster policy report
		handler = newClusterPR(gen.log.WithName("ClusterPV"), gen.dclient, gen.cprLister, gen.policyreportInterface, gen.policyStatusListener)
	} else {
		// namespaced resources generate a report in the namespace of the resource
		appName = info.Resource.GetNamespace()
		handler = newNamespacedPR(gen.log.WithName("NamespacedPV"), gen.dclient, gen.nsprLister, gen.policyreportInterface, gen.policyStatusListener)
	}

	failure := false
	builder := newPrBuilder()

	pv := builder.generate(info)

	// Create Policy Violations
	logger.V(4).Info("creating policy violation", "key", info.toKey())
	if err := handler.create(pv, appName); err != nil {
		failure = true
		logger.Error(err, "failed to create policy violation")
	}

	if failure {
		// even if there is a single failure we requeue the request
		return errors.New("Failed to process some policy violations, re-queuing")
	}
	return nil
}

// prGenerator is the common interface for persisting a violation into the
// appropriate policy report; implemented by helm, namespaced and cluster PR.
type prGenerator interface {
	create(policyViolation kyverno.PolicyViolationTemplate, appName string) error
}

221
pkg/policyreport/helmpr.go Executable file
View file

@ -0,0 +1,221 @@
package policyreport
import (
"errors"
"fmt"
policyreportv1alpha12 "github.com/nirmata/kyverno/pkg/api/policyreport/v1alpha1"
"github.com/nirmata/kyverno/pkg/constant"
corev1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
"reflect"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
policyreportv1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policyreport/v1alpha1"
policyreportlister "github.com/nirmata/kyverno/pkg/client/listers/policyreport/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/policystatus"
)
const helmWorkQueueName = "policy-report-helm"
const helmWorkQueueRetryLimit = 3

//helmPR generates per-application (Helm chart) policy reports
type helmPR struct {
	// dynamic client
	dclient *client.Client
	// get/list namespaced policy violation
	nsprLister policyreportlister.PolicyReportLister
	// policy violation interface
	policyreportInterface policyreportv1alpha1.PolicyV1alpha1Interface
	// logger
	log logr.Logger
	// update policy status with violationCount
	policyStatusListener policystatus.Listener
	// payload store keyed by the queued hash
	dataStore *dataStore
	// rate-limited work queue of Info key hashes
	queue workqueue.RateLimitingInterface
}

// newHelmPR constructs a Helm-scoped policy report generator
func newHelmPR(log logr.Logger, dclient *client.Client,
	nsprLister policyreportlister.PolicyReportLister,
	policyreportInterface policyreportv1alpha1.PolicyV1alpha1Interface,
	policyStatus policystatus.Listener,
) *helmPR {
	nspr := helmPR{
		dclient:               dclient,
		nsprLister:            nsprLister,
		policyreportInterface: policyreportInterface,
		log:                   log,
		policyStatusListener:  policyStatus,
		dataStore:             newDataStore(),
		queue:                 workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), helmWorkQueueName),
	}
	return &nspr
}
// enqueue stores the request payload in the data store and adds its key
// hash to the work queue; only the hash travels through the queue.
func (hpr *helmPR) enqueue(info Info) {
	// add to data map
	keyHash := info.toKey()
	// queue the key hash; workers look the payload up by this key
	hpr.dataStore.add(keyHash, info)
	hpr.queue.Add(keyHash)
}

//Add queues a policy violation create request
func (hpr *helmPR) Add(infos ...Info) {
	for _, info := range infos {
		hpr.enqueue(info)
	}
}

// Run starts the workers and blocks until stopCh is closed
func (hpr *helmPR) Run(workers int, stopCh <-chan struct{}) {
	logger := hpr.log
	defer utilruntime.HandleCrash()
	logger.Info("start")
	defer logger.Info("shutting down")

	for i := 0; i < workers; i++ {
		go wait.Until(hpr.runWorker, constant.PolicyViolationControllerResync, stopCh)
	}
	<-stopCh
}

// runWorker drains the queue until it is shut down
func (hpr *helmPR) runWorker() {
	for hpr.processNextWorkItem() {
	}
}

// handleErr re-queues a failed key up to helmWorkQueueRetryLimit times,
// then drops it and removes its payload from the data store.
func (hpr *helmPR) handleErr(err error, key interface{}) {
	logger := hpr.log
	if err == nil {
		hpr.queue.Forget(key)
		return
	}

	// retries requests if there is an error
	if hpr.queue.NumRequeues(key) < helmWorkQueueRetryLimit {
		logger.Error(err, "failed to sync policy violation", "key", key)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		hpr.queue.AddRateLimited(key)
		return
	}
	hpr.queue.Forget(key)
	// remove from data store
	if keyHash, ok := key.(string); ok {
		hpr.dataStore.delete(keyHash)
	}
	logger.Error(err, "dropping key out of the queue", "key", key)
}

// processNextWorkItem handles one queued key; it returns false only when
// the queue has been shut down.
func (hpr *helmPR) processNextWorkItem() bool {
	logger := hpr.log
	obj, shutdown := hpr.queue.Get()
	if shutdown {
		return false
	}

	err := func(obj interface{}) error {
		defer hpr.queue.Done(obj)
		var keyHash string
		var ok bool

		if keyHash, ok = obj.(string); !ok {
			hpr.queue.Forget(obj)
			logger.Info("incorrect type; expecting type 'string'", "obj", obj)
			return nil
		}

		// lookup data store; a zero Info means the payload was already
		// dropped, so the key is discarded
		info := hpr.dataStore.lookup(keyHash)
		if reflect.DeepEqual(info, Info{}) {
			// empty key
			hpr.queue.Forget(obj)
			logger.Info("empty key")
			return nil
		}

		err := hpr.syncHandler(info)
		hpr.handleErr(err, obj)
		return nil
	}(obj)

	// NOTE(review): the closure above always returns nil, so this branch is
	// unreachable; sync errors are routed through handleErr instead.
	if err != nil {
		logger.Error(err, "failed to process item")
		return true
	}

	return true
}
// syncHandler resolves the live resource to read its "helm.sh/chart" label
// and writes the violation into the per-chart policy report.
func (hpr *helmPR) syncHandler(info Info) error {
	logger := hpr.log

	// fix: GetResource takes (apiVersion, kind, namespace, name) — see the
	// other call sites in this package; the previous code passed name and
	// namespace swapped. The error is now also returned instead of being
	// ignored, which could dereference a nil resource below.
	resource, err := hpr.dclient.GetResource(info.Resource.GetAPIVersion(), info.Resource.GetKind(), info.Resource.GetNamespace(), info.Resource.GetName())
	if err != nil {
		logger.Error(err, "failed to get resource")
		return err
	}
	labels := resource.GetLabels()

	builder := newPrBuilder()
	pv := builder.generate(info)

	// Create Policy Violations
	logger.V(4).Info("creating policy violation", "key", info.toKey())
	if err := hpr.create(pv, labels["helm.sh/chart"]); err != nil {
		logger.Error(err, "failed to create policy violation")
		// even if there is a single failure we requeue the request
		return errors.New("Failed to process some policy violations, re-queuing")
	}
	return nil
}
// create merges the violation into the per-application (Helm) policy report
// in the violation's namespace, creating the report if it does not exist.
// appName is the value of the resource's "helm.sh/chart" label.
func (hpr *helmPR) create(pv kyverno.PolicyViolationTemplate, appName string) error {
	reportName := fmt.Sprintf("kyverno-policyreport-%s-%s", appName, pv.Spec.Policy)
	pr, err := hpr.policyreportInterface.PolicyReports(pv.Spec.Namespace).Get(reportName, v1.GetOptions{})
	if err != nil {
		if !k8serror.IsNotFound(err) {
			return err
		}

		// Report not found: initialize an empty Helm-scoped report.
		pr = &policyreportv1alpha12.PolicyReport{
			Scope: &corev1.ObjectReference{
				Kind:      "Helm",
				Namespace: pv.Spec.Namespace,
			},
			Summary: policyreportv1alpha12.PolicyReportSummary{},
			Results: []*policyreportv1alpha12.PolicyReportResult{},
		}
		labelMap := map[string]string{
			"policy-scope":  "application",
			"helm.sh/chart": appName,
		}
		pr.SetLabels(labelMap)
		pr.ObjectMeta.Name = reportName

		prObj := NewPolicyReport(pr, nil, &pv, hpr.dclient)
		pr = prObj.CreatePolicyViolationToPolicyReport()

		_, err = hpr.policyreportInterface.PolicyReports(pv.Spec.Namespace).Create(pr)
		if err != nil {
			return err
		}
		return nil
	}

	// Report exists: merge the violation in and update it.
	prObj := NewPolicyReport(pr, nil, &pv, hpr.dclient)
	pr = prObj.CreatePolicyViolationToPolicyReport()

	_, err = hpr.policyreportInterface.PolicyReports(pv.Spec.Namespace).Update(pr)
	if err != nil {
		return err
	}
	return nil
}

222
pkg/policyreport/namespacedpr.go Executable file
View file

@ -0,0 +1,222 @@
package policyreport
import (
"errors"
"fmt"
policyreportv1alpha12 "github.com/nirmata/kyverno/pkg/api/policyreport/v1alpha1"
corev1 "k8s.io/api/core/v1"
k8serror "k8s.io/apimachinery/pkg/api/errors"
"reflect"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
policyreportv1alpha1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/policyreport/v1alpha1"
policyreportlister "github.com/nirmata/kyverno/pkg/client/listers/policyreport/v1alpha1"
"github.com/nirmata/kyverno/pkg/constant"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/policystatus"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/workqueue"
)
// nsWorkQueueName is the name of the rate-limited work queue feeding
// namespaced policy report generation.
const nsWorkQueueName = "policy-report-namespace"

// nsWorkQueueRetryLimit is the maximum number of retries for a failed item
// before it is dropped from the queue.
const nsWorkQueueRetryLimit = 3

// namespacedPR generates namespaced PolicyReport resources from violation
// Info items delivered through a rate-limited work queue.
type namespacedPR struct {
	// dynamic client used to check whether violating resources still exist
	dclient *client.Client
	// get/list namespaced policy reports
	nsprLister policyreportlister.PolicyReportLister
	// typed client for creating/updating PolicyReport resources
	policyreportInterface policyreportv1alpha1.PolicyV1alpha1Interface
	// logger
	log logr.Logger
	// listener used to update policy status with the violation count
	policyStatusListener policystatus.Listener
	// dataStore maps queued key hashes to their Info payloads
	dataStore *dataStore
	// queue holds key hashes awaiting processing
	queue workqueue.RateLimitingInterface
}
// newNamespacedPR wires up a namespacedPR generator with its clients,
// lister, status listener, a fresh data store, and a named rate-limited
// work queue.
func newNamespacedPR(log logr.Logger, dclient *client.Client,
	nsprLister policyreportlister.PolicyReportLister,
	policyreportInterface policyreportv1alpha1.PolicyV1alpha1Interface,
	policyStatus policystatus.Listener,
) *namespacedPR {
	return &namespacedPR{
		dclient:               dclient,
		nsprLister:            nsprLister,
		policyreportInterface: policyreportInterface,
		log:                   log,
		policyStatusListener:  policyStatus,
		dataStore:             newDataStore(),
		queue:                 workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), nsWorkQueueName),
	}
}
// enqueue stores the info payload under its key hash and schedules the
// hash for processing.
func (nspr *namespacedPR) enqueue(info Info) {
	hash := info.toKey()
	// payload must be in the data store before the key becomes visible
	// to the workers
	nspr.dataStore.add(hash, info)
	nspr.queue.Add(hash)
}
// Add queues one policy violation create request per supplied info.
func (nspr *namespacedPR) Add(infos ...Info) {
	for i := range infos {
		nspr.enqueue(infos[i])
	}
}
// Run launches the requested number of worker goroutines and blocks until
// stopCh is closed.
func (nspr *namespacedPR) Run(workers int, stopCh <-chan struct{}) {
	logger := nspr.log
	defer utilruntime.HandleCrash()
	logger.Info("start")
	defer logger.Info("shutting down")
	for w := 0; w < workers; w++ {
		// each worker loops until the queue shuts down; wait.Until
		// restarts it on the controller resync period
		go wait.Until(nspr.runWorker, constant.PolicyViolationControllerResync, stopCh)
	}
	<-stopCh
}
// runWorker drains the work queue until it is shut down.
func (nspr *namespacedPR) runWorker() {
	for {
		if !nspr.processNextWorkItem() {
			return
		}
	}
}
// handleErr decides whether a failed work item should be retried.
// On success the key is forgotten; on failure it is re-queued (rate
// limited) up to nsWorkQueueRetryLimit times, after which it is dropped
// and its payload is removed from the data store.
func (nspr *namespacedPR) handleErr(err error, key interface{}) {
	logger := nspr.log
	if err == nil {
		nspr.queue.Forget(key)
		return
	}
	// retries the request if there is an error, up to the retry limit
	if nspr.queue.NumRequeues(key) < nsWorkQueueRetryLimit {
		logger.Error(err, "failed to sync policy violation", "key", key)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		nspr.queue.AddRateLimited(key)
		return
	}
	nspr.queue.Forget(key)
	// remove the payload from the data store so it does not leak
	if keyHash, ok := key.(string); ok {
		nspr.dataStore.delete(keyHash)
	}
	logger.Error(err, "dropping key out of the queue", "key", key)
}
// processNextWorkItem pops one key hash from the queue, resolves its Info
// payload from the data store and hands it to syncHandler; handleErr then
// decides between retry and drop. It returns false only when the queue has
// been shut down.
//
// The original wrapped the body in a closure that could only ever return
// nil, making the trailing error branch dead code; the closure has been
// flattened away.
func (nspr *namespacedPR) processNextWorkItem() bool {
	logger := nspr.log
	obj, shutdown := nspr.queue.Get()
	if shutdown {
		return false
	}
	defer nspr.queue.Done(obj)

	keyHash, ok := obj.(string)
	if !ok {
		// unexpected payload type: drop it
		nspr.queue.Forget(obj)
		logger.Info("incorrect type; expecting type 'string'", "obj", obj)
		return true
	}

	// look up the payload stored for this key hash
	info := nspr.dataStore.lookup(keyHash)
	if reflect.DeepEqual(info, Info{}) {
		// nothing stored for this key: drop it
		nspr.queue.Forget(obj)
		logger.Info("empty key")
		return true
	}

	nspr.handleErr(nspr.syncHandler(info), obj)
	return true
}
// syncHandler builds a policy violation template from info and passes it
// to create; on failure the error is logged and a non-nil error is
// returned so the item gets re-queued.
func (nspr *namespacedPR) syncHandler(info Info) error {
	logger := nspr.log
	pv := newPrBuilder().generate(info)
	if info.FromSync {
		// mark violations that originate from the background sync
		pv.Annotations = map[string]string{
			"fromSync": "true",
		}
	}
	logger.V(4).Info("creating policy violation", "key", info.toKey())
	if err := nspr.create(pv, ""); err != nil {
		logger.Error(err, "failed to create policy violation")
		// even a single failure re-queues the request
		return errors.New("Failed to process some policy violations, re-queuing")
	}
	return nil
}
// create merges the given policy violation into the namespaced
// PolicyReport named "kyverno-policyreport-<appName>-<policy>" in the
// violation's namespace, creating the report if it does not exist yet.
func (nspr *namespacedPR) create(pv kyverno.PolicyViolationTemplate, appName string) error {
	reportName := fmt.Sprintf("kyverno-policyreport-%s-%s", appName, pv.Spec.Policy)
	existing, err := nspr.policyreportInterface.PolicyReports(pv.Spec.Namespace).Get(reportName, v1.GetOptions{})
	if err == nil {
		// report already exists: merge the violation in and update it
		updated := NewPolicyReport(existing, nil, &pv, nspr.dclient).CreatePolicyViolationToPolicyReport()
		if _, err := nspr.policyreportInterface.PolicyReports(pv.Spec.Namespace).Update(updated); err != nil {
			return err
		}
		return nil
	}
	if !k8serror.IsNotFound(err) {
		return err
	}

	// report is missing: build a fresh one scoped to the namespace
	report := &policyreportv1alpha12.PolicyReport{
		Scope: &corev1.ObjectReference{
			Kind:      "Namespace",
			Namespace: pv.Spec.Namespace,
		},
		Summary: policyreportv1alpha12.PolicyReportSummary{},
		Results: []*policyreportv1alpha12.PolicyReportResult{},
	}
	report.SetLabels(map[string]string{
		"policy-scope": "namespace",
		"policy":       pv.Spec.Policy,
	})
	report.ObjectMeta.Name = reportName
	report = NewPolicyReport(report, nil, &pv, nspr.dclient).CreatePolicyViolationToPolicyReport()
	if _, err := nspr.policyreportInterface.PolicyReports(pv.Spec.Namespace).Create(report); err != nil {
		return err
	}
	return nil
}

288
pkg/policyreport/policyreport.go Executable file
View file

@ -0,0 +1,288 @@
package policyreport
import (
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
policyreportv1alpha1 "github.com/nirmata/kyverno/pkg/api/policyreport/v1alpha1"
client "github.com/nirmata/kyverno/pkg/dclient"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"sync"
)
// PolicyReport merges policy violations into PolicyReport /
// ClusterPolicyReport resources and keeps their summary counters in sync.
type PolicyReport struct {
	// namespaced report being built/updated (nil when only cluster scope is used)
	report *policyreportv1alpha1.PolicyReport
	// cluster-wide report being built/updated (nil when only namespaced scope is used)
	clusterReport *policyreportv1alpha1.ClusterPolicyReport
	// violation being merged into the report
	violation *kyverno.PolicyViolationTemplate
	// dynamic client used to check whether violating resources still exist
	k8sClient *client.Client
	// mux guards mutation of report/clusterReport and their summaries
	mux sync.Mutex
}
// NewPolicyReport builds a PolicyReport helper around the given namespaced
// report and cluster report (either may be nil depending on scope), the
// violation to merge, and the dynamic client used for resource lookups.
func NewPolicyReport(report *policyreportv1alpha1.PolicyReport, clusterReport *policyreportv1alpha1.ClusterPolicyReport, violation *kyverno.PolicyViolationTemplate, client *client.Client) *PolicyReport {
	pr := &PolicyReport{
		report:        report,
		clusterReport: clusterReport,
		violation:     violation,
		k8sClient:     client,
	}
	return pr
}
// RemovePolicyViolation prunes namespaced report results.
//
// When pvInfo is empty every result produced by the policy `name` is
// removed. Otherwise each result matched by a rule in pvInfo is either
// dropped (its resource no longer exists) or has its status/message
// refreshed; matched rules are consumed from the local Info copy so they
// are not reprocessed. Summary counters are kept in sync throughout.
//
// Fixes over the original: the inner loop iterated pvInfo[0].Rules for
// every info entry instead of info.Rules, and elements were deleted from
// slices while ranging over them (which skips elements and can index past
// the end); results are now filtered in place with explicit indices.
func (p *PolicyReport) RemovePolicyViolation(name string, pvInfo []Info) *policyreportv1alpha1.PolicyReport {
	p.mux.Lock()
	defer p.mux.Unlock()

	if len(pvInfo) == 0 {
		// remove every result belonging to the named policy
		kept := p.report.Results[:0]
		for _, result := range p.report.Results {
			if result.Policy == name {
				p.DecreaseCount(string(result.Status), "NAMESPACE")
				continue
			}
			kept = append(kept, result)
		}
		p.report.Results = kept
		return p.report
	}

	for _, info := range pvInfo {
		kept := p.report.Results[:0]
		for _, result := range p.report.Results {
			drop := false
			for j := 0; j < len(info.Rules); {
				v := info.Rules[j]
				if result.Resource.Name != info.Resource.GetName() || result.Policy != info.PolicyName || result.Rule != v.Name {
					j++
					continue
				}
				_, err := p.k8sClient.GetResource(result.Resource.APIVersion, result.Resource.Kind, result.Resource.Namespace, result.Resource.Name)
				if err != nil {
					if errors.IsNotFound(err) {
						// resource is gone: drop the stale result
						drop = true
						p.DecreaseCount(string(result.Status), "NAMESPACE")
					}
					j++
					continue
				}
				if v.Check != string(result.Status) {
					// check changed: refresh message/status and counters
					result.Message = v.Message
					p.DecreaseCount(string(result.Status), "NAMESPACE")
					result.Status = policyreportv1alpha1.PolicyStatus(v.Check)
					p.IncreaseCount(string(v.Check), "NAMESPACE")
				}
				// consume the matched rule; do not advance j
				info.Rules = append(info.Rules[:j], info.Rules[j+1:]...)
			}
			if !drop {
				kept = append(kept, result)
			}
		}
		p.report.Results = kept
	}
	return p.report
}
// RemoveClusterPolicyViolation prunes cluster report results.
//
// When pvInfo is empty every result produced by the policy `name` is
// removed. Otherwise each result matched by a rule in pvInfo is either
// dropped (its resource no longer exists) or has its status/message
// refreshed; matched rules are consumed from the local Info copy so they
// are not reprocessed. Summary counters are kept in sync throughout.
//
// Fixes over the original: the not-found branch mutated p.report.Results
// instead of p.clusterReport.Results (nil-dereference risk when only the
// cluster report is set), and elements were deleted from slices while
// ranging over them; results are now filtered in place.
func (p *PolicyReport) RemoveClusterPolicyViolation(name string, pvInfo []Info) *policyreportv1alpha1.ClusterPolicyReport {
	p.mux.Lock()
	defer p.mux.Unlock()

	if len(pvInfo) == 0 {
		// remove every result belonging to the named policy
		kept := p.clusterReport.Results[:0]
		for _, result := range p.clusterReport.Results {
			if result.Policy == name {
				p.DecreaseCount(string(result.Status), "CLUSTER")
				continue
			}
			kept = append(kept, result)
		}
		p.clusterReport.Results = kept
		return p.clusterReport
	}

	for _, info := range pvInfo {
		kept := p.clusterReport.Results[:0]
		for _, result := range p.clusterReport.Results {
			drop := false
			for j := 0; j < len(info.Rules); {
				v := info.Rules[j]
				if result.Resource.Name != info.Resource.GetName() || result.Policy != info.PolicyName || result.Rule != v.Name {
					j++
					continue
				}
				_, err := p.k8sClient.GetResource(result.Resource.APIVersion, result.Resource.Kind, result.Resource.Namespace, result.Resource.Name)
				if err != nil {
					if errors.IsNotFound(err) {
						// resource is gone: drop the stale result
						drop = true
						p.DecreaseCount(string(result.Status), "CLUSTER")
					}
					j++
					continue
				}
				if v.Check != string(result.Status) {
					// check changed: refresh message/status and counters
					result.Message = v.Message
					p.DecreaseCount(string(result.Status), "CLUSTER")
					result.Status = policyreportv1alpha1.PolicyStatus(v.Check)
					p.IncreaseCount(string(v.Check), "CLUSTER")
				}
				// consume the matched rule; do not advance j
				info.Rules = append(info.Rules[:j], info.Rules[j+1:]...)
			}
			if !drop {
				kept = append(kept, result)
			}
		}
		p.clusterReport.Results = kept
	}
	return p.clusterReport
}
// CreatePolicyViolationToPolicyReport merges the stored violation into the
// namespaced PolicyReport and returns the updated report.
//
// First pass reconciles existing results against the violated rules:
// stale results (resource deleted) are dropped, changed results get their
// message/status refreshed, and merged rules are consumed so the second
// pass does not duplicate them. Second pass appends fresh results for the
// remaining rules whose target resource still exists.
//
// Fixes over the original: the not-found branch deleted
// p.report.Results[i] using i, the index into ViolatedRules (wrong slice);
// result.Status was never updated even though the summary counters were;
// and the summary was not decremented when a stale result was dropped.
func (p *PolicyReport) CreatePolicyViolationToPolicyReport() *policyreportv1alpha1.PolicyReport {
	p.mux.Lock()
	defer p.mux.Unlock()

	kept := p.report.Results[:0]
	for _, result := range p.report.Results {
		drop := false
		for j := 0; j < len(p.violation.Spec.ViolatedRules); {
			rule := p.violation.Spec.ViolatedRules[j]
			if result.Policy != p.violation.Spec.Policy || result.Rule != rule.Name || result.Resource.Name != p.violation.Spec.Name {
				j++
				continue
			}
			_, err := p.k8sClient.GetResource(result.Resource.APIVersion, result.Resource.Kind, result.Resource.Namespace, result.Resource.Name)
			if err != nil {
				if errors.IsNotFound(err) {
					// resource is gone: drop the stale result
					drop = true
					p.DecreaseCount(string(result.Status), "NAMESPACE")
				}
				j++
				continue
			}
			if rule.Check != string(result.Status) {
				p.DecreaseCount(string(result.Status), "NAMESPACE")
				p.IncreaseCount(rule.Check, "NAMESPACE")
				result.Message = rule.Message
				result.Status = policyreportv1alpha1.PolicyStatus(rule.Check)
			}
			// consume the merged rule; do not advance j
			p.violation.Spec.ViolatedRules = append(p.violation.Spec.ViolatedRules[:j], p.violation.Spec.ViolatedRules[j+1:]...)
		}
		if !drop {
			kept = append(kept, result)
		}
	}
	p.report.Results = kept

	// append results for rules not merged above, skipping rules whose
	// target resource cannot be fetched
	for _, rule := range p.violation.Spec.ViolatedRules {
		if _, err := p.k8sClient.GetResource(p.violation.Spec.APIVersion, p.violation.Spec.Kind, p.violation.Spec.Namespace, p.violation.Spec.Name); err != nil {
			continue
		}
		result := &policyreportv1alpha1.PolicyReportResult{
			Policy:  p.violation.Spec.Policy,
			Rule:    rule.Name,
			Message: rule.Message,
			Status:  policyreportv1alpha1.PolicyStatus(rule.Check),
			Resource: &corev1.ObjectReference{
				Kind:       p.violation.Spec.Kind,
				Namespace:  p.violation.Spec.Namespace,
				APIVersion: p.violation.Spec.APIVersion,
				Name:       p.violation.Spec.Name,
			},
		}
		p.IncreaseCount(rule.Check, "NAMESPACE")
		p.report.Results = append(p.report.Results, result)
	}
	return p.report
}
// CreateClusterPolicyViolationsToClusterPolicyReport merges the stored
// violation into the ClusterPolicyReport and returns the updated report.
//
// First pass reconciles existing results against the violated rules:
// stale results (resource deleted) are dropped, changed results get their
// message/status refreshed, and merged rules are consumed so the second
// pass does not duplicate them. Second pass appends fresh results for the
// remaining rules whose target resource still exists.
//
// Fixes over the original: the not-found branch deleted
// clusterReport.Results[i] using i, the index into ViolatedRules (wrong
// slice); result.Status was never updated even though the summary counters
// were; and the summary was not decremented when a stale result was
// dropped.
func (p *PolicyReport) CreateClusterPolicyViolationsToClusterPolicyReport() *policyreportv1alpha1.ClusterPolicyReport {
	p.mux.Lock()
	defer p.mux.Unlock()

	kept := p.clusterReport.Results[:0]
	for _, result := range p.clusterReport.Results {
		drop := false
		for j := 0; j < len(p.violation.Spec.ViolatedRules); {
			rule := p.violation.Spec.ViolatedRules[j]
			if result.Policy != p.violation.Spec.Policy || result.Rule != rule.Name || result.Resource.Name != p.violation.Spec.Name {
				j++
				continue
			}
			_, err := p.k8sClient.GetResource(result.Resource.APIVersion, result.Resource.Kind, result.Resource.Namespace, result.Resource.Name)
			if err != nil {
				if errors.IsNotFound(err) {
					// resource is gone: drop the stale result
					drop = true
					p.DecreaseCount(string(result.Status), "CLUSTER")
				}
				j++
				continue
			}
			if rule.Check != string(result.Status) {
				result.Message = rule.Message
				p.DecreaseCount(string(result.Status), "CLUSTER")
				p.IncreaseCount(rule.Check, "CLUSTER")
				result.Status = policyreportv1alpha1.PolicyStatus(rule.Check)
			}
			// consume the merged rule; do not advance j
			p.violation.Spec.ViolatedRules = append(p.violation.Spec.ViolatedRules[:j], p.violation.Spec.ViolatedRules[j+1:]...)
		}
		if !drop {
			kept = append(kept, result)
		}
	}
	p.clusterReport.Results = kept

	// append results for rules not merged above, skipping rules whose
	// target resource cannot be fetched
	for _, rule := range p.violation.Spec.ViolatedRules {
		if _, err := p.k8sClient.GetResource(p.violation.Spec.APIVersion, p.violation.Spec.Kind, p.violation.Spec.Namespace, p.violation.Spec.Name); err != nil {
			continue
		}
		result := &policyreportv1alpha1.PolicyReportResult{
			Policy:  p.violation.Spec.Policy,
			Rule:    rule.Name,
			Message: rule.Message,
			Status:  policyreportv1alpha1.PolicyStatus(rule.Check),
			Resource: &corev1.ObjectReference{
				Kind:       p.violation.Spec.Kind,
				Namespace:  p.violation.Spec.Namespace,
				APIVersion: p.violation.Spec.APIVersion,
				Name:       p.violation.Spec.Name,
			},
		}
		p.IncreaseCount(rule.Check, "CLUSTER")
		p.clusterReport.Results = append(p.clusterReport.Results, result)
	}
	return p.clusterReport
}
// DecreaseCount decrements the summary counter matching status ("Pass",
// "Fail", anything else counts as Skip) on the cluster report when scope
// is "CLUSTER", otherwise on the namespaced report, clamping at zero.
// Redundant `break` statements (Go switch cases do not fall through) have
// been removed from the original.
func (p *PolicyReport) DecreaseCount(status string, scope string) {
	if scope == "CLUSTER" {
		switch status {
		case "Pass":
			if p.clusterReport.Summary.Pass--; p.clusterReport.Summary.Pass < 0 {
				p.clusterReport.Summary.Pass = 0
			}
		case "Fail":
			if p.clusterReport.Summary.Fail--; p.clusterReport.Summary.Fail < 0 {
				p.clusterReport.Summary.Fail = 0
			}
		default:
			if p.clusterReport.Summary.Skip--; p.clusterReport.Summary.Skip < 0 {
				p.clusterReport.Summary.Skip = 0
			}
		}
		return
	}
	switch status {
	case "Pass":
		if p.report.Summary.Pass--; p.report.Summary.Pass < 0 {
			p.report.Summary.Pass = 0
		}
	case "Fail":
		if p.report.Summary.Fail--; p.report.Summary.Fail < 0 {
			p.report.Summary.Fail = 0
		}
	default:
		if p.report.Summary.Skip--; p.report.Summary.Skip < 0 {
			p.report.Summary.Skip = 0
		}
	}
}
// IncreaseCount increments the summary counter matching status ("Pass",
// "Fail", anything else counts as Skip) on the cluster report when scope
// is "CLUSTER", otherwise on the namespaced report.
// Redundant `break` statements (Go switch cases do not fall through) have
// been removed from the original.
func (p *PolicyReport) IncreaseCount(status string, scope string) {
	if scope == "CLUSTER" {
		switch status {
		case "Pass":
			p.clusterReport.Summary.Pass++
		case "Fail":
			p.clusterReport.Summary.Fail++
		default:
			p.clusterReport.Summary.Skip++
		}
		return
	}
	switch status {
	case "Pass":
		p.report.Summary.Pass++
	case "Fail":
		p.report.Summary.Fail++
	default:
		p.report.Summary.Skip++
	}
}

7
pkg/policyviolation/builder.go Normal file → Executable file
View file

@ -81,14 +81,15 @@ func buildPVInfo(er response.EngineResponse) Info {
func buildViolatedRules(er response.EngineResponse) []kyverno.ViolatedRule {
var violatedRules []kyverno.ViolatedRule
for _, rule := range er.PolicyResponse.Rules {
if rule.Success {
continue
}
vrule := kyverno.ViolatedRule{
Name: rule.Name,
Type: rule.Type,
Message: rule.Message,
}
vrule.Check = "Fail"
if rule.Success {
vrule.Check = "Pass"
}
violatedRules = append(violatedRules, vrule)
}
return violatedRules

0
pkg/policyviolation/builder_test.go Normal file → Executable file
View file

0
pkg/policyviolation/clusterpv.go Normal file → Executable file
View file

0
pkg/policyviolation/common.go Normal file → Executable file
View file

23
pkg/policyviolation/generator.go Normal file → Executable file
View file

@ -2,6 +2,9 @@ package policyviolation
import (
"errors"
policyreportinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/policyreport/v1alpha1"
"github.com/nirmata/kyverno/pkg/policyreport"
"os"
"reflect"
"strconv"
"strings"
@ -43,6 +46,7 @@ type Generator struct {
queue workqueue.RateLimitingInterface
dataStore *dataStore
policyStatusListener policystatus.Listener
prgen *policyreport.Generator
}
//NewDataStore returns an instance of data store
@ -108,8 +112,11 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
dclient *dclient.Client,
pvInformer kyvernoinformer.ClusterPolicyViolationInformer,
nspvInformer kyvernoinformer.PolicyViolationInformer,
prInformer policyreportinformer.ClusterPolicyReportInformer,
nsprInformer policyreportinformer.PolicyReportInformer,
policyStatus policystatus.Listener,
log logr.Logger) *Generator {
log logr.Logger,
stopChna <-chan struct{}) *Generator {
gen := Generator{
kyvernoInterface: client.KyvernoV1(),
dclient: dclient,
@ -122,6 +129,16 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
log: log,
policyStatusListener: policyStatus,
}
if os.Getenv("POLICY-TYPE") == "POLICYREPORT" {
gen.prgen = policyreport.NewPRGenerator(client,
dclient,
prInformer,
nsprInformer,
policyStatus,
log,
)
go gen.prgen.Run(1, stopChna)
}
return &gen
}
@ -228,6 +245,10 @@ func (gen *Generator) processNextWorkItem() bool {
func (gen *Generator) syncHandler(info Info) error {
logger := gen.log
if os.Getenv("POLICY-TYPE") == "POLICYREPORT" {
gen.prgen.Add(policyreport.Info(info))
return nil
}
var handler pvGenerator
builder := newPvBuilder()
if info.Resource.GetNamespace() == "" {

0
pkg/policyviolation/namespacedpv.go Normal file → Executable file
View file

0
pkg/policyviolation/policyStatus_test.go Normal file → Executable file
View file