
Leader election (#175)

* leaderelection
* Helm Chart updates
* SkipTLS for MS Teams

Signed-off-by: Frank Jogeleit <frank.jogeleit@web.de>
Frank Jogeleit 2022-07-25 17:34:41 +02:00 committed by GitHub
parent 07fceb7371
commit 0b9f1262a1
63 changed files with 1318 additions and 108 deletions

.gitignore

@ -3,7 +3,7 @@
build
/test.yaml
**/test.db
sqlite-database.db
sqlite-database*.db
values.yaml
coverage.out
heap*


@ -1,5 +1,23 @@
# Changelog
# 2.11.0
* Policy Reporter
* High Availability support with leader election for features like target pushes, to avoid duplicate pushes by multiple instances
* Add new `role` and `rolebinding` to manage lease objects if leader election is enabled
* Add redis configuration to the Helm Chart for external cache storage
* Add PodDisruptionBudget for HA Deployments (replicaCount > 1)
* Add `skipTLS` configuration for MS Teams Webhook
* Policy Reporter KyvernoPlugin
* High Availability support with leader election for features like PolicyReport management for blocked resources
* Add new `role` and `rolebinding` to manage lease objects if leader election is enabled
* Add PodDisruptionBudget for HA Deployments (replicaCount > 1)
* Internal refactoring for better CRD management
* Policy Reporter UI
* Add redis as possible log storage to support high availability deployments
* Add PodDisruptionBudget for HA Deployments (replicaCount > 1)
# 2.10.3
* Policy Reporter
* Add new config `target.loki.path` to overwrite the deprecated prom push API


@ -1,7 +1,7 @@
GO ?= go
BUILD ?= build
REPO ?= ghcr.io/kyverno/policy-reporter
IMAGE_TAG ?= 2.3.2
IMAGE_TAG ?= 2.8.0
LD_FLAGS='-s -w -linkmode external -extldflags "-static"'
PLATFORMS ?= linux/arm64,linux/amd64,linux/s390x


@ -1,12 +1,12 @@
dependencies:
- name: monitoring
repository: ""
version: 2.3.4
version: 2.4.0
- name: ui
repository: ""
version: 2.5.0
version: 2.6.0
- name: kyvernoPlugin
repository: ""
version: 1.3.1
digest: sha256:a7747a3012887b28cdea945bdc9350b8868086caff02ee629e868bb004b76228
generated: "2022-07-20T10:33:42.22334+02:00"
version: 1.4.0
digest: sha256:028e4f27759ec84c5d465416c26930fc31478fe26dbdc283a713c283433b4a26
generated: "2022-07-25T12:52:39.651749+02:00"


@ -5,8 +5,8 @@ description: |
It creates Prometheus Metrics and can send rule validation events to different targets like Loki, Elasticsearch, Slack or Discord
type: application
version: 2.10.3
appVersion: 2.7.1
version: 2.11.0
appVersion: 2.8.0
icon: https://github.com/kyverno/kyverno/raw/main/img/logo.png
home: https://kyverno.github.io/policy-reporter
@ -18,10 +18,10 @@ maintainers:
dependencies:
- name: monitoring
condition: monitoring.enabled
version: "2.3.4"
version: "2.4.0"
- name: ui
condition: ui.enabled
version: "2.5.0"
version: "2.6.0"
- name: kyvernoPlugin
condition: kyvernoPlugin.enabled
version: "1.3.1"
version: "1.4.0"


@ -3,5 +3,5 @@ name: kyvernoPlugin
description: Policy Reporter Kyverno Plugin
type: application
version: 1.3.1
appVersion: 1.3.1
version: 1.4.0
appVersion: 1.4.0


@ -1,2 +1,9 @@
blockReports:
{{- toYaml .Values.blockReports | nindent 2 }}
{{- toYaml .Values.blockReports | nindent 2 }}
leaderElection:
enabled: {{ or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1) }}
releaseOnCancel: {{ .Values.leaderElection.releaseOnCancel }}
leaseDuration: {{ .Values.leaderElection.leaseDuration }}
renewDeadline: {{ .Values.leaderElection.renewDeadline }}
retryPeriod: {{ .Values.leaderElection.retryPeriod }}


@ -69,7 +69,7 @@ app.kubernetes.io/name: ui
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{- define "kyverno.securityContext" -}}
{{- define "kyvernoplugin.securityContext" -}}
{{- if semverCompare "<1.19" .Capabilities.KubeVersion.Version }}
{{ toYaml (omit .Values.securityContext "seccompProfile") }}
{{- else }}


@ -3,7 +3,6 @@ kind: Deployment
metadata:
name: {{ include "kyvernoplugin.fullname" . }}
annotations:
checksum/secret: {{ include (print .Template.BasePath "/config-secret.yaml") . | sha256sum | quote }}
{{- if .Values.annotations }}
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
@ -28,15 +27,14 @@ spec:
{{- with .Values.global.labels }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if or .Values.annotations .Values.podAnnotations }}
annotations:
checksum/secret: {{ include (print .Template.BasePath "/config-secret.yaml") . | sha256sum | quote }}
{{- with .Values.annotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
{{- end }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
@ -49,12 +47,13 @@ spec:
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.securityContext }}
securityContext: {{ include "kyverno.securityContext" . | nindent 12 }}
securityContext: {{ include "kyvernoplugin.securityContext" . | nindent 12 }}
{{- end }}
args:
- --port={{ .Values.port.number }}
- --metrics-enabled={{ .Values.metrics.enabled }}
- --rest-enabled={{ .Values.rest.enabled }}
- --lease-name={{ include "kyvernoplugin.fullname" . }}
ports:
- name: {{ .Values.port.name }}
containerPort: {{ .Values.port.number }}
@ -70,6 +69,17 @@ spec:
mountPath: /app/config.yaml
subPath: config.yaml
readOnly: true
{{- if or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1) }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
volumes:
- name: config-file
secret:


@ -0,0 +1,21 @@
{{- if (gt (int .Values.replicaCount) 1) }}
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ template "kyvernoplugin.fullname" . }}
labels:
{{- include "kyvernoplugin.labels" . | nindent 4 }}
{{- if .Values.annotations }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
spec:
{{- include "policyreporter.podDisruptionBudget" . | indent 2 }}
selector:
matchLabels:
{{- include "kyvernoplugin.selectorLabels" . | nindent 6 }}
{{- end }}


@ -0,0 +1,23 @@
{{- if and (and .Values.serviceAccount.create .Values.rbac.enabled) (and .Values.blockReports.enabled (or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1))) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
{{- if .Values.annotations }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "kyvernoplugin.labels" . | nindent 4 }}
name: {{ include "kyvernoplugin.fullname" . }}-leaderelection
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
{{- end -}}


@ -0,0 +1,20 @@
{{- if and (and .Values.serviceAccount.create .Values.rbac.enabled) (and .Values.blockReports.enabled (or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1))) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "kyvernoplugin.fullname" . }}-leaderelection
{{- if .Values.annotations }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "kyvernoplugin.labels" . | nindent 4 }}
roleRef:
kind: Role
name: {{ include "kyvernoplugin.fullname" . }}-leaderelection
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: "ServiceAccount"
name: {{ include "kyvernoplugin.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}


@ -2,13 +2,10 @@ image:
registry: ghcr.io
repository: kyverno/policy-reporter-kyverno-plugin
pullPolicy: IfNotPresent
tag: 1.3.1
tag: 1.4.0
imagePullSecrets: []
# Deploy not more than one replica
# Policy Reporter doesn't scale yet.
# Each pod will report each change.
replicaCount: 1
deploymentStrategy: {}
@ -110,6 +107,8 @@ rest:
metrics:
enabled: true
# create PolicyReports for enforce policies,
# based on Events created by Kyverno (>= v1.7.0)
blockReports:
enabled: false
eventNamespace: default
@ -117,6 +116,25 @@ blockReports:
maxPerReport: 200
keepOnlyLatest: false
# required if policy-reporter-kyverno-plugin should run in HA mode and the "blockReports" feature is enabled
# if "blockReports" is disabled, leaderElection is also disabled automatically
# will be enabled when replicaCount > 1
leaderElection:
enabled: false
releaseOnCancel: true
leaseDuration: 15
renewDeadline: 10
retryPeriod: 2
# enabled if replicaCount > 1
podDisruptionBudget:
# -- Configures the minimum available pods for kyvernoPlugin disruptions.
# Cannot be used if `maxUnavailable` is set.
minAvailable: 1
# -- Configures the maximum unavailable pods for kyvernoPlugin disruptions.
# Cannot be used if `minAvailable` is set.
maxUnavailable:
# Enable a NetworkPolicy for this chart. Useful on clusters where Network Policies are
# used and configured in a default-deny fashion.
networkPolicy:


@ -3,5 +3,5 @@ name: monitoring
description: Policy Reporter Monitoring with predefined ServiceMonitor and Grafana Dashboards
type: application
version: 2.3.4
version: 2.4.0
appVersion: 0.0.0


@ -49,4 +49,9 @@ app.kubernetes.io/instance: {{ .Release.Name }}
{{- else -}}
{{- .Release.Namespace -}}
{{- end }}
{{- end }}
{{- end }}
{{- define "kyvernoplugin.selectorLabels" -}}
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}


@ -21,8 +21,13 @@ spec:
{{- include "kyvernoplugin.selectorLabels" . | nindent 8 }}
endpoints:
- port: rest
{{- with .Values.kyverno.serviceMonitor.relabelings }}
relabelings:
- action: labeldrop
regex: pod|service|container
- targetLabel: instance
replacement: policy-reporter
action: replace
{{- with .Values.kyverno.serviceMonitor.relabelings }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.kyverno.serviceMonitor.metricRelabelings }}


@ -20,8 +20,13 @@ spec:
{{- include "policyreporter.selectorLabels" . | nindent 8 }}
endpoints:
- port: http
{{- with .Values.serviceMonitor.relabelings }}
relabelings:
- action: labeldrop
regex: pod|service|container
- targetLabel: instance
replacement: policy-reporter
action: replace
{{- with .Values.serviceMonitor.relabelings }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.serviceMonitor.metricRelabelings }}


@ -3,5 +3,5 @@ name: ui
description: Policy Reporter UI
type: application
version: 2.5.0
appVersion: 1.5.0
version: 2.6.0
appVersion: 1.6.0


@ -104,7 +104,7 @@ Create the name of the service account to use
{{- end }}
{{- end }}
{{- define "kyverno.securityContext" -}}
{{- define "ui.securityContext" -}}
{{- if semverCompare "<1.19" .Capabilities.KubeVersion.Version }}
{{ toYaml (omit .Values.securityContext "seccompProfile") }}
{{- else }}


@ -25,3 +25,9 @@ data:
clusters:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- with .Values.redis }}
redis:
{{- toYaml . | nindent 6 }}
{{- end }}


@ -40,7 +40,7 @@ spec:
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.securityContext }}
securityContext: {{ include "kyverno.securityContext" . | nindent 12 }}
securityContext: {{ include "ui.securityContext" . | nindent 12 }}
{{- end }}
args:
- -config=/app/config.yaml


@ -0,0 +1,21 @@
{{- if (gt (int .Values.replicaCount) 1) }}
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ template "ui.fullname" . }}
labels:
{{- include "ui.labels" . | nindent 4 }}
{{- if .Values.annotations }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
spec:
{{- include "policyreporter.podDisruptionBudget" . | indent 2 }}
selector:
matchLabels:
{{- include "ui.selectorLabels" . | nindent 6 }}
{{- end }}


@ -4,7 +4,7 @@ image:
registry: ghcr.io
repository: kyverno/policy-reporter-ui
pullPolicy: IfNotPresent
tag: 1.5.0
tag: 1.6.0
# possible default displayModes: light/dark
displayMode: ""
@ -38,6 +38,17 @@ clusters: []
# api: https://policy-reporter.external.cluster # reachable external Policy Reporter REST API
# kyvernoApi: https://policy-reporter-kyverno-plugin.external.cluster # (optional) reachable external Policy Reporter Kyverno Plugin REST API
# use redis as external log storage instead of an in memory store
# recommended when using an HA setup with more than one replica
# to get all logs on each instance
redis:
enabled: false
address: ""
database: 0
prefix: "policy-reporter-ui"
username: ""
password: ""
# configurations related to the PolicyReporter API
policyReporter:
port: 8080
@ -106,6 +117,15 @@ service:
# integer number. This is the port for the service
port: 8080
# enabled if replicaCount > 1
podDisruptionBudget:
# -- Configures the minimum available pods for policy-reporter-ui disruptions.
# Cannot be used if `maxUnavailable` is set.
minAvailable: 1
# -- Configures the maximum unavailable pods for policy-reporter-ui disruptions.
# Cannot be used if `minAvailable` is set.
maxUnavailable:
## Set to true to enable ingress record generation
# ref to: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:


@ -75,6 +75,7 @@ discord:
teams:
webhook: {{ .Values.target.teams.webhook | quote }}
skipTLS: {{ .Values.target.teams.skipTLS }}
minimumPriority: {{ .Values.target.teams.minimumPriority | quote }}
skipExistingOnStartup: {{ .Values.target.teams.skipExistingOnStartup }}
{{- with .Values.target.teams.sources }}
@ -187,3 +188,15 @@ reportFilter:
{{- end }}
clusterReports:
disabled: {{ .Values.reportFilter.clusterReports.disabled }}
leaderElection:
enabled: {{ or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1) }}
releaseOnCancel: {{ .Values.leaderElection.releaseOnCancel }}
leaseDuration: {{ .Values.leaderElection.leaseDuration }}
renewDeadline: {{ .Values.leaderElection.renewDeadline }}
retryPeriod: {{ .Values.leaderElection.retryPeriod }}
{{- with .Values.redis }}
redis:
{{- toYaml . | nindent 2 }}
{{- end }}


@ -76,10 +76,22 @@ Create UI target host based on configuration
{{- end }}
{{- end }}
{{- define "kyverno.securityContext" -}}
{{- define "policyreporter.securityContext" -}}
{{- if semverCompare "<1.19" .Capabilities.KubeVersion.Version }}
{{- toYaml (omit .Values.securityContext "seccompProfile") }}
{{- else }}
{{- toYaml .Values.securityContext }}
{{- end }}
{{- end }}
{{- define "policyreporter.podDisruptionBudget" -}}
{{- if and .Values.podDisruptionBudget.minAvailable .Values.podDisruptionBudget.maxUnavailable }}
{{- fail "Cannot set both .Values.podDisruptionBudget.minAvailable and .Values.podDisruptionBudget.maxUnavailable" -}}
{{- end }}
{{- if not .Values.podDisruptionBudget.maxUnavailable }}
minAvailable: {{ default 1 .Values.podDisruptionBudget.minAvailable }}
{{- end }}
{{- if .Values.podDisruptionBudget.maxUnavailable }}
maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }}
{{- end }}
{{- end }}
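
The `policyreporter.podDisruptionBudget` helper above encodes a small decision rule: `minAvailable` and `maxUnavailable` are mutually exclusive, and `minAvailable` defaults to 1 when neither is set. For illustration, a minimal Go sketch of the same logic — the `PDBValues` struct and `Render` function are hypothetical, not part of the chart:

```go
package main

import (
	"errors"
	"fmt"
)

// PDBValues mirrors the two chart values; nil means "not set".
type PDBValues struct {
	MinAvailable   *int
	MaxUnavailable *int
}

// Render reproduces the template's decision logic.
func (v PDBValues) Render() (string, error) {
	if v.MinAvailable != nil && v.MaxUnavailable != nil {
		return "", errors.New("cannot set both minAvailable and maxUnavailable")
	}
	if v.MaxUnavailable != nil {
		return fmt.Sprintf("maxUnavailable: %d", *v.MaxUnavailable), nil
	}
	min := 1 // default when maxUnavailable is unset
	if v.MinAvailable != nil {
		min = *v.MinAvailable
	}
	return fmt.Sprintf("minAvailable: %d", min), nil
}

func main() {
	out, _ := PDBValues{}.Render()
	fmt.Println(out) // minAvailable: 1
}
```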


@ -51,7 +51,7 @@ spec:
image: "{{ .Values.image.registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
{{- if .Values.securityContext }}
securityContext: {{ include "kyverno.securityContext" . | nindent 12 }}
securityContext: {{ include "policyreporter.securityContext" . | nindent 12 }}
{{- end }}
args:
- --port={{ .Values.port.number }}
@ -60,6 +60,7 @@ spec:
- --metrics-enabled={{ or .Values.metrics.enabled .Values.monitoring.enabled }}
- --rest-enabled={{ or .Values.rest.enabled .Values.ui.enabled }}
- --profile={{ .Values.profiling.enabled }}
- --lease-name={{ include "policyreporter.fullname" . }}
ports:
- name: {{ .Values.port.name }}
containerPort: {{ .Values.port.number }}
@ -81,6 +82,17 @@ spec:
subPath: config.yaml
{{- end }}
readOnly: true
{{- if or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1) }}
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- end }}
volumes:
- name: sqlite
emptyDir: {}


@ -0,0 +1,21 @@
{{- if (gt (int .Values.replicaCount) 1) }}
{{- if .Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }}
apiVersion: policy/v1
{{- else }}
apiVersion: policy/v1beta1
{{- end }}
kind: PodDisruptionBudget
metadata:
name: {{ template "policyreporter.fullname" . }}
labels:
{{- include "policyreporter.labels" . | nindent 4 }}
{{- if .Values.annotations }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
spec:
{{- include "policyreporter.podDisruptionBudget" . | indent 2 }}
selector:
matchLabels:
{{- include "policyreporter.selectorLabels" . | nindent 6 }}
{{- end }}


@ -0,0 +1,23 @@
{{- if and (and .Values.serviceAccount.create .Values.rbac.enabled) (or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1)) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
{{- if .Values.annotations }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "policyreporter.labels" . | nindent 4 }}
name: {{ include "policyreporter.fullname" . }}-leaderelection
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
{{- end -}}


@ -0,0 +1,20 @@
{{- if and (and .Values.serviceAccount.create .Values.rbac.enabled) (or .Values.leaderElection.enabled (gt (int .Values.replicaCount) 1)) -}}
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: {{ include "policyreporter.fullname" . }}-leaderelection
{{- if .Values.annotations }}
annotations:
{{- toYaml .Values.annotations | nindent 4 }}
{{- end }}
labels:
{{- include "policyreporter.labels" . | nindent 4 }}
roleRef:
kind: Role
name: {{ include "policyreporter.fullname" . }}-leaderelection
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: "ServiceAccount"
name: {{ include "policyreporter.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
{{- end -}}


@ -2,13 +2,10 @@ image:
registry: ghcr.io
repository: kyverno/policy-reporter
pullPolicy: IfNotPresent
tag: 2.7.1
tag: 2.8.0
imagePullSecrets: []
# Deploy not more than one replica
# Policy Reporter doesn't scale yet.
# Each pod will report each change.
replicaCount: 1
deploymentStrategy: {}
@ -323,6 +320,8 @@ target:
teams:
# teams webhook address
webhook: ""
# skip TLS
skipTLS: false
# minimum priority "" < info < warning < critical < error
minimumPriority: ""
# list of sources which should send to teams
@ -406,6 +405,34 @@ target:
# add additional s3 channels with different configurations and filters
channels: []
# required when policy-reporter runs in HA mode and you have targets configured
# if no targets are configured, leaderElection is disabled automatically
# will be enabled when replicaCount > 1
leaderElection:
enabled: false
releaseOnCancel: true
leaseDuration: 15
renewDeadline: 10
retryPeriod: 2
# use redis as external result cache instead of the in memory cache
redis:
enabled: false
address: ""
database: 0
prefix: "policy-reporter"
username: ""
password: ""
# enabled if replicaCount > 1
podDisruptionBudget:
# -- Configures the minimum available pods for policy-reporter disruptions.
# Cannot be used if `maxUnavailable` is set.
minAvailable: 1
# -- Configures the maximum unavailable pods for policy-reporter disruptions.
# Cannot be used if `minAvailable` is set.
maxUnavailable:
# Node labels for pod assignment
# ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}


@ -1,15 +1,18 @@
package cmd
import (
"context"
"flag"
"log"
"github.com/kyverno/policy-reporter/pkg/config"
"github.com/kyverno/policy-reporter/pkg/listener"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/clientcmd"
"k8s.io/klog"
)
func newRunCMD() *cobra.Command {
@ -41,8 +44,6 @@ func newRunCMD() *cobra.Command {
server := resolver.APIServer(client.HasSynced)
resolver.RegisterSendResultListener()
g := &errgroup.Group{}
if c.REST.Enabled {
@ -73,6 +74,33 @@ func newRunCMD() *cobra.Command {
server.RegisterProfilingHandler()
}
if resolver.HasTargets() && c.LeaderElection.Enabled {
elector, err := resolver.LeaderElectionClient()
if err != nil {
return err
}
elector.RegisterOnStart(func(c context.Context) {
klog.Info("started leadership")
resolver.RegisterSendResultListener()
}).RegisterOnNew(func(currentID, lockID string) {
if currentID != lockID {
klog.Infof("leadership by %s", currentID)
}
}).RegisterOnStop(func() {
klog.Info("stopped leadership")
resolver.EventPublisher().UnregisterListener(listener.NewResults)
})
g.Go(func() error {
return elector.Run(cmd.Context())
})
} else if resolver.HasTargets() {
resolver.RegisterSendResultListener()
}
g.Go(server.Start)
stop := make(chan struct{})
@ -95,6 +123,7 @@ func newRunCMD() *cobra.Command {
cmd.PersistentFlags().BoolP("metrics-enabled", "m", false, "Enable Policy Reporter's Metrics API")
cmd.PersistentFlags().BoolP("rest-enabled", "r", false, "Enable Policy Reporter's REST API")
cmd.PersistentFlags().Bool("profile", false, "Enable application profiling with pprof")
cmd.PersistentFlags().String("lease-name", "policy-reporter", "name of the LeaseLock")
flag.Parse()

go.mod

@ -64,6 +64,7 @@ require (
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog v1.0.0 // indirect
k8s.io/klog/v2 v2.70.0 // indirect
k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect

go.sum

@ -1014,6 +1014,8 @@ k8s.io/code-generator v0.24.2/go.mod h1:dpVhs00hTuTdTY6jvVxvTFCk6gSMrtfRydbhZwHI
k8s.io/component-base v0.24.2/go.mod h1:ucHwW76dajvQ9B7+zecZAP3BVqvrHoOxm8olHEg0nmM=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo v0.0.0-20211129171323-c02415ce4185/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=


@ -1,22 +1,23 @@
# Installation Manifests for Policy Reporter
You can use these manifests to install Policy Reporter without additional tools like Helm or Kustomize. The manifests are structured into three installations.
You can use these manifests to install Policy Reporter without additional tools like Helm or Kustomize. The manifests are structured into five installations.
The installation requires a `policy-reporter` namespace, because the installation includes RBAC resources which require a serviceAccountName and a namespace configuration. The default namespace is `policy-reporter`. If this namespace will be created if it does not exist.
The installation requires a `policy-reporter` namespace, because the installation includes RBAC resources which require a serviceAccountName and a namespace configuration. The default namespace is `policy-reporter`. This namespace will be created if it does not exist.
## Policy Reporter
The `policy-reporter` folder is the basic installation for Policy Reporter without the UI. It includes a basic configuration Secret `policy-reporter-targets` (empty by default) and the `http://policy-reporter:8080/metrics` endpoint.
The `policy-reporter` folder is the basic installation for Policy Reporter without the UI or other components. It includes a basic configuration Secret `policy-reporter-targets` (empty by default) and the `http://policy-reporter:8080/metrics` endpoint.
### Installation
```bash
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter/install.yaml
```
## Default Policy Reporter UI
## Policy Reporter + UI
The `default-policy-reporter-ui` folder is the extended Policy Reporter and the default Policy Reporter UI installation.
The `policy-reporter-ui` contains manifests for Policy Reporter and the Policy Reporter UI.
Enables:
* Policy Reporter REST API (`http://policy-reporter:8080`)
@ -27,14 +28,14 @@ Configures Policy Reporter UI as Target for Policy Reporter.
### Installation
```bash
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/default-policy-reporter-ui/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/default-policy-reporter-ui/target-secret.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/default-policy-reporter-ui/install.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-ui/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-ui/target-secret.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-ui/install.yaml
```
## Kyverno Policy Reporter UI
## Policy Reporter + KyvernoPlugin + UI
The `kyverno-policy-reporter-ui` folder is the extended Policy Reporter, Policy Reporter Kyverno Plugin and the extended Policy Reporter UI installation.
The `policy-reporter-kyverno-ui` contains manifests for Policy Reporter, Policy Reporter Kyverno Plugin and Policy Reporter UI.
Enables:
* Policy Reporter REST API (`http://policy-reporter:8080`)
@ -48,11 +49,41 @@ Configures Policy Reporter UI as Target for Policy Reporter and enables the Kyve
### Installation
```bash
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/kyverno-policy-reporter-ui/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/kyverno-policy-reporter-ui/target-secret.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/kyverno-policy-reporter-ui/install.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui/target-secret.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui/install.yaml
```
## Highly Available Policy Reporter + KyvernoPlugin + UI
The `policy-reporter-kyverno-ui-ha` folder contains a highly available setup for Policy Reporter, the Policy Reporter Kyverno Plugin and the Policy Reporter UI. It enables leader election and uses Redis as an external, central storage for shared caches and logs (UI).
Enables:
* Policy Reporter REST API (`http://policy-reporter:8080`)
* Policy Reporter Metrics API (`http://policy-reporter:8080/metrics`)
* Kyverno Plugin Rest API (`http://policy-reporter-kyverno-plugin:8080/policies`)
* Kyverno Plugin Metrics API (`http://policy-reporter-kyverno-plugin:8080/metrics`)
* Kyverno Plugin PolicyReport creation for blocked resources (by __Kyverno__ enforce policies)
* Policy Reporter UI Endpoint (`http://policy-reporter-ui:8080`).
Additional resources:
* `PodDisruptionBudget` for each component
* `Role` and `RoleBinding` for Policy Reporter and the KyvernoPlugin to manage Lease resources for leader election
* Basic `Redis`, used as a central, external cache for Policy Reporter and as central log storage for the Policy Reporter UI
### Installation
```bash
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui-ha/namespace.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui-ha/config-core.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui-ha/config-ui.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui-ha/config-kyverno-plugin.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui-ha/redis.yaml
kubectl apply -f https://raw.githubusercontent.com/kyverno/policy-reporter/main/manifest/policy-reporter-kyverno-ui-ha/install.yaml
```
See `complete-ha/README.md` for details about the configuration values used.
## Policy Reporter Configuration
To configure Policy Reporter, for example your notification targets, create a secret called `policy-reporter-targets` in the `policy-reporter` namespace with a key `config.yaml` and the following structure as its value:
@ -129,6 +160,22 @@ reportFilter:
exclude: []
clusterReports:
disabled: false
# optional external result caching
redis:
enabled: false
address: ""
database: 0
prefix: "policy-reporter"
username: ""
password: ""
leaderElection:
enabled: false
releaseOnCancel: true
leaseDuration: 15
renewDeadline: 10
retryPeriod: 2
```
The `kyverno-policy-reporter-ui` and `default-policy-reporter-ui` installations have an optional preconfigured `target-secret.yaml` to apply. This secret configures the Policy Reporter UI as a target for Policy Reporter.


@ -0,0 +1,48 @@
# Policy Reporter Configuration
Encoded configuration from `config-core.yaml`
See the [Documentation](https://kyverno.github.io/policy-reporter/core/config-reference) for a full reference of all possible configurations
```yaml
# send pushes to the Policy Reporter UI
ui:
host: http://policy-reporter-ui:8082
minimumPriority: "warning"
skipExistingOnStartup: true
# (optional) cache results in a central, external redis
redis:
enabled: true
address: "redis:6379"
database: 1
prefix: "policy-reporter"
```
# Policy Reporter UI Configuration
Encoded configuration from `config-ui.yaml`
See the [Documentation](https://kyverno.github.io/policy-reporter/ui/config-reference) for a full reference of all possible configurations
```yaml
# central and external Log storage
# only needed if you use the "Logs" page and want to have all logs on each instance available
redis:
enabled: true
address: "redis:6379"
database: 1
prefix: "policy-reporter-ui"
```
# Policy Reporter Kyverno Plugin Configuration
Encoded configuration from `config-kyverno-plugin.yaml`
See the [Documentation](https://kyverno.github.io/policy-reporter/kyverno-plugin/config-reference) for a full reference of all possible configurations
```yaml
# enables the creation of PolicyReports for blocked resources by enforce policies
blockReports:
enabled: true
```
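
The Secrets below store these configurations base64-encoded in their `data` field. A small, hypothetical Go helper that produces such a value from a plain config file (equivalent to what `kubectl create secret generic --from-file` does for you):

```go
package main

import (
	"encoding/base64"
	"fmt"
	"os"
)

func main() {
	// Assumes config-core.yaml sits in the current directory.
	raw, err := os.ReadFile("config-core.yaml")
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	// This string is what belongs under data["config.yaml"] in the Secret.
	fmt.Println(base64.StdEncoding.EncodeToString(raw))
}
```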


@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: policy-reporter-config
labels:
app.kubernetes.io/name: policy-reporter
app.kubernetes.io/instance: policy-reporter
type: Opaque
data:
config.yaml: dWk6CiAgaG9zdDogaHR0cDovL3BvbGljeS1yZXBvcnRlci11aTo4MDgyCiAgbWluaW11bVByaW9yaXR5OiAid2FybmluZyIKICBza2lwRXhpc3RpbmdPblN0YXJ0dXA6IHRydWUKCnJlZGlzOgogIGVuYWJsZWQ6IHRydWUKICBhZGRyZXNzOiAicmVkaXM6NjM3OSIKICBkYXRhYmFzZTogMQogIHByZWZpeDogInBvbGljeS1yZXBvcnRlciIK


@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: policy-reporter-kyverno-plugin-config
labels:
app.kubernetes.io/name: policy-reporter-kyverno-plugin
app.kubernetes.io/instance: policy-reporter
type: Opaque
data:
config.yaml: YmxvY2tSZXBvcnRzOgogIGVuYWJsZWQ6IHRydWU=


@ -0,0 +1,10 @@
apiVersion: v1
kind: Secret
metadata:
name: policy-reporter-ui-config
labels:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/instance: policy-reporter
type: Opaque
data:
config.yaml: cmVkaXM6CiAgZW5hYmxlZDogdHJ1ZQogIGFkZHJlc3M6ICJyZWRpczo2Mzc5IgogIGRhdGFiYXNlOiAxCiAgcHJlZml4OiAicG9saWN5LXJlcG9ydGVyLXVpIgo=


@ -0,0 +1,467 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: policy-reporter
namespace: policy-reporter
labels:
app.kubernetes.io/name: policy-reporter
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: policy-reporter-leaderelection
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: policy-reporter-leaderelection
roleRef:
kind: Role
name: policy-reporter-leaderelection
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: "ServiceAccount"
name: policy-reporter
namespace: policy-reporter
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: policy-reporter
rules:
- apiGroups:
- '*'
resources:
- policyreports
- policyreports/status
- clusterpolicyreports
- clusterpolicyreports/status
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: policy-reporter
roleRef:
kind: ClusterRole
name: policy-reporter
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: "ServiceAccount"
name: policy-reporter
namespace: policy-reporter
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: policy-reporter-kyverno-plugin
namespace: policy-reporter
labels:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: policy-reporter
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: policy-reporter-kyverno-plugin-leaderelection
rules:
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- delete
- get
- patch
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: policy-reporter-kyverno-plugin-leaderelection
roleRef:
kind: Role
name: policy-reporter-kyverno-plugin-leaderelection
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: "ServiceAccount"
name: policy-reporter-kyverno-plugin
namespace: policy-reporter
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
name: policy-reporter-kyverno-plugin
rules:
- apiGroups:
- '*'
resources:
- policies
- policies/status
- clusterpolicies
- clusterpolicies/status
verbs:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- get
- list
- watch
- apiGroups:
- '*'
resources:
- policyreports
- policyreports/status
- clusterpolicyreports
- clusterpolicyreports/status
verbs:
- get
- create
- update
- delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: policy-reporter-kyverno-plugin
roleRef:
kind: ClusterRole
name: policy-reporter-kyverno-plugin
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: "ServiceAccount"
name: policy-reporter-kyverno-plugin
namespace: policy-reporter
---
apiVersion: v1
kind: Service
metadata:
name: policy-reporter-kyverno-plugin
namespace: policy-reporter
labels:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: policy-reporter
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: policy-reporter
---
apiVersion: v1
kind: Service
metadata:
name: policy-reporter-ui
namespace: policy-reporter
labels:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/instance: policy-reporter
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/instance: policy-reporter
---
apiVersion: v1
kind: Service
metadata:
name: policy-reporter
namespace: policy-reporter
labels:
app.kubernetes.io/name: policy-reporter
spec:
type: ClusterIP
ports:
- port: 8080
targetPort: http
protocol: TCP
name: http
selector:
app.kubernetes.io/name: policy-reporter
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: policy-reporter-kyverno-plugin
namespace: policy-reporter
labels:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: policy-reporter
app.kubernetes.io/part-of: policy-reporter
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: policy-reporter
template:
metadata:
labels:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: policy-reporter
spec:
serviceAccountName: policy-reporter-kyverno-plugin
automountServiceAccountToken: true
containers:
- name: "kyverno-plugin"
image: "ghcr.io/kyverno/policy-reporter-kyverno-plugin:1.4.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1234
args:
- --config=/app/config.yaml
- --rest-enabled
- --lease-name=policy-reporter-kyverno-plugin
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /policies
port: http
readinessProbe:
httpGet:
path: /policies
port: http
resources: {}
volumeMounts:
- name: config-file
mountPath: /app/config.yaml
subPath: config.yaml
readOnly: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: config-file
secret:
secretName: policy-reporter-kyverno-plugin-config
optional: false
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: policy-reporter-ui
namespace: policy-reporter
labels:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/part-of: policy-reporter
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/instance: policy-reporter
template:
metadata:
labels:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/instance: policy-reporter
spec:
containers:
- name: ui
image: "ghcr.io/kyverno/policy-reporter-ui:1.6.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1234
args:
- -config=/app/config.yaml
- -policy-reporter=http://policy-reporter:8080
- -kyverno-plugin=http://policy-reporter-kyverno-plugin:8080
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /
port: http
readinessProbe:
httpGet:
path: /
port: http
resources: {}
volumeMounts:
- name: config-file
mountPath: /app/config.yaml
subPath: config.yaml
readOnly: true
volumes:
- name: config-file
secret:
secretName: policy-reporter-ui-config
optional: true
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: policy-reporter
namespace: policy-reporter
labels:
app.kubernetes.io/name: policy-reporter
app.kubernetes.io/part-of: policy-reporter
spec:
replicas: 2
selector:
matchLabels:
app.kubernetes.io/name: policy-reporter
template:
metadata:
labels:
app.kubernetes.io/name: policy-reporter
spec:
serviceAccountName: policy-reporter
automountServiceAccountToken: true
securityContext:
fsGroup: 1234
containers:
- name: policy-reporter
image: "ghcr.io/kyverno/policy-reporter:2.8.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
capabilities:
drop:
- ALL
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1234
args:
- --config=/app/config.yaml
- --dbfile=/sqlite/database.db
- --rest-enabled
- --lease-name=policy-reporter
ports:
- name: http
containerPort: 8080
protocol: TCP
livenessProbe:
httpGet:
path: /ready
port: http
readinessProbe:
httpGet:
path: /healthz
port: http
resources: {}
volumeMounts:
- name: sqlite
mountPath: /sqlite
- name: config-file
mountPath: /app/config.yaml
subPath: config.yaml
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumes:
- name: sqlite
emptyDir: {}
- name: config-file
secret:
secretName: policy-reporter-config
optional: false
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: policy-reporter
labels:
app.kubernetes.io/name: policy-reporter
app.kubernetes.io/part-of: policy-reporter
spec:
minAvailable: 1
selector:
matchLabels:
app.kubernetes.io/name: policy-reporter
app.kubernetes.io/instance: policy-reporter
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: kyverno-plugin
labels:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/part-of: policy-reporter
spec:
minAvailable: 1
selector:
matchLabels:
app.kubernetes.io/name: kyverno-plugin
app.kubernetes.io/instance: policy-reporter
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: policy-reporter-ui
labels:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/part-of: policy-reporter
spec:
minAvailable: 1
selector:
matchLabels:
app.kubernetes.io/name: policy-reporter-ui
app.kubernetes.io/instance: policy-reporter
---


@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: policy-reporter
spec: {}
status: {}


@ -0,0 +1,30 @@
---
apiVersion: v1
kind: Pod
metadata:
name: redis
labels:
app: redis
namespace: policy-reporter
spec:
containers:
- image: redis:alpine
imagePullPolicy: IfNotPresent
name: redis
resources: {}
---
apiVersion: v1
kind: Service
metadata:
labels:
app: redis
name: redis
namespace: policy-reporter
spec:
ports:
- port: 6379
protocol: TCP
targetPort: 6379
selector:
app: redis
type: ClusterIP
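
For illustration, a sketch of how the components reach this Service with the Redis settings shown in the configs above — assuming the common github.com/go-redis/redis/v8 client; the actual client wiring lives in the Policy Reporter code:

```go
package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8" // assumed client library for this sketch
)

func main() {
	// Matches the manifest defaults: Service "redis" on 6379, database 1, no auth.
	client := redis.NewClient(&redis.Options{
		Addr: "redis:6379",
		DB:   1,
	})

	// Keys are namespaced with the configured prefix, e.g. "policy-reporter".
	err := client.Set(context.Background(), "policy-reporter:example", "ok", 0).Err()
	fmt.Println(err)
}
```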


@ -158,7 +158,7 @@ spec:
automountServiceAccountToken: true
containers:
- name: "kyverno-plugin"
image: "ghcr.io/kyverno/policy-reporter-kyverno-plugin:1.3.1"
image: "ghcr.io/kyverno/policy-reporter-kyverno-plugin:1.4.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
@ -206,7 +206,7 @@ spec:
spec:
containers:
- name: ui
image: "ghcr.io/kyverno/policy-reporter-ui:1.5.0"
image: "ghcr.io/kyverno/policy-reporter-ui:1.6.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
@ -259,7 +259,7 @@ spec:
fsGroup: 1234
containers:
- name: policy-reporter
image: "ghcr.io/kyverno/policy-reporter:2.7.1"
image: "ghcr.io/kyverno/policy-reporter:2.8.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false


@ -94,7 +94,7 @@ spec:
automountServiceAccountToken: false
containers:
- name: ui
image: "ghcr.io/kyverno/policy-reporter-ui:1.5.0"
image: "ghcr.io/kyverno/policy-reporter-ui:1.6.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false
@ -147,7 +147,7 @@ spec:
fsGroup: 1234
containers:
- name: policy-reporter
image: "ghcr.io/kyverno/policy-reporter:2.7.1"
image: "ghcr.io/kyverno/policy-reporter:2.8.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false


@ -1,12 +1,5 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: policy-reporter
spec: {}
status: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: policy-reporter
@ -85,7 +78,7 @@ spec:
automountServiceAccountToken: true
containers:
- name: policy-reporter
image: "ghcr.io/kyverno/policy-reporter:2.7.1"
image: "ghcr.io/kyverno/policy-reporter:2.8.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false


@ -0,0 +1,7 @@
---
apiVersion: v1
kind: Namespace
metadata:
name: policy-reporter
spec: {}
status: {}


@ -26,7 +26,7 @@ spec:
restartPolicy: Never
containers:
- name: policy-reporter
image: "ghcr.io/kyverno/policy-reporter:2.7.1"
image: "ghcr.io/kyverno/policy-reporter:2.8.0"
imagePullPolicy: IfNotPresent
securityContext:
allowPrivilegeEscalation: false


@ -77,6 +77,7 @@ type Discord struct {
type Teams struct {
Name string `mapstructure:"name"`
Webhook string `mapstructure:"webhook"`
SkipTLS bool `mapstructure:"skipTLS"`
SkipExisting bool `mapstructure:"skipExistingOnStartup"`
MinimumPriority string `mapstructure:"minimumPriority"`
Filter TargetFilter `mapstructure:"filter"`
@ -214,25 +215,38 @@ type Redis struct {
Database int `mapstructure:"database"`
}
// LeaderElection configuration
type LeaderElection struct {
LockName string `mapstructure:"lockName"`
PodName string `mapstructure:"podName"`
Namespace string `mapstructure:"namespace"`
LeaseDuration int `mapstructure:"leaseDuration"`
RenewDeadline int `mapstructure:"renewDeadline"`
RetryPeriod int `mapstructure:"retryPeriod"`
ReleaseOnCancel bool `mapstructure:"releaseOnCancel"`
Enabled bool `mapstructure:"enabled"`
}
// Config of the PolicyReporter
type Config struct {
Loki Loki `mapstructure:"loki"`
Elasticsearch Elasticsearch `mapstructure:"elasticsearch"`
Slack Slack `mapstructure:"slack"`
Discord Discord `mapstructure:"discord"`
Teams Teams `mapstructure:"teams"`
S3 S3 `mapstructure:"s3"`
Kinesis Kinesis `mapstructure:"kinesis"`
UI UI `mapstructure:"ui"`
Webhook Webhook `mapstructure:"webhook"`
API API `mapstructure:"api"`
Kubeconfig string `mapstructure:"kubeconfig"`
DBFile string `mapstructure:"dbfile"`
Metrics Metrics `mapstructure:"metrics"`
REST REST `mapstructure:"rest"`
PriorityMap PriorityMap `mapstructure:"priorityMap"`
ReportFilter ReportFilter `mapstructure:"reportFilter"`
Redis Redis `mapstructure:"redis"`
Profiling Profiling `mapstructure:"profiling"`
EmailReports EmailReports `mapstructure:"emailReports"`
Loki Loki `mapstructure:"loki"`
Elasticsearch Elasticsearch `mapstructure:"elasticsearch"`
Slack Slack `mapstructure:"slack"`
Discord Discord `mapstructure:"discord"`
Teams Teams `mapstructure:"teams"`
S3 S3 `mapstructure:"s3"`
Kinesis Kinesis `mapstructure:"kinesis"`
UI UI `mapstructure:"ui"`
Webhook Webhook `mapstructure:"webhook"`
API API `mapstructure:"api"`
Kubeconfig string `mapstructure:"kubeconfig"`
DBFile string `mapstructure:"dbfile"`
Metrics Metrics `mapstructure:"metrics"`
REST REST `mapstructure:"rest"`
PriorityMap PriorityMap `mapstructure:"priorityMap"`
ReportFilter ReportFilter `mapstructure:"reportFilter"`
Redis Redis `mapstructure:"redis"`
Profiling Profiling `mapstructure:"profiling"`
EmailReports EmailReports `mapstructure:"emailReports"`
LeaderElection LeaderElection `mapstructure:"leaderElection"`
}


@ -10,6 +10,11 @@ import (
func Load(cmd *cobra.Command) (*Config, error) {
v := viper.New()
v.SetDefault("leaderElection.releaseOnCancel", true)
v.SetDefault("leaderElection.leaseDuration", 15)
v.SetDefault("leaderElection.renewDeadline", 10)
v.SetDefault("leaderElection.retryPeriod", 2)
cfgFile := ""
configFlag := cmd.Flags().Lookup("config")
@ -58,6 +63,17 @@ func Load(cmd *cobra.Command) (*Config, error) {
v.BindPFlag("emailReports.templates.dir", flag)
}
if flag := cmd.Flags().Lookup("lease-name"); flag != nil {
v.BindPFlag("leaderElection.lockName", flag)
}
if err := v.BindEnv("leaderElection.podName", "POD_NAME"); err != nil {
log.Printf("[WARNING] failed to bind env POD_NAME")
}
if err := v.BindEnv("leaderElection.namespace", "POD_NAMESPACE"); err != nil {
log.Printf("[WARNING] failed to bind env POD_NAMESPACE")
}
c := &Config{}
err := v.Unmarshal(c)
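
A self-contained sketch of how these bindings resolve at runtime: the downward-API environment variables set in the Deployments feed `leaderElection.podName` and `leaderElection.namespace`, while unset keys fall back to the defaults. Struct and values are trimmed for illustration:

```go
package main

import (
	"fmt"
	"os"

	"github.com/spf13/viper"
)

type LeaderElection struct {
	LockName  string `mapstructure:"lockName"`
	PodName   string `mapstructure:"podName"`
	Namespace string `mapstructure:"namespace"`
}

func main() {
	// Simulates the downward API env of a running pod (placeholder values).
	os.Setenv("POD_NAME", "policy-reporter-7d4f9")
	os.Setenv("POD_NAMESPACE", "policy-reporter")

	v := viper.New()
	v.SetDefault("leaderElection.lockName", "policy-reporter")
	// Same bindings as Load(): env vars feed the config keys.
	v.BindEnv("leaderElection.podName", "POD_NAME")
	v.BindEnv("leaderElection.namespace", "POD_NAMESPACE")

	c := struct {
		LeaderElection LeaderElection `mapstructure:"leaderElection"`
	}{}
	if err := v.Unmarshal(&c); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", c.LeaderElection)
}
```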


@ -1,6 +1,7 @@
package config
import (
"crypto/tls"
"database/sql"
"fmt"
"log"
@ -14,6 +15,7 @@ import (
"github.com/kyverno/policy-reporter/pkg/email/violations"
"github.com/kyverno/policy-reporter/pkg/helper"
"github.com/kyverno/policy-reporter/pkg/kubernetes"
"github.com/kyverno/policy-reporter/pkg/leaderelection"
"github.com/kyverno/policy-reporter/pkg/listener"
"github.com/kyverno/policy-reporter/pkg/listener/metrics"
"github.com/kyverno/policy-reporter/pkg/redis"
@ -36,6 +38,7 @@ import (
"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
wgpolicyk8sv1alpha2 "github.com/kyverno/kyverno/pkg/client/clientset/versioned/typed/policyreport/v1alpha2"
_ "github.com/mattn/go-sqlite3"
k8s "k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
)
@ -47,6 +50,7 @@ type Resolver struct {
publisher report.EventPublisher
policyStore sqlite3.PolicyReportStore
policyReportClient report.PolicyReportClient
leaderElector *leaderelection.Client
targetClients []target.Client
resultCache cache.Cache
}
@ -77,6 +81,31 @@ func (r *Resolver) PolicyReportStore(db *sql.DB) (sqlite3.PolicyReportStore, err
return r.policyStore, err
}
// LeaderElectionClient resolver method
func (r *Resolver) LeaderElectionClient() (*leaderelection.Client, error) {
if r.leaderElector != nil {
return r.leaderElector, nil
}
clientset, err := k8s.NewForConfig(r.k8sConfig)
if err != nil {
return nil, err
}
r.leaderElector = leaderelection.New(
clientset.CoordinationV1(),
r.config.LeaderElection.LockName,
r.config.LeaderElection.Namespace,
r.config.LeaderElection.PodName,
time.Duration(r.config.LeaderElection.LeaseDuration)*time.Second,
time.Duration(r.config.LeaderElection.RenewDeadline)*time.Second,
time.Duration(r.config.LeaderElection.RetryPeriod)*time.Second,
r.config.LeaderElection.ReleaseOnCancel,
)
return r.leaderElector, nil
}
// EventPublisher resolver method
func (r *Resolver) EventPublisher() report.EventPublisher {
if r.publisher != nil {
@ -96,18 +125,18 @@ func (r *Resolver) RegisterSendResultListener() {
newResultListener := listener.NewResultListener(r.SkipExistingOnStartup(), r.ResultCache(), time.Now())
newResultListener.RegisterListener(listener.NewSendResultListener(targets))
r.EventPublisher().RegisterListener(newResultListener.Listen)
r.EventPublisher().RegisterListener(listener.NewResults, newResultListener.Listen)
}
}
// RegisterSendResultListener resolver method
func (r *Resolver) RegisterStoreListener(store report.PolicyReportStore) {
r.EventPublisher().RegisterListener(listener.NewStoreListener(store))
r.EventPublisher().RegisterListener(listener.Store, listener.NewStoreListener(store))
}
// RegisterMetricsListener resolver method
func (r *Resolver) RegisterMetricsListener() {
r.EventPublisher().RegisterListener(listener.NewMetricsListener(
r.EventPublisher().RegisterListener(listener.Metrics, listener.NewMetricsListener(
metrics.NewResultFilter(
ToRuleSet(r.config.Metrics.Filter.Namespaces),
ToRuleSet(r.config.Metrics.Filter.Status),
@ -367,6 +396,10 @@ func (r *Resolver) TargetClients() []target.Client {
return r.targetClients
}
func (r *Resolver) HasTargets() bool {
return len(r.TargetClients()) > 0
}
// SkipExistingOnStartup config method
func (r *Resolver) SkipExistingOnStartup() bool {
for _, client := range r.TargetClients() {
@ -630,14 +663,27 @@ func createTeamsClient(config Teams, parent Teams) target.Client {
config.SkipExisting = parent.SkipExisting
}
if !config.SkipTLS {
config.SkipTLS = parent.SkipTLS
}
log.Printf("[INFO] %s configured", config.Name)
client := &http.Client{}
if config.SkipTLS {
client.Transport = http.DefaultTransport.(*http.Transport).Clone()
client.Transport.(*http.Transport).TLSClientConfig = &tls.Config{
InsecureSkipVerify: config.SkipTLS,
}
}
return teams.NewClient(
config.Name,
config.Webhook,
config.SkipExisting,
createTargetFilter(config.Filter, config.MinimumPriority, config.Sources),
&http.Client{},
client,
)
}
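
Note the design choice here: `http.DefaultTransport` is cloned before `TLSClientConfig` is overridden, so the process-wide shared transport (and every other client using it) keeps verifying certificates. A standalone sketch of the same pattern:

```go
package main

import (
	"crypto/tls"
	"net/http"
)

// newHTTPClient returns a client that optionally skips TLS verification.
// Cloning http.DefaultTransport keeps its defaults (proxy support, timeouts)
// without mutating the shared global transport.
func newHTTPClient(skipTLS bool) *http.Client {
	client := &http.Client{}
	if skipTLS {
		transport := http.DefaultTransport.(*http.Transport).Clone()
		transport.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
		client.Transport = transport
	}
	return client
}

func main() {
	_ = newHTTPClient(true)
}
```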


@ -48,6 +48,7 @@ var testConfig = &config.Config{
},
Teams: config.Teams{
Webhook: "http://hook.teams:80",
SkipTLS: true,
SkipExisting: true,
MinimumPriority: "debug",
Channels: []config.Teams{{
@ -166,9 +167,16 @@ func Test_ResolveTarget(t *testing.T) {
func Test_ResolveTargets(t *testing.T) {
resolver := config.NewResolver(testConfig, nil)
clients := resolver.TargetClients()
if count := len(clients); count != 17 {
t.Errorf("Expected 15 Clients, got %d", count)
if count := len(resolver.TargetClients()); count != 17 {
t.Errorf("Expected 17 Clients, got %d", count)
}
}
func Test_ResolveHasTargets(t *testing.T) {
resolver := config.NewResolver(testConfig, nil)
if !resolver.HasTargets() {
t.Errorf("Expected 'true'")
}
}
@ -405,6 +413,20 @@ func Test_ResolvePolicyClient(t *testing.T) {
}
}
func Test_ResolveLeaderElectionClient(t *testing.T) {
resolver := config.NewResolver(&config.Config{DBFile: "test.db"}, &rest.Config{})
client1, err := resolver.LeaderElectionClient()
if err != nil {
t.Errorf("Unexpected Error: %s", err)
}
client2, _ := resolver.LeaderElectionClient()
if client1 != client2 {
t.Error("A second call resolver.LeaderElectionClient() should return the cached first client")
}
}
func Test_ResolvePolicyStore(t *testing.T) {
resolver := config.NewResolver(&config.Config{DBFile: "test.db"}, &rest.Config{})
db, _ := resolver.Database()
@ -500,6 +522,18 @@ func Test_ResolveClientWithInvalidK8sConfig(t *testing.T) {
}
}
func Test_ResolveLeaderElectionWithInvalidK8sConfig(t *testing.T) {
k8sConfig := &rest.Config{}
k8sConfig.Host = "invalid/url"
resolver := config.NewResolver(testConfig, k8sConfig)
_, err := resolver.LeaderElectionClient()
if err == nil {
t.Error("Error: 'host must be a URL or a host:port pair' was expected")
}
}
func Test_ResolveCRDClient(t *testing.T) {
resolver := config.NewResolver(testConfig, &rest.Config{})


@ -16,7 +16,7 @@ func Test_Debouncer(t *testing.T) {
wg.Add(2)
publisher := report.NewEventPublisher()
publisher.RegisterListener(func(event report.LifecycleEvent) {
publisher.RegisterListener("test", func(event report.LifecycleEvent) {
counter++
wg.Done()
})


@ -25,7 +25,7 @@ func Test_PolicyReportWatcher(t *testing.T) {
store := newStore(3)
publisher := report.NewEventPublisher()
publisher.RegisterListener(func(event report.LifecycleEvent) {
publisher.RegisterListener("test", func(event report.LifecycleEvent) {
store.Add(event)
wg.Done()
})
@ -59,7 +59,7 @@ func Test_ClusterPolicyReportWatcher(t *testing.T) {
store := newStore(3)
publisher := report.NewEventPublisher()
publisher.RegisterListener(func(event report.LifecycleEvent) {
publisher.RegisterListener("test", func(event report.LifecycleEvent) {
store.Add(event)
wg.Done()
})


@ -0,0 +1,102 @@
package leaderelection
import (
"context"
"errors"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/client-go/kubernetes/typed/coordination/v1"
k8sleaderelection "k8s.io/client-go/tools/leaderelection"
"k8s.io/client-go/tools/leaderelection/resourcelock"
)
type Client struct {
client v1.CoordinationV1Interface
lockName string
namespace string
identity string
leaseDuration time.Duration
renewDeadline time.Duration
retryPeriod time.Duration
releaseOnCancel bool
onStartedLeading func(c context.Context)
onStoppedLeading func()
onNewLeader func(currentID, lockID string)
}
func (c *Client) RegisterOnStart(callback func(c context.Context)) *Client {
c.onStartedLeading = callback
return c
}
func (c *Client) RegisterOnStop(callback func()) *Client {
c.onStoppedLeading = callback
return c
}
func (c *Client) RegisterOnNew(callback func(currentID string, lockID string)) *Client {
c.onNewLeader = callback
return c
}
func (c *Client) Run(ctx context.Context) error {
k8sleaderelection.RunOrDie(ctx, k8sleaderelection.LeaderElectionConfig{
Lock: c.createLock(),
ReleaseOnCancel: c.releaseOnCancel,
LeaseDuration: c.leaseDuration,
RenewDeadline: c.renewDeadline,
RetryPeriod: c.retryPeriod,
Callbacks: k8sleaderelection.LeaderCallbacks{
OnStartedLeading: c.onStartedLeading,
OnStoppedLeading: c.onStoppedLeading,
OnNewLeader: func(identity string) {
c.onNewLeader(identity, c.identity)
},
},
})
return errors.New("leaderelection stopped")
}
func (c *Client) createLock() *resourcelock.LeaseLock {
return &resourcelock.LeaseLock{
LeaseMeta: metav1.ObjectMeta{
Name: c.lockName,
Namespace: c.namespace,
},
Client: c.client,
LockConfig: resourcelock.ResourceLockConfig{
Identity: c.identity,
},
}
}
func New(
client v1.CoordinationV1Interface,
lockName string,
namespace string,
identity string,
leaseDuration time.Duration,
renewDeadline time.Duration,
retryPeriod time.Duration,
releaseOnCancel bool,
) *Client {
return &Client{
client,
lockName,
namespace,
identity,
leaseDuration,
renewDeadline,
retryPeriod,
releaseOnCancel,
func(c context.Context) {},
func() {},
func(currentID, lockID string) {},
}
}
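
A hedged usage sketch of this Client, following the wiring in cmd/run.go — lease name, namespace and identity are placeholders that would normally come from the config and the downward API:

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/kyverno/policy-reporter/pkg/leaderelection"
	k8s "k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig() // assumes it runs inside the cluster
	if err != nil {
		log.Fatal(err)
	}
	clientset, err := k8s.NewForConfig(cfg)
	if err != nil {
		log.Fatal(err)
	}

	elector := leaderelection.New(
		clientset.CoordinationV1(),
		"policy-reporter",       // lock name, see --lease-name
		"policy-reporter",       // namespace, see POD_NAMESPACE
		"policy-reporter-7d4f9", // identity, see POD_NAME (placeholder)
		15*time.Second,          // leaseDuration
		10*time.Second,          // renewDeadline
		2*time.Second,           // retryPeriod
		true,                    // releaseOnCancel
	)

	elector.RegisterOnStart(func(ctx context.Context) {
		log.Println("started leadership: register result listeners here")
	}).RegisterOnStop(func() {
		log.Println("stopped leadership: unregister result listeners here")
	})

	// Run blocks until the election loop stops.
	log.Println(elector.Run(context.Background()))
}
```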


@ -10,6 +10,8 @@ var (
ClusterResultGaugeName = "cluster_policy_report_result"
)
const Metrics = "metric_listener"
// NewMetricsListener for PolicyReport watch.Events
func NewMetricsListener(
filter *report.ResultFilter,


@ -8,6 +8,8 @@ import (
"github.com/kyverno/policy-reporter/pkg/report"
)
const NewResults = "new_results_listener"
type ResultListener struct {
skipExisting bool
listener []report.PolicyReportResultListener


@ -7,6 +7,8 @@ import (
"github.com/kyverno/policy-reporter/pkg/target"
)
const SendResults = "send_results_listener"
func NewSendResultListener(clients []target.Client) report.PolicyReportResultListener {
return func(r report.Result, e bool) {
wg := &sync.WaitGroup{}


@ -6,6 +6,8 @@ import (
"github.com/kyverno/policy-reporter/pkg/report"
)
const Store = "store_listener"
func NewStoreListener(store report.PolicyReportStore) report.PolicyReportListener {
return func(event report.LifecycleEvent) {
if event.Type == report.Deleted {


@ -5,25 +5,34 @@ import (
)
type EventPublisher interface {
// RegisterListener register Handlers called on each PolicyReport watch.Event
RegisterListener(PolicyReportListener)
// RegisterListener register Handlers called on each PolicyReport Event
RegisterListener(string, PolicyReportListener)
// UnregisterListener removes an registered handler
UnregisterListener(string)
// GetListener returns a list of all registered Listeners
GetListener() []PolicyReportListener
GetListener() map[string]PolicyReportListener
// Process LifecycleEvent with all registered listeners
Publish(event LifecycleEvent)
}
type lifecycleEventPublisher struct {
listeners []PolicyReportListener
listeners map[string]PolicyReportListener
listenerCount int
}
func (p *lifecycleEventPublisher) RegisterListener(listener PolicyReportListener) {
p.listeners = append(p.listeners, listener)
func (p *lifecycleEventPublisher) RegisterListener(name string, listener PolicyReportListener) {
p.listeners[name] = listener
p.listenerCount++
}
func (p *lifecycleEventPublisher) GetListener() []PolicyReportListener {
func (p *lifecycleEventPublisher) UnregisterListener(name string) {
if _, ok := p.listeners[name]; ok {
delete(p.listeners, name)
p.listenerCount--
}
}
func (p *lifecycleEventPublisher) GetListener() map[string]PolicyReportListener {
return p.listeners
}
@ -42,5 +51,8 @@ func (p *lifecycleEventPublisher) Publish(event LifecycleEvent) {
}
func NewEventPublisher() EventPublisher {
return &lifecycleEventPublisher{}
return &lifecycleEventPublisher{
listeners: make(map[string]func(LifecycleEvent)),
listenerCount: 0,
}
}
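
A short usage sketch of the new named-listener API: because listeners are now keyed by name, the send-result listener can be removed when an instance loses leadership without disturbing the store or metrics listeners. Names reuse the constants introduced above:

```go
package main

import (
	"github.com/kyverno/policy-reporter/pkg/listener"
	"github.com/kyverno/policy-reporter/pkg/report"
)

func main() {
	publisher := report.NewEventPublisher()

	// Register the send listener under its well-known name.
	publisher.RegisterListener(listener.NewResults, func(event report.LifecycleEvent) {
		// forward new results to the configured targets
	})

	// On lost leadership, drop only the send listener; store and
	// metrics listeners registered under other names stay intact.
	publisher.UnregisterListener(listener.NewResults)
}
```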


@ -14,7 +14,7 @@ func Test_PublishLifecycleEvents(t *testing.T) {
wg.Add(1)
publisher := report.NewEventPublisher()
publisher.RegisterListener(func(le report.LifecycleEvent) {
publisher.RegisterListener("test", func(le report.LifecycleEvent) {
event = le
wg.Done()
})
@ -35,7 +35,7 @@ func Test_PublishDeleteLifecycleEvents(t *testing.T) {
wg.Add(2)
publisher := report.NewEventPublisher()
publisher.RegisterListener(func(le report.LifecycleEvent) {
publisher.RegisterListener("test", func(le report.LifecycleEvent) {
event = le
wg.Done()
})
@ -52,9 +52,19 @@ func Test_PublishDeleteLifecycleEvents(t *testing.T) {
func Test_GetReisteredListeners(t *testing.T) {
publisher := report.NewEventPublisher()
publisher.RegisterListener(func(le report.LifecycleEvent) {})
publisher.RegisterListener("test", func(le report.LifecycleEvent) {})
if len(publisher.GetListener()) != 1 {
t.Error("Expected to get one registered listener back")
}
}
func Test_UnreisteredListeners(t *testing.T) {
publisher := report.NewEventPublisher()
publisher.RegisterListener("test", func(le report.LifecycleEvent) {})
publisher.UnregisterListener("test")
if len(publisher.GetListener()) != 0 {
t.Error("Expected to get 0 listeners back after unregistration")
}
}