
Add concurrency to metrics and loki client

Frank Jogeleit 2021-02-20 11:00:10 +01:00
parent 438bd923d4
commit 4cfde4347b
6 changed files with 107 additions and 47 deletions


@@ -1,3 +1,4 @@
config.yaml
build
README.md
docs


@@ -1,8 +1,8 @@
# PolicyReporter
PolicyReporter is a simple tool to watch for PolicyReports in your cluster.
## Motivation
It uses these resources to create Prometheus metrics. It also provides a configuration to push rule validation results to Grafana Loki.
Kyverno ships with two types of validation: you can either enforce a rule or audit it. If you don't want to block developers, or if you want to try out a new rule first, you can audit it. The audit configuration creates PolicyReports which you can describe or read with `kubectl`, but it's not easy to get a good overview that way. To solve this problem, this tool sends the information from PolicyReports to Loki and provides a metrics endpoint with metrics about the summaries and each rule result.
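To make that flow concrete, here is a minimal sketch of the core idea: translate each PolicyReport result into a labelled Prometheus gauge sample and expose it over an HTTP metrics endpoint. It is illustrative only, not the project's code; the metric name and label set match the gauge added in the metrics diff further down, but the sample values are invented.

package main

import (
	"log"
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Same metric name and labels as the gauge introduced in the metrics package below.
	results := promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "policy_report_result",
		Help: "List of all PolicyReport Results",
	}, []string{"namespace", "rule", "policy", "kind", "name", "status"})

	// In the real tool these values come from watched PolicyReport resources;
	// this sample is made up for illustration.
	results.WithLabelValues("default", "check-label", "require-labels", "Pod", "nginx", "Fail").Set(1)

	http.Handle("/metrics", promhttp.Handler())
	log.Fatal(http.ListenAndServe(":2112", nil))
}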
## Installation with Helm v3
@@ -34,3 +34,7 @@ policyPriorities:
![Grafana Loki](https://github.com/fjogeleit/policy-reporter/blob/main/docs/images/grafana-loki.png?raw=true)
![Prometheus Metrics](https://github.com/fjogeleit/policy-reporter/blob/main/docs/images/prometheus.png?raw=true)
# Todos
* Support for ClusterPolicyReports
* Additional Targets


@@ -6,7 +6,6 @@ import (
"net/http"
"github.com/fjogeleit/policy-reporter/pkg/config"
"github.com/fjogeleit/policy-reporter/pkg/metrics"
"github.com/fjogeleit/policy-reporter/pkg/report"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/spf13/cobra"
@@ -46,11 +45,16 @@ func NewCLI() *cobra.Command {
	if loki != nil {
		go client.WatchRuleValidation(func(r report.Result) {
			loki.Send(r)
			go loki.Send(r)
		})
	}
	go metrics.GenerateMetrics(client)
	metrics, err := resolver.Metrics()
	if err != nil {
		return err
	}
	go metrics.GenerateMetrics()
	http.Handle("/metrics", promhttp.Handler())
	http.ListenAndServe(":2112", nil)
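The switch from loki.Send(r) to go loki.Send(r) above keeps the rule-validation watch callback from blocking on the HTTP round trip to Loki. A stripped-down sketch of that idea, with made-up names rather than the project's API:

package main

import (
	"fmt"
	"sync"
	"time"
)

// send stands in for a push to a remote sink such as Loki.
func send(result string) {
	time.Sleep(200 * time.Millisecond) // simulated HTTP round trip
	fmt.Println("sent:", result)
}

func main() {
	var wg sync.WaitGroup

	// Handling each result in its own goroutine keeps the "watch loop" moving
	// instead of stalling for every push.
	for _, result := range []string{"Pass", "Fail", "Warn"} {
		wg.Add(1)
		go func(r string) {
			defer wg.Done()
			send(r)
		}(result)
	}

	wg.Wait()
}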

Binary file not shown (image diff: 184 KiB before, 265 KiB after).


@@ -2,13 +2,15 @@ package config
import (
	"github.com/fjogeleit/policy-reporter/pkg/kubernetes"
	"github.com/fjogeleit/policy-reporter/pkg/metrics"
	"github.com/fjogeleit/policy-reporter/pkg/target"
	"github.com/fjogeleit/policy-reporter/pkg/target/loki"
)

var (
	kubeClient       kubernetes.Client
	lokiClient       target.Client
	metricsGenerator *metrics.Metrics
)

type Resolver struct {
@@ -35,6 +37,19 @@ func (r *Resolver) LokiClient() target.Client {
	return loki.NewClient(r.config.Loki.Host)
}

func (r *Resolver) Metrics() (*metrics.Metrics, error) {
	if metricsGenerator != nil {
		return metricsGenerator, nil
	}

	client, err := r.KubernetesClient()
	if err != nil {
		return nil, err
	}

	metricsGenerator = metrics.NewMetrics(client)

	return metricsGenerator, nil
}

func NewResolver(config *Config) Resolver {
	return Resolver{config}
}


@@ -1,6 +1,8 @@
package metrics

import (
	"sync"

	"github.com/fjogeleit/policy-reporter/pkg/kubernetes"
	"github.com/fjogeleit/policy-reporter/pkg/report"
	"github.com/prometheus/client_golang/prometheus"
@@ -8,57 +10,83 @@ import (
	"k8s.io/apimachinery/pkg/watch"
)
func GenerateMetrics(client kubernetes.Client) {
type Metrics struct {
	client  kubernetes.Client
	cache   map[string]report.PolicyReport
	rwmutex *sync.RWMutex
}

func (m Metrics) getCachedReport(i string) report.PolicyReport {
	m.rwmutex.RLock()
	defer m.rwmutex.RUnlock()
	return m.cache[i]
}

func (m Metrics) cachedReport(r report.PolicyReport) {
	m.rwmutex.Lock()
	m.cache[r.GetIdentifier()] = r
	m.rwmutex.Unlock()
}

func (m Metrics) removeCachedReport(i string) {
	m.rwmutex.Lock()
	delete(m.cache, i)
	m.rwmutex.Unlock()
}

func (m Metrics) GenerateMetrics() {
	policyGauge := promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "policy_report",
		Name: "policy_report_summary",
		Help: "Summary of all PolicyReports",
	}, []string{"namespace", "name", "status"})

	ruleGauge := promauto.NewGaugeVec(prometheus.GaugeOpts{
		Name: "rule_validation",
		Name: "policy_report_result",
		Help: "List of all PolicyReport Results",
	}, []string{"namespace", "rule", "policy", "kind", "name", "status"})

	prometheus.Register(policyGauge)
	prometheus.Register(ruleGauge)
	cache := make(map[string]report.PolicyReport)

	m.client.WatchPolicyReports(func(e watch.EventType, r report.PolicyReport) {
		go func(event watch.EventType, report report.PolicyReport) {
			switch event {
			case watch.Added:
				updatePolicyGauge(policyGauge, report)
	client.WatchPolicyReports(func(s watch.EventType, report report.PolicyReport) {
		switch s {
		case watch.Added:
			updatePolicyGauge(policyGauge, report)
				for _, rule := range report.Results {
					res := rule.Resources[0]
					ruleGauge.WithLabelValues(report.Namespace, rule.Rule, rule.Policy, res.Kind, res.Name, rule.Status).Set(1)
				}
				m.cachedReport(report)
			case watch.Modified:
				updatePolicyGauge(policyGauge, report)
				for _, rule := range m.getCachedReport(report.GetIdentifier()).Results {
					res := rule.Resources[0]
					ruleGauge.WithLabelValues(report.Namespace, rule.Rule, rule.Policy, res.Kind, res.Name, rule.Status).Set(0)
				}
				for _, rule := range report.Results {
					res := rule.Resources[0]
					ruleGauge.WithLabelValues(report.Namespace, rule.Rule, rule.Policy, res.Kind, res.Name, rule.Status).Set(1)
				}
			case watch.Deleted:
				policyGauge.WithLabelValues(report.Namespace, report.Name, "Pass").Set(0)
				policyGauge.WithLabelValues(report.Namespace, report.Name, "Fail").Set(0)
				policyGauge.WithLabelValues(report.Namespace, report.Name, "Warn").Set(0)
				policyGauge.WithLabelValues(report.Namespace, report.Name, "Error").Set(0)
				policyGauge.WithLabelValues(report.Namespace, report.Name, "Skip").Set(0)
				for _, rule := range report.Results {
					res := rule.Resources[0]
					ruleGauge.WithLabelValues(report.Namespace, rule.Rule, rule.Policy, res.Kind, res.Name, rule.Status).Set(0)
				}
				m.removeCachedReport(report.GetIdentifier())
			}
			cache[report.GetIdentifier()] = report
		case watch.Modified:
			updatePolicyGauge(policyGauge, report)
			for _, rule := range cache[report.GetIdentifier()].Results {
				res := rule.Resources[0]
				ruleGauge.WithLabelValues(report.Namespace, rule.Rule, rule.Policy, res.Kind, res.Name, rule.Status).Set(0)
			}
			for _, rule := range report.Results {
				res := rule.Resources[0]
				ruleGauge.WithLabelValues(report.Namespace, rule.Rule, rule.Policy, res.Kind, res.Name, rule.Status).Set(1)
			}
		case watch.Deleted:
			policyGauge.WithLabelValues(report.Namespace, report.Name, "Pass").Set(0)
			policyGauge.WithLabelValues(report.Namespace, report.Name, "Fail").Set(0)
			policyGauge.WithLabelValues(report.Namespace, report.Name, "Warn").Set(0)
			policyGauge.WithLabelValues(report.Namespace, report.Name, "Error").Set(0)
			policyGauge.WithLabelValues(report.Namespace, report.Name, "Skip").Set(0)
			for _, rule := range report.Results {
				res := rule.Resources[0]
				ruleGauge.WithLabelValues(report.Namespace, rule.Rule, rule.Policy, res.Kind, res.Name, rule.Status).Set(0)
			}
			delete(cache, report.GetIdentifier())
		}
		}(e, r)
	})
}
@@ -79,3 +107,11 @@ func updatePolicyGauge(policyGauge *prometheus.GaugeVec, report report.PolicyReport
		WithLabelValues(report.Namespace, report.Name, "Skip").
		Set(float64(report.Summary.Skip))
}
func NewMetrics(client kubernetes.Client) *Metrics {
	return &Metrics{
		client:  client,
		cache:   make(map[string]report.PolicyReport),
		rwmutex: new(sync.RWMutex),
	}
}
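A note on why the rwmutex field exists: GenerateMetrics now handles every watch event in its own goroutine, so the shared cache map would otherwise be read and written concurrently, which is a data race in Go. The sketch below shows the same read/write-lock pattern in isolation; the type and values are invented for illustration and are not part of the repository.

package main

import (
	"fmt"
	"sync"
)

// reportCache mimics the Metrics cache above: a plain map guarded by a RWMutex
// so many goroutines can touch it safely.
type reportCache struct {
	rwmutex sync.RWMutex
	items   map[string]string
}

func (c *reportCache) set(id, status string) {
	c.rwmutex.Lock()
	defer c.rwmutex.Unlock()
	c.items[id] = status
}

func (c *reportCache) get(id string) string {
	c.rwmutex.RLock()
	defer c.rwmutex.RUnlock()
	return c.items[id]
}

func main() {
	cache := &reportCache{items: make(map[string]string)}

	var wg sync.WaitGroup
	for i := 0; i < 10; i++ {
		wg.Add(1)
		go func(i int) { // one goroutine per "watch event", as in GenerateMetrics
			defer wg.Done()
			cache.set(fmt.Sprintf("policy-report-%d", i), "Pass")
		}(i)
	}
	wg.Wait()

	fmt.Println(cache.get("policy-report-3")) // prints "Pass"
}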