mirror of
https://github.com/kyverno/policy-reporter.git
synced 2024-12-14 11:57:32 +00:00
Configure DebounceTime for CleanupEvents (#33)
* Configure DebounceTime for CleanupEvents * Fix Chart * Update Helm Charts
This commit is contained in:
parent
9cac2de3fb
commit
5b7a553aa4
16 changed files with 163 additions and 73 deletions
|
@ -1,5 +1,11 @@
|
|||
# Changelog
|
||||
|
||||
## 1.3.4
|
||||
|
||||
* Configure Debounce Time in seconds for Cleanup Events over Helm Chart
|
||||
* Helm Value `cleanupDebounceTime` - default: 20
|
||||
* Improved securityContext defaults
|
||||
|
||||
## 1.3.3
|
||||
|
||||
* Update Policy Reporter UI to v0.9.0
|
||||
|
|
|
@ -4,6 +4,6 @@ dependencies:
|
|||
version: 1.1.0
|
||||
- name: ui
|
||||
repository: ""
|
||||
version: 1.3.0
|
||||
digest: sha256:2e8942d0223c917557d3c6352a61baba02c15f26b1cad276c34c6609111b3682
|
||||
generated: "2021-04-29T11:45:31.55116+02:00"
|
||||
version: 1.3.1
|
||||
digest: sha256:9d4e26e7bdc5a7feaab8bbdf23568151506640645da104afece06a27d1608560
|
||||
generated: "2021-04-30T11:40:03.769829+02:00"
|
||||
|
|
|
@ -5,8 +5,8 @@ description: |
|
|||
It creates Prometheus Metrics and can send rule validation events to different targets like Loki, Elasticsearch, Slack or Discord
|
||||
|
||||
type: application
|
||||
version: 1.3.3
|
||||
appVersion: 1.3.3
|
||||
version: 1.3.4
|
||||
appVersion: 1.3.4
|
||||
|
||||
dependencies:
|
||||
- name: monitoring
|
||||
|
@ -16,4 +16,4 @@ dependencies:
|
|||
- name: ui
|
||||
condition: ui.enabled
|
||||
repository: ""
|
||||
version: "1.3.0"
|
||||
version: "1.3.1"
|
||||
|
|
|
@ -3,5 +3,5 @@ name: ui
|
|||
description: Policy Reporter UI
|
||||
|
||||
type: application
|
||||
version: 1.3.0
|
||||
version: 1.3.1
|
||||
appVersion: 0.9.0
|
||||
|
|
|
@ -19,13 +19,15 @@ deploymentStrategy: {}
|
|||
# maxUnavailable: 25%
|
||||
# type: RollingUpdate
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
securityContext:
|
||||
runAsUser: 1234
|
||||
runAsNonRoot: true
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
|
||||
# Key/value pairs that are attached to pods.
|
||||
podAnnotations: {}
|
||||
|
|
|
@ -48,6 +48,7 @@ spec:
|
|||
args:
|
||||
- --config=/app/config.yaml
|
||||
- --crd-version={{ .Values.crdVersion }}
|
||||
- --cleanup-debounce-time={{ .Values.cleanupDebounceTime }}
|
||||
{{- if or .Values.api.enabled .Values.ui.enabled }}
|
||||
- --apiPort=8080
|
||||
{{- end }}
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
image:
|
||||
repository: fjogeleit/policy-reporter
|
||||
pullPolicy: IfNotPresent
|
||||
tag: 1.3.2
|
||||
tag: 1.3.4
|
||||
|
||||
imagePullSecrets: []
|
||||
|
||||
|
@ -43,13 +43,15 @@ service:
|
|||
# integer number. This is the port for the service
|
||||
port: 2112
|
||||
|
||||
securityContext: {}
|
||||
# capabilities:
|
||||
# drop:
|
||||
# - ALL
|
||||
# readOnlyRootFilesystem: true
|
||||
# runAsNonRoot: true
|
||||
# runAsUser: 1000
|
||||
securityContext:
|
||||
runAsUser: 1234
|
||||
runAsNonRoot: true
|
||||
privileged: false
|
||||
allowPrivilegeEscalation: false
|
||||
readOnlyRootFilesystem: true
|
||||
capabilities:
|
||||
drop:
|
||||
- all
|
||||
|
||||
# Key/value pairs that are attached to pods.
|
||||
podAnnotations: {}
|
||||
|
@ -90,6 +92,13 @@ global:
|
|||
# PolicyReport CRD Version to use
|
||||
crdVersion: v1alpha1
|
||||
|
||||
# Debounce Time in seconds for Modify Events after a cleanup event (Report with 0 Results)
|
||||
# Used to prevent Policy Reporter from resending existing violations after Kyverno recreates PolicyReports
|
||||
# When an existing Report gets a Modify Event with 0 results it waits for the defined amount of time
|
||||
# for new Report Events and processes the latest incoming Event for this Report, which should be the complete recreated Report
|
||||
# the required amount of time can differ depending on the number of validated resources and policies
|
||||
cleanupDebounceTime: 20
|
||||
|
||||
api:
|
||||
enabled: false
|
||||
|
||||
|
|
|
@ -67,6 +67,10 @@ func loadConfig(cmd *cobra.Command) (*config.Config, error) {
|
|||
v.BindPFlag("crdVersion", flag)
|
||||
}
|
||||
|
||||
if flag := cmd.Flags().Lookup("cleanup-debounce-time"); flag != nil {
|
||||
v.BindPFlag("cleanupDebounceTime", flag)
|
||||
}
|
||||
|
||||
if flag := cmd.Flags().Lookup("apiPort"); flag != nil {
|
||||
v.BindPFlag("api.port", flag)
|
||||
v.BindPFlag("api.enabled", flag)
|
||||
|
|
|
@ -3,6 +3,7 @@ package cmd
|
|||
import (
|
||||
"context"
|
||||
"flag"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
"github.com/fjogeleit/policy-reporter/pkg/config"
|
||||
|
@ -25,6 +26,8 @@ func newRunCMD() *cobra.Command {
|
|||
return err
|
||||
}
|
||||
|
||||
log.Printf("[INFO] Configured DebounceTime %d", c.CleanupDebounceTime)
|
||||
|
||||
var k8sConfig *rest.Config
|
||||
if c.Kubeconfig != "" {
|
||||
k8sConfig, err = clientcmd.BuildConfigFromFlags("", c.Kubeconfig)
|
||||
|
@ -96,6 +99,7 @@ func newRunCMD() *cobra.Command {
|
|||
cmd.PersistentFlags().StringP("kubeconfig", "k", "", "absolute path to the kubeconfig file")
|
||||
cmd.PersistentFlags().StringP("config", "c", "", "target configuration file")
|
||||
cmd.PersistentFlags().StringP("crd-version", "v", "v1alpha1", "Policy Reporter CRD Version")
|
||||
cmd.PersistentFlags().IntP("cleanup-debounce-time", "t", 20, "DebounceTime in Seconds after a Report cleanup started.")
|
||||
cmd.PersistentFlags().IntP("apiPort", "a", 0, "http port for the optional rest api")
|
||||
|
||||
cmd.PersistentFlags().String("loki", "", "loki host: http://loki:3100")
|
||||
|
|
|
@ -52,14 +52,15 @@ type API struct {
|
|||
|
||||
// Config of the PolicyReporter
|
||||
type Config struct {
|
||||
Loki Loki `mapstructure:"loki"`
|
||||
Elasticsearch Elasticsearch `mapstructure:"elasticsearch"`
|
||||
Slack Slack `mapstructure:"slack"`
|
||||
Discord Discord `mapstructure:"discord"`
|
||||
Teams Teams `mapstructure:"teams"`
|
||||
UI UI `mapstructure:"ui"`
|
||||
API API `mapstructure:"api"`
|
||||
Kubeconfig string `mapstructure:"kubeconfig"`
|
||||
Namespace string `mapstructure:"namespace"`
|
||||
CRDVersion string `mapstructure:"crdVersion"`
|
||||
Loki Loki `mapstructure:"loki"`
|
||||
Elasticsearch Elasticsearch `mapstructure:"elasticsearch"`
|
||||
Slack Slack `mapstructure:"slack"`
|
||||
Discord Discord `mapstructure:"discord"`
|
||||
Teams Teams `mapstructure:"teams"`
|
||||
UI UI `mapstructure:"ui"`
|
||||
API API `mapstructure:"api"`
|
||||
Kubeconfig string `mapstructure:"kubeconfig"`
|
||||
Namespace string `mapstructure:"namespace"`
|
||||
CRDVersion string `mapstructure:"crdVersion"`
|
||||
CleanupDebounceTime int `mapstructure:"cleanupDebounceTime"`
|
||||
}
|
||||
|
|
|
@ -113,6 +113,7 @@ func (r *Resolver) PolicyReportClient(ctx context.Context) (report.PolicyClient,
|
|||
r.PolicyReportStore(),
|
||||
mapper,
|
||||
time.Now(),
|
||||
time.Duration(r.config.CleanupDebounceTime),
|
||||
)
|
||||
|
||||
r.policyClient = client
|
||||
|
@ -141,6 +142,7 @@ func (r *Resolver) ClusterPolicyReportClient(ctx context.Context) (report.Cluste
|
|||
r.ClusterPolicyReportStore(),
|
||||
mapper,
|
||||
time.Now(),
|
||||
time.Duration(r.config.CleanupDebounceTime),
|
||||
)
|
||||
|
||||
return r.clusterPolicyClient, nil
|
||||
|
|
|
@ -17,9 +17,10 @@ type clusterPolicyReportEvent struct {
|
|||
}
|
||||
|
||||
type clusterPolicyReportEventDebouncer struct {
|
||||
events map[string]clusterPolicyReportEvent
|
||||
channel chan<- clusterPolicyReportEvent
|
||||
mutx *sync.Mutex
|
||||
events map[string]clusterPolicyReportEvent
|
||||
channel chan clusterPolicyReportEvent
|
||||
mutx *sync.Mutex
|
||||
debounceTime time.Duration
|
||||
}
|
||||
|
||||
func (d *clusterPolicyReportEventDebouncer) Add(e clusterPolicyReportEvent) {
|
||||
|
@ -41,7 +42,7 @@ func (d *clusterPolicyReportEventDebouncer) Add(e clusterPolicyReportEvent) {
|
|||
d.mutx.Unlock()
|
||||
|
||||
go func() {
|
||||
time.Sleep(10 * time.Second)
|
||||
time.Sleep(d.debounceTime * time.Second)
|
||||
|
||||
d.mutx.Lock()
|
||||
if event, ok := d.events[e.report.GetIdentifier()]; ok {
|
||||
|
@ -65,6 +66,16 @@ func (d *clusterPolicyReportEventDebouncer) Add(e clusterPolicyReportEvent) {
|
|||
d.channel <- e
|
||||
}
|
||||
|
||||
func (d *clusterPolicyReportEventDebouncer) Reset() {
|
||||
d.mutx.Lock()
|
||||
d.events = make(map[string]clusterPolicyReportEvent)
|
||||
d.mutx.Unlock()
|
||||
}
|
||||
|
||||
func (d *clusterPolicyReportEventDebouncer) ReportChan() chan clusterPolicyReportEvent {
|
||||
return d.channel
|
||||
}
|
||||
|
||||
type clusterPolicyReportClient struct {
|
||||
policyAPI PolicyReportAdapter
|
||||
store *report.ClusterPolicyReportStore
|
||||
|
@ -75,6 +86,7 @@ type clusterPolicyReportClient struct {
|
|||
skipExisting bool
|
||||
started bool
|
||||
modifyHash map[string]string
|
||||
debouncer clusterPolicyReportEventDebouncer
|
||||
}
|
||||
|
||||
func (c *clusterPolicyReportClient) RegisterCallback(cb report.ClusterPolicyReportCallback) {
|
||||
|
@ -124,8 +136,6 @@ func (c *clusterPolicyReportClient) StartWatching() error {
|
|||
}
|
||||
|
||||
c.started = true
|
||||
reportChan := make(chan clusterPolicyReportEvent)
|
||||
|
||||
errorChan := make(chan error)
|
||||
go func() {
|
||||
for {
|
||||
|
@ -135,16 +145,12 @@ func (c *clusterPolicyReportClient) StartWatching() error {
|
|||
errorChan <- err
|
||||
}
|
||||
|
||||
debouncer := clusterPolicyReportEventDebouncer{
|
||||
events: make(map[string]clusterPolicyReportEvent, 0),
|
||||
mutx: new(sync.Mutex),
|
||||
channel: reportChan,
|
||||
}
|
||||
c.debouncer.Reset()
|
||||
|
||||
for result := range result.ResultChan() {
|
||||
if item, ok := result.Object.(*unstructured.Unstructured); ok {
|
||||
report := c.mapper.MapClusterPolicyReport(item.Object)
|
||||
debouncer.Add(clusterPolicyReportEvent{report, result.Type})
|
||||
c.debouncer.Add(clusterPolicyReportEvent{report, result.Type})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -154,7 +160,7 @@ func (c *clusterPolicyReportClient) StartWatching() error {
|
|||
}()
|
||||
|
||||
go func() {
|
||||
for event := range reportChan {
|
||||
for event := range c.debouncer.ReportChan() {
|
||||
c.executeClusterPolicyReportHandler(event.eventType, event.report)
|
||||
}
|
||||
|
||||
|
@ -273,12 +279,24 @@ func (c *clusterPolicyReportClient) RegisterPolicyResultWatcher(skipExisting boo
|
|||
}
|
||||
|
||||
// NewPolicyReportClient creates a new PolicyReportClient based on the kubernetes go-client
|
||||
func NewClusterPolicyReportClient(client PolicyReportAdapter, store *report.ClusterPolicyReportStore, mapper Mapper, startUp time.Time) report.ClusterPolicyClient {
|
||||
func NewClusterPolicyReportClient(
|
||||
client PolicyReportAdapter,
|
||||
store *report.ClusterPolicyReportStore,
|
||||
mapper Mapper,
|
||||
startUp time.Time,
|
||||
debounceTime time.Duration,
|
||||
) report.ClusterPolicyClient {
|
||||
return &clusterPolicyReportClient{
|
||||
policyAPI: client,
|
||||
store: store,
|
||||
mapper: mapper,
|
||||
startUp: startUp,
|
||||
modifyHash: make(map[string]string),
|
||||
debouncer: clusterPolicyReportEventDebouncer{
|
||||
events: make(map[string]clusterPolicyReportEvent, 0),
|
||||
mutx: new(sync.Mutex),
|
||||
channel: make(chan clusterPolicyReportEvent),
|
||||
debounceTime: debounceTime,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ func Test_FetchClusterPolicyReports(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
fakeAdapter.clusterPolicies = append(fakeAdapter.clusterPolicies, unstructured.Unstructured{Object: clusterPolicyMap})
|
||||
|
@ -58,6 +59,7 @@ func Test_FetchClusterPolicyReportsError(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
_, err := client.FetchClusterPolicyReports()
|
||||
|
@ -80,6 +82,7 @@ func Test_FetchClusterPolicyResults(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
fakeAdapter.clusterPolicies = append(fakeAdapter.clusterPolicies, unstructured.Unstructured{Object: clusterPolicyMap})
|
||||
|
@ -105,6 +108,7 @@ func Test_FetchClusterPolicyResultsError(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
_, err := client.FetchPolicyResults()
|
||||
|
@ -123,6 +127,7 @@ func Test_ClusterPolicyWatcher(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -158,6 +163,7 @@ func Test_ClusterPolicyWatcherTwice(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
go client.StartWatching()
|
||||
|
@ -213,6 +219,7 @@ func Test_SkipExisting(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(true)
|
||||
|
@ -253,6 +260,7 @@ func Test_WatcherError(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -273,6 +281,7 @@ func Test_WatchDeleteEvent(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -309,6 +318,7 @@ func Test_WatchDelayEvents(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -340,6 +350,7 @@ func Test_WatchDelayEventsWithoutClearEvent(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -370,6 +381,7 @@ func Test_WatchModifiedEvent(t *testing.T) {
|
|||
report.NewClusterPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
|
|
@ -17,9 +17,10 @@ type policyReportEvent struct {
|
|||
}
|
||||
|
||||
type policyReportEventDebouncer struct {
|
||||
events map[string]policyReportEvent
|
||||
channel chan<- policyReportEvent
|
||||
mutx *sync.Mutex
|
||||
events map[string]policyReportEvent
|
||||
channel chan policyReportEvent
|
||||
mutx *sync.Mutex
|
||||
debounceTime time.Duration
|
||||
}
|
||||
|
||||
func (d *policyReportEventDebouncer) Add(e policyReportEvent) {
|
||||
|
@ -41,7 +42,7 @@ func (d *policyReportEventDebouncer) Add(e policyReportEvent) {
|
|||
d.mutx.Unlock()
|
||||
|
||||
go func() {
|
||||
time.Sleep(10 * time.Second)
|
||||
time.Sleep(d.debounceTime * time.Second)
|
||||
|
||||
d.mutx.Lock()
|
||||
if event, ok := d.events[e.report.GetIdentifier()]; ok {
|
||||
|
@ -65,6 +66,16 @@ func (d *policyReportEventDebouncer) Add(e policyReportEvent) {
|
|||
d.channel <- e
|
||||
}
|
||||
|
||||
func (d *policyReportEventDebouncer) Reset() {
|
||||
d.mutx.Lock()
|
||||
d.events = make(map[string]policyReportEvent)
|
||||
d.mutx.Unlock()
|
||||
}
|
||||
|
||||
func (d *policyReportEventDebouncer) ReportChan() chan policyReportEvent {
|
||||
return d.channel
|
||||
}
|
||||
|
||||
type policyReportClient struct {
|
||||
policyAPI PolicyReportAdapter
|
||||
store *report.PolicyReportStore
|
||||
|
@ -75,6 +86,7 @@ type policyReportClient struct {
|
|||
skipExisting bool
|
||||
started bool
|
||||
modifyHash map[string]string
|
||||
debouncer policyReportEventDebouncer
|
||||
}
|
||||
|
||||
func (c *policyReportClient) RegisterCallback(cb report.PolicyReportCallback) {
|
||||
|
@ -124,7 +136,6 @@ func (c *policyReportClient) StartWatching() error {
|
|||
}
|
||||
|
||||
c.started = true
|
||||
reportChan := make(chan policyReportEvent)
|
||||
errorChan := make(chan error)
|
||||
|
||||
go func() {
|
||||
|
@ -135,16 +146,12 @@ func (c *policyReportClient) StartWatching() error {
|
|||
errorChan <- err
|
||||
}
|
||||
|
||||
debouncer := policyReportEventDebouncer{
|
||||
events: make(map[string]policyReportEvent, 0),
|
||||
mutx: new(sync.Mutex),
|
||||
channel: reportChan,
|
||||
}
|
||||
c.debouncer.Reset()
|
||||
|
||||
for result := range result.ResultChan() {
|
||||
if item, ok := result.Object.(*unstructured.Unstructured); ok {
|
||||
report := c.mapper.MapPolicyReport(item.Object)
|
||||
debouncer.Add(policyReportEvent{report, result.Type})
|
||||
c.debouncer.Add(policyReportEvent{report, result.Type})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -154,7 +161,7 @@ func (c *policyReportClient) StartWatching() error {
|
|||
}()
|
||||
|
||||
go func() {
|
||||
for event := range reportChan {
|
||||
for event := range c.debouncer.ReportChan() {
|
||||
c.executePolicyReportHandler(event.eventType, event.report)
|
||||
}
|
||||
|
||||
|
@ -274,12 +281,24 @@ func (c *policyReportClient) RegisterPolicyResultWatcher(skipExisting bool) {
|
|||
}
|
||||
|
||||
// NewPolicyReportClient creates a new PolicyReportClient based on the kubernetes go-client
|
||||
func NewPolicyReportClient(client PolicyReportAdapter, store *report.PolicyReportStore, mapper Mapper, startUp time.Time) report.PolicyClient {
|
||||
func NewPolicyReportClient(
|
||||
client PolicyReportAdapter,
|
||||
store *report.PolicyReportStore,
|
||||
mapper Mapper,
|
||||
startUp time.Time,
|
||||
debounceTime time.Duration,
|
||||
) report.PolicyClient {
|
||||
return &policyReportClient{
|
||||
policyAPI: client,
|
||||
store: store,
|
||||
mapper: mapper,
|
||||
startUp: startUp,
|
||||
modifyHash: make(map[string]string),
|
||||
debouncer: policyReportEventDebouncer{
|
||||
events: make(map[string]policyReportEvent, 0),
|
||||
mutx: new(sync.Mutex),
|
||||
channel: make(chan policyReportEvent),
|
||||
debounceTime: debounceTime,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
|
|
@ -25,6 +25,7 @@ func Test_FetchPolicyReports(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
fakeAdapter.policies = append(fakeAdapter.policies, unstructured.Unstructured{Object: policyMap})
|
||||
|
@ -58,6 +59,7 @@ func Test_FetchPolicyReportsError(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
_, err := client.FetchPolicyReports()
|
||||
|
@ -77,6 +79,7 @@ func Test_FetchPolicyResults(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
fakeAdapter.policies = append(fakeAdapter.policies, unstructured.Unstructured{Object: policyMap})
|
||||
|
@ -102,6 +105,7 @@ func Test_FetchPolicyResultsError(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
_, err := client.FetchPolicyResults()
|
||||
|
@ -120,6 +124,7 @@ func Test_PolicyWatcher(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -155,6 +160,7 @@ func Test_PolicyWatcherTwice(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
go client.StartWatching()
|
||||
|
@ -212,6 +218,7 @@ func Test_PolicySkipExisting(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(true)
|
||||
|
@ -253,6 +260,7 @@ func Test_PolicyWatcherError(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -273,6 +281,7 @@ func Test_PolicyWatchDeleteEvent(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -309,6 +318,7 @@ func Test_PolicyDelayReset(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -340,6 +350,7 @@ func Test_PolicyDelayWithoutClearEvent(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
@ -370,6 +381,7 @@ func Test_PolicyWatchModifiedEvent(t *testing.T) {
|
|||
report.NewPolicyReportStore(),
|
||||
NewMapper(k8sCMClient),
|
||||
time.Now(),
|
||||
10,
|
||||
)
|
||||
|
||||
client.RegisterPolicyResultWatcher(false)
|
||||
|
|
|
@ -65,8 +65,8 @@ func Test_ResultClient_FetchPolicyResults(t *testing.T) {
|
|||
mapper := NewMapper(k8sCMClient)
|
||||
|
||||
client := kubernetes.NewPolicyResultClient(
|
||||
kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now()),
|
||||
kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now()),
|
||||
kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now(), 5),
|
||||
kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now(), 5),
|
||||
)
|
||||
|
||||
fakeAdapter.policies = append(fakeAdapter.policies, unstructured.Unstructured{Object: policyMap})
|
||||
|
@ -92,8 +92,8 @@ func Test_ResultClient_FetchPolicyResultsPolicyReportError(t *testing.T) {
|
|||
mapper := NewMapper(k8sCMClient)
|
||||
|
||||
client := kubernetes.NewPolicyResultClient(
|
||||
kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now()),
|
||||
kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now()),
|
||||
kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now(), 5),
|
||||
kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now(), 5),
|
||||
)
|
||||
|
||||
_, err := client.FetchPolicyResults()
|
||||
|
@ -112,8 +112,8 @@ func Test_ResultClient_FetchPolicyResultsClusterPolicyReportError(t *testing.T)
|
|||
mapper := NewMapper(k8sCMClient)
|
||||
|
||||
client := kubernetes.NewPolicyResultClient(
|
||||
kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now()),
|
||||
kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now()),
|
||||
kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now(), 5),
|
||||
kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now(), 5),
|
||||
)
|
||||
|
||||
_, err := client.FetchPolicyResults()
|
||||
|
@ -129,8 +129,8 @@ func Test_ResultClient_RegisterPolicyResultWatcher(t *testing.T) {
|
|||
|
||||
mapper := NewMapper(k8sCMClient)
|
||||
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now())
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now())
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now(), 5)
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now(), 5)
|
||||
|
||||
client := kubernetes.NewPolicyResultClient(pClient, cpClient)
|
||||
|
||||
|
@ -166,8 +166,8 @@ func Test_ResultClient_SkipReportsWithoutResults(t *testing.T) {
|
|||
|
||||
mapper := NewMapper(k8sCMClient)
|
||||
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now())
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now())
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now(), 5)
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now(), 5)
|
||||
|
||||
client := kubernetes.NewPolicyResultClient(pClient, cpClient)
|
||||
|
||||
|
@ -239,8 +239,8 @@ func Test_ResultClient_SkipReportsCleanUpEvents(t *testing.T) {
|
|||
|
||||
mapper := NewMapper(k8sCMClient)
|
||||
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now())
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now())
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now(), 5)
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now(), 5)
|
||||
|
||||
client := kubernetes.NewPolicyResultClient(pClient, cpClient)
|
||||
|
||||
|
@ -312,8 +312,8 @@ func Test_ResultClient_SkipReportsReconnectEvents(t *testing.T) {
|
|||
|
||||
mapper := NewMapper(k8sCMClient)
|
||||
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now())
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now())
|
||||
pClient := kubernetes.NewPolicyReportClient(fakeAdapter, report.NewPolicyReportStore(), mapper, time.Now(), 5)
|
||||
cpClient := kubernetes.NewClusterPolicyReportClient(fakeAdapter, report.NewClusterPolicyReportStore(), mapper, time.Now(), 5)
|
||||
|
||||
client := kubernetes.NewPolicyResultClient(pClient, cpClient)
|
||||
|
||||
|
|
Loading…
Reference in a new issue