mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-29 02:45:06 +00:00)

- remove job controller; - remove in-memory configmap; - clean up kustomize manifest

parent 7a1630ca05
commit 25409e42ea

22 changed files with 27 additions and 1513 deletions

@@ -1,25 +0,0 @@
{{- if (not .Values.job.enableBackgroundScan) }}
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: {{ .Values.job.name | quote }}
  labels: {{ include "kyverno.labels" . | nindent 4 }}
  namespace: {{ .Release.Namespace }}
spec:
  schedule: {{ .Values.job.schedule | quote }}
  concurrencyPolicy : Forbid
  suspend : true
  successfulJobsHistoryLimit : 4
  failedJobsHistoryLimit : 4
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: {{ .Values.job.name | quote }}
            image: {{ .Values.job.image | quote }}
            args:
            - report
            - all
          restartPolicy: OnFailure
{{- end -}}

@@ -128,9 +128,3 @@ service:
# If letting Kyverno create its own CA or providing your own, make createSelfSignedCert is false
createSelfSignedCert: false


job:
  name: policyreport-background-scan
  enableBackgroundScan : true
  schedule : "*/6 * * * *"
  image : "nirmata/kyverno-cli:latest"

@@ -22,7 +22,6 @@ import (
	"github.com/kyverno/kyverno/pkg/policy"
	"github.com/kyverno/kyverno/pkg/policycache"
	"github.com/kyverno/kyverno/pkg/policyreport"
	"github.com/kyverno/kyverno/pkg/policyreport/jobs"
	"github.com/kyverno/kyverno/pkg/policystatus"
	"github.com/kyverno/kyverno/pkg/policyviolation"
	"github.com/kyverno/kyverno/pkg/resourcecache"

@@ -192,10 +191,6 @@ func main() {
		pInformer.Kyverno().V1().ClusterPolicies().Lister(),
		pInformer.Kyverno().V1().Policies().Lister())

	// Job Controller
	// - Create Jobs for report
	jobController := jobs.NewJobsJob(client, configData, log.Log.WithName("jobController"))

	// POLICY VIOLATION GENERATOR
	// -- generate policy violation
	var pvgen *policyviolation.Generator

@@ -207,7 +202,6 @@ func main() {
		pInformer.Policy().V1alpha1().ClusterPolicyReports(),
		pInformer.Policy().V1alpha1().PolicyReports(),
		statusSync.Listener,
		jobController,
		log.Log.WithName("PolicyViolationGenerator"),
		stopCh,
	)

@@ -222,7 +216,6 @@ func main() {
		pInformer.Policy().V1alpha1().ClusterPolicyReports(),
		pInformer.Policy().V1alpha1().PolicyReports(),
		statusSync.Listener,
		jobController,
		log.Log.WithName("PolicyReportGenerator"),
	)
}

@@ -246,7 +239,6 @@ func main() {
		kubeInformer.Core().V1().Namespaces(),
		log.Log.WithName("PolicyController"),
		rCache,
		jobController,
	)

	if err != nil {

@@ -388,7 +380,6 @@ func main() {
	go statusSync.Run(1, stopCh)
	go pCacheController.Run(1, stopCh)
	go auditHandler.Run(10, stopCh)
	go jobController.Run(2, stopCh)
	openAPISync.Run(1, stopCh)

	// verifies if the admission control is enabled and active

@@ -397,11 +388,6 @@ func main() {
	// max deadline: deadline*3 (set the deployment annotation as false)
	server.RunAsync(stopCh)

	// Create a sync Job for policy report
	jobController.Add(jobs.JobInfo{
		JobType: "POLICYSYNC",
		JobData: "",
	})
	<-stopCh

	// by default http.Server waits indefinitely for connections to return to idle and then shuts down

@@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.2.4
    controller-gen.kubebuilder.io/version: v0.2.5
  creationTimestamp: null
  name: clusterpolicyreports.policy.kubernetes.io
spec:

@@ -66,6 +66,9 @@ spec:
          description: PolicyReportResult provides the result for an individual
            policy
          properties:
            category:
              description: Category indicates policy category
              type: string
            data:
              additionalProperties:
                type: string

@@ -196,6 +199,13 @@ spec:
            scored:
              description: Scored indicates if this policy rule is scored
              type: boolean
            severity:
              description: Severity indicates policy severity
              enum:
              - High
              - Low
              - Medium
              type: string
            status:
              description: Status indicates the result of the policy rule check
              enum:

@@ -4,7 +4,7 @@ apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.2.4
    controller-gen.kubebuilder.io/version: v0.2.5
  creationTimestamp: null
  name: policyreports.policy.kubernetes.io
spec:

@@ -65,6 +65,9 @@ spec:
          description: PolicyReportResult provides the result for an individual
            policy
          properties:
            category:
              description: Category indicates policy category
              type: string
            data:
              additionalProperties:
                type: string

@@ -195,6 +198,13 @@ spec:
            scored:
              description: Scored indicates if this policy rule is scored
              type: boolean
            severity:
              description: Severity indicates policy severity
              enum:
              - High
              - Low
              - Medium
              type: string
            status:
              description: Status indicates the result of the policy rule check
              enum:

@@ -1452,7 +1452,6 @@ rules:
  - networkpolicies
  - secrets
  - configmaps
  - jobs
  - resourcequotas
  - limitranges
  - clusterroles

@@ -1619,7 +1618,6 @@ rules:
  resources:
  - policyreports
  - clusterpolicyreports
  - jobs
  - pods
  verbs:
  - get

@@ -1794,14 +1792,6 @@ metadata:
  namespace: kyverno
---
apiVersion: v1
data:
  App: ""
kind: ConfigMap
metadata:
  name: kyverno-event
  namespace: kyverno
---
apiVersion: v1
kind: Service
metadata:
  labels:

@@ -1881,26 +1871,3 @@ spec:
        imagePullPolicy: Always
        name: kyverno-pre
      serviceAccountName: kyverno-service-account
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: policyreport-background-scan
  namespace: kyverno
spec:
  concurrencyPolicy: Forbid
  failedJobsHistoryLimit: 4
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - args:
            - report
            - all
            image: evalsocket/kyverno-cli:latest
            name: policyreport-background-scan
          restartPolicy: OnFailure
  schedule: '*/6 * * * *'
  successfulJobsHistoryLimit: 4
  suspend: true

@@ -1452,7 +1452,6 @@ rules:
  - networkpolicies
  - secrets
  - configmaps
  - jobs
  - resourcequotas
  - limitranges
  - clusterroles

@@ -1619,7 +1618,6 @@ rules:
  resources:
  - policyreports
  - clusterpolicyreports
  - jobs
  - pods
  verbs:
  - get

@@ -1794,14 +1792,6 @@ metadata:
  namespace: kyverno
---
apiVersion: v1
data:
  App: ""
kind: ConfigMap
metadata:
  name: kyverno-event
  namespace: kyverno
---
apiVersion: v1
kind: Service
metadata:
  labels:

@@ -1814,26 +1804,3 @@ spec:
      targetPort: 443
  selector:
    app: kyverno
---
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: policyreport-background-scan
  namespace: kyverno
spec:
  concurrencyPolicy: Forbid
  failedJobsHistoryLimit: 4
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - args:
            - report
            - all
            image: evalsocket/kyverno-cli:latest
            name: policyreport-background-scan
          restartPolicy: OnFailure
  schedule: '*/6 * * * *'
  successfulJobsHistoryLimit: 4
  suspend: true

@@ -6,11 +6,3 @@ kind: ConfigMap
metadata:
  name: init-config
  namespace: kyverno
---
apiVersion: v1
data:
  App: ''
kind: ConfigMap
metadata:
  name: kyverno-event
  namespace: kyverno

@@ -1,22 +0,0 @@
apiVersion: batch/v1beta1
kind: CronJob
metadata:
  name: policyreport-background-scan
  namespace: kyverno
spec:
  schedule: "*/6 * * * *"
  concurrencyPolicy : Forbid
  suspend : true
  successfulJobsHistoryLimit : 4
  failedJobsHistoryLimit : 4
  jobTemplate:
    spec:
      template:
        spec:
          containers:
          - name: policyreport-background-scan
            image: evalsocket/kyverno-cli:latest
            args:
            - report
            - all
          restartPolicy: OnFailure

@@ -3,5 +3,4 @@ kind: Kustomization

resources:
- ./rbac.yaml
- ./configmap.yaml
- ./cronjob.yaml
- ./configmap.yaml

@@ -223,7 +223,6 @@ rules:
  - networkpolicies
  - secrets
  - configmaps
  - jobs
  - resourcequotas
  - limitranges
  - clusterroles

@@ -291,7 +290,6 @@ rules:
  resources:
  - policyreports
  - clusterpolicyreports
  - jobs
  - pods
  verbs: ["get", "list", "watch", "delete"]
---

@@ -61,9 +61,6 @@ var (
	// Kyverno CLI Image
	KyvernoCliImage = "nirmata/kyverno-cli:latest"

	// ConfimapNameForPolicyReport
	ConfimapNameForPolicyReport = "kyverno-event"

	//WebhookServiceName default kyverno webhook service name
	WebhookServiceName = getWebhookServiceName()

@@ -1,67 +0,0 @@
package report

// import (
// "fmt"
// "os"
// "sync"
// "time"

// "github.com/kyverno/kyverno/pkg/common"
// "github.com/kyverno/kyverno/pkg/constant"
// "github.com/kyverno/kyverno/pkg/utils"
// "github.com/spf13/cobra"
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// "k8s.io/cli-runtime/pkg/genericclioptions"
// log "sigs.k8s.io/controller-runtime/pkg/log"
// )

// func AllReportsCommand() *cobra.Command {
// kubernetesConfig := genericclioptions.NewConfigFlags(true)
// var mode, namespace, policy string
// cmd := &cobra.Command{
// Use: "all",
// Short: "generate report for all scope",
// Example: fmt.Sprintf("To create a namespace report from background scan:\nkyverno report namespace --namespace=defaults \n kyverno report namespace"),
// RunE: func(cmd *cobra.Command, args []string) (err error) {
// os.Setenv("POLICY-TYPE", common.PolicyReport)
// logger := log.Log.WithName("Report")
// restConfig, err := kubernetesConfig.ToRESTConfig()
// if err != nil {
// logger.Error(err, "failed to create rest config of kubernetes cluster ")
// os.Exit(1)
// }
// const resyncPeriod = 1 * time.Second
// kubeClient, err := utils.NewKubeClient(restConfig)
// if err != nil {
// log.Log.Error(err, "Failed to create kubernetes client")
// os.Exit(1)
// }
// var wg sync.WaitGroup
// if mode == "cli" {
// if namespace != "" {
// wg.Add(1)
// go backgroundScan(namespace, constant.All, policy, &wg, restConfig, logger)
// } else {
// ns, err := kubeClient.CoreV1().Namespaces().List(metav1.ListOptions{})
// if err != nil {
// os.Exit(1)
// }
// wg.Add(len(ns.Items))
// for _, n := range ns.Items {
// go backgroundScan(n.GetName(), constant.All, policy, &wg, restConfig, logger)
// }
// }
// } else {
// wg.Add(1)
// go configmapScan(constant.All, &wg, restConfig, logger)
// }
// wg.Wait()
// os.Exit(0)
// return nil
// },
// }
// cmd.Flags().StringVarP(&namespace, "namespace", "n", "", "define specific namespace")
// cmd.Flags().StringVarP(&policy, "policy", "p", "", "define specific policy")
// cmd.Flags().StringVarP(&mode, "mode", "m", "cli", "mode")
// return cmd
// }

@@ -1,47 +0,0 @@
package report

// import (
// "fmt"
// "os"
// "sync"

// "github.com/kyverno/kyverno/pkg/common"
// "github.com/kyverno/kyverno/pkg/constant"
// "github.com/spf13/cobra"
// "k8s.io/cli-runtime/pkg/genericclioptions"
// log "sigs.k8s.io/controller-runtime/pkg/log"
// )

// func ClusterCommand() *cobra.Command {
// kubernetesConfig := genericclioptions.NewConfigFlags(true)
// var mode, policy string
// cmd := &cobra.Command{
// Use: "cluster",
// Short: "generate report for cluster scope",
// Example: fmt.Sprintf("To create a cluster report from background scan: kyverno report cluster"),
// RunE: func(cmd *cobra.Command, args []string) (err error) {
// os.Setenv("POLICY-TYPE", common.PolicyReport)
// logger := log.Log.WithName("Report")
// restConfig, err := kubernetesConfig.ToRESTConfig()
// if err != nil {
// logger.Error(err, "failed to create rest config of kubernetes cluster ")
// os.Exit(1)
// }
// var wg sync.WaitGroup
// wg.Add(1)
// if mode == "cli" {
// go backgroundScan("", constant.Cluster, policy, &wg, restConfig, logger)
// wg.Wait()
// os.Exit(0)
// }
// go configmapScan(constant.Cluster, &wg, restConfig, logger)
// wg.Wait()
// os.Exit(0)
// return nil
// },
// }
// cmd.Flags().StringVarP(&mode, "mode", "m", "cli", "mode of cli")
// cmd.Flags().StringVarP(&policy, "policy", "p", "", "define specific policy")

// return cmd
// }

@@ -1,32 +0,0 @@
package report

import (
	"fmt"

	"github.com/spf13/cobra"
)

type resultCounts struct {
	pass  int
	fail  int
	warn  int
	error int
	skip  int
}

func Command() *cobra.Command {
	var cmd *cobra.Command
	cmd = &cobra.Command{
		Use:     "report",
		Short:   "generate report",
		Example: fmt.Sprintf("To create a report from background scan:\nkyverno report"),
		RunE: func(cmd *cobra.Command, policyPaths []string) (err error) {
			cmd.Help()
			return err
		},
	}
	// cmd.AddCommand(NamespaceCommand())
	// cmd.AddCommand(ClusterCommand())
	// cmd.AddCommand(AllReportsCommand())
	return cmd
}

@@ -1,647 +0,0 @@
package report

// import (
// "encoding/json"
// "fmt"

// "github.com/go-logr/logr"
// kyvernov1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
// report "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
// kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
// kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
// "github.com/kyverno/kyverno/pkg/config"
// "github.com/kyverno/kyverno/pkg/constant"
// client "github.com/kyverno/kyverno/pkg/dclient"
// "github.com/kyverno/kyverno/pkg/engine"
// "github.com/kyverno/kyverno/pkg/engine/context"
// "github.com/kyverno/kyverno/pkg/engine/response"
// "github.com/kyverno/kyverno/pkg/policy"
// "github.com/kyverno/kyverno/pkg/policyreport"
// "github.com/kyverno/kyverno/pkg/utils"
// corev1 "k8s.io/api/core/v1"
// v1 "k8s.io/api/core/v1"
// apierrors "k8s.io/apimachinery/pkg/api/errors"
// "k8s.io/apimachinery/pkg/labels"
// "k8s.io/client-go/tools/cache"

// "os"
// "strings"
// "sync"
// "time"

// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
// "k8s.io/apimachinery/pkg/runtime"
// kubeinformers "k8s.io/client-go/informers"
// "k8s.io/client-go/rest"
// log "sigs.k8s.io/controller-runtime/pkg/log"
// )

// func backgroundScan(n, scope, policychange string, wg *sync.WaitGroup, restConfig *rest.Config, logger logr.Logger) {
// lgr := logger.WithValues("namespace", n, "scope", scope, "policychange", policychange)
// defer func() {
// wg.Done()
// }()
// dClient, err := client.NewClient(restConfig, 5*time.Minute, make(chan struct{}), lgr)
// if err != nil {
// lgr.Error(err, "Error in creating dcclient with provided rest config")
// os.Exit(1)
// }

// kclient, err := kyvernoclient.NewForConfig(restConfig)
// if err != nil {
// lgr.Error(err, "Error in creating kyverno client with provided rest config")
// os.Exit(1)
// }
// kubeClient, err := utils.NewKubeClient(restConfig)
// if err != nil {
// lgr.Error(err, "Error in creating kube client with provided rest config")
// os.Exit(1)
// }
// pclient, err := kyvernoclient.NewForConfig(restConfig)
// if err != nil {
// lgr.Error(err, "Error in creating kyverno client for policy with provided rest config")
// os.Exit(1)
// }
// var stopCh <-chan struct{}
// const resyncPeriod = 15 * time.Minute

// kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)
// pInformer := kyvernoinformer.NewSharedInformerFactoryWithOptions(pclient, resyncPeriod)
// ci := kubeInformer.Core().V1().ConfigMaps()
// pi := pInformer.Kyverno().V1().Policies()
// np := kubeInformer.Core().V1().Namespaces()

// go np.Informer().Run(stopCh)

// nSynced := np.Informer().HasSynced

// cpi := pInformer.Kyverno().V1().ClusterPolicies()
// go ci.Informer().Run(stopCh)
// go pi.Informer().Run(stopCh)
// go cpi.Informer().Run(stopCh)
// cSynced := ci.Informer().HasSynced
// piSynced := pi.Informer().HasSynced
// cpiSynced := cpi.Informer().HasSynced
// if !cache.WaitForCacheSync(stopCh, cSynced, piSynced, cpiSynced, nSynced) {
// lgr.Error(err, "Failed to Create kubernetes client")
// os.Exit(1)
// }

// configData := config.NewConfigData(
// kubeClient,
// ci,
// "",
// "",
// "",
// lgr.WithName("ConfigData"),
// )
// var cpolicies []*kyvernov1.ClusterPolicy
// removePolicy := []string{}
// policySelector := strings.Split(policychange, ",")
// if len(policySelector) > 0 && policychange != "" {
// for _, v := range policySelector {
// cpolicy, err := cpi.Lister().Get(v)
// if err != nil {
// if apierrors.IsNotFound(err) {
// removePolicy = append(removePolicy, v)
// }
// } else {
// cpolicies = append(cpolicies, cpolicy)
// }
// for _, v := range policySelector {
// policies, err := pi.Lister().List(labels.Everything())
// if err == nil {
// for _, p := range policies {
// if v == p.GetName() {
// cp := policy.ConvertPolicyToClusterPolicy(p)
// cpolicies = append(cpolicies, cp)
// }

// }
// }
// }

// }
// } else {
// cpolicies, err = cpi.Lister().List(labels.Everything())
// if err != nil {
// lgr.Error(err, "Error in geting cluster policy list")
// os.Exit(1)
// }
// policies, err := pi.Lister().List(labels.Everything())
// if err != nil {
// lgr.Error(err, "Error in geting policy list")
// os.Exit(1)
// }

// for _, p := range policies {
// cp := policy.ConvertPolicyToClusterPolicy(p)
// cpolicies = append(cpolicies, cp)
// }
// }

// // key uid
// resourceMap := map[string]map[string]unstructured.Unstructured{}
// for _, p := range cpolicies {
// for _, rule := range p.Spec.Rules {
// for _, k := range rule.MatchResources.Kinds {
// resourceSchema, _, err := dClient.DiscoveryClient.FindResource("", k)
// if err != nil {
// lgr.Error(err, "failed to find resource", "kind", k)
// continue
// }
// if !resourceSchema.Namespaced {
// rMap := policy.GetResourcesPerNamespace(k, dClient, "", rule, configData, log.Log)
// if len(resourceMap[constant.Cluster]) == 0 {
// resourceMap[constant.Cluster] = make(map[string]unstructured.Unstructured)
// }
// policy.MergeResources(resourceMap[constant.Cluster], rMap)
// } else {
// namespaces := policy.GetNamespacesForRule(&rule, np.Lister(), log.Log)
// for _, ns := range namespaces {
// if ns == n {
// rMap := policy.GetResourcesPerNamespace(k, dClient, ns, rule, configData, log.Log)

// if len(resourceMap[constant.Namespace]) == 0 {
// resourceMap[constant.Namespace] = make(map[string]unstructured.Unstructured)
// }
// policy.MergeResources(resourceMap[constant.Namespace], rMap)
// }
// }
// }
// }
// }
// if p.HasAutoGenAnnotation() {
// switch scope {
// case constant.Cluster:
// resourceMap[constant.Cluster] = policy.ExcludePod(resourceMap[constant.Cluster], log.Log)
// delete(resourceMap, constant.Namespace)
// break
// case constant.Namespace:
// resourceMap[constant.Namespace] = policy.ExcludePod(resourceMap[constant.Namespace], log.Log)
// delete(resourceMap, constant.Cluster)
// break
// case constant.All:
// resourceMap[constant.Cluster] = policy.ExcludePod(resourceMap[constant.Cluster], log.Log)
// resourceMap[constant.Namespace] = policy.ExcludePod(resourceMap[constant.Namespace], log.Log)
// }
// }

// results := make(map[string][]policyreport.PolicyReportResult)
// for key := range resourceMap {
// for _, resource := range resourceMap[key] {
// policyContext := engine.PolicyContext{
// NewResource: resource,
// Context: context.NewContext(),
// Policy: *p,
// ExcludeGroupRole: configData.GetExcludeGroupRole(),
// }

// results = createResults(policyContext, key, results)
// }
// }

// for k := range results {
// if k == "" {
// continue
// }

// err := createReport(kclient, k, results[k], removePolicy, lgr)
// if err != nil {
// continue
// }
// }
// }
// }

// func createReport(kclient *kyvernoclient.Clientset, name string, results []policyreport.PolicyReportResult, removePolicy []string, lgr logr.Logger) error {

// var scope, ns string
// if strings.Contains(name, "clusterpolicyreport") {
// scope = constant.Cluster
// } else if strings.Contains(name, "policyreport-ns-") {
// scope = constant.Namespace
// ns = strings.ReplaceAll(name, "policyreport-ns-", "")
// }

// if scope == constant.Namespace {
// availablepr, err := kclient.PolicyV1alpha1().PolicyReports(ns).Get(name, metav1.GetOptions{})
// if err != nil {
// if apierrors.IsNotFound(err) {
// availablepr = initPolicyReport(scope, ns, name)
// } else {
// return err
// }
// }
// availablepr, action := mergeReport(availablepr, results, removePolicy)

// if action == "Create" {
// availablepr.SetLabels(map[string]string{
// "policy-state": "state",
// })
// _, err := kclient.PolicyV1alpha1().PolicyReports(availablepr.GetNamespace()).Create(availablepr)
// if err != nil {
// lgr.Error(err, "Error in Create policy report", "appreport", name)
// return err
// }
// } else {
// _, err := kclient.PolicyV1alpha1().PolicyReports(availablepr.GetNamespace()).Update(availablepr)
// if err != nil {
// lgr.Error(err, "Error in update policy report", "appreport", name)
// return err
// }
// }
// } else {
// availablepr, err := kclient.PolicyV1alpha1().ClusterPolicyReports().Get(name, metav1.GetOptions{})
// if err != nil {
// if apierrors.IsNotFound(err) {
// availablepr = initClusterPolicyReport(scope, name)
// } else {
// return err
// }
// }
// availablepr, action := mergeClusterReport(availablepr, results, removePolicy)

// if action == "Create" {
// _, err := kclient.PolicyV1alpha1().ClusterPolicyReports().Create(availablepr)
// if err != nil {
// lgr.Error(err, "Error in Create policy report", "appreport", availablepr)
// return err
// }
// } else {
// _, err := kclient.PolicyV1alpha1().ClusterPolicyReports().Update(availablepr)
// if err != nil {
// lgr.Error(err, "Error in update policy report", "appreport", name)
// return err
// }
// }
// }
// return nil
// }

// func createResults(policyContext engine.PolicyContext, key string, results map[string][]policyreport.PolicyReportResult) map[string][]policyreport.PolicyReportResult {

// var engineResponses []response.EngineResponse
// engineResponse := engine.Validate(policyContext)

// if len(engineResponse.PolicyResponse.Rules) > 0 {
// engineResponses = append(engineResponses, engineResponse)
// }

// engineResponse = engine.Mutate(policyContext)
// if len(engineResponse.PolicyResponse.Rules) > 0 {
// engineResponses = append(engineResponses, engineResponse)
// }

// pv := policyreport.GeneratePRsFromEngineResponse(engineResponses, log.Log)

// for _, v := range pv {
// var appname string
// if key == constant.Namespace {
// appname = fmt.Sprintf("policyreport-ns-%s", policyContext.NewResource.GetNamespace())
// } else {
// appname = fmt.Sprintf("clusterpolicyreport")
// }

// if appname != "" {
// builder := policyreport.NewPrBuilder()
// pv := builder.Generate(v)

// for _, e := range pv.Spec.ViolatedRules {
// result := &policyreport.PolicyReportResult{
// Policy: pv.Spec.Policy,
// Rule: e.Name,
// Message: e.Message,
// }
// rd := &policyreport.ResourceStatus{
// Resource: &corev1.ObjectReference{
// Kind: pv.Spec.Kind,
// Namespace: pv.Spec.Namespace,
// APIVersion: pv.Spec.APIVersion,
// Name: pv.Spec.Name,
// },
// Status: policyreport.PolicyStatus(e.Check),
// }
// result.Resources = append(result.Resources, rd)
// results[appname] = append(results[appname], *result)
// }
// }
// }
// return results
// }

// func configmapScan(scope string, wg *sync.WaitGroup, restConfig *rest.Config, logger logr.Logger) {
// defer func() {
// wg.Done()
// }()

// lgr := logger.WithValues("scope", scope)
// dClient, err := client.NewClient(restConfig, 5*time.Minute, make(chan struct{}), lgr)
// if err != nil {
// lgr.Error(err, "Error in creating dcclient with provided rest config")
// os.Exit(1)
// }

// kclient, err := kyvernoclient.NewForConfig(restConfig)
// if err != nil {
// lgr.Error(err, "Error in creating kyverno client with provided rest config")
// os.Exit(1)
// }

// configmap, err := dClient.GetResource("", "ConfigMap", config.KubePolicyNamespace, config.ConfimapNameForPolicyReport)
// if err != nil {
// lgr.Error(err, "Error in getting configmap")
// os.Exit(1)
// }
// var job *v1.ConfigMap
// if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configmap.UnstructuredContent(), &job); err != nil {
// lgr.Error(err, "Error in converting resource to Default Unstructured Converter")
// os.Exit(1)
// }
// response := make(map[string]map[string][]policyreport.Info)
// var temp = map[string][]policyreport.Info{}
// if scope == constant.Cluster {
// if err := json.Unmarshal([]byte(job.Data[constant.Cluster]), &temp); err != nil {
// lgr.Error(err, "Error in json marshal of namespace data")
// }
// response[constant.Cluster] = temp
// delete(job.Data, constant.Namespace)
// } else if scope == constant.Namespace {
// if err := json.Unmarshal([]byte(job.Data[constant.Namespace]), &temp); err != nil {
// lgr.Error(err, "Error in json marshal of namespace data")
// }
// response[constant.Namespace] = temp
// delete(job.Data, constant.Cluster)
// } else {
// if err := json.Unmarshal([]byte(job.Data[constant.Cluster]), &temp); err != nil {
// lgr.Error(err, "Error in json marshal of namespace data")
// }
// response[constant.Cluster] = temp

// temp = make(map[string][]policyreport.Info)
// if err := json.Unmarshal([]byte(job.Data[constant.Namespace]), &temp); err != nil {
// lgr.Error(err, "Error in json marshal of namespace data")
// }
// response[constant.Namespace] = temp
// }
// var results = make(map[string][]policyreport.PolicyReportResult)
// var ns []string
// for k := range response {
// for n, infos := range response[k] {
// for _, v := range infos {
// for _, r := range v.Rules {
// builder := policyreport.NewPrBuilder()
// pv := builder.Generate(v)
// result := &policyreport.PolicyReportResult{
// Policy: pv.Spec.Policy,
// Rule: r.Name,
// Message: r.Message,
// }
// rd := &policyreport.ResourceStatus{
// Resource: &corev1.ObjectReference{
// Kind: pv.Spec.Kind,
// Namespace: pv.Spec.Namespace,
// APIVersion: pv.Spec.APIVersion,
// Name: pv.Spec.Name,
// },
// Status: policyreport.PolicyStatus(r.Check),
// }
// result.Resources = append(result.Resources, rd)

// if !strings.Contains(strings.Join(ns, ","), v.Resource.GetNamespace()) {
// ns = append(ns, n)
// }

// var appname string
// if k == constant.Cluster {
// appname = fmt.Sprintf("clusterpolicyreport")
// }

// if k == constant.Namespace {
// appname = fmt.Sprintf("policyreport-ns-%s", v.Resource.GetNamespace())
// }

// results[appname] = append(results[appname], *result)
// }
// }
// }
// }

// for k := range results {
// if k == "" {
// continue
// }
// err := createReport(kclient, k, results[k], []string{}, lgr)
// if err != nil {
// continue
// }
// }
// }

// func mergeReport(pr *policyreport.PolicyReport, results []policyreport.PolicyReportResult, removePolicy []string) (*policyreport.PolicyReport, string) {
// labels := pr.GetLabels()
// var action string
// if labels["policy-state"] == "init" {
// action = "Create"
// pr.SetLabels(map[string]string{
// "policy-state": "Process",
// })
// } else {
// action = "Update"
// }
// rules := make(map[string]*policyreport.PolicyReportResult, 0)

// for _, v := range pr.Results {
// for _, r := range v.Resources {
// key := fmt.Sprintf("%s-%s-%s", v.Policy, v.Rule, pr.GetName())
// if _, ok := rules[key]; ok {
// isExist := false
// for _, resourceStatus := range rules[key].Resources {
// if resourceStatus.Resource.APIVersion == r.Resource.APIVersion && r.Resource.Kind == resourceStatus.Resource.Kind && r.Resource.Namespace == resourceStatus.Resource.Namespace && r.Resource.Name == resourceStatus.Resource.Name {
// isExist = true
// resourceStatus = r
// }
// }
// if !isExist {
// rules[key].Resources = append(rules[key].Resources, r)
// }
// } else {
// rules[key] = &policyreport.PolicyReportResult{
// Policy: v.Policy,
// Rule: v.Rule,
// Message: v.Message,
// // Status: v.Status,
// Resources: make([]*policyreport.ResourceStatus, 0),
// }

// rules[key].Resources = append(rules[key].Resources, r)
// }
// }
// }
// for _, v := range results {
// for _, r := range v.Resources {
// key := fmt.Sprintf("%s-%s-%s", v.Policy, v.Rule, pr.GetName())
// if _, ok := rules[key]; ok {
// isExist := false
// for _, resourceStatus := range rules[key].Resources {
// if resourceStatus.Resource.APIVersion == r.Resource.APIVersion && r.Resource.Kind == resourceStatus.Resource.Kind && r.Resource.Namespace == resourceStatus.Resource.Namespace && r.Resource.Name == resourceStatus.Resource.Name {
// isExist = true
// resourceStatus = r
// }
// }
// if !isExist {
// rules[key].Resources = append(rules[key].Resources, r)
// }
// } else {
// rules[key] = &policyreport.PolicyReportResult{
// Policy: v.Policy,
// Rule: v.Rule,
// Message: v.Message,
// // Status: v.Status,
// Resources: make([]*policyreport.ResourceStatus, 0),
// }
// rules[key].Resources = append(rules[key].Resources, r)
// }
// }
// }

// if len(removePolicy) > 0 {
// for _, v := range removePolicy {
// for k, r := range rules {
// if r.Policy == v {
// delete(rules, k)
// }
// }
// }
// }
// pr.Summary.Pass = 0
// pr.Summary.Fail = 0
// pr.Results = make([]*policyreport.PolicyReportResult, 0)
// for k := range rules {
// pr.Results = append(pr.Results, rules[k])
// for _, r := range rules[k].Resources {
// if string(r.Status) == "Pass" {
// pr.Summary.Pass++
// } else {
// pr.Summary.Fail++
// }

// }
// }
// return pr, action
// }

// func mergeClusterReport(pr *policyreport.ClusterPolicyReport, results []policyreport.PolicyReportResult, removePolicy []string) (*policyreport.ClusterPolicyReport, string) {
// labels := pr.GetLabels()
// var action string
// if labels["policy-state"] == "init" {
// action = "Create"
// pr.SetLabels(map[string]string{
// "policy-state": "Process",
// })
// } else {
// action = "Update"
// }

// for _, r := range pr.Results {
// for _, v := range results {
// if r.Policy == v.Policy && r.Rule == v.Rule {
// for i, result := range r.Resources {
// for k, event := range v.Resources {
// if event.Resource.APIVersion == result.Resource.APIVersion && result.Resource.Kind == event.Resource.Kind && result.Resource.Namespace == event.Resource.Namespace && result.Resource.Name == event.Resource.Name {
// r.Resources[i] = v.Resources[k]
// if string(event.Status) != string(result.Status) {
// pr = changeClusterReportCount(string(event.Status), string(result.Status), pr)
// }
// v.Resources = append(v.Resources[:k], v.Resources[k+1:]...)
// break
// }
// }
// for _, resource := range v.Resources {
// pr = changeClusterReportCount(string(resource.Status), string(""), pr)
// r.Resources = append(r.Resources, resource)
// }
// }
// }
// }
// }

// if len(removePolicy) > 0 {
// for _, v := range removePolicy {
// for i, r := range pr.Results {
// if r.Policy == v {
// for _, v := range r.Resources {
// pr = changeClusterReportCount("", string(v.Status), pr)
// }
// pr.Results = append(pr.Results[:i], pr.Results[i+1:]...)
// }
// }
// }
// }
// return pr, action
// }

// func changeClusterReportCount(status, oldStatus string, report *policyreport.ClusterPolicyReport) *policyreport.ClusterPolicyReport {
// switch oldStatus {
// case "Pass":
// if report.Summary.Pass--; report.Summary.Pass < 0 {
// report.Summary.Pass = 0
// }
// break
// case "Fail":
// if report.Summary.Fail--; report.Summary.Fail < 0 {
// report.Summary.Fail = 0
// }
// break
// default:
// break
// }
// switch status {
// case "Pass":
// report.Summary.Pass++
// break
// case "Fail":
// report.Summary.Fail++
// break
// default:
// break
// }
// return report
// }

// func initPolicyReport(scope, namespace, name string) *policyreport.PolicyReport {
// availablepr := &policyreport.PolicyReport{
// Scope: &corev1.ObjectReference{
// Kind: scope,
// Namespace: namespace,
// },
// Summary: policyreport.PolicyReportSummary{},
// Results: []*policyreport.PolicyReportResult{},
// }
// labelMap := map[string]string{
// "policy-scope": scope,
// "policy-state": "init",
// }
// availablepr.SetName(name)
// availablepr.SetNamespace(namespace)
// availablepr.SetLabels(labelMap)
// return availablepr
// }

// func initClusterPolicyReport(scope, name string) *policyreport.ClusterPolicyReport {
// availablepr := &policyreport.ClusterPolicyReport{
// Scope: &corev1.ObjectReference{
// Kind: scope,
// },
// Summary: policyreport.PolicyReportSummary{},
// Results: []*policyreport.PolicyReportResult{},
// }
// labelMap := map[string]string{
// "policy-scope": scope,
// "policy-state": "init",
// }
// availablepr.SetName(name)
// availablepr.SetLabels(labelMap)
// return availablepr
// }

@@ -1,71 +0,0 @@
package report

// import (
// "fmt"
// "os"
// "sync"
// "time"

// "github.com/kyverno/kyverno/pkg/common"
// "github.com/kyverno/kyverno/pkg/constant"
// client "github.com/kyverno/kyverno/pkg/dclient"
// cliutil "github.com/kyverno/kyverno/pkg/kyverno/common"
// "github.com/spf13/cobra"
// metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
// "k8s.io/cli-runtime/pkg/genericclioptions"
// log "sigs.k8s.io/controller-runtime/pkg/log"
// )

// func NamespaceCommand() *cobra.Command {
// var mode, namespace, policy string
// cmd := &cobra.Command{
// Use: "namespace",
// Short: "generate report for scope namespace",
// Example: fmt.Sprintf("To create a namespace report from background scan:\nkyverno report namespace --namespace=defaults \n kyverno report namespace"),
// RunE: func(cmd *cobra.Command, args []string) (err error) {
// os.Setenv("POLICY-TYPE", common.PolicyReport)
// logger := log.Log.WithName("Report")

// // with cluster
// kubernetesConfig := genericclioptions.NewConfigFlags(true)
// restConfig, err := kubernetesConfig.ToRESTConfig()
// if err != nil {
// return err
// }

// dClient, err := client.NewClient(restConfig, 5*time.Minute, make(chan struct{}), log.Log)
// if err != nil {
// return err
// }

// resources, err := cliutil.GetResources(nil, []string{}, dClient)

// var wg sync.WaitGroup
// if mode == "cli" {
// if namespace != "" {
// wg.Add(1)
// go backgroundScan(namespace, constant.Namespace, policy, &wg, restConfig, logger)
// } else {
// ns, err := kubeClient.CoreV1().Namespaces().List(metav1.ListOptions{})
// if err != nil {
// os.Exit(1)
// }
// wg.Add(len(ns.Items))
// for _, n := range ns.Items {
// go backgroundScan(n.GetName(), constant.Namespace, policy, &wg, restConfig, logger)
// }
// }
// } else {
// wg.Add(1)
// go configmapScan(constant.Namespace, &wg, restConfig, logger)
// }

// wg.Wait()
// return nil
// },
// }
// cmd.Flags().StringVarP(&namespace, "namespace", "n", "", "define specific namespace")
// cmd.Flags().StringVarP(&policy, "policy", "p", "", "define specific policy")
// cmd.Flags().StringVarP(&mode, "mode", "m", "cli", "mode")
// return cmd
// }

@@ -5,7 +5,6 @@ import (
	"fmt"
	"math/rand"
	"os"
	"strings"
	"sync"
	"time"

@@ -21,7 +20,6 @@ import (
	client "github.com/kyverno/kyverno/pkg/dclient"
	"github.com/kyverno/kyverno/pkg/event"
	"github.com/kyverno/kyverno/pkg/policyreport"
	"github.com/kyverno/kyverno/pkg/policyreport/jobs"
	"github.com/kyverno/kyverno/pkg/policyviolation"
	"github.com/kyverno/kyverno/pkg/resourcecache"
	"github.com/kyverno/kyverno/pkg/webhookconfig"

@@ -112,8 +110,6 @@ type PolicyController struct {
	// resourceWebhookWatcher queues the webhook creation request, creates the webhook
	resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister

	job *jobs.Job

	policySync *PolicySync

	// resCache - controls creation and fetching of resource informer cache

@@ -142,8 +138,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
	resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
	namespaces informers.NamespaceInformer,
	log logr.Logger,
	resCache resourcecache.ResourceCacheIface,
	job *jobs.Job) (*PolicyController, error) {
	resCache resourcecache.ResourceCacheIface) (*PolicyController, error) {

	// Event broad caster
	eventBroadcaster := record.NewBroadcaster()

@@ -164,7 +159,6 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
		pvGenerator:            pvGenerator,
		prGenerator:            prGenerator,
		resourceWebhookWatcher: resourceWebhookWatcher,
		job:                    job,
		log:                    log,
		resCache:               resCache,
	}

@@ -368,23 +362,6 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {

	}

	if os.Getenv("POLICY-TYPE") == common.PolicyReport {
		go func(pc *PolicyController) {
			ticker := time.Tick(constant.PolicyReportPolicyChangeResync)
			for {
				select {
				case <-ticker:
					if len(pc.policySync.policy) > 0 {
						pc.job.Add(jobs.JobInfo{
							JobType: "POLICYSYNC",
							JobData: strings.Join(pc.policySync.policy, ","),
						})
					}
				}
			}
		}(pc)
	}

	for i := 0; i < workers; i++ {
		go wait.Until(pc.worker, constant.PolicyControllerResync, stopCh)
	}

@@ -1,25 +1,19 @@
package policyreport

import (
	"encoding/json"
	"reflect"
	"strconv"
	"strings"
	"sync"
	"time"

	"github.com/go-logr/logr"
	kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
	policyreportclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
	policyreportinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/policyreport/v1alpha1"
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/constant"
	dclient "github.com/kyverno/kyverno/pkg/dclient"
	"github.com/kyverno/kyverno/pkg/policyreport/jobs"
	"github.com/kyverno/kyverno/pkg/policystatus"
	v1 "k8s.io/api/core/v1"
	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/tools/cache"

@@ -40,10 +34,6 @@ type Generator struct {
	log       logr.Logger
	queue     workqueue.RateLimitingInterface
	dataStore *dataStore

	inMemoryConfigMap *PVEvent
	mux               sync.Mutex
	job               *jobs.Job
}

//NewDataStore returns an instance of data store

@@ -115,7 +105,6 @@ func NewPRGenerator(client *policyreportclient.Clientset,
	prInformer policyreportinformer.ClusterPolicyReportInformer,
	nsprInformer policyreportinformer.PolicyReportInformer,
	policyStatus policystatus.Listener,
	job *jobs.Job,
	log logr.Logger) *Generator {
	gen := Generator{
		dclient: dclient,

@@ -124,11 +113,6 @@ func NewPRGenerator(client *policyreportclient.Clientset,
		queue:     workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
		dataStore: newDataStore(),
		log:       log,
		inMemoryConfigMap: &PVEvent{
			Namespace: make(map[string][]Info),
			Cluster:   make(map[string][]Info),
		},
		job: job,
	}

	return &gen

@@ -162,34 +146,6 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
		logger.Info("failed to sync informer cache")
	}

	go func(gen *Generator) {
		ticker := time.Tick(constant.PolicyReportResourceChangeResync)
		for {
			select {
			case <-ticker:
				err := gen.createConfigmap()
				scops := []string{}
				if len(gen.inMemoryConfigMap.Namespace) > 0 {
					scops = append(scops, constant.Namespace)
				}
				if len(gen.inMemoryConfigMap.Cluster["cluster"]) > 0 {
					scops = append(scops, constant.Cluster)
				}
				gen.job.Add(jobs.JobInfo{
					JobType: constant.ConfigmapMode,
					JobData: strings.Join(scops, ","),
				})
				if err != nil {
					gen.log.Error(err, "configmap error")
				}
				gen.inMemoryConfigMap = &PVEvent{
					Namespace: make(map[string][]Info),
					Cluster:   make(map[string][]Info),
				}
			}
		}
	}(gen)

	for i := 0; i < workers; i++ {
		go wait.Until(gen.runWorker, constant.PolicyViolationControllerResync, stopCh)
	}

@@ -264,43 +220,8 @@ func (gen *Generator) processNextWorkItem() bool {

	return true
}
func (gen *Generator) createConfigmap() error {
	defer func() {
		gen.mux.Unlock()
	}()
	gen.mux.Lock()
	configmap, err := gen.dclient.GetResource("", "ConfigMap", config.KubePolicyNamespace, "kyverno-event")
	if err != nil {
		return err
	}
	cm := v1.ConfigMap{}
	if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configmap.UnstructuredContent(), &cm); err != nil {
		return err
	}

	rawData, _ := json.Marshal(gen.inMemoryConfigMap.Cluster)
	cm.Data[constant.Cluster] = string(rawData)
	rawData, _ = json.Marshal(gen.inMemoryConfigMap.Namespace)
	cm.Data[constant.Namespace] = string(rawData)

	_, err = gen.dclient.UpdateResource("", "ConfigMap", config.KubePolicyNamespace, cm, false)

	return err
}

func (gen *Generator) syncHandler(info Info) error {
	gen.mux.Lock()
	defer gen.mux.Unlock()

	if info.Resource.GetNamespace() == "" {
		// cluster scope resource generate a clusterpolicy violation
		gen.inMemoryConfigMap.Cluster["cluster"] = append(gen.inMemoryConfigMap.Cluster["cluster"], info)
		return nil
	}

	// namespaced resources generated a namespaced policy violation in the namespace of the resource
	if _, ok := gen.inMemoryConfigMap.Namespace[info.Resource.GetNamespace()]; ok {
		gen.inMemoryConfigMap.Namespace[info.Resource.GetNamespace()] = append(gen.inMemoryConfigMap.Namespace[info.Resource.GetNamespace()], info)
	}
	return nil
}

@@ -1,386 +0,0 @@
package jobs

import (
	"fmt"
	"strings"
	"sync"
	"time"

	"github.com/cenkalti/backoff"
	"github.com/go-logr/logr"
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/constant"
	dclient "github.com/kyverno/kyverno/pkg/dclient"
	v1 "k8s.io/api/batch/v1"
	apiv1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
)

const workQueueName = "policy-violation-controller"
const workQueueRetryLimit = 3

//Job creates policy report
type Job struct {
	dclient       *dclient.Client
	log           logr.Logger
	queue         workqueue.RateLimitingInterface
	dataStore     *dataStore
	configHandler config.Interface
	mux           sync.Mutex
}

// JobInfo defines Job Type
type JobInfo struct {
	JobType string
	JobData string
}

func (i JobInfo) toKey() string {
	return fmt.Sprintf("kyverno-%v", i.JobType)
}

//NewDataStore returns an instance of data store
func newDataStore() *dataStore {
	ds := dataStore{
		data: make(map[string]JobInfo),
	}
	return &ds
}

type dataStore struct {
	data map[string]JobInfo
	mu   sync.RWMutex
}

func (ds *dataStore) add(keyHash string, info JobInfo) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	ds.data[keyHash] = info
}

func (ds *dataStore) lookup(keyHash string) JobInfo {
	ds.mu.RLock()
	defer ds.mu.RUnlock()
	return ds.data[keyHash]
}

func (ds *dataStore) delete(keyHash string) {
	ds.mu.Lock()
	defer ds.mu.Unlock()
	delete(ds.data, keyHash)
}

//JobsInterface provides API to create PVs
type JobsInterface interface {
	Add(infos ...JobInfo)
}

// NewJobsJob returns a new instance of jobs generator
func NewJobsJob(dclient *dclient.Client,
	configHandler config.Interface,
	log logr.Logger) *Job {
	gen := Job{
		dclient:       dclient,
		queue:         workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
		dataStore:     newDataStore(),
		configHandler: configHandler,
		log:           log,
	}
	return &gen
}

func (j *Job) enqueue(info JobInfo) {
	keyHash := info.toKey()

	j.dataStore.add(keyHash, info)
	j.queue.Add(keyHash)
	j.log.V(4).Info("job added to the queue", "keyhash", keyHash)
}

//Add queues a job creation request
func (j *Job) Add(infos ...JobInfo) {
	for _, info := range infos {
		j.enqueue(info)
	}
}

// Run starts the workers
func (j *Job) Run(workers int, stopCh <-chan struct{}) {
	logger := j.log
	defer utilruntime.HandleCrash()
	logger.Info("start")
	defer logger.Info("shutting down")
	for i := 0; i < workers; i++ {
		go wait.Until(j.runWorker, constant.PolicyViolationControllerResync, stopCh)
	}
	<-stopCh
}

func (j *Job) runWorker() {
	for j.processNextWorkItem() {
	}
}

func (j *Job) handleErr(err error, key interface{}) {
	logger := j.log
	if err == nil {
		j.queue.Forget(key)
		return
	}

	// retires requests if there is error
	if j.queue.NumRequeues(key) < workQueueRetryLimit {
		logger.Error(err, "failed to sync queued jobs", "key", key)
		// Re-enqueue the key rate limited. Based on the rate limiter on the
		// queue and the re-enqueue history, the key will be processed later again.
		j.queue.AddRateLimited(key)
		return
	}

	j.queue.Forget(key)
	if keyHash, ok := key.(string); ok {
		j.dataStore.delete(keyHash)
	}

	logger.Error(err, "dropping key out of the queue", "key", key)
}

func (j *Job) processNextWorkItem() bool {
	logger := j.log
	obj, shutdown := j.queue.Get()
	if shutdown {
		return false
	}
	defer j.queue.Done(obj)

	var keyHash string
	var ok bool
	if keyHash, ok = obj.(string); !ok {
		j.queue.Forget(obj)
		logger.Info("incorrect type; expecting type 'string'", "obj", obj)
		return true
	}

	// lookup data store
	info := j.dataStore.lookup(keyHash)
	err := j.syncHandler(info)
	j.handleErr(err, keyHash)

	return true
}

func (j *Job) syncHandler(info JobInfo) error {
	defer func() {
		j.mux.Unlock()
	}()
	j.mux.Lock()

	var err error
	var wg sync.WaitGroup
	if info.JobType == constant.BackgroundPolicySync {
		wg.Add(1)
		go func() {
			err = j.syncKyverno(&wg, constant.All, constant.BackgroundPolicySync, info.JobData)
		}()
	}

	if info.JobType == constant.ConfigmapMode {
		// shuting?
		if info.JobData == "" {
			return nil
		}

		scopes := strings.Split(info.JobData, ",")
		if len(scopes) == 1 {
			wg.Add(1)
			go func() {
				err = j.syncKyverno(&wg, constant.All, constant.ConfigmapMode, "")
			}()
		} else {
			wg.Add(len(scopes))
			for _, scope := range scopes {
				go func(scope string) {
					err = j.syncKyverno(&wg, scope, constant.ConfigmapMode, "")
				}(scope)
			}
		}
	}

	wg.Wait()
	return err
}

func (j *Job) syncKyverno(wg *sync.WaitGroup, scope, jobType, data string) error {
	defer wg.Done()

	go j.cleanupCompletedJobs()

	mode := "cli"
	args := []string{
		"report",
		"all",
		fmt.Sprintf("--mode=%s", "configmap"),
	}

	if jobType == constant.BackgroundPolicySync || jobType == constant.BackgroundSync {
		switch scope {
		case constant.Namespace:
			args = []string{
				"report",
				"namespace",
				fmt.Sprintf("--mode=%s", mode),
			}
		case constant.Cluster:
			args = []string{
				"report",
				"cluster",
				fmt.Sprintf("--mode=%s", mode),
			}
		case constant.All:
			args = []string{
				"report",
				"all",
				fmt.Sprintf("--mode=%s", mode),
			}
		}
	}

	if jobType == constant.BackgroundPolicySync && data != "" {
		args = append(args, fmt.Sprintf("-p=%s", data))
	}

	exbackoff := &backoff.ExponentialBackOff{
		InitialInterval:     backoff.DefaultInitialInterval,
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         time.Second,
		MaxElapsedTime:      5 * time.Minute,
		Clock:               backoff.SystemClock,
	}

	exbackoff.Reset()
	err := backoff.Retry(func() error {
		resourceList, err := j.dclient.ListResource("", "Job", config.KubePolicyNamespace, &metav1.LabelSelector{
			MatchLabels: map[string]string{
				"scope": scope,
				"type":  jobType,
			},
		})
		if err != nil {
			return fmt.Errorf("failed to list jobs: %v", err)
		}

		if len(resourceList.Items) != 0 {
			return fmt.Errorf("found %d Jobs", len(resourceList.Items))
		}
		return nil
	}, exbackoff)

	if err != nil {
		return err
	}

	return j.CreateJob(args, jobType, scope)
}

// CreateJob will create Job template for background scan
func (j *Job) CreateJob(args []string, jobType, scope string) error {
	ttl := new(int32)
	*ttl = 60

	job := &v1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: config.KubePolicyNamespace,
			Labels: map[string]string{
				"scope": scope,
				"type":  jobType,
			},
		},
		Spec: v1.JobSpec{
			Template: apiv1.PodTemplateSpec{
				Spec: apiv1.PodSpec{
					Containers: []apiv1.Container{
						{
							Name:            strings.ToLower(fmt.Sprintf("%s-%s", jobType, scope)),
							Image:           config.KyvernoCliImage,
							ImagePullPolicy: apiv1.PullAlways,
							Args:            args,
						},
					},
					ServiceAccountName: "kyverno-service-account",
					RestartPolicy:      "OnFailure",
				},
			},
		},
	}

	job.Spec.TTLSecondsAfterFinished = ttl
	job.SetGenerateName("kyverno-policyreport-")
	if _, err := j.dclient.CreateResource("", "Job", config.KubePolicyNamespace, job, false); err != nil {
		return fmt.Errorf("failed to create job: %v", err)
	}

	return nil
}

func (j *Job) cleanupCompletedJobs() {
	logger := j.log.WithName("cleanup jobs")

	exbackoff := &backoff.ExponentialBackOff{
		InitialInterval:     backoff.DefaultInitialInterval,
		RandomizationFactor: backoff.DefaultRandomizationFactor,
		Multiplier:          backoff.DefaultMultiplier,
		MaxInterval:         time.Second,
		MaxElapsedTime:      2 * time.Minute,
		Clock:               backoff.SystemClock,
	}

	exbackoff.Reset()
	err := backoff.Retry(func() error {
		resourceList, err := j.dclient.ListResource("", "Job", config.KubePolicyNamespace, nil)
		if err != nil {
			return fmt.Errorf("failed to list jobs : %v", err)
		}

		if err != nil {
			return fmt.Errorf("failed to list pods : %v", err)
		}

		for _, job := range resourceList.Items {
			succeeded, ok, _ := unstructured.NestedInt64(job.Object, "status", "succeeded")
			if ok && succeeded > 0 {
				if errnew := j.dclient.DeleteResource("", "Job", job.GetNamespace(), job.GetName(), false); errnew != nil {
					err = errnew
					continue
				}

				podList, errNew := j.dclient.ListResource("", "Pod", config.KubePolicyNamespace, &metav1.LabelSelector{
					MatchLabels: map[string]string{
						"job-name": job.GetName(),
					},
				})

				if errNew != nil {
					err = errNew
					continue
				}

				for _, pod := range podList.Items {
					if errpod := j.dclient.DeleteResource("", "Pod", pod.GetNamespace(), pod.GetName(), false); errpod != nil {
						logger.Error(errpod, "failed to delete pod", "name", pod.GetName())
						err = errpod
					}
				}
			}
		}
		return err
	}, exbackoff)

	if err != nil {
		logger.Error(err, "failed to clean up completed jobs")
	}
}

@@ -19,7 +19,6 @@ import (
	"github.com/kyverno/kyverno/pkg/constant"
	dclient "github.com/kyverno/kyverno/pkg/dclient"
	"github.com/kyverno/kyverno/pkg/policyreport"
	"github.com/kyverno/kyverno/pkg/policyreport/jobs"
	"github.com/kyverno/kyverno/pkg/policystatus"
	unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	utilruntime "k8s.io/apimachinery/pkg/util/runtime"

@@ -48,7 +47,6 @@ type Generator struct {
	dataStore            *dataStore
	policyStatusListener policystatus.Listener
	prgen                *policyreport.Generator
	job                  *jobs.Job
}

//NewDataStore returns an instance of data store

@@ -117,7 +115,6 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
	prInformer policyreportinformer.ClusterPolicyReportInformer,
	nsprInformer policyreportinformer.PolicyReportInformer,
	policyStatus policystatus.Listener,
	job *jobs.Job,
	log logr.Logger,
	stopChna <-chan struct{}) *Generator {
	gen := Generator{

@@ -130,7 +127,6 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
		queue:                workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName),
		dataStore:            newDataStore(),
		log:                  log,
		job:                  job,
		policyStatusListener: policyStatus,
	}
	return &gen

@@ -10,14 +10,10 @@ import (
	"net/http"
	"time"

	"github.com/kyverno/kyverno/pkg/policyreport"

	"github.com/go-logr/logr"
	"github.com/julienschmidt/httprouter"
	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
	"github.com/kyverno/kyverno/pkg/checker"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
	kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
	kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"

@@ -28,8 +24,10 @@ import (
	"github.com/kyverno/kyverno/pkg/event"
	"github.com/kyverno/kyverno/pkg/openapi"
	"github.com/kyverno/kyverno/pkg/policycache"
	"github.com/kyverno/kyverno/pkg/policyreport"
	"github.com/kyverno/kyverno/pkg/policystatus"
	"github.com/kyverno/kyverno/pkg/policyviolation"
	"github.com/kyverno/kyverno/pkg/resourcecache"
	tlsutils "github.com/kyverno/kyverno/pkg/tls"
	userinfo "github.com/kyverno/kyverno/pkg/userinfo"
	"github.com/kyverno/kyverno/pkg/utils"

@@ -37,11 +35,10 @@ import (
	"github.com/kyverno/kyverno/pkg/webhooks/generate"
	v1beta1 "k8s.io/api/admission/v1beta1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	rbacinformer "k8s.io/client-go/informers/rbac/v1"
	rbaclister "k8s.io/client-go/listers/rbac/v1"
	"k8s.io/client-go/tools/cache"

	"github.com/kyverno/kyverno/pkg/resourcecache"
)

// WebhookServer contains configured TLS server with MutationWebhook.