Mirror of https://github.com/kyverno/kyverno.git, synced 2025-03-31 03:45:17 +00:00
added logic for cli
parent 22437289f5
commit 0bc1b3b3e8
13 changed files with 141 additions and 66 deletions
@@ -346,6 +346,7 @@ func main() {
    go statusSync.Run(1, stopCh)
    go pCacheController.Run(1, stopCh)
    go auditHandler.Run(10, stopCh)
    go jobController.Run(1, stopCh)
    openAPISync.Run(1, stopCh)

    // verifies if the admission control is enabled and active
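The new jobController goroutine follows the same Run(workers, stopCh) convention as the controllers around it: start a fixed number of worker goroutines and stop them when the channel closes. A minimal, illustrative sketch of that pattern (runWorkers and the worker body are ours, not kyverno code):

```go
package main

import (
	"fmt"
	"time"
)

// runWorkers mirrors the Run(workers, stopCh) convention used above:
// it starts `workers` goroutines that loop until stopCh is closed.
func runWorkers(workers int, stopCh <-chan struct{}, work func()) {
	for i := 0; i < workers; i++ {
		go func() {
			for {
				select {
				case <-stopCh:
					return
				default:
					work()
					time.Sleep(time.Second)
				}
			}
		}()
	}
	<-stopCh // block, like the controllers' Run methods do
}

func main() {
	stopCh := make(chan struct{})
	go runWorkers(1, stopCh, func() { fmt.Println("processing job queue item") })
	time.Sleep(3 * time.Second)
	close(stopCh)
}
```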
@@ -1144,6 +1144,7 @@ rules:
  - networkpolicies
  - secrets
  - configmaps
  - jobs
  - resourcequotas
  - limitranges
  - clusterroles
@@ -1510,7 +1511,6 @@ spec:
  - args:
    - --filterK8Resources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*]
    - -v=2
    - --policyreport=policyreport
    env:
    - name: INIT_CONFIG
      value: init-config
@@ -1520,7 +1520,7 @@ spec:
          fieldPath: metadata.namespace
    - name: KYVERNO_SVC
      value: kyverno-svc
    image: evalsocket/kyverno:v1.1.10-25-g3ebf9d43fc6c
    image: nirmata/kyverno:v1.1.10
    imagePullPolicy: Always
    livenessProbe:
      failureThreshold: 4
@@ -1144,6 +1144,7 @@ rules:
  - networkpolicies
  - secrets
  - configmaps
  - jobs
  - resourcequotas
  - limitranges
  - clusterroles
@@ -221,6 +221,7 @@ rules:
  - networkpolicies
  - secrets
  - configmaps
  - jobs
  - resourcequotas
  - limitranges
  - clusterroles
go.mod (8 lines changed)
@@ -4,6 +4,11 @@ go 1.13

require (
    github.com/cenkalti/backoff v2.2.1+incompatible
    github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b // indirect
    github.com/docker/distribution v2.7.1+incompatible // indirect
    github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0
    github.com/docker/go-connections v0.4.0 // indirect
    github.com/docker/go-metrics v0.0.1 // indirect
    github.com/evanphx/json-patch v4.5.0+incompatible
    github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
    github.com/go-logr/logr v0.1.0
@@ -16,8 +21,11 @@ require (
    github.com/julienschmidt/httprouter v1.3.0
    github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a
    github.com/minio/minio v0.0.0-20200114012931-30922148fbb5
    github.com/morikuni/aec v1.0.0 // indirect
    github.com/onsi/ginkgo v1.11.0
    github.com/onsi/gomega v1.8.1
    github.com/opencontainers/go-digest v1.0.0 // indirect
    github.com/opencontainers/image-spec v1.0.1 // indirect
    github.com/pkg/errors v0.9.1
    github.com/spf13/cobra v1.0.0
    github.com/stretchr/testify v1.4.0
@@ -6,8 +6,11 @@ import (
    v1 "k8s.io/api/batch/v1"
    apiv1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "reflect"
    "strings"
    "sync"
    "time"

    "github.com/go-logr/logr"
@@ -222,7 +225,7 @@ func (j *Job) syncNamespace(wg *sync.WaitGroup, jobType, scope, policy string) {
    }()
    var args []string
    var mode string
    if len(policy) > 0 {
        if policy == "POLICY" {
            mode = "cli"
        } else {
            mode = "configmap"
@@ -236,7 +239,7 @@ func (j *Job) syncNamespace(wg *sync.WaitGroup, jobType, scope, policy string) {
            "helm",
            fmt.Sprintf("--mode=%s", mode),
        }
        job = CreateJob(append(args, "helm"), jobType, scope)
        job = CreateJob(args, jobType, scope)
        break
    case "NAMESPACE":
        args = []string{
@@ -244,7 +247,7 @@ func (j *Job) syncNamespace(wg *sync.WaitGroup, jobType, scope, policy string) {
            "namespace",
            fmt.Sprintf("--mode=%s", mode),
        }
        job = CreateJob(append(args, "namespace"), jobType, scope)
        job = CreateJob(args, jobType, scope)
        break
    case "CLUSTER":
        args = []string{
@@ -252,20 +255,42 @@ func (j *Job) syncNamespace(wg *sync.WaitGroup, jobType, scope, policy string) {
            "cluster",
            fmt.Sprintf("--mode=%s", mode),
        }
        job = CreateJob(append(args, "cluster"), jobType, scope)
        job = CreateJob(args, jobType, scope)
        break
    }
    _, err := j.dclient.UpdateStatusResource("", "Job", config.KubePolicyNamespace, job, false)
    _, err := j.dclient.CreateResource("", "Job", config.KubePolicyNamespace, job, false)
    if err != nil {
        return
    }
    deadline := time.Now().Add(15 * time.Second)
    var failure bool
    for {
        resource, err := j.dclient.GetResource("", "Job", config.KubePolicyNamespace, job.GetName())
        if err != nil {
            continue
        }
        job := v1.Job{}
        if err := runtime.DefaultUnstructuredConverter.FromUnstructured(resource.UnstructuredContent(), &job); err != nil {
            failure = true
            break
        }
        if job.Status.Active == 0 || time.Now().After(deadline) {
            failure = true
            break
        }
    }
    if failure {
        err := j.dclient.DeleteResource("", "Job", config.KubePolicyNamespace, job.GetName(), false)
        if err != nil {
            return
        }
    }
    return
}

func CreateJob(args []string, jobType, scope string) *v1.Job {
    return &v1.Job{
    job := &v1.Job{
        ObjectMeta: metav1.ObjectMeta{
            Name:      fmt.Sprintf("%s-%s", jobType, scope),
            Namespace: config.KubePolicyNamespace,
        },
        Spec: v1.JobSpec{
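The wait loop above polls the created Job through the dynamic client until it reports no active pods or a 15-second deadline passes, then cleans it up. For comparison, a hedged sketch of a similar wait built on wait.PollImmediate from k8s.io/apimachinery, which bounds the wait and adds an interval between checks; waitForJob is illustrative, not the committed code, and assumes the pre-context client-go Get signature this go.mod vendors:

```go
package jobs

import (
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	batchv1client "k8s.io/client-go/kubernetes/typed/batch/v1"
)

// waitForJob is a sketch: it polls a Job through a typed clientset until it has
// no active pods or the timeout expires, instead of a tight for-loop.
func waitForJob(jobs batchv1client.JobInterface, name string) error {
	return wait.PollImmediate(time.Second, 15*time.Second, func() (bool, error) {
		job, err := jobs.Get(name, metav1.GetOptions{})
		if err != nil {
			return false, nil // transient error: keep polling
		}
		return job.Status.Active == 0, nil // done once no pods are active
	})
}
```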
@@ -273,13 +298,18 @@ func CreateJob(args []string, jobType, scope string) *v1.Job {
            Spec: apiv1.PodSpec{
                Containers: []apiv1.Container{
                    {
                        Name:  fmt.Sprintf("%s-%s", jobType, scope),
                        Image: "nirmata/kyverno-cli:latest",
                        Name:            strings.ToLower(fmt.Sprintf("%s-%s", jobType, scope)),
                        Image:           "evalsocket/kyverno-cli:latest",
                        ImagePullPolicy: "Always",
                        Args:            args,

                    },
                },
                RestartPolicy: "OnFailure",
            },
        },
    },
    }
    job.SetGenerateName("kyverno-policyreport-")
    return job
}
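CreateJob builds a typed batch/v1 Job, while the surrounding flow (CreateResource, GetResource, FromUnstructured above) works with unstructured objects. A small illustrative helper showing the reverse conversion with runtime.DefaultUnstructuredConverter, in case the dynamic client needs unstructured input; toUnstructured is not part of this commit:

```go
package jobs

import (
	v1 "k8s.io/api/batch/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime"
)

// toUnstructured converts the typed Job returned by CreateJob into the
// unstructured form that dynamic clients operate on, mirroring the
// FromUnstructured call used above in reverse.
func toUnstructured(job *v1.Job) (*unstructured.Unstructured, error) {
	content, err := runtime.DefaultUnstructuredConverter.ToUnstructured(job)
	if err != nil {
		return nil, err
	}
	u := &unstructured.Unstructured{Object: content}
	// typed objects carry no TypeMeta by default, so set the GVK explicitly
	u.SetAPIVersion("batch/v1")
	u.SetKind("Job")
	return u, nil
}
```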
@@ -23,7 +23,7 @@ func ClusterCommand() *cobra.Command {
    var wg sync.WaitGroup
    wg.Add(1)
    if mode == "cli" {
        go createEngineRespone("", "CLUSTER", &wg, restConfig)
        go configmapScan("", "CLUSTER", &wg, restConfig)
        wg.Wait()
        return nil
    }
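The mode checked here corresponds to the --mode value the jobs controller appends to the container args earlier in this diff (fmt.Sprintf("--mode=%s", mode)). A minimal sketch of how a cobra subcommand can wire such a flag; the default value, help text, and clusterCommandSketch name are assumptions, not the actual CLI code:

```go
package report

import (
	"fmt"

	"github.com/spf13/cobra"
)

// clusterCommandSketch shows the flag wiring assumed by the mode checks above:
// --mode switches how the report data is gathered.
func clusterCommandSketch() *cobra.Command {
	var mode string
	cmd := &cobra.Command{
		Use:   "cluster",
		Short: "generate a cluster-scoped policy report",
		RunE: func(cmd *cobra.Command, args []string) error {
			if mode == "cli" {
				fmt.Println("running the cli scan path")
				return nil
			}
			fmt.Println("running the configmap scan path")
			return nil
		},
	}
	cmd.Flags().StringVar(&mode, "mode", "cli", "report mode: cli or configmap")
	return cmd
}
```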
@@ -1,8 +1,8 @@
package report

import (
    "encoding/json"
    "fmt"
    "encoding/json"
    kyvernov1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
    policyreportv1alpha1 "github.com/nirmata/kyverno/pkg/api/policyreport/v1alpha1"
    kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@@ -15,9 +15,12 @@ import (
    "github.com/nirmata/kyverno/pkg/policyreport"
    "github.com/nirmata/kyverno/pkg/utils"
    corev1 "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"

    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
    "k8s.io/apimachinery/pkg/runtime"
    kubeinformers "k8s.io/client-go/informers"
    "k8s.io/client-go/rest"
    "os"
@@ -34,7 +37,7 @@ const (
    Cluster string = "Cluster"
)

func createEngineRespone(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
    defer func() {
        wg.Done()
    }()
@@ -73,14 +76,16 @@ func createEngineRespone(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
        os.Exit(1)
    }
    } else {
        cpolicies = &kyvernov1.ClusterPolicyList{}
        policies, err := kclient.KyvernoV1().Policies(n).List(metav1.ListOptions{})
        if err != nil {
            os.Exit(1)
        }
        for _, p := range policies.Items {
            cp := policy.ConvertPolicyToClusterPolicy(&p)
            cpolicies.Items = append(cpolicies.Items, *cp)
        }
        if err != nil {
            os.Exit(1)
        }

    }

    // key uid
@@ -267,7 +272,7 @@ func createEngineRespone(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
    // Create Policy Report
}

func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
func configmapScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
    defer func() {
        wg.Done()
    }()
@@ -291,31 +296,38 @@ func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {

    _ = kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)

    configmap, err := dClient.GetResource("", "Configmap", config.KubePolicyNamespace, "kyverno-event")
    configmap, err := dClient.GetResource("", "ConfigMap", config.KubePolicyNamespace, "kyverno-event")
    if err != nil {
        os.Exit(1)
    }

    genData, _, err := unstructured.NestedMap(configmap.Object, "data")
    if err != nil {
        os.Exit(1)
    }
    jsonString, _ := json.Marshal(genData)
    events := policyreport.PVEvent{}
    json.Unmarshal(jsonString, &events)
    var job *v1.ConfigMap
    if err := runtime.DefaultUnstructuredConverter.FromUnstructured(configmap.UnstructuredContent(), &job); err != nil {
        os.Exit(1)
    }
    var response map[string][]policyreport.Info
    var data []policyreport.Info
    if scope == Cluster {
        data = events.Cluster
        if err := json.Unmarshal([]byte(job.Data["Namespace"]), &response); err != nil {
            log.Log.Error(err, "")
        }
        data = response["cluster"]
    } else if scope == Helm {
        data = events.Helm[n]
        if err := json.Unmarshal([]byte(job.Data["Helm"]), &response); err != nil {
            log.Log.Error(err, "")
        }
        data = response[n]
    } else {
        data = events.Namespace[n]
        if err := json.Unmarshal([]byte(job.Data["Namespace"]), &response); err != nil {
            log.Log.Error(err, "")
        }
        data = response[n]
    }
    var results map[string][]policyreportv1alpha1.PolicyReportResult

    var ns []string
    for _, v := range data {
        for _, r := range v.Rules {
            log.Log.Error(nil, "failed to get resource", "", r)
            builder := policyreport.NewPrBuilder()
            pv := builder.Generate(v)
            result := &policyreportv1alpha1.PolicyReportResult{
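configmapScan converts the kyverno-event ConfigMap into a typed object and unmarshals one of its data keys into map[string][]policyreport.Info. A self-contained sketch of that JSON round trip using a stand-in info struct; the real field names and data keys live in the kyverno policyreport package, so treat these as placeholders:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// info is a stand-in for policyreport.Info; the real struct lives in
// github.com/nirmata/kyverno/pkg/policyreport.
type info struct {
	PolicyName string `json:"policyName"`
	Resource   string `json:"resource"`
}

func main() {
	// what the controller writes: per-namespace results serialized to JSON
	// and stored under a ConfigMap data key (here "Namespace", as in the diff)
	written := map[string][]info{
		"default": {{PolicyName: "require-labels", Resource: "nginx"}},
	}
	raw, _ := json.Marshal(written)
	cmData := map[string]string{"Namespace": string(raw)}

	// what configmapScan does: unmarshal the key back into a map keyed by
	// namespace and pick the entries for the namespace being reported on
	var response map[string][]info
	if err := json.Unmarshal([]byte(cmData["Namespace"]), &response); err != nil {
		panic(err)
	}
	fmt.Printf("results for default: %+v\n", response["default"])
}
```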
@@ -356,6 +368,8 @@ func backgroundScan(n, scope string, wg *sync.WaitGroup, restConfig *rest.Config) {
            }
        }
    }


    for k, _ := range results {
        if scope == Helm || scope == Namespace {
            str := strings.Split(k, "-")
@@ -34,20 +34,21 @@ func HelmCommand() *cobra.Command {
    }

    kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)

    ns, err := kubeInformer.Core().V1().Namespaces().Lister().List(labels.Everything())
    if err != nil {
        os.Exit(1)
    }
    var wg sync.WaitGroup
    wg.Add(len(ns))
    for _, n := range ns {
        if mode == "cli" {
            go createEngineRespone(n.GetName(), "HELM", &wg, restConfig)
            wg.Wait()
            return nil
    if mode == "cli" {
        ns, err := kubeInformer.Core().V1().Namespaces().Lister().List(labels.Everything())
        if err != nil {
            os.Exit(1)
        }
        go backgroundScan(n.GetName(), "HELM", &wg, restConfig)
        var wg sync.WaitGroup
        wg.Add(len(ns))
        for _, n := range ns {
            go configmapScan(n.GetName(), "Helm", &wg, restConfig)
        }
        wg.Wait()
    } else {
        var wg sync.WaitGroup
        wg.Add(1)
        go backgroundScan("", "Helm", &wg, restConfig)
        wg.Wait()
        return nil
    }
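The reworked HelmCommand lists namespaces first, sizes the WaitGroup to the namespace count, launches one scan goroutine per namespace, and only then waits. The fan-out pattern in isolation, with scan standing in for configmapScan/backgroundScan:

```go
package main

import (
	"fmt"
	"sync"
)

func scan(namespace string, wg *sync.WaitGroup) {
	defer wg.Done() // matches the defer wg.Done() in backgroundScan/configmapScan
	fmt.Println("scanning", namespace)
}

func main() {
	namespaces := []string{"default", "kube-system", "test"}

	var wg sync.WaitGroup
	wg.Add(len(namespaces)) // one slot per namespace, added before launching
	for _, ns := range namespaces {
		go scan(ns, &wg)
	}
	wg.Wait() // block until every per-namespace scan has finished
}
```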
@@ -2,10 +2,11 @@ package report

import (
    "fmt"
    client "github.com/nirmata/kyverno/pkg/dclient"
    "github.com/nirmata/kyverno/pkg/utils"
    "github.com/spf13/cobra"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/labels"
    "k8s.io/cli-runtime/pkg/genericclioptions"
    kubeinformers "k8s.io/client-go/informers"
    "os"
    log "sigs.k8s.io/controller-runtime/pkg/log"
    "sync"
@@ -24,23 +25,30 @@ func NamespaceCommand() *cobra.Command {
    if err != nil {
        os.Exit(1)
    }
    dClient, err := client.NewClient(restConfig, 5*time.Minute, make(chan struct{}), log.Log)
    const resyncPeriod = 15 * time.Minute
    kubeClient, err := utils.NewKubeClient(restConfig)
    if err != nil {
        log.Log.Error(err, "Failed to create kubernetes client")
        os.Exit(1)
    }
    ns, err := dClient.ListResource("", "Namespace", "", &metav1.LabelSelector{})
    if err != nil {
        os.Exit(1)
    }
    var wg sync.WaitGroup
    wg.Add(len(ns.Items))
    for _, n := range ns.Items {
        if mode == "cli" {
            go createEngineRespone(n.GetName(), mode, &wg, restConfig)
            wg.Wait()
    kubeInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod)
    if mode == "cli" {
        ns, err := kubeInformer.Core().V1().Namespaces().Lister().List(labels.Everything())
        if err != nil {
            os.Exit(1)
        }
        var wg sync.WaitGroup
        wg.Add(len(ns))
        for _, n := range ns {
            go configmapScan(n.GetName(), "Namespace", &wg, restConfig)
        }
        go backgroundScan(n.GetName(), "HELM", &wg, restConfig)
        wg.Wait()
    } else {
        var wg sync.WaitGroup
        wg.Add(1)
        go backgroundScan("", "Namespace", &wg, restConfig)
        wg.Wait()
        return nil
    }
    return nil
},
@@ -317,11 +317,20 @@ func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) {
    logger.Info("starting")
    defer logger.Info("shutting down")

    if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.cpvListerSynced, pc.nspvListerSynced, pc.nsListerSynced) {
        logger.Info("failed to sync informer cache")
        return
    if os.Getenv("POLICY-TYPE") == "POLICYREPORT" {
        if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.nsListerSynced) {
            logger.Info("failed to sync informer cache")
            return
        }

    } else {
        if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.cpvListerSynced, pc.nspvListerSynced, pc.nsListerSynced) {
            logger.Info("failed to sync informer cache")
            return
        }
    }


    for i := 0; i < workers; i++ {
        go wait.Until(pc.worker, constant.PolicyControllerResync, stopCh)
    }
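The Run change selects which informer caches to wait for based on the POLICY-TYPE environment variable: in POLICYREPORT mode the violation listers are not used, so only the policy and namespace caches are synced. The same branch could be expressed as a small helper; this is a sketch under the assumption that the package name and lister types match the surrounding code:

```go
package policy

import (
	"os"

	"k8s.io/client-go/tools/cache"
)

// requiredCacheSyncs sketches the branch above as a helper: when POLICY-TYPE is
// POLICYREPORT the violation listers are not running, so only the policy and
// namespace informer caches need to be waited on before starting workers.
func requiredCacheSyncs(pSynced, nsSynced, cpvSynced, nspvSynced cache.InformerSynced) []cache.InformerSynced {
	synced := []cache.InformerSynced{pSynced, nsSynced}
	if os.Getenv("POLICY-TYPE") != "POLICYREPORT" {
		synced = append(synced, cpvSynced, nspvSynced)
	}
	return synced
}
```

Usage would then collapse the two WaitForCacheSync branches into a single call, e.g. cache.WaitForCacheSync(stopCh, requiredCacheSyncs(...)...).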
@@ -120,7 +120,7 @@ type GeneratorInterface interface {
type PVEvent struct {
    Helm      map[string][]Info
    Namespace map[string][]Info
    Cluster   []Info
    Cluster   map[string][]Info
}

// NewPRGenerator returns a new instance of policy violation generator
@@ -147,7 +147,7 @@ func NewPRGenerator(client *policyreportclient.Clientset,
    inMemoryConfigMap: &PVEvent{
        Helm:      make(map[string][]Info),
        Namespace: make(map[string][]Info),
        Cluster:   make([]Info, 0, 100),
        Cluster:   make(map[string][]Info),
    },
    job: job,
}
@@ -192,7 +192,9 @@ func (gen *Generator) Run(workers int, stopCh <-chan struct{}) {
    select {
    case <-ticker.C:
        err := gen.createConfigmap()
        gen.job.Add(jobs.JobInfo{})
        gen.job.Add(jobs.JobInfo{
            JobType: "background",
        })
        if err != nil {
            logger.Error(err, "configmap error")
        }
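The generator's ticker loop now tags each enqueued job with JobType: "background" so the jobs controller can tell scheduled scans apart from other triggers. A stripped-down, illustrative ticker sketch; jobInfo mirrors only the single field visible in this diff, and the interval and queue size are arbitrary:

```go
package main

import (
	"fmt"
	"time"
)

// jobInfo mirrors only what the diff shows of jobs.JobInfo: a JobType tag.
type jobInfo struct {
	JobType string
}

func main() {
	stopCh := make(chan struct{})
	queue := make(chan jobInfo, 10)
	ticker := time.NewTicker(200 * time.Millisecond) // the real interval differs
	defer ticker.Stop()

	go func() {
		time.Sleep(time.Second)
		close(stopCh)
	}()

	for {
		select {
		case <-ticker.C:
			// on every tick, persist results and enqueue a background job
			queue <- jobInfo{JobType: "background"}
			fmt.Println("enqueued", (<-queue).JobType)
		case <-stopCh:
			return
		}
	}
}
```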
@@ -298,7 +300,7 @@ func (gen *Generator) createConfigmap() error {
    gen.inMemoryConfigMap = &PVEvent{
        Helm:      make(map[string][]Info),
        Namespace: make(map[string][]Info),
        Cluster:   make([]Info, 0, 100),
        Cluster:   make(map[string][]Info),
    }
    return nil
}
@@ -323,7 +325,7 @@ func (gen *Generator) syncHandler(info Info) error {
        return nil
    } else if info.Resource.GetNamespace() == "" {
        // a cluster-scoped resource generates a cluster policy violation
        gen.inMemoryConfigMap.Cluster = append(gen.inMemoryConfigMap.Cluster, info)
        gen.inMemoryConfigMap.Cluster["cluster"] = append(gen.inMemoryConfigMap.Cluster["cluster"], info)

        return nil
    } else {
@@ -143,7 +143,7 @@ func NewPVGenerator(client *kyvernoclient.Clientset,
        log,
        stopChna,
    )
    go gen.prgen.Run(1, stopChna)
    go gen.prgen.Run(4, stopChna)

}
return &gen