diff --git a/cmd/initContainer/main.go b/cmd/initContainer/main.go index 9c5a9141ba..7a9de3557f 100644 --- a/cmd/initContainer/main.go +++ b/cmd/initContainer/main.go @@ -5,23 +5,26 @@ package main import ( "flag" + "fmt" "os" "regexp" "strconv" "sync" "time" - "github.com/golang/glog" "github.com/nirmata/kyverno/pkg/config" client "github.com/nirmata/kyverno/pkg/dclient" + "github.com/nirmata/kyverno/pkg/log" "github.com/nirmata/kyverno/pkg/signal" "k8s.io/apimachinery/pkg/api/errors" rest "k8s.io/client-go/rest" clientcmd "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var ( kubeconfig string + setupLog = log.Log.WithName("setup") ) const ( @@ -30,20 +33,21 @@ const ( ) func main() { - defer glog.Flush() // os signal handler stopCh := signal.SetupSignalHandler() // create client config clientConfig, err := createClientConfig(kubeconfig) if err != nil { - glog.Fatalf("Error building kubeconfig: %v\n", err) + setupLog.Error(err, "Failed to build kubeconfig") + os.Exit(1) } // DYNAMIC CLIENT // - client for all registered resources - client, err := client.NewClient(clientConfig, 10*time.Second, stopCh) + client, err := client.NewClient(clientConfig, 10*time.Second, stopCh, log.Log) if err != nil { - glog.Fatalf("Error creating client: %v\n", err) + setupLog.Error(err, "Failed to create client") + os.Exit(1) } // Exit for unsupported version of kubernetes cluster @@ -78,12 +82,12 @@ func main() { for err := range merge(done, stopCh, p1, p2) { if err != nil { failure = true - glog.Errorf("failed to cleanup: %v", err) + log.Log.Error(err, "failed to cleanup resource") } } // if there is any failure then we fail process if failure { - glog.Errorf("failed to cleanup webhook configurations") + log.Log.Info("failed to cleanup webhook configurations") os.Exit(1) } } @@ -95,36 +99,41 @@ func init() { flag.Set("stderrthreshold", "WARNING") flag.Set("v", "2") flag.Parse() + log.SetLogger(zap.New(func(o *zap.Options) { + o.Development = true + })) } func removeWebhookIfExists(client *client.Client, kind string, name string) error { + logger := log.Log.WithName("removeExistingWebhook").WithValues("kind", kind, "name", name) var err error // Get resource _, err = client.GetResource(kind, "", name) if errors.IsNotFound(err) { - glog.V(4).Infof("%s(%s) not found", name, kind) + logger.V(4).Info("resource not found") return nil } if err != nil { - glog.Errorf("failed to get resource %s(%s)", name, kind) + logger.Error(err, "failed to get resource") return err } // Delete resource err = client.DeleteResource(kind, "", name, false) if err != nil { - glog.Errorf("failed to delete resource %s(%s)", name, kind) + logger.Error(err, "failed to delete resource") return err } - glog.Infof("cleaned up resource %s(%s)", name, kind) + logger.Info("removed the resource") return nil } func createClientConfig(kubeconfig string) (*rest.Config, error) { + logger := log.Log if kubeconfig == "" { - glog.Info("Using in-cluster configuration") + logger.Info("Using in-cluster configuration") return rest.InClusterConfig() } - glog.Infof("Using configuration from '%s'", kubeconfig) + logger.Info(fmt.Sprintf("Using configuration from '%s'", kubeconfig)) return clientcmd.BuildConfigFromFlags("", kubeconfig) } @@ -163,6 +172,7 @@ func gen(done <-chan struct{}, stopCh <-chan struct{}, requests ...request) <-ch // processes the requests func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{}, requests <-chan request) <-chan error { + logger := log.Log.WithName("process") out 
:= make(chan error) go func() { defer close(out) @@ -170,10 +180,10 @@ func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{} select { case out <- removeWebhookIfExists(client, req.kind, req.name): case <-done: - println("done process") + logger.Info("done") return case <-stopCh: - println("shutting down process") + logger.Info("shutting down") return } } @@ -183,6 +193,7 @@ func process(client *client.Client, done <-chan struct{}, stopCh <-chan struct{} // waits for all processes to be complete and merges result func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan error) <-chan error { + logger := log.Log.WithName("merge") var wg sync.WaitGroup out := make(chan error) // gets the output from each process @@ -192,10 +203,10 @@ func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan err select { case out <- err: case <-done: - println("done merge") + logger.Info("done") return case <-stopCh: - println("shutting down merge") + logger.Info("shutting down") return } } @@ -215,30 +226,37 @@ func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan err } func isVersionSupported(client *client.Client) { + logger := log.Log serverVersion, err := client.DiscoveryClient.GetServerVersion() if err != nil { - glog.Fatalf("Failed to get kubernetes server version: %v\n", err) + logger.Error(err, "Failed to get kubernetes server version") + os.Exit(1) } exp := regexp.MustCompile(`v(\d*).(\d*).(\d*)`) groups := exp.FindAllStringSubmatch(serverVersion.String(), -1) if len(groups) != 1 || len(groups[0]) != 4 { - glog.Fatalf("Failed to extract kubernetes server version: %v.err %v\n", serverVersion, err) + logger.Error(err, "Failed to extract kubernetes server version", "serverVersion", serverVersion) + os.Exit(1) } // convert string to int // assuming the version are always intergers major, err := strconv.Atoi(groups[0][1]) if err != nil { - glog.Fatalf("Failed to extract kubernetes major server version: %v.err %v\n", serverVersion, err) + logger.Error(err, "Failed to extract kubernetes major server version", "serverVersion", serverVersion) + os.Exit(1) } minor, err := strconv.Atoi(groups[0][2]) if err != nil { - glog.Fatalf("Failed to extract kubernetes minor server version: %v.err %v\n", serverVersion, err) + logger.Error(err, "Failed to extract kubernetes minor server version", "serverVersion", serverVersion) + os.Exit(1) } sub, err := strconv.Atoi(groups[0][3]) if err != nil { - glog.Fatalf("Failed to extract kubernetes sub minor server version:%v. err %v\n", serverVersion, err) + logger.Error(err, "Failed to extract kubernetes sub minor server version", "serverVersion", serverVersion) + os.Exit(1) } if major <= 1 && minor <= 12 && sub < 7 { - glog.Fatalf("Unsupported kubernetes server version %s. Kyverno is supported from version v1.12.7+", serverVersion) + logger.Info("Unsupported kubernetes server version. 
Kyverno is supported from version v1.12.7+", "serverVersion", serverVersion) + os.Exit(1) } } diff --git a/cmd/kyverno/main.go b/cmd/kyverno/main.go index 62cefceeb9..8ecf67cefd 100644 --- a/cmd/kyverno/main.go +++ b/cmd/kyverno/main.go @@ -3,6 +3,8 @@ package main import ( "context" "flag" + "fmt" + "os" "time" "github.com/golang/glog" @@ -14,6 +16,7 @@ import ( event "github.com/nirmata/kyverno/pkg/event" "github.com/nirmata/kyverno/pkg/generate" generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup" + "github.com/nirmata/kyverno/pkg/log" "github.com/nirmata/kyverno/pkg/policy" "github.com/nirmata/kyverno/pkg/policystore" "github.com/nirmata/kyverno/pkg/policyviolation" @@ -24,6 +27,7 @@ import ( "github.com/nirmata/kyverno/pkg/webhooks" webhookgenerate "github.com/nirmata/kyverno/pkg/webhooks/generate" kubeinformers "k8s.io/client-go/informers" + "sigs.k8s.io/controller-runtime/pkg/log/zap" ) var ( @@ -35,20 +39,23 @@ var ( // will be removed in future and the configuration will be set only via configmaps filterK8Resources string // User FQDN as CSR CN - fqdncn bool + fqdncn bool + verbosity string + setupLog = log.Log.WithName("setup") ) func main() { defer glog.Flush() - version.PrintVersionInfo() + version.PrintVersionInfo(log.Log) // cleanUp Channel cleanUp := make(chan struct{}) // handle os signals stopCh := signal.SetupSignalHandler() // CLIENT CONFIG - clientConfig, err := config.CreateClientConfig(kubeconfig) + clientConfig, err := config.CreateClientConfig(kubeconfig, log.Log) if err != nil { - glog.Fatalf("Error building kubeconfig: %v\n", err) + setupLog.Error(err, "Failed to build kubeconfig") + os.Exit(1) } // KYVENO CRD CLIENT @@ -57,29 +64,33 @@ func main() { // - PolicyViolation pclient, err := kyvernoclient.NewForConfig(clientConfig) if err != nil { - glog.Fatalf("Error creating client: %v\n", err) + setupLog.Error(err, "Failed to create client") + os.Exit(1) } // DYNAMIC CLIENT // - client for all registered resources // - invalidate local cache of registered resource every 10 seconds - client, err := dclient.NewClient(clientConfig, 10*time.Second, stopCh) + client, err := dclient.NewClient(clientConfig, 10*time.Second, stopCh, log.Log) if err != nil { - glog.Fatalf("Error creating client: %v\n", err) + setupLog.Error(err, "Failed to create client") + os.Exit(1) } // CRD CHECK // - verify if the CRD for Policy & PolicyViolation are available - if !utils.CRDInstalled(client.DiscoveryClient) { - glog.Fatalf("Required CRDs unavailable") + if !utils.CRDInstalled(client.DiscoveryClient, log.Log) { + setupLog.Error(fmt.Errorf("pre-requisite CRDs not installed"), "Failed to create watch on kyverno CRDs") + os.Exit(1) } // KUBERNETES CLIENT kubeClient, err := utils.NewKubeClient(clientConfig) if err != nil { - glog.Fatalf("Error creating kubernetes client: %v\n", err) + setupLog.Error(err, "Failed to create kubernetes client") + os.Exit(1) } // TODO(shuting): To be removed for v1.2.0 - utils.CleanupOldCrd(client) + utils.CleanupOldCrd(client, log.Log) // KUBERNETES RESOURCES INFORMER // watches namespace resource @@ -96,16 +107,18 @@ func main() { clientConfig, client, serverIP, - int32(webhookTimeout)) + int32(webhookTimeout), + log.Log) // Resource Mutating Webhook Watcher - lastReqTime := checker.NewLastReqTime() + lastReqTime := checker.NewLastReqTime(log.Log.WithName("LastReqTime")) rWebhookWatcher := webhookconfig.NewResourceWebhookRegister( lastReqTime, kubeInformer.Admissionregistration().V1beta1().MutatingWebhookConfigurations(), 
kubeInformer.Admissionregistration().V1beta1().ValidatingWebhookConfigurations(), webhookRegistrationClient, runValidationInMutatingWebhook, + log.Log.WithName("ResourceWebhookRegister"), ) // KYVERNO CRD INFORMER @@ -124,23 +137,28 @@ func main() { configData := config.NewConfigData( kubeClient, kubeInformer.Core().V1().ConfigMaps(), - filterK8Resources) + filterK8Resources, + log.Log.WithName("ConfigData"), + ) // Policy meta-data store - policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies()) + policyMetaStore := policystore.NewPolicyStore(pInformer.Kyverno().V1().ClusterPolicies(), log.Log.WithName("PolicyStore")) // EVENT GENERATOR // - generate event with retry mechanism egen := event.NewEventGenerator( client, - pInformer.Kyverno().V1().ClusterPolicies()) + pInformer.Kyverno().V1().ClusterPolicies(), + log.Log.WithName("EventGenerator")) // POLICY VIOLATION GENERATOR // -- generate policy violation pvgen := policyviolation.NewPVGenerator(pclient, client, pInformer.Kyverno().V1().ClusterPolicyViolations(), - pInformer.Kyverno().V1().PolicyViolations()) + pInformer.Kyverno().V1().PolicyViolations(), + log.Log.WithName("PolicyViolationGenerator"), + ) // POLICY CONTROLLER // - reconciliation policy and policy violation @@ -156,13 +174,16 @@ func main() { egen, pvgen, policyMetaStore, - rWebhookWatcher) + rWebhookWatcher, + log.Log.WithName("PolicyController"), + ) if err != nil { - glog.Fatalf("error creating policy controller: %v\n", err) + setupLog.Error(err, "Failed to create policy controller") + os.Exit(1) } // GENERATE REQUEST GENERATOR - grgen := webhookgenerate.NewGenerator(pclient, stopCh) + grgen := webhookgenerate.NewGenerator(pclient, stopCh, log.Log.WithName("GenerateRequestGenerator")) // GENERATE CONTROLLER // - applies generate rules on resources based on generate requests created by webhook @@ -174,6 +195,7 @@ func main() { egen, pvgen, kubedynamicInformer, + log.Log.WithName("GenerateController"), ) // GENERATE REQUEST CLEANUP // -- cleans up the generate requests that have not been processed(i.e. 
state = [Pending, Failed]) for more than defined timeout @@ -183,12 +205,14 @@ func main() { pInformer.Kyverno().V1().ClusterPolicies(), pInformer.Kyverno().V1().GenerateRequests(), kubedynamicInformer, + log.Log.WithName("GenerateCleanUpController"), ) // CONFIGURE CERTIFICATES tlsPair, err := client.InitTLSPemPair(clientConfig, fqdncn) if err != nil { - glog.Fatalf("Failed to initialize TLS key/certificate pair: %v\n", err) + setupLog.Error(err, "Failed to initialize TLS key/certificate pair") + os.Exit(1) } // WEBHOOK REGISTRATION @@ -197,7 +221,8 @@ func main() { // resource webhook confgiuration is generated dynamically in the webhook server and policy controller // based on the policy resources created if err = webhookRegistrationClient.Register(); err != nil { - glog.Fatalf("Failed registering Admission Webhooks: %v\n", err) + setupLog.Error(err, "Failed to register Admission webhooks") + os.Exit(1) } // WEBHOOOK @@ -221,9 +246,12 @@ func main() { pvgen, grgen, rWebhookWatcher, - cleanUp) + cleanUp, + log.Log.WithName("WebhookServer"), + ) if err != nil { - glog.Fatalf("Unable to create webhook server: %v\n", err) + setupLog.Error(err, "Failed to create webhook server") + os.Exit(1) } // Start the components pInformer.Start(stopCh) @@ -259,7 +287,7 @@ func main() { // resource cleanup // remove webhook configurations <-cleanUp - glog.Info("successful shutdown of kyverno controller") + setupLog.Info("Kyverno shutdown successful") } func init() { @@ -271,6 +299,10 @@ func init() { // Generate CSR with CN as FQDN due to https://github.com/nirmata/kyverno/issues/542 flag.BoolVar(&fqdncn, "fqdn-as-cn", false, "use FQDN as Common Name in CSR") - config.LogDefaultFlags() + config.LogDefaultFlags(setupLog) + flag.StringVar(&verbosity, "verbosity", "", "set verbosity for logs") flag.Parse() + log.SetLogger(zap.New(func(o *zap.Options) { + o.Development = true + })) } diff --git a/pkg/api/kyverno/v1/zz_generated.deepcopy.go b/pkg/api/kyverno/v1/zz_generated.deepcopy.go index d4d7cc6820..ff1a7c63e4 100644 --- a/pkg/api/kyverno/v1/zz_generated.deepcopy.go +++ b/pkg/api/kyverno/v1/zz_generated.deepcopy.go @@ -74,7 +74,7 @@ func (in *ClusterPolicy) DeepCopyObject() runtime.Object { func (in *ClusterPolicyList) DeepCopyInto(out *ClusterPolicyList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterPolicy, len(*in)) @@ -135,7 +135,7 @@ func (in *ClusterPolicyViolation) DeepCopyObject() runtime.Object { func (in *ClusterPolicyViolationList) DeepCopyInto(out *ClusterPolicyViolationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]ClusterPolicyViolation, len(*in)) @@ -241,7 +241,7 @@ func (in *GenerateRequestContext) DeepCopy() *GenerateRequestContext { func (in *GenerateRequestList) DeepCopyInto(out *GenerateRequestList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]GenerateRequest, len(*in)) @@ -420,7 +420,7 @@ func (in *PolicyViolation) DeepCopyObject() runtime.Object { func (in *PolicyViolationList) DeepCopyInto(out *PolicyViolationList) { *out = *in out.TypeMeta = in.TypeMeta - out.ListMeta = in.ListMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) if in.Items != nil { in, out := &in.Items, &out.Items 
*out = make([]PolicyViolation, len(*in)) diff --git a/pkg/auth/auth.go b/pkg/auth/auth.go index 06d8ea22a2..d2ab06ffd1 100644 --- a/pkg/auth/auth.go +++ b/pkg/auth/auth.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/golang/glog" + "github.com/go-logr/logr" client "github.com/nirmata/kyverno/pkg/dclient" authorizationv1 "k8s.io/api/authorization/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -17,12 +17,14 @@ type CanIOptions struct { verb string kind string client *client.Client + log logr.Logger } //NewCanI returns a new instance of operation access controler evaluator -func NewCanI(client *client.Client, kind, namespace, verb string) *CanIOptions { +func NewCanI(client *client.Client, kind, namespace, verb string, log logr.Logger) *CanIOptions { o := CanIOptions{ client: client, + log: log, } o.namespace = namespace @@ -64,11 +66,12 @@ func (o *CanIOptions) RunAccessCheck() (bool, error) { // - verb // - resource // - subresource + logger := o.log.WithValues("kind", sar.Kind, "namespace", sar.Namespace, "name", sar.Name) // Create the Resource resp, err := o.client.CreateResource("SelfSubjectAccessReview", "", sar, false) if err != nil { - glog.Errorf("failed to create resource %s/%s/%s", sar.Kind, sar.Namespace, sar.Name) + logger.Error(err, "failed to create resource") return false, err } @@ -76,9 +79,9 @@ func (o *CanIOptions) RunAccessCheck() (bool, error) { allowed, ok, err := unstructured.NestedBool(resp.Object, "status", "allowed") if !ok { if err != nil { - glog.Errorf("unexpected error when getting status.allowed for %s/%s/%s", sar.Kind, sar.Namespace, sar.Name) + logger.Error(err, "failed to get the field", "field", "status.allowed") } - glog.Errorf("status.allowed not found for %s/%s/%s", sar.Kind, sar.Namespace, sar.Name) + logger.Info("field not found", "field", "status.allowed") } if !allowed { @@ -86,22 +89,21 @@ func (o *CanIOptions) RunAccessCheck() (bool, error) { reason, ok, err := unstructured.NestedString(resp.Object, "status", "reason") if !ok { if err != nil { - glog.Errorf("unexpected error when getting status.reason for %s/%s/%s", sar.Kind, sar.Namespace, sar.Name) + logger.Error(err, "failed to get the field", "field", "status.reason") } - glog.Errorf("status.reason not found for %s/%s/%s", sar.Kind, sar.Namespace, sar.Name) + logger.Info("field not found", "field", "status.reason") } // status.evaluationError evaluationError, ok, err := unstructured.NestedString(resp.Object, "status", "evaludationError") if !ok { if err != nil { - glog.Errorf("unexpected error when getting status.evaluationError for %s/%s/%s", sar.Kind, sar.Namespace, sar.Name) + logger.Error(err, "failed to get the field", "field", "status.evaluationError") } - glog.Errorf("status.evaluationError not found for %s/%s/%s", sar.Kind, sar.Namespace, sar.Name) + logger.Info("field not found", "field", "status.evaluationError") } // Reporting ? 
(just logs) - glog.Errorf("reason to disallow operation: %s", reason) - glog.Errorf("evaluationError to disallow operation: %s", evaluationError) + logger.Info("disallowed operation", "reason", reason, "evaluationError", evaluationError) } return allowed, nil diff --git a/pkg/checker/checker.go b/pkg/checker/checker.go index 762776dffd..16b8276d40 100644 --- a/pkg/checker/checker.go +++ b/pkg/checker/checker.go @@ -4,7 +4,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1" dclient "github.com/nirmata/kyverno/pkg/dclient" "github.com/nirmata/kyverno/pkg/event" @@ -20,8 +20,9 @@ const ( // LastReqTime stores the lastrequest times for incoming api-requests type LastReqTime struct { - t time.Time - mu sync.RWMutex + t time.Time + mu sync.RWMutex + log logr.Logger } //Time returns the lastrequest time @@ -39,16 +40,17 @@ func (t *LastReqTime) SetTime(tm time.Time) { } //NewLastReqTime returns a new instance of LastRequestTime store -func NewLastReqTime() *LastReqTime { +func NewLastReqTime(log logr.Logger) *LastReqTime { return &LastReqTime{ - t: time.Now(), + t: time.Now(), + log: log, } } -func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolicyLister) bool { +func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolicyLister, log logr.Logger) bool { policies, err := pLister.ListResources(labels.NewSelector()) if err != nil { - glog.Error() + log.Error(err, "failed to list cluster policies") } for _, policy := range policies { if policy.HasMutateOrValidateOrGenerate() { @@ -62,15 +64,16 @@ func checkIfPolicyWithMutateAndGenerateExists(pLister kyvernolister.ClusterPolic //Run runs the checker and verify the resource update func (t *LastReqTime) Run(pLister kyvernolister.ClusterPolicyLister, eventGen event.Interface, client *dclient.Client, defaultResync time.Duration, deadline time.Duration, stopCh <-chan struct{}) { - glog.V(2).Infof("starting default resync for webhook checker with resync time %d nanoseconds", defaultResync) + logger := t.log + logger.V(2).Info("starting default resync for webhook checker", "resyncTime", defaultResync) maxDeadline := deadline * time.Duration(MaxRetryCount) ticker := time.NewTicker(defaultResync) /// interface to update and increment kyverno webhook status via annotations - statuscontrol := NewVerifyControl(client, eventGen) + statuscontrol := NewVerifyControl(client, eventGen, logger.WithName("StatusControl")) // send the initial update status - if checkIfPolicyWithMutateAndGenerateExists(pLister) { + if checkIfPolicyWithMutateAndGenerateExists(pLister, logger) { if err := statuscontrol.SuccessStatus(); err != nil { - glog.Error(err) + logger.Error(err, "failed to set 'success' status") } } @@ -84,36 +87,36 @@ func (t *LastReqTime) Run(pLister kyvernolister.ClusterPolicyLister, eventGen ev case <-ticker.C: // if there are no policies then we dont have a webhook on resource. 
// we indirectly check if the resource - if !checkIfPolicyWithMutateAndGenerateExists(pLister) { + if !checkIfPolicyWithMutateAndGenerateExists(pLister, logger) { continue } // get current time timeDiff := time.Since(t.Time()) if timeDiff > maxDeadline { - glog.Infof("failed to receive any request for more than %v ", maxDeadline) - glog.Info("Admission Control failing: Webhook is not receiving requests forwarded by api-server as per webhook configurations") + logger.Info("request exceeded max deadline", "deadline", maxDeadline) + logger.Info("Admission Control failing: Webhook is not receiving requests forwarded by api-server as per webhook configurations") // set the status unavailable if err := statuscontrol.FailedStatus(); err != nil { - glog.Error(err) + logger.Error(err, "failed to set 'failed' status") } continue } if timeDiff > deadline { - glog.Info("Admission Control failing: Webhook is not receiving requests forwarded by api-server as per webhook configurations") + logger.Info("Admission Control failing: Webhook is not receiving requests forwarded by api-server as per webhook configurations") // send request to update the kyverno deployment if err := statuscontrol.IncrementAnnotation(); err != nil { - glog.Error(err) + logger.Error(err, "failed to increment annotation") } continue } // if the status was false before then we update it to true // send request to update the kyverno deployment if err := statuscontrol.SuccessStatus(); err != nil { - glog.Error(err) + logger.Error(err, "failed to update success status") } case <-stopCh: // handler termination signal - glog.V(2).Infof("stopping default resync for webhook checker") + logger.V(2).Info("stopping default resync for webhook checker") return } } diff --git a/pkg/checker/status.go b/pkg/checker/status.go index 39b19189dd..110abf8eef 100644 --- a/pkg/checker/status.go +++ b/pkg/checker/status.go @@ -4,7 +4,7 @@ import ( "fmt" "strconv" - "github.com/golang/glog" + "github.com/go-logr/logr" dclient "github.com/nirmata/kyverno/pkg/dclient" "github.com/nirmata/kyverno/pkg/event" ) @@ -29,6 +29,7 @@ type StatusInterface interface { type StatusControl struct { client *dclient.Client eventGen event.Interface + log logr.Logger } //SuccessStatus ... @@ -42,20 +43,22 @@ func (vc StatusControl) FailedStatus() error { } // NewVerifyControl ... 
-func NewVerifyControl(client *dclient.Client, eventGen event.Interface) *StatusControl { +func NewVerifyControl(client *dclient.Client, eventGen event.Interface, log logr.Logger) *StatusControl { return &StatusControl{ client: client, eventGen: eventGen, + log: log, } } func (vc StatusControl) setStatus(status string) error { - glog.Infof("setting deployment %s in ns %s annotation %s to %s", deployName, deployNamespace, annWebhookStats, status) + logger := vc.log + logger.Info(fmt.Sprintf("setting deployment %s in ns %s annotation %s to %s", deployName, deployNamespace, annWebhookStats, status)) var ann map[string]string var err error deploy, err := vc.client.GetResource("Deployment", deployNamespace, deployName) if err != nil { - glog.V(4).Infof("failed to get deployment %s in namespace %s: %v", deployName, deployNamespace, err) + logger.Error(err, "failed to get deployment resource") return err } ann = deploy.GetAnnotations() @@ -67,7 +70,7 @@ func (vc StatusControl) setStatus(status string) error { if ok { // annotatiaion is present if webhookAction == status { - glog.V(4).Infof("annotation %s already set to '%s'", annWebhookStats, status) + logger.V(4).Info(fmt.Sprintf("annotation %s already set to '%s'", annWebhookStats, status)) return nil } } @@ -77,7 +80,7 @@ func (vc StatusControl) setStatus(status string) error { // update counter _, err = vc.client.UpdateResource("Deployment", deployNamespace, deploy, false) if err != nil { - glog.V(4).Infof("failed to update annotation %s for deployment %s in namespace %s: %v", annWebhookStats, deployName, deployNamespace, err) + logger.Error(err, fmt.Sprintf("failed to update annotation %s for deployment %s in namespace %s", annWebhookStats, deployName, deployNamespace)) return err } // create event on kyverno deployment @@ -97,12 +100,13 @@ func createStatusUpdateEvent(status string, eventGen event.Interface) { //IncrementAnnotation ... 
func (vc StatusControl) IncrementAnnotation() error { - glog.Infof("setting deployment %s in ns %s annotation %s", deployName, deployNamespace, annCounter) + logger := vc.log + logger.Info(fmt.Sprintf("setting deployment %s in ns %s annotation %s", deployName, deployNamespace, annCounter)) var ann map[string]string var err error deploy, err := vc.client.GetResource("Deployment", deployNamespace, deployName) if err != nil { - glog.V(4).Infof("failed to get deployment %s in namespace %s: %v", deployName, deployNamespace, err) + logger.Error(err, fmt.Sprintf("failed to get deployment %s in namespace %s", deployName, deployNamespace)) return err } ann = deploy.GetAnnotations() @@ -112,18 +116,18 @@ func (vc StatusControl) IncrementAnnotation() error { } counter, err := strconv.Atoi(ann[annCounter]) if err != nil { - glog.V(4).Infof("failed to parse string: %v", err) + logger.Error(err, "failed to parse annotation counter") return err } // increment counter counter++ ann[annCounter] = strconv.Itoa(counter) - glog.Infof("incrementing annotation %s counter to %d", annCounter, counter) + logger.Info("incrementing annotation", "annotation", annCounter, "count", counter) deploy.SetAnnotations(ann) // update counter _, err = vc.client.UpdateResource("Deployment", deployNamespace, deploy, false) if err != nil { - glog.V(4).Infof("failed to update annotation %s for deployment %s in namespace %s: %v", annCounter, deployName, deployNamespace, err) + logger.Error(err, fmt.Sprintf("failed to update annotation %s for deployment %s in namespace %s", annCounter, deployName, deployNamespace)) return err } return nil diff --git a/pkg/client/clientset/versioned/clientset.go b/pkg/client/clientset/versioned/clientset.go index 5fbce444ff..8aa7b0e529 100644 --- a/pkg/client/clientset/versioned/clientset.go +++ b/pkg/client/clientset/versioned/clientset.go @@ -19,6 +19,8 @@ limitations under the License. package versioned import ( + "fmt" + kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1" discovery "k8s.io/client-go/discovery" rest "k8s.io/client-go/rest" @@ -51,9 +53,14 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface { } // NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. 
func NewForConfig(c *rest.Config) (*Clientset, error) { configShallowCopy := *c if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("Burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) } var cs Clientset diff --git a/pkg/client/clientset/versioned/fake/clientset_generated.go b/pkg/client/clientset/versioned/fake/clientset_generated.go index 215b1d997a..c3264dcb64 100644 --- a/pkg/client/clientset/versioned/fake/clientset_generated.go +++ b/pkg/client/clientset/versioned/fake/clientset_generated.go @@ -41,7 +41,7 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { } } - cs := &Clientset{} + cs := &Clientset{tracker: o} cs.discovery = &fakediscovery.FakeDiscovery{Fake: &cs.Fake} cs.AddReactor("*", "*", testing.ObjectReaction(o)) cs.AddWatchReactor("*", func(action testing.Action) (handled bool, ret watch.Interface, err error) { @@ -63,12 +63,17 @@ func NewSimpleClientset(objects ...runtime.Object) *Clientset { type Clientset struct { testing.Fake discovery *fakediscovery.FakeDiscovery + tracker testing.ObjectTracker } func (c *Clientset) Discovery() discovery.DiscoveryInterface { return c.discovery } +func (c *Clientset) Tracker() testing.ObjectTracker { + return c.tracker +} + var _ clientset.Interface = &Clientset{} // KyvernoV1 retrieves the KyvernoV1Client diff --git a/pkg/client/clientset/versioned/typed/kyverno/v1/kyverno_client.go b/pkg/client/clientset/versioned/typed/kyverno/v1/kyverno_client.go index 54d8944680..8c1b5a0ea8 100644 --- a/pkg/client/clientset/versioned/typed/kyverno/v1/kyverno_client.go +++ b/pkg/client/clientset/versioned/typed/kyverno/v1/kyverno_client.go @@ -21,7 +21,6 @@ package v1 import ( v1 "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme" - serializer "k8s.io/apimachinery/pkg/runtime/serializer" rest "k8s.io/client-go/rest" ) @@ -86,7 +85,7 @@ func setConfigDefaults(config *rest.Config) error { gv := v1.SchemeGroupVersion config.GroupVersion = &gv config.APIPath = "/apis" - config.NegotiatedSerializer = serializer.DirectCodecFactory{CodecFactory: scheme.Codecs} + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() if config.UserAgent == "" { config.UserAgent = rest.DefaultKubernetesUserAgent() diff --git a/pkg/config/config.go b/pkg/config/config.go index 241728a2c7..bb9dfae909 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -2,8 +2,9 @@ package config import ( "flag" + "os" - "github.com/golang/glog" + "github.com/go-logr/logr" rest "k8s.io/client-go/rest" clientcmd "k8s.io/client-go/tools/clientcmd" ) @@ -74,29 +75,34 @@ var ( VerifyMutatingWebhookServicePath = "/verifymutate" ) -//LogDefaultFlags sets default glog flags -func LogDefaultFlags() { +//LogDefaultFlags sets default flags +func LogDefaultFlags(log logr.Logger) { + logger := log.WithName("LogDefaultFlags") var err error err = flag.Set("logtostderr", "true") if err != nil { - glog.Fatalf("failed to set flag 'logtostderr' to 'true':%v", err) + logger.Error(err, "failed to set flag", "flag", "logtostderr", "value", "true") + os.Exit(1) } err = flag.Set("stderrthreshold", "WARNING") if err != nil { - glog.Fatalf("failed to set flag 'stderrthreshold' to 'WARNING':%v", err) + logger.Error(err, "failed to set flag", "flag", "stderrthreshold", "value", 
"WARNING") + os.Exit(1) } flag.Set("v", "2") if err != nil { - glog.Fatalf("failed to set flag 'v' to '2':%v", err) + logger.Error(err, "failed to set flag", "flag", "v", "value", "2") + os.Exit(1) } } //CreateClientConfig creates client config -func CreateClientConfig(kubeconfig string) (*rest.Config, error) { +func CreateClientConfig(kubeconfig string, log logr.Logger) (*rest.Config, error) { + logger := log.WithName("CreateClientConfig") if kubeconfig == "" { - glog.Info("Using in-cluster configuration") + logger.Info("Using in-cluster configuration") return rest.InClusterConfig() } - glog.V(4).Infof("Using configuration from '%s'", kubeconfig) + logger.V(4).Info("Using specified kubeconfig", "kubeconfig", kubeconfig) return clientcmd.BuildConfigFromFlags("", kubeconfig) } diff --git a/pkg/config/dynamicconfig.go b/pkg/config/dynamicconfig.go index 6bf624e7bb..1787f67bd9 100644 --- a/pkg/config/dynamicconfig.go +++ b/pkg/config/dynamicconfig.go @@ -1,14 +1,13 @@ package config import ( - "fmt" "os" "reflect" "regexp" "strings" "sync" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/minio/minio/pkg/wildcard" v1 "k8s.io/api/core/v1" informers "k8s.io/client-go/informers/core/v1" @@ -31,6 +30,7 @@ type ConfigData struct { filters []k8Resource // hasynced cmSycned cache.InformerSynced + log logr.Logger } // ToFilter checks if the given resource is set to be filtered in the configuration @@ -51,20 +51,21 @@ type Interface interface { } // NewConfigData ... -func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, filterK8Resources string) *ConfigData { +func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapInformer, filterK8Resources string, log logr.Logger) *ConfigData { // environment var is read at start only if cmNameEnv == "" { - glog.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration") + log.Info("ConfigMap name not defined in env:INIT_CONFIG: loading no default configuration") } cd := ConfigData{ client: rclient, cmName: os.Getenv(cmNameEnv), cmSycned: cmInformer.Informer().HasSynced, + log: log, } //TODO: this has been added to backward support command line arguments // will be removed in future and the configuration will be set only via configmaps if filterK8Resources != "" { - glog.Info("Init configuration from commandline arguments") + cd.log.Info("init configuration from commandline arguments") cd.initFilters(filterK8Resources) } @@ -78,9 +79,10 @@ func NewConfigData(rclient kubernetes.Interface, cmInformer informers.ConfigMapI //Run checks syncing func (cd *ConfigData) Run(stopCh <-chan struct{}) { + logger := cd.log // wait for cache to populate first time if !cache.WaitForCacheSync(stopCh, cd.cmSycned) { - glog.Error("configuration: failed to sync informer cache") + logger.Info("configuration: failed to sync informer cache") } } @@ -103,16 +105,17 @@ func (cd *ConfigData) updateCM(old, cur interface{}) { } func (cd *ConfigData) deleteCM(obj interface{}) { + logger := cd.log cm, ok := obj.(*v1.ConfigMap) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + logger.Info("failed to get object from tombstone") return } _, ok = tombstone.Obj.(*v1.ConfigMap) if !ok { - glog.Info(fmt.Errorf("Tombstone contained object that is not a ConfigMap %#v", obj)) + logger.Info("Tombstone contained object that is not a ConfigMap", "object", obj) return } } @@ -125,19 +128,20 @@ func (cd 
*ConfigData) deleteCM(obj interface{}) { } func (cd *ConfigData) load(cm v1.ConfigMap) { + logger := cd.log.WithValues("name", cm.Name, "namespace", cm.Namespace) if cm.Data == nil { - glog.V(4).Infof("Configuration: No data defined in ConfigMap %s", cm.Name) + logger.V(4).Info("configuration: No data defined in ConfigMap") return } // get resource filters filters, ok := cm.Data["resourceFilters"] if !ok { - glog.V(4).Infof("Configuration: No resourceFilters defined in ConfigMap %s", cm.Name) + logger.V(4).Info("configuration: No resourceFilters defined in ConfigMap") return } // filters is a string if filters == "" { - glog.V(4).Infof("Configuration: resourceFilters is empty in ConfigMap %s", cm.Name) + logger.V(4).Info("configuration: resourceFilters is empty in ConfigMap") return } // parse and load the configuration @@ -146,11 +150,10 @@ func (cd *ConfigData) load(cm v1.ConfigMap) { newFilters := parseKinds(filters) if reflect.DeepEqual(newFilters, cd.filters) { - glog.V(4).Infof("Configuration: resourceFilters did not change in ConfigMap %s", cm.Name) + logger.V(4).Info("resourceFilters did not change") return } - glog.V(4).Infof("Configuration: Old resource filters %v", cd.filters) - glog.Infof("Configuration: New resource filters to %v", newFilters) + logger.V(4).Info(" Updated resource filters", "oldFilters", cd.filters, "newFilters", newFilters) // update filters cd.filters = newFilters } @@ -158,20 +161,20 @@ func (cd *ConfigData) load(cm v1.ConfigMap) { //TODO: this has been added to backward support command line arguments // will be removed in future and the configuration will be set only via configmaps func (cd *ConfigData) initFilters(filters string) { + logger := cd.log // parse and load the configuration cd.mux.Lock() defer cd.mux.Unlock() newFilters := parseKinds(filters) - glog.Infof("Configuration: Init resource filters to %v", newFilters) + logger.Info("Init resource filters", "filters", newFilters) // update filters cd.filters = newFilters } func (cd *ConfigData) unload(cm v1.ConfigMap) { - // TODO pick one msg - glog.Infof("Configuration: ConfigMap %s deleted, removing configuration filters", cm.Name) - glog.Infof("Configuration: Removing all resource filters as ConfigMap %s deleted", cm.Name) + logger := cd.log + logger.Info("ConfigMap deleted, removing configuration filters", "name", cm.Name, "namespace", cm.Namespace) cd.mux.Lock() defer cd.mux.Unlock() cd.filters = []k8Resource{} diff --git a/pkg/dclient/certificates.go b/pkg/dclient/certificates.go index 76b3cda819..125dc7cec3 100644 --- a/pkg/dclient/certificates.go +++ b/pkg/dclient/certificates.go @@ -6,7 +6,6 @@ import ( "net/url" "time" - "github.com/golang/glog" "github.com/nirmata/kyverno/pkg/config" tls "github.com/nirmata/kyverno/pkg/tls" certificates "k8s.io/api/certificates/v1beta1" @@ -19,13 +18,14 @@ import ( // Created pair is stored in cluster's secret. // Returns struct with key/certificate pair. 
func (c *Client) InitTLSPemPair(configuration *rest.Config, fqdncn bool) (*tls.TlsPemPair, error) { + logger := c.log certProps, err := c.GetTLSCertProps(configuration) if err != nil { return nil, err } tlsPair := c.ReadTlsPair(certProps) if tls.IsTLSPairShouldBeUpdated(tlsPair) { - glog.Info("Generating new key/certificate pair for TLS") + logger.Info("Generating new key/certificate pair for TLS") tlsPair, err = c.generateTLSPemPair(certProps, fqdncn) if err != nil { return nil, err @@ -35,8 +35,7 @@ func (c *Client) InitTLSPemPair(configuration *rest.Config, fqdncn bool) (*tls.T } return tlsPair, nil } - - glog.Infoln("Using existing TLS key/certificate pair") + logger.Info("Using existing TLS key/certificate pair") return tlsPair, nil } @@ -71,6 +70,7 @@ func (c *Client) generateTLSPemPair(props tls.TlsCertificateProps, fqdncn bool) // Submits and approves certificate request, returns request which need to be fetched func (c *Client) submitAndApproveCertificateRequest(req *certificates.CertificateSigningRequest) (*certificates.CertificateSigningRequest, error) { + logger := c.log.WithName("submitAndApproveCertificateRequest") certClient, err := c.GetCSRInterface() if err != nil { return nil, err @@ -86,7 +86,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat if err != nil { return nil, fmt.Errorf("Unable to delete existing certificate request: %v", err) } - glog.Info("Old certificate request is deleted") + logger.Info("Old certificate request is deleted") break } } @@ -95,7 +95,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat if err != nil { return nil, err } - glog.Infof("Certificate request %s is created", unstrRes.GetName()) + logger.Info("Certificate request created", "name", unstrRes.GetName()) res, err := convertToCSR(unstrRes) if err != nil { @@ -110,7 +110,7 @@ func (c *Client) submitAndApproveCertificateRequest(req *certificates.Certificat if err != nil { return nil, fmt.Errorf("Unable to approve certificate request: %v", err) } - glog.Infof("Certificate request %s is approved", res.ObjectMeta.Name) + logger.Info("Certificate request is approved", "name", res.ObjectMeta.Name) return res, nil } @@ -144,9 +144,10 @@ func (c *Client) fetchCertificateFromRequest(req *certificates.CertificateSignin //ReadRootCASecret returns the RootCA from the pre-defined secret func (c *Client) ReadRootCASecret() (result []byte) { + logger := c.log.WithName("ReadRootCASecret") certProps, err := c.GetTLSCertProps(c.clientConfig) if err != nil { - glog.Error(err) + logger.Error(err, "failed to get TLS Cert Properties") return result } sname := generateRootCASecretName(certProps) @@ -156,16 +157,16 @@ func (c *Client) ReadRootCASecret() (result []byte) { } tlsca, err := convertToSecret(stlsca) if err != nil { - glog.Error(err) + logger.Error(err, "failed to convert secret", "name", sname, "namespace", certProps.Namespace) return result } result = tlsca.Data[rootCAKey] if len(result) == 0 { - glog.Warningf("root CA certificate not found in secret %s/%s", certProps.Namespace, tlsca.Name) + logger.Info("root CA certificate not found in secret", "name", tlsca.Name, "namespace", certProps.Namespace) return result } - glog.V(4).Infof("using CA bundle defined in secret %s/%s to validate the webhook's server certificate", certProps.Namespace, tlsca.Name) + logger.V(4).Info("using CA bundle defined in secret to validate the webhook's server certificate", "name", tlsca.Name, "namespace", certProps.Namespace) return result } @@ -174,10 
+175,11 @@ const rootCAKey string = "rootCA.crt" //ReadTlsPair Reads the pair of TLS certificate and key from the specified secret. func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair { + logger := c.log.WithName("ReadTlsPair") sname := generateTLSPairSecretName(props) unstrSecret, err := c.GetResource(Secrets, props.Namespace, sname) if err != nil { - glog.Warningf("Unable to get secret %s/%s: %s", props.Namespace, sname, err) + logger.Error(err, "Failed to get secret", "name", sname, "namespace", props.Namespace) return nil } @@ -188,7 +190,7 @@ func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair { sname := generateRootCASecretName(props) _, err := c.GetResource(Secrets, props.Namespace, sname) if err != nil { - glog.Errorf("Root CA secret %s/%s is required while using self-signed certificates TLS pair, defaulting to generating new TLS pair", props.Namespace, sname) + logger.Error(err, "Root CA secret is required while using self-signed certificates TLS pair, defaulting to generating new TLS pair", "name", sname, "namespace", props.Namespace) return nil } } @@ -201,11 +203,11 @@ func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair { PrivateKey: secret.Data[v1.TLSPrivateKeyKey], } if len(pemPair.Certificate) == 0 { - glog.Warningf("TLS Certificate not found in secret %s/%s", props.Namespace, sname) + logger.Info("TLS Certificate not found in secret", "name", sname, "namespace", props.Namespace) return nil } if len(pemPair.PrivateKey) == 0 { - glog.Warningf("TLS PrivateKey not found in secret %s/%s", props.Namespace, sname) + logger.Info("TLS PrivateKey not found in secret", "name", sname, "namespace", props.Namespace) return nil } return &pemPair @@ -214,6 +216,7 @@ func (c *Client) ReadTlsPair(props tls.TlsCertificateProps) *tls.TlsPemPair { //WriteTlsPair Writes the pair of TLS certificate and key to the specified secret. // Updates existing secret or creates new one. 
func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPemPair) error { + logger := c.log.WithName("WriteTlsPair") name := generateTLSPairSecretName(props) _, err := c.GetResource(Secrets, props.Namespace, name) if err != nil { @@ -235,7 +238,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem _, err := c.CreateResource(Secrets, props.Namespace, secret, false) if err == nil { - glog.Infof("Secret %s is created", name) + logger.Info("secret created", "name", name, "namespace", props.Namespace) } return err } @@ -251,7 +254,7 @@ func (c *Client) WriteTlsPair(props tls.TlsCertificateProps, pemPair *tls.TlsPem if err != nil { return err } - glog.Infof("Secret %s is updated", name) + logger.Info("secret updated", "name", name, "namespace", props.Namespace) return nil } diff --git a/pkg/dclient/client.go b/pkg/dclient/client.go index 9bb212369e..9fbfe0a0b0 100644 --- a/pkg/dclient/client.go +++ b/pkg/dclient/client.go @@ -5,7 +5,7 @@ import ( "strings" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/config" apps "k8s.io/api/apps/v1" certificates "k8s.io/api/certificates/v1beta1" @@ -30,13 +30,15 @@ import ( //Client enables interaction with k8 resource type Client struct { client dynamic.Interface + log logr.Logger clientConfig *rest.Config kclient kubernetes.Interface DiscoveryClient IDiscovery } //NewClient creates new instance of client -func NewClient(config *rest.Config, resync time.Duration, stopCh <-chan struct{}) (*Client, error) { +func NewClient(config *rest.Config, resync time.Duration, stopCh <-chan struct{}, log logr.Logger) (*Client, error) { + dclient, err := dynamic.NewForConfig(config) if err != nil { return nil, err @@ -49,9 +51,10 @@ func NewClient(config *rest.Config, resync time.Duration, stopCh <-chan struct{} client: dclient, clientConfig: config, kclient: kclient, + log: log.WithName("Client"), } // Set discovery client - discoveryClient := ServerPreferredResources{memory.NewMemCacheClient(kclient.Discovery())} + discoveryClient := ServerPreferredResources{cachedClient: memory.NewMemCacheClient(kclient.Discovery()), log: client.log} // client will invalidate registered resources cache every x seconds, // As there is no way to identify if the registered resource is available or not // we will be invalidating the local cache, so the next request get a fresh cache @@ -187,7 +190,6 @@ func (c *Client) UpdateStatusResource(kind string, namespace string, obj interfa func convertToUnstructured(obj interface{}) *unstructured.Unstructured { unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(&obj) if err != nil { - glog.Errorf("Unable to convert : %v", err) return nil } return &unstructured.Unstructured{Object: unstructuredObj} @@ -225,22 +227,24 @@ func (c *Client) SetDiscovery(discoveryClient IDiscovery) { //ServerPreferredResources stores the cachedClient instance for discovery client type ServerPreferredResources struct { cachedClient discovery.CachedDiscoveryInterface + log logr.Logger } //Poll will keep invalidate the local cache func (c ServerPreferredResources) Poll(resync time.Duration, stopCh <-chan struct{}) { + logger := c.log.WithName("Poll") // start a ticker ticker := time.NewTicker(resync) defer func() { ticker.Stop() }() - glog.Infof("Starting registered resources sync: every %d seconds", resync) + logger.Info("starting registered resources sync", "period", resync) for { select { case <-stopCh: - glog.Info("Stopping registered resources 
sync") + logger.Info("stopping registered resources sync") return case <-ticker.C: // set cache as stale - glog.V(6).Info("invalidating local client cache for registered resources") + logger.V(6).Info("invalidating local client cache for registered resources") c.cachedClient.Invalidate() } } @@ -254,12 +258,12 @@ func (c ServerPreferredResources) Poll(resync time.Duration, stopCh <-chan struc func (c ServerPreferredResources) GetGVRFromKind(kind string) schema.GroupVersionResource { var gvr schema.GroupVersionResource var err error - gvr, err = loadServerResources(kind, c.cachedClient) + gvr, err = loadServerResources(kind, c.cachedClient, c.log) if err != nil && !c.cachedClient.Fresh() { // invalidate cahce & re-try once more c.cachedClient.Invalidate() - gvr, err = loadServerResources(kind, c.cachedClient) + gvr, err = loadServerResources(kind, c.cachedClient, c.log) if err == nil { return gvr } @@ -272,11 +276,12 @@ func (c ServerPreferredResources) GetServerVersion() (*version.Info, error) { return c.cachedClient.ServerVersion() } -func loadServerResources(k string, cdi discovery.CachedDiscoveryInterface) (schema.GroupVersionResource, error) { - serverresources, err := cdi.ServerPreferredResources() +func loadServerResources(k string, cdi discovery.CachedDiscoveryInterface, log logr.Logger) (schema.GroupVersionResource, error) { + logger := log.WithName("loadServerResources") emptyGVR := schema.GroupVersionResource{} + serverresources, err := cdi.ServerPreferredResources() if err != nil { - glog.Error(err) + logger.Error(err, "failed to get registered preferred resources") return emptyGVR, err } for _, serverresource := range serverresources { @@ -286,7 +291,7 @@ func loadServerResources(k string, cdi discovery.CachedDiscoveryInterface) (sche if resource.Kind == k && !strings.Contains(resource.Name, "/") { gv, err := schema.ParseGroupVersion(serverresource.GroupVersion) if err != nil { - glog.Error(err) + logger.Error(err, "failed to parse groupVersion from schema", "groupVersion", serverresource.GroupVersion) return emptyGVR, err } return gv.WithResource(resource.Name), nil diff --git a/pkg/engine/anchor/anchor.go b/pkg/engine/anchor/anchor.go index 2d81a068ec..a8e7a1184a 100644 --- a/pkg/engine/anchor/anchor.go +++ b/pkg/engine/anchor/anchor.go @@ -4,7 +4,8 @@ import ( "fmt" "strconv" - "github.com/golang/glog" + "github.com/go-logr/logr" + "github.com/nirmata/kyverno/pkg/log" ) //ValidationHandler for element processes @@ -12,7 +13,7 @@ type ValidationHandler interface { Handle(handler resourceElementHandler, resourceMap map[string]interface{}, originPattern interface{}) (string, error) } -type resourceElementHandler = func(resourceElement, patternElement, originPattern interface{}, path string) (string, error) +type resourceElementHandler = func(log logr.Logger, resourceElement, patternElement, originPattern interface{}, path string) (string, error) //CreateElementHandler factory to process elements func CreateElementHandler(element string, pattern interface{}, path string) ValidationHandler { @@ -82,7 +83,7 @@ func (eh EqualityHandler) Handle(handler resourceElementHandler, resourceMap map // check if anchor is present in resource if value, ok := resourceMap[anchorKey]; ok { // validate the values of the pattern - returnPath, err := handler(value, eh.pattern, originPattern, currentPath) + returnPath, err := handler(log.Log, value, eh.pattern, originPattern, currentPath) if err != nil { return returnPath, err } @@ -115,7 +116,7 @@ func (dh DefaultHandler) Handle(handler 
resourceElementHandler, resourceMap map[ } else if dh.pattern == "*" && resourceMap[dh.element] == nil { return dh.path, fmt.Errorf("Validation rule failed at %s, Field %s is not present", dh.path, dh.element) } else { - path, err := handler(resourceMap[dh.element], dh.pattern, originPattern, currentPath) + path, err := handler(log.Log, resourceMap[dh.element], dh.pattern, originPattern, currentPath) if err != nil { return path, err } @@ -146,7 +147,7 @@ func (ch ConditionAnchorHandler) Handle(handler resourceElementHandler, resource // check if anchor is present in resource if value, ok := resourceMap[anchorKey]; ok { // validate the values of the pattern - returnPath, err := handler(value, ch.pattern, originPattern, currentPath) + returnPath, err := handler(log.Log, value, ch.pattern, originPattern, currentPath) if err != nil { return returnPath, err } @@ -194,7 +195,6 @@ func (eh ExistenceHandler) Handle(handler resourceElementHandler, resourceMap ma } return validateExistenceListResource(handler, typedResource, typedPatternMap, originPattern, currentPath) default: - glog.Error("Invalid type: Existence ^ () anchor can be used only on list/array type resource") return currentPath, fmt.Errorf("Invalid resource type %T: Existence ^ () anchor can be used only on list/array type resource", value) } } @@ -206,10 +206,9 @@ func validateExistenceListResource(handler resourceElementHandler, resourceList // if non satisfy then throw an error for i, resourceElement := range resourceList { currentPath := path + strconv.Itoa(i) + "/" - _, err := handler(resourceElement, patternMap, originPattern, currentPath) + _, err := handler(log.Log, resourceElement, patternMap, originPattern, currentPath) if err == nil { // condition is satisfied, dont check further - glog.V(4).Infof("Existence check satisfied at path %s, for pattern %v", currentPath, patternMap) return "", nil } } diff --git a/pkg/engine/context/context.go b/pkg/engine/context/context.go index 05805a0a91..21e1fed7fc 100644 --- a/pkg/engine/context/context.go +++ b/pkg/engine/context/context.go @@ -6,8 +6,9 @@ import ( "sync" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" + "github.com/nirmata/kyverno/pkg/log" ) //Interface to manage context operations @@ -33,6 +34,7 @@ type Context struct { mu sync.RWMutex jsonRaw []byte whiteListVars []string + log logr.Logger } //NewContext returns a new context @@ -42,6 +44,7 @@ func NewContext(whiteListVars ...string) *Context { // data: map[string]interface{}{}, jsonRaw: []byte(`{}`), // empty json struct whiteListVars: whiteListVars, + log: log.Log.WithName("context"), } return &ctx } @@ -54,7 +57,7 @@ func (ctx *Context) AddJSON(dataRaw []byte) error { // merge json ctx.jsonRaw, err = jsonpatch.MergePatch(ctx.jsonRaw, dataRaw) if err != nil { - glog.V(4).Infof("failed to merge JSON data: %v", err) + ctx.log.Error(err, "failed to merge JSON data") return err } return nil @@ -66,7 +69,7 @@ func (ctx *Context) AddResource(dataRaw []byte) error { // unmarshall the resource struct var data interface{} if err := json.Unmarshal(dataRaw, &data); err != nil { - glog.V(4).Infof("failed to unmarshall the context data: %v", err) + ctx.log.Error(err, "failed to unmarshall the resource") return err } @@ -82,7 +85,7 @@ func (ctx *Context) AddResource(dataRaw []byte) error { objRaw, err := json.Marshal(modifiedResource) if err != nil { - glog.V(4).Infof("failed to marshall the updated context data") + 
ctx.log.Error(err, "failed to marshal the resource") return err } return ctx.AddJSON(objRaw) @@ -98,7 +101,7 @@ func (ctx *Context) AddUserInfo(userRequestInfo kyverno.RequestInfo) error { objRaw, err := json.Marshal(modifiedResource) if err != nil { - glog.V(4).Infof("failed to marshall the updated context data") + ctx.log.Error(err, "failed to marshal the UserInfo") return err } return ctx.AddJSON(objRaw) @@ -118,8 +121,6 @@ func (ctx *Context) AddSA(userName string) error { // filter namespace groups := strings.Split(sa, ":") if len(groups) >= 2 { - glog.V(4).Infof("serviceAccount namespace: %s", groups[0]) - glog.V(4).Infof("serviceAccount name: %s", groups[1]) saName = groups[1] saNamespace = groups[0] } @@ -131,7 +132,7 @@ func (ctx *Context) AddSA(userName string) error { } saNameRaw, err := json.Marshal(saNameObj) if err != nil { - glog.V(4).Infof("failed to marshall the updated context data") + ctx.log.Error(err, "failed to marshal the SA") return err } if err := ctx.AddJSON(saNameRaw); err != nil { @@ -145,7 +146,7 @@ func (ctx *Context) AddSA(userName string) error { } saNsRaw, err := json.Marshal(saNsObj) if err != nil { - glog.V(4).Infof("failed to marshall the updated context data") + ctx.log.Error(err, "failed to marshal the SA namespace") return err } if err := ctx.AddJSON(saNsRaw); err != nil { diff --git a/pkg/engine/context/evaluate.go b/pkg/engine/context/evaluate.go index 78f9643497..f15a8a1e17 100644 --- a/pkg/engine/context/evaluate.go +++ b/pkg/engine/context/evaluate.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" - "github.com/golang/glog" jmespath "github.com/jmespath/go-jmespath" ) @@ -19,7 +18,7 @@ func (ctx *Context) Query(query string) (interface{}, error) { // compile the query queryPath, err := jmespath.Compile(query) if err != nil { - glog.V(4).Infof("incorrect query %s: %v", query, err) + ctx.log.Error(err, "incorrect query", "query", query) return emptyResult, fmt.Errorf("incorrect query %s: %v", query, err) } // search @@ -28,13 +27,13 @@ func (ctx *Context) Query(query string) (interface{}, error) { var data interface{} if err := json.Unmarshal(ctx.jsonRaw, &data); err != nil { - glog.V(4).Infof("failed to unmarshall context: %v", err) + ctx.log.Error(err, "failed to unmarshal context") return emptyResult, fmt.Errorf("failed to unmarshall context: %v", err) } result, err := queryPath.Search(data) if err != nil { - glog.V(4).Infof("failed to search query %s: %v", query, err) + ctx.log.Error(err, "failed to search query", "query", query) return emptyResult, fmt.Errorf("failed to search query %s: %v", query, err) } return result, nil diff --git a/pkg/engine/generation.go b/pkg/engine/generation.go index 25c7204851..94cb3d95df 100644 --- a/pkg/engine/generation.go +++ b/pkg/engine/generation.go @@ -1,11 +1,12 @@ package engine import ( - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/context" "github.com/nirmata/kyverno/pkg/engine/response" "github.com/nirmata/kyverno/pkg/engine/variables" + "github.com/nirmata/kyverno/pkg/log" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -18,23 +19,23 @@ func Generate(policyContext PolicyContext) (resp response.EngineResponse) { resource := policyContext.NewResource admissionInfo := policyContext.AdmissionInfo ctx := policyContext.Context - return filterRules(policy, resource, admissionInfo, ctx) + logger := log.Log.WithName("Generate").WithValues("policy", policy.Name, "kind", resource.GetKind(), 
"namespace", resource.GetNamespace(), "name", resource.GetName()) + return filterRules(policy, resource, admissionInfo, ctx, logger) } -func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface) *response.RuleResponse { +func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger) *response.RuleResponse { if !rule.HasGenerate() { return nil } if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil { - glog.V(4).Infof(err.Error()) return nil } // operate on the copy of the conditions, as we perform variable substitution copyConditions := copyConditions(rule.Conditions) // evaluate pre-conditions - if !variables.EvaluateConditions(ctx, copyConditions) { - glog.V(4).Infof("resource %s/%s does not satisfy the conditions for the rule ", resource.GetNamespace(), resource.GetName()) + if !variables.EvaluateConditions(log, ctx, copyConditions) { + log.V(4).Info("preconditions not satisfied, skipping rule", "rule", rule.Name) return nil } // build rule Response @@ -44,7 +45,7 @@ func filterRule(rule kyverno.Rule, resource unstructured.Unstructured, admission } } -func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface) response.EngineResponse { +func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo, ctx context.EvalInterface, log logr.Logger) response.EngineResponse { resp := response.EngineResponse{ PolicyResponse: response.PolicyResponse{ Policy: policy.Name, @@ -57,7 +58,7 @@ func filterRules(policy kyverno.ClusterPolicy, resource unstructured.Unstructure } for _, rule := range policy.Spec.Rules { - if ruleResp := filterRule(rule, resource, admissionInfo, ctx); ruleResp != nil { + if ruleResp := filterRule(rule, resource, admissionInfo, ctx, log); ruleResp != nil { resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, *ruleResp) } } diff --git a/pkg/engine/mutate/overlayCondition.go b/pkg/engine/mutate/overlayCondition.go index 134cc23cc0..62232d99da 100755 --- a/pkg/engine/mutate/overlayCondition.go +++ b/pkg/engine/mutate/overlayCondition.go @@ -8,6 +8,7 @@ import ( "github.com/golang/glog" "github.com/nirmata/kyverno/pkg/engine/anchor" "github.com/nirmata/kyverno/pkg/engine/validate" + "github.com/nirmata/kyverno/pkg/log" ) func meetConditions(resource, overlay interface{}) (string, overlayError) { @@ -139,7 +140,7 @@ func compareOverlay(resource, overlay interface{}, path string) (string, overlay } } case string, float64, int, int64, bool, nil: - if !validate.ValidateValueWithPattern(resource, overlay) { + if !validate.ValidateValueWithPattern(log.Log, resource, overlay) { glog.V(4).Infof("Mutate rule: failed validating value %v with overlay %v", resource, overlay) return path, newOverlayError(conditionFailure, fmt.Sprintf("Failed validating value %v with overlay %v", resource, overlay)) } diff --git a/pkg/engine/mutation.go b/pkg/engine/mutation.go index d1fab46211..52a356dcb1 100644 --- a/pkg/engine/mutation.go +++ b/pkg/engine/mutation.go @@ -5,11 +5,12 @@ import ( "strings" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/mutate" "github.com/nirmata/kyverno/pkg/engine/response" "github.com/nirmata/kyverno/pkg/engine/variables" + 
"github.com/nirmata/kyverno/pkg/log" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -28,26 +29,25 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) { policy := policyContext.Policy resource := policyContext.NewResource ctx := policyContext.Context - + logger := log.Log.WithName("Mutate").WithValues("policy", policy.Name, "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName()) + logger.V(4).Info("start processing", "startTime", startTime) startMutateResultResponse(&resp, policy, resource) - glog.V(4).Infof("started applying mutation rules of policy %q (%v)", policy.Name, startTime) - defer endMutateResultResponse(&resp, startTime) + defer endMutateResultResponse(logger, &resp, startTime) patchedResource := policyContext.NewResource for _, rule := range policy.Spec.Rules { var ruleResponse response.RuleResponse + logger := logger.WithValues("rule", rule.Name) //TODO: to be checked before calling the resources as well if !rule.HasMutate() && !strings.Contains(PodControllers, resource.GetKind()) { continue } - startTime := time.Now() - glog.V(4).Infof("Time: Mutate matchAdmissionInfo %v", time.Since(startTime)) // check if the resource satisfies the filter conditions defined in the rule //TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that // dont statisfy a policy rule resource description if err := MatchesResourceDescription(resource, rule, policyContext.AdmissionInfo); err != nil { - glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule:\n%s", resource.GetNamespace(), resource.GetName(), err.Error()) + logger.V(4).Info("resource fails the match description") continue } @@ -55,8 +55,8 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) { copyConditions := copyConditions(rule.Conditions) // evaluate pre-conditions // - handle variable subsitutions - if !variables.EvaluateConditions(ctx, copyConditions) { - glog.V(4).Infof("resource %s/%s does not satisfy the conditions for the rule ", resource.GetNamespace(), resource.GetName()) + if !variables.EvaluateConditions(logger, ctx, copyConditions) { + logger.V(4).Info("resource fails the preconditions") continue } @@ -66,7 +66,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) { overlay := mutation.Overlay // subsiitue the variables var err error - if overlay, err = variables.SubstituteVars(ctx, overlay); err != nil { + if overlay, err = variables.SubstituteVars(logger, ctx, overlay); err != nil { // variable subsitution failed ruleResponse.Success = false ruleResponse.Message = err.Error() @@ -78,11 +78,9 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) { if ruleResponse.Success { // - overlay pattern does not match the resource conditions if ruleResponse.Patches == nil { - glog.V(4).Infof(ruleResponse.Message) continue } - - glog.V(4).Infof("Mutate overlay in rule '%s' successfully applied on %s/%s/%s", rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName()) + logger.V(4).Info("overlay applied succesfully") } resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResponse) @@ -93,7 +91,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) { if rule.Mutation.Patches != nil { var ruleResponse response.RuleResponse ruleResponse, patchedResource = mutate.ProcessPatches(rule, patchedResource) - glog.Infof("Mutate patches in rule '%s' successfully applied on %s/%s/%s", 
rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName()) + logger.V(4).Info("patches applied successfully") resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResponse) incrementAppliedRuleCount(&resp) } @@ -108,12 +106,12 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) { var ruleResponse response.RuleResponse ruleResponse, patchedResource = mutate.ProcessOverlay(rule.Name, podTemplateRule, patchedResource) if !ruleResponse.Success { - glog.Errorf("Failed to insert annotation to podTemplate of %s/%s/%s: %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), ruleResponse.Message) + logger.Info("failed to insert annotation for podTemplate", "error", ruleResponse.Message) continue } if ruleResponse.Success && ruleResponse.Patches != nil { - glog.V(2).Infof("Inserted annotation to podTemplate of %s/%s/%s: %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), ruleResponse.Message) + logger.V(2).Info("inserted annotation for podTemplate") resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResponse) } } @@ -137,10 +135,9 @@ func startMutateResultResponse(resp *response.EngineResponse, policy kyverno.Clu // TODO(shuting): set response with mutationFailureAction } -func endMutateResultResponse(resp *response.EngineResponse, startTime time.Time) { +func endMutateResultResponse(logger logr.Logger, resp *response.EngineResponse, startTime time.Time) { resp.PolicyResponse.ProcessingTime = time.Since(startTime) - glog.V(4).Infof("finished applying mutation rules policy %v (%v)", resp.PolicyResponse.Policy, resp.PolicyResponse.ProcessingTime) - glog.V(4).Infof("Mutation Rules appplied count %v for policy %q", resp.PolicyResponse.RulesAppliedCount, resp.PolicyResponse.Policy) + logger.V(4).Info("finshed processing", "processingTime", resp.PolicyResponse.ProcessingTime, "mutationRulesApplied", resp.PolicyResponse.RulesAppliedCount) } // podTemplateRule mutate pod template with annotation diff --git a/pkg/engine/policyContext.go b/pkg/engine/policyContext.go index 20dd04c52c..3e1f7d22aa 100644 --- a/pkg/engine/policyContext.go +++ b/pkg/engine/policyContext.go @@ -1,6 +1,7 @@ package engine import ( + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" client "github.com/nirmata/kyverno/pkg/dclient" "github.com/nirmata/kyverno/pkg/engine/context" @@ -20,4 +21,6 @@ type PolicyContext struct { Client *client.Client // Contexts to store resources Context context.EvalInterface + // log + log logr.Logger } diff --git a/pkg/engine/utils.go b/pkg/engine/utils.go index c97b2eccb8..a6c9b39734 100644 --- a/pkg/engine/utils.go +++ b/pkg/engine/utils.go @@ -6,12 +6,11 @@ import ( "reflect" "time" + "github.com/nirmata/kyverno/pkg/log" "github.com/nirmata/kyverno/pkg/utils" authenticationv1 "k8s.io/api/authentication/v1" rbacv1 "k8s.io/api/rbac/v1" - "github.com/golang/glog" - "github.com/minio/minio/pkg/wildcard" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -53,7 +52,7 @@ func checkNameSpace(namespaces []string, resourceNameSpace string) bool { func checkSelector(labelSelector *metav1.LabelSelector, resourceLabels map[string]string) (bool, error) { selector, err := metav1.LabelSelectorAsSelector(labelSelector) if err != nil { - glog.Error(err) + log.Log.Error(err, "failed to build label selector") return false, err } diff --git a/pkg/engine/validate/pattern.go b/pkg/engine/validate/pattern.go index e7b37283bd..7779f9b6ca 
100644 --- a/pkg/engine/validate/pattern.go +++ b/pkg/engine/validate/pattern.go @@ -1,12 +1,13 @@ package validate import ( + "fmt" "math" "regexp" "strconv" "strings" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/minio/minio/pkg/wildcard" "github.com/nirmata/kyverno/pkg/engine/operator" apiresource "k8s.io/apimachinery/pkg/api/resource" @@ -21,52 +22,52 @@ const ( ) // ValidateValueWithPattern validates value with operators and wildcards -func ValidateValueWithPattern(value, pattern interface{}) bool { +func ValidateValueWithPattern(log logr.Logger, value, pattern interface{}) bool { switch typedPattern := pattern.(type) { case bool: typedValue, ok := value.(bool) if !ok { - glog.V(4).Infof("Expected bool, found %T", value) + log.V(4).Info("Expected type bool", "type", fmt.Sprintf("%T", value), "value", value) return false } return typedPattern == typedValue case int: - return validateValueWithIntPattern(value, int64(typedPattern)) + return validateValueWithIntPattern(log, value, int64(typedPattern)) case int64: - return validateValueWithIntPattern(value, typedPattern) + return validateValueWithIntPattern(log, value, typedPattern) case float64: - return validateValueWithFloatPattern(value, typedPattern) + return validateValueWithFloatPattern(log, value, typedPattern) case string: - return validateValueWithStringPatterns(value, typedPattern) + return validateValueWithStringPatterns(log, value, typedPattern) case nil: - return validateValueWithNilPattern(value) + return validateValueWithNilPattern(log, value) case map[string]interface{}: // TODO: check if this is ever called? - return validateValueWithMapPattern(value, typedPattern) + return validateValueWithMapPattern(log, value, typedPattern) case []interface{}: // TODO: check if this is ever called? 
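The type switch above (continued in the following hunks) is the single dispatch point for all pattern checks, so the new logr.Logger argument only changes call sites, not behaviour. A small usage sketch through the exported API, relying only on behaviours asserted by the pattern tests later in this diff; the test name and file are illustrative, not part of the patch:

package validate_test

import (
	"testing"

	"github.com/nirmata/kyverno/pkg/engine/validate"
	"github.com/nirmata/kyverno/pkg/log"
	"gotest.tools/assert"
)

// Numeric values are compared across int/float/string representations, and
// string patterns support "|"-separated alternatives with wildcards.
func TestValidateValueWithPattern_UsageSketch(t *testing.T) {
	// int value against a fraction-less float pattern
	assert.Assert(t, validate.ValidateValueWithPattern(log.Log, 7, 7.000))

	// string value parsed as a number when the pattern is an int
	assert.Assert(t, validate.ValidateValueWithPattern(log.Log, "7", 7))

	// logical OR of string alternatives, with a trailing wildcard
	pattern := "192.168.88.1 | 10.100.11.*"
	assert.Assert(t, validate.ValidateValueWithPattern(log.Log, "10.100.11.54", pattern))
	assert.Assert(t, !validate.ValidateValueWithPattern(log.Log, "172.16.0.1", pattern))
}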
- glog.Warning("Arrays as patterns are not supported") + log.Info("arrays as patterns is not supported") return false default: - glog.Warningf("Unknown type as pattern: %v", typedPattern) + log.Info("Unkown type", "type", fmt.Sprintf("%T", typedPattern), "value", typedPattern) return false } } -func validateValueWithMapPattern(value interface{}, typedPattern map[string]interface{}) bool { +func validateValueWithMapPattern(log logr.Logger, value interface{}, typedPattern map[string]interface{}) bool { // verify the type of the resource value is map[string]interface, // we only check for existence of object, not the equality of content and value //TODO: check if adding _, ok := value.(map[string]interface{}) if !ok { - glog.Warningf("Expected map[string]interface{}, found %T\n", value) + log.Info("Expected type map[string]interface{}", "type", fmt.Sprintf("%T", value), "value", value) return false } return true } // Handler for int values during validation process -func validateValueWithIntPattern(value interface{}, pattern int64) bool { +func validateValueWithIntPattern(log logr.Logger, value interface{}, pattern int64) bool { switch typedValue := value.(type) { case int: return int64(typedValue) == pattern @@ -78,38 +79,38 @@ func validateValueWithIntPattern(value interface{}, pattern int64) bool { return int64(typedValue) == pattern } - glog.Warningf("Expected int, found float: %f\n", typedValue) + log.Info("Expected type int", "type", fmt.Sprintf("%T", typedValue), "value", typedValue) return false case string: // extract int64 from string int64Num, err := strconv.ParseInt(typedValue, 10, 64) if err != nil { - glog.Warningf("Failed to parse int64 from string: %v", err) + log.Error(err, "Failed to parse int64 from string") return false } return int64Num == pattern default: - glog.Warningf("Expected int, found: %T\n", value) + log.Info("Expected type int", "type", fmt.Sprintf("%T", value), "value", value) return false } } // Handler for float values during validation process -func validateValueWithFloatPattern(value interface{}, pattern float64) bool { +func validateValueWithFloatPattern(log logr.Logger, value interface{}, pattern float64) bool { switch typedValue := value.(type) { case int: // check that float has no fraction if pattern == math.Trunc(pattern) { return int(pattern) == value } - glog.Warningf("Expected float, found int: %d\n", typedValue) + log.Info("Expected type float", "type", fmt.Sprintf("%T", typedValue), "value", typedValue) return false case int64: // check that float has no fraction if pattern == math.Trunc(pattern) { return int64(pattern) == value } - glog.Warningf("Expected float, found int: %d\n", typedValue) + log.Info("Expected type float", "type", fmt.Sprintf("%T", typedValue), "value", typedValue) return false case float64: return typedValue == pattern @@ -117,18 +118,18 @@ func validateValueWithFloatPattern(value interface{}, pattern float64) bool { // extract float64 from string float64Num, err := strconv.ParseFloat(typedValue, 64) if err != nil { - glog.Warningf("Failed to parse float64 from string: %v", err) + log.Error(err, "Failed to parse float64 from string") return false } return float64Num == pattern default: - glog.Warningf("Expected float, found: %T\n", value) + log.Info("Expected type float", "type", fmt.Sprintf("%T", value), "value", value) return false } } // Handler for nil values during validation process -func validateValueWithNilPattern(value interface{}) bool { +func validateValueWithNilPattern(log logr.Logger, value interface{}) bool { 
switch typed := value.(type) { case float64: return typed == 0.0 @@ -143,20 +144,20 @@ func validateValueWithNilPattern(value interface{}) bool { case nil: return true case map[string]interface{}, []interface{}: - glog.Warningf("Maps and arrays could not be checked with nil pattern") + log.Info("Maps and arrays could not be checked with nil pattern") return false default: - glog.Warningf("Unknown type as value when checking for nil pattern: %T\n", value) + log.Info("Unknown type as value when checking for nil pattern", "type", fmt.Sprintf("%T", value), "value", value) return false } } // Handler for pattern values during validation process -func validateValueWithStringPatterns(value interface{}, pattern string) bool { +func validateValueWithStringPatterns(log logr.Logger, value interface{}, pattern string) bool { statements := strings.Split(pattern, "|") for _, statement := range statements { statement = strings.Trim(statement, " ") - if validateValueWithStringPattern(value, statement) { + if validateValueWithStringPattern(log, value, statement) { return true } } @@ -166,24 +167,24 @@ func validateValueWithStringPatterns(value interface{}, pattern string) bool { // Handler for single pattern value during validation process // Detects if pattern has a number -func validateValueWithStringPattern(value interface{}, pattern string) bool { +func validateValueWithStringPattern(log logr.Logger, value interface{}, pattern string) bool { operator := operator.GetOperatorFromStringPattern(pattern) pattern = pattern[len(operator):] number, str := getNumberAndStringPartsFromPattern(pattern) if "" == number { - return validateString(value, str, operator) + return validateString(log, value, str, operator) } - return validateNumberWithStr(value, pattern, operator) + return validateNumberWithStr(log, value, pattern, operator) } // Handler for string values -func validateString(value interface{}, pattern string, operatorVariable operator.Operator) bool { +func validateString(log logr.Logger, value interface{}, pattern string, operatorVariable operator.Operator) bool { if operator.NotEqual == operatorVariable || operator.Equal == operatorVariable { strValue, ok := value.(string) if !ok { - glog.Warningf("Expected string, found %T\n", value) + log.Info("Expected type string", "type", fmt.Sprintf("%T", value), "value", value) return false } @@ -195,17 +196,16 @@ func validateString(value interface{}, pattern string, operatorVariable operator return wildcardResult } - - glog.Warningf("Operators >, >=, <, <= are not applicable to strings") + log.Info("Operators >, >=, <, <= are not applicable to strings") return false } // validateNumberWithStr compares quantity if pattern type is quantity // or a wildcard match to pattern string -func validateNumberWithStr(value interface{}, pattern string, operator operator.Operator) bool { +func validateNumberWithStr(log logr.Logger, value interface{}, pattern string, operator operator.Operator) bool { typedValue, err := convertToString(value) if err != nil { - glog.Warning(err) + log.Error(err, "failed to convert to string") return false } @@ -214,7 +214,7 @@ func validateNumberWithStr(value interface{}, pattern string, operator operator. 
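The remainder of validateNumberWithStr (continued in the hunk below) first tries to compare both sides as Kubernetes resource quantities and only then falls back to a wildcard string match, which is why the tests below expect "1024Mi" to equal "1Gi". A standalone sketch of that quantity comparison using only k8s.io/apimachinery; this is an illustration, not Kyverno code:

package main

import (
	"fmt"

	apiresource "k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Both sides parse as resource quantities and are compared with Cmp,
	// which returns 0 when the quantities are equal.
	patternQuan, err := apiresource.ParseQuantity("1Gi")
	if err != nil {
		panic(err)
	}
	valueQuan, err := apiresource.ParseQuantity("1024Mi")
	if err != nil {
		panic(err)
	}
	fmt.Println(valueQuan.Cmp(patternQuan) == 0) // true
}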
if err == nil { valueQuan, err := apiresource.ParseQuantity(typedValue) if err != nil { - glog.Warningf("Invalid quantity in resource %s, err: %v\n", typedValue, err) + log.Error(err, "invalid quantity in resource", "type", fmt.Sprintf("%T", typedValue), "value", typedValue) return false } @@ -223,7 +223,7 @@ func validateNumberWithStr(value interface{}, pattern string, operator operator. // 2. wildcard match if !wildcard.Match(pattern, typedValue) { - glog.Warningf("Value '%s' has not passed wildcard check: %s", typedValue, pattern) + log.Info("value failed wildcard check", "type", fmt.Sprintf("%T", typedValue), "value", typedValue, "check", pattern) return false } return true diff --git a/pkg/engine/validate/pattern_test.go b/pkg/engine/validate/pattern_test.go index 8ee4e34881..6783376aeb 100644 --- a/pkg/engine/validate/pattern_test.go +++ b/pkg/engine/validate/pattern_test.go @@ -5,14 +5,15 @@ import ( "testing" "github.com/nirmata/kyverno/pkg/engine/operator" + "github.com/nirmata/kyverno/pkg/log" "gotest.tools/assert" ) func TestValidateValueWithPattern_Bool(t *testing.T) { - assert.Assert(t, ValidateValueWithPattern(true, true)) - assert.Assert(t, !ValidateValueWithPattern(true, false)) - assert.Assert(t, !ValidateValueWithPattern(false, true)) - assert.Assert(t, ValidateValueWithPattern(false, false)) + assert.Assert(t, ValidateValueWithPattern(log.Log, true, true)) + assert.Assert(t, !ValidateValueWithPattern(log.Log, true, false)) + assert.Assert(t, !ValidateValueWithPattern(log.Log, false, true)) + assert.Assert(t, ValidateValueWithPattern(log.Log, false, false)) } func TestValidateString_AsteriskTest(t *testing.T) { @@ -20,8 +21,8 @@ func TestValidateString_AsteriskTest(t *testing.T) { value := "anything" empty := "" - assert.Assert(t, validateString(value, pattern, operator.Equal)) - assert.Assert(t, validateString(empty, pattern, operator.Equal)) + assert.Assert(t, validateString(log.Log, value, pattern, operator.Equal)) + assert.Assert(t, validateString(log.Log, empty, pattern, operator.Equal)) } func TestValidateString_LeftAsteriskTest(t *testing.T) { @@ -29,32 +30,32 @@ func TestValidateString_LeftAsteriskTest(t *testing.T) { value := "leftright" right := "right" - assert.Assert(t, validateString(value, pattern, operator.Equal)) - assert.Assert(t, validateString(right, pattern, operator.Equal)) + assert.Assert(t, validateString(log.Log, value, pattern, operator.Equal)) + assert.Assert(t, validateString(log.Log, right, pattern, operator.Equal)) value = "leftmiddle" middle := "middle" - assert.Assert(t, !validateString(value, pattern, operator.Equal)) - assert.Assert(t, !validateString(middle, pattern, operator.Equal)) + assert.Assert(t, !validateString(log.Log, value, pattern, operator.Equal)) + assert.Assert(t, !validateString(log.Log, middle, pattern, operator.Equal)) } func TestValidateString_MiddleAsteriskTest(t *testing.T) { pattern := "ab*ba" value := "abbeba" - assert.Assert(t, validateString(value, pattern, operator.Equal)) + assert.Assert(t, validateString(log.Log, value, pattern, operator.Equal)) value = "abbca" - assert.Assert(t, !validateString(value, pattern, operator.Equal)) + assert.Assert(t, !validateString(log.Log, value, pattern, operator.Equal)) } func TestValidateString_QuestionMark(t *testing.T) { pattern := "ab?ba" value := "abbba" - assert.Assert(t, validateString(value, pattern, operator.Equal)) + assert.Assert(t, validateString(log.Log, value, pattern, operator.Equal)) value = "abbbba" - assert.Assert(t, !validateString(value, pattern, 
operator.Equal)) + assert.Assert(t, !validateString(log.Log, value, pattern, operator.Equal)) } func TestValidateValueWithPattern_BoolInJson(t *testing.T) { @@ -76,7 +77,7 @@ func TestValidateValueWithPattern_BoolInJson(t *testing.T) { err = json.Unmarshal(rawValue, &value) assert.Assert(t, err) - assert.Assert(t, ValidateValueWithPattern(value["key"], pattern["key"])) + assert.Assert(t, ValidateValueWithPattern(log.Log, value["key"], pattern["key"])) } func TestValidateValueWithPattern_NullPatternStringValue(t *testing.T) { @@ -98,7 +99,7 @@ func TestValidateValueWithPattern_NullPatternStringValue(t *testing.T) { err = json.Unmarshal(rawValue, &value) assert.Assert(t, err) - assert.Assert(t, !ValidateValueWithPattern(value["key"], pattern["key"])) + assert.Assert(t, !ValidateValueWithPattern(log.Log, value["key"], pattern["key"])) } func TestValidateValueWithPattern_NullPatternDefaultString(t *testing.T) { @@ -120,7 +121,7 @@ func TestValidateValueWithPattern_NullPatternDefaultString(t *testing.T) { err = json.Unmarshal(rawValue, &value) assert.Assert(t, err) - assert.Assert(t, ValidateValueWithPattern(value["key"], pattern["key"])) + assert.Assert(t, ValidateValueWithPattern(log.Log, value["key"], pattern["key"])) } func TestValidateValueWithPattern_NullPatternDefaultFloat(t *testing.T) { @@ -142,7 +143,7 @@ func TestValidateValueWithPattern_NullPatternDefaultFloat(t *testing.T) { err = json.Unmarshal(rawValue, &value) assert.Assert(t, err) - assert.Assert(t, ValidateValueWithPattern(value["key"], pattern["key"])) + assert.Assert(t, ValidateValueWithPattern(log.Log, value["key"], pattern["key"])) } func TestValidateValueWithPattern_NullPatternDefaultInt(t *testing.T) { @@ -164,7 +165,7 @@ func TestValidateValueWithPattern_NullPatternDefaultInt(t *testing.T) { err = json.Unmarshal(rawValue, &value) assert.Assert(t, err) - assert.Assert(t, ValidateValueWithPattern(value["key"], pattern["key"])) + assert.Assert(t, ValidateValueWithPattern(log.Log, value["key"], pattern["key"])) } func TestValidateValueWithPattern_NullPatternDefaultBool(t *testing.T) { @@ -186,77 +187,77 @@ func TestValidateValueWithPattern_NullPatternDefaultBool(t *testing.T) { err = json.Unmarshal(rawValue, &value) assert.Assert(t, err) - assert.Assert(t, ValidateValueWithPattern(value["key"], pattern["key"])) + assert.Assert(t, ValidateValueWithPattern(log.Log, value["key"], pattern["key"])) } func TestValidateValueWithPattern_StringsLogicalOr(t *testing.T) { pattern := "192.168.88.1 | 10.100.11.*" value := "10.100.11.54" - assert.Assert(t, ValidateValueWithPattern(value, pattern)) + assert.Assert(t, ValidateValueWithPattern(log.Log, value, pattern)) } func TestValidateValueWithPattern_EqualTwoFloats(t *testing.T) { - assert.Assert(t, ValidateValueWithPattern(7.0, 7.000)) + assert.Assert(t, ValidateValueWithPattern(log.Log, 7.0, 7.000)) } func TestValidateValueWithNilPattern_NullPatternStringValue(t *testing.T) { - assert.Assert(t, !validateValueWithNilPattern("value")) + assert.Assert(t, !validateValueWithNilPattern(log.Log, "value")) } func TestValidateValueWithNilPattern_NullPatternDefaultString(t *testing.T) { - assert.Assert(t, validateValueWithNilPattern("")) + assert.Assert(t, validateValueWithNilPattern(log.Log, "")) } func TestValidateValueWithNilPattern_NullPatternDefaultFloat(t *testing.T) { - assert.Assert(t, validateValueWithNilPattern(0.0)) + assert.Assert(t, validateValueWithNilPattern(log.Log, 0.0)) } func TestValidateValueWithNilPattern_NullPatternFloat(t *testing.T) { - assert.Assert(t, 
!validateValueWithNilPattern(0.1)) + assert.Assert(t, !validateValueWithNilPattern(log.Log, 0.1)) } func TestValidateValueWithNilPattern_NullPatternDefaultInt(t *testing.T) { - assert.Assert(t, validateValueWithNilPattern(0)) + assert.Assert(t, validateValueWithNilPattern(log.Log, 0)) } func TestValidateValueWithNilPattern_NullPatternInt(t *testing.T) { - assert.Assert(t, !validateValueWithNilPattern(1)) + assert.Assert(t, !validateValueWithNilPattern(log.Log, 1)) } func TestValidateValueWithNilPattern_NullPatternDefaultBool(t *testing.T) { - assert.Assert(t, validateValueWithNilPattern(false)) + assert.Assert(t, validateValueWithNilPattern(log.Log, false)) } func TestValidateValueWithNilPattern_NullPatternTrueBool(t *testing.T) { - assert.Assert(t, !validateValueWithNilPattern(true)) + assert.Assert(t, !validateValueWithNilPattern(log.Log, true)) } func TestValidateValueWithFloatPattern_FloatValue(t *testing.T) { - assert.Assert(t, validateValueWithFloatPattern(7.9914, 7.9914)) + assert.Assert(t, validateValueWithFloatPattern(log.Log, 7.9914, 7.9914)) } func TestValidateValueWithFloatPattern_FloatValueNotPass(t *testing.T) { - assert.Assert(t, !validateValueWithFloatPattern(7.9914, 7.99141)) + assert.Assert(t, !validateValueWithFloatPattern(log.Log, 7.9914, 7.99141)) } func TestValidateValueWithFloatPattern_FloatPatternWithoutFractionIntValue(t *testing.T) { - assert.Assert(t, validateValueWithFloatPattern(7, 7.000000)) + assert.Assert(t, validateValueWithFloatPattern(log.Log, 7, 7.000000)) } func TestValidateValueWithFloatPattern_FloatPatternWithoutFraction(t *testing.T) { - assert.Assert(t, validateValueWithFloatPattern(7.000000, 7.000000)) + assert.Assert(t, validateValueWithFloatPattern(log.Log, 7.000000, 7.000000)) } func TestValidateValueWithIntPattern_FloatValueWithoutFraction(t *testing.T) { - assert.Assert(t, validateValueWithFloatPattern(7.000000, 7)) + assert.Assert(t, validateValueWithFloatPattern(log.Log, 7.000000, 7)) } func TestValidateValueWithIntPattern_FloatValueWitFraction(t *testing.T) { - assert.Assert(t, !validateValueWithFloatPattern(7.000001, 7)) + assert.Assert(t, !validateValueWithFloatPattern(log.Log, 7.000001, 7)) } func TestValidateValueWithIntPattern_NotPass(t *testing.T) { - assert.Assert(t, !validateValueWithFloatPattern(8, 7)) + assert.Assert(t, !validateValueWithFloatPattern(log.Log, 8, 7)) } func TestGetNumberAndStringPartsFromPattern_NumberAndString(t *testing.T) { @@ -290,35 +291,35 @@ func TestGetNumberAndStringPartsFromPattern_Empty(t *testing.T) { } func TestValidateNumberWithStr_LessFloatAndInt(t *testing.T) { - assert.Assert(t, validateNumberWithStr(7.00001, "7.000001", operator.More)) - assert.Assert(t, validateNumberWithStr(7.00001, "7", operator.NotEqual)) + assert.Assert(t, validateNumberWithStr(log.Log, 7.00001, "7.000001", operator.More)) + assert.Assert(t, validateNumberWithStr(log.Log, 7.00001, "7", operator.NotEqual)) - assert.Assert(t, validateNumberWithStr(7.0000, "7", operator.Equal)) - assert.Assert(t, !validateNumberWithStr(6.000000001, "6", operator.Less)) + assert.Assert(t, validateNumberWithStr(log.Log, 7.0000, "7", operator.Equal)) + assert.Assert(t, !validateNumberWithStr(log.Log, 6.000000001, "6", operator.Less)) } func TestValidateQuantity_InvalidQuantity(t *testing.T) { - assert.Assert(t, !validateNumberWithStr("1024Gi", "", operator.Equal)) - assert.Assert(t, !validateNumberWithStr("gii", "1024Gi", operator.Equal)) + assert.Assert(t, !validateNumberWithStr(log.Log, "1024Gi", "", operator.Equal)) + assert.Assert(t, 
!validateNumberWithStr(log.Log, "gii", "1024Gi", operator.Equal)) } func TestValidateQuantity_Equal(t *testing.T) { - assert.Assert(t, validateNumberWithStr("1024Gi", "1024Gi", operator.Equal)) - assert.Assert(t, validateNumberWithStr("1024Mi", "1Gi", operator.Equal)) - assert.Assert(t, validateNumberWithStr("0.2", "200m", operator.Equal)) - assert.Assert(t, validateNumberWithStr("500", "500", operator.Equal)) - assert.Assert(t, !validateNumberWithStr("2048", "1024", operator.Equal)) - assert.Assert(t, validateNumberWithStr(1024, "1024", operator.Equal)) + assert.Assert(t, validateNumberWithStr(log.Log, "1024Gi", "1024Gi", operator.Equal)) + assert.Assert(t, validateNumberWithStr(log.Log, "1024Mi", "1Gi", operator.Equal)) + assert.Assert(t, validateNumberWithStr(log.Log, "0.2", "200m", operator.Equal)) + assert.Assert(t, validateNumberWithStr(log.Log, "500", "500", operator.Equal)) + assert.Assert(t, !validateNumberWithStr(log.Log, "2048", "1024", operator.Equal)) + assert.Assert(t, validateNumberWithStr(log.Log, 1024, "1024", operator.Equal)) } func TestValidateQuantity_Operation(t *testing.T) { - assert.Assert(t, validateNumberWithStr("1Gi", "1000Mi", operator.More)) - assert.Assert(t, validateNumberWithStr("1G", "1Gi", operator.Less)) - assert.Assert(t, validateNumberWithStr("500m", "0.5", operator.MoreEqual)) - assert.Assert(t, validateNumberWithStr("1", "500m", operator.MoreEqual)) - assert.Assert(t, validateNumberWithStr("0.5", ".5", operator.LessEqual)) - assert.Assert(t, validateNumberWithStr("0.2", ".5", operator.LessEqual)) - assert.Assert(t, validateNumberWithStr("0.2", ".5", operator.NotEqual)) + assert.Assert(t, validateNumberWithStr(log.Log, "1Gi", "1000Mi", operator.More)) + assert.Assert(t, validateNumberWithStr(log.Log, "1G", "1Gi", operator.Less)) + assert.Assert(t, validateNumberWithStr(log.Log, "500m", "0.5", operator.MoreEqual)) + assert.Assert(t, validateNumberWithStr(log.Log, "1", "500m", operator.MoreEqual)) + assert.Assert(t, validateNumberWithStr(log.Log, "0.5", ".5", operator.LessEqual)) + assert.Assert(t, validateNumberWithStr(log.Log, "0.2", ".5", operator.LessEqual)) + assert.Assert(t, validateNumberWithStr(log.Log, "0.2", ".5", operator.NotEqual)) } func TestGetOperatorFromStringPattern_OneChar(t *testing.T) { diff --git a/pkg/engine/validate/validate.go b/pkg/engine/validate/validate.go index 9d94f07789..a3764cce22 100644 --- a/pkg/engine/validate/validate.go +++ b/pkg/engine/validate/validate.go @@ -8,15 +8,15 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/engine/anchor" "github.com/nirmata/kyverno/pkg/engine/operator" ) // ValidateResourceWithPattern is a start of element-by-element validation process // It assumes that validation is started from root, so "/" is passed -func ValidateResourceWithPattern(resource, pattern interface{}) (string, error) { - path, err := validateResourceElement(resource, pattern, pattern, "/") +func ValidateResourceWithPattern(log logr.Logger, resource, pattern interface{}) (string, error) { + path, err := validateResourceElement(log, resource, pattern, pattern, "/") if err != nil { return path, err } @@ -27,44 +27,44 @@ func ValidateResourceWithPattern(resource, pattern interface{}) (string, error) // validateResourceElement detects the element type (map, array, nil, string, int, bool, float) // and calls corresponding handler // Pattern tree and resource tree can have different structure. 
In this case validation fails -func validateResourceElement(resourceElement, patternElement, originPattern interface{}, path string) (string, error) { +func validateResourceElement(log logr.Logger, resourceElement, patternElement, originPattern interface{}, path string) (string, error) { var err error switch typedPatternElement := patternElement.(type) { // map case map[string]interface{}: typedResourceElement, ok := resourceElement.(map[string]interface{}) if !ok { - glog.V(4).Infof("Pattern and resource have different structures. Path: %s. Expected %T, found %T", path, patternElement, resourceElement) + log.V(4).Info("Pattern and resource have different structures.", "path", path, "expected", fmt.Sprintf("%T", patternElement), "current", fmt.Sprintf("%T", resourceElement)) return path, fmt.Errorf("Pattern and resource have different structures. Path: %s. Expected %T, found %T", path, patternElement, resourceElement) } - return validateMap(typedResourceElement, typedPatternElement, originPattern, path) + return validateMap(log, typedResourceElement, typedPatternElement, originPattern, path) // array case []interface{}: typedResourceElement, ok := resourceElement.([]interface{}) if !ok { - glog.V(4).Infof("Pattern and resource have different structures. Path: %s. Expected %T, found %T", path, patternElement, resourceElement) + log.V(4).Info("Pattern and resource have different structures.", "path", path, "expected", fmt.Sprintf("%T", patternElement), "current", fmt.Sprintf("%T", resourceElement)) return path, fmt.Errorf("Validation rule Failed at path %s, resource does not satisfy the expected overlay pattern", path) } - return validateArray(typedResourceElement, typedPatternElement, originPattern, path) + return validateArray(log, typedResourceElement, typedPatternElement, originPattern, path) // elementary values case string, float64, int, int64, bool, nil: /*Analyze pattern */ if checkedPattern := reflect.ValueOf(patternElement); checkedPattern.Kind() == reflect.String { if isStringIsReference(checkedPattern.String()) { //check for $ anchor - patternElement, err = actualizePattern(originPattern, checkedPattern.String(), path) + patternElement, err = actualizePattern(log, originPattern, checkedPattern.String(), path) if err != nil { return path, err } } } - if !ValidateValueWithPattern(resourceElement, patternElement) { + if !ValidateValueWithPattern(log, resourceElement, patternElement) { return path, fmt.Errorf("Validation rule failed at '%s' to validate value '%v' with pattern '%v'", path, resourceElement, patternElement) } default: - glog.V(4).Infof("Pattern contains unknown type %T. 
Path: %s", patternElement, path) + log.V(4).Info("Pattern contains unknown type", "path", path, "current", fmt.Sprintf("%T", patternElement)) return path, fmt.Errorf("Validation rule failed at '%s', pattern contains unknown type", path) } return "", nil @@ -72,7 +72,7 @@ func validateResourceElement(resourceElement, patternElement, originPattern inte // If validateResourceElement detects map element inside resource and pattern trees, it goes to validateMap // For each element of the map we must detect the type again, so we pass these elements to validateResourceElement -func validateMap(resourceMap, patternMap map[string]interface{}, origPattern interface{}, path string) (string, error) { +func validateMap(log logr.Logger, resourceMap, patternMap map[string]interface{}, origPattern interface{}, path string) (string, error) { // check if there is anchor in pattern // Phase 1 : Evaluate all the anchors // Phase 2 : Evaluate non-anchors @@ -91,7 +91,7 @@ func validateMap(resourceMap, patternMap map[string]interface{}, origPattern int if err != nil { // If Conditional anchor fails then we dont process the resources if anchor.IsConditionAnchor(key) { - glog.V(4).Infof("condition anchor did not satisfy, wont process the resources: %s", err) + log.Error(err, "condition anchor did not satisfy, wont process the resource") return "", nil } return handlerPath, err @@ -109,7 +109,7 @@ func validateMap(resourceMap, patternMap map[string]interface{}, origPattern int return "", nil } -func validateArray(resourceArray, patternArray []interface{}, originPattern interface{}, path string) (string, error) { +func validateArray(log logr.Logger, resourceArray, patternArray []interface{}, originPattern interface{}, path string) (string, error) { if 0 == len(patternArray) { return path, fmt.Errorf("Pattern Array empty") @@ -119,7 +119,7 @@ func validateArray(resourceArray, patternArray []interface{}, originPattern inte case map[string]interface{}: // This is special case, because maps in arrays can have anchors that must be // processed with the special way affecting the entire array - path, err := validateArrayOfMaps(resourceArray, typedPatternElement, originPattern, path) + path, err := validateArrayOfMaps(log, resourceArray, typedPatternElement, originPattern, path) if err != nil { return path, err } @@ -127,7 +127,7 @@ func validateArray(resourceArray, patternArray []interface{}, originPattern inte // In all other cases - detect type and handle each array element with validateResourceElement for i, patternElement := range patternArray { currentPath := path + strconv.Itoa(i) + "/" - path, err := validateResourceElement(resourceArray[i], patternElement, originPattern, currentPath) + path, err := validateResourceElement(log, resourceArray[i], patternElement, originPattern, currentPath) if err != nil { return path, err } @@ -137,7 +137,7 @@ func validateArray(resourceArray, patternArray []interface{}, originPattern inte return "", nil } -func actualizePattern(origPattern interface{}, referencePattern, absolutePath string) (interface{}, error) { +func actualizePattern(log logr.Logger, origPattern interface{}, referencePattern, absolutePath string) (interface{}, error) { var foundValue interface{} referencePattern = strings.Trim(referencePattern, "$()") @@ -155,7 +155,7 @@ func actualizePattern(origPattern interface{}, referencePattern, absolutePath st // value := actualPath := formAbsolutePath(referencePattern, absolutePath) - valFromReference, err := getValueFromReference(origPattern, actualPath) + 
valFromReference, err := getValueFromReference(log, origPattern, actualPath) if err != nil { return err, nil } @@ -196,15 +196,15 @@ func formAbsolutePath(referencePath, absolutePath string) string { } //Prepares original pattern, path to value, and call traverse function -func getValueFromReference(origPattern interface{}, reference string) (interface{}, error) { +func getValueFromReference(log logr.Logger, origPattern interface{}, reference string) (interface{}, error) { originalPatternMap := origPattern.(map[string]interface{}) reference = reference[1:] statements := strings.Split(reference, "/") - return getValueFromPattern(originalPatternMap, statements, 0) + return getValueFromPattern(log, originalPatternMap, statements, 0) } -func getValueFromPattern(patternMap map[string]interface{}, keys []string, currentKeyIndex int) (interface{}, error) { +func getValueFromPattern(log logr.Logger, patternMap map[string]interface{}, keys []string, currentKeyIndex int) (interface{}, error) { for key, pattern := range patternMap { rawKey := getRawKeyIfWrappedWithAttributes(key) @@ -221,11 +221,11 @@ func getValueFromPattern(patternMap map[string]interface{}, keys []string, curre for i, value := range typedPattern { resourceMap, ok := value.(map[string]interface{}) if !ok { - glog.V(4).Infof("Pattern and resource have different structures. Expected %T, found %T", pattern, value) + log.V(4).Info("Pattern and resource have different structures.", "expected", fmt.Sprintf("%T", pattern), "current", fmt.Sprintf("%T", value)) return nil, fmt.Errorf("Validation rule failed, resource does not have expected pattern %v", patternMap) } if keys[currentKeyIndex+1] == strconv.Itoa(i) { - return getValueFromPattern(resourceMap, keys, currentKeyIndex+2) + return getValueFromPattern(log, resourceMap, keys, currentKeyIndex+2) } return nil, errors.New("Reference to non-existent place in the document") } @@ -233,7 +233,7 @@ func getValueFromPattern(patternMap map[string]interface{}, keys []string, curre return nil, errors.New("Reference to non-existent place in the document") case map[string]interface{}: if keys[currentKeyIndex] == rawKey { - return getValueFromPattern(typedPattern, keys, currentKeyIndex+1) + return getValueFromPattern(log, typedPattern, keys, currentKeyIndex+1) } return nil, errors.New("Reference to non-existent place in the document") case string, float64, int, int64, bool, nil: @@ -251,12 +251,12 @@ func getValueFromPattern(patternMap map[string]interface{}, keys []string, curre // validateArrayOfMaps gets anchors from pattern array map element, applies anchors logic // and then validates each map due to the pattern -func validateArrayOfMaps(resourceMapArray []interface{}, patternMap map[string]interface{}, originPattern interface{}, path string) (string, error) { +func validateArrayOfMaps(log logr.Logger, resourceMapArray []interface{}, patternMap map[string]interface{}, originPattern interface{}, path string) (string, error) { for i, resourceElement := range resourceMapArray { // check the types of resource element // expect it to be map, but can be anything ?:( currentPath := path + strconv.Itoa(i) + "/" - returnpath, err := validateResourceElement(resourceElement, patternMap, originPattern, currentPath) + returnpath, err := validateResourceElement(log, resourceElement, patternMap, originPattern, currentPath) if err != nil { return returnpath, err } diff --git a/pkg/engine/validate/validate_test.go b/pkg/engine/validate/validate_test.go index acabd87ac1..bf18787324 100644 --- 
a/pkg/engine/validate/validate_test.go +++ b/pkg/engine/validate/validate_test.go @@ -4,6 +4,7 @@ import ( "encoding/json" "testing" + "github.com/nirmata/kyverno/pkg/log" "gotest.tools/assert" ) @@ -100,7 +101,7 @@ func TestValidateMap(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateMap(resource, pattern, pattern, "/") + path, err := validateMap(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.NilError(t, err) } @@ -196,7 +197,7 @@ func TestValidateMap_AsteriskForInt(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateMap(resource, pattern, pattern, "/") + path, err := validateMap(log.Log, resource, pattern, pattern, "/") t.Log(path) assert.NilError(t, err) } @@ -289,7 +290,7 @@ func TestValidateMap_AsteriskForMap(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateMap(resource, pattern, pattern, "/") + path, err := validateMap(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.NilError(t, err) } @@ -377,7 +378,7 @@ func TestValidateMap_AsteriskForArray(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateMap(resource, pattern, pattern, "/") + path, err := validateMap(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.NilError(t, err) } @@ -468,7 +469,7 @@ func TestValidateMap_AsteriskFieldIsMissing(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateMap(resource, pattern, pattern, "/") + path, err := validateMap(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/spec/template/spec/containers/0/") assert.Assert(t, err != nil) } @@ -559,7 +560,7 @@ func TestValidateMap_livenessProbeIsNull(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) json.Unmarshal(rawMap, &resource) - path, err := validateMap(resource, pattern, pattern, "/") + path, err := validateMap(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.NilError(t, err) } @@ -649,7 +650,7 @@ func TestValidateMap_livenessProbeIsMissing(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateMap(resource, pattern, pattern, "/") + path, err := validateMap(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.NilError(t, err) } @@ -695,7 +696,7 @@ func TestValidateMapElement_TwoElementsInArrayOnePass(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") // assert.Equal(t, path, "/1/object/0/key2/") // assert.NilError(t, err) @@ -730,7 +731,7 @@ func TestValidateMapElement_OneElementInArrayPass(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") 
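These validate_test.go hunks only thread log.Log through the unexported helpers. For an end-to-end view of the exported API after this change, here is a minimal sketch of ValidateResourceWithPattern with a simple wildcard pattern, following the map-traversal semantics shown above; the resource and pattern are made up for illustration:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/nirmata/kyverno/pkg/engine/validate"
	"github.com/nirmata/kyverno/pkg/log"
)

func main() {
	rawResource := []byte(`{"metadata":{"name":"nginx","labels":{"app":"nginx"}}}`)
	rawPattern := []byte(`{"metadata":{"labels":{"app":"*"}}}`)

	var resource, pattern interface{}
	if err := json.Unmarshal(rawResource, &resource); err != nil {
		panic(err)
	}
	if err := json.Unmarshal(rawPattern, &pattern); err != nil {
		panic(err)
	}

	// Maps are traversed key by key and leaf values are checked with
	// ValidateValueWithPattern, so "*" accepts any label value here.
	path, err := validate.ValidateResourceWithPattern(log.Log, resource, pattern)
	fmt.Println(path, err) // expected: empty path, nil error
}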
assert.NilError(t, err) } @@ -784,7 +785,7 @@ func TestValidateMap_CorrectRelativePathInConfig(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.NilError(t, err) } @@ -838,7 +839,7 @@ func TestValidateMap_RelativePathDoesNotExists(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/spec/containers/0/resources/requests/memory/") assert.Assert(t, err != nil) } @@ -892,7 +893,7 @@ func TestValidateMap_OnlyAnchorsInPath(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/spec/containers/0/resources/requests/memory/") assert.Assert(t, err != nil) } @@ -946,7 +947,7 @@ func TestValidateMap_MalformedReferenceOnlyDolarMark(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/spec/containers/0/resources/requests/memory/") assert.Assert(t, err != nil) } @@ -1000,7 +1001,7 @@ func TestValidateMap_RelativePathWithParentheses(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.NilError(t, err) } @@ -1054,7 +1055,7 @@ func TestValidateMap_MalformedPath(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/spec/containers/0/resources/requests/memory/") assert.Assert(t, err != nil) } @@ -1108,7 +1109,7 @@ func TestValidateMap_AbosolutePathExists(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.Assert(t, err == nil) } @@ -1149,7 +1150,7 @@ func TestValidateMap_AbsolutePathToMetadata(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "") assert.Assert(t, err == nil) } @@ -1191,7 +1192,7 @@ func TestValidateMap_AbsolutePathToMetadata_fail(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, 
json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/spec/containers/0/image/") assert.Assert(t, err != nil) } @@ -1245,7 +1246,7 @@ func TestValidateMap_AbosolutePathDoesNotExists(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) assert.Assert(t, json.Unmarshal(rawMap, &resource)) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/spec/containers/0/resources/requests/memory/") assert.Assert(t, err != nil) } @@ -1276,7 +1277,7 @@ func TestActualizePattern_GivenRelativePathThatExists(t *testing.T) { assert.Assert(t, json.Unmarshal(rawPattern, &pattern)) - pattern, err := actualizePattern(pattern, referencePath, absolutePath) + pattern, err := actualizePattern(log.Log, pattern, referencePath, absolutePath) assert.Assert(t, err == nil) } @@ -1347,7 +1348,7 @@ func TestValidateMapElement_OneElementInArrayNotPass(t *testing.T) { json.Unmarshal(rawPattern, &pattern) json.Unmarshal(rawMap, &resource) - path, err := validateResourceElement(resource, pattern, pattern, "/") + path, err := validateResourceElement(log.Log, resource, pattern, pattern, "/") assert.Equal(t, path, "/0/object/0/key2/") assert.Assert(t, err != nil) } diff --git a/pkg/engine/validation.go b/pkg/engine/validation.go index de05323f87..eb94649361 100644 --- a/pkg/engine/validation.go +++ b/pkg/engine/validation.go @@ -5,13 +5,14 @@ import ( "reflect" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/context" "github.com/nirmata/kyverno/pkg/engine/response" "github.com/nirmata/kyverno/pkg/engine/utils" "github.com/nirmata/kyverno/pkg/engine/validate" "github.com/nirmata/kyverno/pkg/engine/variables" + "github.com/nirmata/kyverno/pkg/log" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -23,17 +24,18 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) { oldR := policyContext.OldResource ctx := policyContext.Context admissionInfo := policyContext.AdmissionInfo + logger := log.Log.WithName("Validate").WithValues("policy", policy.Name, "kind", newR.GetKind(), "namespace", newR.GetNamespace(), "name", newR.GetName()) // policy information - glog.V(4).Infof("started applying validation rules of policy %q (%v)", policy.Name, startTime) + logger.V(4).Info("start processing", "startTime", startTime) // Process new & old resource if reflect.DeepEqual(oldR, unstructured.Unstructured{}) { // Create Mode // Operate on New Resource only - resp := validateResource(ctx, policy, newR, admissionInfo) + resp := validateResource(logger, ctx, policy, newR, admissionInfo) startResultResponse(resp, policy, newR) - defer endResultResponse(resp, startTime) + defer endResultResponse(logger, resp, startTime) // set PatchedResource with origin resource if empty // in order to create policy violation if reflect.DeepEqual(resp.PatchedResource, unstructured.Unstructured{}) { @@ -44,14 +46,14 @@ func Validate(policyContext PolicyContext) (resp response.EngineResponse) { // Update Mode // Operate on New and Old Resource only // New resource - oldResponse := validateResource(ctx, policy, oldR, admissionInfo) - newResponse := validateResource(ctx, policy, newR, admissionInfo) + oldResponse := validateResource(logger, ctx, 
policy, oldR, admissionInfo) + newResponse := validateResource(logger, ctx, policy, newR, admissionInfo) // if the old and new response is same then return empty response if !isSameResponse(oldResponse, newResponse) { // there are changes send response startResultResponse(newResponse, policy, newR) - defer endResultResponse(newResponse, startTime) + defer endResultResponse(logger, newResponse, startTime) if reflect.DeepEqual(newResponse.PatchedResource, unstructured.Unstructured{}) { newResponse.PatchedResource = newR } @@ -73,10 +75,9 @@ func startResultResponse(resp *response.EngineResponse, policy kyverno.ClusterPo resp.PolicyResponse.ValidationFailureAction = policy.Spec.ValidationFailureAction } -func endResultResponse(resp *response.EngineResponse, startTime time.Time) { +func endResultResponse(log logr.Logger, resp *response.EngineResponse, startTime time.Time) { resp.PolicyResponse.ProcessingTime = time.Since(startTime) - glog.V(4).Infof("Finished applying validation rules policy %v (%v)", resp.PolicyResponse.Policy, resp.PolicyResponse.ProcessingTime) - glog.V(4).Infof("Validation Rules appplied successfully count %v for policy %q", resp.PolicyResponse.RulesAppliedCount, resp.PolicyResponse.Policy) + log.V(4).Info("finished processing", "processingTime", resp.PolicyResponse.ProcessingTime, "validationRulesApplied", resp.PolicyResponse.RulesAppliedCount) } func incrementAppliedCount(resp *response.EngineResponse) { @@ -84,20 +85,18 @@ func incrementAppliedCount(resp *response.EngineResponse) { resp.PolicyResponse.RulesAppliedCount++ } -func validateResource(ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.EngineResponse { +func validateResource(log logr.Logger, ctx context.EvalInterface, policy kyverno.ClusterPolicy, resource unstructured.Unstructured, admissionInfo kyverno.RequestInfo) *response.EngineResponse { resp := &response.EngineResponse{} for _, rule := range policy.Spec.Rules { if !rule.HasValidate() { continue } - startTime := time.Now() - glog.V(4).Infof("Time: Validate matchAdmissionInfo %v", time.Since(startTime)) // check if the resource satisfies the filter conditions defined in the rule // TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that // dont statisfy a policy rule resource description if err := MatchesResourceDescription(resource, rule, admissionInfo); err != nil { - glog.V(4).Infof("resource %s/%s does not satisfy the resource description for the rule:\n%s", resource.GetNamespace(), resource.GetName(), err.Error()) + log.V(4).Info("resource fails the match description") continue } @@ -105,13 +104,13 @@ func validateResource(ctx context.EvalInterface, policy kyverno.ClusterPolicy, r copyConditions := copyConditions(rule.Conditions) // evaluate pre-conditions // - handle variable subsitutions - if !variables.EvaluateConditions(ctx, copyConditions) { - glog.V(4).Infof("resource %s/%s does not satisfy the conditions for the rule ", resource.GetNamespace(), resource.GetName()) + if !variables.EvaluateConditions(log, ctx, copyConditions) { + log.V(4).Info("resource fails the preconditions") continue } if rule.Validation.Pattern != nil || rule.Validation.AnyPattern != nil { - ruleResponse := validatePatterns(ctx, resource, rule) + ruleResponse := validatePatterns(log, ctx, resource, rule) incrementAppliedCount(resp) resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResponse) } @@ -159,14 +158,15 @@ func 
isSameRules(oldRules []response.RuleResponse, newRules []response.RuleRespo } // validatePatterns validate pattern and anyPattern -func validatePatterns(ctx context.EvalInterface, resource unstructured.Unstructured, rule kyverno.Rule) (resp response.RuleResponse) { +func validatePatterns(log logr.Logger, ctx context.EvalInterface, resource unstructured.Unstructured, rule kyverno.Rule) (resp response.RuleResponse) { startTime := time.Now() - glog.V(4).Infof("started applying validation rule %q (%v)", rule.Name, startTime) + logger := log.WithValues("rule", rule.Name) + logger.V(4).Info("start processing rule", "startTime", startTime) resp.Name = rule.Name resp.Type = utils.Validation.String() defer func() { resp.RuleStats.ProcessingTime = time.Since(startTime) - glog.V(4).Infof("finished applying validation rule %q (%v)", resp.Name, resp.RuleStats.ProcessingTime) + logger.V(4).Info("finished processing", "processingTime", resp.RuleStats.ProcessingTime) }() // work on a copy of validation rule validationRule := rule.Validation.DeepCopy() @@ -176,7 +176,7 @@ func validatePatterns(ctx context.EvalInterface, resource unstructured.Unstructu // substitute variables in the pattern pattern := validationRule.Pattern var err error - if pattern, err = variables.SubstituteVars(ctx, pattern); err != nil { + if pattern, err = variables.SubstituteVars(logger, ctx, pattern); err != nil { // variable subsitution failed resp.Success = false resp.Message = fmt.Sprintf("Validation error: %s; Validation rule '%s' failed. '%s'", @@ -184,7 +184,7 @@ func validatePatterns(ctx context.EvalInterface, resource unstructured.Unstructu return resp } - if path, err := validate.ValidateResourceWithPattern(resource.Object, pattern); err != nil { + if path, err := validate.ValidateResourceWithPattern(logger, resource.Object, pattern); err != nil { // validation failed resp.Success = false resp.Message = fmt.Sprintf("Validation error: %s; Validation rule '%s' failed at path '%s'", @@ -192,7 +192,7 @@ func validatePatterns(ctx context.EvalInterface, resource unstructured.Unstructu return resp } // rule application successful - glog.V(4).Infof("rule %s pattern validated successfully on resource %s/%s/%s", rule.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName()) + logger.V(4).Info("successfully processed rule") resp.Success = true resp.Message = fmt.Sprintf("Validation rule '%s' succeeded.", rule.Name) return resp @@ -203,19 +203,18 @@ func validatePatterns(ctx context.EvalInterface, resource unstructured.Unstructu var failedAnyPatternsErrors []error var err error for idx, pattern := range validationRule.AnyPattern { - if pattern, err = variables.SubstituteVars(ctx, pattern); err != nil { + if pattern, err = variables.SubstituteVars(logger, ctx, pattern); err != nil { // variable subsitution failed failedSubstitutionsErrors = append(failedSubstitutionsErrors, err) continue } - _, err := validate.ValidateResourceWithPattern(resource.Object, pattern) + _, err := validate.ValidateResourceWithPattern(logger, resource.Object, pattern) if err == nil { resp.Success = true resp.Message = fmt.Sprintf("Validation rule '%s' anyPattern[%d] succeeded.", rule.Name, idx) return resp } - glog.V(4).Infof("Validation error: %s; Validation rule %s anyPattern[%d] for %s/%s/%s", - rule.Validation.Message, rule.Name, idx, resource.GetKind(), resource.GetNamespace(), resource.GetName()) + logger.V(4).Info(fmt.Sprintf("validation rule failed for anyPattern[%d]", idx), "message", rule.Validation.Message) patternErr := 
fmt.Errorf("anyPattern[%d] failed; %s", idx, err) failedAnyPatternsErrors = append(failedAnyPatternsErrors, patternErr) } diff --git a/pkg/engine/variables/evaluate.go b/pkg/engine/variables/evaluate.go index 519a909d01..d0ebf7ed74 100644 --- a/pkg/engine/variables/evaluate.go +++ b/pkg/engine/variables/evaluate.go @@ -1,16 +1,16 @@ package variables import ( - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/context" "github.com/nirmata/kyverno/pkg/engine/variables/operator" ) //Evaluate evaluates the condition -func Evaluate(ctx context.EvalInterface, condition kyverno.Condition) bool { +func Evaluate(log logr.Logger, ctx context.EvalInterface, condition kyverno.Condition) bool { // get handler for the operator - handle := operator.CreateOperatorHandler(ctx, condition.Operator, SubstituteVars) + handle := operator.CreateOperatorHandler(log, ctx, condition.Operator, SubstituteVars) if handle == nil { return false } @@ -18,11 +18,10 @@ func Evaluate(ctx context.EvalInterface, condition kyverno.Condition) bool { } //EvaluateConditions evaluates multiple conditions -func EvaluateConditions(ctx context.EvalInterface, conditions []kyverno.Condition) bool { +func EvaluateConditions(log logr.Logger, ctx context.EvalInterface, conditions []kyverno.Condition) bool { // AND the conditions for _, condition := range conditions { - if !Evaluate(ctx, condition) { - glog.V(4).Infof("condition %v failed", condition) + if !Evaluate(log, ctx, condition) { return false } } diff --git a/pkg/engine/variables/evaluate_test.go b/pkg/engine/variables/evaluate_test.go index 33b3a4f2b7..78bc599672 100644 --- a/pkg/engine/variables/evaluate_test.go +++ b/pkg/engine/variables/evaluate_test.go @@ -6,6 +6,7 @@ import ( kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/context" + "github.com/nirmata/kyverno/pkg/log" ) // STRINGS @@ -18,7 +19,7 @@ func Test_Eval_Equal_Const_String_Pass(t *testing.T) { Value: "name", } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -32,7 +33,7 @@ func Test_Eval_Equal_Const_String_Fail(t *testing.T) { Value: "name1", } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -46,7 +47,7 @@ func Test_Eval_NoEqual_Const_String_Pass(t *testing.T) { Value: "name1", } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -60,7 +61,7 @@ func Test_Eval_NoEqual_Const_String_Fail(t *testing.T) { Value: "name", } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -76,7 +77,7 @@ func Test_Eval_Equal_Const_Bool_Pass(t *testing.T) { Value: true, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -90,7 +91,7 @@ func Test_Eval_Equal_Const_Bool_Fail(t *testing.T) { Value: false, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -104,7 +105,7 @@ func Test_Eval_NoEqual_Const_Bool_Pass(t *testing.T) { Value: false, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -118,7 +119,7 @@ func Test_Eval_NoEqual_Const_Bool_Fail(t *testing.T) { Value: true, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -133,7 
+134,7 @@ func Test_Eval_Equal_Const_int_Pass(t *testing.T) { Value: 1, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -147,7 +148,7 @@ func Test_Eval_Equal_Const_int_Fail(t *testing.T) { Value: 2, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -161,7 +162,7 @@ func Test_Eval_NoEqual_Const_int_Pass(t *testing.T) { Value: 2, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -175,7 +176,7 @@ func Test_Eval_NoEqual_Const_int_Fail(t *testing.T) { Value: 1, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -190,7 +191,7 @@ func Test_Eval_Equal_Const_int64_Pass(t *testing.T) { Value: int64(1), } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -204,7 +205,7 @@ func Test_Eval_Equal_Const_int64_Fail(t *testing.T) { Value: int64(2), } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -218,7 +219,7 @@ func Test_Eval_NoEqual_Const_int64_Pass(t *testing.T) { Value: int64(2), } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -232,7 +233,7 @@ func Test_Eval_NoEqual_Const_int64_Fail(t *testing.T) { Value: int64(1), } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -248,7 +249,7 @@ func Test_Eval_Equal_Const_float64_Pass(t *testing.T) { Value: 1.5, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -262,7 +263,7 @@ func Test_Eval_Equal_Const_float64_Fail(t *testing.T) { Value: 1.6, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -276,7 +277,7 @@ func Test_Eval_NoEqual_Const_float64_Pass(t *testing.T) { Value: 1.6, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -290,7 +291,7 @@ func Test_Eval_NoEqual_Const_float64_Fail(t *testing.T) { Value: 1.5, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -320,7 +321,7 @@ func Test_Eval_Equal_Const_object_Pass(t *testing.T) { Value: obj2, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -348,7 +349,7 @@ func Test_Eval_Equal_Const_object_Fail(t *testing.T) { Value: obj2, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -376,7 +377,7 @@ func Test_Eval_NotEqual_Const_object_Pass(t *testing.T) { Value: obj2, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -404,7 +405,7 @@ func Test_Eval_NotEqual_Const_object_Fail(t *testing.T) { Value: obj2, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -434,7 +435,7 @@ func Test_Eval_Equal_Const_list_Pass(t *testing.T) { Value: obj2, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -460,7 +461,7 @@ func Test_Eval_Equal_Const_list_Fail(t *testing.T) { Value: obj2, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -486,7 +487,7 @@ func 
Test_Eval_NotEqual_Const_list_Pass(t *testing.T) { Value: obj2, } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -512,7 +513,7 @@ func Test_Eval_NotEqual_Const_list_Fail(t *testing.T) { Value: obj2, } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } @@ -545,7 +546,7 @@ func Test_Eval_Equal_Var_Pass(t *testing.T) { Value: "temp", } - if !Evaluate(ctx, condition) { + if !Evaluate(log.Log, ctx, condition) { t.Error("expected to pass") } } @@ -576,7 +577,7 @@ func Test_Eval_Equal_Var_Fail(t *testing.T) { Value: "temp1", } - if Evaluate(ctx, condition) { + if Evaluate(log.Log, ctx, condition) { t.Error("expected to fail") } } diff --git a/pkg/engine/variables/operator/equal.go b/pkg/engine/variables/operator/equal.go index 81ea80c621..1bc181f0f8 100644 --- a/pkg/engine/variables/operator/equal.go +++ b/pkg/engine/variables/operator/equal.go @@ -1,19 +1,21 @@ package operator import ( + "fmt" "math" "reflect" "strconv" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/engine/context" ) //NewEqualHandler returns handler to manage Equal operations -func NewEqualHandler(ctx context.EvalInterface, subHandler VariableSubstitutionHandler) OperatorHandler { +func NewEqualHandler(log logr.Logger, ctx context.EvalInterface, subHandler VariableSubstitutionHandler) OperatorHandler { return EqualHandler{ ctx: ctx, subHandler: subHandler, + log: log, } } @@ -21,6 +23,7 @@ func NewEqualHandler(ctx context.EvalInterface, subHandler VariableSubstitutionH type EqualHandler struct { ctx context.EvalInterface subHandler VariableSubstitutionHandler + log logr.Logger } //Evaluate evaluates expression with Equal Operator @@ -28,14 +31,14 @@ func (eh EqualHandler) Evaluate(key, value interface{}) bool { var err error //TODO: decouple variables from evaluation // substitute the variables - if key, err = eh.subHandler(eh.ctx, key); err != nil { + if key, err = eh.subHandler(eh.log, eh.ctx, key); err != nil { // Failed to resolve the variable - glog.Infof("Failed to resolve variables in key: %s: %v", key, err) + eh.log.Error(err, "Failed to resolve variable", "variable", key) return false } - if value, err = eh.subHandler(eh.ctx, value); err != nil { + if value, err = eh.subHandler(eh.log, eh.ctx, value); err != nil { // Failed to resolve the variable - glog.Infof("Failed to resolve variables in value: %s: %v", value, err) + eh.log.Error(err, "Failed to resolve variable", "variable", value) return false } @@ -56,7 +59,7 @@ func (eh EqualHandler) Evaluate(key, value interface{}) bool { case []interface{}: return eh.validateValueWithSlicePattern(typedKey, value) default: - glog.Errorf("Unsupported type %v", typedKey) + eh.log.Info("Unsupported type", "value", typedKey, "type", fmt.Sprintf("%T", typedKey)) return false } } @@ -65,7 +68,7 @@ func (eh EqualHandler) validateValueWithSlicePattern(key []interface{}, value in if val, ok := value.([]interface{}); ok { return reflect.DeepEqual(key, val) } - glog.Warningf("Expected []interface{}, %v is of type %T", value, value) + eh.log.Info("Expected type []interface{}", "value", value, "type", fmt.Sprintf("%T", value)) return false } @@ -73,7 +76,7 @@ func (eh EqualHandler) validateValueWithMapPattern(key map[string]interface{}, v if val, ok := value.(map[string]interface{}); ok { return reflect.DeepEqual(key, val) } - glog.Warningf("Expected map[string]interface{}, %v is of type %T", value, value) + eh.log.Info("Expected type 
map[string]interface{}", "value", value, "type", fmt.Sprintf("%T", value)) return false } @@ -81,7 +84,8 @@ func (eh EqualHandler) validateValuewithStringPattern(key string, value interfac if val, ok := value.(string); ok { return key == val } - glog.Warningf("Expected string, %v is of type %T", value, value) + + eh.log.Info("Expected type string", "value", value, "type", fmt.Sprintf("%T", value)) return false } @@ -92,25 +96,25 @@ func (eh EqualHandler) validateValuewithFloatPattern(key float64, value interfac if key == math.Trunc(key) { return int(key) == typedValue } - glog.Warningf("Expected float, found int: %d\n", typedValue) + eh.log.Info("Expected type float, found int", "typedValue", typedValue) case int64: // check that float has not fraction if key == math.Trunc(key) { return int64(key) == typedValue } - glog.Warningf("Expected float, found int: %d\n", typedValue) + eh.log.Info("Expected type float, found int", "typedValue", typedValue) case float64: return typedValue == key case string: // extract float from string float64Num, err := strconv.ParseFloat(typedValue, 64) if err != nil { - glog.Warningf("Failed to parse float64 from string: %v", err) + eh.log.Error(err, "Failed to parse float64 from string") return false } return float64Num == key default: - glog.Warningf("Expected float, found: %T\n", value) + eh.log.Info("Expected type float", "value", value, "type", fmt.Sprintf("%T", value)) return false } return false @@ -119,7 +123,7 @@ func (eh EqualHandler) validateValuewithFloatPattern(key float64, value interfac func (eh EqualHandler) validateValuewithBoolPattern(key bool, value interface{}) bool { typedValue, ok := value.(bool) if !ok { - glog.Error("Expected bool, found %V", value) + eh.log.Info("Expected type bool", "value", value, "type", fmt.Sprintf("%T", value)) return false } return key == typedValue @@ -136,18 +140,18 @@ func (eh EqualHandler) validateValuewithIntPattern(key int64, value interface{}) if typedValue == math.Trunc(typedValue) { return int64(typedValue) == key } - glog.Warningf("Expected int, found float: %f", typedValue) + eh.log.Info("Expected type int, found float", "value", typedValue, "type", fmt.Sprintf("%T", typedValue)) return false case string: // extract in64 from string int64Num, err := strconv.ParseInt(typedValue, 10, 64) if err != nil { - glog.Warningf("Failed to parse int64 from string: %v", err) + eh.log.Error(err, "Failed to parse int64 from string") return false } return int64Num == key default: - glog.Warningf("Expected int, %v is of type %T", value, value) + eh.log.Info("Expected type int", "value", value, "type", fmt.Sprintf("%T", value)) return false } } diff --git a/pkg/engine/variables/operator/notequal.go b/pkg/engine/variables/operator/notequal.go index 9af9e891f3..ce9b4e87f8 100644 --- a/pkg/engine/variables/operator/notequal.go +++ b/pkg/engine/variables/operator/notequal.go @@ -1,19 +1,21 @@ package operator import ( + "fmt" "math" "reflect" "strconv" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/engine/context" ) //NewNotEqualHandler returns handler to manage NotEqual operations -func NewNotEqualHandler(ctx context.EvalInterface, subHandler VariableSubstitutionHandler) OperatorHandler { +func NewNotEqualHandler(log logr.Logger, ctx context.EvalInterface, subHandler VariableSubstitutionHandler) OperatorHandler { return NotEqualHandler{ ctx: ctx, subHandler: subHandler, + log: log, } } @@ -21,6 +23,7 @@ func NewNotEqualHandler(ctx context.EvalInterface, subHandler VariableSubstituti 
type NotEqualHandler struct { ctx context.EvalInterface subHandler VariableSubstitutionHandler + log logr.Logger } //Evaluate evaluates expression with NotEqual Operator @@ -28,14 +31,14 @@ func (neh NotEqualHandler) Evaluate(key, value interface{}) bool { var err error //TODO: decouple variables from evaluation // substitute the variables - if key, err = neh.subHandler(neh.ctx, key); err != nil { + if key, err = neh.subHandler(neh.log, neh.ctx, key); err != nil { // Failed to resolve the variable - glog.Infof("Failed to resolve variables in key: %s: %v", key, err) + neh.log.Error(err, "Failed to resolve variable", "variable", key) return false } - if value, err = neh.subHandler(neh.ctx, value); err != nil { + if value, err = neh.subHandler(neh.log, neh.ctx, value); err != nil { // Failed to resolve the variable - glog.Infof("Failed to resolve variables in value: %s: %v", value, err) + neh.log.Error(err, "Failed to resolve variable", "variable", value) return false } // key and value need to be of same type @@ -55,7 +58,7 @@ func (neh NotEqualHandler) Evaluate(key, value interface{}) bool { case []interface{}: return neh.validateValueWithSlicePattern(typedKey, value) default: - glog.Error("Unsupported type %V", typedKey) + neh.log.Info("Unsupported type", "value", typedKey, "type", fmt.Sprintf("%T", typedKey)) return false } } @@ -64,7 +67,7 @@ func (neh NotEqualHandler) validateValueWithSlicePattern(key []interface{}, valu if val, ok := value.([]interface{}); ok { return !reflect.DeepEqual(key, val) } - glog.Warningf("Expected []interface{}, %v is of type %T", value, value) + neh.log.Info("Expected type []interface{}", "value", value, "type", fmt.Sprintf("%T", value)) return false } @@ -72,7 +75,7 @@ func (neh NotEqualHandler) validateValueWithMapPattern(key map[string]interface{ if val, ok := value.(map[string]interface{}); ok { return !reflect.DeepEqual(key, val) } - glog.Warningf("Expected map[string]interface{}, %v is of type %T", value, value) + neh.log.Info("Expected type map[string]interface{}", "value", value, "type", fmt.Sprintf("%T", value)) return false } @@ -80,7 +83,7 @@ func (neh NotEqualHandler) validateValuewithStringPattern(key string, value inte if val, ok := value.(string); ok { return key != val } - glog.Warningf("Expected string, %v is of type %T", value, value) + neh.log.Info("Expected type string", "value", value, "type", fmt.Sprintf("%T", value)) return false } @@ -91,25 +94,25 @@ func (neh NotEqualHandler) validateValuewithFloatPattern(key float64, value inte if key == math.Trunc(key) { return int(key) != typedValue } - glog.Warningf("Expected float, found int: %d\n", typedValue) + neh.log.Info("Expected type float, found int", "typedValue", typedValue) case int64: // check that float has not fraction if key == math.Trunc(key) { return int64(key) != typedValue } - glog.Warningf("Expected float, found int: %d\n", typedValue) + neh.log.Info("Expected type float, found int", "typedValue", typedValue) case float64: return typedValue != key case string: // extract float from string float64Num, err := strconv.ParseFloat(typedValue, 64) if err != nil { - glog.Warningf("Failed to parse float64 from string: %v", err) + neh.log.Error(err, "Failed to parse float64 from string") return false } return float64Num != key default: - glog.Warningf("Expected float, found: %T\n", value) + neh.log.Info("Expected type float", "value", value, "type", fmt.Sprintf("%T", value)) return false } return false @@ -118,7 +121,7 @@ func (neh NotEqualHandler) validateValuewithFloatPattern(key 
float64, value inte func (neh NotEqualHandler) validateValuewithBoolPattern(key bool, value interface{}) bool { typedValue, ok := value.(bool) if !ok { - glog.Error("Expected bool, found %V", value) + neh.log.Info("Expected type bool", "value", value, "type", fmt.Sprintf("%T", value)) return false } return key != typedValue @@ -135,18 +138,18 @@ func (neh NotEqualHandler) validateValuewithIntPattern(key int64, value interfac if typedValue == math.Trunc(typedValue) { return int64(typedValue) != key } - glog.Warningf("Expected int, found float: %f\n", typedValue) + neh.log.Info("Expected type int, found float", "value", typedValue, "type", fmt.Sprintf("%T", typedValue)) return false case string: // extract in64 from string int64Num, err := strconv.ParseInt(typedValue, 10, 64) if err != nil { - glog.Warningf("Failed to parse int64 from string: %v", err) + neh.log.Error(err, "Failed to parse int64 from string") return false } return int64Num != key default: - glog.Warningf("Expected int, %v is of type %T", value, value) + neh.log.Info("Expected type int", "value", value, "type", fmt.Sprintf("%T", value)) return false } } diff --git a/pkg/engine/variables/operator/operator.go b/pkg/engine/variables/operator/operator.go index 2b3f9ce7a1..adafe69982 100644 --- a/pkg/engine/variables/operator/operator.go +++ b/pkg/engine/variables/operator/operator.go @@ -1,7 +1,7 @@ package operator import ( - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/context" ) @@ -17,17 +17,17 @@ type OperatorHandler interface { } //VariableSubstitutionHandler defines the handler function for variable substitution -type VariableSubstitutionHandler = func(ctx context.EvalInterface, pattern interface{}) (interface{}, error) +type VariableSubstitutionHandler = func(log logr.Logger, ctx context.EvalInterface, pattern interface{}) (interface{}, error) //CreateOperatorHandler returns the operator handler based on the operator used in condition -func CreateOperatorHandler(ctx context.EvalInterface, op kyverno.ConditionOperator, subHandler VariableSubstitutionHandler) OperatorHandler { +func CreateOperatorHandler(log logr.Logger, ctx context.EvalInterface, op kyverno.ConditionOperator, subHandler VariableSubstitutionHandler) OperatorHandler { switch op { case kyverno.Equal: - return NewEqualHandler(ctx, subHandler) + return NewEqualHandler(log, ctx, subHandler) case kyverno.NotEqual: - return NewNotEqualHandler(ctx, subHandler) + return NewNotEqualHandler(log, ctx, subHandler) default: - glog.Errorf("unsupported operator: %s", string(op)) + log.Info("operator not supported", "operator", string(op)) } return nil } diff --git a/pkg/engine/variables/variables_test.go b/pkg/engine/variables/variables_test.go index 6d2eb7c60c..45b0ac58f8 100644 --- a/pkg/engine/variables/variables_test.go +++ b/pkg/engine/variables/variables_test.go @@ -6,6 +6,7 @@ import ( "testing" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" + "github.com/nirmata/kyverno/pkg/log" authenticationv1 "k8s.io/api/authentication/v1" "github.com/nirmata/kyverno/pkg/engine/context" @@ -84,7 +85,7 @@ func Test_variablesub1(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err != nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err != nil { t.Error(err) } resultRaw, err := json.Marshal(patternCopy) @@ -174,7 +175,7 @@ func Test_variablesub_multiple(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, 
patternCopy); err != nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err != nil { t.Error(err) } resultRaw, err := json.Marshal(patternCopy) @@ -261,7 +262,7 @@ func Test_variablesubstitution(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err != nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err != nil { t.Error(err) } resultRaw, err := json.Marshal(patternCopy) @@ -322,7 +323,7 @@ func Test_variableSubstitutionValue(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err != nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err != nil { t.Error(err) } resultRaw, err := json.Marshal(patternCopy) @@ -380,7 +381,7 @@ func Test_variableSubstitutionValueOperatorNotEqual(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err != nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err != nil { t.Error(err) } resultRaw, err := json.Marshal(patternCopy) @@ -439,7 +440,7 @@ func Test_variableSubstitutionValueFail(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err == nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err == nil { t.Log("expected to fails") t.Fail() } @@ -497,7 +498,7 @@ func Test_variableSubstitutionObject(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err != nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err != nil { t.Error(err) } resultRaw, err := json.Marshal(patternCopy) @@ -561,7 +562,7 @@ func Test_variableSubstitutionObjectOperatorNotEqualFail(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err == nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err == nil { t.Error(err) } @@ -620,7 +621,7 @@ func Test_variableSubstitutionMultipleObject(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, patternCopy); err != nil { + if _, err := SubstituteVars(log.Log, ctx, patternCopy); err != nil { t.Error(err) } resultRaw, err := json.Marshal(patternCopy) diff --git a/pkg/engine/variables/vars.go b/pkg/engine/variables/vars.go index 4c336f2ede..59fd8f7fd5 100644 --- a/pkg/engine/variables/vars.go +++ b/pkg/engine/variables/vars.go @@ -6,7 +6,7 @@ import ( "strconv" "strings" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/engine/context" ) @@ -17,9 +17,9 @@ const ( //SubstituteVars replaces the variables with the values defined in the context // - if any variable is invaid or has nil value, it is considered as a failed varable substitution -func SubstituteVars(ctx context.EvalInterface, pattern interface{}) (interface{}, error) { +func SubstituteVars(log logr.Logger, ctx context.EvalInterface, pattern interface{}) (interface{}, error) { errs := []error{} - pattern = subVars(ctx, pattern, "", &errs) + pattern = subVars(log, ctx, pattern, "", &errs) if len(errs) == 0 { // no error while parsing the pattern return pattern, nil @@ -27,40 +27,40 @@ func SubstituteVars(ctx context.EvalInterface, pattern interface{}) (interface{} return pattern, fmt.Errorf("%v", errs) } -func subVars(ctx context.EvalInterface, pattern interface{}, path string, errs *[]error) interface{} { +func subVars(log logr.Logger, ctx context.EvalInterface, pattern interface{}, path string, errs *[]error) interface{} { switch typedPattern := pattern.(type) { case map[string]interface{}: - return subMap(ctx, typedPattern, path, errs) + return subMap(log, ctx, typedPattern, path, errs) 
case []interface{}: - return subArray(ctx, typedPattern, path, errs) + return subArray(log, ctx, typedPattern, path, errs) case string: - return subValR(ctx, typedPattern, path, errs) + return subValR(log, ctx, typedPattern, path, errs) default: return pattern } } -func subMap(ctx context.EvalInterface, patternMap map[string]interface{}, path string, errs *[]error) map[string]interface{} { +func subMap(log logr.Logger, ctx context.EvalInterface, patternMap map[string]interface{}, path string, errs *[]error) map[string]interface{} { for key, patternElement := range patternMap { curPath := path + "/" + key - value := subVars(ctx, patternElement, curPath, errs) + value := subVars(log, ctx, patternElement, curPath, errs) patternMap[key] = value } return patternMap } -func subArray(ctx context.EvalInterface, patternList []interface{}, path string, errs *[]error) []interface{} { +func subArray(log logr.Logger, ctx context.EvalInterface, patternList []interface{}, path string, errs *[]error) []interface{} { for idx, patternElement := range patternList { curPath := path + "/" + strconv.Itoa(idx) - value := subVars(ctx, patternElement, curPath, errs) + value := subVars(log, ctx, patternElement, curPath, errs) patternList[idx] = value } return patternList } // subValR resolves the variables if defined -func subValR(ctx context.EvalInterface, valuePattern string, path string, errs *[]error) interface{} { +func subValR(log logr.Logger, ctx context.EvalInterface, valuePattern string, path string, errs *[]error) interface{} { // variable values can be scalar values(string,int, float) or they can be obects(map,slice) // - {{variable}} @@ -73,7 +73,7 @@ func subValR(ctx context.EvalInterface, valuePattern string, path string, errs * // since this might be a potential place for error, required better error reporting and handling // object values are only suported for single variable substitution - if ok, retVal := processIfSingleVariable(ctx, valuePattern, path, errs); ok { + if ok, retVal := processIfSingleVariable(log, ctx, valuePattern, path, errs); ok { return retVal } // var emptyInterface interface{} @@ -82,7 +82,7 @@ func subValR(ctx context.EvalInterface, valuePattern string, path string, errs * for { valueStr := valuePattern if len(failedVars) != 0 { - glog.Info("some failed variables short-circuiting") + log.Info("failed to resolve variables; short-circuiting") break } // get variables at this level @@ -123,7 +123,7 @@ func subValR(ctx context.EvalInterface, valuePattern string, path string, errs * continue } // if type is not scalar then consider this as a failed variable - glog.Infof("variable %s resolves to non-scalar value %v. Non-Scalar values are not supported for nested variables", k, v) + log.Info("variable resolves to non-scalar value. 
Non-Scalar values are not supported for nested variables", "variable", k, "value", v) failedVars = append(failedVars, k) } valuePattern = newVal @@ -143,10 +143,10 @@ func subValR(ctx context.EvalInterface, valuePattern string, path string, errs * // if the value can be evaluted return the value // -> return value can be scalar or object type // -> if the variable is not present in the context then add an error and dont process further -func processIfSingleVariable(ctx context.EvalInterface, valuePattern interface{}, path string, errs *[]error) (bool, interface{}) { +func processIfSingleVariable(log logr.Logger, ctx context.EvalInterface, valuePattern interface{}, path string, errs *[]error) (bool, interface{}) { valueStr, ok := valuePattern.(string) if !ok { - glog.Infof("failed to convert %v to string", valuePattern) + log.Info("failed to convert to string", "pattern", valuePattern) return false, nil } // get variables at this level diff --git a/pkg/engine/variables/vars_test.go b/pkg/engine/variables/vars_test.go index 171c62ace4..33745ea81f 100644 --- a/pkg/engine/variables/vars_test.go +++ b/pkg/engine/variables/vars_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/nirmata/kyverno/pkg/engine/context" + "github.com/nirmata/kyverno/pkg/log" "gotest.tools/assert" ) @@ -64,7 +65,7 @@ func Test_subVars_success(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, pattern); err != nil { + if _, err := SubstituteVars(log.Log, ctx, pattern); err != nil { t.Error(err) } } @@ -125,7 +126,7 @@ func Test_subVars_failed(t *testing.T) { t.Error(err) } - if _, err := SubstituteVars(ctx, pattern); err == nil { + if _, err := SubstituteVars(log.Log, ctx, pattern); err == nil { t.Error("error is expected") } } @@ -152,5 +153,5 @@ func Test_SubvarRecursive(t *testing.T) { ctx := context.NewContext() assert.Assert(t, ctx.AddResource(resourceRaw)) errs := []error{} - subValR(ctx, string(patternRaw), "/", &errs) + subValR(log.Log, ctx, string(patternRaw), "/", &errs) } diff --git a/pkg/event/controller.go b/pkg/event/controller.go index aa98d09e42..0125397c8d 100644 --- a/pkg/event/controller.go +++ b/pkg/event/controller.go @@ -3,7 +3,7 @@ package event import ( "time" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme" kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1" @@ -34,6 +34,7 @@ type Generator struct { admissionCtrRecorder record.EventRecorder // events generated at namespaced policy controller to process 'generate' rule genPolicyRecorder record.EventRecorder + log logr.Logger } //Interface to generate event @@ -42,32 +43,33 @@ type Interface interface { } //NewEventGenerator to generate a new event controller -func NewEventGenerator(client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer) *Generator { +func NewEventGenerator(client *client.Client, pInformer kyvernoinformer.ClusterPolicyInformer, log logr.Logger) *Generator { gen := Generator{ client: client, pLister: pInformer.Lister(), queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), eventWorkQueueName), pSynced: pInformer.Informer().HasSynced, - policyCtrRecorder: initRecorder(client, PolicyController), - admissionCtrRecorder: initRecorder(client, AdmissionController), - genPolicyRecorder: initRecorder(client, GeneratePolicyController), + policyCtrRecorder: initRecorder(client, PolicyController, log), + admissionCtrRecorder: initRecorder(client, 
AdmissionController, log), + genPolicyRecorder: initRecorder(client, GeneratePolicyController, log), + log: log, } return &gen } -func initRecorder(client *client.Client, eventSource Source) record.EventRecorder { +func initRecorder(client *client.Client, eventSource Source, log logr.Logger) record.EventRecorder { // Initliaze Event Broadcaster err := scheme.AddToScheme(scheme.Scheme) if err != nil { - glog.Error(err) + log.Error(err, "failed to add to scheme") return nil } eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.V(4).Infof) + eventBroadcaster.StartLogging(log.Info) eventInterface, err := client.GetEventsInterface() if err != nil { - glog.Error(err) // TODO: add more specific error + log.Error(err, "failed to get event interface for logging") return nil } eventBroadcaster.StartRecordingToSink( @@ -81,11 +83,12 @@ func initRecorder(client *client.Client, eventSource Source) record.EventRecorde //Add queues an event for generation func (gen *Generator) Add(infos ...Info) { + logger := gen.log for _, info := range infos { if info.Name == "" { // dont create event for resources with generateName // as the name is not generated yet - glog.V(4).Infof("received info %v, not creating an event as the resource has not been assigned a name yet", info) + logger.V(4).Info("not creating an event as the resource has not been assigned a name yet", "kind", info.Kind, "name", info.Name, "namespace", info.Namespace) continue } gen.queue.Add(info) @@ -94,12 +97,14 @@ func (gen *Generator) Add(infos ...Info) { // Run begins generator func (gen *Generator) Run(workers int, stopCh <-chan struct{}) { + logger := gen.log defer utilruntime.HandleCrash() - glog.Info("Starting event generator") - defer glog.Info("Shutting down event generator") + + logger.Info("start") + defer logger.Info("shutting down") if !cache.WaitForCacheSync(stopCh, gen.pSynced) { - glog.Error("event generator: failed to sync informer cache") + logger.Info("failed to sync informer cache") } for i := 0; i < workers; i++ { @@ -114,24 +119,25 @@ func (gen *Generator) runWorker() { } func (gen *Generator) handleErr(err error, key interface{}) { + logger := gen.log if err == nil { gen.queue.Forget(key) return } // This controller retries if something goes wrong. After that, it stops trying. if gen.queue.NumRequeues(key) < workQueueRetryLimit { - glog.Warningf("Error syncing events %v(re-queuing request, the resource might not have been created yet): %v", key, err) + logger.Error(err, "Error syncing events; re-queuing request, the resource might not have been created yet", "key", key) // Re-enqueue the key rate limited. Based on the rate limiter on the // queue and the re-enqueue history, the key will be processed later again. 
gen.queue.AddRateLimited(key) return } gen.queue.Forget(key) - glog.Error(err) - glog.Warningf("Dropping the key out of the queue: %v", err) + logger.Error(err, "dropping the key out of queue", "key", key) } func (gen *Generator) processNextWorkItem() bool { + logger := gen.log obj, shutdown := gen.queue.Get() if shutdown { return false @@ -144,7 +150,7 @@ func (gen *Generator) processNextWorkItem() bool { if key, ok = obj.(Info); !ok { gen.queue.Forget(obj) - glog.Warningf("Expecting type info by got %v\n", obj) + logger.Info("Incorrect type; expected type 'info'", "obj", obj) return nil } err := gen.syncHandler(key) @@ -152,13 +158,14 @@ func (gen *Generator) processNextWorkItem() bool { return nil }(obj) if err != nil { - glog.Error(err) + logger.Error(err, "failed to process next work item") return true } return true } func (gen *Generator) syncHandler(key Info) error { + logger := gen.log var robj runtime.Object var err error switch key.Kind { @@ -166,13 +173,13 @@ func (gen *Generator) syncHandler(key Info) error { //TODO: policy is clustered resource so wont need namespace robj, err = gen.pLister.Get(key.Name) if err != nil { - glog.V(4).Infof("Error creating event: unable to get policy %s, will retry ", key.Name) + logger.Error(err, "failed to get policy", "name", key.Name) return err } default: robj, err = gen.client.GetResource(key.Kind, key.Namespace, key.Name) if err != nil { - glog.V(4).Infof("Error creating event: unable to get resource %s/%s/%s, will retry ", key.Kind, key.Namespace, key.Name) + logger.Error(err, "failed to get resource", "kind", key.Kind, "name", key.Name, "namespace", key.Namespace) return err } } @@ -192,13 +199,14 @@ func (gen *Generator) syncHandler(key Info) error { case GeneratePolicyController: gen.genPolicyRecorder.Event(robj, eventType, key.Reason, key.Message) default: - glog.Info("info.source not defined for the event generator request") + logger.Info("info.source not defined for the request") } return nil } //NewEvent builds a event creation request func NewEvent( + log logr.Logger, rkind, rapiVersion, rnamespace, @@ -209,7 +217,7 @@ func NewEvent( args ...interface{}) Info { msgText, err := getEventMsg(message, args...) 
if err != nil { - glog.Error(err) + log.Error(err, "failed to get event message") } return Info{ Kind: rkind, diff --git a/pkg/generate/cleanup/controller.go b/pkg/generate/cleanup/controller.go index 396058a346..b7cc7a2d46 100644 --- a/pkg/generate/cleanup/controller.go +++ b/pkg/generate/cleanup/controller.go @@ -1,11 +1,11 @@ package cleanup import ( - "fmt" "time" "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned" kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1" @@ -53,6 +53,7 @@ type Controller struct { //TODO: list of generic informers // only support Namespaces for deletion of resource nsInformer informers.GenericInformer + log logr.Logger } //NewController returns a new controller instance to manage generate-requests @@ -62,6 +63,7 @@ func NewController( pInformer kyvernoinformer.ClusterPolicyInformer, grInformer kyvernoinformer.GenerateRequestInformer, dynamicInformer dynamicinformer.DynamicSharedInformerFactory, + log logr.Logger, ) *Controller { c := Controller{ kyvernoClient: kyvernoclient, // as we dont want a deleted GR to be re-queue queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request-cleanup"), dynamicInformer: dynamicInformer, + log: log, } c.control = Control{client: kyvernoclient} c.enqueueGR = c.enqueue @@ -102,10 +105,11 @@ func NewController( } func (c *Controller) deleteGenericResource(obj interface{}) { + logger := c.log r := obj.(*unstructured.Unstructured) grs, err := c.grLister.GetGenerateRequestsForResource(r.GetKind(), r.GetNamespace(), r.GetName()) if err != nil { - glog.Errorf("failed to Generate Requests for resource %s/%s/%s: %v", r.GetKind(), r.GetNamespace(), r.GetName(), err) + logger.Error(err, "failed to get generate request CR for resource", "kind", r.GetKind(), "namespace", r.GetNamespace(), "name", r.GetName()) return } // re-evaluate the GR as the resource was deleted @@ -115,26 +119,27 @@ func (c *Controller) deleteGenericResource(obj interface{}) { } func (c *Controller) deletePolicy(obj interface{}) { + logger := c.log p, ok := obj.(*kyverno.ClusterPolicy) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + logger.Info("Couldn't get object from tombstone", "obj", obj) return } _, ok = tombstone.Obj.(*kyverno.ClusterPolicy) if !ok { - glog.Info(fmt.Errorf("Tombstone contained object that is not a Generate Request %#v", obj)) + logger.Info("Tombstone contained object that is not a Generate Request", "obj", obj) return } } - glog.V(4).Infof("Deleting Policy %s", p.Name) + logger.V(4).Info("deleting policy", "name", p.Name) // clean up the GR // Get the corresponding GR // get the list of GR for the current Policy version grs, err := c.grLister.GetGenerateRequestsForClusterPolicy(p.Name) if err != nil { - glog.Errorf("failed to Generate Requests for policy %s: %v", p.Name, err) + logger.Error(err, "failed to get generate request CRs for the policy", "name", p.Name) return } for _, gr := range grs { @@ -153,44 +158,46 @@ func (c *Controller) updateGR(old, cur interface{}) { } func (c *Controller) deleteGR(obj interface{}) { + logger := c.log gr, ok := obj.(*kyverno.GenerateRequest) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Info(fmt.Errorf("Couldn't get 
object from tombstone %#v", obj)) + logger.Info("Couldn't get object from tombstone", "obj", obj) return } _, ok = tombstone.Obj.(*kyverno.GenerateRequest) if !ok { - glog.Info(fmt.Errorf("Tombstone contained object that is not a Generate Request %#v", obj)) + logger.Info("Tombstone contained object that is not a Generate Request", "obj", obj) return } } - glog.V(4).Infof("Deleting GR %s", gr.Name) + logger.V(4).Info("deleting Generate Request CR", "name", gr.Name) // sync Handler will remove it from the queue c.enqueueGR(gr) } func (c *Controller) enqueue(gr *kyverno.GenerateRequest) { + logger := c.log key, err := cache.MetaNamespaceKeyFunc(gr) if err != nil { glog.Error(err) return } - glog.V(4).Infof("cleanup enqueu: %v", gr.Name) + logger.V(4).Info("enqueue generate request", "name", gr.Name) c.queue.Add(key) } //Run starts the generate-request re-conciliation loop func (c *Controller) Run(workers int, stopCh <-chan struct{}) { + logger := c.log defer utilruntime.HandleCrash() defer c.queue.ShutDown() - - glog.Info("Starting generate-policy-cleanup controller") - defer glog.Info("Shutting down generate-policy-cleanup controller") + logger.Info("starting") + defer logger.Info("shutting down") if !cache.WaitForCacheSync(stopCh, c.pSynced, c.grSynced) { - glog.Error("generate-policy-cleanup controller: failed to sync informer cache") + logger.Info("failed to sync informer cache") return } for i := 0; i < workers; i++ { @@ -219,31 +226,33 @@ func (c *Controller) processNextWorkItem() bool { } func (c *Controller) handleErr(err error, key interface{}) { + logger := c.log if err == nil { c.queue.Forget(key) return } if c.queue.NumRequeues(key) < maxRetries { - glog.Errorf("Error syncing Generate Request %v: %v", key, err) + logger.Error(err, "failed to sync generate request", "key", key) c.queue.AddRateLimited(key) return } utilruntime.HandleError(err) - glog.Infof("Dropping generate request %q out of the queue: %v", key, err) + logger.Error(err, "dropping generate request out of the queue", "key", key) c.queue.Forget(key) } func (c *Controller) syncGenerateRequest(key string) error { + logger := c.log.WithValues("key", key) var err error startTime := time.Now() - glog.V(4).Infof("Started syncing GR %q (%v)", key, startTime) + logger.Info("started syncing generate request", "startTime", startTime) defer func() { - glog.V(4).Infof("Finished syncing GR %q (%v)", key, time.Since(startTime)) + logger.V(4).Info("finished syncing generate request", "processingTime", time.Since(startTime)) }() _, grName, err := cache.SplitMetaNamespaceKey(key) if errors.IsNotFound(err) { - glog.Infof("Generate Request %s has been deleted", key) + logger.Info("generate request has been deleted") return nil } if err != nil { diff --git a/pkg/generate/controller.go b/pkg/generate/controller.go index 944b8b49cb..8560043996 100644 --- a/pkg/generate/controller.go +++ b/pkg/generate/controller.go @@ -1,10 +1,9 @@ package generate import ( - "fmt" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned" kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1" @@ -57,6 +56,7 @@ type Controller struct { //TODO: list of generic informers // only support Namespaces for re-evalutation on resource updates nsInformer informers.GenericInformer + log logr.Logger } //NewController returns an instance of the Generate-Request Controller @@ -68,6 +68,7 @@ func 
NewController( eventGen event.Interface, pvGenerator policyviolation.GeneratorInterface, dynamicInformer dynamicinformer.DynamicSharedInformerFactory, + log logr.Logger, ) *Controller { c := Controller{ client: client, @@ -78,6 +79,7 @@ func NewController( // as we dont want a deleted GR to be re-queue queue: workqueue.NewNamedRateLimitingQueue(workqueue.NewItemExponentialFailureRateLimiter(1, 30), "generate-request"), dynamicInformer: dynamicInformer, + log: log, } c.statusControl = StatusControl{client: kyvernoclient} @@ -112,11 +114,12 @@ func NewController( } func (c *Controller) updateGenericResource(old, cur interface{}) { + logger := c.log curR := cur.(*unstructured.Unstructured) grs, err := c.grLister.GetGenerateRequestsForResource(curR.GetKind(), curR.GetNamespace(), curR.GetName()) if err != nil { - glog.Errorf("failed to Generate Requests for resource %s/%s/%s: %v", curR.GetKind(), curR.GetNamespace(), curR.GetName(), err) + logger.Error(err, "failed to get generate request CR for the resource", "kind", curR.GetKind(), "name", curR.GetName(), "namespace", curR.GetNamespace()) return } // re-evaluate the GR as the resource was updated @@ -129,13 +132,14 @@ func (c *Controller) updateGenericResource(old, cur interface{}) { func (c *Controller) enqueue(gr *kyverno.GenerateRequest) { key, err := cache.MetaNamespaceKeyFunc(gr) if err != nil { - glog.Error(err) + c.log.Error(err, "failed to extract name") return } c.queue.Add(key) } func (c *Controller) updatePolicy(old, cur interface{}) { + logger := c.log oldP := old.(*kyverno.ClusterPolicy) curP := cur.(*kyverno.ClusterPolicy) if oldP.ResourceVersion == curP.ResourceVersion { @@ -143,11 +147,11 @@ func (c *Controller) updatePolicy(old, cur interface{}) { // Two different versions of the same replica set will always have different RVs. return } - glog.V(4).Infof("Updating Policy %s", oldP.Name) + logger.V(4).Info("updating policy", "name", oldP.Name) // get the list of GR for the current Policy version grs, err := c.grLister.GetGenerateRequestsForClusterPolicy(curP.Name) if err != nil { - glog.Errorf("failed to Generate Requests for policy %s: %v", curP.Name, err) + logger.Error(err, "failed to get generate requests for policy", "name", curP.Name) return } // re-evaluate the GR as the policy was updated @@ -178,34 +182,36 @@ func (c *Controller) updateGR(old, cur interface{}) { } func (c *Controller) deleteGR(obj interface{}) { + logger := c.log gr, ok := obj.(*kyverno.GenerateRequest) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + logger.Info("Couldn't get object from tombstone", "obj", obj) return } _, ok = tombstone.Obj.(*kyverno.GenerateRequest) if !ok { - glog.Info(fmt.Errorf("Tombstone contained object that is not a Generate Request %#v", obj)) + logger.Info("tombstone contained object that is not a Generate Request CR", "obj", obj) return } } - glog.V(4).Infof("Deleting GR %s", gr.Name) + logger.Info("deleting generate request", "name", gr.Name) // sync Handler will remove it from the queue c.enqueueGR(gr) } //Run ... 
func (c *Controller) Run(workers int, stopCh <-chan struct{}) { + logger := c.log defer utilruntime.HandleCrash() defer c.queue.ShutDown() - glog.Info("Starting generate-policy controller") - defer glog.Info("Shutting down generate-policy controller") + logger.Info("starting") + defer logger.Info("shutting down") if !cache.WaitForCacheSync(stopCh, c.pSynced, c.grSynced) { - glog.Error("generate-policy controller: failed to sync informer cache") + logger.Info("failed to sync informer cache") return } for i := 0; i < workers; i++ { @@ -234,27 +240,29 @@ func (c *Controller) processNextWorkItem() bool { } func (c *Controller) handleErr(err error, key interface{}) { + logger := c.log if err == nil { c.queue.Forget(key) return } if c.queue.NumRequeues(key) < maxRetries { - glog.Errorf("Error syncing Generate Request %v: %v", key, err) + logger.Error(err, "failed to sync generate request", "key", key) c.queue.AddRateLimited(key) return } utilruntime.HandleError(err) - glog.Infof("Dropping generate request %q out of the queue: %v", key, err) + logger.Error(err, "Dropping generate request from the queue", "key", key) c.queue.Forget(key) } func (c *Controller) syncGenerateRequest(key string) error { + logger := c.log var err error startTime := time.Now() - glog.V(4).Infof("Started syncing GR %q (%v)", key, startTime) + logger.Info("started sync", "key", key, "startTime", startTime) defer func() { - glog.V(4).Infof("Finished syncing GR %q (%v)", key, time.Since(startTime)) + logger.V(4).Info("finished sync", "key", key, "processingTime", time.Since(startTime)) }() _, grName, err := cache.SplitMetaNamespaceKey(key) if err != nil { @@ -263,7 +271,7 @@ func (c *Controller) syncGenerateRequest(key string) error { gr, err := c.grLister.Get(grName) if err != nil { - glog.V(4).Info(err) + logger.Error(err, "failed to list generate requests") return err } return c.processGR(gr) diff --git a/pkg/generate/generate.go b/pkg/generate/generate.go index cd9ab1b2fd..6adbb5b75b 100644 --- a/pkg/generate/generate.go +++ b/pkg/generate/generate.go @@ -4,7 +4,7 @@ import ( "encoding/json" "fmt" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" dclient "github.com/nirmata/kyverno/pkg/dclient" "github.com/nirmata/kyverno/pkg/engine" @@ -16,6 +16,7 @@ import ( ) func (c *Controller) processGR(gr *kyverno.GenerateRequest) error { + logger := c.log.WithValues("name", gr.Name, "policy", gr.Spec.Policy, "kind", gr.Spec.Resource.Kind, "namespace", gr.Spec.Resource.Namespace, "name", gr.Spec.Resource.Name) var err error var resource *unstructured.Unstructured var genResources []kyverno.ResourceSpec @@ -23,45 +24,46 @@ func (c *Controller) processGR(gr *kyverno.GenerateRequest) error { resource, err = getResource(c.client, gr.Spec.Resource) if err != nil { // Dont update status - glog.V(4).Infof("resource does not exist or is yet to be created, requeuing: %v", err) + logger.Error(err, "resource does not exist or is yet to be created, requeueing") return err } // 2 - Apply the generate policy on the resource genResources, err = c.applyGenerate(*resource, *gr) // 3 - Report Events - reportEvents(err, c.eventGen, *gr, *resource) + reportEvents(logger, err, c.eventGen, *gr, *resource) // 4 - Update Status return updateStatus(c.statusControl, *gr, err, genResources) } func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyverno.GenerateRequest) ([]kyverno.ResourceSpec, error) { + logger := c.log.WithValues("name", gr.Name, "policy", gr.Spec.Policy, 
"kind", gr.Spec.Resource.Kind, "namespace", gr.Spec.Resource.Namespace, "name", gr.Spec.Resource.Name) // Get the list of rules to be applied // get policy policy, err := c.pLister.Get(gr.Spec.Policy) if err != nil { - glog.V(4).Infof("policy %s not found: %v", gr.Spec.Policy, err) + logger.Error(err, "policy not found") return nil, nil } // build context ctx := context.NewContext() resourceRaw, err := resource.MarshalJSON() if err != nil { - glog.V(4).Infof("failed to marshal resource: %v", err) + logger.Error(err, "failed to marshal resource") return nil, err } err = ctx.AddResource(resourceRaw) if err != nil { - glog.Infof("Failed to load resource in context: %v", err) + logger.Error(err, "failed to load resource in context") return nil, err } err = ctx.AddUserInfo(gr.Spec.Context.UserRequestInfo) if err != nil { - glog.Infof("Failed to load userInfo in context: %v", err) + logger.Error(err, "failed to load SA in context") return nil, err } err = ctx.AddSA(gr.Spec.Context.UserRequestInfo.AdmissionUserInfo.Username) if err != nil { - glog.Infof("Failed to load serviceAccount in context: %v", err) + logger.Error(err, "failed to load UserInfo in context") return nil, err } @@ -75,12 +77,12 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern // check if the policy still applies to the resource engineResponse := engine.Generate(policyContext) if len(engineResponse.PolicyResponse.Rules) == 0 { - glog.V(4).Infof("policy %s, dont not apply to resource %v", gr.Spec.Policy, gr.Spec.Resource) + logger.V(4).Info("policy does not apply to resource") return nil, fmt.Errorf("policy %s, dont not apply to resource %v", gr.Spec.Policy, gr.Spec.Resource) } // Apply the generate rule on resource - return applyGeneratePolicy(c.client, policyContext) + return applyGeneratePolicy(logger, c.client, policyContext) } func updateStatus(statusControl StatusControlInterface, gr kyverno.GenerateRequest, err error, genResources []kyverno.ResourceSpec) error { @@ -92,7 +94,7 @@ func updateStatus(statusControl StatusControlInterface, gr kyverno.GenerateReque return statusControl.Success(gr, genResources) } -func applyGeneratePolicy(client *dclient.Client, policyContext engine.PolicyContext) ([]kyverno.ResourceSpec, error) { +func applyGeneratePolicy(log logr.Logger, client *dclient.Client, policyContext engine.PolicyContext) ([]kyverno.ResourceSpec, error) { // List of generatedResources var genResources []kyverno.ResourceSpec // Get the response as the actions to be performed on the resource @@ -111,7 +113,7 @@ func applyGeneratePolicy(client *dclient.Client, policyContext engine.PolicyCont if !rule.HasGenerate() { continue } - genResource, err := applyRule(client, rule, resource, ctx, processExisting) + genResource, err := applyRule(log, client, rule, resource, ctx, processExisting) if err != nil { return nil, err } @@ -121,7 +123,7 @@ func applyGeneratePolicy(client *dclient.Client, policyContext engine.PolicyCont return genResources, nil } -func applyRule(client *dclient.Client, rule kyverno.Rule, resource unstructured.Unstructured, ctx context.EvalInterface, processExisting bool) (kyverno.ResourceSpec, error) { +func applyRule(log logr.Logger, client *dclient.Client, rule kyverno.Rule, resource unstructured.Unstructured, ctx context.EvalInterface, processExisting bool) (kyverno.ResourceSpec, error) { var rdata map[string]interface{} var err error var mode ResourceMode @@ -136,7 +138,7 @@ func applyRule(client *dclient.Client, rule kyverno.Rule, resource unstructured. 
// format : {{ results in error and rule is not applied // - valid variables are replaced with the values - if _, err := variables.SubstituteVars(ctx, genUnst.Object); err != nil { + if _, err := variables.SubstituteVars(log, ctx, genUnst.Object); err != nil { return noGenResource, err } genKind, _, err := unstructured.NestedString(genUnst.Object, "kind") @@ -168,9 +170,9 @@ func applyRule(client *dclient.Client, rule kyverno.Rule, resource unstructured. } if genData != nil { - rdata, mode, err = manageData(genKind, genNamespace, genName, genData, client, resource) + rdata, mode, err = manageData(log, genKind, genNamespace, genName, genData, client, resource) } else { - rdata, mode, err = manageClone(genKind, genNamespace, genName, genCopy, client, resource) + rdata, mode, err = manageClone(log, genKind, genNamespace, genName, genCopy, client, resource) } if err != nil { return noGenResource, err @@ -197,38 +199,38 @@ func applyRule(client *dclient.Client, rule kyverno.Rule, resource unstructured. // - app.kubernetes.io/managed-by: kyverno // - kyverno.io/generated-by: kind/namespace/name (trigger resource) manageLabels(newResource, resource) - + logger := log.WithValues("genKind", genKind, "genNamespace", genNamespace, "genName", genName) if mode == Create { // Reset resource version newResource.SetResourceVersion("") // Create the resource - glog.V(4).Infof("Creating new resource %s/%s/%s", genKind, genNamespace, genName) + logger.V(4).Info("creating new resource") _, err = client.CreateResource(genKind, genNamespace, newResource, false) if err != nil { // Failed to create resource return noGenResource, err } - glog.V(4).Infof("Created new resource %s/%s/%s", genKind, genNamespace, genName) + logger.V(4).Info("created new resource") } else if mode == Update { - glog.V(4).Infof("Updating existing resource %s/%s/%s", genKind, genNamespace, genName) + logger.V(4).Info("updating existing resource") // Update the resource _, err := client.UpdateResource(genKind, genNamespace, newResource, false) if err != nil { // Failed to update resource return noGenResource, err } - glog.V(4).Infof("Updated existing resource %s/%s/%s", genKind, genNamespace, genName) + logger.V(4).Info("updated existing resource") } return newGenResource, nil } -func manageData(kind, namespace, name string, data map[string]interface{}, client *dclient.Client, resource unstructured.Unstructured) (map[string]interface{}, ResourceMode, error) { +func manageData(log logr.Logger, kind, namespace, name string, data map[string]interface{}, client *dclient.Client, resource unstructured.Unstructured) (map[string]interface{}, ResourceMode, error) { // check if resource to be generated exists obj, err := client.GetResource(kind, namespace, name) if apierrors.IsNotFound(err) { - glog.V(4).Infof("Resource %s/%s/%s does not exists, will try to create", kind, namespace, name) + log.Error(err, "resource does not exist, will try to create", "genKind", kind, "genNamespace", namespace, "genName", name) return data, Create, nil } if err != nil { @@ -237,18 +239,17 @@ func manageData(kind, namespace, name string, data map[string]interface{}, clien return nil, Skip, err } // Resource exists; verfiy the content of the resource - err = checkResource(data, obj) + err = checkResource(log, data, obj) if err == nil { // Existing resource does contain the mentioned configuration in spec, skip processing the resource as it is already in expected state return nil, Skip, nil } - - glog.V(4).Infof("Resource %s/%s/%s exists but missing required configuration, 
will try to update", kind, namespace, name) + log.Info("to be generated resoruce already exists, but is missing the specifeid configurations, will try to update", "genKind", kind, "genNamespace", namespace, "genName", name) return data, Update, nil } -func manageClone(kind, namespace, name string, clone map[string]interface{}, client *dclient.Client, resource unstructured.Unstructured) (map[string]interface{}, ResourceMode, error) { +func manageClone(log logr.Logger, kind, namespace, name string, clone map[string]interface{}, client *dclient.Client, resource unstructured.Unstructured) (map[string]interface{}, ResourceMode, error) { // check if resource to be generated exists _, err := client.GetResource(kind, namespace, name) if err == nil { @@ -257,6 +258,7 @@ func manageClone(kind, namespace, name string, clone map[string]interface{}, cli } //TODO: check this if !apierrors.IsNotFound(err) { + log.Error(err, "reference/clone resource is not found", "genKind", kind, "genNamespace", namespace, "genName", name) //something wrong while fetching resource return nil, Skip, err } @@ -274,8 +276,6 @@ func manageClone(kind, namespace, name string, clone map[string]interface{}, cli // attempting to clone it self, this will fail -> short-ciruit it return nil, Skip, nil } - - glog.V(4).Infof("check if resource %s/%s/%s exists", kind, newRNs, newRName) // check if the resource as reference in clone exists? obj, err := client.GetResource(kind, newRNs, newRName) if err != nil { @@ -298,10 +298,10 @@ const ( Update = "UPDATE" ) -func checkResource(newResourceSpec interface{}, resource *unstructured.Unstructured) error { +func checkResource(log logr.Logger, newResourceSpec interface{}, resource *unstructured.Unstructured) error { // check if the resource spec if a subset of the resource - if path, err := validate.ValidateResourceWithPattern(resource.Object, newResourceSpec); err != nil { - glog.V(4).Infof("Failed to match the resource at path %s: err %v", path, err) + if path, err := validate.ValidateResourceWithPattern(log, resource.Object, newResourceSpec); err != nil { + log.Error(err, "Failed to match the resource ", "path", path) return err } return nil diff --git a/pkg/generate/labels.go b/pkg/generate/labels.go index 282caf55fa..312d49c1e7 100644 --- a/pkg/generate/labels.go +++ b/pkg/generate/labels.go @@ -3,7 +3,7 @@ package generate import ( "fmt" - "github.com/golang/glog" + "github.com/nirmata/kyverno/pkg/log" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -30,7 +30,7 @@ func managedBy(labels map[string]string) { val, ok := labels[key] if ok { if val != value { - glog.Infof("resource managed by %s, kyverno wont over-ride the label", val) + log.Log.Info(fmt.Sprintf("resource managed by %s, kyverno wont over-ride the label", val)) return } } @@ -46,7 +46,7 @@ func generatedBy(labels map[string]string, triggerResource unstructured.Unstruct val, ok := labels[key] if ok { if val != value { - glog.Infof("resource generated by %s, kyverno wont over-ride the label", val) + log.Log.Info(fmt.Sprintf("resource generated by %s, kyverno wont over-ride the label", val)) return } } diff --git a/pkg/generate/report.go b/pkg/generate/report.go index eaa5939e41..f9d24fcc10 100644 --- a/pkg/generate/report.go +++ b/pkg/generate/report.go @@ -3,13 +3,13 @@ package generate import ( "fmt" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/event" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) -func 
reportEvents(err error, eventGen event.Interface, gr kyverno.GenerateRequest, resource unstructured.Unstructured) { +func reportEvents(log logr.Logger, err error, eventGen event.Interface, gr kyverno.GenerateRequest, resource unstructured.Unstructured) { if err == nil { // Success Events // - resource -> policy rule applied successfully @@ -18,7 +18,6 @@ func reportEvents(err error, eventGen event.Interface, gr kyverno.GenerateReques eventGen.Add(events...) return } - glog.V(4).Infof("reporing events for %v", err) events := failedEvents(err, gr, resource) eventGen.Add(events...) } diff --git a/pkg/log/deleg.go b/pkg/log/deleg.go new file mode 100644 index 0000000000..4bfee0b69b --- /dev/null +++ b/pkg/log/deleg.go @@ -0,0 +1,121 @@ +package log + +import ( + "sync" + + "github.com/go-logr/logr" +) + +// loggerPromise knows how to populate a concrete logr.Logger +// with options, given an actual base logger later on down the line. +type loggerPromise struct { + logger *DelegatingLogger + childPromises []*loggerPromise + promisesLock sync.Mutex + + name *string + tags []interface{} +} + +// WithName provides a new Logger with the name appended +func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromise { + res := &loggerPromise{ + logger: l, + name: &name, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// WithValues provides a new Logger with the tags appended +func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise { + res := &loggerPromise{ + logger: l, + tags: tags, + promisesLock: sync.Mutex{}, + } + + p.promisesLock.Lock() + defer p.promisesLock.Unlock() + p.childPromises = append(p.childPromises, res) + return res +} + +// Fulfill instantiates the Logger with the provided logger +func (p *loggerPromise) Fulfill(parentLogger logr.Logger) { + var logger = parentLogger + if p.name != nil { + logger = logger.WithName(*p.name) + } + + if p.tags != nil { + logger = logger.WithValues(p.tags...) + } + + p.logger.Logger = logger + p.logger.promise = nil + + for _, childPromise := range p.childPromises { + childPromise.Fulfill(logger) + } +} + +// DelegatingLogger is a logr.Logger that delegates to another logr.Logger. +// If the underlying promise is not nil, it registers calls to sub-loggers with +// the logging factory to be populated later, and returns a new delegating +// logger. It expects to have *some* logr.Logger set at all times (generally +// a no-op logger before the promises are fulfilled). +type DelegatingLogger struct { + logr.Logger + promise *loggerPromise +} + +// WithName provides a new Logger with the name appended +func (l *DelegatingLogger) WithName(name string) logr.Logger { + if l.promise == nil { + return l.Logger.WithName(name) + } + + res := &DelegatingLogger{Logger: l.Logger} + promise := l.promise.WithName(res, name) + res.promise = promise + + return res +} + +// WithValues provides a new Logger with the tags appended +func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger { + if l.promise == nil { + return l.Logger.WithValues(tags...) + } + + res := &DelegatingLogger{Logger: l.Logger} + promise := l.promise.WithValues(res, tags...) + res.promise = promise + + return res +} + +// Fulfill switches the logger over to use the actual logger +// provided, instead of the temporary initial one, if this method +// has not been previously called. 
+func (l *DelegatingLogger) Fulfill(actual logr.Logger) { + if l.promise != nil { + l.promise.Fulfill(actual) + } +} + +// NewDelegatingLogger constructs a new DelegatingLogger which uses +// the given logger before it's promise is fulfilled. +func NewDelegatingLogger(initial logr.Logger) *DelegatingLogger { + l := &DelegatingLogger{ + Logger: initial, + promise: &loggerPromise{promisesLock: sync.Mutex{}}, + } + l.promise.logger = l + return l +} diff --git a/pkg/log/log.go b/pkg/log/log.go new file mode 100644 index 0000000000..d8ec75b46c --- /dev/null +++ b/pkg/log/log.go @@ -0,0 +1,15 @@ +package log + +import ( + "github.com/go-logr/logr" +) + +// SetLogger sets a concrete logging implementation for all deferred Loggers. +func SetLogger(l logr.Logger) { + Log.Fulfill(l) +} + +// Log is the base logger used by kubebuilder. It delegates +// to another logr.Logger. You *must* call SetLogger to +// get any actual logging. +var Log = NewDelegatingLogger(NullLogger{}) diff --git a/pkg/log/null.go b/pkg/log/null.go new file mode 100644 index 0000000000..7e806e032c --- /dev/null +++ b/pkg/log/null.go @@ -0,0 +1,44 @@ +package log + +import ( + "github.com/go-logr/logr" +) + +// NB: this is the same as the null logger logr/testing, +// but avoids accidentally adding the testing flags to +// all binaries. + +// NullLogger is a logr.Logger that does nothing. +type NullLogger struct{} + +var _ logr.Logger = NullLogger{} + +// Info implements logr.InfoLogger +func (NullLogger) Info(_ string, _ ...interface{}) { + // Do nothing. +} + +// Enabled implements logr.InfoLogger +func (NullLogger) Enabled() bool { + return false +} + +// Error implements logr.Logger +func (NullLogger) Error(_ error, _ string, _ ...interface{}) { + // Do nothing. +} + +// V implements logr.Logger +func (log NullLogger) V(_ int) logr.InfoLogger { + return log +} + +// WithName implements logr.Logger +func (log NullLogger) WithName(_ string) logr.Logger { + return log +} + +// WithValues implements logr.Logger +func (log NullLogger) WithValues(_ ...interface{}) logr.Logger { + return log +} diff --git a/pkg/policy/actions.go b/pkg/policy/actions.go index cdf37ed1a8..6042177ecf 100644 --- a/pkg/policy/actions.go +++ b/pkg/policy/actions.go @@ -5,6 +5,7 @@ import ( kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" dclient "github.com/nirmata/kyverno/pkg/dclient" + "github.com/nirmata/kyverno/pkg/log" "github.com/nirmata/kyverno/pkg/policy/generate" "github.com/nirmata/kyverno/pkg/policy/mutate" "github.com/nirmata/kyverno/pkg/policy/validate" @@ -40,7 +41,7 @@ func validateActions(idx int, rule kyverno.Rule, client *dclient.Client) error { // Generate if rule.HasGenerate() { - checker = generate.NewGenerateFactory(client, rule.Generation) + checker = generate.NewGenerateFactory(client, rule.Generation, log.Log) if path, err := checker.Validate(); err != nil { return fmt.Errorf("path: spec.rules[%d].generate.%s.: %v", idx, path, err) } diff --git a/pkg/policy/apply.go b/pkg/policy/apply.go index ed4b72ac65..f777e21ab1 100644 --- a/pkg/policy/apply.go +++ b/pkg/policy/apply.go @@ -8,7 +8,7 @@ import ( "time" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine" "github.com/nirmata/kyverno/pkg/engine/context" @@ -19,12 +19,13 @@ import ( // applyPolicy applies policy on a resource //TODO: generation rules -func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, 
policyStatus PolicyStatusInterface) (responses []response.EngineResponse) { +func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface, log logr.Logger) (responses []response.EngineResponse) { + logger := log.WithValues("kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName()) startTime := time.Now() var policyStats []PolicyStat - glog.V(4).Infof("Started apply policy %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), startTime) + logger.Info("start applying policy", "startTime", startTime) defer func() { - glog.V(4).Infof("Finished applying %s on resource %s/%s/%s (%v)", policy.Name, resource.GetKind(), resource.GetNamespace(), resource.GetName(), time.Since(startTime)) + logger.Info("finished applying policy", "processingTime", time.Since(startTime)) }() // gather stats from the engine response @@ -66,10 +67,10 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure ctx.AddResource(transformResource(resource)) //MUTATION - engineResponse, err = mutation(policy, resource, policyStatus, ctx) + engineResponse, err = mutation(policy, resource, policyStatus, ctx, logger) engineResponses = append(engineResponses, engineResponse) if err != nil { - glog.Errorf("unable to process mutation rules: %v", err) + logger.Error(err, "failed to process mutation rule") } gatherStat(policy.Name, engineResponse.PolicyResponse) //send stats @@ -86,52 +87,52 @@ func applyPolicy(policy kyverno.ClusterPolicy, resource unstructured.Unstructure //TODO: GENERATION return engineResponses } -func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface, ctx context.EvalInterface) (response.EngineResponse, error) { +func mutation(policy kyverno.ClusterPolicy, resource unstructured.Unstructured, policyStatus PolicyStatusInterface, ctx context.EvalInterface, log logr.Logger) (response.EngineResponse, error) { engineResponse := engine.Mutate(engine.PolicyContext{Policy: policy, NewResource: resource, Context: ctx}) if !engineResponse.IsSuccesful() { - glog.V(4).Infof("mutation had errors reporting them") + log.V(4).Info("failed to apply mutation rules; reporting them") return engineResponse, nil } // Verify if the JSON pathes returned by the Mutate are already applied to the resource if reflect.DeepEqual(resource, engineResponse.PatchedResource) { // resources matches - glog.V(4).Infof("resource %s/%s/%s satisfies policy %s", engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Namespace, engineResponse.PolicyResponse.Resource.Name, engineResponse.PolicyResponse.Policy) + log.V(4).Info("resource already satisfies the policy") return engineResponse, nil } - return getFailedOverallRuleInfo(resource, engineResponse) + return getFailedOverallRuleInfo(resource, engineResponse, log) } // getFailedOverallRuleInfo gets detailed info for over-all mutation failure -func getFailedOverallRuleInfo(resource unstructured.Unstructured, engineResponse response.EngineResponse) (response.EngineResponse, error) { +func getFailedOverallRuleInfo(resource unstructured.Unstructured, engineResponse response.EngineResponse, log logr.Logger) (response.EngineResponse, error) { rawResource, err := resource.MarshalJSON() if err != nil { - glog.V(4).Infof("unable to marshal resource: %v\n", err) + log.Error(err, "failed to marshal resource") return response.EngineResponse{}, err } // 
resource does not match so there was a mutation rule violated for index, rule := range engineResponse.PolicyResponse.Rules { - glog.V(4).Infof("veriying if policy %s rule %s was applied before to resource %s/%s/%s", engineResponse.PolicyResponse.Policy, rule.Name, engineResponse.PolicyResponse.Resource.Kind, engineResponse.PolicyResponse.Resource.Namespace, engineResponse.PolicyResponse.Resource.Name) + log.V(4).Info("veriying if policy rule was applied before", "rule", rule.Name) if len(rule.Patches) == 0 { continue } patch, err := jsonpatch.DecodePatch(utils.JoinPatches(rule.Patches)) if err != nil { - glog.V(4).Infof("unable to decode patch %s: %v", rule.Patches, err) + log.Error(err, "failed to decode JSON patch", "patches", rule.Patches) return response.EngineResponse{}, err } // apply the patches returned by mutate to the original resource patchedResource, err := patch.Apply(rawResource) if err != nil { - glog.V(4).Infof("unable to apply patch %s: %v", rule.Patches, err) + log.Error(err, "failed to apply JSON patch", "patches", rule.Patches) return response.EngineResponse{}, err } if !jsonpatch.Equal(patchedResource, rawResource) { - glog.V(4).Infof("policy %s rule %s condition not satisfied by existing resource", engineResponse.PolicyResponse.Policy, rule.Name) + log.V(4).Info("policy rule conditions not satisfied by resource", "rule", rule.Name) engineResponse.PolicyResponse.Rules[index].Success = false - engineResponse.PolicyResponse.Rules[index].Message = fmt.Sprintf("mutation json patches not found at resource path %s", extractPatchPath(rule.Patches)) + engineResponse.PolicyResponse.Rules[index].Message = fmt.Sprintf("mutation json patches not found at resource path %s", extractPatchPath(rule.Patches, log)) } } return engineResponse, nil @@ -143,14 +144,14 @@ type jsonPatch struct { Value interface{} `json:"value"` } -func extractPatchPath(patches [][]byte) string { +func extractPatchPath(patches [][]byte, log logr.Logger) string { var resultPath []string // extract the patch path and value for _, patch := range patches { - glog.V(4).Infof("expected json patch not found in resource: %s", string(patch)) + log.V(4).Info("expected json patch not found in resource", "patch", string(patch)) var data jsonPatch if err := json.Unmarshal(patch, &data); err != nil { - glog.V(4).Infof("Failed to decode the generated patch %v: Error %v", string(patch), err) + log.Error(err, "failed to decode the generate patch", "patch", string(patch)) continue } resultPath = append(resultPath, data.Path) diff --git a/pkg/policy/background.go b/pkg/policy/background.go index 423fc51c4d..cec13c74d7 100644 --- a/pkg/policy/background.go +++ b/pkg/policy/background.go @@ -6,6 +6,7 @@ import ( kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/context" "github.com/nirmata/kyverno/pkg/engine/variables" + "github.com/nirmata/kyverno/pkg/log" ) //ContainsUserInfo returns error is userInfo is defined @@ -35,23 +36,23 @@ func ContainsUserInfo(policy kyverno.ClusterPolicy) error { filterVars := []string{"request.userInfo*", "serviceAccountName", "serviceAccountNamespace"} ctx := context.NewContext(filterVars...) 
for condIdx, condition := range rule.Conditions { - if condition.Key, err = variables.SubstituteVars(ctx, condition.Key); err != nil { + if condition.Key, err = variables.SubstituteVars(log.Log, ctx, condition.Key); err != nil { return fmt.Errorf("userInfo variable used at spec/rules[%d]/condition[%d]/key", idx, condIdx) } - if condition.Value, err = variables.SubstituteVars(ctx, condition.Value); err != nil { + if condition.Value, err = variables.SubstituteVars(log.Log, ctx, condition.Value); err != nil { return fmt.Errorf("userInfo variable used at spec/rules[%d]/condition[%d]/value", idx, condIdx) } } - if rule.Mutation.Overlay, err = variables.SubstituteVars(ctx, rule.Mutation.Overlay); err != nil { + if rule.Mutation.Overlay, err = variables.SubstituteVars(log.Log, ctx, rule.Mutation.Overlay); err != nil { return fmt.Errorf("userInfo variable used at spec/rules[%d]/mutate/overlay", idx) } - if rule.Validation.Pattern, err = variables.SubstituteVars(ctx, rule.Validation.Pattern); err != nil { + if rule.Validation.Pattern, err = variables.SubstituteVars(log.Log, ctx, rule.Validation.Pattern); err != nil { return fmt.Errorf("userInfo variable used at spec/rules[%d]/validate/pattern", idx) } for idx2, pattern := range rule.Validation.AnyPattern { - if rule.Validation.AnyPattern[idx2], err = variables.SubstituteVars(ctx, pattern); err != nil { + if rule.Validation.AnyPattern[idx2], err = variables.SubstituteVars(log.Log, ctx, pattern); err != nil { return fmt.Errorf("userInfo variable used at spec/rules[%d]/validate/anyPattern[%d]", idx, idx2) } } diff --git a/pkg/policy/cleanup.go b/pkg/policy/cleanup.go index 6a41c579ba..40394c2785 100644 --- a/pkg/policy/cleanup.go +++ b/pkg/policy/cleanup.go @@ -4,57 +4,71 @@ import ( "fmt" "reflect" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/response" "k8s.io/apimachinery/pkg/labels" ) +func (pc *PolicyController) cleanUp(ers []response.EngineResponse) { + for _, er := range ers { + if !er.IsSuccesful() { + continue + } + if len(er.PolicyResponse.Rules) == 0 { + continue + } + // clean up after the policy has been corrected + pc.cleanUpPolicyViolation(er.PolicyResponse) + } +} + func (pc *PolicyController) cleanUpPolicyViolation(pResponse response.PolicyResponse) { + logger := pc.log // - check if there is violation on resource (label:Selector) if pResponse.Resource.Namespace == "" { - pv, err := getClusterPV(pc.cpvLister, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Name) + pv, err := getClusterPV(pc.cpvLister, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Name, logger) if err != nil { - glog.Errorf("failed to cleanUp violations: %v", err) + logger.Error(err, "failed to get cluster policy violation on policy and resource", "policy", pResponse.Policy, "kind", pResponse.Resource.Kind, "name", pResponse.Resource.Name) return } if reflect.DeepEqual(pv, kyverno.ClusterPolicyViolation{}) { return } - - glog.V(4).Infof("cleanup cluster violation %s on %s", pv.Name, pv.Spec.ResourceSpec.ToKey()) if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil { - glog.Errorf("failed to delete cluster policy violation %s on %s: %v", pv.Name, pv.Spec.ResourceSpec.ToKey(), err) + logger.Error(err, "failed to delete cluster policy violation", "name", pv.Name) + } else { + logger.Info("deleted cluster policy violation", "name", pv.Name) } - 
return } // namespace policy violation - nspv, err := getNamespacedPV(pc.nspvLister, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Namespace, pResponse.Resource.Name) + nspv, err := getNamespacedPV(pc.nspvLister, pResponse.Policy, pResponse.Resource.Kind, pResponse.Resource.Namespace, pResponse.Resource.Name, logger) if err != nil { - glog.Error(err) + logger.Error(err, "failed to get namespaced policy violation on policy and resource", "policy", pResponse.Policy, "kind", pResponse.Resource.Kind, "namespace", pResponse.Resource.Namespace, "name", pResponse.Resource.Name) return } if reflect.DeepEqual(nspv, kyverno.PolicyViolation{}) { return } - glog.V(4).Infof("cleanup namespaced violation %s on %s.%s", nspv.Name, pResponse.Resource.Namespace, nspv.Spec.ResourceSpec.ToKey()) if err := pc.pvControl.DeleteNamespacedPolicyViolation(nspv.Namespace, nspv.Name); err != nil { - glog.Errorf("failed to delete namespaced policy violation %s on %s: %v", nspv.Name, nspv.Spec.ResourceSpec.ToKey(), err) + logger.Error(err, "failed to delete cluster policy violation", "name", nspv.Name, "namespace", nspv.Namespace) + } else { + logger.Info("deleted namespaced policy violation", "name", nspv.Name, "namespace", nspv.Namespace) } } // Wont do the claiming of objects, just lookup based on selectors -func getClusterPV(pvLister kyvernolister.ClusterPolicyViolationLister, policyName, rkind, rname string) (kyverno.ClusterPolicyViolation, error) { +func getClusterPV(pvLister kyvernolister.ClusterPolicyViolationLister, policyName, rkind, rname string, log logr.Logger) (kyverno.ClusterPolicyViolation, error) { var err error // Check Violation on resource pvs, err := pvLister.List(labels.Everything()) if err != nil { - glog.V(2).Infof("unable to list policy violations : %v", err) + log.Error(err, "failed to list cluster policy violations") return kyverno.ClusterPolicyViolation{}, fmt.Errorf("failed to list cluster pv: %v", err) } @@ -69,10 +83,10 @@ func getClusterPV(pvLister kyvernolister.ClusterPolicyViolationLister, policyNam return kyverno.ClusterPolicyViolation{}, nil } -func getNamespacedPV(nspvLister kyvernolister.PolicyViolationLister, policyName, rkind, rnamespace, rname string) (kyverno.PolicyViolation, error) { +func getNamespacedPV(nspvLister kyvernolister.PolicyViolationLister, policyName, rkind, rnamespace, rname string, log logr.Logger) (kyverno.PolicyViolation, error) { nspvs, err := nspvLister.PolicyViolations(rnamespace).List(labels.Everything()) if err != nil { - glog.V(2).Infof("failed to list namespaced pv: %v", err) + log.Error(err, "failed to list namespaced policy violation") return kyverno.PolicyViolation{}, fmt.Errorf("failed to list namespaced pv: %v", err) } diff --git a/pkg/policy/controller.go b/pkg/policy/controller.go index 04200cd5a5..822f11621e 100644 --- a/pkg/policy/controller.go +++ b/pkg/policy/controller.go @@ -1,11 +1,10 @@ package policy import ( - "fmt" "reflect" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned" "github.com/nirmata/kyverno/pkg/client/clientset/versioned/scheme" @@ -75,6 +74,7 @@ type PolicyController struct { pvGenerator policyviolation.GeneratorInterface // resourceWebhookWatcher queues the webhook creation request, creates the webhook resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister + log logr.Logger } // NewPolicyController create a new PolicyController @@ -87,10 +87,11 @@ func 
NewPolicyController(kyvernoClient *kyvernoclient.Clientset, eventGen event.Interface, pvGenerator policyviolation.GeneratorInterface, pMetaStore policystore.UpdateInterface, - resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister) (*PolicyController, error) { + resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister, + log logr.Logger) (*PolicyController, error) { // Event broad caster eventBroadcaster := record.NewBroadcaster() - eventBroadcaster.StartLogging(glog.Infof) + eventBroadcaster.StartLogging(log.Info) eventInterface, err := client.GetEventsInterface() if err != nil { return nil, err @@ -107,6 +108,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, pMetaStore: pMetaStore, pvGenerator: pvGenerator, resourceWebhookWatcher: resourceWebhookWatcher, + log: log, } pc.pvControl = RealPVControl{Client: kyvernoClient, Recorder: pc.eventRecorder} @@ -152,6 +154,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset, } func (pc *PolicyController) addPolicy(obj interface{}) { + logger := pc.log p := obj.(*kyverno.ClusterPolicy) // Only process policies that are enabled for "background" execution // policy.spec.background -> "True" @@ -175,19 +178,19 @@ func (pc *PolicyController) addPolicy(obj interface{}) { return } } - - glog.V(4).Infof("Adding Policy %s", p.Name) + logger.V(4).Info("adding policy", "name", p.Name) pc.enqueuePolicy(p) } func (pc *PolicyController) updatePolicy(old, cur interface{}) { + logger := pc.log oldP := old.(*kyverno.ClusterPolicy) curP := cur.(*kyverno.ClusterPolicy) // TODO: optimize this : policy meta-store // Update policy-> (remove,add) err := pc.pMetaStore.UnRegister(*oldP) if err != nil { - glog.Infof("Failed to unregister policy %s", oldP.Name) + logger.Error(err, "failed to unregister policy", "name", oldP.Name) } pc.pMetaStore.Register(*curP) @@ -210,28 +213,29 @@ func (pc *PolicyController) updatePolicy(old, cur interface{}) { return } } - glog.V(4).Infof("Updating Policy %s", oldP.Name) + logger.V(4).Info("updating policy", "name", oldP.Name) pc.enqueuePolicy(curP) } func (pc *PolicyController) deletePolicy(obj interface{}) { + logger := pc.log p, ok := obj.(*kyverno.ClusterPolicy) if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj)) + logger.Info("couldn't get object from tombstone", "obj", obj) return } p, ok = tombstone.Obj.(*kyverno.ClusterPolicy) if !ok { - glog.Info(fmt.Errorf("Tombstone contained object that is not a Policy %#v", obj)) + logger.Info("tombstone contained object that is not a policy", "obj", obj) return } } - glog.V(4).Infof("Deleting Policy %s", p.Name) + logger.V(4).Info("deleting policy", "name", p.Name) // Unregister from policy meta-store if err := pc.pMetaStore.UnRegister(*p); err != nil { - glog.Infof("failed to unregister policy %s", p.Name) + logger.Error(err, "failed to unregister policy", "name", p.Name) } // we process policies that are not set of background processing as we need to perform policy violation // cleanup when a policy is deleted. @@ -239,9 +243,10 @@ func (pc *PolicyController) deletePolicy(obj interface{}) { } func (pc *PolicyController) enqueue(policy *kyverno.ClusterPolicy) { + logger := pc.log key, err := cache.MetaNamespaceKeyFunc(policy) if err != nil { - glog.Error(err) + logger.Error(err, "failed to enqueue policy") return } pc.queue.Add(key) @@ -249,15 +254,16 @@ func (pc *PolicyController) enqueue(policy *kyverno.ClusterPolicy) { // Run begins watching and syncing. 
func (pc *PolicyController) Run(workers int, stopCh <-chan struct{}) { + logger := pc.log defer utilruntime.HandleCrash() defer pc.queue.ShutDown() - glog.Info("Starting policy controller") - defer glog.Info("Shutting down policy controller") + logger.Info("starting") + defer logger.Info("shutting down") if !cache.WaitForCacheSync(stopCh, pc.pListerSynced, pc.cpvListerSynced, pc.nspvListerSynced) { - glog.Error("failed to sync informer cache") + logger.Info("failed to sync informer cache") return } @@ -295,31 +301,33 @@ func (pc *PolicyController) processNextWorkItem() bool { } func (pc *PolicyController) handleErr(err error, key interface{}) { + logger := pc.log if err == nil { pc.queue.Forget(key) return } if pc.queue.NumRequeues(key) < maxRetries { - glog.V(2).Infof("Error syncing Policy %v: %v", key, err) + logger.Error(err, "failed to sync policy", "key", key) pc.queue.AddRateLimited(key) return } utilruntime.HandleError(err) - glog.V(2).Infof("Dropping policy %q out of the queue: %v", key, err) + logger.V(2).Info("dropping policy out of queue", "key", key) pc.queue.Forget(key) } func (pc *PolicyController) syncPolicy(key string) error { + logger := pc.log startTime := time.Now() - glog.V(4).Infof("Started syncing policy %q (%v)", key, startTime) + logger.V(4).Info("started syncing policy", "key", key, "startTime", startTime) defer func() { - glog.V(4).Infof("Finished syncing policy %q (%v)", key, time.Since(startTime)) + logger.V(4).Info("finished syncing policy", "key", key, "processingTime", time.Since(startTime)) }() policy, err := pc.pLister.Get(key) if errors.IsNotFound(err) { - glog.V(2).Infof("Policy %v has been deleted", key) + logger.V(2).Info("policy deleted", "key", key) // delete cluster policy violation if err := pc.deleteClusterPolicyViolations(key); err != nil { return err @@ -334,8 +342,7 @@ func (pc *PolicyController) syncPolicy(key string) error { // remove webhook configurations if there are no policies if err := pc.removeResourceWebhookConfiguration(); err != nil { // do not fail, if unable to delete resource webhook config - glog.V(4).Infof("failed to remove resource webhook configuration: %v", err) - glog.Errorln(err) + logger.Error(err, "failed to remove resource webhook configurations") } return nil } diff --git a/pkg/policy/existing.go b/pkg/policy/existing.go index 9165978ca4..d0e2bc1fd0 100644 --- a/pkg/policy/existing.go +++ b/pkg/policy/existing.go @@ -5,7 +5,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/minio/minio/pkg/wildcard" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/config" @@ -19,27 +19,28 @@ import ( ) func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolicy) []response.EngineResponse { + logger := pc.log.WithValues("policy", policy.Name) // Parse through all the resources // drops the cache after configured rebuild time pc.rm.Drop() var engineResponses []response.EngineResponse // get resource that are satisfy the resource description defined in the rules - resourceMap := listResources(pc.client, policy, pc.configHandler) + resourceMap := listResources(pc.client, policy, pc.configHandler, logger) for _, resource := range resourceMap { // pre-processing, check if the policy and resource version has been processed before if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) { - glog.V(4).Infof("policy %s with resource version %s 
already processed on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) + logger.V(4).Info("policy and resource already processed", "policyResourceVersion", policy.ResourceVersion, "resourceResourceVersion", resource.GetResourceVersion(), "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName()) continue } // skip reporting violation on pod which has annotation pod-policies.kyverno.io/autogen-applied - if skipPodApplication(resource) { + if skipPodApplication(resource, logger) { continue } // apply the policy on each - glog.V(4).Infof("apply policy %s with resource version %s on resource %s/%s/%s with resource version %s", policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) - engineResponse := applyPolicy(policy, resource, pc.statusAggregator) + logger.V(4).Info("apply policy on resource", "policyResourceVersion", policy.ResourceVersion, "resourceResourceVersion", resource.GetResourceVersion(), "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName()) + engineResponse := applyPolicy(policy, resource, pc.statusAggregator, logger) // get engine response for mutation & validation independently engineResponses = append(engineResponses, engineResponse...) // post-processing, register the resource as processed @@ -48,36 +49,26 @@ func (pc *PolicyController) processExistingResources(policy kyverno.ClusterPolic return engineResponses } -func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHandler config.Interface) map[string]unstructured.Unstructured { +func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHandler config.Interface, log logr.Logger) map[string]unstructured.Unstructured { // key uid resourceMap := map[string]unstructured.Unstructured{} for _, rule := range policy.Spec.Rules { // resources that match for _, k := range rule.MatchResources.Kinds { - // if kindIsExcluded(k, rule.ExcludeResources.Kinds) { - // glog.V(4).Infof("processing policy %s rule %s: kind %s is exluded", policy.Name, rule.Name, k) - // continue - // } var namespaces []string - if k == "Namespace" { - // TODO - // this is handled by generator controller - glog.V(4).Infof("skipping processing policy %s rule %s for kind Namespace", policy.Name, rule.Name) - continue - } if len(rule.MatchResources.Namespaces) > 0 { namespaces = append(namespaces, rule.MatchResources.Namespaces...) 
- glog.V(4).Infof("namespaces specified for inclusion: %v", rule.MatchResources.Namespaces) + log.V(4).Info("namespaces included", "namespaces", rule.MatchResources.Namespaces) } else { - glog.V(4).Infof("processing policy %s rule %s, namespace not defined, getting all namespaces ", policy.Name, rule.Name) + log.V(4).Info("processing all namespaces", "rule", rule.Name) // get all namespaces - namespaces = getAllNamespaces(client) + namespaces = getAllNamespaces(client, log) } // get resources in the namespaces for _, ns := range namespaces { - rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler) + rMap := getResourcesPerNamespace(k, client, ns, rule, configHandler, log) mergeresources(resourceMap, rMap) } @@ -86,16 +77,16 @@ func listResources(client *client.Client, policy kyverno.ClusterPolicy, configHa return resourceMap } -func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, configHandler config.Interface) map[string]unstructured.Unstructured { +func getResourcesPerNamespace(kind string, client *client.Client, namespace string, rule kyverno.Rule, configHandler config.Interface, log logr.Logger) map[string]unstructured.Unstructured { resourceMap := map[string]unstructured.Unstructured{} // merge include and exclude label selector values ls := rule.MatchResources.Selector // ls := mergeLabelSectors(rule.MatchResources.Selector, rule.ExcludeResources.Selector) // list resources - glog.V(4).Infof("get resources for kind %s, namespace %s, selector %v", kind, namespace, rule.MatchResources.Selector) + log.V(4).Info("list resources to be processed") list, err := client.ListResource(kind, namespace, ls) if err != nil { - glog.Infof("unable to get resources: err %v", err) + log.Error(err, "failed to list resources", "kind", kind) return nil } // filter based on name @@ -103,7 +94,6 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri // match name if rule.MatchResources.Name != "" { if !wildcard.Match(rule.MatchResources.Name, r.GetName()) { - glog.V(4).Infof("skipping resource %s/%s due to include condition name=%s mistatch", r.GetNamespace(), r.GetName(), rule.MatchResources.Name) continue } } @@ -118,12 +108,11 @@ func getResourcesPerNamespace(kind string, client *client.Client, namespace stri // exclude the resources // skip resources to be filtered - excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, configHandler) - // glog.V(4).Infof("resource map: %v", resourceMap) + excludeResources(resourceMap, rule.ExcludeResources.ResourceDescription, configHandler, log) return resourceMap } -func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, configHandler config.Interface) { +func excludeResources(included map[string]unstructured.Unstructured, exclude kyverno.ResourceDescription, configHandler config.Interface, log logr.Logger) { if reflect.DeepEqual(exclude, (kyverno.ResourceDescription{})) { return } @@ -154,7 +143,7 @@ func excludeResources(included map[string]unstructured.Unstructured, exclude kyv selector, err := metav1.LabelSelectorAsSelector(exclude.Selector) // if the label selector is incorrect, should be fail or if err != nil { - glog.Error(err) + log.Error(err, "failed to build label selector") return Skip } if selector.Matches(labels.Set(labelsMap)) { @@ -205,8 +194,6 @@ func excludeResources(included map[string]unstructured.Unstructured, exclude kyv } // exclude the filtered resources if 
configHandler.ToFilter(resource.GetKind(), resource.GetNamespace(), resource.GetName()) { - //TODO: improve the text - glog.V(4).Infof("excluding resource %s/%s/%s as its satisfies the filtered resources", resource.GetKind(), resource.GetNamespace(), resource.GetName()) delete(included, uid) continue } @@ -244,12 +231,13 @@ func mergeresources(a, b map[string]unstructured.Unstructured) { } } -func getAllNamespaces(client *client.Client) []string { +func getAllNamespaces(client *client.Client, log logr.Logger) []string { + var namespaces []string // get all namespaces nsList, err := client.ListResource("Namespace", "", nil) if err != nil { - glog.Error(err) + log.Error(err, "failed to list namespaces") return namespaces } for _, ns := range nsList.Items { @@ -291,14 +279,11 @@ type resourceManager interface { //TODO: or drop based on the size func (rm *ResourceManager) Drop() { timeSince := time.Since(rm.time) - glog.V(4).Infof("time since last cache reset time %v is %v", rm.time, timeSince) - glog.V(4).Infof("cache rebuild time %v", time.Duration(rm.rebuildTime)*time.Second) if timeSince > time.Duration(rm.rebuildTime)*time.Second { rm.mux.Lock() defer rm.mux.Unlock() rm.data = map[string]interface{}{} rm.time = time.Now() - glog.V(4).Infof("dropping cache at time %v", rm.time) } } @@ -327,14 +312,14 @@ func buildKey(policy, pv, kind, ns, name, rv string) string { return policy + "/" + pv + "/" + kind + "/" + ns + "/" + name + "/" + rv } -func skipPodApplication(resource unstructured.Unstructured) bool { +func skipPodApplication(resource unstructured.Unstructured, log logr.Logger) bool { if resource.GetKind() != "Pod" { return false } annotation := resource.GetAnnotations() if _, ok := annotation[engine.PodTemplateAnnotation]; ok { - glog.V(4).Infof("Policies already processed on pod controllers, skip processing policy on Pod/%s/%s", resource.GetNamespace(), resource.GetName()) + log.V(4).Info("Policies already processed on pod controllers, skip processing policy on Pod", "kind", resource.GetKind(), "namespace", resource.GetNamespace(), "name", resource.GetName()) return true } diff --git a/pkg/policy/generate/auth.go b/pkg/policy/generate/auth.go index 0bcd0cb0a1..3da64a4289 100644 --- a/pkg/policy/generate/auth.go +++ b/pkg/policy/generate/auth.go @@ -1,6 +1,7 @@ package generate import ( + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/auth" dclient "github.com/nirmata/kyverno/pkg/dclient" ) @@ -20,19 +21,21 @@ type Operations interface { //Auth provides implementation to check if caller/self/kyverno has access to perofrm operations type Auth struct { client *dclient.Client + log logr.Logger } //NewAuth returns a new instance of Auth for operations -func NewAuth(client *dclient.Client) *Auth { +func NewAuth(client *dclient.Client, log logr.Logger) *Auth { a := Auth{ client: client, + log: log, } return &a } // CanICreate returns 'true' if self can 'create' resource func (a *Auth) CanICreate(kind, namespace string) (bool, error) { - canI := auth.NewCanI(a.client, kind, namespace, "create") + canI := auth.NewCanI(a.client, kind, namespace, "create", a.log) ok, err := canI.RunAccessCheck() if err != nil { return false, err @@ -42,7 +45,7 @@ func (a *Auth) CanICreate(kind, namespace string) (bool, error) { // CanIUpdate returns 'true' if self can 'update' resource func (a *Auth) CanIUpdate(kind, namespace string) (bool, error) { - canI := auth.NewCanI(a.client, kind, namespace, "update") + canI := auth.NewCanI(a.client, kind, namespace, "update", a.log) ok, err := 
canI.RunAccessCheck() if err != nil { return false, err @@ -52,7 +55,7 @@ func (a *Auth) CanIUpdate(kind, namespace string) (bool, error) { // CanIDelete returns 'true' if self can 'delete' resource func (a *Auth) CanIDelete(kind, namespace string) (bool, error) { - canI := auth.NewCanI(a.client, kind, namespace, "delete") + canI := auth.NewCanI(a.client, kind, namespace, "delete", a.log) ok, err := canI.RunAccessCheck() if err != nil { return false, err @@ -62,7 +65,7 @@ func (a *Auth) CanIDelete(kind, namespace string) (bool, error) { // CanIGet returns 'true' if self can 'get' resource func (a *Auth) CanIGet(kind, namespace string) (bool, error) { - canI := auth.NewCanI(a.client, kind, namespace, "get") + canI := auth.NewCanI(a.client, kind, namespace, "get", a.log) ok, err := canI.RunAccessCheck() if err != nil { return false, err diff --git a/pkg/policy/generate/validate.go b/pkg/policy/generate/validate.go index 427faa33e9..460842ea98 100644 --- a/pkg/policy/generate/validate.go +++ b/pkg/policy/generate/validate.go @@ -4,7 +4,7 @@ import ( "fmt" "reflect" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" dclient "github.com/nirmata/kyverno/pkg/dclient" "github.com/nirmata/kyverno/pkg/engine/anchor" @@ -18,13 +18,16 @@ type Generate struct { rule kyverno.Generation // authCheck to check access for operations authCheck Operations + //logger + log logr.Logger } //NewGenerateFactory returns a new instance of Generate validation checker -func NewGenerateFactory(client *dclient.Client, rule kyverno.Generation) *Generate { +func NewGenerateFactory(client *dclient.Client, rule kyverno.Generation, log logr.Logger) *Generate { g := Generate{ rule: rule, - authCheck: NewAuth(client), + authCheck: NewAuth(client, log), + log: log, } return &g @@ -92,7 +95,7 @@ func (g *Generate) validateClone(c kyverno.CloneFrom, kind string) (string, erro return "", fmt.Errorf("kyverno does not have permissions to 'get' resource %s/%s. Update permissions in ClusterRole 'kyverno:generatecontroller'", kind, namespace) } } else { - glog.V(4).Info("name & namespace uses variables, so cannot be resolved. Skipping Auth Checks.") + g.log.V(4).Info("name & namespace uses variables, so cannot be resolved. Skipping Auth Checks.") } return "", nil } @@ -141,7 +144,7 @@ func (g *Generate) canIGenerate(kind, namespace string) error { } } else { - glog.V(4).Info("name & namespace uses variables, so cannot be resolved. Skipping Auth Checks.") + g.log.V(4).Info("name & namespace uses variables, so cannot be resolved. 
Skipping Auth Checks.") } return nil diff --git a/pkg/policy/namespacedpv.go b/pkg/policy/namespacedpv.go index 654343d7df..828ae7806f 100644 --- a/pkg/policy/namespacedpv.go +++ b/pkg/policy/namespacedpv.go @@ -7,6 +7,7 @@ import ( ) func (pc *PolicyController) addNamespacedPolicyViolation(obj interface{}) { + logger := pc.log pv := obj.(*kyverno.PolicyViolation) if pv.DeletionTimestamp != nil { @@ -20,14 +21,15 @@ func (pc *PolicyController) addNamespacedPolicyViolation(obj interface{}) { ps := pc.getPolicyForNamespacedPolicyViolation(pv) if len(ps) == 0 { // there is no cluster policy for this violation, so we can delete this cluster policy violation - glog.V(4).Infof("PolicyViolation %s does not belong to an active policy, will be cleanedup", pv.Name) + logger.V(4).Info("namespaced policy violation does not belong to any active policy, will be cleaned up", "namespacedPolicyViolation", pv.Name) if err := pc.pvControl.DeleteNamespacedPolicyViolation(pv.Namespace, pv.Name); err != nil { - glog.Errorf("Failed to deleted policy violation %s: %v", pv.Name, err) + logger.Error(err, "failed to delete namespaced policy violation", "namespacedPolicyViolation", pv.Name) return } - glog.V(4).Infof("PolicyViolation %s deleted", pv.Name) + logger.V(4).Info("deleted namespaced policy violation", "namespacedPolicyViolation", pv.Name) return } + logger.V(4).Info("orphan policy violation added", "namespacedPolicyViolation", pv.Name) for _, p := range ps { pc.enqueuePolicy(p) diff --git a/pkg/policy/report.go b/pkg/policy/report.go index 206a6ebd61..3eb7502125 100644 --- a/pkg/policy/report.go +++ b/pkg/policy/report.go @@ -13,11 +13,12 @@ import ( // - has violation -> report // - no violation -> cleanup policy violations func (pc *PolicyController) cleanupAndReport(engineResponses []response.EngineResponse) { + logger := pc.log // generate Events eventInfos := generateEvents(engineResponses) pc.eventGen.Add(eventInfos...) // create policy violation - pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses) + pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger) pc.pvGenerator.Add(pvInfos...)
// cleanup existing violations if any // if there is any error in clean up, we dont re-queue the resource @@ -25,19 +26,6 @@ func (pc *PolicyController) cleanupAndReport(engineResponses []response.EngineRe pc.cleanUp(engineResponses) } -func (pc *PolicyController) cleanUp(ers []response.EngineResponse) { - for _, er := range ers { - if !er.IsSuccesful() { - continue - } - if len(er.PolicyResponse.Rules) == 0 { - continue - } - // clean up after the policy has been corrected - pc.cleanUpPolicyViolation(er.PolicyResponse) - } -} - func generateEvents(ers []response.EngineResponse) []event.Info { var eventInfos []event.Info for _, er := range ers { diff --git a/pkg/policystore/policystore.go b/pkg/policystore/policystore.go index 3d23b106e4..633007d5ca 100644 --- a/pkg/policystore/policystore.go +++ b/pkg/policystore/policystore.go @@ -3,12 +3,11 @@ package policystore import ( "sync" - "k8s.io/apimachinery/pkg/labels" - - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1" kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/client-go/tools/cache" ) @@ -24,6 +23,7 @@ type PolicyStore struct { pLister kyvernolister.ClusterPolicyLister // returns true if the cluster policy store has been synced at least once pSynched cache.InformerSynced + log logr.Logger } //UpdateInterface provides api to update policies @@ -40,25 +40,29 @@ type LookupInterface interface { } // NewPolicyStore returns a new policy store -func NewPolicyStore(pInformer kyvernoinformer.ClusterPolicyInformer) *PolicyStore { +func NewPolicyStore(pInformer kyvernoinformer.ClusterPolicyInformer, + log logr.Logger) *PolicyStore { ps := PolicyStore{ data: make(kindMap), pLister: pInformer.Lister(), pSynched: pInformer.Informer().HasSynced, + log: log, } return &ps } //Run checks syncing func (ps *PolicyStore) Run(stopCh <-chan struct{}) { + logger := ps.log if !cache.WaitForCacheSync(stopCh, ps.pSynched) { - glog.Error("policy meta store: failed to sync informer cache") + logger.Info("failed to sync informer cache") } } //Register a new policy func (ps *PolicyStore) Register(policy kyverno.ClusterPolicy) { - glog.V(4).Infof("adding resources %s", policy.Name) + logger := ps.log + logger.V(4).Info("adding policy", "name", policy.Name) ps.mu.Lock() defer ps.mu.Unlock() var pmap policyMap diff --git a/pkg/policyviolation/builder.go b/pkg/policyviolation/builder.go index 1de366eb66..da556697be 100644 --- a/pkg/policyviolation/builder.go +++ b/pkg/policyviolation/builder.go @@ -3,24 +3,23 @@ package policyviolation import ( "fmt" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/response" ) //GeneratePVsFromEngineResponse generate Violations from engine responses -func GeneratePVsFromEngineResponse(ers []response.EngineResponse) (pvInfos []Info) { +func GeneratePVsFromEngineResponse(ers []response.EngineResponse, log logr.Logger) (pvInfos []Info) { for _, er := range ers { // ignore creation of PV for resources that are yet to be assigned a name if er.PolicyResponse.Resource.Name == "" { - glog.V(4).Infof("resource %v, has not been assigned a name, not creating a policy violation for it", er.PolicyResponse.Resource) + log.V(4).Info("resource does no have a name assigned yet, not creating a policy violation", "resource", 
er.PolicyResponse.Resource) continue } // skip when response succeed if er.IsSuccesful() { continue } - glog.V(4).Infof("Building policy violation for engine response %v", er) // build policy violation info pvInfos = append(pvInfos, buildPVInfo(er)) } diff --git a/pkg/policyviolation/builder_test.go b/pkg/policyviolation/builder_test.go index bd3f8c97f0..2498b7b5f2 100644 --- a/pkg/policyviolation/builder_test.go +++ b/pkg/policyviolation/builder_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/nirmata/kyverno/pkg/engine/response" + "github.com/nirmata/kyverno/pkg/log" "gotest.tools/assert" ) @@ -52,6 +53,6 @@ func Test_GeneratePVsFromEngineResponse_PathNotExist(t *testing.T) { }, } - pvInfos := GeneratePVsFromEngineResponse(ers) + pvInfos := GeneratePVsFromEngineResponse(ers, log.Log) assert.Assert(t, len(pvInfos) == 1) } diff --git a/pkg/policyviolation/generator.go b/pkg/policyviolation/generator.go index 7d820f980b..94099fefaf 100644 --- a/pkg/policyviolation/generator.go +++ b/pkg/policyviolation/generator.go @@ -8,7 +8,7 @@ import ( "sync" "time" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned" kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1" @@ -40,6 +40,7 @@ type Generator struct { nspvSynced cache.InformerSynced queue workqueue.RateLimitingInterface dataStore *dataStore + log logr.Logger } //NewDataStore returns an instance of data store @@ -103,7 +104,8 @@ type GeneratorInterface interface { func NewPVGenerator(client *kyvernoclient.Clientset, dclient *dclient.Client, pvInformer kyvernoinformer.ClusterPolicyViolationInformer, - nspvInformer kyvernoinformer.PolicyViolationInformer) *Generator { + nspvInformer kyvernoinformer.PolicyViolationInformer, + log logr.Logger) *Generator { gen := Generator{ kyvernoInterface: client.KyvernoV1(), dclient: dclient, @@ -113,6 +115,7 @@ func NewPVGenerator(client *kyvernoclient.Clientset, nspvSynced: nspvInformer.Informer().HasSynced, queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), workQueueName), dataStore: newDataStore(), + log: log, } return &gen } @@ -130,18 +133,18 @@ func (gen *Generator) enqueue(info Info) { func (gen *Generator) Add(infos ...Info) { for _, info := range infos { gen.enqueue(info) - glog.V(3).Infof("Added policy violation: %s", info.toKey()) } } // Run starts the workers func (gen *Generator) Run(workers int, stopCh <-chan struct{}) { + logger := gen.log defer utilruntime.HandleCrash() - glog.Info("Start policy violation generator") - defer glog.Info("Shutting down policy violation generator") + logger.Info("start") + defer logger.Info("shutting down") if !cache.WaitForCacheSync(stopCh, gen.pvSynced, gen.nspvSynced) { - glog.Error("policy violation generator: failed to sync informer cache") + logger.Info("failed to sync informer cache") } for i := 0; i < workers; i++ { @@ -156,6 +159,7 @@ func (gen *Generator) runWorker() { } func (gen *Generator) handleErr(err error, key interface{}) { + logger := gen.log if err == nil { gen.queue.Forget(key) return @@ -163,23 +167,22 @@ func (gen *Generator) handleErr(err error, key interface{}) { // retires requests if there is error if gen.queue.NumRequeues(key) < workQueueRetryLimit { - glog.V(4).Infof("Error syncing policy violation %v: %v", key, err) + logger.Error(err, "failed to sync policy violation", "key", key) // Re-enqueue the key rate limited. 
Based on the rate limiter on the // queue and the re-enqueue history, the key will be processed later again. gen.queue.AddRateLimited(key) return } gen.queue.Forget(key) - glog.Error(err) // remove from data store if keyHash, ok := key.(string); ok { gen.dataStore.delete(keyHash) } - - glog.Warningf("Dropping the key out of the queue: %v", err) + logger.Error(err, "dropping key out of the queue", "key", key) } func (gen *Generator) processNextWorkitem() bool { + logger := gen.log obj, shutdown := gen.queue.Get() if shutdown { return false @@ -191,7 +194,7 @@ func (gen *Generator) processNextWorkitem() bool { var ok bool if keyHash, ok = obj.(string); !ok { gen.queue.Forget(obj) - glog.Warningf("Expecting type string but got %v\n", obj) + logger.Info("incorrect type; expecting type 'string'", "obj", obj) return nil } // lookup data store @@ -199,7 +202,7 @@ func (gen *Generator) processNextWorkitem() bool { if reflect.DeepEqual(info, Info{}) { // empty key gen.queue.Forget(obj) - glog.Warningf("Got empty key %v\n", obj) + logger.Info("empty key") return nil } err := gen.syncHandler(info) @@ -207,14 +210,14 @@ func (gen *Generator) processNextWorkitem() bool { return nil }(obj) if err != nil { - glog.Error(err) + logger.Error(err, "failed to process item") return true } return true } func (gen *Generator) syncHandler(info Info) error { - glog.V(4).Infof("received info:%v", info) + logger := gen.log var handler pvGenerator builder := newPvBuilder() if info.Resource.GetNamespace() == "" { @@ -229,12 +232,12 @@ func (gen *Generator) syncHandler(info Info) error { pv := builder.generate(info) // Create Policy Violations - glog.V(3).Infof("Creating policy violation: %s", info.toKey()) + logger.V(4).Info("creating policy violation", "key", info.toKey()) if err := handler.create(pv); err != nil { failure = true - glog.V(3).Infof("Failed to create policy violation: %v", err) + logger.Error(err, "failed to create policy violation") } else { - glog.V(3).Infof("Policy violation created: %s", info.toKey()) + logger.Info("created policy violation", "key", info.toKey()) } if failure { diff --git a/pkg/utils/util.go b/pkg/utils/util.go index c6c41f4be0..5fe0602e71 100644 --- a/pkg/utils/util.go +++ b/pkg/utils/util.go @@ -3,8 +3,7 @@ package utils import ( "reflect" - "github.com/golang/glog" - + "github.com/go-logr/logr" "github.com/minio/minio/pkg/wildcard" client "github.com/nirmata/kyverno/pkg/dclient" dclient "github.com/nirmata/kyverno/pkg/dclient" @@ -59,14 +58,15 @@ func Btoi(b bool) int { } //CRDInstalled to check if the CRD is installed or not -func CRDInstalled(discovery client.IDiscovery) bool { +func CRDInstalled(discovery client.IDiscovery, log logr.Logger) bool { + logger := log.WithName("CRDInstalled") check := func(kind string) bool { gvr := discovery.GetGVRFromKind(kind) if reflect.DeepEqual(gvr, (schema.GroupVersionResource{})) { - glog.Errorf("%s CRD not installed", kind) + logger.Info("CRD not installed", "kind", kind) return false } - glog.Infof("CRD %s found ", kind) + logger.Info("CRD found", "kind", kind) return true } if !check("ClusterPolicy") || !check("ClusterPolicyViolation") || !check("PolicyViolation") { @@ -77,11 +77,12 @@ func CRDInstalled(discovery client.IDiscovery) bool { //CleanupOldCrd deletes any existing NamespacedPolicyViolation resources in cluster // If resource violates policy, new Violations will be generated -func CleanupOldCrd(client *dclient.Client) { +func CleanupOldCrd(client *dclient.Client, log logr.Logger) { + logger := log.WithName("CleanupOldCrd") gvr 
:= client.DiscoveryClient.GetGVRFromKind("NamespacedPolicyViolation") if !reflect.DeepEqual(gvr, (schema.GroupVersionResource{})) { if err := client.DeleteResource("CustomResourceDefinition", "", "namespacedpolicyviolations.kyverno.io", false); err != nil { - glog.Infof("Failed to remove previous CRD namespacedpolicyviolations: %v", err) + logger.Error(err, "Failed to remove prevous CRD", "kind", "namespacedpolicyviolation") } } } diff --git a/pkg/version/version.go b/pkg/version/version.go index ef768a35bf..8d9303d204 100644 --- a/pkg/version/version.go +++ b/pkg/version/version.go @@ -1,7 +1,7 @@ package version import ( - "github.com/golang/glog" + "github.com/go-logr/logr" ) // These fields are set during an official build @@ -13,8 +13,8 @@ var ( ) //PrintVersionInfo displays the kyverno version - git version -func PrintVersionInfo() { - glog.Infof("Kyverno version: %s\n", BuildVersion) - glog.Infof("Kyverno BuildHash: %s\n", BuildHash) - glog.Infof("Kyverno BuildTime: %s\n", BuildTime) +func PrintVersionInfo(log logr.Logger) { + log.Info("Kyverno", "Version", BuildVersion) + log.Info("Kyverno", "BuildHash", BuildHash) + log.Info("Kyverno", "BuildTime", BuildTime) } diff --git a/pkg/webhookconfig/checker.go b/pkg/webhookconfig/checker.go index ee85f36c35..e1923a507c 100644 --- a/pkg/webhookconfig/checker.go +++ b/pkg/webhookconfig/checker.go @@ -19,8 +19,8 @@ func (wrc *WebhookRegistrationClient) constructVerifyMutatingWebhookConfig(caDat wrc.constructOwner(), }, }, - Webhooks: []admregapi.Webhook{ - generateWebhook( + Webhooks: []admregapi.MutatingWebhook{ + generateMutatingWebhook( config.VerifyMutatingWebhookName, config.VerifyMutatingWebhookServicePath, caData, @@ -42,8 +42,8 @@ func (wrc *WebhookRegistrationClient) constructDebugVerifyMutatingWebhookConfig( ObjectMeta: v1.ObjectMeta{ Name: config.VerifyMutatingWebhookConfigurationDebugName, }, - Webhooks: []admregapi.Webhook{ - generateDebugWebhook( + Webhooks: []admregapi.MutatingWebhook{ + generateDebugMutatingWebhook( config.VerifyMutatingWebhookName, url, caData, diff --git a/pkg/webhookconfig/common.go b/pkg/webhookconfig/common.go index d73fcafb13..69ccaca885 100644 --- a/pkg/webhookconfig/common.go +++ b/pkg/webhookconfig/common.go @@ -61,10 +61,43 @@ func (wrc *WebhookRegistrationClient) constructOwner() v1.OwnerReference { } } -func generateDebugWebhook(name, url string, caData []byte, validate bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.Webhook { +// func generateDebugWebhook(name, url string, caData []byte, validate bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.Webhook { +// sideEffect := admregapi.SideEffectClassNoneOnDryRun +// failurePolicy := admregapi.Ignore +// return admregapi.Webhook{ +// Name: name, +// ClientConfig: admregapi.WebhookClientConfig{ +// URL: &url, +// CABundle: caData, +// }, +// SideEffects: &sideEffect, +// Rules: []admregapi.RuleWithOperations{ +// admregapi.RuleWithOperations{ +// Operations: operationTypes, +// Rule: admregapi.Rule{ +// APIGroups: []string{ +// apiGroups, +// }, +// APIVersions: []string{ +// apiVersions, +// }, +// Resources: []string{ +// resource, +// }, +// }, +// }, +// }, +// AdmissionReviewVersions: []string{"v1beta1"}, +// TimeoutSeconds: &timeoutSeconds, +// FailurePolicy: &failurePolicy, +// } +// } + +// debug mutating webhook +func generateDebugMutatingWebhook(name, url string, caData []byte, validate bool, 
timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.MutatingWebhook { sideEffect := admregapi.SideEffectClassNoneOnDryRun failurePolicy := admregapi.Ignore - return admregapi.Webhook{ + return admregapi.MutatingWebhook{ Name: name, ClientConfig: admregapi.WebhookClientConfig{ URL: &url, @@ -93,10 +126,116 @@ func generateDebugWebhook(name, url string, caData []byte, validate bool, timeou } } -func generateWebhook(name, servicePath string, caData []byte, validation bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.Webhook { +func generateDebugValidatingWebhook(name, url string, caData []byte, validate bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.ValidatingWebhook { sideEffect := admregapi.SideEffectClassNoneOnDryRun failurePolicy := admregapi.Ignore - return admregapi.Webhook{ + return admregapi.ValidatingWebhook{ + Name: name, + ClientConfig: admregapi.WebhookClientConfig{ + URL: &url, + CABundle: caData, + }, + SideEffects: &sideEffect, + Rules: []admregapi.RuleWithOperations{ + admregapi.RuleWithOperations{ + Operations: operationTypes, + Rule: admregapi.Rule{ + APIGroups: []string{ + apiGroups, + }, + APIVersions: []string{ + apiVersions, + }, + Resources: []string{ + resource, + }, + }, + }, + }, + AdmissionReviewVersions: []string{"v1beta1"}, + TimeoutSeconds: &timeoutSeconds, + FailurePolicy: &failurePolicy, + } +} + +// func generateWebhook(name, servicePath string, caData []byte, validation bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.Webhook { +// sideEffect := admregapi.SideEffectClassNoneOnDryRun +// failurePolicy := admregapi.Ignore +// return admregapi.Webhook{ +// Name: name, +// ClientConfig: admregapi.WebhookClientConfig{ +// Service: &admregapi.ServiceReference{ +// Namespace: config.KubePolicyNamespace, +// Name: config.WebhookServiceName, +// Path: &servicePath, +// }, +// CABundle: caData, +// }, +// SideEffects: &sideEffect, +// Rules: []admregapi.RuleWithOperations{ +// admregapi.RuleWithOperations{ +// Operations: operationTypes, +// Rule: admregapi.Rule{ +// APIGroups: []string{ +// apiGroups, +// }, +// APIVersions: []string{ +// apiVersions, +// }, +// Resources: []string{ +// resource, +// }, +// }, +// }, +// }, +// AdmissionReviewVersions: []string{"v1beta1"}, +// TimeoutSeconds: &timeoutSeconds, +// FailurePolicy: &failurePolicy, +// } +// } + +// mutating webhook +func generateMutatingWebhook(name, servicePath string, caData []byte, validation bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.MutatingWebhook { + sideEffect := admregapi.SideEffectClassNoneOnDryRun + failurePolicy := admregapi.Ignore + return admregapi.MutatingWebhook{ + Name: name, + ClientConfig: admregapi.WebhookClientConfig{ + Service: &admregapi.ServiceReference{ + Namespace: config.KubePolicyNamespace, + Name: config.WebhookServiceName, + Path: &servicePath, + }, + CABundle: caData, + }, + SideEffects: &sideEffect, + Rules: []admregapi.RuleWithOperations{ + admregapi.RuleWithOperations{ + Operations: operationTypes, + Rule: admregapi.Rule{ + APIGroups: []string{ + apiGroups, + }, + APIVersions: []string{ + apiVersions, + }, + Resources: []string{ + resource, + }, + }, + }, + }, + AdmissionReviewVersions: []string{"v1beta1"}, + 
TimeoutSeconds: &timeoutSeconds, + FailurePolicy: &failurePolicy, + } +} + +// validating webhook +func generateValidatingWebhook(name, servicePath string, caData []byte, validation bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.ValidatingWebhook { + sideEffect := admregapi.SideEffectClassNoneOnDryRun + failurePolicy := admregapi.Ignore + return admregapi.ValidatingWebhook{ Name: name, ClientConfig: admregapi.WebhookClientConfig{ Service: &admregapi.ServiceReference{ diff --git a/pkg/webhookconfig/policy.go b/pkg/webhookconfig/policy.go index d1798a54e5..1a518596a7 100644 --- a/pkg/webhookconfig/policy.go +++ b/pkg/webhookconfig/policy.go @@ -18,8 +18,8 @@ func (wrc *WebhookRegistrationClient) contructPolicyValidatingWebhookConfig(caDa wrc.constructOwner(), }, }, - Webhooks: []admregapi.Webhook{ - generateWebhook( + Webhooks: []admregapi.ValidatingWebhook{ + generateValidatingWebhook( config.PolicyValidatingWebhookName, config.PolicyValidatingWebhookServicePath, caData, @@ -42,8 +42,8 @@ func (wrc *WebhookRegistrationClient) contructDebugPolicyValidatingWebhookConfig ObjectMeta: v1.ObjectMeta{ Name: config.PolicyValidatingWebhookConfigurationDebugName, }, - Webhooks: []admregapi.Webhook{ - generateDebugWebhook( + Webhooks: []admregapi.ValidatingWebhook{ + generateDebugValidatingWebhook( config.PolicyValidatingWebhookName, url, caData, @@ -66,8 +66,8 @@ func (wrc *WebhookRegistrationClient) contructPolicyMutatingWebhookConfig(caData wrc.constructOwner(), }, }, - Webhooks: []admregapi.Webhook{ - generateWebhook( + Webhooks: []admregapi.MutatingWebhook{ + generateMutatingWebhook( config.PolicyMutatingWebhookName, config.PolicyMutatingWebhookServicePath, caData, @@ -89,8 +89,8 @@ func (wrc *WebhookRegistrationClient) contructDebugPolicyMutatingWebhookConfig(c ObjectMeta: v1.ObjectMeta{ Name: config.PolicyMutatingWebhookConfigurationDebugName, }, - Webhooks: []admregapi.Webhook{ - generateDebugWebhook( + Webhooks: []admregapi.MutatingWebhook{ + generateDebugMutatingWebhook( config.PolicyMutatingWebhookName, url, caData, diff --git a/pkg/webhookconfig/registration.go b/pkg/webhookconfig/registration.go index 8db0cba73c..9edf71d1e0 100644 --- a/pkg/webhookconfig/registration.go +++ b/pkg/webhookconfig/registration.go @@ -2,9 +2,11 @@ package webhookconfig import ( "errors" + "fmt" "sync" "time" + "github.com/go-logr/logr" "github.com/golang/glog" "github.com/nirmata/kyverno/pkg/config" client "github.com/nirmata/kyverno/pkg/dclient" @@ -27,6 +29,7 @@ type WebhookRegistrationClient struct { // serverIP should be used if running Kyverno out of clutser serverIP string timeoutSeconds int32 + log logr.Logger } // NewWebhookRegistrationClient creates new WebhookRegistrationClient instance @@ -34,19 +37,22 @@ func NewWebhookRegistrationClient( clientConfig *rest.Config, client *client.Client, serverIP string, - webhookTimeout int32) *WebhookRegistrationClient { + webhookTimeout int32, + log logr.Logger) *WebhookRegistrationClient { return &WebhookRegistrationClient{ clientConfig: clientConfig, client: client, serverIP: serverIP, timeoutSeconds: webhookTimeout, + log: log.WithName("WebhookRegistrationClient"), } } // Register creates admission webhooks configs on cluster func (wrc *WebhookRegistrationClient) Register() error { + logger := wrc.log.WithName("Register") if wrc.serverIP != "" { - glog.Infof("Registering webhook with url https://%s\n", wrc.serverIP) + logger.Info("Registering webhook", "url", fmt.Sprintf("https://%s", 
wrc.serverIP)) } // For the case if cluster already has this configs @@ -88,6 +94,7 @@ func (wrc *WebhookRegistrationClient) RemoveWebhookConfigurations(cleanUp chan<- // used to forward request to kyverno webhooks to apply policeis // Mutationg webhook is be used for Mutating purpose func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration() error { + logger := wrc.log var caData []byte var config *admregapi.MutatingWebhookConfiguration @@ -108,16 +115,17 @@ func (wrc *WebhookRegistrationClient) CreateResourceMutatingWebhookConfiguration } _, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false) if errorsapi.IsAlreadyExists(err) { - glog.V(4).Infof("resource mutating webhook configuration %s, already exists. not creating one", config.Name) + logger.V(4).Info("resource mutating webhook configuration already exists. not creating one", "name", config.Name) return nil } if err != nil { - glog.V(4).Infof("failed to create resource mutating webhook configuration %s: %v", config.Name, err) + logger.Error(err, "failed to create resource mutating webhook configuration", "name", config.Name) return err } return nil } +//CreateResourceValidatingWebhookConfiguration ... func (wrc *WebhookRegistrationClient) CreateResourceValidatingWebhookConfiguration() error { var caData []byte var config *admregapi.ValidatingWebhookConfiguration @@ -134,14 +142,15 @@ func (wrc *WebhookRegistrationClient) CreateResourceValidatingWebhookConfigurati // clientConfig - service config = wrc.constructValidatingWebhookConfig(caData) } + logger := wrc.log.WithValues("kind", ValidatingWebhookConfigurationKind, "name", config.Name) _, err := wrc.client.CreateResource(ValidatingWebhookConfigurationKind, "", *config, false) if errorsapi.IsAlreadyExists(err) { - glog.V(4).Infof("resource validating webhook configuration %s, already exists. not creating one", config.Name) + logger.V(4).Info("resource already exists. 
not creating one") return nil } if err != nil { - glog.V(4).Infof("failed to create resource validating webhook configuration %s: %v", config.Name, err) + logger.Error(err, "failed to create resource") return err } return nil @@ -168,13 +177,13 @@ func (wrc *WebhookRegistrationClient) createPolicyValidatingWebhookConfiguration // clientConfig - service config = wrc.contructPolicyValidatingWebhookConfig(caData) } + logger := wrc.log.WithValues("kind", ValidatingWebhookConfigurationKind, "name", config.Name) // create validating webhook configuration resource if _, err := wrc.client.CreateResource(ValidatingWebhookConfigurationKind, "", *config, false); err != nil { return err } - - glog.V(4).Infof("created Validating Webhook Configuration %s ", config.Name) + logger.V(4).Info("created resource") return nil } diff --git a/pkg/webhookconfig/resource.go b/pkg/webhookconfig/resource.go index 090c33636a..050517da0a 100644 --- a/pkg/webhookconfig/resource.go +++ b/pkg/webhookconfig/resource.go @@ -3,7 +3,6 @@ package webhookconfig import ( "fmt" - "github.com/golang/glog" "github.com/nirmata/kyverno/pkg/config" admregapi "k8s.io/api/admissionregistration/v1beta1" "k8s.io/apimachinery/pkg/api/errors" @@ -11,15 +10,15 @@ import ( ) func (wrc *WebhookRegistrationClient) constructDebugMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration { + logger := wrc.log url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.MutatingWebhookServicePath) - glog.V(4).Infof("Debug MutatingWebhookConfig is registered with url %s\n", url) - + logger.V(4).Info("Debug MutatingWebhookConfig registered", "url", url) return &admregapi.MutatingWebhookConfiguration{ ObjectMeta: v1.ObjectMeta{ Name: config.MutatingWebhookConfigurationDebugName, }, - Webhooks: []admregapi.Webhook{ - generateDebugWebhook( + Webhooks: []admregapi.MutatingWebhook{ + generateDebugMutatingWebhook( config.MutatingWebhookName, url, caData, @@ -42,8 +41,8 @@ func (wrc *WebhookRegistrationClient) constructMutatingWebhookConfig(caData []by wrc.constructOwner(), }, }, - Webhooks: []admregapi.Webhook{ - generateWebhook( + Webhooks: []admregapi.MutatingWebhook{ + generateMutatingWebhook( config.MutatingWebhookName, config.MutatingWebhookServicePath, caData, @@ -68,32 +67,31 @@ func (wrc *WebhookRegistrationClient) GetResourceMutatingWebhookConfigName() str //RemoveResourceMutatingWebhookConfiguration removes mutating webhook configuration for all resources func (wrc *WebhookRegistrationClient) RemoveResourceMutatingWebhookConfiguration() error { configName := wrc.GetResourceMutatingWebhookConfigName() + logger := wrc.log.WithValues("kind", MutatingWebhookConfigurationKind, "name", configName) // delete webhook configuration err := wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", configName, false) if errors.IsNotFound(err) { - glog.V(4).Infof("resource webhook configuration %s does not exits, so not deleting", configName) + logger.Error(err, "resource does not exist") return nil } if err != nil { - glog.V(4).Infof("failed to delete resource webhook configuration %s: %v", configName, err) + logger.Error(err, "failed to delete resource") return err } - glog.V(4).Infof("deleted resource webhook configuration %s", configName) + logger.V(4).Info("deleted resource") return nil } func (wrc *WebhookRegistrationClient) constructDebugValidatingWebhookConfig(caData []byte) *admregapi.ValidatingWebhookConfiguration { url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.ValidatingWebhookServicePath) - glog.V(4).Infof("Debug
ValidatingWebhookConfig is registered with url %s\n", url) return &admregapi.ValidatingWebhookConfiguration{ ObjectMeta: v1.ObjectMeta{ Name: config.ValidatingWebhookConfigurationDebugName, }, - Webhooks: []admregapi.Webhook{ - generateDebugWebhook( + Webhooks: []admregapi.ValidatingWebhook{ + generateDebugValidatingWebhook( config.ValidatingWebhookName, url, caData, @@ -116,8 +114,8 @@ func (wrc *WebhookRegistrationClient) constructValidatingWebhookConfig(caData [] wrc.constructOwner(), }, }, - Webhooks: []admregapi.Webhook{ - generateWebhook( + Webhooks: []admregapi.ValidatingWebhook{ + generateValidatingWebhook( config.ValidatingWebhookName, config.ValidatingWebhookServicePath, caData, @@ -141,15 +139,16 @@ func (wrc *WebhookRegistrationClient) GetResourceValidatingWebhookConfigName() s func (wrc *WebhookRegistrationClient) RemoveResourceValidatingWebhookConfiguration() error { configName := wrc.GetResourceValidatingWebhookConfigName() + logger := wrc.log.WithValues("kind", ValidatingWebhookConfigurationKind, "name", configName) err := wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", configName, false) if errors.IsNotFound(err) { - glog.V(4).Infof("resource webhook configuration %s does not exits, so not deleting", configName) + logger.Error(err, "resource does not exist; deleted already") return nil } if err != nil { - glog.V(4).Infof("failed to delete resource webhook configuration %s: %v", configName, err) + logger.Error(err, "failed to delete the resource") return err } - glog.V(4).Infof("deleted resource webhook configuration %s", configName) + logger.Info("resource deleted") return nil } diff --git a/pkg/webhookconfig/rwebhookregister.go b/pkg/webhookconfig/rwebhookregister.go index aacdc72b9f..a312e07277 100644 --- a/pkg/webhookconfig/rwebhookregister.go +++ b/pkg/webhookconfig/rwebhookregister.go @@ -3,7 +3,7 @@ package webhookconfig import ( "time" - "github.com/golang/glog" + "github.com/go-logr/logr" checker "github.com/nirmata/kyverno/pkg/checker" "github.com/tevino/abool" mconfiginformer "k8s.io/client-go/informers/admissionregistration/v1beta1" @@ -23,6 +23,7 @@ type ResourceWebhookRegister struct { vWebhookConfigLister mconfiglister.ValidatingWebhookConfigurationLister webhookRegistrationClient *WebhookRegistrationClient RunValidationInMutatingWebhook string + log logr.Logger } // NewResourceWebhookRegister returns a new instance of ResourceWebhookRegister manager @@ -32,6 +33,7 @@ func NewResourceWebhookRegister( vconfigwebhookinformer mconfiginformer.ValidatingWebhookConfigurationInformer, webhookRegistrationClient *WebhookRegistrationClient, runValidationInMutatingWebhook string, + log logr.Logger, ) *ResourceWebhookRegister { return &ResourceWebhookRegister{ pendingCreation: abool.New(), @@ -42,52 +44,54 @@ func NewResourceWebhookRegister( vWebhookConfigLister: vconfigwebhookinformer.Lister(), webhookRegistrationClient: webhookRegistrationClient, RunValidationInMutatingWebhook: runValidationInMutatingWebhook, + log: log, } } //RegisterResourceWebhook registers a resource webhook func (rww *ResourceWebhookRegister) RegisterResourceWebhook() { + logger := rww.log // drop the request if creation is in processing if rww.pendingCreation.IsSet() { - glog.V(3).Info("resource webhook configuration is in pending creation, skip the request") + logger.V(3).Info("resource webhook configuration is in pending creation, skip the request") return } timeDiff := time.Since(rww.LastReqTime.Time()) if timeDiff < checker.DefaultDeadline { - glog.V(3).Info("Verified 
webhook status, creating webhook configuration") + logger.V(3).Info("verified webhook status, creating webhook configuration") go func() { mutatingConfigName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName() mutatingConfig, _ := rww.mWebhookConfigLister.Get(mutatingConfigName) if mutatingConfig != nil { - glog.V(4).Info("mutating webhoook configuration already exists") + logger.V(4).Info("mutating webhook configuration already exists") } else { rww.pendingCreation.Set() err1 := rww.webhookRegistrationClient.CreateResourceMutatingWebhookConfiguration() rww.pendingCreation.UnSet() if err1 != nil { - glog.Errorf("failed to create resource mutating webhook configuration: %v, re-queue creation request", err1) + logger.Error(err1, "failed to create resource mutating webhook configuration, re-queue creation request") rww.RegisterResourceWebhook() return } - glog.V(3).Info("Successfully created mutating webhook configuration for resources") + logger.V(3).Info("successfully created mutating webhook configuration for resources") } if rww.RunValidationInMutatingWebhook != "true" { validatingConfigName := rww.webhookRegistrationClient.GetResourceValidatingWebhookConfigName() validatingConfig, _ := rww.vWebhookConfigLister.Get(validatingConfigName) if validatingConfig != nil { - glog.V(4).Info("validating webhoook configuration already exists") + logger.V(4).Info("validating webhook configuration already exists") } else { rww.pendingCreation.Set() err2 := rww.webhookRegistrationClient.CreateResourceValidatingWebhookConfiguration() rww.pendingCreation.UnSet() if err2 != nil { - glog.Errorf("failed to create resource validating webhook configuration: %v, re-queue creation request", err2) + logger.Error(err2, "failed to create resource validating webhook configuration; re-queue creation request") rww.RegisterResourceWebhook() return } - glog.V(3).Info("Successfully created validating webhook configuration for resources") + logger.V(3).Info("successfully created validating webhook configuration for resources") } } }() @@ -96,19 +100,20 @@ func (rww *ResourceWebhookRegister) RegisterResourceWebhook() { //Run starts the ResourceWebhookRegister manager func (rww *ResourceWebhookRegister) Run(stopCh <-chan struct{}) { + logger := rww.log // wait for cache to populate first time if !cache.WaitForCacheSync(stopCh, rww.mwebhookconfigSynced, rww.vwebhookconfigSynced) { - glog.Error("configuration: failed to sync webhook informer cache") + logger.Info("configuration: failed to sync webhook informer cache") } - } // RemoveResourceWebhookConfiguration removes the resource webhook configurations func (rww *ResourceWebhookRegister) RemoveResourceWebhookConfiguration() error { + logger := rww.log mutatingConfigName := rww.webhookRegistrationClient.GetResourceMutatingWebhookConfigName() mutatingConfig, err := rww.mWebhookConfigLister.Get(mutatingConfigName) if err != nil { - glog.V(4).Infof("failed to list mutating webhook config: %v", err) + logger.Error(err, "failed to list mutating webhook config") return err } if mutatingConfig != nil { @@ -116,14 +121,14 @@ func (rww *ResourceWebhookRegister) RemoveResourceWebhookConfiguration() error { if err != nil { return err } - glog.V(3).Info("removed mutating resource webhook configuration") + logger.V(3).Info("removed mutating resource webhook configuration") } if rww.RunValidationInMutatingWebhook != "true" { validatingConfigName := rww.webhookRegistrationClient.GetResourceValidatingWebhookConfigName() validatingConfig, err :=
rww.vWebhookConfigLister.Get(validatingConfigName) if err != nil { - glog.V(4).Infof("failed to list validating webhook config: %v", err) + logger.Error(err, "failed to list validating webhook config") return err } if validatingConfig != nil { @@ -131,7 +136,7 @@ func (rww *ResourceWebhookRegister) RemoveResourceWebhookConfiguration() error { if err != nil { return err } - glog.V(3).Info("removed validating resource webhook configuration") + logger.V(3).Info("removed validating resource webhook configuration") } } return nil diff --git a/pkg/webhooks/admission_test.go b/pkg/webhooks/admission_test.go deleted file mode 100644 index d753352a72..0000000000 --- a/pkg/webhooks/admission_test.go +++ /dev/null @@ -1 +0,0 @@ -package webhooks_test diff --git a/pkg/webhooks/annotations.go b/pkg/webhooks/annotations.go index 34d3e10468..eddded1cde 100644 --- a/pkg/webhooks/annotations.go +++ b/pkg/webhooks/annotations.go @@ -7,7 +7,7 @@ import ( yamlv2 "gopkg.in/yaml.v2" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" + "github.com/go-logr/logr" "github.com/nirmata/kyverno/pkg/engine" "github.com/nirmata/kyverno/pkg/engine/response" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" @@ -38,7 +38,7 @@ var operationToPastTense = map[string]string{ "test": "tested", } -func generateAnnotationPatches(engineResponses []response.EngineResponse) []byte { +func generateAnnotationPatches(engineResponses []response.EngineResponse, log logr.Logger) []byte { var annotations map[string]string for _, er := range engineResponses { @@ -53,7 +53,7 @@ func generateAnnotationPatches(engineResponses []response.EngineResponse) []byte } var patchResponse annresponse - value := annotationFromEngineResponses(engineResponses) + value := annotationFromEngineResponses(engineResponses, log) if value == nil { // no patches or error while processing patches return nil @@ -90,21 +90,21 @@ func generateAnnotationPatches(engineResponses []response.EngineResponse) []byte // check the patch _, err := jsonpatch.DecodePatch([]byte("[" + string(patchByte) + "]")) if err != nil { - glog.Errorf("Failed to make patch from annotation'%s', err: %v\n ", string(patchByte), err) + log.Error(err, "failed to build JSON patch for annotation", "patch", string(patchByte)) } return patchByte } -func annotationFromEngineResponses(engineResponses []response.EngineResponse) []byte { +func annotationFromEngineResponses(engineResponses []response.EngineResponse, log logr.Logger) []byte { var annotationContent = make(map[string]string) for _, engineResponse := range engineResponses { if !engineResponse.IsSuccesful() { - glog.V(3).Infof("Policy %s failed, skip preparing annotation\n", engineResponse.PolicyResponse.Policy) + log.V(3).Info("skip building annotation; policy failed to apply", "policy", engineResponse.PolicyResponse.Policy) continue } - rulePatches := annotationFromPolicyResponse(engineResponse.PolicyResponse) + rulePatches := annotationFromPolicyResponse(engineResponse.PolicyResponse, log) if rulePatches == nil { continue } @@ -126,13 +126,13 @@ func annotationFromEngineResponses(engineResponses []response.EngineResponse) [] return result } -func annotationFromPolicyResponse(policyResponse response.PolicyResponse) []rulePatch { +func annotationFromPolicyResponse(policyResponse response.PolicyResponse, log logr.Logger) []rulePatch { var rulePatches []rulePatch for _, ruleInfo := range policyResponse.Rules { for _, patch := range ruleInfo.Patches { var patchmap map[string]interface{} if err := json.Unmarshal(patch,
&patchmap); err != nil { - glog.Errorf("Failed to parse patch bytes, err: %v\n", err) + log.Error(err, "Failed to parse JSON patch bytes") continue } @@ -142,14 +142,12 @@ func annotationFromPolicyResponse(policyResponse response.PolicyResponse) []rule Path: patchmap["path"].(string)} rulePatches = append(rulePatches, rp) - glog.V(4).Infof("Annotation value prepared: %v\n", rulePatches) + log.V(4).Info("annotation value prepared", "patches", rulePatches) } } - if len(rulePatches) == 0 { return nil } - return rulePatches } diff --git a/pkg/webhooks/annotations_test.go b/pkg/webhooks/annotations_test.go index 320e347ea3..dc4c6e6c95 100644 --- a/pkg/webhooks/annotations_test.go +++ b/pkg/webhooks/annotations_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/nirmata/kyverno/pkg/engine/response" + "github.com/nirmata/kyverno/pkg/log" "gotest.tools/assert" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" ) @@ -42,7 +43,7 @@ func Test_empty_annotation(t *testing.T) { patchStr := `{ "op": "replace", "path": "/spec/containers/0/imagePullPolicy", "value": "IfNotPresent" }` engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true, nil) - annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}) + annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}, log.Log) expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"default-imagepullpolicy.mutate-container.kyverno.io: replaced /spec/containers/0/imagePullPolicy\n"}}` assert.Assert(t, string(annPatches) == expectedPatches) } @@ -54,7 +55,7 @@ func Test_exist_annotation(t *testing.T) { patchStr := `{ "op": "replace", "path": "/spec/containers/0/imagePullPolicy", "value": "IfNotPresent" }` engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true, annotation) - annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}) + annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}, log.Log) expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"default-imagepullpolicy.mutate-container.kyverno.io: replaced /spec/containers/0/imagePullPolicy\n"}}` assert.Assert(t, string(annPatches) == expectedPatches) @@ -67,7 +68,7 @@ func Test_exist_kyverno_annotation(t *testing.T) { patchStr := `{ "op": "replace", "path": "/spec/containers/0/imagePullPolicy", "value": "IfNotPresent" }` engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", []string{patchStr}, true, annotation) - annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}) + annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}, log.Log) expectedPatches := `{"op":"add","path":"/metadata/annotations","value":{"policies.kyverno.io/patches":"default-imagepullpolicy.mutate-container.kyverno.io: replaced /spec/containers/0/imagePullPolicy\n"}}` assert.Assert(t, string(annPatches) == expectedPatches) @@ -79,11 +80,11 @@ func Test_annotation_nil_patch(t *testing.T) { } engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", nil, true, annotation) - annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}) + annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}, log.Log) assert.Assert(t, annPatches == nil) engineResponseNew := 
newEngineResponse("mutate-container", "default-imagepullpolicy", []string{""}, true, annotation) - annPatchesNew := generateAnnotationPatches([]response.EngineResponse{engineResponseNew}) + annPatchesNew := generateAnnotationPatches([]response.EngineResponse{engineResponseNew}, log.Log) assert.Assert(t, annPatchesNew == nil) } @@ -93,7 +94,7 @@ func Test_annotation_failed_Patch(t *testing.T) { } engineResponse := newEngineResponse("mutate-container", "default-imagepullpolicy", nil, false, annotation) - annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}) + annPatches := generateAnnotationPatches([]response.EngineResponse{engineResponse}, log.Log) assert.Assert(t, annPatches == nil) } diff --git a/pkg/webhooks/checker.go b/pkg/webhooks/checker.go index e6ccfc41c9..4a52fa7649 100644 --- a/pkg/webhooks/checker.go +++ b/pkg/webhooks/checker.go @@ -1,13 +1,12 @@ package webhooks import ( - "github.com/golang/glog" "k8s.io/api/admission/v1beta1" ) func (ws *WebhookServer) handleVerifyRequest(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse { - glog.V(4).Infof("Receive request in mutating webhook '/verify': Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s", - request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation) + logger := ws.log.WithValues("action", "verify", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation) + logger.V(4).Info("incoming request") return &v1beta1.AdmissionResponse{ Allowed: true, } diff --git a/pkg/webhooks/common.go b/pkg/webhooks/common.go index a6a2682476..f9c4ab2ccc 100644 --- a/pkg/webhooks/common.go +++ b/pkg/webhooks/common.go @@ -4,7 +4,7 @@ import ( "fmt" "strings" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/response" engineutils "github.com/nirmata/kyverno/pkg/engine/utils" @@ -25,14 +25,14 @@ func isResponseSuccesful(engineReponses []response.EngineResponse) bool { // returns true -> if there is even one policy that blocks resource request // returns false -> if all the policies are meant to report only, we dont block resource request -func toBlockResource(engineReponses []response.EngineResponse) bool { +func toBlockResource(engineReponses []response.EngineResponse, log logr.Logger) bool { for _, er := range engineReponses { if !er.IsSuccesful() && er.PolicyResponse.ValidationFailureAction == Enforce { - glog.V(4).Infof("ValidationFailureAction set to enforce for policy %s , blocking resource request ", er.PolicyResponse.Policy) + log.Info("spec.ValidationFailureAction set to enforce; blocking resource request", "policy", er.PolicyResponse.Policy) return true } } - glog.V(4).Infoln("ValidationFailureAction set to audit, allowing resource request, reporting with policy violation") + log.V(4).Info("spec.ValidationFailureAction set to audit for all applicable policies; allowing resource request; reporting with policy violation") return false } @@ -98,14 +98,14 @@ const ( Audit = "audit" // dont block the request on failure, but report failiures as policy violations ) -func processResourceWithPatches(patch []byte, resource []byte) []byte { +func processResourceWithPatches(patch []byte, resource []byte, log logr.Logger) []byte { if patch == nil { return resource } resource, err := engineutils.ApplyPatchNew(resource, patch) if err != nil { - glog.Errorf("failed to patch resource: %v", err) + log.Error(err, "failed to
patch resource") return nil } return resource diff --git a/pkg/webhooks/generate/generate.go b/pkg/webhooks/generate/generate.go index f120c9fb5d..3631845541 100644 --- a/pkg/webhooks/generate/generate.go +++ b/pkg/webhooks/generate/generate.go @@ -5,7 +5,7 @@ import ( "time" backoff "github.com/cenkalti/backoff" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned" utilruntime "k8s.io/apimachinery/pkg/util/runtime" @@ -23,37 +23,41 @@ type Generator struct { ch chan kyverno.GenerateRequestSpec client *kyvernoclient.Clientset stopCh <-chan struct{} + log logr.Logger } //NewGenerator returns a new instance of Generate-Request resource generator -func NewGenerator(client *kyvernoclient.Clientset, stopCh <-chan struct{}) *Generator { +func NewGenerator(client *kyvernoclient.Clientset, stopCh <-chan struct{}, log logr.Logger) *Generator { gen := &Generator{ ch: make(chan kyverno.GenerateRequestSpec, 1000), client: client, stopCh: stopCh, + log: log, } return gen } //Create to create generate request resoruce (blocking call if channel is full) func (g *Generator) Create(gr kyverno.GenerateRequestSpec) error { - glog.V(4).Infof("create GR %v", gr) + logger := g.log + logger.V(4).Info("creating Generate Request", "request", gr) // Send to channel select { case g.ch <- gr: return nil case <-g.stopCh: - glog.Info("shutting down channel") + logger.Info("shutting down channel") return fmt.Errorf("shutting down gr create channel") } } // Run starts the generate request spec func (g *Generator) Run(workers int) { + logger := g.log defer utilruntime.HandleCrash() - glog.V(4).Info("Started generate request") + logger.V(4).Info("starting") defer func() { - glog.V(4).Info("Shutting down generate request") + logger.V(4).Info("shutting down") }() for i := 0; i < workers; i++ { go wait.Until(g.process, time.Second, g.stopCh) @@ -62,17 +66,18 @@ func (g *Generator) Run(workers int) { } func (g *Generator) process() { + logger := g.log for r := range g.ch { - glog.V(4).Infof("received generate request %v", r) + logger.V(4).Info("received generate request", "request", r) if err := g.generate(r); err != nil { - glog.Errorf("Failed to create Generate Request CR: %v", err) + logger.Error(err, "failed to create generate request CR") } } } func (g *Generator) generate(grSpec kyverno.GenerateRequestSpec) error { // create a generate request - if err := retryCreateResource(g.client, grSpec); err != nil { + if err := retryCreateResource(g.client, grSpec, g.log); err != nil { return err } return nil @@ -81,7 +86,10 @@ func (g *Generator) generate(grSpec kyverno.GenerateRequestSpec) error { // -> receiving channel to take requests to create request // use worker pattern to read and create the CR resource -func retryCreateResource(client *kyvernoclient.Clientset, grSpec kyverno.GenerateRequestSpec) error { +func retryCreateResource(client *kyvernoclient.Clientset, + grSpec kyverno.GenerateRequestSpec, + log logr.Logger, +) error { var i int var err error createResource := func() error { @@ -95,7 +103,7 @@ func retryCreateResource(client *kyvernoclient.Clientset, grSpec kyverno.Generat // gr.Status.State = kyverno.Pending // generate requests created in kyverno namespace _, err = client.KyvernoV1().GenerateRequests("kyverno").Create(&gr) - glog.V(4).Infof("retry %v create generate request", i) + log.V(4).Info("retrying create generate request CR", "retryCount", i, "name", gr.GetGenerateName(),
"namespace", gr.GetNamespace()) i++ return err } diff --git a/pkg/webhooks/generation.go b/pkg/webhooks/generation.go index 2dddf71e1c..6f005a504e 100644 --- a/pkg/webhooks/generation.go +++ b/pkg/webhooks/generation.go @@ -1,7 +1,6 @@ package webhooks import ( - "github.com/golang/glog" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine" "github.com/nirmata/kyverno/pkg/engine/context" @@ -13,19 +12,19 @@ import ( //HandleGenerate handles admission-requests for policies with generate rules func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []kyverno.ClusterPolicy, patchedResource []byte, roles, clusterRoles []string) (bool, string) { + logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation) + logger.V(4).Info("incoming request") var engineResponses []response.EngineResponse // convert RAW to unstructured resource, err := utils.ConvertToUnstructured(request.Object.Raw) if err != nil { //TODO: skip applying the admission control ? - glog.Errorf("unable to convert raw resource to unstructured: %v", err) + logger.Error(err, "failed to convert RAR resource to unstructured format") return true, "" } // CREATE resources, do not have name, assigned in admission-request - glog.V(4).Infof("Handle Generate: Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s", - resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation) userRequestInfo := kyverno.RequestInfo{ Roles: roles, @@ -36,16 +35,16 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic // load incoming resource into the context err = ctx.AddResource(request.Object.Raw) if err != nil { - glog.Infof("Failed to load resource in context:%v", err) + logger.Error(err, "failed to load incoming resource in context") } err = ctx.AddUserInfo(userRequestInfo) if err != nil { - glog.Infof("Failed to load userInfo in context:%v", err) + logger.Error(err, "failed to load userInfo in context") } // load service account in context err = ctx.AddSA(userRequestInfo.AdmissionUserInfo.Username) if err != nil { - glog.Infof("Failed to load service account in context:%v", err) + logger.Error(err, "failed to load service account in context") } policyContext := engine.PolicyContext{ diff --git a/pkg/webhooks/mutation.go b/pkg/webhooks/mutation.go index 650fce8374..b7028bfcb2 100644 --- a/pkg/webhooks/mutation.go +++ b/pkg/webhooks/mutation.go @@ -1,9 +1,6 @@ package webhooks import ( - "time" - - "github.com/golang/glog" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine" "github.com/nirmata/kyverno/pkg/engine/context" @@ -19,8 +16,8 @@ import ( // HandleMutation handles mutating webhook admission request // return value: generated patches func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resource unstructured.Unstructured, policies []kyverno.ClusterPolicy, roles, clusterRoles []string) []byte { - glog.V(4).Infof("Receive request in mutating webhook: Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s", - request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation) + logger := ws.log.WithValues("action", "mutation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation) + logger.V(4).Info("incoming request") var patches [][]byte var 
policyStats []policyctr.PolicyStat @@ -70,16 +67,16 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou // load incoming resource into the context err = ctx.AddResource(request.Object.Raw) if err != nil { - glog.Infof("Failed to load resource in context:%v", err) + logger.Error(err, "failed to load incoming resource in context") } err = ctx.AddUserInfo(userRequestInfo) if err != nil { - glog.Infof("Failed to load userInfo in context:%v", err) + logger.Error(err, "failed to load userInfo in context") } err = ctx.AddSA(userRequestInfo.AdmissionUserInfo.Username) if err != nil { - glog.Infof("Failed to load service account in context:%v", err) + logger.Error(err, "failed to load service account in context") } policyContext := engine.PolicyContext{ @@ -89,8 +86,7 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou } for _, policy := range policies { - glog.V(2).Infof("Handling mutation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s", - resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.UID, request.Operation) + logger.V(2).Info("evaluating policy", "policy", policy.Name) policyContext.Policy = policy engineResponse := engine.Mutate(policyContext) @@ -98,27 +94,24 @@ // Gather policy application statistics gatherStat(policy.Name, engineResponse.PolicyResponse) if !engineResponse.IsSuccesful() { - glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, resource.GetNamespace(), resource.GetName()) + logger.V(4).Info("failed to apply policy", "policy", policy.Name) continue } // gather patches patches = append(patches, engineResponse.GetPatches()...) - glog.V(4).Infof("Mutation from policy %s has applied successfully to %s %s/%s", policy.Name, request.Kind.Kind, resource.GetNamespace(), resource.GetName()) + logger.Info("mutation rules from policy applied successfully", "policy", policy.Name) policyContext.NewResource = engineResponse.PatchedResource } // generate annotations - if annPatches := generateAnnotationPatches(engineResponses); annPatches != nil { + if annPatches := generateAnnotationPatches(engineResponses, logger); annPatches != nil { patches = append(patches, annPatches) } - // report time - reportTime := time.Now() - // AUDIT // generate violation when response fails - pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses) + pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger) ws.pvGenerator.Add(pvInfos...) // REPORTING EVENTS // Scenario 1: @@ -128,7 +121,7 @@ // all policies were applied succesfully. // create an event on the resource // ADD EVENTS - events := generateEvents(engineResponses, false, (request.Operation == v1beta1.Update)) + events := generateEvents(engineResponses, false, (request.Operation == v1beta1.Update), logger) ws.eventGen.Add(events...)
sendStat(false) @@ -136,19 +129,15 @@ func (ws *WebhookServer) HandleMutation(request *v1beta1.AdmissionRequest, resou // debug info func() { if len(patches) != 0 { - glog.V(4).Infof("Patches generated for %s/%s/%s, operation=%v:\n %v", - resource.GetKind(), resource.GetNamespace(), resource.GetName(), request.Operation, string(engineutils.JoinPatches(patches))) + logger.V(4).Info("JSON patches generated") } // if any of the policies fails, print out the error if !isResponseSuccesful(engineResponses) { - glog.Errorf("Failed to mutate the resource, report as violation: %s\n", getErrorMsg(engineResponses)) + logger.Info("failed to apply mutation rules on the resource, reporting policy violation", "errors", getErrorMsg(engineResponses)) } }() - // report time end - glog.V(4).Infof("report: %v %s/%s/%s", time.Since(reportTime), resource.GetKind(), resource.GetNamespace(), resource.GetName()) - // patches holds all the successful patches, if no patch is created, it returns nil return engineutils.JoinPatches(patches) } diff --git a/pkg/webhooks/policymutation.go b/pkg/webhooks/policymutation.go index 8081528edd..dea2f87fff 100644 --- a/pkg/webhooks/policymutation.go +++ b/pkg/webhooks/policymutation.go @@ -8,7 +8,7 @@ import ( "strings" jsonpatch "github.com/evanphx/json-patch" - "github.com/golang/glog" + "github.com/go-logr/logr" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine" "github.com/nirmata/kyverno/pkg/utils" @@ -17,12 +17,13 @@ import ( ) func (ws *WebhookServer) handlePolicyMutation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse { + logger := ws.log.WithValues("action", "policymutation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation) var policy *kyverno.ClusterPolicy raw := request.Object.Raw //TODO: can this happen? wont this be picked by OpenAPI spec schema ? 
if err := json.Unmarshal(raw, &policy); err != nil { - glog.Errorf("Failed to unmarshal policy admission request, err %v\n", err) + logger.Error(err, "failed to unmarshal policy admission request") return &v1beta1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ @@ -31,10 +32,9 @@ func (ws *WebhookServer) handlePolicyMutation(request *v1beta1.AdmissionRequest) } } // Generate JSON Patches for defaults - patches, updateMsgs := generateJSONPatchesForDefaults(policy) + patches, updateMsgs := generateJSONPatchesForDefaults(policy, logger) if patches != nil { patchType := v1beta1.PatchTypeJSONPatch - glog.V(4).Infof("defaulted values %v policy %s", updateMsgs, policy.Name) return &v1beta1.AdmissionResponse{ Allowed: true, Result: &metav1.Status{ @@ -44,35 +44,34 @@ func (ws *WebhookServer) handlePolicyMutation(request *v1beta1.AdmissionRequest) PatchType: &patchType, } } - glog.V(4).Infof("nothing to default for policy %s", policy.Name) return &v1beta1.AdmissionResponse{ Allowed: true, } } -func generateJSONPatchesForDefaults(policy *kyverno.ClusterPolicy) ([]byte, []string) { +func generateJSONPatchesForDefaults(policy *kyverno.ClusterPolicy, log logr.Logger) ([]byte, []string) { var patches [][]byte var updateMsgs []string // default 'ValidationFailureAction' - if patch, updateMsg := defaultvalidationFailureAction(policy); patch != nil { + if patch, updateMsg := defaultvalidationFailureAction(policy, log); patch != nil { patches = append(patches, patch) updateMsgs = append(updateMsgs, updateMsg) } // default 'Background' - if patch, updateMsg := defaultBackgroundFlag(policy); patch != nil { + if patch, updateMsg := defaultBackgroundFlag(policy, log); patch != nil { patches = append(patches, patch) updateMsgs = append(updateMsgs, updateMsg) } - patch, errs := generatePodControllerRule(*policy) + patch, errs := generatePodControllerRule(*policy, log) if len(errs) > 0 { var errMsgs []string for _, err := range errs { errMsgs = append(errMsgs, err.Error()) + log.Error(err, "failed to generate pod controller rule") } - glog.Errorf("failed auto generating rule for pod controllers: %s", errMsgs) updateMsgs = append(updateMsgs, strings.Join(errMsgs, ";")) } @@ -81,11 +80,11 @@ func generateJSONPatchesForDefaults(policy *kyverno.ClusterPolicy) ([]byte, []st return utils.JoinPatches(patches), updateMsgs } -func defaultBackgroundFlag(policy *kyverno.ClusterPolicy) ([]byte, string) { +func defaultBackgroundFlag(policy *kyverno.ClusterPolicy, log logr.Logger) ([]byte, string) { // default 'Background' flag to 'true' if not specified defaultVal := true if policy.Spec.Background == nil { - glog.V(4).Infof("default policy %s 'Background' to '%s'", policy.Name, strconv.FormatBool(true)) + log.V(4).Info("setting default value", "spec.background", true) jsonPatch := struct { Path string `json:"path"` Op string `json:"op"` @@ -97,19 +96,19 @@ func defaultBackgroundFlag(policy *kyverno.ClusterPolicy) ([]byte, string) { } patchByte, err := json.Marshal(jsonPatch) if err != nil { - glog.Errorf("failed to set default 'Background' to '%s' for policy %s", strconv.FormatBool(true), policy.Name) + log.Error(err, "failed to set default value", "spec.background", true) return nil, "" } - glog.V(4).Infof("generate JSON Patch to set default 'Background' to '%s' for policy %s", strconv.FormatBool(true), policy.Name) + log.Info("generated JSON Patch to set default", "spec.background", true) return patchByte, fmt.Sprintf("default 'Background' to '%s'", strconv.FormatBool(true)) } return nil, "" } -func
defaultvalidationFailureAction(policy *kyverno.ClusterPolicy) ([]byte, string) { +func defaultvalidationFailureAction(policy *kyverno.ClusterPolicy, log logr.Logger) ([]byte, string) { // default ValidationFailureAction to "audit" if not specified if policy.Spec.ValidationFailureAction == "" { - glog.V(4).Infof("defaulting policy %s 'ValidationFailureAction' to '%s'", policy.Name, Audit) + log.V(4).Info("setting default value", "spec.validationFailureAction", Audit) jsonPatch := struct { Path string `json:"path"` Op string `json:"op"` @@ -121,10 +120,10 @@ func defaultvalidationFailureAction(policy *kyverno.ClusterPolicy) ([]byte, stri } patchByte, err := json.Marshal(jsonPatch) if err != nil { - glog.Errorf("failed to set default 'ValidationFailureAction' to '%s' for policy %s", Audit, policy.Name) + log.Error(err, "failed to set default value", "spec.validationFailureAction", Audit) return nil, "" } - glog.V(4).Infof("generate JSON Patch to set default 'ValidationFailureAction' to '%s' for policy %s", Audit, policy.Name) + log.Info("generated JSON Patch to set default", "spec.validationFailureAction", Audit) return patchByte, fmt.Sprintf("default 'ValidationFailureAction' to '%s'", Audit) } return nil, "" @@ -140,7 +139,7 @@ func defaultvalidationFailureAction(policy *kyverno.ClusterPolicy) ([]byte, stri // make sure all fields are applicable to pod cotrollers // generatePodControllerRule returns two patches: rulePatches and annotation patch(if necessary) -func generatePodControllerRule(policy kyverno.ClusterPolicy) (patches [][]byte, errs []error) { +func generatePodControllerRule(policy kyverno.ClusterPolicy, log logr.Logger) (patches [][]byte, errs []error) { ann := policy.GetAnnotations() controllers, ok := ann[engine.PodControllersAnnotation] @@ -159,10 +158,9 @@ func generatePodControllerRule(policy kyverno.ClusterPolicy) (patches [][]byte, if controllers == "none" { return nil, nil } + log.V(3).Info("auto generating rule for pod controllers", "controllers", controllers) - glog.V(3).Infof("Auto generating rule for pod controller: %s", controllers) - - p, err := generateRulePatches(policy, controllers) + p, err := generateRulePatches(policy, controllers, log) patches = append(patches, p...) errs = append(errs, err...)
return @@ -197,7 +195,7 @@ func createRuleMap(rules []kyverno.Rule) map[string]kyvernoRule { } // generateRulePatches generates rule for podControllers based on scenario A and C -func generateRulePatches(policy kyverno.ClusterPolicy, controllers string) (rulePatches [][]byte, errs []error) { +func generateRulePatches(policy kyverno.ClusterPolicy, controllers string, log logr.Logger) (rulePatches [][]byte, errs []error) { var genRule kyvernoRule insertIdx := len(policy.Spec.Rules) @@ -210,7 +208,7 @@ func generateRulePatches(policy kyverno.ClusterPolicy, controllers string) (rule for _, rule := range policy.Spec.Rules { patchPostion := insertIdx - genRule = generateRuleForControllers(rule, controllers) + genRule = generateRuleForControllers(rule, controllers, log) if reflect.DeepEqual(genRule, kyvernoRule{}) { continue } @@ -272,7 +270,7 @@ type kyvernoRule struct { Validation *kyverno.Validation `json:"validate,omitempty"` } -func generateRuleForControllers(rule kyverno.Rule, controllers string) kyvernoRule { +func generateRuleForControllers(rule kyverno.Rule, controllers string, log logr.Logger) kyvernoRule { if strings.HasPrefix(rule.Name, "autogen-") { return kyvernoRule{} } @@ -292,7 +290,7 @@ func generateRuleForControllers(rule kyverno.Rule, controllers string) kyvernoRu if controllers == "all" { if match.ResourceDescription.Name != "" || match.ResourceDescription.Selector != nil || exclude.ResourceDescription.Name != "" || exclude.ResourceDescription.Selector != nil { - glog.Warningf("Rule '%s' skip generating rule on pod controllers: Name / Selector in resource decription may not be applicable.", rule.Name) + log.Info("skip generating rule on pod controllers: Name / Selector in resource description may not be applicable.", "rule", rule.Name) return kyvernoRule{} } controllers = engine.PodControllers diff --git a/pkg/webhooks/policymutation_test.go b/pkg/webhooks/policymutation_test.go index 0b5af63c59..f600ff8220 100644 --- a/pkg/webhooks/policymutation_test.go +++ b/pkg/webhooks/policymutation_test.go @@ -7,6 +7,7 @@ import ( kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" "github.com/nirmata/kyverno/pkg/engine/utils" + "github.com/nirmata/kyverno/pkg/log" "gotest.tools/assert" ) @@ -28,7 +29,7 @@ func TestGeneratePodControllerRule_NilAnnotation(t *testing.T) { var policy kyverno.ClusterPolicy assert.Assert(t, json.Unmarshal(policyRaw, &policy)) - patches, errs := generatePodControllerRule(policy) + patches, errs := generatePodControllerRule(policy, log.Log) assert.Assert(t, len(errs) == 0) p, err := utils.ApplyPatches(policyRaw, patches) @@ -61,7 +62,7 @@ func TestGeneratePodControllerRule_PredefinedAnnotation(t *testing.T) { var policy kyverno.ClusterPolicy assert.Assert(t, json.Unmarshal(policyRaw, &policy)) - patches, errs := generatePodControllerRule(policy) + patches, errs := generatePodControllerRule(policy, log.Log) assert.Assert(t, len(errs) == 0) assert.Assert(t, len(patches) == 0) } @@ -112,7 +113,7 @@ func TestGeneratePodControllerRule_DisableFeature(t *testing.T) { var policy kyverno.ClusterPolicy assert.Assert(t, json.Unmarshal(policyRaw, &policy)) - patches, errs := generatePodControllerRule(policy) + patches, errs := generatePodControllerRule(policy, log.Log) assert.Assert(t, len(errs) == 0) assert.Assert(t, len(patches) == 0) } @@ -163,7 +164,7 @@ func TestGeneratePodControllerRule_Mutate(t *testing.T) { var policy kyverno.ClusterPolicy assert.Assert(t, json.Unmarshal(policyRaw, &policy)) - patches, errs
:= generatePodControllerRule(policy, log.Log) assert.Assert(t, len(errs) == 0) p, err := utils.ApplyPatches(policyRaw, patches) @@ -261,7 +262,7 @@ func TestGeneratePodControllerRule_ExistOtherAnnotation(t *testing.T) { var policy kyverno.ClusterPolicy assert.Assert(t, json.Unmarshal(policyRaw, &policy)) - patches, errs := generatePodControllerRule(policy) + patches, errs := generatePodControllerRule(policy, log.Log) assert.Assert(t, len(errs) == 0) p, err := utils.ApplyPatches(policyRaw, patches) @@ -333,7 +334,7 @@ func TestGeneratePodControllerRule_ValidateAnyPattern(t *testing.T) { var policy kyverno.ClusterPolicy assert.Assert(t, json.Unmarshal(policyRaw, &policy)) - patches, errs := generatePodControllerRule(policy) + patches, errs := generatePodControllerRule(policy, log.Log) assert.Assert(t, len(errs) == 0) p, err := utils.ApplyPatches(policyRaw, patches) @@ -471,7 +472,7 @@ func TestGeneratePodControllerRule_ValidatePattern(t *testing.T) { var policy kyverno.ClusterPolicy assert.Assert(t, json.Unmarshal(policyRaw, &policy)) - patches, errs := generatePodControllerRule(policy) + patches, errs := generatePodControllerRule(policy, log.Log) assert.Assert(t, len(errs) == 0) p, err := utils.ApplyPatches(policyRaw, patches) diff --git a/pkg/webhooks/policyvalidation.go b/pkg/webhooks/policyvalidation.go index fcb79c7163..917e541bbc 100644 --- a/pkg/webhooks/policyvalidation.go +++ b/pkg/webhooks/policyvalidation.go @@ -4,7 +4,6 @@ import ( "encoding/json" "fmt" - "github.com/golang/glog" kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1" policyvalidate "github.com/nirmata/kyverno/pkg/policy" v1beta1 "k8s.io/api/admission/v1beta1" @@ -13,6 +12,7 @@ import ( //HandlePolicyValidation performs the validation check on policy resource func (ws *WebhookServer) handlePolicyValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse { + logger := ws.log.WithValues("action", "policyvalidation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation) var policy *kyverno.ClusterPolicy admissionResp := &v1beta1.AdmissionResponse{ Allowed: true, @@ -21,7 +21,7 @@ func (ws *WebhookServer) handlePolicyValidation(request *v1beta1.AdmissionReques //TODO: can this happen? wont this be picked by OpenAPI spec schema ? 
 	raw := request.Object.Raw
 	if err := json.Unmarshal(raw, &policy); err != nil {
-		glog.Errorf("Failed to unmarshal policy admission request, err %v\n", err)
+		logger.Error(err, "failed to unmarshal incoming resource to policy type")
 		return &v1beta1.AdmissionResponse{Allowed: false,
 			Result: &metav1.Status{
 				Message: fmt.Sprintf("Failed to unmarshal policy admission request err %v", err),
diff --git a/pkg/webhooks/report.go b/pkg/webhooks/report.go
index 035a60688e..83e67c5753 100644
--- a/pkg/webhooks/report.go
+++ b/pkg/webhooks/report.go
@@ -3,6 +3,7 @@ package webhooks
 import (
 	"strings"
+	"github.com/go-logr/logr"
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	"github.com/nirmata/kyverno/pkg/engine/response"
@@ -10,7 +11,7 @@ import (
 )
 //generateEvents generates event info for the engine responses
-func generateEvents(engineResponses []response.EngineResponse, blocked, onUpdate bool) []event.Info {
+func generateEvents(engineResponses []response.EngineResponse, blocked, onUpdate bool, log logr.Logger) []event.Info {
 	var events []event.Info
 	// Scenario 1
 	// - Admission-Response is SUCCESS && CREATE
@@ -26,6 +27,7 @@ func generateEvents(engineResponses []response.EngineResponse, blocked, onUpdate
 			successRulesStr := strings.Join(successRules, ";")
 			// event on resource
 			e := event.NewEvent(
+				log,
 				er.PolicyResponse.Resource.Kind,
 				er.PolicyResponse.Resource.APIVersion,
 				er.PolicyResponse.Resource.Namespace,
@@ -59,6 +61,7 @@ func generateEvents(engineResponses []response.EngineResponse, blocked, onUpdate
 			filedRulesStr := strings.Join(failedRules, ";")
 			// Event on Policy
 			e := event.NewEvent(
+				log,
 				"ClusterPolicy",
 				kyverno.SchemeGroupVersion.String(),
 				"",
@@ -90,6 +93,7 @@ func generateEvents(engineResponses []response.EngineResponse, blocked, onUpdate
 		filedRulesStr := strings.Join(failedRules, ";")
 		// Event on the policy
 		e := event.NewEvent(
+			log,
 			"ClusterPolicy",
 			kyverno.SchemeGroupVersion.String(),
 			"",
@@ -104,6 +108,7 @@ func generateEvents(engineResponses []response.EngineResponse, blocked, onUpdate
 		// Event on the resource
 		// event on resource
 		e = event.NewEvent(
+			log,
 			er.PolicyResponse.Resource.Kind,
 			er.PolicyResponse.Resource.APIVersion,
 			er.PolicyResponse.Resource.Namespace,
diff --git a/pkg/webhooks/server.go b/pkg/webhooks/server.go
index d401cdc449..3d3fbcd857 100644
--- a/pkg/webhooks/server.go
+++ b/pkg/webhooks/server.go
@@ -10,7 +10,7 @@ import (
 	"net/http"
 	"time"
-	"github.com/golang/glog"
+	"github.com/go-logr/logr"
 	"github.com/nirmata/kyverno/pkg/checker"
 	kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
 	kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions/kyverno/v1"
@@ -69,6 +69,7 @@ type WebhookServer struct {
 	// generate request generator
 	grGenerator *generate.Generator
 	resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister
+	log logr.Logger
 }
 // NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
@@ -88,7 +89,9 @@ func NewWebhookServer(
 	pvGenerator policyviolation.GeneratorInterface,
 	grGenerator *generate.Generator,
 	resourceWebhookWatcher *webhookconfig.ResourceWebhookRegister,
-	cleanUp chan<- struct{}) (*WebhookServer, error) {
+	cleanUp chan<- struct{},
+	log logr.Logger,
+) (*WebhookServer, error) {
 	if tlsPair == nil {
 		return nil, errors.New("NewWebhookServer is not initialized properly")
@@ -120,6 +123,7 @@ func NewWebhookServer(
 		pMetaStore: pMetaStore,
 		grGenerator: grGenerator,
 		resourceWebhookWatcher: resourceWebhookWatcher,
+		log: log,
 	}
 	mux := http.NewServeMux()
 	mux.HandleFunc(config.MutatingWebhookServicePath, ws.serve)
@@ -148,8 +152,9 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
 	if admissionReview == nil {
 		return
 	}
+	logger := ws.log.WithValues("kind", admissionReview.Request.Kind, "namespace", admissionReview.Request.Namespace, "name", admissionReview.Request.Name)
 	defer func() {
-		glog.V(4).Infof("request: %v %s/%s/%s", time.Since(startTime), admissionReview.Request.Kind, admissionReview.Request.Namespace, admissionReview.Request.Name)
+		logger.V(4).Info("request processed", "processingTime", time.Since(startTime))
 	}()
 	admissionReview.Response = &v1beta1.AdmissionResponse{
@@ -195,31 +200,29 @@ func (ws *WebhookServer) serve(w http.ResponseWriter, r *http.Request) {
 }
 func (ws *WebhookServer) handleMutateAdmissionRequest(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+	logger := ws.log.WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
 	policies, err := ws.pMetaStore.ListAll()
 	if err != nil {
 		// Unable to connect to policy Lister to access policies
-		glog.Errorf("Unable to connect to policy controller to access policies. Policies are NOT being applied: %v", err)
+		logger.Error(err, "failed to list policies. Policies are NOT being applied")
 		return &v1beta1.AdmissionResponse{Allowed: true}
 	}
 	var roles, clusterRoles []string
 	// getRoleRef only if policy has roles/clusterroles defined
-	startTime := time.Now()
 	if containRBACinfo(policies) {
 		roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request)
 		if err != nil {
 			// TODO(shuting): continue apply policy if error getting roleRef?
-			glog.Errorf("Unable to get rbac information for request Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s: %v",
-				request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation, err)
+			logger.Error(err, "failed to get RBAC information for request")
 		}
 	}
-	glog.V(4).Infof("Time: webhook GetRoleRef %v", time.Since(startTime))
 	// convert RAW to unstructured
 	resource, err := convertResource(request.Object.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
 	if err != nil {
-		glog.Errorf(err.Error())
+		logger.Error(err, "failed to convert RAW resource to unstructured format")
 		return &v1beta1.AdmissionResponse{
 			Allowed: false,
@@ -245,13 +248,13 @@ func (ws *WebhookServer) handleMutateAdmissionRequest(request *v1beta1.Admission
 	patches := ws.HandleMutation(request, resource, policies, roles, clusterRoles)
 	// patch the resource with patches before handling validation rules
-	patchedResource := processResourceWithPatches(patches, request.Object.Raw)
+	patchedResource := processResourceWithPatches(patches, request.Object.Raw, logger)
 	if ws.resourceWebhookWatcher != nil && ws.resourceWebhookWatcher.RunValidationInMutatingWebhook == "true" {
 		// VALIDATION
 		ok, msg := ws.HandleValidation(request, policies, patchedResource, roles, clusterRoles)
 		if !ok {
-			glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
+			logger.Info("admission request denied")
 			return &v1beta1.AdmissionResponse{
 				Allowed: false,
 				Result: &metav1.Status{
@@ -269,7 +272,7 @@ func (ws *WebhookServer) handleMutateAdmissionRequest(request *v1beta1.Admission
 	if request.Operation == v1beta1.Create {
 		ok, msg := ws.HandleGenerate(request, policies, patchedResource, roles, clusterRoles)
 		if !ok {
-			glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
+			logger.Info("admission request denied")
 			return &v1beta1.AdmissionResponse{
 				Allowed: false,
 				Result: &metav1.Status{
@@ -292,31 +295,29 @@ func (ws *WebhookServer) handleMutateAdmissionRequest(request *v1beta1.Admission
 }
 func (ws *WebhookServer) handleValidateAdmissionRequest(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+	logger := ws.log.WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
 	policies, err := ws.pMetaStore.ListAll()
 	if err != nil {
 		// Unable to connect to policy Lister to access policies
-		glog.Errorf("Unable to connect to policy controller to access policies. Policies are NOT being applied: %v", err)
+		logger.Error(err, "failed to list policies. Policies are NOT being applied")
 		return &v1beta1.AdmissionResponse{Allowed: true}
 	}
 	var roles, clusterRoles []string
 	// getRoleRef only if policy has roles/clusterroles defined
-	startTime := time.Now()
 	if containRBACinfo(policies) {
 		roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request)
 		if err != nil {
 			// TODO(shuting): continue apply policy if error getting roleRef?
-			glog.Errorf("Unable to get rbac information for request Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s: %v",
-				request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation, err)
+			logger.Error(err, "failed to get RBAC information for request")
 		}
 	}
-	glog.V(4).Infof("Time: webhook GetRoleRef %v", time.Since(startTime))
 	// VALIDATION
 	ok, msg := ws.HandleValidation(request, policies, nil, roles, clusterRoles)
 	if !ok {
-		glog.V(4).Infof("Deny admission request: %v/%s/%s", request.Kind, request.Namespace, request.Name)
+		logger.Info("admission request denied")
 		return &v1beta1.AdmissionResponse{
 			Allowed: false,
 			Result: &metav1.Status{
@@ -336,27 +337,28 @@ func (ws *WebhookServer) handleValidateAdmissionRequest(request *v1beta1.Admissi
 // RunAsync TLS server in separate thread and returns control immediately
 func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
+	logger := ws.log
 	if !cache.WaitForCacheSync(stopCh, ws.pSynced, ws.rbSynced, ws.crbSynced) {
-		glog.Error("webhook: failed to sync informer cache")
+		logger.Info("failed to sync informer cache")
 	}
 	go func(ws *WebhookServer) {
-		glog.V(3).Infof("serving on %s\n", ws.server.Addr)
+		logger.V(3).Info("started serving requests", "addr", ws.server.Addr)
 		if err := ws.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
-			glog.Infof("HTTP server error: %v", err)
+			logger.Error(err, "failed to listen to requests")
 		}
 	}(ws)
-	glog.Info("Started Webhook Server")
+	logger.Info("starting")
 	// verifys if the admission control is enabled and active
 	// resync: 60 seconds
 	// deadline: 60 seconds (send request)
 	// max deadline: deadline*3 (set the deployment annotation as false)
 	go ws.lastReqTime.Run(ws.pLister, ws.eventGen, ws.client, checker.DefaultResync, checker.DefaultDeadline, stopCh)
-
 }
 // Stop TLS server and returns control after the server is shut down
 func (ws *WebhookServer) Stop(ctx context.Context) {
+	logger := ws.log
 	// cleanUp
 	// remove the static webhookconfigurations
 	go ws.webhookRegistrationClient.RemoveWebhookConfigurations(ws.cleanUp)
@@ -364,7 +366,7 @@ func (ws *WebhookServer) Stop(ctx context.Context) {
 	err := ws.server.Shutdown(ctx)
 	if err != nil {
 		// Error from closing listeners, or context timeout:
-		glog.Info("Server Shutdown error: ", err)
+		logger.Error(err, "shutting down server")
 		ws.server.Close()
 	}
 }
@@ -372,6 +374,7 @@ func (ws *WebhookServer) Stop(ctx context.Context) {
 // bodyToAdmissionReview creates AdmissionReview object from request body
 // Answers to the http.ResponseWriter if request is not valid
 func (ws *WebhookServer) bodyToAdmissionReview(request *http.Request, writer http.ResponseWriter) *v1beta1.AdmissionReview {
+	logger := ws.log
 	var body []byte
 	if request.Body != nil {
 		if data, err := ioutil.ReadAll(request.Body); err == nil {
@@ -379,21 +382,21 @@ func (ws *WebhookServer) bodyToAdmissionReview(request *http.Request, writer htt
 		}
 	}
 	if len(body) == 0 {
-		glog.Error("Error: empty body")
+		logger.Info("empty body")
 		http.Error(writer, "empty body", http.StatusBadRequest)
 		return nil
 	}
 	contentType := request.Header.Get("Content-Type")
 	if contentType != "application/json" {
-		glog.Error("Error: invalid Content-Type: ", contentType)
+		logger.Info("invalid Content-Type", "contentType", contentType)
 		http.Error(writer, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
 		return nil
 	}
 	admissionReview := &v1beta1.AdmissionReview{}
 	if err := json.Unmarshal(body, &admissionReview); err != nil {
-		glog.Errorf("Error: Can't decode body as AdmissionReview: %v", err)
+		logger.Error(err, "failed to decode request body to type 'AdmissionReview'")
 		http.Error(writer, "Can't decode body as AdmissionReview", http.StatusExpectationFailed)
 		return nil
 	}
diff --git a/pkg/webhooks/validation.go b/pkg/webhooks/validation.go
index cf30c61f17..6457b10a2c 100644
--- a/pkg/webhooks/validation.go
+++ b/pkg/webhooks/validation.go
@@ -2,9 +2,7 @@ package webhooks
 import (
 	"reflect"
-	"time"
-	"github.com/golang/glog"
 	kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
 	"github.com/nirmata/kyverno/pkg/engine"
 	"github.com/nirmata/kyverno/pkg/engine/context"
@@ -19,11 +17,10 @@ import (
 // If there are no errors in validating rule we apply generation rules
 // patchedResource is the (resource + patches) after applying mutation rules
 func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, policies []kyverno.ClusterPolicy, patchedResource []byte, roles, clusterRoles []string) (bool, string) {
-	glog.V(4).Infof("Receive request in validating webhook: Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
-		request.Kind.Kind, request.Namespace, request.Name, request.UID, request.Operation)
+	logger := ws.log.WithValues("action", "validation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
+	logger.V(4).Info("incoming request")
 	var policyStats []policyctr.PolicyStat
-	evalTime := time.Now()
 	// gather stats from the engine response
 	gatherStat := func(policyName string, policyResponse response.PolicyResponse) {
 		ps := policyctr.PolicyStat{}
@@ -57,7 +54,7 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
 	newR, oldR, err := extractResources(patchedResource, request)
 	if err != nil {
 		// as resource cannot be parsed, we skip processing
-		glog.Error(err)
+		logger.Error(err, "failed to extract resource")
 		return true, ""
 	}
 	userRequestInfo := kyverno.RequestInfo{
@@ -69,17 +66,17 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
 	// load incoming resource into the context
 	err = ctx.AddResource(request.Object.Raw)
 	if err != nil {
-		glog.Infof("Failed to load resource in context:%v", err)
+		logger.Error(err, "failed to load incoming resource in context")
 	}
 	err = ctx.AddUserInfo(userRequestInfo)
 	if err != nil {
-		glog.Infof("Failed to load userInfo in context:%v", err)
+		logger.Error(err, "failed to load userInfo in context")
 	}
 	err = ctx.AddSA(userRequestInfo.AdmissionUserInfo.Username)
 	if err != nil {
-		glog.Infof("Failed to load service account in context:%v", err)
+		logger.Error(err, "failed to load service account in context")
 	}
 	policyContext := engine.PolicyContext{
@@ -90,8 +87,7 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
 	}
 	var engineResponses []response.EngineResponse
 	for _, policy := range policies {
-		glog.V(2).Infof("Handling validation for Kind=%s, Namespace=%s Name=%s UID=%s patchOperation=%s",
-			newR.GetKind(), newR.GetNamespace(), newR.GetName(), request.UID, request.Operation)
+		logger.V(2).Info("evaluating policy", "policy", policy.Name)
 		policyContext.Policy = policy
 		engineResponse := engine.Validate(policyContext)
 		if reflect.DeepEqual(engineResponse, response.EngineResponse{}) {
@@ -103,17 +99,13 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
 		// Gather policy application statistics
 		gatherStat(policy.Name, engineResponse.PolicyResponse)
 		if !engineResponse.IsSuccesful() {
-			glog.V(4).Infof("Failed to apply policy %s on resource %s/%s\n", policy.Name, newR.GetNamespace(), newR.GetName())
+			logger.V(4).Info("failed to apply policy", "policy", policy.Name)
 			continue
 		}
 	}
-	glog.V(4).Infof("eval: %v %s/%s/%s ", time.Since(evalTime), request.Kind, request.Namespace, request.Name)
-	// report time
-	reportTime := time.Now()
-
 	// If Validation fails then reject the request
 	// no violations will be created on "enforce"
-	blocked := toBlockResource(engineResponses)
+	blocked := toBlockResource(engineResponses, logger)
 	// REPORTING EVENTS
 	// Scenario 1:
@@ -125,10 +117,10 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
 	// Scenario 3:
 	// all policies were applied succesfully.
 	// create an event on the resource
-	events := generateEvents(engineResponses, blocked, (request.Operation == v1beta1.Update))
+	events := generateEvents(engineResponses, blocked, (request.Operation == v1beta1.Update), logger)
 	ws.eventGen.Add(events...)
 	if blocked {
-		glog.V(4).Infof("resource %s/%s/%s is blocked\n", newR.GetKind(), newR.GetNamespace(), newR.GetName())
+		logger.V(4).Info("resource blocked")
 		sendStat(true)
 		// EVENTS
 		// - event on the Policy
@@ -137,10 +129,9 @@ func (ws *WebhookServer) HandleValidation(request *v1beta1.AdmissionRequest, pol
 	// ADD POLICY VIOLATIONS
 	// violations are created with resource on "audit"
-	pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses)
+	pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
 	ws.pvGenerator.Add(pvInfos...)
 	sendStat(false)
 	// report time end
-	glog.V(4).Infof("report: %v %s/%s/%s", time.Since(reportTime), request.Kind, request.Namespace, request.Name)
 	return true, ""
 }
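
The hunks above all apply the same conversion: printf-style glog calls become a logr.Logger that is passed explicitly (as a parameter or the ws.log field), scoped once with WithName/WithValues, and filtered with V(n) for verbosity. The sketch below is not part of the patch; it only illustrates how those converted call sites behave. The handleRequest helper and the literal key/value names are invented for the example, and the zap.New call assumes a controller-runtime release whose zap package accepts functional options on zap.Options (newer releases also provide zap.UseDevMode(true)).

// sketch.go — minimal, self-contained illustration of the logr call pattern
package main

import (
	"errors"

	"github.com/go-logr/logr"
	"sigs.k8s.io/controller-runtime/pkg/log/zap"
)

// handleRequest derives a request-scoped logger once, then emits structured
// key/value pairs instead of formatting them into the message string.
func handleRequest(log logr.Logger, kind, namespace, name string) {
	logger := log.WithName("webhook").WithValues("kind", kind, "namespace", namespace, "name", name)

	logger.V(4).Info("incoming request")    // emitted only when verbosity level 4 is enabled
	logger.Info("admission request denied") // always emitted, carrying the bound key/values

	// Errors are passed as values; logr records the error alongside the bound context.
	logger.Error(errors.New("connection refused"), "failed to list policies")
}

func main() {
	// A development-mode zap logger produces human-readable console output.
	logger := zap.New(func(o *zap.Options) { o.Development = true })
	handleRequest(logger, "Pod", "default", "nginx")
}

Binding request metadata once with WithValues is what lets the rewritten messages in the diff stay short ("admission request denied", "resource blocked"): the kind, namespace, name, uid, and operation are attached to every line produced by that logger rather than repeated in each format string.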