
refactor logging

This commit is contained in:
shivkumar dudhani 2020-03-17 16:25:34 -07:00
parent 1b1ab78f77
commit d327309d72
51 changed files with 330 additions and 523 deletions
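Across both entrypoints below, the change follows one pattern: the vendored pkg/log shim and the zap-based init are dropped, klog is wired into a logr.Logger via klogr, and that logger is installed as the controller-runtime root. A minimal sketch of the new initialization, assuming only the klog, klogr, and controller-runtime calls that appear in this diff:

package main

import (
	"flag"

	"k8s.io/klog"
	"k8s.io/klog/klogr"
	log "sigs.k8s.io/controller-runtime/pkg/log"
)

func main() {
	klog.InitFlags(nil)        // register klog's flags: -v, -logtostderr, -stderrthreshold, ...
	log.SetLogger(klogr.New()) // route all logr output through klog

	// Defaults are now set explicitly, and failures surface instead of being ignored.
	if err := flag.Set("v", "2"); err != nil {
		klog.Fatalf("failed to set log level: %v", err)
	}
	flag.Parse()

	setupLog := log.Log.WithName("setup")
	setupLog.Info("logging initialized")
}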


@ -14,12 +14,13 @@ import (
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/log"
"github.com/nirmata/kyverno/pkg/signal"
"k8s.io/apimachinery/pkg/api/errors"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"k8s.io/klog"
"k8s.io/klog/klogr"
"sigs.k8s.io/controller-runtime/pkg/log"
)
var (
@ -33,6 +34,15 @@ const (
)
func main() {
klog.InitFlags(nil)
log.SetLogger(klogr.New())
// arguments
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
if err := flag.Set("v", "2"); err != nil {
klog.Fatalf("failed to set log level: %v", err)
}
flag.Parse()
// os signal handler
stopCh := signal.SetupSignalHandler()
// create client config
@ -92,18 +102,6 @@ func main() {
}
}
func init() {
// arguments
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.Set("logtostderr", "true")
flag.Set("stderrthreshold", "WARNING")
flag.Set("v", "2")
flag.Parse()
log.SetLogger(zap.New(func(o *zap.Options) {
o.Development = true
}))
}
func removeWebhookIfExists(client *client.Client, kind string, name string) error {
logger := log.Log.WithName("removeExistingWebhook").WithValues("kind", kind, "name", name)
var err error


@ -7,7 +7,6 @@ import (
"os"
"time"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/checker"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/nirmata/kyverno/pkg/client/informers/externalversions"
@ -16,7 +15,6 @@ import (
event "github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/generate"
generatecleanup "github.com/nirmata/kyverno/pkg/generate/cleanup"
"github.com/nirmata/kyverno/pkg/log"
"github.com/nirmata/kyverno/pkg/policy"
"github.com/nirmata/kyverno/pkg/policystore"
"github.com/nirmata/kyverno/pkg/policyviolation"
@ -27,7 +25,9 @@ import (
"github.com/nirmata/kyverno/pkg/webhooks"
webhookgenerate "github.com/nirmata/kyverno/pkg/webhooks/generate"
kubeinformers "k8s.io/client-go/informers"
"sigs.k8s.io/controller-runtime/pkg/log/zap"
"k8s.io/klog"
"k8s.io/klog/klogr"
log "sigs.k8s.io/controller-runtime/pkg/log"
)
var (
@ -39,13 +39,29 @@ var (
// will be removed in future and the configuration will be set only via configmaps
filterK8Resources string
// User FQDN as CSR CN
fqdncn bool
verbosity string
setupLog = log.Log.WithName("setup")
fqdncn bool
setupLog = log.Log.WithName("setup")
)
func main() {
defer glog.Flush()
klog.InitFlags(nil)
log.SetLogger(klogr.New())
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.StringVar(&runValidationInMutatingWebhook, "runValidationInMutatingWebhook", "", "Validation will also be done using the mutation webhook, set to 'true' to enable. Older kubernetes versions do not work properly when a validation webhook is registered.")
if err := flag.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level")
os.Exit(1)
// klog.Fatalf("failed to set log level: %v", err)
}
// Generate CSR with CN as FQDN due to https://github.com/nirmata/kyverno/issues/542
flag.BoolVar(&fqdncn, "fqdn-as-cn", false, "use FQDN as Common Name in CSR")
flag.Parse()
version.PrintVersionInfo(log.Log)
// cleanUp Channel
cleanUp := make(chan struct{})
@ -289,20 +305,3 @@ func main() {
<-cleanUp
setupLog.Info("Kyverno shutdown successful")
}
func init() {
flag.StringVar(&filterK8Resources, "filterK8Resources", "", "k8 resource in format [kind,namespace,name] where policy is not evaluated by the admission webhook. example --filterKind \"[Deployment, kyverno, kyverno]\" --filterKind \"[Deployment, kyverno, kyverno],[Events, *, *]\"")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.StringVar(&runValidationInMutatingWebhook, "runValidationInMutatingWebhook", "", "Validation will also be done using the mutation webhook, set to 'true' to enable. Older kubernetes versions do not work properly when a validation webhook is registered.")
// Generate CSR with CN as FQDN due to https://github.com/nirmata/kyverno/issues/542
flag.BoolVar(&fqdncn, "fqdn-as-cn", false, "use FQDN as Common Name in CSR")
config.LogDefaultFlags(setupLog)
flag.StringVar(&verbosity, "verbosity", "", "set verbosity for logs")
flag.Parse()
log.SetLogger(zap.New(func(o *zap.Options) {
o.Development = true
}))
}


@ -1,9 +1,6 @@
package config
import (
"flag"
"os"
"github.com/go-logr/logr"
rest "k8s.io/client-go/rest"
clientcmd "k8s.io/client-go/tools/clientcmd"
@ -75,26 +72,26 @@ var (
VerifyMutatingWebhookServicePath = "/verifymutate"
)
//LogDefaultFlags sets default flags
func LogDefaultFlags(log logr.Logger) {
logger := log.WithName("LogDefaultFlags")
var err error
err = flag.Set("logtostderr", "true")
if err != nil {
logger.Error(err, "failed to set flag", "flag", "logtostderr", "value", "true")
os.Exit(1)
}
err = flag.Set("stderrthreshold", "WARNING")
if err != nil {
logger.Error(err, "failed to set flag", "flag", "stderrthreshold", "value", "WARNING")
os.Exit(1)
}
flag.Set("v", "2")
if err != nil {
logger.Error(err, "failed to set flag", "flag", "v", "value", "2")
os.Exit(1)
}
}
// //LogDefaultFlags sets default flags
// func LogDefaultFlags(log logr.Logger) {
// logger := log.WithName("LogDefaultFlags")
// var err error
// err = flag.Set("logtostderr", "true")
// if err != nil {
// logger.Error(err, "failed to set flag", "flag", "logtostderr", "value", "true")
// os.Exit(1)
// }
// err = flag.Set("stderrthreshold", "WARNING")
// if err != nil {
// logger.Error(err, "failed to set flag", "flag", "stderrthreshold", "value", "WARNING")
// os.Exit(1)
// }
// flag.Set("v", "2")
// if err != nil {
// logger.Error(err, "failed to set flag", "flag", "v", "value", "2")
// os.Exit(1)
// }
// }
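With klog.InitFlags registering logtostderr, stderrthreshold, and v itself, the grouped-defaults helper above has nothing left to own, so it is commented out and main sets the one default it needs inline. If the helper were kept, a klog-era version might look like this (a sketch; setLogDefaults and its flag map are illustrative, not part of this commit):

package config

import (
	"flag"
	"os"

	"github.com/go-logr/logr"
)

// setLogDefaults is a hypothetical replacement for the removed LogDefaultFlags,
// applying defaults to the flags that klog.InitFlags registers.
func setLogDefaults(log logr.Logger) {
	logger := log.WithName("setLogDefaults")
	defaults := map[string]string{
		"logtostderr":     "true",
		"stderrthreshold": "WARNING",
		"v":               "2",
	}
	for name, value := range defaults {
		if err := flag.Set(name, value); err != nil {
			logger.Error(err, "failed to set flag", "flag", name, "value", value)
			os.Exit(1)
		}
	}
}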
//CreateClientConfig creates client config
func CreateClientConfig(kubeconfig string, log logr.Logger) (*rest.Config, error) {


@ -5,7 +5,7 @@ import (
"strconv"
"github.com/go-logr/logr"
"github.com/nirmata/kyverno/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log"
)
//ValidationHandler for element processes


@ -8,7 +8,7 @@ import (
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log"
)
//Interface to manage context operations


@ -6,8 +6,8 @@ import (
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/variables"
"github.com/nirmata/kyverno/pkg/log"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// Generate checks for validity of generate rule on the resource


@ -10,51 +10,53 @@ import (
"strings"
"time"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
jsonpatch "github.com/evanphx/json-patch"
"github.com/go-logr/logr"
"github.com/nirmata/kyverno/pkg/engine/anchor"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/utils"
)
// ProcessOverlay processes mutation overlay on the resource
func ProcessOverlay(ruleName string, overlay interface{}, resource unstructured.Unstructured) (resp response.RuleResponse, patchedResource unstructured.Unstructured) {
func ProcessOverlay(log logr.Logger, ruleName string, overlay interface{}, resource unstructured.Unstructured) (resp response.RuleResponse, patchedResource unstructured.Unstructured) {
startTime := time.Now()
glog.V(4).Infof("started applying overlay rule %q (%v)", ruleName, startTime)
logger := log.WithValues("rule", ruleName)
logger.V(4).Info("started applying overlay rule ", "startTime", startTime)
resp.Name = ruleName
resp.Type = utils.Mutation.String()
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
glog.V(4).Infof("finished applying overlay rule %q (%v)", resp.Name, resp.RuleStats.ProcessingTime)
logger.V(4).Info("finished applying overlay rule", "processingTime", resp.RuleStats.ProcessingTime)
}()
patches, overlayerr := processOverlayPatches(resource.UnstructuredContent(), overlay)
patches, overlayerr := processOverlayPatches(logger, resource.UnstructuredContent(), overlay)
// resource does not satisfy the overlay pattern, we don't apply this rule
if !reflect.DeepEqual(overlayerr, overlayError{}) {
switch overlayerr.statusCode {
// condition key is not present in the resource, don't apply this rule
// consider as success
case conditionNotPresent:
glog.V(3).Infof("Skip applying rule '%s' on resource '%s/%s/%s': %s", ruleName, resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg())
logger.V(3).Info("skip applying rule")
resp.Success = true
return resp, resource
// conditions are not met, don't apply this rule
case conditionFailure:
glog.V(3).Infof("Skip applying rule '%s' on resource '%s/%s/%s': %s", ruleName, resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg())
logger.V(3).Info("skip applying rule")
//TODO: send zero response and not consider this as applied?
resp.Success = true
resp.Message = overlayerr.ErrorMsg()
return resp, resource
// rule application failed
case overlayFailure:
glog.Errorf("Resource %s/%s/%s: failed to process overlay: %v in the rule %s", resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.ErrorMsg(), ruleName)
logger.Info("failed to process overlay")
resp.Success = false
resp.Message = fmt.Sprintf("failed to process overlay: %v", overlayerr.ErrorMsg())
return resp, resource
default:
glog.Errorf("Resource %s/%s/%s: Unknown type of error: %v", resource.GetKind(), resource.GetNamespace(), resource.GetName(), overlayerr.Error())
logger.Info("failed to process overlay")
resp.Success = false
resp.Message = fmt.Sprintf("Unknown type of error: %v", overlayerr.Error())
return resp, resource
@ -70,7 +72,7 @@ func ProcessOverlay(ruleName string, overlay interface{}, resource unstructured.
resourceRaw, err := resource.MarshalJSON()
if err != nil {
resp.Success = false
glog.Infof("unable to marshall resource: %v", err)
logger.Error(err, "failed to marshal resource")
resp.Message = fmt.Sprintf("failed to process JSON patches: %v", err)
return resp, resource
}
@ -79,7 +81,7 @@ func ProcessOverlay(ruleName string, overlay interface{}, resource unstructured.
patchResource, err = utils.ApplyPatches(resourceRaw, patches)
if err != nil {
msg := fmt.Sprintf("failed to apply JSON patches: %v", err)
glog.V(2).Infof("%s, patches=%s", msg, string(utils.JoinPatches(patches)))
logger.V(2).Info("applying patches", "patches", string(utils.JoinPatches(patches)))
resp.Success = false
resp.Message = msg
return resp, resource
@ -87,7 +89,7 @@ func ProcessOverlay(ruleName string, overlay interface{}, resource unstructured.
err = patchedResource.UnmarshalJSON(patchResource)
if err != nil {
glog.Infof("failed to unmarshall resource to undstructured: %v", err)
logger.Error(err, "failed to unmarshal resource")
resp.Success = false
resp.Message = fmt.Sprintf("failed to process JSON patches: %v", err)
return resp, resource
@ -101,17 +103,17 @@ func ProcessOverlay(ruleName string, overlay interface{}, resource unstructured.
return resp, patchedResource
}
func processOverlayPatches(resource, overlay interface{}) ([][]byte, overlayError) {
if path, overlayerr := meetConditions(resource, overlay); !reflect.DeepEqual(overlayerr, overlayError{}) {
func processOverlayPatches(log logr.Logger, resource, overlay interface{}) ([][]byte, overlayError) {
if path, overlayerr := meetConditions(log, resource, overlay); !reflect.DeepEqual(overlayerr, overlayError{}) {
switch overlayerr.statusCode {
// anchor key does not exist in the resource, skip applying policy
case conditionNotPresent:
glog.V(4).Infof("Mutate rule: skip applying policy: %v at %s", overlayerr, path)
log.V(4).Info("skip applying policy", "path", path, "error", overlayerr)
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("Policy not applied, condition tag not present: %v at %s", overlayerr.ErrorMsg(), path))
// anchor key is not satisfied in the resource, skip applying policy
case conditionFailure:
// anchor key is not satisfied in the resource, skip applying policy
glog.V(4).Infof("Mutate rule: failed to validate condition at %s, err: %v", path, overlayerr)
log.V(4).Info("failed to validate condition", "path", path, "error", overlayerr)
return nil, newOverlayError(overlayerr.statusCode, fmt.Sprintf("Policy not applied, conditions are not met at %s, %v", path, overlayerr))
}
}
@ -383,7 +385,7 @@ func prepareJSONValue(overlay interface{}) string {
overlayWithoutAnchors := removeAnchorFromSubTree(overlay)
jsonOverlay, err := json.Marshal(overlayWithoutAnchors)
if err != nil || hasOnlyAnchors(overlay) {
glog.V(3).Info(err)
log.Log.Error(err, "failed to marshall withoutanchors or has only anchors")
return ""
}
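ProcessOverlay and processOverlayPatches now take a logr.Logger instead of reaching for package-level glog: the caller binds the rule name once with WithValues, and every message downstream, helpers included, carries that context automatically. The shape of the pattern, reduced to a sketch (processRule and helper are illustrative names):

package main

import (
	"time"

	"github.com/go-logr/logr"
	log "sigs.k8s.io/controller-runtime/pkg/log"
)

// processRule mirrors the ProcessOverlay threading pattern: accept a logger,
// enrich it once, then hand it to helpers.
func processRule(parentLog logr.Logger, ruleName string) {
	logger := parentLog.WithValues("rule", ruleName) // every message now carries rule=<name>
	startTime := time.Now()
	logger.V(4).Info("started applying overlay rule", "startTime", startTime)
	defer func() {
		logger.V(4).Info("finished applying overlay rule", "processingTime", time.Since(startTime))
	}()
	helper(logger)
}

func helper(logger logr.Logger) {
	logger.V(3).Info("skip applying rule") // rule=<name> comes along for free
}

func main() {
	processRule(log.Log, "example-rule")
}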


@ -5,18 +5,18 @@ import (
"reflect"
"strconv"
"github.com/golang/glog"
"github.com/go-logr/logr"
"github.com/nirmata/kyverno/pkg/engine/anchor"
"github.com/nirmata/kyverno/pkg/engine/validate"
"github.com/nirmata/kyverno/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func meetConditions(resource, overlay interface{}) (string, overlayError) {
return checkConditions(resource, overlay, "/")
func meetConditions(log logr.Logger, resource, overlay interface{}) (string, overlayError) {
return checkConditions(log, resource, overlay, "/")
}
// resource and overlay should be the same type
func checkConditions(resource, overlay interface{}, path string) (string, overlayError) {
func checkConditions(log logr.Logger, resource, overlay interface{}, path string) (string, overlayError) {
// overlay has no anchor, return true
if !hasNestedAnchors(overlay) {
return "", overlayError{}
@ -27,7 +27,7 @@ func checkConditions(resource, overlay interface{}, path string) (string, overla
// condition never be true in this case
if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
if hasNestedAnchors(overlay) {
glog.V(4).Infof("Found anchor on different types of element at path %s: overlay %T, resource %T", path, overlay, resource)
log.V(4).Info(fmt.Sprintf("Found anchor on different types of element at path %s: overlay %T, resource %T", path, overlay, resource))
return path, newOverlayError(conditionFailure,
fmt.Sprintf("Found anchor on different types of element at path %s: overlay %T %v, resource %T %v", path, overlay, overlay, resource, resource))
@ -45,7 +45,7 @@ func checkConditions(resource, overlay interface{}, path string) (string, overla
default:
// anchor on non map/array is invalid:
// - anchor defined on values
glog.Warningln("Found invalid conditional anchor: anchor defined on values")
log.Info("Found invalid conditional anchor: anchor defined on values")
return "", overlayError{}
}
}
@ -69,12 +69,12 @@ func checkConditionOnMap(resourceMap, overlayMap map[string]interface{}, path st
func checkConditionOnArray(resource, overlay []interface{}, path string) (string, overlayError) {
if 0 == len(overlay) {
glog.Infof("Mutate overlay pattern is empty, path %s", path)
log.Log.V(4).Info("Mutate overlay pattern is empty", "path", path)
return "", overlayError{}
}
if reflect.TypeOf(resource[0]) != reflect.TypeOf(overlay[0]) {
glog.V(4).Infof("Overlay array and resource array have elements of different types: %T and %T", overlay[0], resource[0])
log.Log.V(4).Info(fmt.Sprintf("Overlay array and resource array have elements of different types: %T and %T", overlay[0], resource[0]))
return path, newOverlayError(conditionFailure,
fmt.Sprintf("Overlay array and resource array have elements of different types: %T and %T", overlay[0], resource[0]))
}
@ -112,7 +112,7 @@ func validateConditionAnchorMap(resourceMap, anchors map[string]interface{}, pat
// resource - A: B2
func compareOverlay(resource, overlay interface{}, path string) (string, overlayError) {
if reflect.TypeOf(resource) != reflect.TypeOf(overlay) {
glog.V(4).Infof("Found anchor on different types of element: overlay %T, resource %T", overlay, resource)
log.Log.V(4).Info("Found anchor on different types of element: overlay %T, resource %T", overlay, resource)
return path, newOverlayError(conditionFailure, fmt.Sprintf("Found anchor on different types of element: overlay %T, resource %T", overlay, resource))
}
@ -141,7 +141,7 @@ func compareOverlay(resource, overlay interface{}, path string) (string, overlay
}
case string, float64, int, int64, bool, nil:
if !validate.ValidateValueWithPattern(log.Log, resource, overlay) {
glog.V(4).Infof("Mutate rule: failed validating value %v with overlay %v", resource, overlay)
log.Log.V(4).Info(fmt.Sprintf("Mutate rule: failed validating value %v with overlay %v", resource, overlay))
return path, newOverlayError(conditionFailure, fmt.Sprintf("Failed validating value %v with overlay %v", resource, overlay))
}
default:
@ -166,7 +166,7 @@ func validateNonAnchorOverlayMap(resourceMap, overlayWithoutAnchor map[string]in
continue
}
}
if newPath, err := checkConditions(resourceValue, overlayValue, curPath); !reflect.DeepEqual(err, overlayError{}) {
if newPath, err := checkConditions(log.Log, resourceValue, overlayValue, curPath); !reflect.DeepEqual(err, overlayError{}) {
return newPath, err
}
}
@ -180,7 +180,7 @@ func checkConditionsOnArrayOfSameTypes(resource, overlay []interface{}, path str
default:
for i, overlayElement := range overlay {
curPath := path + strconv.Itoa(i) + "/"
path, err := checkConditions(resource[i], overlayElement, curPath)
path, err := checkConditions(log.Log, resource[i], overlayElement, curPath)
if !reflect.DeepEqual(err, overlayError{}) {
return path, err
}


@ -7,6 +7,7 @@ import (
"testing"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func TestMeetConditions_NoAnchor(t *testing.T) {
@ -28,7 +29,7 @@ func TestMeetConditions_NoAnchor(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(nil, overlay)
_, err := meetConditions(log.Log, nil, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
@ -81,7 +82,7 @@ func TestMeetConditions_conditionalAnchorOnMap(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Assert(t, !reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`
@ -101,7 +102,7 @@ func TestMeetConditions_conditionalAnchorOnMap(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, overlayerr := meetConditions(resource, overlay)
_, overlayerr := meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
}
@ -140,7 +141,7 @@ func TestMeetConditions_DifferentTypes(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
// anchor exist
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Assert(t, strings.Contains(err.Error(), "Found anchor on different types of element at path /subsets/"))
}
@ -193,7 +194,7 @@ func TestMeetConditions_anchosInSameObject(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Error(t, err, "[overlayError:0] Failed validating value 443 with overlay 444")
}
@ -251,7 +252,7 @@ func TestMeetConditions_anchorOnPeer(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
@ -328,7 +329,7 @@ func TestMeetConditions_anchorsOnMetaAndSpec(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
@ -409,7 +410,7 @@ func TestMeetConditions_anchorsOnPeer_single(t *testing.T) {
json.Unmarshal(resourceRawAnchorOnPeers, &resource)
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
@ -443,7 +444,7 @@ func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
json.Unmarshal(resourceRawAnchorOnPeers, &resource)
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Error(t, err, "[overlayError:0] Failed validating value true with overlay false")
overlayRaw = []byte(`{
@ -472,7 +473,7 @@ func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
_, err = meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`{
@ -501,7 +502,7 @@ func TestMeetConditions_anchorsOnPeer_two(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
_, err = meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
}
@ -535,7 +536,7 @@ func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
json.Unmarshal(resourceRawAnchorOnPeers, &resource)
json.Unmarshal(overlayRaw, &overlay)
_, err := meetConditions(resource, overlay)
_, err := meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`{
@ -564,7 +565,7 @@ func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
_, err = meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
overlayRaw = []byte(`{
@ -593,7 +594,7 @@ func TestMeetConditions_anchorsOnPeer_multiple(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
_, err = meetConditions(resource, overlay)
_, err = meetConditions(log.Log, resource, overlay)
assert.Error(t, err, "[overlayError:0] Failed validating value ENV_VALUE with overlay ENV_VALUE1")
}
@ -652,7 +653,7 @@ func TestMeetConditions_AtleastOneExist(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
path, err := meetConditions(resource, overlay)
path, err := meetConditions(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
assert.Assert(t, len(path) == 0)
}


@ -8,6 +8,7 @@ import (
jsonpatch "github.com/evanphx/json-patch"
"github.com/nirmata/kyverno/pkg/engine/utils"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func compareJSONAsMap(t *testing.T, expected, actual []byte) {
@ -66,7 +67,7 @@ func TestProcessOverlayPatches_NestedListWithAnchor(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, patches != nil)
@ -166,7 +167,7 @@ func TestProcessOverlayPatches_InsertIntoArray(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, patches != nil)
@ -287,7 +288,7 @@ func TestProcessOverlayPatches_TestInsertToArray(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, patches != nil)
@ -370,7 +371,7 @@ func TestProcessOverlayPatches_ImagePullPolicy(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
@ -458,7 +459,7 @@ func TestProcessOverlayPatches_ImagePullPolicy(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
patches, err = processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
assert.Assert(t, len(patches) != 0)
@ -494,7 +495,7 @@ func TestProcessOverlayPatches_ImagePullPolicy(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
patches, err = processOverlayPatches(log.Log, resource, overlay)
assert.Error(t, err, "[overlayError:0] Policy not applied, conditions are not met at /spec/template/metadata/labels/app/, [overlayError:0] Failed validating value nginx with overlay nginx1")
assert.Assert(t, len(patches) == 0)
}
@ -523,7 +524,7 @@ func TestProcessOverlayPatches_AddingAnchor(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
@ -608,7 +609,7 @@ func TestProcessOverlayPatches_AddingAnchorInsideListElement(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
@ -686,7 +687,7 @@ func TestProcessOverlayPatches_AddingAnchorInsideListElement(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
patches, err = processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(err, overlayError{}))
assert.Assert(t, len(patches) != 0)
@ -750,7 +751,7 @@ func TestProcessOverlayPatches_anchorOnPeer(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
@ -807,7 +808,7 @@ func TestProcessOverlayPatches_anchorOnPeer(t *testing.T) {
json.Unmarshal(overlayRaw, &overlay)
patches, err = processOverlayPatches(resource, overlay)
patches, err = processOverlayPatches(log.Log, resource, overlay)
assert.Error(t, err, "[overlayError:0] Policy not applied, conditions are not met at /subsets/0/ports/0/port/, [overlayError:0] Failed validating value 443 with overlay 444")
assert.Assert(t, len(patches) == 0)
}
@ -889,7 +890,7 @@ func TestProcessOverlayPatches_insertWithCondition(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
@ -1000,7 +1001,7 @@ func TestProcessOverlayPatches_InsertIfNotPresentWithConditions(t *testing.T) {
json.Unmarshal(resourceRaw, &resource)
json.Unmarshal(overlayRaw, &overlay)
patches, overlayerr := processOverlayPatches(resource, overlay)
patches, overlayerr := processOverlayPatches(log.Log, resource, overlay)
assert.Assert(t, reflect.DeepEqual(overlayerr, overlayError{}))
assert.Assert(t, len(patches) != 0)
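The tests pass log.Log, controller-runtime's package-level root logger, as the new first argument; until SetLogger is called it delegates to a null logger, so no test setup is needed and nothing is printed. A sketch of the idiom, assuming the meetConditions signature introduced above:

package mutate

import (
	"reflect"
	"testing"

	"sigs.k8s.io/controller-runtime/pkg/log"
)

// Before log.SetLogger runs, log.Log is a no-op delegate, so tests can hand
// it to meetConditions without configuring any logging backend.
func TestMeetConditions_NullRootLogger(t *testing.T) {
	overlay := map[string]interface{}{} // no anchors, so conditions trivially hold
	if _, err := meetConditions(log.Log, nil, overlay); !reflect.DeepEqual(err, overlayError{}) {
		t.Errorf("expected empty overlayError, got %v", err)
	}
}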


@ -6,7 +6,7 @@ import (
"strings"
"time"
"github.com/golang/glog"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/utils"
@ -20,21 +20,22 @@ func applyPatch(resource []byte, patchRaw []byte) ([]byte, error) {
}
//ProcessPatches applies the patches on the resource and returns the patched resource
func ProcessPatches(rule kyverno.Rule, resource unstructured.Unstructured) (resp response.RuleResponse, patchedResource unstructured.Unstructured) {
func ProcessPatches(log logr.Logger, rule kyverno.Rule, resource unstructured.Unstructured) (resp response.RuleResponse, patchedResource unstructured.Unstructured) {
logger := log.WithValues("rule", rule.Name)
startTime := time.Now()
glog.V(4).Infof("started JSON patch rule %q (%v)", rule.Name, startTime)
logger.V(4).Info("started JSON patch", "startTime", startTime)
resp.Name = rule.Name
resp.Type = utils.Mutation.String()
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
glog.V(4).Infof("finished JSON patch rule %q (%v)", resp.Name, resp.RuleStats.ProcessingTime)
logger.V(4).Info("finished JSON patch", "processingTime", resp.RuleStats.ProcessingTime)
}()
// convert to RAW
resourceRaw, err := resource.MarshalJSON()
if err != nil {
resp.Success = false
glog.Infof("unable to marshall resource: %v", err)
logger.Error(err, "failed to marshal resource")
resp.Message = fmt.Sprintf("failed to process JSON patches: %v", err)
return resp, resource
}
@ -45,14 +46,14 @@ func ProcessPatches(rule kyverno.Rule, resource unstructured.Unstructured) (resp
// JSON patch
patchRaw, err := json.Marshal(patch)
if err != nil {
glog.V(4).Infof("failed to marshall JSON patch %v: %v", patch, err)
logger.Error(err, "failed to marshal JSON patch")
errs = append(errs, err)
continue
}
patchResource, err := applyPatch(resourceRaw, patchRaw)
// TODO: continue on error if one of the patches fails, will add the failure event in such case
if err != nil && patch.Operation == "remove" {
glog.Info(err)
log.Error(err, "failed to process JSON path or patch is a 'remove' operation")
continue
}
if err != nil {
@ -77,7 +78,7 @@ func ProcessPatches(rule kyverno.Rule, resource unstructured.Unstructured) (resp
}
err = patchedResource.UnmarshalJSON(resourceRaw)
if err != nil {
glog.Infof("failed to unmarshall resource to undstructured: %v", err)
logger.Error(err, "failed to unmmarshal resource")
resp.Success = false
resp.Message = fmt.Sprintf("failed to process JSON patches: %v", err)
return resp, resource


@ -5,6 +5,7 @@ import (
"gotest.tools/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
types "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/utils"
@ -41,7 +42,7 @@ func TestProcessPatches_EmptyPatches(t *testing.T) {
if err != nil {
t.Error(err)
}
rr, _ := ProcessPatches(emptyRule, *resourceUnstructured)
rr, _ := ProcessPatches(log.Log, emptyRule, *resourceUnstructured)
assert.Check(t, rr.Success)
assert.Assert(t, len(rr.Patches) == 0)
}
@ -70,14 +71,14 @@ func makeRuleWithPatches(patches []types.Patch) types.Rule {
func TestProcessPatches_EmptyDocument(t *testing.T) {
rule := makeRuleWithPatch(makeAddIsMutatedLabelPatch())
rr, _ := ProcessPatches(rule, unstructured.Unstructured{})
rr, _ := ProcessPatches(log.Log, rule, unstructured.Unstructured{})
assert.Assert(t, !rr.Success)
assert.Assert(t, len(rr.Patches) == 0)
}
func TestProcessPatches_AllEmpty(t *testing.T) {
emptyRule := types.Rule{}
rr, _ := ProcessPatches(emptyRule, unstructured.Unstructured{})
rr, _ := ProcessPatches(log.Log, emptyRule, unstructured.Unstructured{})
assert.Check(t, !rr.Success)
assert.Assert(t, len(rr.Patches) == 0)
}
@ -90,7 +91,7 @@ func TestProcessPatches_AddPathDoesntExist(t *testing.T) {
if err != nil {
t.Error(err)
}
rr, _ := ProcessPatches(rule, *resourceUnstructured)
rr, _ := ProcessPatches(log.Log, rule, *resourceUnstructured)
assert.Check(t, !rr.Success)
assert.Assert(t, len(rr.Patches) == 0)
}
@ -102,7 +103,7 @@ func TestProcessPatches_RemovePathDoesntExist(t *testing.T) {
if err != nil {
t.Error(err)
}
rr, _ := ProcessPatches(rule, *resourceUnstructured)
rr, _ := ProcessPatches(log.Log, rule, *resourceUnstructured)
assert.Check(t, rr.Success)
assert.Assert(t, len(rr.Patches) == 0)
}
@ -115,7 +116,7 @@ func TestProcessPatches_AddAndRemovePathsDontExist_EmptyResult(t *testing.T) {
if err != nil {
t.Error(err)
}
rr, _ := ProcessPatches(rule, *resourceUnstructured)
rr, _ := ProcessPatches(log.Log, rule, *resourceUnstructured)
assert.Check(t, !rr.Success)
assert.Assert(t, len(rr.Patches) == 0)
}
@ -129,7 +130,7 @@ func TestProcessPatches_AddAndRemovePathsDontExist_ContinueOnError_NotEmptyResul
if err != nil {
t.Error(err)
}
rr, _ := ProcessPatches(rule, *resourceUnstructured)
rr, _ := ProcessPatches(log.Log, rule, *resourceUnstructured)
assert.Check(t, rr.Success)
assert.Assert(t, len(rr.Patches) != 0)
assertEqStringAndData(t, `{"path":"/metadata/labels/label3","op":"add","value":"label3Value"}`, rr.Patches[0])
@ -142,7 +143,7 @@ func TestProcessPatches_RemovePathDoesntExist_EmptyResult(t *testing.T) {
if err != nil {
t.Error(err)
}
rr, _ := ProcessPatches(rule, *resourceUnstructured)
rr, _ := ProcessPatches(log.Log, rule, *resourceUnstructured)
assert.Check(t, rr.Success)
assert.Assert(t, len(rr.Patches) == 0)
}
@ -155,7 +156,7 @@ func TestProcessPatches_RemovePathDoesntExist_NotEmptyResult(t *testing.T) {
if err != nil {
t.Error(err)
}
rr, _ := ProcessPatches(rule, *resourceUnstructured)
rr, _ := ProcessPatches(log.Log, rule, *resourceUnstructured)
assert.Check(t, rr.Success)
assert.Assert(t, len(rr.Patches) == 1)
assertEqStringAndData(t, `{"path":"/metadata/labels/label2","op":"add","value":"label2Value"}`, rr.Patches[0])


@ -10,8 +10,8 @@ import (
"github.com/nirmata/kyverno/pkg/engine/mutate"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/engine/variables"
"github.com/nirmata/kyverno/pkg/log"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
const (
@ -74,7 +74,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
continue
}
ruleResponse, patchedResource = mutate.ProcessOverlay(rule.Name, overlay, patchedResource)
ruleResponse, patchedResource = mutate.ProcessOverlay(logger, rule.Name, overlay, patchedResource)
if ruleResponse.Success {
// - overlay pattern does not match the resource conditions
if ruleResponse.Patches == nil {
@ -90,7 +90,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
// Process Patches
if rule.Mutation.Patches != nil {
var ruleResponse response.RuleResponse
ruleResponse, patchedResource = mutate.ProcessPatches(rule, patchedResource)
ruleResponse, patchedResource = mutate.ProcessPatches(logger, rule, patchedResource)
logger.V(4).Info("patches applied successfully")
resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResponse)
incrementAppliedRuleCount(&resp)
@ -104,7 +104,7 @@ func Mutate(policyContext PolicyContext) (resp response.EngineResponse) {
if strings.Contains(PodControllers, resource.GetKind()) {
var ruleResponse response.RuleResponse
ruleResponse, patchedResource = mutate.ProcessOverlay(rule.Name, podTemplateRule, patchedResource)
ruleResponse, patchedResource = mutate.ProcessOverlay(logger, rule.Name, podTemplateRule, patchedResource)
if !ruleResponse.Success {
logger.Info("failed to insert annotation for podTemplate", "error", ruleResponse.Message)
continue


@ -6,10 +6,10 @@ import (
"reflect"
"time"
"github.com/nirmata/kyverno/pkg/log"
"github.com/nirmata/kyverno/pkg/utils"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
"github.com/minio/minio/pkg/wildcard"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"


@ -5,8 +5,8 @@ import (
"testing"
"github.com/nirmata/kyverno/pkg/engine/operator"
"github.com/nirmata/kyverno/pkg/log"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func TestValidateValueWithPattern_Bool(t *testing.T) {


@ -4,8 +4,8 @@ import (
"encoding/json"
"testing"
"github.com/nirmata/kyverno/pkg/log"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func TestValidateMap(t *testing.T) {


@ -12,8 +12,8 @@ import (
"github.com/nirmata/kyverno/pkg/engine/utils"
"github.com/nirmata/kyverno/pkg/engine/validate"
"github.com/nirmata/kyverno/pkg/engine/variables"
"github.com/nirmata/kyverno/pkg/log"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
//Validate applies validation rules from policy on the resource


@ -6,7 +6,7 @@ import (
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// STRINGS


@ -6,8 +6,8 @@ import (
"testing"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/log"
authenticationv1 "k8s.io/api/authentication/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
"github.com/nirmata/kyverno/pkg/engine/context"
)


@ -5,8 +5,8 @@ import (
"testing"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/log"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func Test_subVars_success(t *testing.T) {


@ -1,19 +1,20 @@
package cleanup
import (
"github.com/golang/glog"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
apierrors "k8s.io/apimachinery/pkg/api/errors"
)
func (c *Controller) processGR(gr kyverno.GenerateRequest) error {
logger := c.log.WithValues("kind", gr.Kind, "namespace", gr.Namespace, "name", gr.Name)
// 1- Corresponding policy has been deleted
// then we dont delete the generated resources
// 2- The trigger resource is deleted, then delete the generated resources
if !ownerResourceExists(c.client, gr) {
if err := deleteGeneratedResources(c.client, gr); err != nil {
if !ownerResourceExists(logger, c.client, gr) {
if err := deleteGeneratedResources(logger, c.client, gr); err != nil {
return err
}
// - trigger-resource is deleted
@ -24,25 +25,25 @@ func (c *Controller) processGR(gr kyverno.GenerateRequest) error {
return nil
}
func ownerResourceExists(client *dclient.Client, gr kyverno.GenerateRequest) bool {
func ownerResourceExists(log logr.Logger, client *dclient.Client, gr kyverno.GenerateRequest) bool {
_, err := client.GetResource(gr.Spec.Resource.Kind, gr.Spec.Resource.Namespace, gr.Spec.Resource.Name)
// trigger resources has been deleted
if apierrors.IsNotFound(err) {
return false
}
if err != nil {
glog.V(4).Infof("Failed to get resource %s/%s/%s: error : %s", gr.Spec.Resource.Kind, gr.Spec.Resource.Namespace, gr.Spec.Resource.Name, err)
log.Error(err, "failed to get resource", "genKind", gr.Spec.Resource.Kind, "genNamespace", gr.Spec.Resource.Namespace, "genName", gr.Spec.Resource.Name)
}
// if there was an error while querying the resources we dont delete the generated resources
// but expect the deletion in next reconciliation loop
return true
}
func deleteGeneratedResources(client *dclient.Client, gr kyverno.GenerateRequest) error {
func deleteGeneratedResources(log logr.Logger, client *dclient.Client, gr kyverno.GenerateRequest) error {
for _, genResource := range gr.Status.GeneratedResources {
err := client.DeleteResource(genResource.Kind, genResource.Namespace, genResource.Name, false)
if apierrors.IsNotFound(err) {
glog.V(4).Infof("resource %s/%s/%s not found, will no delete", genResource.Kind, genResource.Namespace, genResource.Name)
log.Error(err, "resource not foundl will not delete", "genKind", gr.Spec.Resource.Kind, "genNamespace", gr.Spec.Resource.Namespace, "genName", gr.Spec.Resource.Name)
continue
}
if err != nil {


@ -3,8 +3,6 @@ package cleanup
import (
"time"
"github.com/golang/glog"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
@ -181,7 +179,7 @@ func (c *Controller) enqueue(gr *kyverno.GenerateRequest) {
logger := c.log
key, err := cache.MetaNamespaceKeyFunc(gr)
if err != nil {
glog.Error(err)
logger.Error(err, "failed to extract key")
return
}
logger.V(4).Info("eneque generate request", "name", gr.Name)


@ -3,8 +3,8 @@ package generate
import (
"fmt"
"github.com/nirmata/kyverno/pkg/log"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func manageLabels(unstr *unstructured.Unstructured, triggerResource unstructured.Unstructured) {


@ -1,9 +1,9 @@
package generate
import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
"sigs.k8s.io/controller-runtime/pkg/log"
)
//StatusControlInterface provides interface to update status subresource
@ -25,10 +25,10 @@ func (sc StatusControl) Failed(gr kyverno.GenerateRequest, message string, genRe
gr.Status.GeneratedResources = genResources
_, err := sc.client.KyvernoV1().GenerateRequests("kyverno").UpdateStatus(&gr)
if err != nil {
glog.V(4).Infof("FAILED: updated gr %s status to %s", gr.Name, string(kyverno.Failed))
log.Log.Error(err, "failed to update generate request status", "name", gr.Name)
return err
}
glog.V(4).Infof("updated gr %s status to %s", gr.Name, string(kyverno.Failed))
log.Log.Info("updated generate request status", "name", gr.Name, "status", string(kyverno.Failed))
return nil
}
@ -41,9 +41,9 @@ func (sc StatusControl) Success(gr kyverno.GenerateRequest, genResources []kyver
_, err := sc.client.KyvernoV1().GenerateRequests("kyverno").UpdateStatus(&gr)
if err != nil {
glog.V(4).Infof("FAILED: updated gr %s status to %s", gr.Name, string(kyverno.Completed))
log.Log.Error(err, "failed to update generate request status", "name", gr.Name)
return err
}
glog.V(4).Infof("updated gr %s status to %s", gr.Name, string(kyverno.Completed))
log.Log.Info("updated generate request status", "name", gr.Name, "status", string(kyverno.Completed))
return nil
}
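StatusControl has no logger field, so it uses the package-level log.Log directly, and the two outcomes map onto the two logr call shapes: Error carries the error as a dedicated first argument, while Info reports the new status as key-value pairs. A standalone sketch of those call shapes:

package main

import (
	"errors"

	"k8s.io/klog/klogr"
	"sigs.k8s.io/controller-runtime/pkg/log"
)

func main() {
	log.SetLogger(klogr.New()) // without this, log.Log silently no-ops

	name, status := "gr-example", "Completed"

	// Failure path: the error is logr's first-class argument, not interpolated text.
	err := errors.New("update failed")
	log.Log.Error(err, "failed to update generate request status", "name", name)

	// Success path: structured fields replace the printf-style glog message.
	log.Log.Info("updated generate request status", "name", name, "status", status)
}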


@ -1,121 +0,0 @@
package log
import (
"sync"
"github.com/go-logr/logr"
)
// loggerPromise knows how to populate a concrete logr.Logger
// with options, given an actual base logger later on down the line.
type loggerPromise struct {
logger *DelegatingLogger
childPromises []*loggerPromise
promisesLock sync.Mutex
name *string
tags []interface{}
}
// WithName provides a new Logger with the name appended
func (p *loggerPromise) WithName(l *DelegatingLogger, name string) *loggerPromise {
res := &loggerPromise{
logger: l,
name: &name,
promisesLock: sync.Mutex{},
}
p.promisesLock.Lock()
defer p.promisesLock.Unlock()
p.childPromises = append(p.childPromises, res)
return res
}
// WithValues provides a new Logger with the tags appended
func (p *loggerPromise) WithValues(l *DelegatingLogger, tags ...interface{}) *loggerPromise {
res := &loggerPromise{
logger: l,
tags: tags,
promisesLock: sync.Mutex{},
}
p.promisesLock.Lock()
defer p.promisesLock.Unlock()
p.childPromises = append(p.childPromises, res)
return res
}
// Fulfill instantiates the Logger with the provided logger
func (p *loggerPromise) Fulfill(parentLogger logr.Logger) {
var logger = parentLogger
if p.name != nil {
logger = logger.WithName(*p.name)
}
if p.tags != nil {
logger = logger.WithValues(p.tags...)
}
p.logger.Logger = logger
p.logger.promise = nil
for _, childPromise := range p.childPromises {
childPromise.Fulfill(logger)
}
}
// DelegatingLogger is a logr.Logger that delegates to another logr.Logger.
// If the underlying promise is not nil, it registers calls to sub-loggers with
// the logging factory to be populated later, and returns a new delegating
// logger. It expects to have *some* logr.Logger set at all times (generally
// a no-op logger before the promises are fulfilled).
type DelegatingLogger struct {
logr.Logger
promise *loggerPromise
}
// WithName provides a new Logger with the name appended
func (l *DelegatingLogger) WithName(name string) logr.Logger {
if l.promise == nil {
return l.Logger.WithName(name)
}
res := &DelegatingLogger{Logger: l.Logger}
promise := l.promise.WithName(res, name)
res.promise = promise
return res
}
// WithValues provides a new Logger with the tags appended
func (l *DelegatingLogger) WithValues(tags ...interface{}) logr.Logger {
if l.promise == nil {
return l.Logger.WithValues(tags...)
}
res := &DelegatingLogger{Logger: l.Logger}
promise := l.promise.WithValues(res, tags...)
res.promise = promise
return res
}
// Fulfill switches the logger over to use the actual logger
// provided, instead of the temporary initial one, if this method
// has not been previously called.
func (l *DelegatingLogger) Fulfill(actual logr.Logger) {
if l.promise != nil {
l.promise.Fulfill(actual)
}
}
// NewDelegatingLogger constructs a new DelegatingLogger which uses
// the given logger before it's promise is fulfilled.
func NewDelegatingLogger(initial logr.Logger) *DelegatingLogger {
l := &DelegatingLogger{
Logger: initial,
promise: &loggerPromise{promisesLock: sync.Mutex{}},
}
l.promise.logger = l
return l
}
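This deleted package (together with the two files that follow) is a vendored copy of controller-runtime's deferred-logging machinery, removed because sigs.k8s.io/controller-runtime/pkg/log ships the same DelegatingLogger, SetLogger, and NullLogger. What the promise mechanism buys, in a sketch: loggers derived before SetLogger is called start out as no-ops and are retroactively wired up when the real logger arrives.

package main

import (
	"k8s.io/klog"
	"k8s.io/klog/klogr"
	log "sigs.k8s.io/controller-runtime/pkg/log"
)

func main() {
	klog.InitFlags(nil)

	// log.Log is a DelegatingLogger backed by a NullLogger, so deriving
	// sub-loggers before SetLogger is safe; this message is dropped.
	early := log.Log.WithName("early")
	early.Info("dropped: no concrete logger installed yet")

	// SetLogger fulfills the recorded promises: the same 'early' logger
	// now writes through klog, with its name attached.
	log.SetLogger(klogr.New())
	early.Info("delivered through klog")
}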


@ -1,15 +0,0 @@
package log
import (
"github.com/go-logr/logr"
)
// SetLogger sets a concrete logging implementation for all deferred Loggers.
func SetLogger(l logr.Logger) {
Log.Fulfill(l)
}
// Log is the base logger used by kubebuilder. It delegates
// to another logr.Logger. You *must* call SetLogger to
// get any actual logging.
var Log = NewDelegatingLogger(NullLogger{})


@ -1,44 +0,0 @@
package log
import (
"github.com/go-logr/logr"
)
// NB: this is the same as the null logger logr/testing,
// but avoids accidentally adding the testing flags to
// all binaries.
// NullLogger is a logr.Logger that does nothing.
type NullLogger struct{}
var _ logr.Logger = NullLogger{}
// Info implements logr.InfoLogger
func (NullLogger) Info(_ string, _ ...interface{}) {
// Do nothing.
}
// Enabled implements logr.InfoLogger
func (NullLogger) Enabled() bool {
return false
}
// Error implements logr.Logger
func (NullLogger) Error(_ error, _ string, _ ...interface{}) {
// Do nothing.
}
// V implements logr.Logger
func (log NullLogger) V(_ int) logr.InfoLogger {
return log
}
// WithName implements logr.Logger
func (log NullLogger) WithName(_ string) logr.Logger {
return log
}
// WithValues implements logr.Logger
func (log NullLogger) WithValues(_ ...interface{}) logr.Logger {
return log
}


@ -5,10 +5,10 @@ import (
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
dclient "github.com/nirmata/kyverno/pkg/dclient"
"github.com/nirmata/kyverno/pkg/log"
"github.com/nirmata/kyverno/pkg/policy/generate"
"github.com/nirmata/kyverno/pkg/policy/mutate"
"github.com/nirmata/kyverno/pkg/policy/validate"
"sigs.k8s.io/controller-runtime/pkg/log"
)
//Validation provides methods to validate a rule


@ -6,7 +6,7 @@ import (
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/context"
"github.com/nirmata/kyverno/pkg/engine/variables"
"github.com/nirmata/kyverno/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/log"
)
//ContainsUserInfo returns error is userInfo is defined


@ -1,15 +1,13 @@
package policy
import (
"fmt"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"k8s.io/client-go/tools/cache"
)
func (pc *PolicyController) addClusterPolicyViolation(obj interface{}) {
pv := obj.(*kyverno.ClusterPolicyViolation)
logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
if pv.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
@ -22,15 +20,15 @@ func (pc *PolicyController) addClusterPolicyViolation(obj interface{}) {
ps := pc.getPolicyForClusterPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("Cluster Policy Violation %s does not belong to an active policy, will be cleanedup", pv.Name)
logger.V(4).Info("Cluster Policy Violation does not belong to an active policy, will be cleanedup")
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
glog.Errorf("Failed to deleted cluster policy violation %s: %v", pv.Name, err)
logger.Error(err, "failed to delete resource")
return
}
glog.V(4).Infof("Cluster Policy Violation %s deleted", pv.Name)
logger.V(4).Info("resource deleted")
return
}
glog.V(4).Infof("Cluster Policy Violation %s added.", pv.Name)
logger.V(4).Info("resource added")
for _, p := range ps {
pc.enqueuePolicy(p)
}
@ -44,19 +42,20 @@ func (pc *PolicyController) updateClusterPolicyViolation(old, cur interface{}) {
// Two different versions of the same replica set will always have different RVs.
return
}
logger := pc.log.WithValues("kind", curPV.Kind, "namespace", curPV.Namespace, "name", curPV.Name)
ps := pc.getPolicyForClusterPolicyViolation(curPV)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("Cluster Policy Violation %s does not belong to an active policy, will be cleanedup", curPV.Name)
logger.V(4).Info("Cluster Policy Violation does not belong to an active policy, will be cleanedup")
if err := pc.pvControl.DeleteClusterPolicyViolation(curPV.Name); err != nil {
glog.Errorf("Failed to deleted cluster policy violation %s: %v", curPV.Name, err)
logger.Error(err, "failed to delete resource")
return
}
glog.V(4).Infof("PolicyViolation %s deleted", curPV.Name)
logger.V(4).Info("resource deleted")
return
}
glog.V(4).Infof("Cluster PolicyViolation %s updated", curPV.Name)
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
@ -67,6 +66,7 @@ func (pc *PolicyController) updateClusterPolicyViolation(old, cur interface{}) {
// a DeletionFinalStateUnknown marker item.
func (pc *PolicyController) deleteClusterPolicyViolation(obj interface{}) {
logger := pc.log
pv, ok := obj.(*kyverno.ClusterPolicyViolation)
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
// in the list, leading to the insertion of a tombstone object which contains
@ -75,33 +75,35 @@ func (pc *PolicyController) deleteClusterPolicyViolation(obj interface{}) {
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
pv, ok = tombstone.Obj.(*kyverno.ClusterPolicyViolation)
if !ok {
glog.Info(fmt.Errorf("Couldn't get object from tombstone %#v", obj))
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
}
logger = logger.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
ps := pc.getPolicyForClusterPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("Cluster Policy Violation %s does not belong to an active policy, will be cleanedup", pv.Name)
logger.V(4).Info("Cluster Policy Violation does not belong to an active policy, will be cleanedup")
if err := pc.pvControl.DeleteClusterPolicyViolation(pv.Name); err != nil {
glog.Errorf("Failed to deleted cluster policy violation %s: %v", pv.Name, err)
logger.Error(err, "failed to delete resource")
return
}
glog.V(4).Infof("Cluster Policy Violation %s deleted", pv.Name)
logger.V(4).Info("resource deleted")
return
}
glog.V(4).Infof("Cluster PolicyViolation %s updated", pv.Name)
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) getPolicyForClusterPolicyViolation(pv *kyverno.ClusterPolicyViolation) []*kyverno.ClusterPolicy {
logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
policies, err := pc.pLister.GetPolicyForPolicyViolation(pv)
if err != nil || len(policies) == 0 {
return nil
@ -113,8 +115,7 @@ func (pc *PolicyController) getPolicyForClusterPolicyViolation(pv *kyverno.Clust
if len(policies) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one policy is selecting policy violation %s with labels: %#v, returning %s",
pv.Name, pv.Labels, policies[0].Name)
logger.V(4).Info("user error! more than one policy is selecting policy violation", "labels", pv.Labels, "policy", policies[0].Name)
}
return policies
}
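The delete handlers keep client-go's tombstone unwrapping but now report through the controller's logr instance, binding the violation's kind/namespace/name once the object is recovered. The core of that pattern as a sketch, with *Widget standing in for the concrete *kyverno.ClusterPolicyViolation:

package main

import (
	"k8s.io/client-go/tools/cache"
	"k8s.io/klog/klogr"
)

// Widget stands in for the concrete resource type unwrapped by the handler.
type Widget struct{ Name string }

func handleDelete(obj interface{}) {
	logger := klogr.New().WithName("deleteHandler")

	w, ok := obj.(*Widget)
	if !ok {
		// A dropped delete event is redelivered wrapped in a tombstone.
		tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
		if !ok {
			logger.Info("couldn't get object from tombstone", "obj", obj)
			return
		}
		if w, ok = tombstone.Obj.(*Widget); !ok {
			logger.Info("couldn't get object from tombstone", "obj", obj)
			return
		}
	}
	// V(4) mirrors the handlers' verbosity; raise -v to see it.
	logger.WithValues("name", w.Name).V(4).Info("resource deleted")
}

func main() {
	handleDelete(cache.DeletedFinalStateUnknown{Key: "ns/example", Obj: &Widget{Name: "example"}})
}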


@ -3,10 +3,10 @@ package policy
import (
"fmt"
"github.com/golang/glog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func buildPolicyLabel(policyName string) (labels.Selector, error) {
@ -27,7 +27,7 @@ func buildPolicyLabel(policyName string) (labels.Selector, error) {
func transformResource(resource unstructured.Unstructured) []byte {
data, err := resource.MarshalJSON()
if err != nil {
glog.Errorf("failed to marshall resource %v: %v", resource, err)
log.Log.Error(err, "failed to marshal resource")
return nil
}
return data


@ -148,7 +148,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
// aggregator
// pc.statusAggregator = NewPolicyStatAggregator(kyvernoClient, pInformer)
pc.statusAggregator = NewPolicyStatAggregator(kyvernoClient)
pc.statusAggregator = NewPolicyStatAggregator(pc.log, kyvernoClient)
return &pc, nil
}


@ -1,14 +1,13 @@
package policy
import (
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
cache "k8s.io/client-go/tools/cache"
)
func (pc *PolicyController) addNamespacedPolicyViolation(obj interface{}) {
logger := pc.log
pv := obj.(*kyverno.PolicyViolation)
logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
if pv.DeletionTimestamp != nil {
// On a restart of the controller manager, it's possible for an object to
@ -21,16 +20,16 @@ func (pc *PolicyController) addNamespacedPolicyViolation(obj interface{}) {
ps := pc.getPolicyForNamespacedPolicyViolation(pv)
if len(ps) == 0 {
// there is no cluster policy for this violation, so we can delete this cluster policy violation
logger.V(4).Info("namepaced policy violation does not belong to any active policy, will be cleanedup", "namespacePolicyViolation", pv.Name)
logger.V(4).Info("namepaced policy violation does not belong to an active policy, will be cleanedup")
if err := pc.pvControl.DeleteNamespacedPolicyViolation(pv.Namespace, pv.Name); err != nil {
logger.Error(err, "failed to delete namespaced policy violation", "namespacedPolicyViolation", pv.Name)
logger.Error(err, "failed to delete resource")
return
}
logger.V(4).Info("resource delete", "namespacePOlicyViolation", pv.Name)
logger.V(4).Info("resource deleted")
return
}
glog.V(4).Infof("Orphan Policy Violation %s added.", pv.Name)
logger.V(4).Info("resource added")
for _, p := range ps {
pc.enqueuePolicy(p)
}
@ -44,19 +43,20 @@ func (pc *PolicyController) updateNamespacedPolicyViolation(old, cur interface{}
// Two different versions of the same replica set will always have different RVs.
return
}
logger := pc.log.WithValues("kind", curPV.Kind, "namespace", curPV.Namespace, "name", curPV.Name)
ps := pc.getPolicyForNamespacedPolicyViolation(curPV)
if len(ps) == 0 {
// there is no namespaced policy for this violation, so we can delete this cluster policy violation
glog.V(4).Infof("Namespaced Policy Violation %s does not belong to an active policy, will be cleanedup", curPV.Name)
logger.V(4).Info("nameapced policy violation does not belong to an active policy, will be cleanedup")
if err := pc.pvControl.DeleteNamespacedPolicyViolation(curPV.Namespace, curPV.Name); err != nil {
glog.Errorf("Failed to deleted namespaced policy violation %s: %v", curPV.Name, err)
logger.Error(err, "failed to delete resource")
return
}
glog.V(4).Infof("Namespaced Policy Violation %s deleted", curPV.Name)
logger.V(4).Info("resource deleted")
return
}
glog.V(4).Infof("Namespaced Policy sViolation %s updated", curPV.Name)
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
@@ -64,6 +64,7 @@ func (pc *PolicyController) updateNamespacedPolicyViolation(old, cur interface{}
}
func (pc *PolicyController) deleteNamespacedPolicyViolation(obj interface{}) {
logger := pc.log
pv, ok := obj.(*kyverno.PolicyViolation)
// When a delete is dropped, the relist will notice a PolicyViolation in the store not
// in the list, leading to the insertion of a tombstone object which contains
@@ -72,34 +73,36 @@ func (pc *PolicyController) deleteNamespacedPolicyViolation(obj interface{}) {
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
glog.Infof("Couldn't get object from tombstone %#v", obj)
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
pv, ok = tombstone.Obj.(*kyverno.PolicyViolation)
if !ok {
glog.Infof("Couldn't get object from tombstone %#v", obj)
logger.Info("Couldn't get object from tombstone", "obj", obj)
return
}
}
logger = logger.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
ps := pc.getPolicyForNamespacedPolicyViolation(pv)
if len(ps) == 0 {
// there is no policy for this violation, so we can delete this namespaced policy violation
glog.V(4).Infof("Namespaced Policy Violation %s does not belong to an active policy, will be cleanedup", pv.Name)
logger.V(4).Info("nameapced policy violation does not belong to an active policy, will be cleanedup")
if err := pc.pvControl.DeleteNamespacedPolicyViolation(pv.Namespace, pv.Name); err != nil {
glog.Errorf("Failed to deleted namespaced policy violation %s: %v", pv.Name, err)
logger.Error(err, "failed to delete resource")
return
}
glog.V(4).Infof("Namespaced Policy Violation %s deleted", pv.Name)
logger.V(4).Info("resource deleted")
return
}
glog.V(4).Infof("Namespaced PolicyViolation %s updated", pv.Name)
logger.V(4).Info("resource updated")
for _, p := range ps {
pc.enqueuePolicy(p)
}
}
func (pc *PolicyController) getPolicyForNamespacedPolicyViolation(pv *kyverno.PolicyViolation) []*kyverno.ClusterPolicy {
logger := pc.log.WithValues("kind", pv.Kind, "namespace", pv.Namespace, "name", pv.Name)
policies, err := pc.pLister.GetPolicyForNamespacedPolicyViolation(pv)
if err != nil || len(policies) == 0 {
return nil
@@ -111,8 +114,7 @@ func (pc *PolicyController) getPolicyForNamespacedPolicyViolation(pv *kyverno.Po
if len(policies) > 1 {
// ControllerRef will ensure we don't do anything crazy, but more than one
// item in this list nevertheless constitutes user error.
glog.V(4).Infof("user error! more than one policy is selecting policy violation %s with labels: %#v, returning %s",
pv.Name, pv.Labels, policies[0].Name)
logger.V(4).Info("user error! more than one policy is selecting policy violation", "labels", pv.Labels, "policy", policies[0].Name)
}
return policies
}
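Note: the delete handler above guards against missed delete events by unwrapping cache.DeletedFinalStateUnknown tombstones before the type assertion. A sketch of that unwrapping as a standalone helper (the helper name is hypothetical; the types and the cache import match the file above):

// extractPolicyViolation unwraps an informer delete notification, which may
// be the object itself or a tombstone wrapping its last known state.
func extractPolicyViolation(obj interface{}) (*kyverno.PolicyViolation, bool) {
	if pv, ok := obj.(*kyverno.PolicyViolation); ok {
		return pv, true
	}
	tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
	if !ok {
		return nil, false
	}
	pv, ok := tombstone.Obj.(*kyverno.PolicyViolation)
	return pv, ok
}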

View file

@@ -3,7 +3,7 @@ package policy
import (
"fmt"
"github.com/golang/glog"
"github.com/go-logr/logr"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/event"
"github.com/nirmata/kyverno/pkg/policyviolation"
@@ -15,7 +15,7 @@ import (
func (pc *PolicyController) cleanupAndReport(engineResponses []response.EngineResponse) {
logger := pc.log
// generate Events
eventInfos := generateEvents(engineResponses)
eventInfos := generateEvents(pc.log, engineResponses)
pc.eventGen.Add(eventInfos...)
// create policy violation
pvInfos := policyviolation.GeneratePVsFromEngineResponse(engineResponses, logger)
@@ -26,26 +26,27 @@ func (pc *PolicyController) cleanupAndReport(engineResponses []response.EngineRe
pc.cleanUp(engineResponses)
}
func generateEvents(ers []response.EngineResponse) []event.Info {
func generateEvents(log logr.Logger, ers []response.EngineResponse) []event.Info {
var eventInfos []event.Info
for _, er := range ers {
if er.IsSuccesful() {
continue
}
eventInfos = append(eventInfos, generateEventsPerEr(er)...)
eventInfos = append(eventInfos, generateEventsPerEr(log, er)...)
}
return eventInfos
}
func generateEventsPerEr(er response.EngineResponse) []event.Info {
func generateEventsPerEr(log logr.Logger, er response.EngineResponse) []event.Info {
logger := log.WithValues("policy", er.PolicyResponse.Policy, "kind", er.PolicyResponse.Resource.Kind, "namespace", er.PolicyResponse.Resource.Namespace, "name", er.PolicyResponse.Resource.Name)
var eventInfos []event.Info
glog.V(4).Infof("reporting results for policy '%s' application on resource '%s/%s/%s'", er.PolicyResponse.Policy, er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
logger.V(4).Info("reporting results for policy")
for _, rule := range er.PolicyResponse.Rules {
if rule.Success {
continue
}
// generate event on resource for each failed rule
glog.V(4).Infof("generation event on resource '%s/%s/%s' for policy '%s'", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name, er.PolicyResponse.Policy)
logger.V(4).Info("generating event on resource")
e := event.Info{}
e.Kind = er.PolicyResponse.Resource.Kind
e.Namespace = er.PolicyResponse.Resource.Namespace
@@ -60,7 +61,7 @@ func generateEventsPerEr(er response.EngineResponse) []event.Info {
}
// generate an event on policy for all failed rules
glog.V(4).Infof("generation event on policy '%s'", er.PolicyResponse.Policy)
logger.V(4).Info("generating event on policy")
e := event.Info{}
e.Kind = "ClusterPolicy"
e.Namespace = ""

View file

@@ -4,7 +4,7 @@ import (
"sync"
"time"
"github.com/golang/glog"
"github.com/go-logr/logr"
kyvernoclient "github.com/nirmata/kyverno/pkg/client/clientset/versioned"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@@ -21,24 +21,28 @@ type PolicyStatusAggregator struct {
mux sync.RWMutex
// stores aggregated stats for policy
policyData map[string]PolicyStatInfo
// logging implementation
log logr.Logger
}
//NewPolicyStatAggregator returns a new policy status aggregator
func NewPolicyStatAggregator(client *kyvernoclient.Clientset) *PolicyStatusAggregator {
func NewPolicyStatAggregator(log logr.Logger, client *kyvernoclient.Clientset) *PolicyStatusAggregator {
psa := PolicyStatusAggregator{
startTime: time.Now(),
ch: make(chan PolicyStat),
policyData: map[string]PolicyStatInfo{},
log: log,
}
return &psa
}
//Run begins aggregator
func (psa *PolicyStatusAggregator) Run(workers int, stopCh <-chan struct{}) {
logger := psa.log
defer utilruntime.HandleCrash()
glog.V(4).Info("Started aggregator for policy status stats")
logger.Info("Started aggregator for policy status stats")
defer func() {
glog.V(4).Info("Shutting down aggregator for policy status stats")
logger.Info("Shutting down aggregator for policy status stats")
}()
for i := 0; i < workers; i++ {
go wait.Until(psa.process, time.Second, stopCh)
@@ -53,30 +57,31 @@ func (psa *PolicyStatusAggregator) process() {
// so we don't combine the results, but instead compute the execution time for
// mutation & validation rules separately
for r := range psa.ch {
glog.V(4).Infof("received policy stats %v", r)
psa.log.V(4).Info("received policy stats", "stats", r)
psa.aggregate(r)
}
}
func (psa *PolicyStatusAggregator) aggregate(ps PolicyStat) {
logger := psa.log.WithValues("policy", ps.PolicyName)
func() {
glog.V(4).Infof("write lock update policy %s", ps.PolicyName)
logger.V(4).Info("write lock update policy")
psa.mux.Lock()
}()
defer func() {
glog.V(4).Infof("write Unlock update policy %s", ps.PolicyName)
logger.V(4).Info("write unlock update policy")
psa.mux.Unlock()
}()
if len(ps.Stats.Rules) == 0 {
glog.V(4).Infof("ignoring stats, as no rule was applied")
logger.V(4).Info("ignoring stats, as no rule was applied")
return
}
info, ok := psa.policyData[ps.PolicyName]
if !ok {
psa.policyData[ps.PolicyName] = ps.Stats
glog.V(4).Infof("added stats for policy %s", ps.PolicyName)
logger.V(4).Info("added stats for policy")
return
}
// aggregate policy information
@@ -87,19 +92,19 @@ func (psa *PolicyStatusAggregator) aggregate(ps PolicyStat) {
var zeroDuration time.Duration
if info.MutationExecutionTime != zeroDuration {
info.MutationExecutionTime = (info.MutationExecutionTime + ps.Stats.MutationExecutionTime) / 2
glog.V(4).Infof("updated avg mutation time %v", info.MutationExecutionTime)
logger.V(4).Info("updated avg mutation time", "updatedTime", info.MutationExecutionTime)
} else {
info.MutationExecutionTime = ps.Stats.MutationExecutionTime
}
if info.ValidationExecutionTime != zeroDuration {
info.ValidationExecutionTime = (info.ValidationExecutionTime + ps.Stats.ValidationExecutionTime) / 2
glog.V(4).Infof("updated avg validation time %v", info.ValidationExecutionTime)
logger.V(4).Info("updated avg validation time", "updatedTime", info.ValidationExecutionTime)
} else {
info.ValidationExecutionTime = ps.Stats.ValidationExecutionTime
}
if info.GenerationExecutionTime != zeroDuration {
info.GenerationExecutionTime = (info.GenerationExecutionTime + ps.Stats.GenerationExecutionTime) / 2
glog.V(4).Infof("updated avg generation time %v", info.GenerationExecutionTime)
logger.V(4).Info("updated avg generation time", "updatedTime", info.GenerationExecutionTime)
} else {
info.GenerationExecutionTime = ps.Stats.GenerationExecutionTime
}
@@ -107,7 +112,7 @@ func (psa *PolicyStatusAggregator) aggregate(ps PolicyStat) {
info.Rules = aggregateRules(info.Rules, ps.Stats.Rules)
// update
psa.policyData[ps.PolicyName] = info
glog.V(4).Infof("updated stats for policy %s", ps.PolicyName)
logger.V(4).Info("updated stats for policy")
}
func aggregateRules(old []RuleStatinfo, update []RuleStatinfo) []RuleStatinfo {
@@ -140,29 +145,31 @@ func aggregateRules(old []RuleStatinfo, update []RuleStatinfo) []RuleStatinfo {
//GetPolicyStats returns the policy stats
func (psa *PolicyStatusAggregator) GetPolicyStats(policyName string) PolicyStatInfo {
logger := psa.log.WithValues("policy", policyName)
func() {
glog.V(4).Infof("read lock update policy %s", policyName)
logger.V(4).Info("read lock update policy")
psa.mux.RLock()
}()
defer func() {
glog.V(4).Infof("read Unlock update policy %s", policyName)
logger.V(4).Info("read unlock update policy")
psa.mux.RUnlock()
}()
glog.V(4).Infof("read stats for policy %s", policyName)
logger.V(4).Info("read stats for policy")
return psa.policyData[policyName]
}
//RemovePolicyStats removes policy stats records
func (psa *PolicyStatusAggregator) RemovePolicyStats(policyName string) {
logger := psa.log.WithValues("policy", policyName)
func() {
glog.V(4).Infof("write lock update policy %s", policyName)
logger.V(4).Info("write lock update policy")
psa.mux.Lock()
}()
defer func() {
glog.V(4).Infof("write Unlock update policy %s", policyName)
logger.V(4).Info("write unlock update policy")
psa.mux.Unlock()
}()
glog.V(4).Infof("removing stats for policy %s", policyName)
logger.V(4).Info("removing stats for policy")
delete(psa.policyData, policyName)
}
@@ -199,7 +206,7 @@ type RuleStatinfo struct {
//SendStat sends the stat information for aggregation
func (psa *PolicyStatusAggregator) SendStat(stat PolicyStat) {
glog.V(4).Infof("sending policy stats: %v", stat)
psa.log.V(4).Info("sending policy stats", "stat", stat)
// Send over channel
psa.ch <- stat
}
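Note: aggregate folds each new sample into the stored execution time as (stored + sample) / 2. This is an exponentially weighted average rather than a true mean over all samples: each newer sample carries weight 1/2 and older samples decay geometrically. The update rule in isolation (zero means "no sample yet", as in the code above):

func updateAverage(current, sample time.Duration) time.Duration {
	if current == 0 {
		return sample // first sample is stored as-is
	}
	return (current + sample) / 2 // older samples decay geometrically
}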

View file

@@ -1,25 +1,25 @@
package policy
import (
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/labels"
)
func (pc *PolicyController) removeResourceWebhookConfiguration() error {
logger := pc.log
var err error
// get all existing policies
policies, err := pc.pLister.List(labels.NewSelector())
if err != nil {
glog.V(4).Infof("failed to list policies: %v", err)
logger.Error(err, "failed to list policies")
return err
}
if len(policies) == 0 {
glog.V(4).Info("no policies loaded, removing resource webhook configuration if one exists")
logger.V(4).Info("no policies loaded, removing resource webhook configuration if one exists")
return pc.resourceWebhookWatcher.RemoveResourceWebhookConfiguration()
}
glog.V(4).Info("no policies with mutating or validating webhook configurations, remove resource webhook configuration if one exists")
logger.V(4).Info("no policies with mutating or validating webhook configurations, remove resource webhook configuration if one exists")
return pc.resourceWebhookWatcher.RemoveResourceWebhookConfiguration()
}

View file

@@ -4,8 +4,8 @@ import (
"testing"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/log"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func Test_GeneratePVsFromEngineResponse_PathNotExist(t *testing.T) {

View file

@@ -4,7 +4,7 @@ import (
"fmt"
"reflect"
"github.com/golang/glog"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
@@ -20,9 +20,11 @@ type clusterPV struct {
cpvLister kyvernolister.ClusterPolicyViolationLister
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// logger
log logr.Logger
}
func newClusterPV(dclient *client.Client,
func newClusterPV(log logr.Logger, dclient *client.Client,
cpvLister kyvernolister.ClusterPolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
) *clusterPV {
@@ -30,6 +32,7 @@ func newClusterPV(dclient *client.Client,
dclient: dclient,
cpvLister: cpvLister,
kyvernoInterface: kyvernoInterface,
log: log,
}
return &cpv
}
@@ -51,6 +54,7 @@ func (cpv *clusterPV) create(pv kyverno.PolicyViolationTemplate) error {
}
func (cpv *clusterPV) getExisting(newPv kyverno.ClusterPolicyViolation) (*kyverno.ClusterPolicyViolation, error) {
logger := cpv.log.WithValues("namespace", newPv.Namespace, "name", newPv.Name)
var err error
// use labels
policyLabelmap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
@@ -61,7 +65,7 @@ func (cpv *clusterPV) getExisting(newPv kyverno.ClusterPolicyViolation) (*kyvern
pvs, err := cpv.cpvLister.List(ls)
if err != nil {
glog.Errorf("unable to list cluster policy violations : %v", err)
logger.Error(err, "failed to list cluster policy violations")
return nil, err
}
@@ -78,7 +82,8 @@ func (cpv *clusterPV) getExisting(newPv kyverno.ClusterPolicyViolation) (*kyvern
func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
var err error
glog.V(4).Infof("creating new policy violation for policy %s & resource %s/%s", newPv.Spec.Policy, newPv.Spec.ResourceSpec.Kind, newPv.Spec.ResourceSpec.Name)
logger := cpv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
logger.V(4).Info("creating new policy violation")
obj, err := retryGetResource(cpv.dclient, newPv.Spec.ResourceSpec)
if err != nil {
return fmt.Errorf("failed to retry getting resource for policy violation %s/%s: %v", newPv.Name, newPv.Spec.Policy, err)
@@ -90,18 +95,19 @@ func (cpv *clusterPV) createPV(newPv *kyverno.ClusterPolicyViolation) error {
// create resource
_, err = cpv.kyvernoInterface.ClusterPolicyViolations().Create(newPv)
if err != nil {
glog.V(4).Infof("failed to create Cluster Policy Violation: %v", err)
logger.Error(err, "failed to create cluster policy violation")
return err
}
glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
logger.Info("cluster policy violation created")
return nil
}
func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) error {
logger := cpv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
var err error
// check if there is any update
if reflect.DeepEqual(newPv.Spec, oldPv.Spec) {
glog.V(4).Infof("policy violation spec %v did not change so not updating it", newPv.Spec)
logger.V(4).Info("policy violation spec did not change, not upadating the resource")
return nil
}
// set name
@@ -113,7 +119,7 @@ func (cpv *clusterPV) updatePV(newPv, oldPv *kyverno.ClusterPolicyViolation) err
if err != nil {
return fmt.Errorf("failed to update cluster policy violation: %v", err)
}
glog.Infof("cluster policy violation updated for resource %v", newPv.Spec.ResourceSpec)
logger.Info("cluster policy violation created")
return nil
}

View file

@@ -5,12 +5,12 @@ import (
"time"
backoff "github.com/cenkalti/backoff"
"github.com/golang/glog"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
client "github.com/nirmata/kyverno/pkg/dclient"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
unstructured "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/labels"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func createOwnerReference(resource *unstructured.Unstructured) metav1.OwnerReference {
@@ -33,7 +33,7 @@ func retryGetResource(client *client.Client, rspec kyverno.ResourceSpec) (*unstr
var err error
getResource := func() error {
obj, err = client.GetResource(rspec.Kind, rspec.Namespace, rspec.Name)
glog.V(4).Infof("retry %v getting %s/%s/%s", i, rspec.Kind, rspec.Namespace, rspec.Name)
log.Log.V(4).Info(fmt.Sprintf("retry %v getting %s/%s/%s", i, rspec.Kind, rspec.Namespace, rspec.Name))
i++
return err
}
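Note: retryGetResource wraps the dynamic-client get in a retry loop, and this file imports cenkalti/backoff. A sketch of the likely shape under that assumption (the actual backoff policy and bounds may differ):

getResource := func() error {
	obj, err = client.GetResource(rspec.Kind, rspec.Namespace, rspec.Name)
	log.Log.V(4).Info(fmt.Sprintf("retry %v getting %s/%s/%s", i, rspec.Kind, rspec.Namespace, rspec.Name))
	i++
	return err
}

exp := backoff.NewExponentialBackOff()
exp.MaxElapsedTime = 2 * time.Second // assumption: illustrative bound, not the real one
if err := backoff.Retry(getResource, exp); err != nil {
	return nil, err
}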

View file

@@ -222,10 +222,10 @@ func (gen *Generator) syncHandler(info Info) error {
builder := newPvBuilder()
if info.Resource.GetNamespace() == "" {
// cluster scope resource generate a clusterpolicy violation
handler = newClusterPV(gen.dclient, gen.cpvLister, gen.kyvernoInterface)
handler = newClusterPV(gen.log.WithName("ClusterPV"), gen.dclient, gen.cpvLister, gen.kyvernoInterface)
} else {
// namespaced resources generated a namespaced policy violation in the namespace of the resource
handler = newNamespacedPV(gen.dclient, gen.nspvLister, gen.kyvernoInterface)
handler = newNamespacedPV(gen.log.WithName("NamespacedPV"), gen.dclient, gen.nspvLister, gen.kyvernoInterface)
}
failure := false

View file

@@ -4,7 +4,7 @@ import (
"fmt"
"reflect"
"github.com/golang/glog"
"github.com/go-logr/logr"
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
kyvernov1 "github.com/nirmata/kyverno/pkg/client/clientset/versioned/typed/kyverno/v1"
kyvernolister "github.com/nirmata/kyverno/pkg/client/listers/kyverno/v1"
@@ -20,9 +20,11 @@ type namespacedPV struct {
nspvLister kyvernolister.PolicyViolationLister
// policy violation interface
kyvernoInterface kyvernov1.KyvernoV1Interface
// logger
log logr.Logger
}
func newNamespacedPV(dclient *client.Client,
func newNamespacedPV(log logr.Logger, dclient *client.Client,
nspvLister kyvernolister.PolicyViolationLister,
kyvernoInterface kyvernov1.KyvernoV1Interface,
) *namespacedPV {
@@ -30,6 +32,7 @@ func newNamespacedPV(dclient *client.Client,
dclient: dclient,
nspvLister: nspvLister,
kyvernoInterface: kyvernoInterface,
log: log,
}
return &nspv
}
@@ -51,6 +54,7 @@ func (nspv *namespacedPV) create(pv kyverno.PolicyViolationTemplate) error {
}
func (nspv *namespacedPV) getExisting(newPv kyverno.PolicyViolation) (*kyverno.PolicyViolation, error) {
logger := nspv.log.WithValues("namespace", newPv.Namespace, "name", newPv.Name)
var err error
// use labels
policyLabelmap := map[string]string{"policy": newPv.Spec.Policy, "resource": newPv.Spec.ResourceSpec.ToKey()}
@@ -60,7 +64,7 @@ func (nspv *namespacedPV) getExisting(newPv kyverno.PolicyViolation) (*kyverno.P
}
pvs, err := nspv.nspvLister.PolicyViolations(newPv.GetNamespace()).List(ls)
if err != nil {
glog.Errorf("unable to list namespaced policy violations : %v", err)
logger.Error(err, "failed to list namespaced policy violations")
return nil, err
}
@@ -77,7 +81,8 @@ func (nspv *namespacedPV) getExisting(newPv kyverno.PolicyViolation) (*kyverno.P
func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
var err error
glog.V(4).Infof("creating new policy violation for policy %s & resource %s/%s/%s", newPv.Spec.Policy, newPv.Spec.ResourceSpec.Kind, newPv.Spec.ResourceSpec.Namespace, newPv.Spec.ResourceSpec.Name)
logger := nspv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
logger.V(4).Info("creating new policy violation")
obj, err := retryGetResource(nspv.dclient, newPv.Spec.ResourceSpec)
if err != nil {
return fmt.Errorf("failed to retry getting resource for policy violation %s/%s: %v", newPv.Name, newPv.Spec.Policy, err)
@@ -89,18 +94,19 @@ func (nspv *namespacedPV) createPV(newPv *kyverno.PolicyViolation) error {
// create resource
_, err = nspv.kyvernoInterface.PolicyViolations(newPv.GetNamespace()).Create(newPv)
if err != nil {
glog.V(4).Infof("failed to create Cluster Policy Violation: %v", err)
logger.Error(err, "failed to create namespaced policy violation")
return err
}
glog.Infof("policy violation created for resource %v", newPv.Spec.ResourceSpec)
logger.Info("namespaced policy violation created")
return nil
}
func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error {
logger := nspv.log.WithValues("policy", newPv.Spec.Policy, "kind", newPv.Spec.ResourceSpec.Kind, "namespace", newPv.Spec.ResourceSpec.Namespace, "name", newPv.Spec.ResourceSpec.Name)
var err error
// check if there is any update
if reflect.DeepEqual(newPv.Spec, oldPv.Spec) {
glog.V(4).Infof("policy violation spec %v did not change so not updating it", newPv.Spec)
logger.V(4).Info("policy violation spec did not change, not upadating the resource")
return nil
}
// set name
@@ -111,7 +117,6 @@ func (nspv *namespacedPV) updatePV(newPv, oldPv *kyverno.PolicyViolation) error
if err != nil {
return fmt.Errorf("failed to update namespaced policy violation: %v", err)
}
glog.Infof("namespaced policy violation updated for resource %v", newPv.Spec.ResourceSpec)
logger.Info("namespaced policy violation created")
return nil
}

View file

@@ -3,7 +3,6 @@ package testrunner
import (
"bytes"
"encoding/json"
"flag"
"io/ioutil"
"os"
ospath "path"
@@ -19,7 +18,6 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes/scheme"
"github.com/golang/glog"
"gopkg.in/yaml.v2"
apiyaml "k8s.io/apimachinery/pkg/util/yaml"
)
@@ -308,7 +306,7 @@ func loadPolicyResource(t *testing.T, file string) *unstructured.Unstructured {
func getClient(t *testing.T, files []string) *client.Client {
var objects []runtime.Object
if files != nil {
glog.V(4).Infof("loading resources: %v", files)
for _, file := range files {
objects = loadObjects(t, file)
}
@@ -404,7 +402,7 @@ func loadPolicy(t *testing.T, path string) *kyverno.ClusterPolicy {
policy := kyverno.ClusterPolicy{}
pBytes, err := apiyaml.ToJSON(p)
if err != nil {
glog.Error(err)
t.Error(err)
continue
}
@@ -427,7 +425,7 @@ func loadPolicy(t *testing.T, path string) *kyverno.ClusterPolicy {
}
func testScenario(t *testing.T, path string) {
flag.Set("logtostderr", "true")
// flag.Set("logtostderr", "true")
// flag.Set("v", "8")
scenario, err := loadScenario(t, path)

View file

@@ -4,8 +4,8 @@ import (
"io/ioutil"
"os"
"github.com/golang/glog"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
const (
@@ -42,7 +42,7 @@ func ConvertToUnstructured(data []byte) (*unstructured.Unstructured, error) {
resource := &unstructured.Unstructured{}
err := resource.UnmarshalJSON(data)
if err != nil {
glog.V(4).Infof("failed to unmarshall resource: %v", err)
log.Log.Error(err, "failed to unmarshal resource")
return nil, err
}
return resource, nil

View file

@@ -4,12 +4,12 @@ import (
"fmt"
"strings"
"github.com/golang/glog"
v1beta1 "k8s.io/api/admission/v1beta1"
authenticationv1 "k8s.io/api/authentication/v1"
rbacv1 "k8s.io/api/rbac/v1"
labels "k8s.io/apimachinery/pkg/labels"
rbaclister "k8s.io/client-go/listers/rbac/v1"
"sigs.k8s.io/controller-runtime/pkg/log"
)
const (
@@ -101,7 +101,7 @@ func matchSubjectsMap(subject rbacv1.Subject, userInfo authenticationv1.UserInfo
func matchServiceAccount(subject rbacv1.Subject, userInfo authenticationv1.UserInfo) bool {
subjectServiceAccount := subject.Namespace + ":" + subject.Name
if userInfo.Username[len(SaPrefix):] != subjectServiceAccount {
glog.V(3).Infof("service account not match, expect %s, got %s", subjectServiceAccount, userInfo.Username[len(SaPrefix):])
log.Log.V(3).Info(fmt.Sprintf("service account not match, expect %s, got %s", subjectServiceAccount, userInfo.Username[len(SaPrefix):]))
return false
}
@@ -117,6 +117,6 @@ func matchUserOrGroup(subject rbacv1.Subject, userInfo authenticationv1.UserInfo
}
}
glog.V(3).Infof("user/group '%v' info not found in request userInfo: %v", subject.Name, keys)
log.Log.V(3).Info(fmt.Sprintf("user/group '%v' info not found in request userInfo: %v", subject.Name, keys))
return false
}
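Note: matchServiceAccount strips the well-known service-account prefix from the request username and compares the remainder against namespace:name from the RBAC subject. A worked example with concrete values (assuming SaPrefix is the standard "system:serviceaccount:" prefix):

username := "system:serviceaccount:kyverno:kyverno-service-account"
subject := rbacv1.Subject{Kind: "ServiceAccount", Namespace: "kyverno", Name: "kyverno-service-account"}

subjectServiceAccount := subject.Namespace + ":" + subject.Name // "kyverno:kyverno-service-account"
if username[len(SaPrefix):] == subjectServiceAccount {
	// the request was made by the subject's service account
}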

View file

@@ -1,7 +1,6 @@
package userinfo
import (
"flag"
"reflect"
"testing"
@@ -153,9 +152,9 @@ func newRoleBinding(name, ns string, subjects []rbacv1.Subject, roles rbacv1.Rol
}
func Test_getRoleRefByRoleBindings(t *testing.T) {
flag.Parse()
flag.Set("logtostderr", "true")
flag.Set("v", "3")
// flag.Parse()
// flag.Set("logtostderr", "true")
// flag.Set("v", "3")
list := make([]*rbacv1.RoleBinding, 2)

View file

@@ -4,7 +4,6 @@ import (
"fmt"
"sync"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
admregapi "k8s.io/api/admissionregistration/v1beta1"
errorsapi "k8s.io/apimachinery/pkg/api/errors"
@@ -36,8 +35,9 @@ func (wrc *WebhookRegistrationClient) constructVerifyMutatingWebhookConfig(caDat
}
func (wrc *WebhookRegistrationClient) constructDebugVerifyMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
logger := wrc.log
url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.VerifyMutatingWebhookServicePath)
glog.V(4).Infof("Debug VerifyMutatingWebhookConfig is registered with url %s\n", url)
logger.V(4).Info("Debug VerifyMutatingWebhookConfig is registered with url", "url", url)
return &admregapi.MutatingWebhookConfiguration{
ObjectMeta: v1.ObjectMeta{
Name: config.VerifyMutatingWebhookConfigurationDebugName,
@@ -68,13 +68,14 @@ func (wrc *WebhookRegistrationClient) removeVerifyWebhookMutatingWebhookConfig(w
} else {
mutatingConfig = config.VerifyMutatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
logger := wrc.log.WithValues("name", mutatingConfig)
logger.V(4).Info("removing webhook configuration")
err = wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("verify webhook configuration %s, does not exits. not deleting", mutatingConfig)
logger.Error(err, "verify webhook configuration, does not exits. not deleting")
} else if err != nil {
glog.Errorf("failed to delete verify webhook configuration %s: %v", mutatingConfig, err)
logger.Error(err, "failed to delete verify wwebhook configuration")
} else {
glog.V(4).Infof("successfully deleted verify webhook configuration %s", mutatingConfig)
logger.V(4).Info("successfully deleted verify webhook configuration")
}
}

View file

@@ -3,7 +3,6 @@ package webhookconfig
import (
"io/ioutil"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
admregapi "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -11,20 +10,21 @@ import (
)
func (wrc *WebhookRegistrationClient) readCaData() []byte {
logger := wrc.log
var caData []byte
// Check if ca is defined in the secret tls-ca
// assume the key and signed cert have been defined in secret tls.kyverno
if caData = wrc.client.ReadRootCASecret(); len(caData) != 0 {
glog.V(4).Infof("read CA from secret")
logger.V(4).Info("read CA from secret")
return caData
}
glog.V(4).Infof("failed to read CA from secret, reading from kubeconfig")
logger.V(4).Info("failed to read CA from secret, reading from kubeconfig")
// load the CA from kubeconfig
if caData = extractCA(wrc.clientConfig); len(caData) != 0 {
glog.V(4).Infof("read CA from kubeconfig")
logger.V(4).Info("read CA from kubeconfig")
return caData
}
glog.V(4).Infof("failed to read CA from kubeconfig")
logger.V(4).Info("failed to read CA from kubeconfig")
return nil
}
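Note: readCaData tries two sources in order, the root CA secret first and then the CA embedded in the kubeconfig, returning nil if both are empty. The control flow reduces to a simple fallback chain; a sketch with function values standing in for the secret and kubeconfig reads:

func readCA(fromSecret, fromKubeconfig func() []byte) []byte {
	if ca := fromSecret(); len(ca) != 0 {
		return ca // preferred source: the tls-ca secret
	}
	return fromKubeconfig() // may still be empty; the caller handles nil
}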
@@ -46,10 +46,11 @@ func extractCA(config *rest.Config) (result []byte) {
}
func (wrc *WebhookRegistrationClient) constructOwner() v1.OwnerReference {
logger := wrc.log
kubePolicyDeployment, err := wrc.client.GetKubePolicyDeployment()
if err != nil {
glog.Errorf("Error when constructing OwnerReference, err: %v\n", err)
logger.Error(err, "failed to construct OwnerReference")
return v1.OwnerReference{}
}
@@ -61,38 +62,6 @@ func (wrc *WebhookRegistrationClient) constructOwner() v1.OwnerReference {
}
}
// func generateDebugWebhook(name, url string, caData []byte, validate bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.Webhook {
// sideEffect := admregapi.SideEffectClassNoneOnDryRun
// failurePolicy := admregapi.Ignore
// return admregapi.Webhook{
// Name: name,
// ClientConfig: admregapi.WebhookClientConfig{
// URL: &url,
// CABundle: caData,
// },
// SideEffects: &sideEffect,
// Rules: []admregapi.RuleWithOperations{
// admregapi.RuleWithOperations{
// Operations: operationTypes,
// Rule: admregapi.Rule{
// APIGroups: []string{
// apiGroups,
// },
// APIVersions: []string{
// apiVersions,
// },
// Resources: []string{
// resource,
// },
// },
// },
// },
// AdmissionReviewVersions: []string{"v1beta1"},
// TimeoutSeconds: &timeoutSeconds,
// FailurePolicy: &failurePolicy,
// }
// }
// debug mutating webhook
func generateDebugMutatingWebhook(name, url string, caData []byte, validate bool, timeoutSeconds int32, resource, apiGroups, apiVersions string, operationTypes []admregapi.OperationType) admregapi.MutatingWebhook {
sideEffect := admregapi.SideEffectClassNoneOnDryRun

View file

@@ -3,7 +3,6 @@ package webhookconfig
import (
"fmt"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
admregapi "k8s.io/api/admissionregistration/v1beta1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -35,8 +34,9 @@ func (wrc *WebhookRegistrationClient) contructPolicyValidatingWebhookConfig(caDa
}
func (wrc *WebhookRegistrationClient) contructDebugPolicyValidatingWebhookConfig(caData []byte) *admregapi.ValidatingWebhookConfiguration {
logger := wrc.log
url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.PolicyValidatingWebhookServicePath)
glog.V(4).Infof("Debug PolicyValidatingWebhookConfig is registered with url %s\n", url)
logger.V(4).Info("Debug PolicyValidatingWebhookConfig is registered with url ", "url", url)
return &admregapi.ValidatingWebhookConfiguration{
ObjectMeta: v1.ObjectMeta{
@@ -82,8 +82,9 @@ func (wrc *WebhookRegistrationClient) contructPolicyMutatingWebhookConfig(caData
}
}
func (wrc *WebhookRegistrationClient) contructDebugPolicyMutatingWebhookConfig(caData []byte) *admregapi.MutatingWebhookConfiguration {
logger := wrc.log
url := fmt.Sprintf("https://%s%s", wrc.serverIP, config.PolicyMutatingWebhookServicePath)
glog.V(4).Infof("Debug PolicyMutatingWebhookConfig is registered with url %s\n", url)
logger.V(4).Info("Debug PolicyMutatingWebhookConfig is registered with url ", "url", url)
return &admregapi.MutatingWebhookConfiguration{
ObjectMeta: v1.ObjectMeta{

View file

@@ -7,7 +7,6 @@ import (
"time"
"github.com/go-logr/logr"
"github.com/golang/glog"
"github.com/nirmata/kyverno/pkg/config"
client "github.com/nirmata/kyverno/pkg/dclient"
admregapi "k8s.io/api/admissionregistration/v1beta1"
@@ -190,7 +189,6 @@ func (wrc *WebhookRegistrationClient) createPolicyValidatingWebhookConfiguration
func (wrc *WebhookRegistrationClient) createPolicyMutatingWebhookConfiguration() error {
var caData []byte
var config *admregapi.MutatingWebhookConfiguration
// read CA data from
// 1) secret(config)
// 2) kubeconfig
@@ -212,8 +210,7 @@ func (wrc *WebhookRegistrationClient) createPolicyMutatingWebhookConfiguration()
if _, err := wrc.client.CreateResource(MutatingWebhookConfigurationKind, "", *config, false); err != nil {
return err
}
glog.V(4).Infof("created Mutating Webhook Configuration %s ", config.Name)
wrc.log.V(4).Info("reated Mutating Webhook Configuration", "name", config.Name)
return nil
}
@@ -243,7 +240,7 @@ func (wrc *WebhookRegistrationClient) createVerifyMutatingWebhookConfiguration()
return err
}
glog.V(4).Infof("created Mutating Webhook Configuration %s ", config.Name)
wrc.log.V(4).Info("reated Mutating Webhook Configuration", "name", config.Name)
return nil
}
@@ -252,9 +249,9 @@ func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
// Register will fail if the config exists, so there is no need to fail on error
func (wrc *WebhookRegistrationClient) removeWebhookConfigurations() {
startTime := time.Now()
glog.V(4).Infof("Started cleaning up webhookconfigurations")
wrc.log.Info("Started cleaning up webhookconfigurations")
defer func() {
glog.V(4).Infof("Finished cleaning up webhookcongfigurations (%v)", time.Since(startTime))
wrc.log.V(4).Info("Finished cleaning up webhookcongfigurations", "processingTime", time.Since(startTime))
}()
var wg sync.WaitGroup
@@ -278,13 +275,13 @@ func (wrc *WebhookRegistrationClient) removeResourceMutatingWebhookConfiguration
func (wrc *WebhookRegistrationClient) removeResourceMutatingWebhookConfiguration(wg *sync.WaitGroup) {
defer wg.Done()
if err := wrc.RemoveResourceMutatingWebhookConfiguration(); err != nil {
glog.Error(err)
wrc.log.Error(err, "failed to remove resource mutating webhook configuration")
}
}
func (wrc *WebhookRegistrationClient) removeResourceValidatingWebhookConfiguration(wg *sync.WaitGroup) {
defer wg.Done()
if err := wrc.RemoveResourceValidatingWebhookConfiguration(); err != nil {
glog.Error(err)
wrc.log.Error(err, "failed to remove resource validation webhook configuration")
}
}
@@ -299,15 +296,15 @@ func (wrc *WebhookRegistrationClient) removePolicyMutatingWebhookConfiguration(w
} else {
mutatingConfig = config.PolicyMutatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", mutatingConfig)
logger := wrc.log.WithValues("name", mutatingConfig)
logger.V(4).Info("removing mutating webhook configuration")
err := wrc.client.DeleteResource(MutatingWebhookConfigurationKind, "", mutatingConfig, false)
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", mutatingConfig)
logger.Error(err, "policy mutating webhook configuration does not exist, not deleting")
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", mutatingConfig, err)
logger.Error(err, "failed to delete policy mutating webhook configuration")
} else {
glog.V(4).Infof("successfully deleted policy webhook configuration %s", mutatingConfig)
logger.V(4).Info("successfully deleted policy mutating webhook configutation")
}
}
@@ -322,13 +319,14 @@ func (wrc *WebhookRegistrationClient) removePolicyValidatingWebhookConfiguration
} else {
validatingConfig = config.PolicyValidatingWebhookConfigurationName
}
glog.V(4).Infof("removing webhook configuration %s", validatingConfig)
logger := wrc.log.WithValues("name", validatingConfig)
logger.V(4).Info("removing validating webhook configuration")
err := wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", validatingConfig, false)
if errorsapi.IsNotFound(err) {
glog.V(4).Infof("policy webhook configuration %s, does not exits. not deleting", validatingConfig)
logger.Error(err, "policy validating webhook configuration does not exist, not deleting")
} else if err != nil {
glog.Errorf("failed to delete policy webhook configuration %s: %v", validatingConfig, err)
logger.Error(err, "failed to delete policy validating webhook configuration")
} else {
glog.V(4).Infof("successfully deleted policy webhook configuration %s", validatingConfig)
logger.V(4).Info("successfully deleted policy validating webhook configutation")
}
}
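Note: both removal helpers above share one pattern: delete the configuration and treat NotFound as a non-event rather than a failure. A sketch of the pattern (the hunks above route the NotFound case through logger.Error; logging it at V(4), as sketched here, is an alternative since absence is expected during cleanup):

err := wrc.client.DeleteResource(ValidatingWebhookConfigurationKind, "", name, false)
switch {
case errorsapi.IsNotFound(err):
	logger.V(4).Info("webhook configuration does not exist, nothing to delete")
case err != nil:
	logger.Error(err, "failed to delete webhook configuration")
default:
	logger.V(4).Info("successfully deleted webhook configuration")
}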

View file

@@ -4,9 +4,9 @@ import (
"testing"
"github.com/nirmata/kyverno/pkg/engine/response"
"github.com/nirmata/kyverno/pkg/log"
"gotest.tools/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func newPolicyResponse(policy, rule string, patchesStr []string, success bool) response.PolicyResponse {

View file

@@ -7,8 +7,8 @@ import (
kyverno "github.com/nirmata/kyverno/pkg/api/kyverno/v1"
"github.com/nirmata/kyverno/pkg/engine/utils"
"github.com/nirmata/kyverno/pkg/log"
"gotest.tools/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
func compareJSONAsMap(t *testing.T, expected, actual []byte) {