Mirror of https://github.com/kyverno/kyverno.git (synced 2025-03-28 10:28:36 +00:00)

refactor: separate resource mutation/validation handlers from server (#3908)

* refactor: webhooks server logger
* refactor: separate policy mutation/validation handlers from server
* separate resource mutation from server code

Signed-off-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Parent: 52cc493e57
Commit: c112aaefa1
21 changed files with 1234 additions and 1352 deletions
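The refactor is easiest to read through the Handlers interface that both new webhook packages implement and that the slimmed-down server composes. A minimal sketch of that contract, reconstructed from the hunks below (the interface is assumed to now live in pkg/webhooks, since the policy package returns webhooks.Handlers):

package webhooks

import (
	"github.com/go-logr/logr"
	admissionv1 "k8s.io/api/admission/v1"
)

// Handlers is the contract implemented by pkg/webhooks/policy and
// pkg/webhooks/resource; the server only routes admission requests to it.
type Handlers interface {
	// Mutate handles the mutating admission request.
	Mutate(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse
	// Validate handles the validating admission request.
	Validate(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse
}

In main(), the two handler sets are built with webhookspolicy.NewHandlers and webhooksresource.NewHandlers and passed to webhooks.NewServer, as the first hunks below show.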
@@ -38,6 +38,7 @@ import (
	"github.com/kyverno/kyverno/pkg/webhookconfig"
	"github.com/kyverno/kyverno/pkg/webhooks"
	webhookspolicy "github.com/kyverno/kyverno/pkg/webhooks/policy"
	webhooksresource "github.com/kyverno/kyverno/pkg/webhooks/resource"
	webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
	"github.com/prometheus/client_golang/prometheus/promhttp"
	kubeinformers "k8s.io/client-go/informers"
@@ -318,7 +319,7 @@ func main() {
	pCacheController := policycache.NewCache(kyvernoV1.ClusterPolicies(), kyvernoV1.Policies())

-	auditHandler := webhooks.NewValidateAuditHandler(
+	auditHandler := webhooksresource.NewValidateAuditHandler(
		pCacheController,
		eventGenerator,
		reportReqGen,
@@ -407,38 +408,32 @@ func main() {
	// -- generate policy violation resource
	// -- generate events on policy and resource
	policyHandlers := webhookspolicy.NewHandlers(dynamicClient, openAPIController)

	server, err := webhooks.NewWebhookServer(
		policyHandlers,
		kyvernoClient,
	resourceHandlers := webhooksresource.NewHandlers(
		dynamicClient,
		certManager.GetTLSPemPair,
		kyvernoInformer.Kyverno().V1beta1().UpdateRequests(),
		kyvernoV1.ClusterPolicies(),
		kubeInformer.Rbac().V1().RoleBindings(),
		kubeInformer.Rbac().V1().ClusterRoleBindings(),
		kubeInformer.Rbac().V1().Roles(),
		kubeInformer.Rbac().V1().ClusterRoles(),
		kubeInformer.Core().V1().Namespaces(),
		eventGenerator,
		pCacheController,
		webhookCfg,
		webhookMonitor,
		kyvernoClient,
		configuration,
		promConfig,
		pCacheController,
		kubeInformer.Core().V1().Namespaces().Lister(),
		kubeInformer.Rbac().V1().RoleBindings().Lister(),
		kubeInformer.Rbac().V1().ClusterRoleBindings().Lister(),
		kyvernoInformer.Kyverno().V1beta1().UpdateRequests().Lister().UpdateRequests(config.KyvernoNamespace()),
		reportReqGen,
		urgen,
		eventGenerator,
		auditHandler,
		cleanUp,
		log.Log.WithName("WebhookServer"),
		openAPIController,
		urc,
		promConfig,
	)

	if err != nil {
		setupLog.Error(err, "Failed to create webhook server")
		os.Exit(1)
	}
	server := webhooks.NewServer(
		policyHandlers,
		resourceHandlers,
		certManager.GetTLSPemPair,
		configuration,
		webhookCfg,
		webhookMonitor,
		cleanUp,
	)

	// wrap all controllers that need leaderelection
	// start them once by the leader
@@ -487,7 +482,7 @@ func main() {
	}

	// verifies if the admission control is enabled and active
-	server.RunAsync(stopCh)
+	server.Run(stopCh)

	<-stopCh
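Note that the new webhooks.NewServer no longer returns an error, and RunAsync becomes Run while main() still blocks on <-stopCh afterwards, so Run is presumably non-blocking. A generic sketch of that start/stop pattern, with assumed names and TLS wiring rather than the actual Kyverno implementation:

package webhooks

import (
	"context"
	"net/http"
	"time"
)

// runServer starts srv in the background and shuts it down once stopCh closes.
// Illustration only: the field names and TLS setup are assumptions, not the
// actual body of Server.Run.
func runServer(srv *http.Server, stopCh <-chan struct{}) {
	go func() {
		// certificates are assumed to be served via srv.TLSConfig.GetCertificate
		if err := srv.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed {
			logger.Error(err, "failed to start webhook server")
		}
	}()
	go func() {
		<-stopCh
		ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
		defer cancel()
		if err := srv.Shutdown(ctx); err != nil {
			logger.Error(err, "failed to shut down webhook server")
		}
	}()
}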
(file deleted by this commit)
@@ -1,173 +0,0 @@
|
|||
package webhooks
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
|
||||
urkyverno "github.com/kyverno/kyverno/api/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/autogen"
|
||||
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
|
||||
"github.com/kyverno/kyverno/pkg/engine/response"
|
||||
engineutils "github.com/kyverno/kyverno/pkg/engine/utils"
|
||||
engineutils2 "github.com/kyverno/kyverno/pkg/utils/engine"
|
||||
"github.com/pkg/errors"
|
||||
yamlv2 "gopkg.in/yaml.v2"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
)
|
||||
|
||||
// returns true -> if there is even one policy that blocks resource request
|
||||
// returns false -> if all the policies are meant to report only, we don't block the resource request
|
||||
func toBlockResource(engineReponses []*response.EngineResponse, log logr.Logger) bool {
|
||||
for _, er := range engineReponses {
|
||||
if engineutils2.CheckEngineResponse(er) {
|
||||
log.Info("spec.ValidationFailureAction set to enforce, blocking resource request", "policy", er.PolicyResponse.Policy.Name)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
log.V(4).Info("spec.ValidationFailureAction set to audit for all applicable policies, won't block resource operation")
|
||||
return false
|
||||
}
|
||||
|
||||
// getEnforceFailureErrorMsg gets the error messages for failed enforce policy
|
||||
func getEnforceFailureErrorMsg(engineResponses []*response.EngineResponse) string {
|
||||
policyToRule := make(map[string]interface{})
|
||||
var resourceName string
|
||||
for _, er := range engineResponses {
|
||||
if engineutils2.CheckEngineResponse(er) {
|
||||
ruleToReason := make(map[string]string)
|
||||
for _, rule := range er.PolicyResponse.Rules {
|
||||
if rule.Status != response.RuleStatusPass {
|
||||
ruleToReason[rule.Name] = rule.Message
|
||||
}
|
||||
}
|
||||
resourceName = fmt.Sprintf("%s/%s/%s", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
|
||||
policyToRule[er.PolicyResponse.Policy.Name] = ruleToReason
|
||||
}
|
||||
}
|
||||
result, _ := yamlv2.Marshal(policyToRule)
|
||||
return "\n\nresource " + resourceName + " was blocked due to the following policies\n\n" + string(result)
|
||||
}
|
||||
|
||||
// getErrorMsg gets all failed engine response message
|
||||
func getErrorMsg(engineReponses []*response.EngineResponse) string {
|
||||
var str []string
|
||||
var resourceInfo string
|
||||
for _, er := range engineReponses {
|
||||
if !er.IsSuccessful() {
|
||||
// resource in engineReponses is identical as this was called per admission request
|
||||
resourceInfo = fmt.Sprintf("%s/%s/%s", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
|
||||
str = append(str, fmt.Sprintf("failed policy %s:", er.PolicyResponse.Policy.Name))
|
||||
for _, rule := range er.PolicyResponse.Rules {
|
||||
if rule.Status != response.RuleStatusPass {
|
||||
str = append(str, rule.ToString())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("Resource %s %s", resourceInfo, strings.Join(str, ";"))
|
||||
}
|
||||
|
||||
// patchRequest applies patches to the request.Object and returns a new copy of the request
|
||||
func patchRequest(patches []byte, request *admissionv1.AdmissionRequest, logger logr.Logger) *admissionv1.AdmissionRequest {
|
||||
patchedResource := processResourceWithPatches(patches, request.Object.Raw, logger)
|
||||
newRequest := request.DeepCopy()
|
||||
newRequest.Object.Raw = patchedResource
|
||||
return newRequest
|
||||
}
|
||||
|
||||
func processResourceWithPatches(patch []byte, resource []byte, log logr.Logger) []byte {
|
||||
if patch == nil {
|
||||
return resource
|
||||
}
|
||||
|
||||
resource, err := engineutils.ApplyPatchNew(resource, patch)
|
||||
if err != nil {
|
||||
log.Error(err, "failed to patch resource:", "patch", string(patch), "resource", string(resource))
|
||||
return nil
|
||||
}
|
||||
|
||||
log.V(6).Info("", "patchedResource", string(resource))
|
||||
return resource
|
||||
}
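processResourceWithPatches above delegates the patching itself to engineutils.ApplyPatchNew. The same effect can be obtained with the evanphx/json-patch module (which Kyverno's engine utilities appear to build on; treat that as an assumption). A small standalone sketch:

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// applyJSONPatch applies an RFC 6902 patch to a raw JSON document.
func applyJSONPatch(doc, patch []byte) ([]byte, error) {
	decoded, err := jsonpatch.DecodePatch(patch)
	if err != nil {
		return nil, err
	}
	return decoded.Apply(doc)
}

func main() {
	resource := []byte(`{"metadata":{"labels":{}}}`)
	patch := []byte(`[{"op":"add","path":"/metadata/labels/app","value":"nginx"}]`)
	patched, err := applyJSONPatch(resource, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patched))
}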
|
||||
|
||||
func containsRBACInfo(policies ...[]kyverno.PolicyInterface) bool {
|
||||
for _, policySlice := range policies {
|
||||
for _, policy := range policySlice {
|
||||
for _, rule := range autogen.ComputeRules(policy) {
|
||||
if checkForRBACInfo(rule) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func checkForRBACInfo(rule kyverno.Rule) bool {
|
||||
if len(rule.MatchResources.Roles) > 0 || len(rule.MatchResources.ClusterRoles) > 0 || len(rule.ExcludeResources.Roles) > 0 || len(rule.ExcludeResources.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
if len(rule.MatchResources.All) > 0 {
|
||||
for _, rf := range rule.MatchResources.All {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rule.MatchResources.Any) > 0 {
|
||||
for _, rf := range rule.MatchResources.Any {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rule.ExcludeResources.All) > 0 {
|
||||
for _, rf := range rule.ExcludeResources.All {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rule.ExcludeResources.Any) > 0 {
|
||||
for _, rf := range rule.ExcludeResources.Any {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func excludeKyvernoResources(kind string) bool {
|
||||
switch kind {
|
||||
case "ClusterPolicyReport":
|
||||
return true
|
||||
case "PolicyReport":
|
||||
return true
|
||||
case "ReportChangeRequest":
|
||||
return true
|
||||
case "GenerateRequest":
|
||||
return true
|
||||
case "ClusterReportChangeRequest":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func newVariablesContext(request *admissionv1.AdmissionRequest, userRequestInfo *urkyverno.RequestInfo) (enginectx.Interface, error) {
|
||||
ctx := enginectx.NewContext()
|
||||
if err := ctx.AddRequest(request); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load incoming request in context")
|
||||
}
|
||||
if err := ctx.AddUserInfo(*userRequestInfo); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load userInfo in context")
|
||||
}
|
||||
if err := ctx.AddServiceAccount(userRequestInfo.AdmissionUserInfo.Username); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load service account in context")
|
||||
}
|
||||
return ctx, nil
|
||||
}
|
|
(file deleted by this commit)
@@ -1,431 +0,0 @@
|
|||
package webhooks
|
||||
|
||||
import (
|
||||
contextdefault "context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/gardener/controller-manager-library/pkg/logger"
|
||||
"github.com/go-logr/logr"
|
||||
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
|
||||
urkyverno "github.com/kyverno/kyverno/api/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/autogen"
|
||||
gencommon "github.com/kyverno/kyverno/pkg/background/common"
|
||||
gen "github.com/kyverno/kyverno/pkg/background/generate"
|
||||
"github.com/kyverno/kyverno/pkg/common"
|
||||
"github.com/kyverno/kyverno/pkg/config"
|
||||
client "github.com/kyverno/kyverno/pkg/dclient"
|
||||
"github.com/kyverno/kyverno/pkg/engine"
|
||||
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
|
||||
"github.com/kyverno/kyverno/pkg/engine/response"
|
||||
enginutils "github.com/kyverno/kyverno/pkg/engine/utils"
|
||||
"github.com/kyverno/kyverno/pkg/engine/variables"
|
||||
"github.com/kyverno/kyverno/pkg/event"
|
||||
jsonutils "github.com/kyverno/kyverno/pkg/utils/json"
|
||||
"github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
//handleGenerate handles admission-requests for policies with generate rules
|
||||
func (ws *WebhookServer) handleGenerate(
|
||||
request *admissionv1.AdmissionRequest,
|
||||
policies []kyverno.PolicyInterface,
|
||||
policyContext *engine.PolicyContext,
|
||||
admissionRequestTimestamp int64,
|
||||
latencySender *chan int64,
|
||||
generateEngineResponsesSenderForAdmissionReviewDurationMetric *chan []*response.EngineResponse,
|
||||
generateEngineResponsesSenderForAdmissionRequestsCountMetric *chan []*response.EngineResponse,
|
||||
) {
|
||||
logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
|
||||
logger.V(6).Info("update request")
|
||||
|
||||
var engineResponses []*response.EngineResponse
|
||||
if (request.Operation == admissionv1.Create || request.Operation == admissionv1.Update) && len(policies) != 0 {
|
||||
for _, policy := range policies {
|
||||
var rules []response.RuleResponse
|
||||
policyContext.Policy = policy
|
||||
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
|
||||
policyContext.NamespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, ws.nsLister, logger)
|
||||
}
|
||||
engineResponse := engine.ApplyBackgroundChecks(policyContext)
|
||||
for _, rule := range engineResponse.PolicyResponse.Rules {
|
||||
if rule.Status != response.RuleStatusPass {
|
||||
ws.deleteGR(logger, engineResponse)
|
||||
continue
|
||||
}
|
||||
rules = append(rules, rule)
|
||||
}
|
||||
|
||||
if len(rules) > 0 {
|
||||
engineResponse.PolicyResponse.Rules = rules
|
||||
// some generate rules do apply to the resource
|
||||
engineResponses = append(engineResponses, engineResponse)
|
||||
}
|
||||
|
||||
// registering the kyverno_policy_results_total metric concurrently
|
||||
go ws.registerPolicyResultsMetricGeneration(logger, string(request.Operation), policy, *engineResponse)
|
||||
|
||||
// registering the kyverno_policy_execution_duration_seconds metric concurrently
|
||||
go ws.registerPolicyExecutionDurationMetricGenerate(logger, string(request.Operation), policy, *engineResponse)
|
||||
}
|
||||
|
||||
if failedResponse := applyUpdateRequest(request, urkyverno.Generate, ws.urGenerator, policyContext.AdmissionInfo, request.Operation, engineResponses...); failedResponse != nil {
|
||||
// report failure event
|
||||
for _, failedUR := range failedResponse {
|
||||
err := fmt.Errorf("failed to create Update Request: %v", failedUR.err)
|
||||
e := event.NewBackgroundFailedEvent(err, failedUR.ur.Policy, "", event.GeneratePolicyController, &policyContext.NewResource)
|
||||
ws.eventGen.Add(e...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if request.Operation == admissionv1.Update {
|
||||
ws.handleUpdatesForGenerateRules(request, policies)
|
||||
}
|
||||
|
||||
// sending the admission request latency to other goroutine (reporting the metrics) over the channel
|
||||
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
|
||||
*latencySender <- admissionReviewLatencyDuration
|
||||
*generateEngineResponsesSenderForAdmissionReviewDurationMetric <- engineResponses
|
||||
*generateEngineResponsesSenderForAdmissionRequestsCountMetric <- engineResponses
|
||||
}
|
||||
|
||||
//handleUpdatesForGenerateRules handles admission-requests for update
|
||||
func (ws *WebhookServer) handleUpdatesForGenerateRules(request *admissionv1.AdmissionRequest, policies []kyverno.PolicyInterface) {
|
||||
if request.Operation != admissionv1.Update {
|
||||
return
|
||||
}
|
||||
|
||||
logger := ws.log.WithValues("action", "generate", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
|
||||
resource, err := enginutils.ConvertToUnstructured(request.OldObject.Raw)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to convert object resource to unstructured format")
|
||||
}
|
||||
|
||||
resLabels := resource.GetLabels()
|
||||
if resLabels["generate.kyverno.io/clone-policy-name"] != "" {
|
||||
ws.handleUpdateGenerateSourceResource(resLabels, logger)
|
||||
}
|
||||
|
||||
if resLabels["app.kubernetes.io/managed-by"] == "kyverno" && resLabels["policy.kyverno.io/synchronize"] == "enable" && request.Operation == admissionv1.Update {
|
||||
ws.handleUpdateGenerateTargetResource(request, policies, resLabels, logger)
|
||||
}
|
||||
}
|
||||
|
||||
//handleUpdateGenerateSourceResource - handles update of clone source for generate policy
|
||||
func (ws *WebhookServer) handleUpdateGenerateSourceResource(resLabels map[string]string, logger logr.Logger) {
|
||||
policyNames := strings.Split(resLabels["generate.kyverno.io/clone-policy-name"], ",")
|
||||
for _, policyName := range policyNames {
|
||||
// check if the policy exists
|
||||
_, err := ws.kyvernoClient.KyvernoV1().ClusterPolicies().Get(contextdefault.TODO(), policyName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
logger.V(4).Info("skipping update of update request as policy is deleted")
|
||||
} else {
|
||||
logger.Error(err, "failed to get generate policy", "Name", policyName)
|
||||
}
|
||||
} else {
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{
|
||||
urkyverno.URGeneratePolicyLabel: policyName,
|
||||
}))
|
||||
|
||||
urList, err := ws.urLister.List(selector)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request for the resource", "label", urkyverno.URGeneratePolicyLabel)
|
||||
return
|
||||
}
|
||||
|
||||
for _, ur := range urList {
|
||||
ws.updateAnnotationInUR(ur, logger)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateAnnotationInUR - function used to update UR annotation
|
||||
// updating UR will trigger reprocessing of UR and recreation/updation of generated resource
|
||||
func (ws *WebhookServer) updateAnnotationInUR(ur *urkyverno.UpdateRequest, logger logr.Logger) {
|
||||
urAnnotations := ur.Annotations
|
||||
if len(urAnnotations) == 0 {
|
||||
urAnnotations = make(map[string]string)
|
||||
}
|
||||
ws.mu.Lock()
|
||||
urAnnotations["generate.kyverno.io/updation-time"] = time.Now().String()
|
||||
ur.SetAnnotations(urAnnotations)
|
||||
ws.mu.Unlock()
|
||||
|
||||
patch := jsonutils.NewPatch(
|
||||
"/metadata/annotations",
|
||||
"replace",
|
||||
ur.Annotations,
|
||||
)
|
||||
|
||||
new, err := gencommon.PatchUpdateRequest(ur, patch, ws.kyvernoClient)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to update update request update-time annotations for the resource", "update request", ur.Name)
|
||||
return
|
||||
}
|
||||
new.Status.State = urkyverno.Pending
|
||||
if _, err := ws.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).UpdateStatus(contextdefault.TODO(), new, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "failed to set UpdateRequest state to Pending", "update request", ur.Name)
|
||||
}
|
||||
}
|
||||
|
||||
//handleUpdateGenerateTargetResource - handles update of target resource for generate policy
|
||||
func (ws *WebhookServer) handleUpdateGenerateTargetResource(request *admissionv1.AdmissionRequest, policies []kyverno.PolicyInterface, resLabels map[string]string, logger logr.Logger) {
|
||||
enqueueBool := false
|
||||
newRes, err := enginutils.ConvertToUnstructured(request.Object.Raw)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to convert object resource to unstructured format")
|
||||
}
|
||||
|
||||
policyName := resLabels["policy.kyverno.io/policy-name"]
|
||||
targetSourceName := newRes.GetName()
|
||||
targetSourceKind := newRes.GetKind()
|
||||
|
||||
policy, err := ws.kyvernoClient.KyvernoV1().ClusterPolicies().Get(contextdefault.TODO(), policyName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get policy from kyverno client.", "policy name", policyName)
|
||||
return
|
||||
}
|
||||
|
||||
for _, rule := range autogen.ComputeRules(policy) {
|
||||
if rule.Generation.Kind == targetSourceKind && rule.Generation.Name == targetSourceName {
|
||||
updatedRule, err := getGeneratedByResource(newRes, resLabels, ws.client, rule, logger)
|
||||
if err != nil {
|
||||
logger.V(4).Info("skipping generate policy and resource pattern validaton", "error", err)
|
||||
} else {
|
||||
data := updatedRule.Generation.DeepCopy().GetData()
|
||||
if data != nil {
|
||||
if _, err := gen.ValidateResourceWithPattern(logger, newRes.Object, data); err != nil {
|
||||
enqueueBool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
cloneName := updatedRule.Generation.Clone.Name
|
||||
if cloneName != "" {
|
||||
obj, err := ws.client.GetResource("", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name)
|
||||
if err != nil {
|
||||
logger.Error(err, fmt.Sprintf("source resource %s/%s/%s not found.", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
sourceObj, newResObj := stripNonPolicyFields(obj.Object, newRes.Object, logger)
|
||||
|
||||
if _, err := gen.ValidateResourceWithPattern(logger, newResObj, sourceObj); err != nil {
|
||||
enqueueBool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if enqueueBool {
|
||||
urName := resLabels["policy.kyverno.io/gr-name"]
|
||||
ur, err := ws.urLister.Get(urName)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request", "name", urName)
|
||||
return
|
||||
}
|
||||
ws.updateAnnotationInUR(ur, logger)
|
||||
}
|
||||
}
|
||||
|
||||
func getGeneratedByResource(newRes *unstructured.Unstructured, resLabels map[string]string, client client.Interface, rule kyverno.Rule, logger logr.Logger) (kyverno.Rule, error) {
|
||||
var apiVersion, kind, name, namespace string
|
||||
sourceRequest := &admissionv1.AdmissionRequest{}
|
||||
kind = resLabels["kyverno.io/generated-by-kind"]
|
||||
name = resLabels["kyverno.io/generated-by-name"]
|
||||
if kind != "Namespace" {
|
||||
namespace = resLabels["kyverno.io/generated-by-namespace"]
|
||||
}
|
||||
obj, err := client.GetResource(apiVersion, kind, namespace, name)
|
||||
if err != nil {
|
||||
logger.Error(err, "source resource not found.")
|
||||
return rule, err
|
||||
}
|
||||
rawObj, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to marshal resource")
|
||||
return rule, err
|
||||
}
|
||||
sourceRequest.Object.Raw = rawObj
|
||||
sourceRequest.Operation = "CREATE"
|
||||
ctx := enginectx.NewContext()
|
||||
if err := ctx.AddRequest(sourceRequest); err != nil {
|
||||
logger.Error(err, "failed to load incoming request in context")
|
||||
return rule, err
|
||||
}
|
||||
if rule, err = variables.SubstituteAllInRule(logger, ctx, rule); err != nil {
|
||||
logger.Error(err, "variable substitution failed for rule %s", rule.Name)
|
||||
return rule, err
|
||||
}
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
//stripNonPolicyFields - remove fields which get updated with each request by kyverno and are non-policy fields
|
||||
func stripNonPolicyFields(obj, newRes map[string]interface{}, logger logr.Logger) (map[string]interface{}, map[string]interface{}) {
|
||||
if metadata, found := obj["metadata"]; found {
|
||||
requiredMetadataInObj := make(map[string]interface{})
|
||||
if annotations, found := metadata.(map[string]interface{})["annotations"]; found {
|
||||
delete(annotations.(map[string]interface{}), "kubectl.kubernetes.io/last-applied-configuration")
|
||||
requiredMetadataInObj["annotations"] = annotations
|
||||
}
|
||||
|
||||
if labels, found := metadata.(map[string]interface{})["labels"]; found {
|
||||
delete(labels.(map[string]interface{}), "generate.kyverno.io/clone-policy-name")
|
||||
requiredMetadataInObj["labels"] = labels
|
||||
}
|
||||
obj["metadata"] = requiredMetadataInObj
|
||||
}
|
||||
|
||||
if metadata, found := newRes["metadata"]; found {
|
||||
requiredMetadataInNewRes := make(map[string]interface{})
|
||||
if annotations, found := metadata.(map[string]interface{})["annotations"]; found {
|
||||
requiredMetadataInNewRes["annotations"] = annotations
|
||||
}
|
||||
|
||||
if labels, found := metadata.(map[string]interface{})["labels"]; found {
|
||||
requiredMetadataInNewRes["labels"] = labels
|
||||
}
|
||||
newRes["metadata"] = requiredMetadataInNewRes
|
||||
}
|
||||
|
||||
delete(obj, "status")
|
||||
|
||||
if _, found := obj["spec"]; found {
|
||||
delete(obj["spec"].(map[string]interface{}), "tolerations")
|
||||
}
|
||||
|
||||
if dataMap, found := obj["data"]; found {
|
||||
keyInData := make([]string, 0)
|
||||
switch dataMap := dataMap.(type) {
|
||||
case map[string]interface{}:
|
||||
for k := range dataMap {
|
||||
keyInData = append(keyInData, k)
|
||||
}
|
||||
}
|
||||
|
||||
if len(keyInData) > 0 {
|
||||
for _, dataKey := range keyInData {
|
||||
originalResourceData := dataMap.(map[string]interface{})[dataKey]
|
||||
replaceData := strings.Replace(originalResourceData.(string), "\n", "", -1)
|
||||
dataMap.(map[string]interface{})[dataKey] = replaceData
|
||||
|
||||
newResourceData := newRes["data"].(map[string]interface{})[dataKey]
|
||||
replacenewResourceData := strings.Replace(newResourceData.(string), "\n", "", -1)
|
||||
newRes["data"].(map[string]interface{})[dataKey] = replacenewResourceData
|
||||
}
|
||||
} else {
|
||||
logger.V(4).Info("data is not of type map[string]interface{}")
|
||||
}
|
||||
}
|
||||
|
||||
return obj, newRes
|
||||
}
|
||||
|
||||
//HandleDelete handles DELETE admission-requests for generate policies
|
||||
func (ws *WebhookServer) handleDelete(request *admissionv1.AdmissionRequest) {
|
||||
logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
|
||||
resource, err := enginutils.ConvertToUnstructured(request.OldObject.Raw)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to convert object resource to unstructured format")
|
||||
}
|
||||
|
||||
resLabels := resource.GetLabels()
|
||||
if resLabels["app.kubernetes.io/managed-by"] == "kyverno" && request.Operation == admissionv1.Delete {
|
||||
urName := resLabels["policy.kyverno.io/gr-name"]
|
||||
ur, err := ws.urLister.Get(urName)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request", "name", urName)
|
||||
return
|
||||
}
|
||||
|
||||
if ur.Spec.Type == urkyverno.Mutate {
|
||||
return
|
||||
}
|
||||
ws.updateAnnotationInUR(ur, logger)
|
||||
}
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) deleteGR(logger logr.Logger, engineResponse *response.EngineResponse) {
|
||||
logger.V(4).Info("querying all update requests")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{
|
||||
urkyverno.URGeneratePolicyLabel: engineResponse.PolicyResponse.Policy.Name,
|
||||
"generate.kyverno.io/resource-name": engineResponse.PolicyResponse.Resource.Name,
|
||||
"generate.kyverno.io/resource-kind": engineResponse.PolicyResponse.Resource.Kind,
|
||||
"generate.kyverno.io/resource-namespace": engineResponse.PolicyResponse.Resource.Namespace,
|
||||
}))
|
||||
|
||||
urList, err := ws.urLister.List(selector)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request for the resource", "kind", engineResponse.PolicyResponse.Resource.Kind, "name", engineResponse.PolicyResponse.Resource.Name, "namespace", engineResponse.PolicyResponse.Resource.Namespace)
|
||||
return
|
||||
}
|
||||
|
||||
for _, v := range urList {
|
||||
err := ws.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Delete(contextdefault.TODO(), v.GetName(), metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to update ur")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func applyUpdateRequest(request *admissionv1.AdmissionRequest, ruleType urkyverno.RequestType, grGenerator updaterequest.Interface, userRequestInfo urkyverno.RequestInfo,
|
||||
action admissionv1.Operation, engineResponses ...*response.EngineResponse) (failedUpdateRequest []updateRequestResponse) {
|
||||
requestBytes, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
logger.Error(err, "error loading request into context")
|
||||
}
|
||||
admissionRequestInfo := urkyverno.AdmissionRequestInfoObject{
|
||||
AdmissionRequest: string(requestBytes),
|
||||
Operation: action,
|
||||
}
|
||||
|
||||
for _, er := range engineResponses {
|
||||
ur := transform(admissionRequestInfo, userRequestInfo, er, ruleType)
|
||||
if err := grGenerator.Apply(ur, action); err != nil {
|
||||
failedUpdateRequest = append(failedUpdateRequest, updateRequestResponse{ur: ur, err: err})
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func transform(admissionRequestInfo urkyverno.AdmissionRequestInfoObject, userRequestInfo urkyverno.RequestInfo, er *response.EngineResponse, ruleType urkyverno.RequestType) urkyverno.UpdateRequestSpec {
|
||||
var PolicyNameNamespaceKey string
|
||||
if er.PolicyResponse.Policy.Namespace != "" {
|
||||
PolicyNameNamespaceKey = er.PolicyResponse.Policy.Namespace + "/" + er.PolicyResponse.Policy.Name
|
||||
} else {
|
||||
PolicyNameNamespaceKey = er.PolicyResponse.Policy.Name
|
||||
}
|
||||
|
||||
ur := urkyverno.UpdateRequestSpec{
|
||||
Type: ruleType,
|
||||
Policy: PolicyNameNamespaceKey,
|
||||
Resource: kyverno.ResourceSpec{
|
||||
Kind: er.PolicyResponse.Resource.Kind,
|
||||
Namespace: er.PolicyResponse.Resource.Namespace,
|
||||
Name: er.PolicyResponse.Resource.Name,
|
||||
APIVersion: er.PolicyResponse.Resource.APIVersion,
|
||||
},
|
||||
Context: urkyverno.UpdateRequestSpecContext{
|
||||
UserRequestInfo: userRequestInfo,
|
||||
AdmissionRequestInfo: admissionRequestInfo,
|
||||
},
|
||||
}
|
||||
|
||||
return ur
|
||||
}
|
||||
|
||||
type updateRequestResponse struct {
|
||||
ur urkyverno.UpdateRequestSpec
|
||||
err error
|
||||
}
|
|
(file deleted by this commit)
@@ -1,186 +0,0 @@
|
|||
package webhooks
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
|
||||
urkyverno "github.com/kyverno/kyverno/api/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/common"
|
||||
"github.com/kyverno/kyverno/pkg/engine"
|
||||
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
|
||||
"github.com/kyverno/kyverno/pkg/policycache"
|
||||
"github.com/kyverno/kyverno/pkg/userinfo"
|
||||
"github.com/kyverno/kyverno/pkg/utils"
|
||||
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
|
||||
"github.com/kyverno/kyverno/pkg/webhooks/handlers"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
)
|
||||
|
||||
func errorResponse(logger logr.Logger, err error, message string) *admissionv1.AdmissionResponse {
|
||||
logger.Error(err, message)
|
||||
return admissionutils.ResponseFailure(false, message+": "+err.Error())
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) admissionHandler(logger logr.Logger, filter bool, inner handlers.AdmissionHandler) http.HandlerFunc {
|
||||
if filter {
|
||||
inner = handlers.Filter(ws.configuration, inner)
|
||||
}
|
||||
return handlers.Monitor(ws.webhookMonitor, handlers.Admission(logger, inner))
|
||||
}
|
||||
|
||||
// resourceMutation mutates resource
|
||||
func (ws *WebhookServer) resourceMutation(logger logr.Logger, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
|
||||
if excludeKyvernoResources(request.Kind.Kind) {
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
|
||||
if request.Operation == admissionv1.Delete {
|
||||
resource, err := utils.ConvertResource(request.OldObject.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
|
||||
if err == nil {
|
||||
ws.prGenerator.Add(buildDeletionPrInfo(resource))
|
||||
} else {
|
||||
logger.Info(fmt.Sprintf("Converting oldObject failed: %v", err))
|
||||
}
|
||||
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
|
||||
kind := request.Kind.Kind
|
||||
logger.V(4).Info("received an admission request in mutating webhook", "kind", kind)
|
||||
|
||||
requestTime := time.Now().Unix()
|
||||
mutatePolicies := ws.pCache.GetPolicies(policycache.Mutate, kind, request.Namespace)
|
||||
verifyImagesPolicies := ws.pCache.GetPolicies(policycache.VerifyImagesMutate, kind, request.Namespace)
|
||||
|
||||
if len(mutatePolicies) == 0 && len(verifyImagesPolicies) == 0 {
|
||||
logger.V(4).Info("no policies matched mutate admission request", "kind", kind)
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
|
||||
logger.V(4).Info("processing policies for mutate admission request", "kind", kind,
|
||||
"mutatePolicies", len(mutatePolicies), "verifyImagesPolicies", len(verifyImagesPolicies))
|
||||
|
||||
addRoles := containsRBACInfo(mutatePolicies)
|
||||
policyContext, err := ws.buildPolicyContext(request, addRoles)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to build policy context")
|
||||
return admissionutils.ResponseFailure(false, err.Error())
|
||||
}
|
||||
|
||||
// update container images to a canonical form
|
||||
if err := enginectx.MutateResourceWithImageInfo(request.Object.Raw, policyContext.JSONContext); err != nil {
|
||||
ws.log.Error(err, "failed to patch images info to resource, policies that mutate images may be impacted")
|
||||
}
|
||||
|
||||
mutatePatches := ws.applyMutatePolicies(request, policyContext, mutatePolicies, requestTime, logger)
|
||||
newRequest := patchRequest(mutatePatches, request, logger)
|
||||
imagePatches, err := ws.applyImageVerifyPolicies(newRequest, policyContext, verifyImagesPolicies, logger)
|
||||
if err != nil {
|
||||
logger.Error(err, "image verification failed")
|
||||
return admissionutils.ResponseFailure(false, err.Error())
|
||||
}
|
||||
|
||||
var patches = append(mutatePatches, imagePatches...)
|
||||
|
||||
return admissionutils.ResponseSuccessWithPatch(true, "", patches)
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) resourceValidation(logger logr.Logger, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
|
||||
if request.Operation == admissionv1.Delete {
|
||||
ws.handleDelete(request)
|
||||
}
|
||||
|
||||
if excludeKyvernoResources(request.Kind.Kind) {
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
|
||||
kind := request.Kind.Kind
|
||||
logger.V(4).Info("received an admission request in validating webhook", "kind", kind)
|
||||
|
||||
// timestamp at which this admission request got triggered
|
||||
requestTime := time.Now().Unix()
|
||||
policies := ws.pCache.GetPolicies(policycache.ValidateEnforce, kind, request.Namespace)
|
||||
mutatePolicies := ws.pCache.GetPolicies(policycache.Mutate, kind, request.Namespace)
|
||||
generatePolicies := ws.pCache.GetPolicies(policycache.Generate, kind, request.Namespace)
|
||||
imageVerifyValidatePolicies := ws.pCache.GetPolicies(policycache.VerifyImagesValidate, kind, request.Namespace)
|
||||
policies = append(policies, imageVerifyValidatePolicies...)
|
||||
|
||||
if len(policies) == 0 && len(mutatePolicies) == 0 && len(generatePolicies) == 0 {
|
||||
logger.V(4).Info("no policies matched admission request", "kind", kind)
|
||||
}
|
||||
|
||||
if len(generatePolicies) == 0 && request.Operation == admissionv1.Update {
|
||||
// handle generate source resource updates
|
||||
go ws.handleUpdatesForGenerateRules(request, []kyverno.PolicyInterface{})
|
||||
}
|
||||
|
||||
logger.V(4).Info("processing policies for validate admission request",
|
||||
"kind", kind, "validate", len(policies), "mutate", len(mutatePolicies), "generate", len(generatePolicies))
|
||||
|
||||
var roles, clusterRoles []string
|
||||
if containsRBACInfo(policies, generatePolicies) {
|
||||
var err error
|
||||
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configuration)
|
||||
if err != nil {
|
||||
return errorResponse(logger, err, "failed to fetch RBAC data")
|
||||
}
|
||||
}
|
||||
|
||||
userRequestInfo := urkyverno.RequestInfo{
|
||||
Roles: roles,
|
||||
ClusterRoles: clusterRoles,
|
||||
AdmissionUserInfo: *request.UserInfo.DeepCopy(),
|
||||
}
|
||||
|
||||
ctx, err := newVariablesContext(request, &userRequestInfo)
|
||||
if err != nil {
|
||||
return errorResponse(logger, err, "failed create policy rule context")
|
||||
}
|
||||
|
||||
namespaceLabels := make(map[string]string)
|
||||
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
|
||||
namespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, ws.nsLister, logger)
|
||||
}
|
||||
|
||||
newResource, oldResource, err := utils.ExtractResources(nil, request)
|
||||
if err != nil {
|
||||
return errorResponse(logger, err, "failed create parse resource")
|
||||
}
|
||||
|
||||
if err := ctx.AddImageInfos(&newResource); err != nil {
|
||||
return errorResponse(logger, err, "failed add image information to policy rule context")
|
||||
}
|
||||
|
||||
policyContext := &engine.PolicyContext{
|
||||
NewResource: newResource,
|
||||
OldResource: oldResource,
|
||||
AdmissionInfo: userRequestInfo,
|
||||
ExcludeGroupRole: ws.configuration.GetExcludeGroupRole(),
|
||||
ExcludeResourceFunc: ws.configuration.ToFilter,
|
||||
JSONContext: ctx,
|
||||
Client: ws.client,
|
||||
AdmissionOperation: true,
|
||||
}
|
||||
|
||||
vh := &validationHandler{
|
||||
log: ws.log,
|
||||
eventGen: ws.eventGen,
|
||||
prGenerator: ws.prGenerator,
|
||||
}
|
||||
|
||||
ok, msg := vh.handleValidation(ws.promConfig, request, policies, policyContext, namespaceLabels, requestTime)
|
||||
if !ok {
|
||||
logger.Info("admission request denied")
|
||||
return admissionutils.ResponseFailure(false, msg)
|
||||
}
|
||||
|
||||
// push admission request to audit handler, this won't block the admission request
|
||||
ws.auditHandler.Add(request.DeepCopy())
|
||||
|
||||
go ws.createUpdateRequests(request, policyContext, generatePolicies, mutatePolicies, requestTime, logger)
|
||||
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
|
@@ -84,7 +84,7 @@ func Filter(c config.Configuration, inner AdmissionHandler) AdmissionHandler {
	}
}

-func Verify(m *webhookconfig.Monitor, logger logr.Logger) AdmissionHandler {
+func Verify(m *webhookconfig.Monitor) AdmissionHandler {
	return func(logger logr.Logger, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
		logger.V(6).Info("incoming request", "last admission request timestamp", m.Time())
		return admissionutils.Response(true)
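Dropping the logger parameter from Verify works because every wrapper in this package shares the AdmissionHandler signature and receives the logger when the handler is invoked; Filter, Monitor, Verify and Admission all act as decorators over that type. A small sketch of the pattern, where withLogging is an illustrative wrapper rather than part of the Kyverno code:

package handlers

import (
	"github.com/go-logr/logr"
	admissionv1 "k8s.io/api/admission/v1"
)

// AdmissionHandler mirrors the handler signature used throughout this package.
type AdmissionHandler func(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse

// withLogging returns a handler that logs the request and then delegates to inner.
func withLogging(inner AdmissionHandler) AdmissionHandler {
	return func(logger logr.Logger, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
		logger.V(4).Info("handling admission request", "kind", request.Kind.Kind)
		return inner(logger, request)
	}
}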
pkg/webhooks/log.go (new file, 5 lines)
@@ -0,0 +1,5 @@
package webhooks

import "sigs.k8s.io/controller-runtime/pkg/log"

var logger = log.Log.WithName("webhooks")
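The new package-level logger means code in pkg/webhooks no longer needs a logr.Logger threaded through each constructor (the first commit in this series); call sites can derive named sub-loggers where useful. An illustrative example:

package webhooks

func exampleLogging() {
	// the package-level logger declared in log.go
	logger.V(4).Info("registering webhook routes")

	// derive a scoped logger for a sub-component
	serverLogger := logger.WithName("server")
	serverLogger.Info("starting webhook server")
}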
(file deleted by this commit)
@@ -1,158 +0,0 @@
|
|||
package webhooks
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
|
||||
"github.com/kyverno/kyverno/pkg/common"
|
||||
"github.com/kyverno/kyverno/pkg/engine"
|
||||
"github.com/kyverno/kyverno/pkg/engine/response"
|
||||
"github.com/kyverno/kyverno/pkg/utils"
|
||||
engineutils "github.com/kyverno/kyverno/pkg/utils/engine"
|
||||
jsonutils "github.com/kyverno/kyverno/pkg/utils/json"
|
||||
"github.com/pkg/errors"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
)
|
||||
|
||||
func (ws *WebhookServer) applyMutatePolicies(request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, policies []kyverno.PolicyInterface, ts int64, logger logr.Logger) []byte {
|
||||
var mutateEngineResponses []*response.EngineResponse
|
||||
|
||||
mutatePatches, mutateEngineResponses := ws.handleMutation(request, policyContext, policies)
|
||||
logger.V(6).Info("", "generated patches", string(mutatePatches))
|
||||
|
||||
admissionReviewLatencyDuration := int64(time.Since(time.Unix(ts, 0)))
|
||||
go ws.registerAdmissionReviewDurationMetricMutate(logger, string(request.Operation), mutateEngineResponses, admissionReviewLatencyDuration)
|
||||
go ws.registerAdmissionRequestsMetricMutate(logger, string(request.Operation), mutateEngineResponses)
|
||||
|
||||
return mutatePatches
|
||||
}
|
||||
|
||||
// handleMutation handles mutating webhook admission request
|
||||
// return value: generated patches, triggered policies, engine responses corresponding to the triggered policies
|
||||
func (ws *WebhookServer) handleMutation(
|
||||
request *admissionv1.AdmissionRequest,
|
||||
policyContext *engine.PolicyContext,
|
||||
policies []kyverno.PolicyInterface,
|
||||
) ([]byte, []*response.EngineResponse) {
|
||||
if len(policies) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
resourceName := request.Kind.Kind + "/" + request.Name
|
||||
if request.Namespace != "" {
|
||||
resourceName = request.Namespace + "/" + resourceName
|
||||
}
|
||||
|
||||
logger := ws.log.WithValues("action", "mutate", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind.String())
|
||||
|
||||
patchedResource := request.Object.Raw
|
||||
newR, oldR, err := utils.ExtractResources(patchedResource, request)
|
||||
if err != nil {
|
||||
// as resource cannot be parsed, we skip processing
|
||||
logger.Error(err, "failed to extract resource")
|
||||
return nil, nil
|
||||
}
|
||||
var deletionTimeStamp *metav1.Time
|
||||
if reflect.DeepEqual(newR, unstructured.Unstructured{}) {
|
||||
deletionTimeStamp = newR.GetDeletionTimestamp()
|
||||
} else {
|
||||
deletionTimeStamp = oldR.GetDeletionTimestamp()
|
||||
}
|
||||
|
||||
if deletionTimeStamp != nil && request.Operation == admissionv1.Update {
|
||||
return nil, nil
|
||||
}
|
||||
var patches [][]byte
|
||||
var engineResponses []*response.EngineResponse
|
||||
|
||||
for _, policy := range policies {
|
||||
spec := policy.GetSpec()
|
||||
if !spec.HasMutate() {
|
||||
continue
|
||||
}
|
||||
logger.V(3).Info("applying policy mutate rules", "policy", policy.GetName())
|
||||
policyContext.Policy = policy
|
||||
engineResponse, policyPatches, err := ws.applyMutation(request, policyContext, logger)
|
||||
if err != nil {
|
||||
// TODO report errors in engineResponse and record in metrics
|
||||
logger.Error(err, "mutate error")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(policyPatches) > 0 {
|
||||
patches = append(patches, policyPatches...)
|
||||
rules := engineResponse.GetSuccessRules()
|
||||
if len(rules) != 0 {
|
||||
logger.Info("mutation rules from policy applied successfully", "policy", policy.GetName(), "rules", rules)
|
||||
}
|
||||
}
|
||||
|
||||
policyContext.NewResource = engineResponse.PatchedResource
|
||||
engineResponses = append(engineResponses, engineResponse)
|
||||
|
||||
// registering the kyverno_policy_results_total metric concurrently
|
||||
go ws.registerPolicyResultsMetricMutation(logger, string(request.Operation), policy, *engineResponse)
|
||||
|
||||
// registering the kyverno_policy_execution_duration_seconds metric concurrently
|
||||
go ws.registerPolicyExecutionDurationMetricMutate(logger, string(request.Operation), policy, *engineResponse)
|
||||
}
|
||||
|
||||
// generate annotations
|
||||
if annPatches := utils.GenerateAnnotationPatches(engineResponses, logger); annPatches != nil {
|
||||
patches = append(patches, annPatches...)
|
||||
}
|
||||
|
||||
// REPORTING EVENTS
|
||||
// Scenario 1:
|
||||
// some/all policies failed to apply on the resource. a policy violation is generated.
|
||||
// create an event on the resource and the policy that failed
|
||||
// Scenario 2:
|
||||
// all policies were applied successfully.
|
||||
// create an event on the resource
|
||||
// ADD EVENTS
|
||||
events := generateEvents(engineResponses, false, logger)
|
||||
ws.eventGen.Add(events...)
|
||||
|
||||
// debug info
|
||||
func() {
|
||||
if len(patches) != 0 {
|
||||
logger.V(4).Info("JSON patches generated")
|
||||
}
|
||||
|
||||
// if any of the policies fails, print out the error
|
||||
if !engineutils.IsResponseSuccessful(engineResponses) {
|
||||
logger.Error(errors.New(getErrorMsg(engineResponses)), "failed to apply mutation rules on the resource, reporting policy violation")
|
||||
}
|
||||
}()
|
||||
|
||||
// patches holds all the successful patches, if no patch is created, it returns nil
|
||||
return jsonutils.JoinPatches(patches...), engineResponses
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) applyMutation(request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, logger logr.Logger) (*response.EngineResponse, [][]byte, error) {
|
||||
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
|
||||
policyContext.NamespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(
|
||||
request.Kind.Kind, request.Namespace, ws.nsLister, logger)
|
||||
}
|
||||
|
||||
engineResponse := engine.Mutate(policyContext)
|
||||
policyPatches := engineResponse.GetPatches()
|
||||
|
||||
if !engineResponse.IsSuccessful() && len(engineResponse.GetFailedRules()) > 0 {
|
||||
return nil, nil, fmt.Errorf("failed to apply policy %s rules %v", policyContext.Policy.GetName(), engineResponse.GetFailedRules())
|
||||
}
|
||||
|
||||
if engineResponse.PatchedResource.GetKind() != "*" {
|
||||
err := ws.openAPIController.ValidateResource(*engineResponse.PatchedResource.DeepCopy(), engineResponse.PatchedResource.GetAPIVersion(), engineResponse.PatchedResource.GetKind())
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to validate resource mutated by policy %s", policyContext.Policy.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
return engineResponse, policyPatches, nil
|
||||
}
|
|
@@ -13,25 +13,16 @@ import (
	policyvalidate "github.com/kyverno/kyverno/pkg/policy"
	"github.com/kyverno/kyverno/pkg/policymutation"
	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
+	"github.com/kyverno/kyverno/pkg/webhooks"
	admissionv1 "k8s.io/api/admission/v1"
)

-type Handlers interface {
-	// Mutate performs the mutation of policy resources
-	Mutate(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse
-	// Validate performs the validation check on policy resources
-	Validate(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse
-}

type handlers struct {
	client            client.Interface
	openAPIController *openapi.Controller
}

-func NewHandlers(
-	client client.Interface,
-	openAPIController *openapi.Controller,
-) Handlers {
+func NewHandlers(client client.Interface, openAPIController *openapi.Controller) webhooks.Handlers {
	return &handlers{
		client:            client,
		openAPIController: openAPIController,
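With both the policy and resource packages returning webhooks.Handlers, the server only has to route each webhook path to the right handler set. A sketch of such a dispatcher; the paths and the register helper are assumptions for illustration, not the actual registration code:

package webhooks

import (
	"net/http"

	"github.com/go-logr/logr"
	admissionv1 "k8s.io/api/admission/v1"
)

// registerRoutes shows how a mux could dispatch to the two handler sets.
func registerRoutes(mux *http.ServeMux, policy, resource Handlers, logger logr.Logger) {
	register := func(path string, h func(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse) {
		mux.HandleFunc(path, func(w http.ResponseWriter, r *http.Request) {
			// decode the AdmissionReview from r, call h(logger, request),
			// and write the AdmissionResponse back (omitted for brevity)
			_ = h
		})
	}
	register("/policymutate", policy.Mutate)
	register("/policyvalidate", policy.Validate)
	register("/mutate", resource.Mutate)
	register("/validate", resource.Validate)
}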
@@ -1,4 +1,4 @@
-package webhooks
+package policy

import (
	"encoding/json"
|
pkg/webhooks/resource/generation.go (new file, 258 lines)
@@ -0,0 +1,258 @@
|
|||
package resource
|
||||
|
||||
import (
|
||||
contextdefault "context"
|
||||
"fmt"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
kyverno "github.com/kyverno/kyverno/api/kyverno/v1"
|
||||
urkyverno "github.com/kyverno/kyverno/api/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/autogen"
|
||||
gencommon "github.com/kyverno/kyverno/pkg/background/common"
|
||||
gen "github.com/kyverno/kyverno/pkg/background/generate"
|
||||
"github.com/kyverno/kyverno/pkg/common"
|
||||
"github.com/kyverno/kyverno/pkg/config"
|
||||
"github.com/kyverno/kyverno/pkg/engine"
|
||||
"github.com/kyverno/kyverno/pkg/engine/response"
|
||||
enginutils "github.com/kyverno/kyverno/pkg/engine/utils"
|
||||
"github.com/kyverno/kyverno/pkg/event"
|
||||
jsonutils "github.com/kyverno/kyverno/pkg/utils/json"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/labels"
|
||||
)
|
||||
|
||||
//handleGenerate handles admission-requests for policies with generate rules
|
||||
func (h *handlers) handleGenerate(
|
||||
logger logr.Logger,
|
||||
request *admissionv1.AdmissionRequest,
|
||||
policies []kyverno.PolicyInterface,
|
||||
policyContext *engine.PolicyContext,
|
||||
admissionRequestTimestamp int64,
|
||||
latencySender *chan int64,
|
||||
generateEngineResponsesSenderForAdmissionReviewDurationMetric *chan []*response.EngineResponse,
|
||||
generateEngineResponsesSenderForAdmissionRequestsCountMetric *chan []*response.EngineResponse,
|
||||
) {
|
||||
logger.V(6).Info("update request")
|
||||
|
||||
var engineResponses []*response.EngineResponse
|
||||
if (request.Operation == admissionv1.Create || request.Operation == admissionv1.Update) && len(policies) != 0 {
|
||||
for _, policy := range policies {
|
||||
var rules []response.RuleResponse
|
||||
policyContext.Policy = policy
|
||||
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
|
||||
policyContext.NamespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, logger)
|
||||
}
|
||||
engineResponse := engine.ApplyBackgroundChecks(policyContext)
|
||||
for _, rule := range engineResponse.PolicyResponse.Rules {
|
||||
if rule.Status != response.RuleStatusPass {
|
||||
h.deleteGR(logger, engineResponse)
|
||||
continue
|
||||
}
|
||||
rules = append(rules, rule)
|
||||
}
|
||||
|
||||
if len(rules) > 0 {
|
||||
engineResponse.PolicyResponse.Rules = rules
|
||||
// some generate rules do apply to the resource
|
||||
engineResponses = append(engineResponses, engineResponse)
|
||||
}
|
||||
|
||||
// registering the kyverno_policy_results_total metric concurrently
|
||||
go h.registerPolicyResultsMetricGeneration(logger, string(request.Operation), policy, *engineResponse)
|
||||
// registering the kyverno_policy_execution_duration_seconds metric concurrently
|
||||
go h.registerPolicyExecutionDurationMetricGenerate(logger, string(request.Operation), policy, *engineResponse)
|
||||
}
|
||||
|
||||
if failedResponse := applyUpdateRequest(request, urkyverno.Generate, h.urGenerator, policyContext.AdmissionInfo, request.Operation, engineResponses...); failedResponse != nil {
|
||||
// report failure event
|
||||
for _, failedUR := range failedResponse {
|
||||
err := fmt.Errorf("failed to create Update Request: %v", failedUR.err)
|
||||
e := event.NewBackgroundFailedEvent(err, failedUR.ur.Policy, "", event.GeneratePolicyController, &policyContext.NewResource)
|
||||
h.eventGen.Add(e...)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if request.Operation == admissionv1.Update {
|
||||
h.handleUpdatesForGenerateRules(logger, request, policies)
|
||||
}
|
||||
|
||||
// sending the admission request latency to other goroutine (reporting the metrics) over the channel
|
||||
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
|
||||
*latencySender <- admissionReviewLatencyDuration
|
||||
*generateEngineResponsesSenderForAdmissionReviewDurationMetric <- engineResponses
|
||||
*generateEngineResponsesSenderForAdmissionRequestsCountMetric <- engineResponses
|
||||
}
|
||||
|
||||
//handleUpdatesForGenerateRules handles admission-requests for update
|
||||
func (h *handlers) handleUpdatesForGenerateRules(logger logr.Logger, request *admissionv1.AdmissionRequest, policies []kyverno.PolicyInterface) {
|
||||
if request.Operation != admissionv1.Update {
|
||||
return
|
||||
}
|
||||
|
||||
resource, err := enginutils.ConvertToUnstructured(request.OldObject.Raw)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to convert object resource to unstructured format")
|
||||
}
|
||||
|
||||
resLabels := resource.GetLabels()
|
||||
if resLabels["generate.kyverno.io/clone-policy-name"] != "" {
|
||||
h.handleUpdateGenerateSourceResource(resLabels, logger)
|
||||
}
|
||||
|
||||
if resLabels["app.kubernetes.io/managed-by"] == "kyverno" && resLabels["policy.kyverno.io/synchronize"] == "enable" && request.Operation == admissionv1.Update {
|
||||
h.handleUpdateGenerateTargetResource(request, policies, resLabels, logger)
|
||||
}
|
||||
}
|
||||
|
||||
//handleUpdateGenerateSourceResource - handles update of clone source for generate policy
|
||||
func (h *handlers) handleUpdateGenerateSourceResource(resLabels map[string]string, logger logr.Logger) {
|
||||
policyNames := strings.Split(resLabels["generate.kyverno.io/clone-policy-name"], ",")
|
||||
for _, policyName := range policyNames {
|
||||
// check if the policy exists
|
||||
_, err := h.kyvernoClient.KyvernoV1().ClusterPolicies().Get(contextdefault.TODO(), policyName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
if strings.Contains(err.Error(), "not found") {
|
||||
logger.V(4).Info("skipping update of update request as policy is deleted")
|
||||
} else {
|
||||
logger.Error(err, "failed to get generate policy", "Name", policyName)
|
||||
}
|
||||
} else {
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{
|
||||
urkyverno.URGeneratePolicyLabel: policyName,
|
||||
}))
|
||||
|
||||
urList, err := h.urLister.List(selector)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request for the resource", "label", urkyverno.URGeneratePolicyLabel)
|
||||
return
|
||||
}
|
||||
|
||||
for _, ur := range urList {
|
||||
h.updateAnnotationInUR(ur, logger)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// updateAnnotationInUR - function used to update UR annotation
|
||||
// updating UR will trigger reprocessing of UR and recreation/updation of generated resource
|
||||
func (h *handlers) updateAnnotationInUR(ur *urkyverno.UpdateRequest, logger logr.Logger) {
|
||||
urAnnotations := ur.Annotations
|
||||
if len(urAnnotations) == 0 {
|
||||
urAnnotations = make(map[string]string)
|
||||
}
|
||||
h.mu.Lock()
|
||||
urAnnotations["generate.kyverno.io/updation-time"] = time.Now().String()
|
||||
ur.SetAnnotations(urAnnotations)
|
||||
h.mu.Unlock()
|
||||
|
||||
patch := jsonutils.NewPatch(
|
||||
"/metadata/annotations",
|
||||
"replace",
|
||||
ur.Annotations,
|
||||
)
|
||||
|
||||
new, err := gencommon.PatchUpdateRequest(ur, patch, h.kyvernoClient)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to update update request update-time annotations for the resource", "update request", ur.Name)
|
||||
return
|
||||
}
|
||||
new.Status.State = urkyverno.Pending
|
||||
if _, err := h.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).UpdateStatus(contextdefault.TODO(), new, metav1.UpdateOptions{}); err != nil {
|
||||
logger.Error(err, "failed to set UpdateRequest state to Pending", "update request", ur.Name)
|
||||
}
|
||||
}
|
||||
|
||||
//handleUpdateGenerateTargetResource - handles update of target resource for generate policy
|
||||
func (h *handlers) handleUpdateGenerateTargetResource(request *admissionv1.AdmissionRequest, policies []kyverno.PolicyInterface, resLabels map[string]string, logger logr.Logger) {
|
||||
enqueueBool := false
|
||||
newRes, err := enginutils.ConvertToUnstructured(request.Object.Raw)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to convert object resource to unstructured format")
|
||||
}
|
||||
|
||||
policyName := resLabels["policy.kyverno.io/policy-name"]
|
||||
targetSourceName := newRes.GetName()
|
||||
targetSourceKind := newRes.GetKind()
|
||||
|
||||
policy, err := h.kyvernoClient.KyvernoV1().ClusterPolicies().Get(contextdefault.TODO(), policyName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get policy from kyverno client.", "policy name", policyName)
|
||||
return
|
||||
}
|
||||
|
||||
for _, rule := range autogen.ComputeRules(policy) {
|
||||
if rule.Generation.Kind == targetSourceKind && rule.Generation.Name == targetSourceName {
|
||||
updatedRule, err := getGeneratedByResource(newRes, resLabels, h.client, rule, logger)
|
||||
if err != nil {
|
||||
logger.V(4).Info("skipping generate policy and resource pattern validaton", "error", err)
|
||||
} else {
|
||||
data := updatedRule.Generation.DeepCopy().GetData()
|
||||
if data != nil {
|
||||
if _, err := gen.ValidateResourceWithPattern(logger, newRes.Object, data); err != nil {
|
||||
enqueueBool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
cloneName := updatedRule.Generation.Clone.Name
|
||||
if cloneName != "" {
|
||||
obj, err := h.client.GetResource("", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name)
|
||||
if err != nil {
|
||||
logger.Error(err, fmt.Sprintf("source resource %s/%s/%s not found.", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name))
|
||||
continue
|
||||
}
|
||||
|
||||
sourceObj, newResObj := stripNonPolicyFields(obj.Object, newRes.Object, logger)
|
||||
|
||||
if _, err := gen.ValidateResourceWithPattern(logger, newResObj, sourceObj); err != nil {
|
||||
enqueueBool = true
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if enqueueBool {
|
||||
urName := resLabels["policy.kyverno.io/gr-name"]
|
||||
ur, err := h.urLister.Get(urName)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request", "name", urName)
|
||||
return
|
||||
}
|
||||
h.updateAnnotationInUR(ur, logger)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handlers) deleteGR(logger logr.Logger, engineResponse *response.EngineResponse) {
|
||||
logger.V(4).Info("querying all update requests")
|
||||
selector := labels.SelectorFromSet(labels.Set(map[string]string{
|
||||
urkyverno.URGeneratePolicyLabel: engineResponse.PolicyResponse.Policy.Name,
|
||||
"generate.kyverno.io/resource-name": engineResponse.PolicyResponse.Resource.Name,
|
||||
"generate.kyverno.io/resource-kind": engineResponse.PolicyResponse.Resource.Kind,
|
||||
"generate.kyverno.io/resource-namespace": engineResponse.PolicyResponse.Resource.Namespace,
|
||||
}))
|
||||
|
||||
urList, err := h.urLister.List(selector)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request for the resource", "kind", engineResponse.PolicyResponse.Resource.Kind, "name", engineResponse.PolicyResponse.Resource.Name, "namespace", engineResponse.PolicyResponse.Resource.Namespace)
|
||||
return
|
||||
}
|
||||
|
||||
for _, v := range urList {
|
||||
err := h.kyvernoClient.KyvernoV1beta1().UpdateRequests(config.KyvernoNamespace()).Delete(contextdefault.TODO(), v.GetName(), metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to update ur")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// type updateRequestResponse struct {
|
||||
// ur urkyverno.UpdateRequestSpec
|
||||
// err error
|
||||
// }
|
|
@@ -1,4 +1,4 @@
|
|||
package webhooks
|
||||
package resource
|
||||
|
||||
import (
|
||||
"reflect"
|
478
pkg/webhooks/resource/handlers.go
Normal file
|
@@ -0,0 +1,478 @@
|
|||
package resource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
|
||||
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
|
||||
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
|
||||
urlister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/common"
|
||||
"github.com/kyverno/kyverno/pkg/config"
|
||||
client "github.com/kyverno/kyverno/pkg/dclient"
|
||||
"github.com/kyverno/kyverno/pkg/engine"
|
||||
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
|
||||
"github.com/kyverno/kyverno/pkg/engine/response"
|
||||
engineutils2 "github.com/kyverno/kyverno/pkg/engine/utils"
|
||||
"github.com/kyverno/kyverno/pkg/event"
|
||||
"github.com/kyverno/kyverno/pkg/metrics"
|
||||
"github.com/kyverno/kyverno/pkg/openapi"
|
||||
"github.com/kyverno/kyverno/pkg/policycache"
|
||||
"github.com/kyverno/kyverno/pkg/policyreport"
|
||||
"github.com/kyverno/kyverno/pkg/userinfo"
|
||||
"github.com/kyverno/kyverno/pkg/utils"
|
||||
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
|
||||
engineutils "github.com/kyverno/kyverno/pkg/utils/engine"
|
||||
jsonutils "github.com/kyverno/kyverno/pkg/utils/json"
|
||||
"github.com/kyverno/kyverno/pkg/webhooks"
|
||||
webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
|
||||
"github.com/pkg/errors"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
corelister "k8s.io/client-go/listers/core/v1"
|
||||
rbaclister "k8s.io/client-go/listers/rbac/v1"
|
||||
)
|
||||
|
||||
type handlers struct {
|
||||
// clients
|
||||
client client.Interface
|
||||
kyvernoClient kyvernoclient.Interface
|
||||
|
||||
// config
|
||||
configuration config.Configuration
|
||||
promConfig *metrics.PromConfig
|
||||
|
||||
// cache
|
||||
pCache policycache.Cache
|
||||
|
||||
// listers
|
||||
nsLister corelister.NamespaceLister
|
||||
rbLister rbaclister.RoleBindingLister
|
||||
crbLister rbaclister.ClusterRoleBindingLister
|
||||
urLister urlister.UpdateRequestNamespaceLister
|
||||
|
||||
prGenerator policyreport.GeneratorInterface
|
||||
urGenerator webhookgenerate.Interface
|
||||
eventGen event.Interface
|
||||
auditHandler AuditHandler
|
||||
openAPIController *openapi.Controller
|
||||
mu sync.RWMutex
|
||||
}
|
||||
|
||||
func NewHandlers(
|
||||
client client.Interface,
|
||||
kyvernoClient kyvernoclient.Interface,
|
||||
configuration config.Configuration,
|
||||
promConfig *metrics.PromConfig,
|
||||
pCache policycache.Cache,
|
||||
nsLister corelister.NamespaceLister,
|
||||
rbLister rbaclister.RoleBindingLister,
|
||||
crbLister rbaclister.ClusterRoleBindingLister,
|
||||
urLister urlister.UpdateRequestNamespaceLister,
|
||||
prGenerator policyreport.GeneratorInterface,
|
||||
urGenerator webhookgenerate.Interface,
|
||||
eventGen event.Interface,
|
||||
auditHandler AuditHandler,
|
||||
openAPIController *openapi.Controller,
|
||||
) webhooks.Handlers {
|
||||
return &handlers{
|
||||
client: client,
|
||||
kyvernoClient: kyvernoClient,
|
||||
configuration: configuration,
|
||||
promConfig: promConfig,
|
||||
pCache: pCache,
|
||||
nsLister: nsLister,
|
||||
rbLister: rbLister,
|
||||
crbLister: crbLister,
|
||||
urLister: urLister,
|
||||
prGenerator: prGenerator,
|
||||
urGenerator: urGenerator,
|
||||
eventGen: eventGen,
|
||||
auditHandler: auditHandler,
|
||||
openAPIController: openAPIController,
|
||||
}
|
||||
}
|
||||
|
||||
func (h *handlers) Validate(logger logr.Logger, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
|
||||
if request.Operation == admissionv1.Delete {
|
||||
h.handleDelete(logger, request)
|
||||
}
|
||||
|
||||
if excludeKyvernoResources(request.Kind.Kind) {
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
|
||||
kind := request.Kind.Kind
|
||||
logger.V(4).Info("received an admission request in validating webhook", "kind", kind)
|
||||
|
||||
// timestamp at which this admission request got triggered
|
||||
requestTime := time.Now().Unix()
|
||||
policies := h.pCache.GetPolicies(policycache.ValidateEnforce, kind, request.Namespace)
|
||||
mutatePolicies := h.pCache.GetPolicies(policycache.Mutate, kind, request.Namespace)
|
||||
generatePolicies := h.pCache.GetPolicies(policycache.Generate, kind, request.Namespace)
|
||||
imageVerifyValidatePolicies := h.pCache.GetPolicies(policycache.VerifyImagesValidate, kind, request.Namespace)
|
||||
policies = append(policies, imageVerifyValidatePolicies...)
|
||||
|
||||
if len(policies) == 0 && len(mutatePolicies) == 0 && len(generatePolicies) == 0 {
|
||||
logger.V(4).Info("no policies matched admission request", "kind", kind)
|
||||
}
|
||||
|
||||
if len(generatePolicies) == 0 && request.Operation == admissionv1.Update {
|
||||
// handle generate source resource updates
|
||||
go h.handleUpdatesForGenerateRules(logger, request, []kyvernov1.PolicyInterface{})
|
||||
}
|
||||
|
||||
logger.V(4).Info("processing policies for validate admission request",
|
||||
"kind", kind, "validate", len(policies), "mutate", len(mutatePolicies), "generate", len(generatePolicies))
|
||||
|
||||
var roles, clusterRoles []string
|
||||
if containsRBACInfo(policies, generatePolicies) {
|
||||
var err error
|
||||
roles, clusterRoles, err = userinfo.GetRoleRef(h.rbLister, h.crbLister, request, h.configuration)
|
||||
if err != nil {
|
||||
return errorResponse(logger, err, "failed to fetch RBAC data")
|
||||
}
|
||||
}
|
||||
|
||||
userRequestInfo := kyvernov1beta1.RequestInfo{
|
||||
Roles: roles,
|
||||
ClusterRoles: clusterRoles,
|
||||
AdmissionUserInfo: *request.UserInfo.DeepCopy(),
|
||||
}
|
||||
|
||||
ctx, err := newVariablesContext(request, &userRequestInfo)
|
||||
if err != nil {
|
||||
return errorResponse(logger, err, "failed create policy rule context")
|
||||
}
|
||||
|
||||
namespaceLabels := make(map[string]string)
|
||||
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
|
||||
namespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, logger)
|
||||
}
|
||||
|
||||
newResource, oldResource, err := utils.ExtractResources(nil, request)
|
||||
if err != nil {
|
||||
return errorResponse(logger, err, "failed create parse resource")
|
||||
}
|
||||
|
||||
if err := ctx.AddImageInfos(&newResource); err != nil {
|
||||
return errorResponse(logger, err, "failed add image information to policy rule context")
|
||||
}
|
||||
|
||||
policyContext := &engine.PolicyContext{
|
||||
NewResource: newResource,
|
||||
OldResource: oldResource,
|
||||
AdmissionInfo: userRequestInfo,
|
||||
ExcludeGroupRole: h.configuration.GetExcludeGroupRole(),
|
||||
ExcludeResourceFunc: h.configuration.ToFilter,
|
||||
JSONContext: ctx,
|
||||
Client: h.client,
|
||||
AdmissionOperation: true,
|
||||
}
|
||||
|
||||
vh := &validationHandler{
|
||||
log: logger,
|
||||
eventGen: h.eventGen,
|
||||
prGenerator: h.prGenerator,
|
||||
}
|
||||
|
||||
ok, msg := vh.handleValidation(h.promConfig, request, policies, policyContext, namespaceLabels, requestTime)
|
||||
if !ok {
|
||||
logger.Info("admission request denied")
|
||||
return admissionutils.ResponseFailure(false, msg)
|
||||
}
|
||||
|
||||
// push the admission request to the audit handler; this does not block the admission response
|
||||
h.auditHandler.Add(request.DeepCopy())
|
||||
|
||||
go h.createUpdateRequests(logger, request, policyContext, generatePolicies, mutatePolicies, requestTime)
|
||||
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
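A minimal sketch of an admission request that could exercise Validate in a unit test; the handler value, logger, and pod payload below are assumptions for illustration, not part of this change:

package resource_test

import (
	"github.com/go-logr/logr"
	"github.com/kyverno/kyverno/pkg/webhooks"
	admissionv1 "k8s.io/api/admission/v1"
	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
)

// exampleValidateCall builds a bare-bones Pod CREATE request and passes it to the handler.
func exampleValidateCall(h webhooks.Handlers, logger logr.Logger) *admissionv1.AdmissionResponse {
	req := &admissionv1.AdmissionRequest{
		UID:       "00000000-0000-0000-0000-000000000001",
		Kind:      metav1.GroupVersionKind{Version: "v1", Kind: "Pod"},
		Resource:  metav1.GroupVersionResource{Version: "v1", Resource: "pods"},
		Namespace: "default",
		Name:      "nginx",
		Operation: admissionv1.Create,
		UserInfo:  authenticationv1.UserInfo{Username: "system:serviceaccount:default:demo"},
		Object: runtime.RawExtension{
			Raw: []byte(`{"apiVersion":"v1","kind":"Pod","metadata":{"name":"nginx","namespace":"default"},"spec":{"containers":[{"name":"nginx","image":"nginx"}]}}`),
		},
	}
	return h.Validate(logger, req)
}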
|
||||
|
||||
func (h *handlers) Mutate(logger logr.Logger, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
|
||||
if excludeKyvernoResources(request.Kind.Kind) {
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
if request.Operation == admissionv1.Delete {
|
||||
resource, err := utils.ConvertResource(request.OldObject.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
|
||||
if err == nil {
|
||||
h.prGenerator.Add(buildDeletionPrInfo(resource))
|
||||
} else {
|
||||
logger.Info(fmt.Sprintf("Converting oldObject failed: %v", err))
|
||||
}
|
||||
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
kind := request.Kind.Kind
|
||||
logger.V(4).Info("received an admission request in mutating webhook", "kind", kind)
|
||||
requestTime := time.Now().Unix()
|
||||
mutatePolicies := h.pCache.GetPolicies(policycache.Mutate, kind, request.Namespace)
|
||||
verifyImagesPolicies := h.pCache.GetPolicies(policycache.VerifyImagesMutate, kind, request.Namespace)
|
||||
if len(mutatePolicies) == 0 && len(verifyImagesPolicies) == 0 {
|
||||
logger.V(4).Info("no policies matched mutate admission request", "kind", kind)
|
||||
return admissionutils.ResponseSuccess(true, "")
|
||||
}
|
||||
logger.V(4).Info("processing policies for mutate admission request", "kind", kind, "mutatePolicies", len(mutatePolicies), "verifyImagesPolicies", len(verifyImagesPolicies))
|
||||
addRoles := containsRBACInfo(mutatePolicies)
|
||||
policyContext, err := h.buildPolicyContext(request, addRoles)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to build policy context")
|
||||
return admissionutils.ResponseFailure(false, err.Error())
|
||||
}
|
||||
// update container images to a canonical form
|
||||
if err := enginectx.MutateResourceWithImageInfo(request.Object.Raw, policyContext.JSONContext); err != nil {
|
||||
logger.Error(err, "failed to patch images info to resource, policies that mutate images may be impacted")
|
||||
}
|
||||
mutatePatches := h.applyMutatePolicies(logger, request, policyContext, mutatePolicies, requestTime)
|
||||
newRequest := patchRequest(mutatePatches, request, logger)
|
||||
imagePatches, err := h.applyImageVerifyPolicies(logger, newRequest, policyContext, verifyImagesPolicies)
|
||||
if err != nil {
|
||||
logger.Error(err, "image verification failed")
|
||||
return admissionutils.ResponseFailure(false, err.Error())
|
||||
}
|
||||
return admissionutils.ResponseSuccessWithPatch(true, "", append(mutatePatches, imagePatches...))
|
||||
}
|
||||
|
||||
func (h *handlers) buildPolicyContext(request *admissionv1.AdmissionRequest, addRoles bool) (*engine.PolicyContext, error) {
|
||||
userRequestInfo := kyvernov1beta1.RequestInfo{
|
||||
AdmissionUserInfo: *request.UserInfo.DeepCopy(),
|
||||
}
|
||||
if addRoles {
|
||||
var err error
|
||||
userRequestInfo.Roles, userRequestInfo.ClusterRoles, err = userinfo.GetRoleRef(h.rbLister, h.crbLister, request, h.configuration)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to fetch RBAC information for request")
|
||||
}
|
||||
}
|
||||
ctx, err := newVariablesContext(request, &userRequestInfo)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "failed to create policy rule context")
|
||||
}
|
||||
resource, err := convertResource(request, request.Object.Raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err := ctx.AddImageInfos(&resource); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to add image information to the policy rule context")
|
||||
}
|
||||
policyContext := &engine.PolicyContext{
|
||||
NewResource: resource,
|
||||
AdmissionInfo: userRequestInfo,
|
||||
ExcludeGroupRole: h.configuration.GetExcludeGroupRole(),
|
||||
ExcludeResourceFunc: h.configuration.ToFilter,
|
||||
JSONContext: ctx,
|
||||
Client: h.client,
|
||||
AdmissionOperation: true,
|
||||
}
|
||||
if request.Operation == admissionv1.Update {
|
||||
policyContext.OldResource, err = convertResource(request, request.OldObject.Raw)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return policyContext, nil
|
||||
}
|
||||
|
||||
func (h *handlers) applyMutatePolicies(logger logr.Logger, request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, policies []kyvernov1.PolicyInterface, ts int64) []byte {
|
||||
mutatePatches, mutateEngineResponses := h.handleMutation(logger, request, policyContext, policies)
|
||||
logger.V(6).Info("", "generated patches", string(mutatePatches))
|
||||
|
||||
admissionReviewLatencyDuration := int64(time.Since(time.Unix(ts, 0)))
|
||||
go h.registerAdmissionReviewDurationMetricMutate(logger, string(request.Operation), mutateEngineResponses, admissionReviewLatencyDuration)
|
||||
go h.registerAdmissionRequestsMetricMutate(logger, string(request.Operation), mutateEngineResponses)
|
||||
|
||||
return mutatePatches
|
||||
}
|
||||
|
||||
// handleMutation handles mutating webhook admission request
|
||||
// return value: generated patches and the engine responses corresponding to the applied policies
|
||||
func (h *handlers) handleMutation(logger logr.Logger, request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, policies []kyvernov1.PolicyInterface) ([]byte, []*response.EngineResponse) {
|
||||
if len(policies) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
patchedResource := request.Object.Raw
|
||||
newR, oldR, err := utils.ExtractResources(patchedResource, request)
|
||||
if err != nil {
|
||||
// as resource cannot be parsed, we skip processing
|
||||
logger.Error(err, "failed to extract resource")
|
||||
return nil, nil
|
||||
}
|
||||
var deletionTimeStamp *metav1.Time
|
||||
if reflect.DeepEqual(newR, unstructured.Unstructured{}) {
|
||||
deletionTimeStamp = newR.GetDeletionTimestamp()
|
||||
} else {
|
||||
deletionTimeStamp = oldR.GetDeletionTimestamp()
|
||||
}
|
||||
|
||||
if deletionTimeStamp != nil && request.Operation == admissionv1.Update {
|
||||
return nil, nil
|
||||
}
|
||||
var patches [][]byte
|
||||
var engineResponses []*response.EngineResponse
|
||||
|
||||
for _, policy := range policies {
|
||||
spec := policy.GetSpec()
|
||||
if !spec.HasMutate() {
|
||||
continue
|
||||
}
|
||||
logger.V(3).Info("applying policy mutate rules", "policy", policy.GetName())
|
||||
policyContext.Policy = policy
|
||||
engineResponse, policyPatches, err := h.applyMutation(request, policyContext, logger)
|
||||
if err != nil {
|
||||
// TODO report errors in engineResponse and record in metrics
|
||||
logger.Error(err, "mutate error")
|
||||
continue
|
||||
}
|
||||
|
||||
if len(policyPatches) > 0 {
|
||||
patches = append(patches, policyPatches...)
|
||||
rules := engineResponse.GetSuccessRules()
|
||||
if len(rules) != 0 {
|
||||
logger.Info("mutation rules from policy applied successfully", "policy", policy.GetName(), "rules", rules)
|
||||
}
|
||||
}
|
||||
|
||||
policyContext.NewResource = engineResponse.PatchedResource
|
||||
engineResponses = append(engineResponses, engineResponse)
|
||||
|
||||
// registering the kyverno_policy_results_total metric concurrently
|
||||
go h.registerPolicyResultsMetricMutation(logger, string(request.Operation), policy, *engineResponse)
|
||||
// registering the kyverno_policy_execution_duration_seconds metric concurrently
|
||||
go h.registerPolicyExecutionDurationMetricMutate(logger, string(request.Operation), policy, *engineResponse)
|
||||
}
|
||||
|
||||
// generate annotations
|
||||
if annPatches := utils.GenerateAnnotationPatches(engineResponses, logger); annPatches != nil {
|
||||
patches = append(patches, annPatches...)
|
||||
}
|
||||
|
||||
// REPORTING EVENTS
|
||||
// Scenario 1:
|
||||
// some/all policies failed to apply on the resource. a policy violation is generated.
|
||||
// create an event on the resource and the policy that failed
|
||||
// Scenario 2:
|
||||
// all policies were applied successfully.
|
||||
// create an event on the resource
|
||||
// ADD EVENTS
|
||||
events := generateEvents(engineResponses, false, logger)
|
||||
h.eventGen.Add(events...)
|
||||
|
||||
// debug info
|
||||
func() {
|
||||
if len(patches) != 0 {
|
||||
logger.V(4).Info("JSON patches generated")
|
||||
}
|
||||
|
||||
// if any of the policies fails, print out the error
|
||||
if !engineutils.IsResponseSuccessful(engineResponses) {
|
||||
logger.Error(errors.New(getErrorMsg(engineResponses)), "failed to apply mutation rules on the resource, reporting policy violation")
|
||||
}
|
||||
}()
|
||||
|
||||
// patches holds all the successful patches; if no patch was created, nil is returned
|
||||
return jsonutils.JoinPatches(patches...), engineResponses
|
||||
}
|
||||
|
||||
func (h *handlers) applyMutation(request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, logger logr.Logger) (*response.EngineResponse, [][]byte, error) {
|
||||
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
|
||||
policyContext.NamespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, logger)
|
||||
}
|
||||
|
||||
engineResponse := engine.Mutate(policyContext)
|
||||
policyPatches := engineResponse.GetPatches()
|
||||
|
||||
if !engineResponse.IsSuccessful() && len(engineResponse.GetFailedRules()) > 0 {
|
||||
return nil, nil, fmt.Errorf("failed to apply policy %s rules %v", policyContext.Policy.GetName(), engineResponse.GetFailedRules())
|
||||
}
|
||||
|
||||
if engineResponse.PatchedResource.GetKind() != "*" {
|
||||
err := h.openAPIController.ValidateResource(*engineResponse.PatchedResource.DeepCopy(), engineResponse.PatchedResource.GetAPIVersion(), engineResponse.PatchedResource.GetKind())
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrapf(err, "failed to validate resource mutated by policy %s", policyContext.Policy.GetName())
|
||||
}
|
||||
}
|
||||
|
||||
return engineResponse, policyPatches, nil
|
||||
}
|
||||
|
||||
func (h *handlers) applyImageVerifyPolicies(logger logr.Logger, request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, policies []kyvernov1.PolicyInterface) ([]byte, error) {
|
||||
ok, message, imagePatches := h.handleVerifyImages(logger, request, policyContext, policies)
|
||||
if !ok {
|
||||
return nil, errors.New(message)
|
||||
}
|
||||
|
||||
logger.V(6).Info("images verified", "patches", string(imagePatches))
|
||||
return imagePatches, nil
|
||||
}
|
||||
|
||||
func (h *handlers) handleVerifyImages(logger logr.Logger, request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, policies []kyvernov1.PolicyInterface) (bool, string, []byte) {
|
||||
if len(policies) == 0 {
|
||||
return true, "", nil
|
||||
}
|
||||
|
||||
var engineResponses []*response.EngineResponse
|
||||
var patches [][]byte
|
||||
verifiedImageData := &engine.ImageVerificationMetadata{}
|
||||
for _, p := range policies {
|
||||
policyContext.Policy = p
|
||||
resp, ivm := engine.VerifyAndPatchImages(policyContext)
|
||||
|
||||
engineResponses = append(engineResponses, resp)
|
||||
patches = append(patches, resp.GetPatches()...)
|
||||
verifiedImageData.Merge(ivm)
|
||||
}
|
||||
|
||||
prInfos := policyreport.GeneratePRsFromEngineResponse(engineResponses, logger)
|
||||
h.prGenerator.Add(prInfos...)
|
||||
|
||||
blocked := toBlockResource(engineResponses, logger)
|
||||
events := generateEvents(engineResponses, blocked, logger)
|
||||
h.eventGen.Add(events...)
|
||||
|
||||
if blocked {
|
||||
logger.V(4).Info("resource blocked")
|
||||
return false, getEnforceFailureErrorMsg(engineResponses), nil
|
||||
}
|
||||
|
||||
if !verifiedImageData.IsEmpty() {
|
||||
hasAnnotations := hasAnnotations(policyContext)
|
||||
annotationPatches, err := verifiedImageData.Patches(hasAnnotations, logger)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to create image verification annotation patches")
|
||||
} else {
|
||||
// add annotation patches first
|
||||
patches = append(annotationPatches, patches...)
|
||||
}
|
||||
}
|
||||
|
||||
return true, "", jsonutils.JoinPatches(patches...)
|
||||
}
|
||||
|
||||
func (h *handlers) handleDelete(logger logr.Logger, request *admissionv1.AdmissionRequest) {
|
||||
resource, err := engineutils2.ConvertToUnstructured(request.OldObject.Raw)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to convert object resource to unstructured format")
|
||||
}
|
||||
|
||||
resLabels := resource.GetLabels()
|
||||
if resLabels["app.kubernetes.io/managed-by"] == "kyverno" && request.Operation == admissionv1.Delete {
|
||||
urName := resLabels["policy.kyverno.io/gr-name"]
|
||||
ur, err := h.urLister.Get(urName)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to get update request", "name", urName)
|
||||
return
|
||||
}
|
||||
|
||||
if ur.Spec.Type == kyvernov1beta1.Mutate {
|
||||
return
|
||||
}
|
||||
h.updateAnnotationInUR(ur, logger)
|
||||
}
|
||||
}
|
|
@@ -1,4 +1,4 @@
|
|||
package webhooks
|
||||
package resource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@@ -27,17 +27,17 @@ func registerMetric(logger logr.Logger, m string, requestOperation string, r rep
|
|||
|
||||
// ADMISSION REVIEW
|
||||
|
||||
func (ws *WebhookServer) registerAdmissionReviewDurationMetricMutate(logger logr.Logger, requestOperation string, engineResponses []*response.EngineResponse, admissionReviewLatencyDuration int64) {
|
||||
func (h *handlers) registerAdmissionReviewDurationMetricMutate(logger logr.Logger, requestOperation string, engineResponses []*response.EngineResponse, admissionReviewLatencyDuration int64) {
|
||||
registerMetric(logger, "kyverno_admission_review_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return admissionReviewDuration.ProcessEngineResponses(ws.promConfig, engineResponses, admissionReviewLatencyDuration, op)
|
||||
return admissionReviewDuration.ProcessEngineResponses(h.promConfig, engineResponses, admissionReviewLatencyDuration, op)
|
||||
})
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) registerAdmissionReviewDurationMetricGenerate(logger logr.Logger, requestOperation string, latencyReceiver *chan int64, engineResponsesReceiver *chan []*response.EngineResponse) {
|
||||
func (h *handlers) registerAdmissionReviewDurationMetricGenerate(logger logr.Logger, requestOperation string, latencyReceiver *chan int64, engineResponsesReceiver *chan []*response.EngineResponse) {
|
||||
defer close(*latencyReceiver)
|
||||
defer close(*engineResponsesReceiver)
|
||||
registerMetric(logger, "kyverno_admission_review_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return admissionReviewDuration.ProcessEngineResponses(ws.promConfig, <-(*engineResponsesReceiver), <-(*latencyReceiver), op)
|
||||
return admissionReviewDuration.ProcessEngineResponses(h.promConfig, <-(*engineResponsesReceiver), <-(*latencyReceiver), op)
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -49,16 +49,16 @@ func registerAdmissionReviewDurationMetricValidate(logger logr.Logger, promConfi
|
|||
|
||||
// ADMISSION REQUEST
|
||||
|
||||
func (ws *WebhookServer) registerAdmissionRequestsMetricMutate(logger logr.Logger, requestOperation string, engineResponses []*response.EngineResponse) {
|
||||
func (h *handlers) registerAdmissionRequestsMetricMutate(logger logr.Logger, requestOperation string, engineResponses []*response.EngineResponse) {
|
||||
registerMetric(logger, "kyverno_admission_requests_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return admissionRequests.ProcessEngineResponses(ws.promConfig, engineResponses, op)
|
||||
return admissionRequests.ProcessEngineResponses(h.promConfig, engineResponses, op)
|
||||
})
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) registerAdmissionRequestsMetricGenerate(logger logr.Logger, requestOperation string, engineResponsesReceiver *chan []*response.EngineResponse) {
|
||||
func (h *handlers) registerAdmissionRequestsMetricGenerate(logger logr.Logger, requestOperation string, engineResponsesReceiver *chan []*response.EngineResponse) {
|
||||
defer close(*engineResponsesReceiver)
|
||||
registerMetric(logger, "kyverno_admission_requests_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return admissionRequests.ProcessEngineResponses(ws.promConfig, <-(*engineResponsesReceiver), op)
|
||||
return admissionRequests.ProcessEngineResponses(h.promConfig, <-(*engineResponsesReceiver), op)
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -70,9 +70,9 @@ func registerAdmissionRequestsMetricValidate(logger logr.Logger, promConfig *met
|
|||
|
||||
// POLICY RESULTS
|
||||
|
||||
func (ws *WebhookServer) registerPolicyResultsMetricMutation(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
func (h *handlers) registerPolicyResultsMetricMutation(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
registerMetric(logger, "kyverno_policy_results_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return policyResults.ProcessEngineResponse(ws.promConfig, policy, engineResponse, metrics.AdmissionRequest, op)
|
||||
return policyResults.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, op)
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -82,17 +82,17 @@ func registerPolicyResultsMetricValidation(logger logr.Logger, promConfig *metri
|
|||
})
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) registerPolicyResultsMetricGeneration(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
func (h *handlers) registerPolicyResultsMetricGeneration(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
registerMetric(logger, "kyverno_policy_results_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return policyResults.ProcessEngineResponse(ws.promConfig, policy, engineResponse, metrics.AdmissionRequest, op)
|
||||
return policyResults.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, op)
|
||||
})
|
||||
}
|
||||
|
||||
// POLICY EXECUTION
|
||||
|
||||
func (ws *WebhookServer) registerPolicyExecutionDurationMetricMutate(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
func (h *handlers) registerPolicyExecutionDurationMetricMutate(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
registerMetric(logger, "kyverno_policy_execution_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return policyExecutionDuration.ProcessEngineResponse(ws.promConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
|
||||
return policyExecutionDuration.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
|
||||
})
|
||||
}
|
||||
|
||||
|
@@ -102,8 +102,8 @@ func registerPolicyExecutionDurationMetricValidate(logger logr.Logger, promConfi
|
|||
})
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) registerPolicyExecutionDurationMetricGenerate(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
func (h *handlers) registerPolicyExecutionDurationMetricGenerate(logger logr.Logger, requestOperation string, policy kyverno.PolicyInterface, engineResponse response.EngineResponse) {
|
||||
registerMetric(logger, "kyverno_policy_execution_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
|
||||
return policyExecutionDuration.ProcessEngineResponse(ws.promConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
|
||||
return policyExecutionDuration.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
|
||||
})
|
||||
}
|
|
@@ -1,4 +1,4 @@
|
|||
package webhooks
|
||||
package resource
|
||||
|
||||
import (
|
||||
"github.com/go-logr/logr"
|
|
@@ -1,4 +1,4 @@
|
|||
package webhooks
|
||||
package resource
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
@@ -15,20 +15,19 @@ import (
|
|||
)
|
||||
|
||||
// createUpdateRequests applies generate and mutateExisting policies, and creates update requests for background reconcile
|
||||
func (ws *WebhookServer) createUpdateRequests(request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, generatePolicies, mutatePolicies []kyverno.PolicyInterface, ts int64, logger logr.Logger) {
|
||||
func (h *handlers) createUpdateRequests(logger logr.Logger, request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, generatePolicies, mutatePolicies []kyverno.PolicyInterface, ts int64) {
|
||||
admissionReviewCompletionLatencyChannel := make(chan int64, 1)
|
||||
generateEngineResponsesSenderForAdmissionReviewDurationMetric := make(chan []*response.EngineResponse, 1)
|
||||
generateEngineResponsesSenderForAdmissionRequestsCountMetric := make(chan []*response.EngineResponse, 1)
|
||||
|
||||
go ws.handleMutateExisting(request, mutatePolicies, policyContext, ts)
|
||||
go ws.handleGenerate(request, generatePolicies, policyContext, ts, &admissionReviewCompletionLatencyChannel, &generateEngineResponsesSenderForAdmissionReviewDurationMetric, &generateEngineResponsesSenderForAdmissionRequestsCountMetric)
|
||||
go h.handleMutateExisting(logger, request, mutatePolicies, policyContext, ts)
|
||||
go h.handleGenerate(logger, request, generatePolicies, policyContext, ts, &admissionReviewCompletionLatencyChannel, &generateEngineResponsesSenderForAdmissionReviewDurationMetric, &generateEngineResponsesSenderForAdmissionRequestsCountMetric)
|
||||
|
||||
go ws.registerAdmissionReviewDurationMetricGenerate(logger, string(request.Operation), &admissionReviewCompletionLatencyChannel, &generateEngineResponsesSenderForAdmissionReviewDurationMetric)
|
||||
go ws.registerAdmissionRequestsMetricGenerate(logger, string(request.Operation), &generateEngineResponsesSenderForAdmissionRequestsCountMetric)
|
||||
go h.registerAdmissionReviewDurationMetricGenerate(logger, string(request.Operation), &admissionReviewCompletionLatencyChannel, &generateEngineResponsesSenderForAdmissionReviewDurationMetric)
|
||||
go h.registerAdmissionRequestsMetricGenerate(logger, string(request.Operation), &generateEngineResponsesSenderForAdmissionRequestsCountMetric)
|
||||
}
|
||||
|
||||
func (ws *WebhookServer) handleMutateExisting(request *admissionv1.AdmissionRequest, policies []kyverno.PolicyInterface, policyContext *engine.PolicyContext, admissionRequestTimestamp int64) {
|
||||
logger := ws.log.WithValues("action", "mutateExisting", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
|
||||
func (h *handlers) handleMutateExisting(logger logr.Logger, request *admissionv1.AdmissionRequest, policies []kyverno.PolicyInterface, policyContext *engine.PolicyContext, admissionRequestTimestamp int64) {
|
||||
logger.V(4).Info("update request")
|
||||
|
||||
if request.Operation == admissionv1.Delete {
|
||||
|
@@ -62,21 +61,20 @@ func (ws *WebhookServer) handleMutateExisting(request *admissionv1.AdmissionRequ
|
|||
}
|
||||
|
||||
// registering the kyverno_policy_results_total metric concurrently
|
||||
go ws.registerPolicyResultsMetricMutation(logger, string(request.Operation), policy, *engineResponse)
|
||||
|
||||
go h.registerPolicyResultsMetricMutation(logger, string(request.Operation), policy, *engineResponse)
|
||||
// registering the kyverno_policy_execution_duration_seconds metric concurrently
|
||||
go ws.registerPolicyExecutionDurationMetricMutate(logger, string(request.Operation), policy, *engineResponse)
|
||||
go h.registerPolicyExecutionDurationMetricMutate(logger, string(request.Operation), policy, *engineResponse)
|
||||
}
|
||||
|
||||
if failedResponse := applyUpdateRequest(request, urkyverno.Mutate, ws.urGenerator, policyContext.AdmissionInfo, request.Operation, engineResponses...); failedResponse != nil {
|
||||
if failedResponse := applyUpdateRequest(request, urkyverno.Mutate, h.urGenerator, policyContext.AdmissionInfo, request.Operation, engineResponses...); failedResponse != nil {
|
||||
for _, failedUR := range failedResponse {
|
||||
err := fmt.Errorf("failed to create update request: %v", failedUR.err)
|
||||
events := event.NewBackgroundFailedEvent(err, failedUR.ur.Policy, "", event.GeneratePolicyController, &policyContext.NewResource)
|
||||
ws.eventGen.Add(events...)
|
||||
h.eventGen.Add(events...)
|
||||
}
|
||||
}
|
||||
|
||||
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
|
||||
go ws.registerAdmissionReviewDurationMetricMutate(logger, string(request.Operation), engineResponses, admissionReviewLatencyDuration)
|
||||
go ws.registerAdmissionRequestsMetricMutate(logger, string(request.Operation), engineResponses)
|
||||
go h.registerAdmissionReviewDurationMetricMutate(logger, string(request.Operation), engineResponses, admissionReviewLatencyDuration)
|
||||
go h.registerAdmissionRequestsMetricMutate(logger, string(request.Operation), engineResponses)
|
||||
}
|
363
pkg/webhooks/resource/utils.go
Normal file
|
@@ -0,0 +1,363 @@
|
|||
package resource
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"strings"
|
||||
|
||||
"github.com/gardener/controller-manager-library/pkg/logger"
|
||||
"github.com/go-logr/logr"
|
||||
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
|
||||
kyvernov1beta1 "github.com/kyverno/kyverno/api/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/autogen"
|
||||
client "github.com/kyverno/kyverno/pkg/dclient"
|
||||
"github.com/kyverno/kyverno/pkg/engine"
|
||||
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
|
||||
"github.com/kyverno/kyverno/pkg/engine/response"
|
||||
engineutils "github.com/kyverno/kyverno/pkg/engine/utils"
|
||||
"github.com/kyverno/kyverno/pkg/engine/variables"
|
||||
"github.com/kyverno/kyverno/pkg/policyreport"
|
||||
"github.com/kyverno/kyverno/pkg/utils"
|
||||
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
|
||||
engineutils2 "github.com/kyverno/kyverno/pkg/utils/engine"
|
||||
"github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
|
||||
"github.com/pkg/errors"
|
||||
yamlv2 "gopkg.in/yaml.v2"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
)
|
||||
|
||||
type updateRequestResponse struct {
|
||||
ur kyvernov1beta1.UpdateRequestSpec
|
||||
err error
|
||||
}
|
||||
|
||||
func excludeKyvernoResources(kind string) bool {
|
||||
switch kind {
|
||||
case "ClusterPolicyReport":
|
||||
return true
|
||||
case "PolicyReport":
|
||||
return true
|
||||
case "ReportChangeRequest":
|
||||
return true
|
||||
case "GenerateRequest":
|
||||
return true
|
||||
case "ClusterReportChangeRequest":
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
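The same filter could be expressed as a lookup table; shown here only as an equivalent sketch of the behaviour above, not as part of this change:

// kyvernoOwnedKinds is the set of kinds Kyverno manages itself and skips in the resource webhooks.
var kyvernoOwnedKinds = map[string]bool{
	"ClusterPolicyReport":        true,
	"PolicyReport":               true,
	"ReportChangeRequest":        true,
	"ClusterReportChangeRequest": true,
	"GenerateRequest":            true,
}

func excludeKyvernoResourcesAlt(kind string) bool {
	return kyvernoOwnedKinds[kind]
}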
|
||||
|
||||
func errorResponse(logger logr.Logger, err error, message string) *admissionv1.AdmissionResponse {
|
||||
logger.Error(err, message)
|
||||
return admissionutils.ResponseFailure(false, message+": "+err.Error())
|
||||
}
|
||||
|
||||
func patchRequest(patches []byte, request *admissionv1.AdmissionRequest, logger logr.Logger) *admissionv1.AdmissionRequest {
|
||||
patchedResource := processResourceWithPatches(patches, request.Object.Raw, logger)
|
||||
newRequest := request.DeepCopy()
|
||||
newRequest.Object.Raw = patchedResource
|
||||
return newRequest
|
||||
}
|
||||
|
||||
func processResourceWithPatches(patch []byte, resource []byte, log logr.Logger) []byte {
|
||||
if patch == nil {
|
||||
return resource
|
||||
}
|
||||
resource, err := engineutils.ApplyPatchNew(resource, patch)
|
||||
if err != nil {
|
||||
log.Error(err, "failed to patch resource:", "patch", string(patch), "resource", string(resource))
|
||||
return nil
|
||||
}
|
||||
log.V(6).Info("", "patchedResource", string(resource))
|
||||
return resource
|
||||
}
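The patch bytes handled here are RFC 6902 operations; a small standalone sketch of applying such a patch with the widely used evanphx/json-patch library (the engine helper above is assumed to behave equivalently):

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	resource := []byte(`{"metadata":{"name":"nginx","labels":{"app":"nginx"}}}`)
	// One RFC 6902 operation, e.g. as a mutate rule might produce.
	patch := []byte(`[{"op":"add","path":"/metadata/labels/team","value":"platform"}]`)

	decoded, err := jsonpatch.DecodePatch(patch)
	if err != nil {
		panic(err)
	}
	patched, err := decoded.Apply(resource)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patched))
	// {"metadata":{"labels":{"app":"nginx","team":"platform"},"name":"nginx"}}
}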
|
||||
|
||||
func newVariablesContext(request *admissionv1.AdmissionRequest, userRequestInfo *kyvernov1beta1.RequestInfo) (enginectx.Interface, error) {
|
||||
ctx := enginectx.NewContext()
|
||||
if err := ctx.AddRequest(request); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load incoming request in context")
|
||||
}
|
||||
if err := ctx.AddUserInfo(*userRequestInfo); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load userInfo in context")
|
||||
}
|
||||
if err := ctx.AddServiceAccount(userRequestInfo.AdmissionUserInfo.Username); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to load service account in context")
|
||||
}
|
||||
return ctx, nil
|
||||
}
|
||||
|
||||
func containsRBACInfo(policies ...[]kyvernov1.PolicyInterface) bool {
|
||||
for _, policySlice := range policies {
|
||||
for _, policy := range policySlice {
|
||||
for _, rule := range autogen.ComputeRules(policy) {
|
||||
if checkForRBACInfo(rule) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func checkForRBACInfo(rule kyvernov1.Rule) bool {
|
||||
if len(rule.MatchResources.Roles) > 0 || len(rule.MatchResources.ClusterRoles) > 0 || len(rule.ExcludeResources.Roles) > 0 || len(rule.ExcludeResources.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
if len(rule.MatchResources.All) > 0 {
|
||||
for _, rf := range rule.MatchResources.All {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rule.MatchResources.Any) > 0 {
|
||||
for _, rf := range rule.MatchResources.Any {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rule.ExcludeResources.All) > 0 {
|
||||
for _, rf := range rule.ExcludeResources.All {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(rule.ExcludeResources.Any) > 0 {
|
||||
for _, rf := range rule.ExcludeResources.Any {
|
||||
if len(rf.UserInfo.Roles) > 0 || len(rf.UserInfo.ClusterRoles) > 0 {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func buildDeletionPrInfo(oldR unstructured.Unstructured) policyreport.Info {
|
||||
return policyreport.Info{
|
||||
Namespace: oldR.GetNamespace(),
|
||||
Results: []policyreport.EngineResponseResult{
|
||||
{Resource: response.ResourceSpec{
|
||||
Kind: oldR.GetKind(),
|
||||
APIVersion: oldR.GetAPIVersion(),
|
||||
Namespace: oldR.GetNamespace(),
|
||||
Name: oldR.GetName(),
|
||||
UID: string(oldR.GetUID()),
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func convertResource(request *admissionv1.AdmissionRequest, resourceRaw []byte) (unstructured.Unstructured, error) {
|
||||
resource, err := utils.ConvertResource(resourceRaw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
|
||||
if err != nil {
|
||||
return unstructured.Unstructured{}, errors.Wrap(err, "failed to convert raw resource to unstructured format")
|
||||
}
|
||||
if request.Kind.Kind == "Secret" && request.Operation == admissionv1.Update {
|
||||
resource, err = utils.NormalizeSecret(&resource)
|
||||
if err != nil {
|
||||
return unstructured.Unstructured{}, errors.Wrap(err, "failed to convert secret to unstructured format")
|
||||
}
|
||||
}
|
||||
return resource, nil
|
||||
}
|
||||
|
||||
// returns true -> if there is even one policy that blocks resource request
|
||||
// returns false -> if all the policies are meant to report only, we don't block the resource request
|
||||
func toBlockResource(engineResponses []*response.EngineResponse, log logr.Logger) bool {
|
||||
for _, er := range engineResponses {
|
||||
if engineutils2.CheckEngineResponse(er) {
|
||||
log.Info("spec.ValidationFailureAction set to enforce, blocking resource request", "policy", er.PolicyResponse.Policy.Name)
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
log.V(4).Info("spec.ValidationFailureAction set to audit for all applicable policies, won't block resource operation")
|
||||
return false
|
||||
}
|
||||
|
||||
// getEnforceFailureErrorMsg gets the error messages for failed enforce policy
|
||||
func getEnforceFailureErrorMsg(engineResponses []*response.EngineResponse) string {
|
||||
policyToRule := make(map[string]interface{})
|
||||
var resourceName string
|
||||
for _, er := range engineResponses {
|
||||
if engineutils2.CheckEngineResponse(er) {
|
||||
ruleToReason := make(map[string]string)
|
||||
for _, rule := range er.PolicyResponse.Rules {
|
||||
if rule.Status != response.RuleStatusPass {
|
||||
ruleToReason[rule.Name] = rule.Message
|
||||
}
|
||||
}
|
||||
resourceName = fmt.Sprintf("%s/%s/%s", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
|
||||
policyToRule[er.PolicyResponse.Policy.Name] = ruleToReason
|
||||
}
|
||||
}
|
||||
result, _ := yamlv2.Marshal(policyToRule)
|
||||
return "\n\nresource " + resourceName + " was blocked due to the following policies\n\n" + string(result)
|
||||
}
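For a sense of the message format built above, a short sketch with a hypothetical policy and rule name:

package main

import (
	"fmt"

	yamlv2 "gopkg.in/yaml.v2"
)

func main() {
	// Hypothetical enforce failure: policy "require-team-label", rule "check-team".
	policyToRule := map[string]interface{}{
		"require-team-label": map[string]string{
			"check-team": "validation error: label 'team' is required",
		},
	}
	result, _ := yamlv2.Marshal(policyToRule)
	fmt.Println("\n\nresource Deployment/default/nginx was blocked due to the following policies\n\n" + string(result))
}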
|
||||
|
||||
func getErrorMsg(engineResponses []*response.EngineResponse) string {
|
||||
var str []string
|
||||
var resourceInfo string
|
||||
for _, er := range engineResponses {
|
||||
if !er.IsSuccessful() {
|
||||
// the resource in engineResponses is identical, as this is called once per admission request
|
||||
resourceInfo = fmt.Sprintf("%s/%s/%s", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
|
||||
str = append(str, fmt.Sprintf("failed policy %s:", er.PolicyResponse.Policy.Name))
|
||||
for _, rule := range er.PolicyResponse.Rules {
|
||||
if rule.Status != response.RuleStatusPass {
|
||||
str = append(str, rule.ToString())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return fmt.Sprintf("Resource %s %s", resourceInfo, strings.Join(str, ";"))
|
||||
}
|
||||
|
||||
func hasAnnotations(context *engine.PolicyContext) bool {
|
||||
annotations := context.NewResource.GetAnnotations()
|
||||
return len(annotations) != 0
|
||||
}
|
||||
|
||||
func getGeneratedByResource(newRes *unstructured.Unstructured, resLabels map[string]string, client client.Interface, rule kyvernov1.Rule, logger logr.Logger) (kyvernov1.Rule, error) {
|
||||
var apiVersion, kind, name, namespace string
|
||||
sourceRequest := &admissionv1.AdmissionRequest{}
|
||||
kind = resLabels["kyverno.io/generated-by-kind"]
|
||||
name = resLabels["kyverno.io/generated-by-name"]
|
||||
if kind != "Namespace" {
|
||||
namespace = resLabels["kyverno.io/generated-by-namespace"]
|
||||
}
|
||||
obj, err := client.GetResource(apiVersion, kind, namespace, name)
|
||||
if err != nil {
|
||||
logger.Error(err, "source resource not found.")
|
||||
return rule, err
|
||||
}
|
||||
rawObj, err := json.Marshal(obj)
|
||||
if err != nil {
|
||||
logger.Error(err, "failed to marshal resource")
|
||||
return rule, err
|
||||
}
|
||||
sourceRequest.Object.Raw = rawObj
|
||||
sourceRequest.Operation = "CREATE"
|
||||
ctx := enginectx.NewContext()
|
||||
if err := ctx.AddRequest(sourceRequest); err != nil {
|
||||
logger.Error(err, "failed to load incoming request in context")
|
||||
return rule, err
|
||||
}
|
||||
if rule, err = variables.SubstituteAllInRule(logger, ctx, rule); err != nil {
|
||||
logger.Error(err, "variable substitution failed for rule %s", rule.Name)
|
||||
return rule, err
|
||||
}
|
||||
return rule, nil
|
||||
}
|
||||
|
||||
// stripNonPolicyFields removes fields that are updated by Kyverno on each request and are not policy-controlled
|
||||
func stripNonPolicyFields(obj, newRes map[string]interface{}, logger logr.Logger) (map[string]interface{}, map[string]interface{}) {
|
||||
if metadata, found := obj["metadata"]; found {
|
||||
requiredMetadataInObj := make(map[string]interface{})
|
||||
if annotations, found := metadata.(map[string]interface{})["annotations"]; found {
|
||||
delete(annotations.(map[string]interface{}), "kubectl.kubernetes.io/last-applied-configuration")
|
||||
requiredMetadataInObj["annotations"] = annotations
|
||||
}
|
||||
|
||||
if labels, found := metadata.(map[string]interface{})["labels"]; found {
|
||||
delete(labels.(map[string]interface{}), "generate.kyverno.io/clone-policy-name")
|
||||
requiredMetadataInObj["labels"] = labels
|
||||
}
|
||||
obj["metadata"] = requiredMetadataInObj
|
||||
}
|
||||
|
||||
if metadata, found := newRes["metadata"]; found {
|
||||
requiredMetadataInNewRes := make(map[string]interface{})
|
||||
if annotations, found := metadata.(map[string]interface{})["annotations"]; found {
|
||||
requiredMetadataInNewRes["annotations"] = annotations
|
||||
}
|
||||
|
||||
if labels, found := metadata.(map[string]interface{})["labels"]; found {
|
||||
requiredMetadataInNewRes["labels"] = labels
|
||||
}
|
||||
newRes["metadata"] = requiredMetadataInNewRes
|
||||
}
|
||||
|
||||
delete(obj, "status")
|
||||
|
||||
if _, found := obj["spec"]; found {
|
||||
delete(obj["spec"].(map[string]interface{}), "tolerations")
|
||||
}
|
||||
|
||||
if dataMap, found := obj["data"]; found {
|
||||
keyInData := make([]string, 0)
|
||||
switch dataMap := dataMap.(type) {
|
||||
case map[string]interface{}:
|
||||
for k := range dataMap {
|
||||
keyInData = append(keyInData, k)
|
||||
}
|
||||
}
|
||||
|
||||
if len(keyInData) > 0 {
|
||||
for _, dataKey := range keyInData {
|
||||
originalResourceData := dataMap.(map[string]interface{})[dataKey]
|
||||
replaceData := strings.Replace(originalResourceData.(string), "\n", "", -1)
|
||||
dataMap.(map[string]interface{})[dataKey] = replaceData
|
||||
|
||||
newResourceData := newRes["data"].(map[string]interface{})[dataKey]
|
||||
replacenewResourceData := strings.Replace(newResourceData.(string), "\n", "", -1)
|
||||
newRes["data"].(map[string]interface{})[dataKey] = replacenewResourceData
|
||||
}
|
||||
} else {
|
||||
logger.V(4).Info("data is not of type map[string]interface{}")
|
||||
}
|
||||
}
|
||||
|
||||
return obj, newRes
|
||||
}
|
||||
|
||||
func applyUpdateRequest(request *admissionv1.AdmissionRequest, ruleType kyvernov1beta1.RequestType, grGenerator updaterequest.Interface, userRequestInfo kyvernov1beta1.RequestInfo,
|
||||
action admissionv1.Operation, engineResponses ...*response.EngineResponse) (failedUpdateRequest []updateRequestResponse) {
|
||||
requestBytes, err := json.Marshal(request)
|
||||
if err != nil {
|
||||
logger.Error(err, "error loading request into context")
|
||||
}
|
||||
admissionRequestInfo := kyvernov1beta1.AdmissionRequestInfoObject{
|
||||
AdmissionRequest: string(requestBytes),
|
||||
Operation: action,
|
||||
}
|
||||
|
||||
for _, er := range engineResponses {
|
||||
ur := transform(admissionRequestInfo, userRequestInfo, er, ruleType)
|
||||
if err := grGenerator.Apply(ur, action); err != nil {
|
||||
failedUpdateRequest = append(failedUpdateRequest, updateRequestResponse{ur: ur, err: err})
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func transform(admissionRequestInfo kyvernov1beta1.AdmissionRequestInfoObject, userRequestInfo kyvernov1beta1.RequestInfo, er *response.EngineResponse, ruleType kyvernov1beta1.RequestType) kyvernov1beta1.UpdateRequestSpec {
|
||||
var PolicyNameNamespaceKey string
|
||||
if er.PolicyResponse.Policy.Namespace != "" {
|
||||
PolicyNameNamespaceKey = er.PolicyResponse.Policy.Namespace + "/" + er.PolicyResponse.Policy.Name
|
||||
} else {
|
||||
PolicyNameNamespaceKey = er.PolicyResponse.Policy.Name
|
||||
}
|
||||
|
||||
ur := kyvernov1beta1.UpdateRequestSpec{
|
||||
Type: ruleType,
|
||||
Policy: PolicyNameNamespaceKey,
|
||||
Resource: kyvernov1.ResourceSpec{
|
||||
Kind: er.PolicyResponse.Resource.Kind,
|
||||
Namespace: er.PolicyResponse.Resource.Namespace,
|
||||
Name: er.PolicyResponse.Resource.Name,
|
||||
APIVersion: er.PolicyResponse.Resource.APIVersion,
|
||||
},
|
||||
Context: kyvernov1beta1.UpdateRequestSpecContext{
|
||||
UserRequestInfo: userRequestInfo,
|
||||
AdmissionRequestInfo: admissionRequestInfo,
|
||||
},
|
||||
}
|
||||
|
||||
return ur
|
||||
}
|
|
@@ -1,4 +1,4 @@
|
|||
package webhooks
|
||||
package resource
|
||||
|
||||
import (
|
||||
"strings"
|
|
@@ -1,4 +1,4 @@
|
|||
package webhooks
|
||||
package resource
|
||||
|
||||
import (
|
||||
"reflect"
|
||||
|
@ -131,23 +131,8 @@ func (v *validationHandler) handleValidation(
|
|||
//registering the kyverno_admission_review_duration_seconds metric concurrently
|
||||
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
|
||||
go registerAdmissionReviewDurationMetricValidate(logger, promConfig, string(request.Operation), engineResponses, admissionReviewLatencyDuration)
|
||||
|
||||
//registering the kyverno_admission_requests_total metric concurrently
|
||||
go registerAdmissionRequestsMetricValidate(logger, promConfig, string(request.Operation), engineResponses)
|
||||
|
||||
return true, ""
|
||||
}
|
||||
|
||||
func buildDeletionPrInfo(oldR unstructured.Unstructured) policyreport.Info {
|
||||
return policyreport.Info{
|
||||
Namespace: oldR.GetNamespace(),
|
||||
Results: []policyreport.EngineResponseResult{
|
||||
{Resource: response.ResourceSpec{
|
||||
Kind: oldR.GetKind(),
|
||||
APIVersion: oldR.GetAPIVersion(),
|
||||
Namespace: oldR.GetNamespace(),
|
||||
Name: oldR.GetName(),
|
||||
UID: string(oldR.GetUID()),
|
||||
}},
|
||||
},
|
||||
}
|
||||
}
|
|
@@ -1,4 +1,4 @@
|
|||
package webhooks
|
||||
package resource
|
||||
|
||||
import (
|
||||
"encoding/json"
|
|
@@ -4,40 +4,23 @@ import (
|
|||
"context"
|
||||
"crypto/tls"
|
||||
"net/http"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
|
||||
"github.com/go-logr/logr"
|
||||
"github.com/julienschmidt/httprouter"
|
||||
"github.com/kyverno/kyverno/api/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/background"
|
||||
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
|
||||
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
|
||||
urinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1beta1"
|
||||
urlister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1beta1"
|
||||
"github.com/kyverno/kyverno/pkg/config"
|
||||
client "github.com/kyverno/kyverno/pkg/dclient"
|
||||
"github.com/kyverno/kyverno/pkg/engine"
|
||||
"github.com/kyverno/kyverno/pkg/event"
|
||||
"github.com/kyverno/kyverno/pkg/metrics"
|
||||
"github.com/kyverno/kyverno/pkg/openapi"
|
||||
"github.com/kyverno/kyverno/pkg/policycache"
|
||||
"github.com/kyverno/kyverno/pkg/policyreport"
|
||||
"github.com/kyverno/kyverno/pkg/userinfo"
|
||||
"github.com/kyverno/kyverno/pkg/utils"
|
||||
"github.com/kyverno/kyverno/pkg/webhookconfig"
|
||||
"github.com/kyverno/kyverno/pkg/webhooks/handlers"
|
||||
webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
|
||||
"github.com/pkg/errors"
|
||||
admissionv1 "k8s.io/api/admission/v1"
|
||||
informers "k8s.io/client-go/informers/core/v1"
|
||||
rbacinformer "k8s.io/client-go/informers/rbac/v1"
|
||||
listerv1 "k8s.io/client-go/listers/core/v1"
|
||||
rbaclister "k8s.io/client-go/listers/rbac/v1"
|
||||
)
|
||||
|
||||
type Server interface {
|
||||
// Run starts the TLS server in a separate goroutine and returns immediately
|
||||
Run(<-chan struct{})
|
||||
// Stop shuts down the TLS server and returns once the server has stopped
|
||||
Stop(context.Context)
|
||||
}
|
||||
|
||||
type Handlers interface {
|
||||
// Mutate performs the mutation of policy resources
|
||||
Mutate(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse
|
||||
|
@@ -45,234 +28,87 @@ type Handlers interface {
|
|||
Validate(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse
|
||||
}
|
||||
|
||||
// WebhookServer contains configured TLS server with MutationWebhook.
|
||||
type WebhookServer struct {
|
||||
server *http.Server
|
||||
|
||||
// clients
|
||||
client client.Interface
|
||||
kyvernoClient kyvernoclient.Interface
|
||||
|
||||
// listers
|
||||
urLister urlister.UpdateRequestNamespaceLister
|
||||
rbLister rbaclister.RoleBindingLister
|
||||
rLister rbaclister.RoleLister
|
||||
crLister rbaclister.ClusterRoleLister
|
||||
crbLister rbaclister.ClusterRoleBindingLister
|
||||
nsLister listerv1.NamespaceLister
|
||||
|
||||
// generate events
|
||||
eventGen event.Interface
|
||||
|
||||
// policy cache
|
||||
pCache policycache.Cache
|
||||
|
||||
// webhook registration client
|
||||
type server struct {
|
||||
server *http.Server
|
||||
webhookRegister *webhookconfig.Register
|
||||
|
||||
// helpers to validate against current loaded configuration
|
||||
configuration config.Configuration
|
||||
|
||||
// channel for cleanup notification
|
||||
cleanUp chan<- struct{}
|
||||
|
||||
// last request time
|
||||
webhookMonitor *webhookconfig.Monitor
|
||||
|
||||
// policy report generator
|
||||
prGenerator policyreport.GeneratorInterface
|
||||
|
||||
// update request generator
|
||||
urGenerator webhookgenerate.Interface
|
||||
|
||||
auditHandler AuditHandler
|
||||
|
||||
log logr.Logger
|
||||
|
||||
openAPIController *openapi.Controller
|
||||
|
||||
urController *background.Controller
|
||||
|
||||
promConfig *metrics.PromConfig
|
||||
|
||||
mu sync.RWMutex
|
||||
cleanUp chan<- struct{}
|
||||
}
|
||||
|
||||
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
|
||||
// Policy Controller and Kubernetes Client should be initialized in configuration
|
||||
func NewWebhookServer(
|
||||
type TlsProvider func() ([]byte, []byte, error)
|
||||
|
||||
// NewServer creates a new instance of the webhook server according to the given configuration
|
||||
func NewServer(
|
||||
policyHandlers Handlers,
|
||||
kyvernoClient kyvernoclient.Interface,
|
||||
client client.Interface,
|
||||
tlsPair func() ([]byte, []byte, error),
|
||||
urInformer urinformer.UpdateRequestInformer,
|
||||
pInformer kyvernoinformer.ClusterPolicyInformer,
|
||||
rbInformer rbacinformer.RoleBindingInformer,
|
||||
crbInformer rbacinformer.ClusterRoleBindingInformer,
|
||||
rInformer rbacinformer.RoleInformer,
|
||||
crInformer rbacinformer.ClusterRoleInformer,
|
||||
namespace informers.NamespaceInformer,
|
||||
eventGen event.Interface,
|
||||
pCache policycache.Cache,
|
||||
webhookRegistrationClient *webhookconfig.Register,
|
||||
webhookMonitor *webhookconfig.Monitor,
|
||||
configHandler config.Configuration,
|
||||
prGenerator policyreport.GeneratorInterface,
|
||||
urGenerator webhookgenerate.Interface,
|
||||
auditHandler AuditHandler,
|
||||
resourceHandlers Handlers,
|
||||
tlsProvider TlsProvider,
|
||||
configuration config.Configuration,
|
||||
register *webhookconfig.Register,
|
||||
monitor *webhookconfig.Monitor,
|
||||
cleanUp chan<- struct{},
|
||||
log logr.Logger,
|
||||
openAPIController *openapi.Controller,
|
||||
urc *background.Controller,
|
||||
promConfig *metrics.PromConfig,
|
||||
) (*WebhookServer, error) {
|
||||
if tlsPair == nil {
|
||||
return nil, errors.New("NewWebhookServer is not initialized properly")
|
||||
}
|
||||
ws := &WebhookServer{
|
||||
client: client,
|
||||
kyvernoClient: kyvernoClient,
|
||||
urLister: urInformer.Lister().UpdateRequests(config.KyvernoNamespace()),
|
||||
rbLister: rbInformer.Lister(),
|
||||
rLister: rInformer.Lister(),
|
||||
nsLister: namespace.Lister(),
|
||||
crbLister: crbInformer.Lister(),
|
||||
crLister: crInformer.Lister(),
|
||||
eventGen: eventGen,
|
||||
pCache: pCache,
|
||||
webhookRegister: webhookRegistrationClient,
|
||||
configuration: configHandler,
|
||||
cleanUp: cleanUp,
|
||||
webhookMonitor: webhookMonitor,
|
||||
prGenerator: prGenerator,
|
||||
urGenerator: urGenerator,
|
||||
urController: urc,
|
||||
auditHandler: auditHandler,
|
||||
log: log,
|
||||
openAPIController: openAPIController,
|
||||
promConfig: promConfig,
|
||||
}
) Server {
	mux := httprouter.New()
	resourceLogger := ws.log.WithName("resource")
	policyLogger := ws.log.WithName("policy")
	verifyLogger := ws.log.WithName("verify")
	mux.HandlerFunc("POST", config.MutatingWebhookServicePath, ws.admissionHandler(resourceLogger.WithName("mutate"), true, ws.resourceMutation))
	mux.HandlerFunc("POST", config.ValidatingWebhookServicePath, ws.admissionHandler(resourceLogger.WithName("validate"), true, ws.resourceValidation))
	mux.HandlerFunc("POST", config.PolicyMutatingWebhookServicePath, ws.admissionHandler(policyLogger.WithName("mutate"), true, policyHandlers.Mutate))
	mux.HandlerFunc("POST", config.PolicyValidatingWebhookServicePath, ws.admissionHandler(policyLogger.WithName("validate"), true, policyHandlers.Validate))
	mux.HandlerFunc("POST", config.VerifyMutatingWebhookServicePath, ws.admissionHandler(verifyLogger.WithName("mutate"), false, handlers.Verify(ws.webhookMonitor, ws.log.WithName("verifyHandler"))))
	mux.HandlerFunc("GET", config.LivenessServicePath, handlers.Probe(ws.webhookRegister.Check))
	resourceLogger := logger.WithName("resource")
	policyLogger := logger.WithName("policy")
	verifyLogger := logger.WithName("verify")
	mux.HandlerFunc("POST", config.MutatingWebhookServicePath, admission(resourceLogger.WithName("mutate"), monitor, filter(configuration, resourceHandlers.Mutate)))
	mux.HandlerFunc("POST", config.ValidatingWebhookServicePath, admission(resourceLogger.WithName("validate"), monitor, filter(configuration, resourceHandlers.Validate)))
	mux.HandlerFunc("POST", config.PolicyMutatingWebhookServicePath, admission(policyLogger.WithName("mutate"), monitor, filter(configuration, policyHandlers.Mutate)))
	mux.HandlerFunc("POST", config.PolicyValidatingWebhookServicePath, admission(policyLogger.WithName("validate"), monitor, filter(configuration, policyHandlers.Validate)))
	mux.HandlerFunc("POST", config.VerifyMutatingWebhookServicePath, admission(verifyLogger.WithName("mutate"), monitor, handlers.Verify(monitor)))
	mux.HandlerFunc("GET", config.LivenessServicePath, handlers.Probe(register.Check))
	mux.HandlerFunc("GET", config.ReadinessServicePath, handlers.Probe(nil))
	ws.server = &http.Server{
		Addr: ":9443", // Listen on port for HTTPS requests
		TLSConfig: &tls.Config{
			GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
				certPem, keyPem, err := tlsPair()
				if err != nil {
					return nil, err
				}
				pair, err := tls.X509KeyPair(certPem, keyPem)
				if err != nil {
					return nil, err
				}
				return &pair, nil
	return &server{
		server: &http.Server{
			Addr: ":9443",
			TLSConfig: &tls.Config{
				GetCertificate: func(*tls.ClientHelloInfo) (*tls.Certificate, error) {
					certPem, keyPem, err := tlsProvider()
					if err != nil {
						return nil, err
					}
					pair, err := tls.X509KeyPair(certPem, keyPem)
					if err != nil {
						return nil, err
					}
					return &pair, nil
				},
				MinVersion: tls.VersionTLS12,
			},
			MinVersion: tls.VersionTLS12,
			Handler: mux,
			ReadTimeout: 30 * time.Second,
			WriteTimeout: 30 * time.Second,
		},
		Handler: mux,
		ReadTimeout: 30 * time.Second,
		WriteTimeout: 30 * time.Second,
		webhookRegister: register,
		cleanUp: cleanUp,
	}
	return ws, nil
}
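
NewServer only requires that tlsProvider return a PEM-encoded certificate and key; because GetCertificate invokes it on every TLS handshake, rotated certificates can be picked up without a restart. A minimal sketch of a file-backed provider, assuming an os import and placeholder paths that are not part of this change:

// Hypothetical sketch: a TlsProvider that reads a static key pair from disk.
func fileTlsProvider(certPath, keyPath string) TlsProvider {
	return func() ([]byte, []byte, error) {
		certPem, err := os.ReadFile(certPath)
		if err != nil {
			return nil, nil, err
		}
		keyPem, err := os.ReadFile(keyPath)
		if err != nil {
			return nil, nil, err
		}
		return certPem, keyPem, nil
	}
}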

func (ws *WebhookServer) buildPolicyContext(request *admissionv1.AdmissionRequest, addRoles bool) (*engine.PolicyContext, error) {
	userRequestInfo := v1beta1.RequestInfo{
		AdmissionUserInfo: *request.UserInfo.DeepCopy(),
	}

	if addRoles {
		var err error
		userRequestInfo.Roles, userRequestInfo.ClusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configuration)
		if err != nil {
			return nil, errors.Wrap(err, "failed to fetch RBAC information for request")
		}
	}

	ctx, err := newVariablesContext(request, &userRequestInfo)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create policy rule context")
	}

	resource, err := convertResource(request, request.Object.Raw)
	if err != nil {
		return nil, err
	}

	if err := ctx.AddImageInfos(&resource); err != nil {
		return nil, errors.Wrap(err, "failed to add image information to the policy rule context")
	}

	policyContext := &engine.PolicyContext{
		NewResource: resource,
		AdmissionInfo: userRequestInfo,
		ExcludeGroupRole: ws.configuration.GetExcludeGroupRole(),
		ExcludeResourceFunc: ws.configuration.ToFilter,
		JSONContext: ctx,
		Client: ws.client,
		AdmissionOperation: true,
	}

	if request.Operation == admissionv1.Update {
		policyContext.OldResource, err = convertResource(request, request.OldObject.Raw)
		if err != nil {
			return nil, err
		}
	}

	return policyContext, nil
}

// convertResource converts the raw admission payload to an unstructured object
func convertResource(request *admissionv1.AdmissionRequest, resourceRaw []byte) (unstructured.Unstructured, error) {
	resource, err := utils.ConvertResource(resourceRaw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
	if err != nil {
		return unstructured.Unstructured{}, errors.Wrap(err, "failed to convert raw resource to unstructured format")
	}

	if request.Kind.Kind == "Secret" && request.Operation == admissionv1.Update {
		resource, err = utils.NormalizeSecret(&resource)
		if err != nil {
			return unstructured.Unstructured{}, errors.Wrap(err, "failed to convert secret to unstructured format")
		}
	}

	return resource, nil
}
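
For reference, converting the raw admission payload into an unstructured object is, at its core, a JSON unmarshal into a generic map. The helper below is a simplified, hypothetical stand-in for utils.ConvertResource and deliberately skips the namespace handling and Secret normalization performed above; it assumes encoding/json and the apimachinery unstructured package are imported:

// Hypothetical sketch: decode request.Object.Raw into an unstructured.Unstructured.
func rawToUnstructured(raw []byte) (unstructured.Unstructured, error) {
	var obj map[string]interface{}
	if err := json.Unmarshal(raw, &obj); err != nil {
		return unstructured.Unstructured{}, err
	}
	return unstructured.Unstructured{Object: obj}, nil
}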

// RunAsync TLS server in separate thread and returns control immediately
func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
func (s *server) Run(stopCh <-chan struct{}) {
	go func() {
		ws.log.V(3).Info("started serving requests", "addr", ws.server.Addr)
		if err := ws.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
			ws.log.Error(err, "failed to listen to requests")
		logger.V(3).Info("started serving requests", "addr", s.server.Addr)
		if err := s.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
			logger.Error(err, "failed to listen to requests")
		}
	}()
	ws.log.Info("starting service")
	logger.Info("starting service")
}

// Stop TLS server and returns control after the server is shut down
func (ws *WebhookServer) Stop(ctx context.Context) {
	// remove the static webhook configurations
	go ws.webhookRegister.Remove(ws.cleanUp)
	// shutdown http.Server with context timeout
	err := ws.server.Shutdown(ctx)
func (s *server) Stop(ctx context.Context) {
	go s.webhookRegister.Remove(s.cleanUp)
	err := s.server.Shutdown(ctx)
	if err != nil {
		// Error from closing listeners, or context timeout:
		ws.log.Error(err, "shutting down server")
		err = ws.server.Close()
		logger.Error(err, "shutting down server")
		err = s.server.Close()
		if err != nil {
			ws.log.Error(err, "server shut down failed")
			logger.Error(err, "server shut down failed")
		}
	}
}
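
A caller typically drives this lifecycle from main: start the server, wait for the stop signal, then shut down with a bounded context. A hedged usage sketch, assuming context and time imports; the five-second timeout is an arbitrary illustration, not a value taken from this change:

// Hypothetical usage sketch for the Server lifecycle.
func runServer(srv Server, stopCh <-chan struct{}) {
	srv.Run(stopCh) // returns immediately; serving happens in a goroutine
	<-stopCh        // block until shutdown is requested
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	srv.Stop(ctx) // graceful shutdown; falls back to Close on error
}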

func filter(configuration config.Configuration, inner handlers.AdmissionHandler) handlers.AdmissionHandler {
	return handlers.Filter(configuration, inner)
}

func admission(logger logr.Logger, monitor *webhookconfig.Monitor, inner handlers.AdmissionHandler) http.HandlerFunc {
	return handlers.Monitor(monitor, handlers.Admission(logger, inner))
}
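
Each route registered above is therefore built by stacking decorators (monitoring, admission review plumbing, request filtering) around a Handlers method. The sketch below illustrates the same composition pattern with plain function types; it is not the actual handlers package API:

// Hypothetical sketch of the decorator pattern used for the routes above.
type admissionFn func(logr.Logger, *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse

func withLogging(logger logr.Logger, inner admissionFn) admissionFn {
	return func(_ logr.Logger, request *admissionv1.AdmissionRequest) *admissionv1.AdmissionResponse {
		logger.V(4).Info("handling admission request", "uid", request.UID)
		return inner(logger, request)
	}
}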

@ -1,79 +0,0 @@
package webhooks

import (
	"errors"

	"github.com/go-logr/logr"
	v1 "github.com/kyverno/kyverno/api/kyverno/v1"
	"github.com/kyverno/kyverno/pkg/engine"
	"github.com/kyverno/kyverno/pkg/engine/response"
	"github.com/kyverno/kyverno/pkg/policyreport"
	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
	jsonutils "github.com/kyverno/kyverno/pkg/utils/json"
	admissionv1 "k8s.io/api/admission/v1"
)

func (ws *WebhookServer) applyImageVerifyPolicies(request *admissionv1.AdmissionRequest, policyContext *engine.PolicyContext, policies []v1.PolicyInterface, logger logr.Logger) ([]byte, error) {
	ok, message, imagePatches := ws.handleVerifyImages(request, policyContext, policies)
	if !ok {
		return nil, errors.New(message)
	}

	logger.V(6).Info("images verified", "patches", string(imagePatches))
	return imagePatches, nil
}

func (ws *WebhookServer) handleVerifyImages(
	request *admissionv1.AdmissionRequest,
	policyContext *engine.PolicyContext,
	policies []v1.PolicyInterface,
) (bool, string, []byte) {
	if len(policies) == 0 {
		return true, "", nil
	}

	resourceName := admissionutils.GetResourceName(request)
	logger := ws.log.WithValues("action", "verifyImages", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind.String())

	var engineResponses []*response.EngineResponse
	var patches [][]byte
	verifiedImageData := &engine.ImageVerificationMetadata{}
	for _, p := range policies {
		policyContext.Policy = p
		resp, ivm := engine.VerifyAndPatchImages(policyContext)

		engineResponses = append(engineResponses, resp)
		patches = append(patches, resp.GetPatches()...)
		verifiedImageData.Merge(ivm)
	}

	prInfos := policyreport.GeneratePRsFromEngineResponse(engineResponses, logger)
	ws.prGenerator.Add(prInfos...)

	blocked := toBlockResource(engineResponses, logger)
	events := generateEvents(engineResponses, blocked, logger)
	ws.eventGen.Add(events...)

	if blocked {
		logger.V(4).Info("resource blocked")
		return false, getEnforceFailureErrorMsg(engineResponses), nil
	}

	if !verifiedImageData.IsEmpty() {
		hasAnnotations := hasAnnotations(policyContext)
		annotationPatches, err := verifiedImageData.Patches(hasAnnotations, logger)
		if err != nil {
			logger.Error(err, "failed to create image verification annotation patches")
		} else {
			// add annotation patches first
			patches = append(annotationPatches, patches...)
		}
	}

	return true, "", jsonutils.JoinPatches(patches...)
}

func hasAnnotations(context *engine.PolicyContext) bool {
	annotations := context.NewResource.GetAnnotations()
	return len(annotations) != 0
}
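
handleVerifyImages accumulates JSONPatch fragments from every policy and joins them into a single patch document before returning. A hedged sketch of that joining step with a hypothetical helper (the real implementation lives in the jsonutils package and may differ); it assumes an encoding/json import and that each fragment is an RFC 6902 array of operations:

// Hypothetical sketch: concatenate several JSONPatch documents into one array.
func joinPatches(patches ...[]byte) ([]byte, error) {
	var ops []json.RawMessage
	for _, p := range patches {
		if len(p) == 0 {
			continue
		}
		var chunk []json.RawMessage
		if err := json.Unmarshal(p, &chunk); err != nil {
			return nil, err
		}
		ops = append(ops, chunk...)
	}
	return json.Marshal(ops)
}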