// kyverno/pkg/webhooks/server.go
package webhooks
import (
"context"
"crypto/tls"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"time"
"github.com/go-logr/logr"
"github.com/julienschmidt/httprouter"
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
kyvernoclient "github.com/kyverno/kyverno/pkg/client/clientset/versioned"
kyvernoinformer "github.com/kyverno/kyverno/pkg/client/informers/externalversions/kyverno/v1"
kyvernolister "github.com/kyverno/kyverno/pkg/client/listers/kyverno/v1"
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/config"
client "github.com/kyverno/kyverno/pkg/dclient"
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/generate"
"github.com/kyverno/kyverno/pkg/metrics"
admissionReviewLatency "github.com/kyverno/kyverno/pkg/metrics/admissionreviewlatency"
"github.com/kyverno/kyverno/pkg/openapi"
"github.com/kyverno/kyverno/pkg/policycache"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/resourcecache"
ktls "github.com/kyverno/kyverno/pkg/tls"
tlsutils "github.com/kyverno/kyverno/pkg/tls"
userinfo "github.com/kyverno/kyverno/pkg/userinfo"
"github.com/kyverno/kyverno/pkg/utils"
"github.com/kyverno/kyverno/pkg/webhookconfig"
webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/generate"
"github.com/pkg/errors"
v1beta1 "k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
informers "k8s.io/client-go/informers/core/v1"
rbacinformer "k8s.io/client-go/informers/rbac/v1"
listerv1 "k8s.io/client-go/listers/core/v1"
rbaclister "k8s.io/client-go/listers/rbac/v1"
"k8s.io/client-go/tools/cache"
)
// WebhookServer contains the configured TLS server and the admission webhook handlers.
type WebhookServer struct {
server *http.Server
client *client.Client
kyvernoClient *kyvernoclient.Clientset
// grLister can list/get generate request from the shared informer's store
grLister kyvernolister.GenerateRequestNamespaceLister
// grSynced returns true if the Generate Request store has been synced at least once
grSynced cache.InformerSynced
// list/get cluster policy resource
pLister kyvernolister.ClusterPolicyLister
// returns true if the cluster policy store has synced at least once
pSynced cache.InformerSynced
// list/get role binding resource
rbLister rbaclister.RoleBindingLister
// list/get role resource
rLister rbaclister.RoleLister
// list/get cluster role resource
crLister rbaclister.ClusterRoleLister
// return true if role binding store has synced at least once
rbSynced cache.InformerSynced
// return true if role store has synced at least once
rSynced cache.InformerSynced
// list/get cluster role binding resource
crbLister rbaclister.ClusterRoleBindingLister
// return true if cluster role binding store has synced at least once
crbSynced cache.InformerSynced
// return true if cluster role store has synced at least once
crSynced cache.InformerSynced
// generate events
eventGen event.Interface
// policy cache
pCache policycache.Interface
// webhook registration client
webhookRegister *webhookconfig.Register
// API to send policy stats for aggregation
statusListener policystatus.Listener
// helpers to validate against current loaded configuration
configHandler config.Interface
// channel for cleanup notification
cleanUp chan<- struct{}
// last request time
webhookMonitor *webhookconfig.Monitor
certRenewer *ktls.CertRenewer
// policy report generator
prGenerator policyreport.GeneratorInterface
// generate request generator
grGenerator *webhookgenerate.Generator
nsLister listerv1.NamespaceLister
// nsListerSynced returns true if the namespace store has been synced at least once
nsListerSynced cache.InformerSynced
auditHandler AuditHandler
log logr.Logger
openAPIController *openapi.Controller
// resCache - controls creation and fetching of resource informer cache
resCache resourcecache.ResourceCache
grController *generate.Controller
debug bool
promConfig *metrics.PromConfig
}
// NewWebhookServer creates a new instance of WebhookServer according to the given configuration.
// The policy controller and Kubernetes client must be initialized in the configuration.
func NewWebhookServer(
kyvernoClient *kyvernoclient.Clientset,
client *client.Client,
tlsPair *tlsutils.PemPair,
grInformer kyvernoinformer.GenerateRequestInformer,
pInformer kyvernoinformer.ClusterPolicyInformer,
rbInformer rbacinformer.RoleBindingInformer,
crbInformer rbacinformer.ClusterRoleBindingInformer,
rInformer rbacinformer.RoleInformer,
crInformer rbacinformer.ClusterRoleInformer,
namespace informers.NamespaceInformer,
eventGen event.Interface,
pCache policycache.Interface,
webhookRegistrationClient *webhookconfig.Register,
webhookMonitor *webhookconfig.Monitor,
certRenewer *ktls.CertRenewer,
statusSync policystatus.Listener,
configHandler config.Interface,
prGenerator policyreport.GeneratorInterface,
grGenerator *webhookgenerate.Generator,
auditHandler AuditHandler,
cleanUp chan<- struct{},
log logr.Logger,
openAPIController *openapi.Controller,
resCache resourcecache.ResourceCache,
grc *generate.Controller,
debug bool,
promConfig *metrics.PromConfig,
) (*WebhookServer, error) {
if tlsPair == nil {
return nil, errors.New("NewWebhookServer is not initialized properly")
}
var tlsConfig tls.Config
pair, err := tls.X509KeyPair(tlsPair.Certificate, tlsPair.PrivateKey)
if err != nil {
return nil, err
}
tlsConfig.Certificates = []tls.Certificate{pair}
ws := &WebhookServer{
client: client,
kyvernoClient: kyvernoClient,
grLister: grInformer.Lister().GenerateRequests(config.KyvernoNamespace),
grSynced: grInformer.Informer().HasSynced,
pLister: pInformer.Lister(),
pSynced: pInformer.Informer().HasSynced,
rbLister: rbInformer.Lister(),
rbSynced: rbInformer.Informer().HasSynced,
rLister: rInformer.Lister(),
rSynced: rInformer.Informer().HasSynced,
nsLister: namespace.Lister(),
nsListerSynced: namespace.Informer().HasSynced,
crbLister: crbInformer.Lister(),
crLister: crInformer.Lister(),
crbSynced: crbInformer.Informer().HasSynced,
crSynced: crInformer.Informer().HasSynced,
eventGen: eventGen,
pCache: pCache,
webhookRegister: webhookRegistrationClient,
statusListener: statusSync,
configHandler: configHandler,
cleanUp: cleanUp,
webhookMonitor: webhookMonitor,
certRenewer: certRenewer,
prGenerator: prGenerator,
grGenerator: grGenerator,
grController: grc,
auditHandler: auditHandler,
log: log,
openAPIController: openAPIController,
resCache: resCache,
debug: debug,
promConfig: promConfig,
}
mux := httprouter.New()
mux.HandlerFunc("POST", config.MutatingWebhookServicePath, ws.handlerFunc(ws.ResourceMutation, true))
mux.HandlerFunc("POST", config.ValidatingWebhookServicePath, ws.handlerFunc(ws.resourceValidation, true))
mux.HandlerFunc("POST", config.PolicyMutatingWebhookServicePath, ws.handlerFunc(ws.policyMutation, true))
mux.HandlerFunc("POST", config.PolicyValidatingWebhookServicePath, ws.handlerFunc(ws.policyValidation, true))
mux.HandlerFunc("POST", config.VerifyMutatingWebhookServicePath, ws.handlerFunc(ws.verifyHandler, false))
// Handle Liveness responds to a Kubernetes Liveness probe
// Fail this request if Kubernetes should restart this instance
mux.HandlerFunc("GET", config.LivenessServicePath, func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
if err := ws.webhookRegister.Check(); err != nil {
w.WriteHeader(http.StatusInternalServerError)
} else {
w.WriteHeader(http.StatusOK)
}
})
// Handle Readiness responds to a Kubernetes Readiness probe
// Fail this request if this instance can't accept traffic, but Kubernetes shouldn't restart it
mux.HandlerFunc("GET", config.ReadinessServicePath, func(w http.ResponseWriter, r *http.Request) {
defer r.Body.Close()
w.WriteHeader(http.StatusOK)
})
ws.server = &http.Server{
Addr: ":9443", // listen on port 9443 for HTTPS requests
TLSConfig: &tlsConfig,
Handler: mux,
ReadTimeout: 15 * time.Second,
WriteTimeout: 15 * time.Second,
}
return ws, nil
}
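// The sketch below is not part of the original file: it is a minimal,
// hypothetical illustration of the intended WebhookServer lifecycle,
// assuming the caller has already constructed the informers, clients,
// and helpers that NewWebhookServer requires.
func exampleServe(ws *WebhookServer, stopCh <-chan struct{}) {
	// RunAsync starts ListenAndServeTLS in its own goroutine and returns.
	ws.RunAsync(stopCh)

	// Block until a shutdown signal arrives.
	<-stopCh

	// Give in-flight admission requests up to five seconds to drain.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	ws.Stop(ctx)
}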
func (ws *WebhookServer) handlerFunc(handler func(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse, filter bool) http.HandlerFunc {
return func(rw http.ResponseWriter, r *http.Request) {
startTime := time.Now()
ws.webhookMonitor.SetTime(startTime)
admissionReview := ws.bodyToAdmissionReview(r, rw)
if admissionReview == nil {
ws.log.Info("failed to parse admission review request", "request", r)
return
}
logger := ws.log.WithName("handlerFunc").WithValues("kind", admissionReview.Request.Kind, "namespace", admissionReview.Request.Namespace,
"name", admissionReview.Request.Name, "operation", admissionReview.Request.Operation, "uid", admissionReview.Request.UID)
admissionReview.Response = &v1beta1.AdmissionResponse{
Allowed: true,
UID: admissionReview.Request.UID,
}
// Do not process admission requests for kinds listed in the filterKinds configuration
request := admissionReview.Request
if filter && ws.configHandler.ToFilter(request.Kind.Kind, request.Namespace, request.Name) {
writeResponse(rw, admissionReview)
return
}
admissionReview.Response = handler(request)
writeResponse(rw, admissionReview)
logger.V(4).Info("admission review request processed", "time", time.Since(startTime).String())
return
}
}
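// Hypothetical sketch (not in the original source): any additional route
// would be registered through handlerFunc in the same shape as the routes
// wired up in NewWebhookServer. With filter=true, requests whose kind is
// listed in the filterKinds configuration are short-circuited with an
// allowed response before the wrapped handler runs.
func exampleRoute(ws *WebhookServer) http.Handler {
	router := httprouter.New()
	router.HandlerFunc("POST", "/example", ws.handlerFunc(ws.verifyHandler, true))
	return router
}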
func writeResponse(rw http.ResponseWriter, admissionReview *v1beta1.AdmissionReview) {
responseJSON, err := json.Marshal(admissionReview)
if err != nil {
http.Error(rw, fmt.Sprintf("Could not encode response: %v", err), http.StatusInternalServerError)
return
}
rw.Header().Set("Content-Type", "application/json; charset=utf-8")
if _, err := rw.Write(responseJSON); err != nil {
http.Error(rw, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
}
}
// ResourceMutation handles mutating webhook admission requests, applying matching mutate and generate policies
func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
logger := ws.log.WithName("ResourceMutation").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
if excludeKyvernoResources(request.Kind.Kind) {
return &v1beta1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Status: "Success",
},
}
}
logger.V(6).Info("received an admission request in mutating webhook")
// timestamp at which this admission request got triggered
admissionRequestTimestamp := time.Now().Unix()
mutatePolicies := ws.pCache.GetPolicyObject(policycache.Mutate, request.Kind.Kind, "")
generatePolicies := ws.pCache.GetPolicyObject(policycache.Generate, request.Kind.Kind, "")
// Get namespace policies from the cache for the requested resource namespace
nsMutatePolicies := ws.pCache.GetPolicyObject(policycache.Mutate, request.Kind.Kind, request.Namespace)
mutatePolicies = append(mutatePolicies, nsMutatePolicies...)
// convert RAW to unstructured
resource, err := utils.ConvertResource(request.Object.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
if err != nil {
logger.Error(err, "failed to convert RAW resource to unstructured format")
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: "Failure",
Message: err.Error(),
},
}
}
var roles, clusterRoles []string
// getRoleRef only if policy has roles/clusterroles defined
if containRBACInfo(mutatePolicies, generatePolicies) {
if roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler); err != nil {
logger.Error(err, "failed to get RBAC information for request")
}
}
userRequestInfo := v1.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: *request.UserInfo.DeepCopy(),
}
ctx, err := newVariablesContext(request, &userRequestInfo)
if err != nil {
logger.Error(err, "unable to build variable context")
}
if err := ctx.AddImageInfo(&resource); err != nil {
logger.Error(err, "unable to add image info to variables context")
}
var patches []byte
patchedResource := request.Object.Raw
// MUTATION
var triggeredMutatePolicies []v1.ClusterPolicy
var mutateEngineResponses []*response.EngineResponse
patches, triggeredMutatePolicies, mutateEngineResponses = ws.HandleMutation(request, resource, mutatePolicies, ctx, userRequestInfo, admissionRequestTimestamp)
logger.V(6).Info("", "generated patches", string(patches))
// patch the resource with patches before handling validation rules
patchedResource = processResourceWithPatches(patches, request.Object.Raw, logger)
logger.V(6).Info("", "patchedResource", string(patchedResource))
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
// registering the kyverno_admission_review_latency_milliseconds metric concurrently
go registerAdmissionReviewLatencyMetricMutate(logger, *ws.promConfig.Metrics, string(request.Operation), mutateEngineResponses, triggeredMutatePolicies, admissionReviewLatencyDuration)
// GENERATE
newRequest := request.DeepCopy()
newRequest.Object.Raw = patchedResource
// this channel transmits the admission review latency from the ws.HandleGenerate goroutine to the registerAdmissionReviewLatencyMetricGenerate goroutine
admissionReviewCompletionLatencyChannel := make(chan int64, 1)
go ws.HandleGenerate(newRequest, generatePolicies, ctx, userRequestInfo, ws.configHandler, admissionRequestTimestamp, &admissionReviewCompletionLatencyChannel)
// registering the kyverno_admission_review_latency_milliseconds metric concurrently
go registerAdmissionReviewLatencyMetricGenerate(logger, *ws.promConfig.Metrics, string(request.Operation), mutateEngineResponses, triggeredMutatePolicies, &admissionReviewCompletionLatencyChannel)
patchType := v1beta1.PatchTypeJSONPatch
return &v1beta1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Status: "Success",
},
Patch: patches,
PatchType: &patchType,
}
}
func registerAdmissionReviewLatencyMetricMutate(logger logr.Logger, promMetrics metrics.PromMetrics, requestOperation string, engineResponses []*response.EngineResponse, triggeredPolicies []v1.ClusterPolicy, admissionReviewLatencyDuration int64) {
resourceRequestOperationPromAlias, err := admissionReviewLatency.ParseResourceRequestOperation(requestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
if err := admissionReviewLatency.ParsePromMetrics(promMetrics).ProcessEngineResponses(engineResponses, triggeredPolicies, admissionReviewLatencyDuration, resourceRequestOperationPromAlias); err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
}
func registerAdmissionReviewLatencyMetricGenerate(logger logr.Logger, promMetrics metrics.PromMetrics, requestOperation string, engineResponses []*response.EngineResponse, triggeredPolicies []v1.ClusterPolicy, latencyReceiver *chan int64) {
defer close(*latencyReceiver)
resourceRequestOperationPromAlias, err := admissionReviewLatency.ParseResourceRequestOperation(requestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
// block until the admission review latency (int64) is received from the other goroutine, i.e. ws.HandleGenerate
admissionReviewLatencyDuration := <-(*latencyReceiver)
if err := admissionReviewLatency.ParsePromMetrics(promMetrics).ProcessEngineResponses(engineResponses, triggeredPolicies, admissionReviewLatencyDuration, resourceRequestOperationPromAlias); err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
}
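// A minimal sketch (not in the original file) of the buffered-channel
// handoff used above: the producer goroutine sends a single latency value,
// and the consumer blocks on the receive before recording the metric.
// exampleLatencyHandoff is hypothetical and demonstrates only the pattern.
func exampleLatencyHandoff(work func()) int64 {
	latencyCh := make(chan int64, 1) // buffered so the sender never blocks
	go func() {
		start := time.Now()
		work()
		latencyCh <- int64(time.Since(start))
	}()
	return <-latencyCh // wait for exactly one value, as the consumer above does
}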
func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
logger := ws.log.WithName("Validate").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
if request.Operation == v1beta1.Delete {
ws.handleDelete(request)
}
if excludeKyvernoResources(request.Kind.Kind) {
return &v1beta1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Status: "Success",
},
}
}
logger.V(6).Info("received an admission request in validating webhook")
// timestamp at which this admission request got triggered
admissionRequestTimestamp := time.Now().Unix()
policies := ws.pCache.GetPolicyObject(policycache.ValidateEnforce, request.Kind.Kind, "")
// Get namespace policies from the cache for the requested resource namespace
nsPolicies := ws.pCache.GetPolicyObject(policycache.ValidateEnforce, request.Kind.Kind, request.Namespace)
policies = append(policies, nsPolicies...)
if len(policies) == 0 {
// push admission request to audit handler, this won't block the admission request
ws.auditHandler.Add(request.DeepCopy())
logger.V(4).Info("no enforce validation policies; returning AdmissionResponse.Allowed: true")
return &v1beta1.AdmissionResponse{Allowed: true}
}
var roles, clusterRoles []string
var err error
// getRoleRef only if policy has roles/clusterroles defined
if containRBACInfo(policies) {
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler)
if err != nil {
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: "Failure",
Message: err.Error(),
},
}
}
}
userRequestInfo := v1.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: *request.UserInfo.DeepCopy(),
}
ctx, err := newVariablesContext(request, &userRequestInfo)
if err != nil {
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: "Failure",
Message: err.Error(),
},
}
}
namespaceLabels := make(map[string]string)
if request.Kind.Kind != "Namespace" && request.Namespace != "" {
namespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, ws.nsLister, logger)
}
ok, msg := HandleValidation(ws.promConfig, request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.prGenerator, ws.log, ws.configHandler, ws.resCache, ws.client, namespaceLabels, admissionRequestTimestamp)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: "Failure",
Message: msg,
},
}
}
return &v1beta1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Status: "Success",
},
}
}
// RunAsync starts the TLS server in a separate goroutine and returns control immediately
func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
logger := ws.log
if !cache.WaitForCacheSync(stopCh, ws.grSynced, ws.pSynced, ws.rbSynced, ws.crbSynced, ws.rSynced, ws.crSynced) {
logger.Info("failed to sync informer cache")
}
go func() {
logger.V(3).Info("started serving requests", "addr", ws.server.Addr)
if err := ws.server.ListenAndServeTLS("", ""); err != http.ErrServerClosed {
logger.Error(err, "failed to listen to requests")
}
}()
logger.Info("starting service")
if !ws.debug {
go ws.webhookMonitor.Run(ws.webhookRegister, ws.certRenewer, ws.eventGen, stopCh)
}
}
// Stop shuts down the TLS server and returns control once the server has stopped
func (ws *WebhookServer) Stop(ctx context.Context) {
logger := ws.log
// remove the static webhook configurations
go ws.webhookRegister.Remove(ws.cleanUp)
// shutdown http.Server with context timeout
err := ws.server.Shutdown(ctx)
if err != nil {
// Error from closing listeners, or context timeout:
logger.Error(err, "shutting down server")
ws.server.Close()
}
}
// bodyToAdmissionReview creates an AdmissionReview object from the request body.
// It writes an error to the http.ResponseWriter if the request is not valid.
func (ws *WebhookServer) bodyToAdmissionReview(request *http.Request, writer http.ResponseWriter) *v1beta1.AdmissionReview {
logger := ws.log
if request.Body == nil {
logger.Info("empty body", "req", request.URL.String())
http.Error(writer, "empty body", http.StatusBadRequest)
return nil
}
defer request.Body.Close()
body, err := ioutil.ReadAll(request.Body)
if err != nil {
logger.Info("failed to read HTTP body", "req", request.URL.String())
http.Error(writer, "failed to read HTTP body", http.StatusBadRequest)
return nil
}
contentType := request.Header.Get("Content-Type")
if contentType != "application/json" {
logger.Info("invalid Content-Type", "contextType", contentType)
http.Error(writer, "invalid Content-Type, expect `application/json`", http.StatusUnsupportedMediaType)
return nil
}
admissionReview := &v1beta1.AdmissionReview{}
if err := json.Unmarshal(body, &admissionReview); err != nil {
logger.Error(err, "failed to decode request body to type 'AdmissionReview")
http.Error(writer, "Can't decode body as AdmissionReview", http.StatusExpectationFailed)
return nil
}
return admissionReview
}
func newVariablesContext(request *v1beta1.AdmissionRequest, userRequestInfo *v1.RequestInfo) (*enginectx.Context, error) {
ctx := enginectx.NewContext()
if err := ctx.AddRequest(request); err != nil {
return nil, errors.Wrap(err, "failed to load incoming request in context")
}
if err := ctx.AddUserInfo(*userRequestInfo); err != nil {
return nil, errors.Wrap(err, "failed to load userInfo in context")
}
if err := ctx.AddServiceAccount(userRequestInfo.AdmissionUserInfo.Username); err != nil {
return nil, errors.Wrap(err, "failed to load service account in context")
}
return ctx, nil
}