feat(audit): use a worker pool for Audit policies (#10048)
* enhancement: split validation logic for enforce and audit policies to return admission response earlier
* chore: add missing file
* fix: unit tests
* fix: linter issues
* fix: unit tests
* fix: get latest policy object before updating status
* chore: remove debug code
* fix: compare before updates
* fix: initial reconcile
* fix: updates
* feat(audit): use a worker pool for Audit policies
* fix: unit test
* fix(attempt): spin up go routine
* feat: add flags maxAuditWorkers, maxAuditCapacity
* fix: enable debug log on failure
* fix: wait group panic
* load-tests: add stress tests configurations
* load-tests: disable admissionreports
* fix: build policy contexts synchronously
* fix: only run generate and mutate existing go routines when policies are present
* fix: mutate and verify tests
* fix: return early if no audit policy
* fix: run handlegenerate and mutate existing in all cases
* fix: only test bgapplies in generate test
* fix: defer wait in tests
* enhancement: process validate enforce in a go routine

---------

Signed-off-by: ShutingZhao <shuting@nirmata.com>
Signed-off-by: Khaled Emara <khaled.emara@nirmata.com>
Signed-off-by: Vishal Choudhary <vishal.choudhary@nirmata.com>
Co-authored-by: ShutingZhao <shuting@nirmata.com>
Co-authored-by: Vishal Choudhary <vishal.choudhary@nirmata.com>
This commit is contained in:
  parent 90d1440d82
  commit fb40aa5f38

12 changed files with 223 additions and 160 deletions
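The audit pool is built on github.com/alitto/pond, added as a dependency below. As a reference point for the diffs that follow, here is a minimal, self-contained sketch of the pond v1 API this change relies on (pond.New, pond.Strategy(pond.Lazy()), Submit, StopAndWait); the task body is illustrative:

```go
package main

import (
	"fmt"

	"github.com/alitto/pond"
)

func main() {
	// Bounded pool: at most 8 concurrent workers, and a queue holding up
	// to 1000 pending tasks. pond.Strategy(pond.Lazy()) spawns workers
	// only when tasks actually arrive, which is how the resource handlers
	// construct their audit pool in this diff.
	pool := pond.New(8, 1000, pond.Strategy(pond.Lazy()))

	for i := 0; i < 10; i++ {
		i := i // capture the loop variable for the closure
		pool.Submit(func() {
			fmt.Println("processing audit task", i)
		})
	}

	// Drain the queue and stop the workers.
	pool.StopAndWait()
}
```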
.github/workflows/load-testing.yml (vendored, 7 lines changed)
@@ -56,9 +56,9 @@ jobs:
           - name: default
             values:
               - default-with-profiling
-          - name: standard
+          - name: stress
             values:
-              - standard-with-profiling
+              - stress-with-profiling
         test:
           - kyverno-pss
           - kyverno-mutate
@@ -139,3 +139,6 @@ jobs:
         with:
           name: pprof-heap-profiles
           path: heap.pprof
+      - name: Debug failure
+        if: failure()
+        uses: ./.github/actions/kyverno-logs
@@ -254,6 +254,8 @@ func main() {
 		backgroundServiceAccountName string
 		maxAPICallResponseLength     int64
 		renewBefore                  time.Duration
+		maxAuditWorkers              int
+		maxAuditCapacity             int
 	)
 	flagset := flag.NewFlagSet("kyverno", flag.ExitOnError)
 	flagset.BoolVar(&dumpPayload, "dumpPayload", false, "Set this flag to activate/deactivate debug mode.")
@@ -274,6 +276,8 @@ func main() {
 	flagset.StringVar(&tlsSecretName, "tlsSecretName", "", "Name of the secret containing TLS pair.")
 	flagset.Int64Var(&maxAPICallResponseLength, "maxAPICallResponseLength", 10*1000*1000, "Configure the value of maximum allowed GET response size from API Calls")
 	flagset.DurationVar(&renewBefore, "renewBefore", 15*24*time.Hour, "The certificate renewal time before expiration")
+	flagset.IntVar(&maxAuditWorkers, "maxAuditWorkers", 8, "Maximum number of workers for audit policy processing")
+	flagset.IntVar(&maxAuditCapacity, "maxAuditCapacity", 1000, "Maximum capacity of the audit policy task queue")
 	// config
 	appConfig := internal.NewConfiguration(
 		internal.WithProfiling(),
@@ -533,6 +537,8 @@ func main() {
 		admissionReports,
 		backgroundServiceAccountName,
 		setup.Jp,
+		maxAuditWorkers,
+		maxAuditCapacity,
 	)
 	exceptionHandlers := webhooksexception.NewHandlers(exception.ValidationOptions{
 		Enabled: internal.PolicyExceptionEnabled(),
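Taken together, the three hunks above declare the two knobs, register them as flags, and pass them to the webhook handlers. A hedged sketch of that plumbing in isolation (the parse call and pool construction are illustrative wiring, not the exact main.go code):

```go
package main

import (
	"flag"
	"os"

	"github.com/alitto/pond"
)

func main() {
	flagset := flag.NewFlagSet("kyverno", flag.ExitOnError)
	// Same names and defaults as the flags added in this diff.
	maxAuditWorkers := flagset.Int("maxAuditWorkers", 8, "Maximum number of workers for audit policy processing")
	maxAuditCapacity := flagset.Int("maxAuditCapacity", 1000, "Maximum capacity of the audit policy task queue")
	_ = flagset.Parse(os.Args[1:])

	// The parsed values size the worker pool owned by the resource handlers.
	auditPool := pond.New(*maxAuditWorkers, *maxAuditCapacity, pond.Strategy(pond.Lazy()))
	defer auditPool.StopAndWait()

	// ... construct the webhook handlers with auditPool ...
}
```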
go.mod (2 lines changed)
@@ -92,6 +92,8 @@ require (
 	sigs.k8s.io/yaml v1.4.0
 )
 
+require github.com/alitto/pond v1.8.3 // indirect
+
 require (
 	cloud.google.com/go/compute v1.25.0 // indirect
 	cloud.google.com/go/compute/metadata v0.2.3 // indirect
go.sum (2 lines changed)
@@ -132,6 +132,8 @@ github.com/alibabacloud-go/tea-utils v1.4.5/go.mod h1:KNcT0oXlZZxOXINnZBs6YvgOd5
 github.com/alibabacloud-go/tea-xml v1.1.2/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
 github.com/alibabacloud-go/tea-xml v1.1.3 h1:7LYnm+JbOq2B+T/B0fHC4Ies4/FofC4zHzYtqw7dgt0=
 github.com/alibabacloud-go/tea-xml v1.1.3/go.mod h1:Rq08vgCcCAjHyRi/M7xlHKUykZCEtyBy9+DPF6GgEu8=
+github.com/alitto/pond v1.8.3 h1:ydIqygCLVPqIX/USe5EaV/aSRXTRXDEI9JwuDdu+/xs=
+github.com/alitto/pond v1.8.3/go.mod h1:CmvIIGd5jKLasGI3D87qDkQxjzChdKMmnXMg3fG6M6Q=
 github.com/aliyun/credentials-go v1.1.2/go.mod h1:ozcZaMR5kLM7pwtCMEpVmQ242suV6qTJya2bDq4X1Tw=
 github.com/aliyun/credentials-go v1.3.2 h1:L4WppI9rctC8PdlMgyTkF8bBsy9pyKQEzBD1bHMRl+g=
 github.com/aliyun/credentials-go v1.3.2/go.mod h1:tlpz4uys4Rn7Ik4/piGRrTbXy2uLKvePgQJJduE+Y5c=
@@ -3,6 +3,7 @@ package resource
 import (
 	"context"
 
+	"github.com/alitto/pond"
 	fakekyvernov1 "github.com/kyverno/kyverno/pkg/client/clientset/versioned/fake"
 	kyvernoinformers "github.com/kyverno/kyverno/pkg/client/informers/externalversions"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"

@@ -53,6 +54,7 @@ func NewFakeHandlers(ctx context.Context, policyCache policycache.Cache) *resour
 		urGenerator: updaterequest.NewFake(),
 		eventGen:    event.NewFake(),
 		pcBuilder:   webhookutils.NewPolicyContextBuilder(configuration, jp),
+		auditPool:   pond.New(8, 1000),
 		engine: engine.NewEngine(
 			configuration,
 			config.NewDefaultMetricsConfiguration(),
@@ -8,6 +8,7 @@ import (
 	"sync"
 	"time"
 
+	"github.com/alitto/pond"
 	"github.com/go-logr/logr"
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	"github.com/kyverno/kyverno/pkg/client/clientset/versioned"

@@ -37,8 +38,6 @@ import (
 )
 
 type resourceHandlers struct {
-	wg sync.WaitGroup
-
 	// clients
 	client        dclient.Interface
 	kyvernoClient versioned.Interface

@@ -63,6 +62,7 @@ type resourceHandlers struct {
 
 	admissionReports             bool
 	backgroundServiceAccountName string
+	auditPool                    *pond.WorkerPool
 }
 
 func NewHandlers(

@@ -81,6 +81,8 @@ func NewHandlers(
 	admissionReports bool,
 	backgroundServiceAccountName string,
 	jp jmespath.Interface,
+	maxAuditWorkers int,
+	maxAuditCapacity int,
 ) webhooks.ResourceHandlers {
 	return &resourceHandlers{
 		engine: engine,

@@ -98,6 +100,7 @@ func NewHandlers(
 		pcBuilder:                    webhookutils.NewPolicyContextBuilder(configuration, jp),
 		admissionReports:             admissionReports,
 		backgroundServiceAccountName: backgroundServiceAccountName,
+		auditPool:                    pond.New(maxAuditWorkers, maxAuditCapacity, pond.Strategy(pond.Lazy())),
 	}
 }
 

@@ -117,23 +120,33 @@ func (h *resourceHandlers) Validate(ctx context.Context, logger logr.Logger, req
 
 	logger.V(4).Info("processing policies for validate admission request", "validate", len(policies), "mutate", len(mutatePolicies), "generate", len(generatePolicies))
 
-	policyContext, err := h.buildPolicyContextFromAdmissionRequest(logger, request)
-	if err != nil {
-		return errorResponse(logger, request.UID, err, "failed create policy context")
+	vh := validation.NewValidationHandler(logger, h.kyvernoClient, h.engine, h.pCache, h.pcBuilder, h.eventGen, h.admissionReports, h.metricsConfig, h.configuration, h.nsLister)
+	var wg sync.WaitGroup
+	var ok bool
+	var msg string
+	var warnings []string
+	wg.Add(1)
+	go func() {
+		defer wg.Done()
+		ok, msg, warnings = vh.HandleValidationEnforce(ctx, request, policies, startTime)
+	}()
+
+	go h.auditPool.Submit(func() {
+		vh.HandleValidationAudit(ctx, request)
+	})
+	if !admissionutils.IsDryRun(request.AdmissionRequest) {
+		h.handleBackgroundApplies(ctx, logger, request, generatePolicies, mutatePolicies, startTime, nil)
 	}
 	if len(policies) == 0 {
 		return admissionutils.ResponseSuccess(request.UID)
 	}
-
-	vh := validation.NewValidationHandler(logger, h.kyvernoClient, h.engine, h.pCache, h.pcBuilder, h.eventGen, h.admissionReports, h.metricsConfig, h.configuration)
-
-	ok, msg, warnings := vh.HandleValidation(ctx, request, policies, policyContext, startTime)
+	wg.Wait()
 	if !ok {
 		logger.Info("admission request denied")
 		return admissionutils.Response(request.UID, errors.New(msg), warnings...)
 	}
-	if !admissionutils.IsDryRun(request.AdmissionRequest) {
-		h.wg.Add(1)
-		go h.handleBackgroundApplies(ctx, logger, request, generatePolicies, mutatePolicies, startTime)
-		h.wg.Wait()
-	}
-
 	return admissionutils.ResponseSuccess(request.UID, warnings...)
 }
 
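This Validate rewrite is the heart of the PR. The enforce evaluation moves into a goroutine that the handler still waits on, since it decides the admission response; the audit evaluation is queued on the shared pool and never blocks the response. Note that even Submit is wrapped in a goroutine, so a saturated task queue delays only that helper goroutine, not the admission request. A reduced, hedged model of the fan-out (function bodies are stand-ins, not the real handlers):

```go
package main

import (
	"fmt"
	"sync"

	"github.com/alitto/pond"
)

// validate models the new control flow: the enforce verdict is awaited via
// a WaitGroup, while the audit work is handed to the shared worker pool.
func validate(auditPool *pond.WorkerPool) bool {
	var wg sync.WaitGroup
	var ok bool
	wg.Add(1)
	go func() {
		defer wg.Done()
		ok = runEnforcePolicies() // blocking: the admission response needs this verdict
	}()

	// Wrapping Submit in a goroutine means a full queue blocks this helper
	// goroutine, never the admission request itself.
	go auditPool.Submit(func() {
		runAuditPolicies() // fire-and-forget: feeds events and reports only
	})

	wg.Wait()
	return ok
}

func runEnforcePolicies() bool { fmt.Println("enforce path"); return true }
func runAuditPolicies()        { fmt.Println("audit path") }

func main() {
	pool := pond.New(8, 1000, pond.Strategy(pond.Lazy()))
	fmt.Println("allowed:", validate(pool))
	pool.StopAndWait()
}
```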
@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"sync"
 	"testing"
 	"time"
 

@@ -212,8 +213,15 @@ var policyMutateAndVerify = `
 					"entries": [
 						{
 							"keys": {
-								"publicKeys": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM\n5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==\n-----END PUBLIC KEY-----"
-							}
+								"publicKeys": "-----BEGIN PUBLIC KEY-----\nMFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM\n5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==\n-----END PUBLIC KEY-----",
+								"rekor": {
+									"url": "https://rekor.sigstore.dev",
+									"ignoreTlog": true
+								},
+								"ctlog": {
+									"ignoreSCT": true
+								}
 							}
 						}
 					]

@@ -533,7 +541,7 @@ func Test_MutateAndVerify(t *testing.T) {
 			AdmissionRequest: v1.AdmissionRequest{
 				Operation: v1.Create,
 				Kind:      metav1.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
-				Resource:  metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "Pod"},
+				Resource:  metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
 				Object: apiruntime.RawExtension{
 					Raw: []byte(resourceMutateAndVerify),
 				},

@@ -578,7 +586,7 @@ func Test_MutateAndGenerate(t *testing.T) {
 			AdmissionRequest: v1.AdmissionRequest{
 				Operation: v1.Create,
 				Kind:      metav1.GroupVersionKind{Group: "", Version: "v1", Kind: "Pod"},
-				Resource:  metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "Pod"},
+				Resource:  metav1.GroupVersionResource{Group: "", Version: "v1", Resource: "pods"},
 				Object: apiruntime.RawExtension{
 					Raw: []byte(resourceMutateandGenerate),
 				},

@@ -587,38 +595,29 @@ func Test_MutateAndGenerate(t *testing.T) {
 		},
 	}
 
 	response := resourceHandlers.Validate(ctx, logger, request, "", time.Now())
 
-	assert.Assert(t, len(mockPcBuilder.contexts) >= 3, fmt.Sprint("expected no of context ", 3, " received ", len(mockPcBuilder.contexts)))
-
-	validateJSONContext := mockPcBuilder.contexts[0].JSONContext()
-	mutateJSONContext := mockPcBuilder.contexts[1].JSONContext()
-	generateJSONContext := mockPcBuilder.contexts[2].JSONContext()
-
-	_, err = enginecontext.AddMockDeferredLoader(validateJSONContext, "key1", "value1")
-	assert.NilError(t, err)
-	_, err = enginecontext.AddMockDeferredLoader(mutateJSONContext, "key2", "value2")
-	assert.NilError(t, err)
-	_, err = enginecontext.AddMockDeferredLoader(generateJSONContext, "key3", "value3")
+	_, mutatePolicies, generatePolicies, _, err := resourceHandlers.retrieveAndCategorizePolicies(ctx, logger, request, "", false)
 	assert.NilError(t, err)
 
-	_, err = mutateJSONContext.Query("key1")
-	assert.ErrorContains(t, err, `Unknown key "key1" in path`)
+	var wg sync.WaitGroup
+	wg.Add(2)
+	resourceHandlers.handleBackgroundApplies(ctx, logger, request, generatePolicies, mutatePolicies, time.Now(), &wg)
+	wg.Wait()
+
+	assert.Assert(t, len(mockPcBuilder.contexts) >= 2, fmt.Sprint("expected no of context ", 2, " received ", len(mockPcBuilder.contexts)))
+
+	mutateJSONContext := mockPcBuilder.contexts[0].JSONContext()
+	generateJSONContext := mockPcBuilder.contexts[1].JSONContext()
+
+	_, err = enginecontext.AddMockDeferredLoader(mutateJSONContext, "key1", "value1")
+	assert.NilError(t, err)
+	_, err = enginecontext.AddMockDeferredLoader(generateJSONContext, "key2", "value2")
+	assert.NilError(t, err)
+
+	_, err = mutateJSONContext.Query("key2")
+	assert.ErrorContains(t, err, `Unknown key "key2" in path`)
 
 	_, err = generateJSONContext.Query("key1")
 	assert.ErrorContains(t, err, `Unknown key "key1" in path`)
 
-	_, err = validateJSONContext.Query("key2")
-	assert.ErrorContains(t, err, `Unknown key "key2" in path`)
-	_, err = generateJSONContext.Query("key2")
-	assert.ErrorContains(t, err, `Unknown key "key2" in path`)
-
-	_, err = validateJSONContext.Query("key3")
-	assert.ErrorContains(t, err, `Unknown key "key3" in path`)
-	_, err = mutateJSONContext.Query("key3")
-	assert.ErrorContains(t, err, `Unknown key "key3" in path`)
-
 	assert.Equal(t, response.Allowed, true)
 	assert.Equal(t, len(response.Warnings), 0)
 }
 
 func makeKey(policy kyverno.PolicyInterface) string {
@@ -3,6 +3,7 @@ package resource
 import (
 	"context"
 	"fmt"
+	"sync"
 	"time"
 
 	"github.com/go-logr/logr"

@@ -19,19 +20,20 @@ import (
 )
 
 // handleBackgroundApplies applies generate and mutateExisting policies, and creates update requests for background reconcile
-func (h *resourceHandlers) handleBackgroundApplies(ctx context.Context, logger logr.Logger, request handlers.AdmissionRequest, generatePolicies, mutatePolicies []kyvernov1.PolicyInterface, ts time.Time) {
-	h.wg.Add(1)
-	go h.handleMutateExisting(ctx, logger, request, mutatePolicies, ts)
-	h.handleGenerate(ctx, logger, request, generatePolicies, ts)
+func (h *resourceHandlers) handleBackgroundApplies(ctx context.Context, logger logr.Logger, request handlers.AdmissionRequest, generatePolicies, mutatePolicies []kyvernov1.PolicyInterface, ts time.Time, wg *sync.WaitGroup) {
+	go h.handleMutateExisting(ctx, logger, request, mutatePolicies, ts, wg)
+	go h.handleGenerate(ctx, logger, request, generatePolicies, ts, wg)
 }
 
-func (h *resourceHandlers) handleMutateExisting(ctx context.Context, logger logr.Logger, request handlers.AdmissionRequest, policies []kyvernov1.PolicyInterface, admissionRequestTimestamp time.Time) {
+func (h *resourceHandlers) handleMutateExisting(ctx context.Context, logger logr.Logger, request handlers.AdmissionRequest, policies []kyvernov1.PolicyInterface, admissionRequestTimestamp time.Time, wg *sync.WaitGroup) {
 	policyContext, err := h.buildPolicyContextFromAdmissionRequest(logger, request)
 	if err != nil {
 		logger.Error(err, "failed to create policy context")
 		return
 	}
-	h.wg.Done()
+	if wg != nil { // for unit testing purposes
+		defer wg.Done()
+	}
 
 	if request.AdmissionRequest.Operation == admissionv1.Delete {
 		policyContext = policyContext.WithNewResource(policyContext.OldResource())

@@ -92,13 +94,15 @@ func (h *resourceHandlers) handleMutateExisting(ctx context.Context, logger logr
 	}
 }
 
-func (h *resourceHandlers) handleGenerate(ctx context.Context, logger logr.Logger, request handlers.AdmissionRequest, generatePolicies []kyvernov1.PolicyInterface, ts time.Time) {
+func (h *resourceHandlers) handleGenerate(ctx context.Context, logger logr.Logger, request handlers.AdmissionRequest, generatePolicies []kyvernov1.PolicyInterface, ts time.Time, wg *sync.WaitGroup) {
 	policyContext, err := h.buildPolicyContextFromAdmissionRequest(logger, request)
 	if err != nil {
 		logger.Error(err, "failed to create policy context")
 		return
 	}
-	h.wg.Done()
+	if wg != nil { // for unit testing purposes
+		defer wg.Done()
+	}
 
 	gh := generation.NewGenerationHandler(logger, h.engine, h.client, h.kyvernoClient, h.nsLister, h.urLister, h.cpolLister, h.polLister, h.urGenerator, h.eventGen, h.metricsConfig, h.backgroundServiceAccountName)
 	var policies []kyvernov1.PolicyInterface

@@ -108,5 +112,5 @@ func (h *resourceHandlers) handleGenerate(ctx context.Context, logger logr.Logge
 			policies = append(policies, new)
 		}
 	}
-	go gh.Handle(ctx, request.AdmissionRequest, policies, policyContext)
+	gh.Handle(ctx, request.AdmissionRequest, policies, policyContext)
 }
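The wg *sync.WaitGroup threaded through these helpers is nil on the production path (Validate passes nil above) and non-nil only in tests, which need a deterministic way to wait for the two background goroutines. A self-contained sketch of that pattern, with illustrative names:

```go
package main

import (
	"fmt"
	"sync"
)

// handleBackground mirrors the shape of handleBackgroundApplies: two
// goroutines, each signalling an optional WaitGroup. Production callers
// pass nil; tests pass a WaitGroup primed with wg.Add(2).
func handleBackground(wg *sync.WaitGroup) {
	run := func(name string) {
		if wg != nil { // for unit testing purposes, as in the diff
			defer wg.Done()
		}
		fmt.Println("applying", name, "policies in the background")
	}
	go run("mutate-existing")
	go run("generate")
}

func main() {
	// Test-style invocation: block until both goroutines complete.
	var wg sync.WaitGroup
	wg.Add(2)
	handleBackground(&wg)
	wg.Wait()
}
```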
pkg/webhooks/resource/validation/utils.go (new file, 30 lines)
@@ -0,0 +1,30 @@
+package validation
+
+import (
+	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
+	reportutils "github.com/kyverno/kyverno/pkg/utils/report"
+	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
+	admissionv1 "k8s.io/api/admission/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+)
+
+func needsReports(request handlers.AdmissionRequest, resource unstructured.Unstructured, admissionReport bool) bool {
+	createReport := admissionReport
+	if admissionutils.IsDryRun(request.AdmissionRequest) {
+		createReport = false
+	}
+	// we don't need reports for deletions
+	if request.Operation == admissionv1.Delete {
+		createReport = false
+	}
+	// check if the resource supports reporting
+	if !reportutils.IsGvkSupported(schema.GroupVersionKind(request.Kind)) {
+		createReport = false
+	}
+	// if the underlying resource has no UID don't create a report
+	if resource.GetUID() == "" {
+		createReport = false
+	}
+	return createReport
+}
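Since needsReports collapses four independent conditions into a single boolean, a table-driven test documents the gating at a glance. This is a hedged sketch against a simplified inline predicate, not the package's actual test suite:

```go
package validation

import "testing"

// TestReportGating exercises a simplified stand-in for needsReports; the
// real predicate derives these booleans from the AdmissionRequest and the
// unstructured resource.
func TestReportGating(t *testing.T) {
	pred := func(admissionReports, dryRun, deletion, gvkSupported, hasUID bool) bool {
		create := admissionReports
		if dryRun || deletion || !gvkSupported || !hasUID {
			create = false
		}
		return create
	}
	cases := []struct {
		name                                                  string
		admissionReports, dryRun, deletion, supported, hasUID bool
		want                                                  bool
	}{
		{"plain create/update", true, false, false, true, true, true},
		{"reports disabled", false, false, false, true, true, false},
		{"dry-run request", true, true, false, true, true, false},
		{"deletion", true, false, true, true, true, false},
		{"unsupported GVK", true, false, false, false, true, false},
		{"missing UID", true, false, false, true, false, false},
	}
	for _, c := range cases {
		if got := pred(c.admissionReports, c.dryRun, c.deletion, c.supported, c.hasUID); got != c.want {
			t.Errorf("%s: got %v, want %v", c.name, got, c.want)
		}
	}
}
```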
@@ -9,27 +9,29 @@ import (
 	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/engine"
 	engineapi "github.com/kyverno/kyverno/pkg/engine/api"
+	"github.com/kyverno/kyverno/pkg/engine/policycontext"
 	"github.com/kyverno/kyverno/pkg/event"
 	"github.com/kyverno/kyverno/pkg/metrics"
 	"github.com/kyverno/kyverno/pkg/policycache"
 	"github.com/kyverno/kyverno/pkg/tracing"
 	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
+	engineutils "github.com/kyverno/kyverno/pkg/utils/engine"
 	reportutils "github.com/kyverno/kyverno/pkg/utils/report"
 	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
 	webhookutils "github.com/kyverno/kyverno/pkg/webhooks/utils"
 	"go.opentelemetry.io/otel/trace"
 	admissionv1 "k8s.io/api/admission/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"
+	corev1listers "k8s.io/client-go/listers/core/v1"
 )
 
 type ValidationHandler interface {
 	// HandleValidation handles validating webhook admission request
 	// If there are no errors in validating rule we apply generation rules
 	// patchedResource is the (resource + patches) after applying mutation rules
-	HandleValidation(context.Context, handlers.AdmissionRequest, []kyvernov1.PolicyInterface, *engine.PolicyContext, time.Time) (bool, string, []string)
+	HandleValidationEnforce(context.Context, handlers.AdmissionRequest, []kyvernov1.PolicyInterface, time.Time) (bool, string, []string)
+	HandleValidationAudit(context.Context, handlers.AdmissionRequest)
 }
 
 func NewValidationHandler(

@@ -42,6 +44,7 @@ func NewValidationHandler(
 	admissionReports bool,
 	metrics metrics.MetricsConfigManager,
 	cfg config.Configuration,
+	nsLister corev1listers.NamespaceLister,
 ) ValidationHandler {
 	return &validationHandler{
 		log: log,

@@ -53,6 +56,7 @@ func NewValidationHandler(
 		admissionReports: admissionReports,
 		metrics:          metrics,
 		cfg:              cfg,
+		nsLister:         nsLister,
 	}
 }
 

@@ -66,18 +70,27 @@ type validationHandler struct {
 	admissionReports bool
 	metrics          metrics.MetricsConfigManager
 	cfg              config.Configuration
+	nsLister         corev1listers.NamespaceLister
 }
 
-func (v *validationHandler) HandleValidation(
+func (v *validationHandler) HandleValidationEnforce(
 	ctx context.Context,
 	request handlers.AdmissionRequest,
 	policies []kyvernov1.PolicyInterface,
-	policyContext *engine.PolicyContext,
 	admissionRequestTimestamp time.Time,
 ) (bool, string, []string) {
 	resourceName := admissionutils.GetResourceName(request.AdmissionRequest)
 	logger := v.log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind)
 
+	if len(policies) == 0 {
+		return true, "", nil
+	}
+
+	policyContext, err := v.buildPolicyContextFromAdmissionRequest(logger, request)
+	if err != nil {
+		return false, "failed create policy context", nil
+	}
+
 	var engineResponses []engineapi.EngineResponse
 	failurePolicy := kyvernov1.Ignore
 	for _, policy := range policies {

@@ -94,7 +107,7 @@ func (v *validationHandler) HandleValidation(
 			engineResponse := v.engine.Validate(ctx, policyContext)
 			if engineResponse.IsNil() {
 				// we get an empty response if old and new resources created the same response
-				// allow updates if resource update doesnt change the policy evaluation
+				// allow updates if resource update doesn't change the policy evaluation
 				return
 			}
 

@@ -120,24 +133,61 @@
 		return false, webhookutils.GetBlockedMessages(engineResponses), nil
 	}
 
-	go v.handleAudit(ctx, policyContext.NewResource(), request, policyContext.NamespaceLabels(), engineResponses...)
+	go func() {
+		if needsReports(request, policyContext.NewResource(), v.admissionReports) {
+			if err := v.createReports(ctx, policyContext.NewResource(), request, engineResponses...); err != nil {
+				v.log.Error(err, "failed to create report")
+			}
+		}
+	}()
 
 	warnings := webhookutils.GetWarningMessages(engineResponses)
 	return true, "", warnings
 }
 
-func (v *validationHandler) buildAuditResponses(
+func (v *validationHandler) HandleValidationAudit(
 	ctx context.Context,
-	resource unstructured.Unstructured,
 	request handlers.AdmissionRequest,
-	namespaceLabels map[string]string,
-) ([]engineapi.EngineResponse, error) {
+) {
 	gvr := schema.GroupVersionResource(request.Resource)
 	policies := v.pCache.GetPolicies(policycache.ValidateAudit, gvr, request.SubResource, request.Namespace)
-	policyContext, err := v.pcBuilder.Build(request.AdmissionRequest, request.Roles, request.ClusterRoles, request.GroupVersionKind)
-	if err != nil {
-		return nil, err
+	if len(policies) == 0 {
+		return
 	}
 
+	policyContext, err := v.buildPolicyContextFromAdmissionRequest(v.log, request)
+	if err != nil {
+		v.log.Error(err, "failed to build policy context")
+		return
+	}
+
+	needsReport := needsReports(request, policyContext.NewResource(), v.admissionReports)
+	tracing.Span(
+		context.Background(),
+		"",
+		fmt.Sprintf("AUDIT %s %s", request.Operation, request.Kind),
+		func(ctx context.Context, span trace.Span) {
+			responses, err := v.buildAuditResponses(ctx, policyContext, policies)
+			if err != nil {
+				v.log.Error(err, "failed to build audit responses")
+			}
+			events := webhookutils.GenerateEvents(responses, false)
+			v.eventGen.Add(events...)
+			if needsReport {
+				if err := v.createReports(ctx, policyContext.NewResource(), request, responses...); err != nil {
+					v.log.Error(err, "failed to create report")
+				}
+			}
+		},
+		trace.WithLinks(trace.LinkFromContext(ctx)),
+	)
+}
+
+func (v *validationHandler) buildAuditResponses(
+	ctx context.Context,
+	policyContext *policycontext.PolicyContext,
+	policies []kyvernov1.PolicyInterface,
+) ([]engineapi.EngineResponse, error) {
 	var responses []engineapi.EngineResponse
 	for _, policy := range policies {
 		tracing.ChildSpan(

@@ -145,7 +195,7 @@ func (v *validationHandler) buildAuditResponses(
 			"pkg/webhooks/resource/validate",
 			fmt.Sprintf("POLICY %s/%s", policy.GetNamespace(), policy.GetName()),
 			func(ctx context.Context, span trace.Span) {
-				policyContext := policyContext.WithPolicy(policy).WithNamespaceLabels(namespaceLabels)
+				policyContext := policyContext.WithPolicy(policy)
 				response := v.engine.Validate(ctx, policyContext)
 				responses = append(responses, response)
 			},

@@ -154,51 +204,31 @@
 	return responses, nil
 }
 
-func (v *validationHandler) handleAudit(
+func (v *validationHandler) buildPolicyContextFromAdmissionRequest(logger logr.Logger, request handlers.AdmissionRequest) (*policycontext.PolicyContext, error) {
+	policyContext, err := v.pcBuilder.Build(request.AdmissionRequest, request.Roles, request.ClusterRoles, request.GroupVersionKind)
+	if err != nil {
+		return nil, err
+	}
+	namespaceLabels := make(map[string]string)
+	if request.Kind.Kind != "Namespace" && request.Namespace != "" {
+		namespaceLabels = engineutils.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, v.nsLister, logger)
+	}
+	policyContext = policyContext.WithNamespaceLabels(namespaceLabels)
+	return policyContext, nil
+}
+
+func (v *validationHandler) createReports(
 	ctx context.Context,
 	resource unstructured.Unstructured,
 	request handlers.AdmissionRequest,
-	namespaceLabels map[string]string,
 	engineResponses ...engineapi.EngineResponse,
-) {
-	createReport := v.admissionReports
-	if admissionutils.IsDryRun(request.AdmissionRequest) {
-		createReport = false
+) error {
+	report := reportutils.BuildAdmissionReport(resource, request.AdmissionRequest, engineResponses...)
+	if len(report.GetResults()) > 0 {
+		_, err := reportutils.CreateReport(ctx, report, v.kyvernoClient)
+		if err != nil {
+			return err
+		}
 	}
-	// we don't need reports for deletions
-	if request.Operation == admissionv1.Delete {
-		createReport = false
-	}
-	// check if the resource supports reporting
-	if !reportutils.IsGvkSupported(schema.GroupVersionKind(request.Kind)) {
-		createReport = false
-	}
-	// if the underlying resource has no UID don't create a report
-	if resource.GetUID() == "" {
-		createReport = false
-	}
-	tracing.Span(
-		context.Background(),
-		"",
-		fmt.Sprintf("AUDIT %s %s", request.Operation, request.Kind),
-		func(ctx context.Context, span trace.Span) {
-			responses, err := v.buildAuditResponses(ctx, resource, request, namespaceLabels)
-			if err != nil {
-				v.log.Error(err, "failed to build audit responses")
-			}
-			events := webhookutils.GenerateEvents(responses, false)
-			v.eventGen.Add(events...)
-			if createReport {
-				responses = append(responses, engineResponses...)
-				report := reportutils.BuildAdmissionReport(resource, request.AdmissionRequest, responses...)
-				if len(report.GetResults()) > 0 {
-					_, err = reportutils.CreateReport(ctx, report, v.kyvernoClient)
-					if err != nil {
-						v.log.Error(err, "failed to create report")
-					}
-				}
-			}
-		},
-		trace.WithLinks(trace.LinkFromContext(ctx)),
-	)
+	return nil
 }
@@ -1,47 +0,0 @@
-features:
-  policyExceptions:
-    enabled: true
-
-admissionController:
-  profiling:
-    enabled: true
-  serviceType: NodePort
-  nodePort: 30950
-
-backgroundController:
-  rbac:
-    clusterRole:
-      extraResources:
-        - apiGroups:
-            - "*"
-          resources:
-            - configmaps
-            - networkpolicies
-            - resourcequotas
-            - secrets
-            - roles
-            - rolebindings
-            - limitranges
-            - namespaces
-            - nodes
-            - nodes/status
-            - pods
-          verbs:
-            - create
-            - update
-            - patch
-            - delete
-            - get
-            - list
-
-cleanupController:
-  rbac:
-    clusterRole:
-      extraResources:
-        - apiGroups:
-            - ""
-          resources:
-            - pods
-          verbs:
-            - list
-            - delete
scripts/config/stress-with-profiling/kyverno.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
+# stress-with-profiling sets the most restricted configurations for the admission controller. It disables
+# any additional feature that could affect the performance of the admission controller.
+features:
+  policyExceptions:
+    enabled: true
+  omitEvents:
+    eventTypes:
+      - PolicyApplied
+      - PolicySkipped
+      - PolicyViolation
+      - PolicyError
+  admissionReports:
+    enabled: false
+
+admissionController:
+  profiling:
+    enabled: true
+  serviceType: NodePort
+  nodePort: 30950