
feat: Opentelemetry support for metrics and traces (#3910)

* integrating opentelemetry

Signed-off-by: Tathagata Paul <tathagatapaul7@gmail.com>

* fix multiple imports

Signed-off-by: Tathagata Paul <tathagatapaul7@gmail.com>

* fixed cli help statement

Signed-off-by: Tathagata Paul <tathagatapaul7@gmail.com>

* added init file for metrics

Signed-off-by: Tathagata Paul <tathagatapaul7@gmail.com>

Co-authored-by: shuting <shuting@nirmata.com>
Authored by Tathagata Paul on 2022-07-11 23:19:47 +05:30, committed via GitHub
parent 6e9609409b
commit 3e2894b6fa
22 changed files with 709 additions and 323 deletions


@@ -33,6 +33,7 @@ import (
"github.com/kyverno/kyverno/pkg/signal"
"github.com/kyverno/kyverno/pkg/tls"
"github.com/kyverno/kyverno/pkg/toggle"
"github.com/kyverno/kyverno/pkg/tracing"
"github.com/kyverno/kyverno/pkg/utils"
"github.com/kyverno/kyverno/pkg/version"
"github.com/kyverno/kyverno/pkg/webhookconfig"
@@ -40,7 +41,6 @@ import (
webhookspolicy "github.com/kyverno/kyverno/pkg/webhooks/policy"
webhooksresource "github.com/kyverno/kyverno/pkg/webhooks/resource"
webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/updaterequest"
"github.com/prometheus/client_golang/prometheus/promhttp"
kubeinformers "k8s.io/client-go/informers"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
@@ -62,6 +62,10 @@ var (
genWorkers int
profile bool
disableMetricsExport bool
enableTracing bool
otel string
otelCollector string
transportCreds string
autoUpdateWebhooks bool
policyControllerResyncPeriod time.Duration
imagePullSecrets string
@@ -84,6 +88,10 @@ func main() {
flag.BoolVar(&profile, "profile", false, "Set this flag to 'true', to enable profiling.")
flag.StringVar(&profilePort, "profilePort", "6060", "Enable profiling at given port, defaults to 6060.")
flag.BoolVar(&disableMetricsExport, "disableMetrics", false, "Set this flag to 'true' to disable exposing the metrics.")
flag.BoolVar(&enableTracing, "enableTracing", false, "Set this flag to 'true' to enable exposing traces.")
flag.StringVar(&otel, "otelConfig", "prometheus", "Set this flag to 'grpc' to export metrics to an OpenTelemetry collector over OTLP gRPC; the default exporter is \"prometheus\".")
flag.StringVar(&otelCollector, "otelCollector", "opentelemetrycollector.kyverno.svc.cluster.local", "Set this flag to the OpenTelemetry collector service address. Kyverno will try to connect to it on the metrics port.")
flag.StringVar(&transportCreds, "transportCreds", "", "Set this flag to the CA secret containing the certificate used by the OpenTelemetry metrics client. If left empty, an insecure connection is used.")
flag.StringVar(&metricsPort, "metricsPort", "8000", "Expose prometheus metrics at the given port, default to 8000.")
flag.DurationVar(&policyControllerResyncPeriod, "backgroundScan", time.Hour, "Perform background scan every given interval, e.g., 30s, 15m, 1h.")
flag.StringVar(&imagePullSecrets, "imagePullSecrets", "", "Secret resource names for image registry access credentials.")
@@ -140,8 +148,7 @@ func main() {
os.Exit(1)
}
var metricsServerMux *http.ServeMux
var promConfig *metrics.PromConfig
var metricsConfig *metrics.MetricsConfig
if profile {
addr := ":" + profilePort
@@ -265,22 +272,41 @@ func main() {
os.Exit(1)
}
if !disableMetricsExport {
promConfig, err = metrics.NewPromConfig(metricsConfigData)
// Metrics Configuration
metricsAddr := ":" + metricsPort
metricsConfig, metricsServerMux, err := metrics.InitMetrics(
disableMetricsExport,
otel,
metricsAddr,
otelCollector,
metricsConfigData,
transportCreds,
kubeClient,
log.Log.WithName("Metrics"),
)
if err != nil {
setupLog.Error(err, "failed to initialize metrics")
os.Exit(1)
}
if otel == "prometheus" {
go func() {
setupLog.Info("Enabling Metrics for Kyverno", "address", metricsAddr)
if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
setupLog.Error(err, "failed to enable metrics", "address", metricsAddr)
}
}()
}
// Tracing Configuration
if enableTracing {
setupLog.Info("Enabling tracing for Kyverno...")
err = tracing.NewTraceConfig(otelCollector, transportCreds, kubeClient, log.Log.WithName("Tracing"))
if err != nil {
setupLog.Error(err, "failed to setup Prometheus metric configuration")
setupLog.Error(err, "Failed to enable tracing for Kyverno")
os.Exit(1)
}
metricsServerMux = http.NewServeMux()
metricsServerMux.Handle("/metrics", promhttp.HandlerFor(promConfig.MetricsRegistry, promhttp.HandlerOpts{Timeout: 10 * time.Second}))
metricsAddr := ":" + metricsPort
go func() {
setupLog.Info("enabling metrics service", "address", metricsAddr)
if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
setupLog.Error(err, "failed to enable metrics service", "address", metricsAddr)
os.Exit(1)
}
}()
}
// POLICY CONTROLLER
@@ -301,7 +327,7 @@ func main() {
kubeInformer.Core().V1().Namespaces(),
log.Log.WithName("PolicyController"),
policyControllerResyncPeriod,
promConfig,
metricsConfig,
)
if err != nil {
setupLog.Error(err, "Failed to create policy controller")
@@ -346,7 +372,7 @@ func main() {
log.Log.WithName("ValidateAuditHandler"),
configuration,
dynamicClient,
promConfig,
metricsConfig,
)
certRenewer, err := tls.NewCertRenewer(
@@ -428,7 +454,7 @@ func main() {
dynamicClient,
kyvernoClient,
configuration,
promConfig,
metricsConfig,
policyCache,
kubeInformer.Core().V1().Namespaces().Lister(),
kubeInformer.Rbac().V1().RoleBindings().Lister(),
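The pkg/tracing package referenced above is not part of this excerpt. For orientation, here is a minimal sketch of what a bootstrap like tracing.NewTraceConfig could plausibly look like with the OTel v1.7.0 APIs this change pulls in; the body, the service name, and the insecure channel are assumptions, not the commit's actual code (the real implementation presumably also builds TLS credentials from the CA secret when one is configured):

package tracing

import (
	"context"

	"github.com/go-logr/logr"
	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"go.opentelemetry.io/otel/sdk/resource"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
	"k8s.io/client-go/kubernetes"
)

// NewTraceConfig points the global tracer provider at an OTLP gRPC exporter.
// TLS handling via certs/kubeClient is elided here; an insecure channel is used.
func NewTraceConfig(endpoint string, certs string, kubeClient kubernetes.Interface, log logr.Logger) error {
	ctx := context.Background()
	exporter, err := otlptracegrpc.New(ctx,
		otlptracegrpc.WithEndpoint(endpoint),
		otlptracegrpc.WithInsecure(),
	)
	if err != nil {
		log.Error(err, "failed to create the trace exporter")
		return err
	}
	res, err := resource.New(ctx,
		resource.WithAttributes(semconv.ServiceNameKey.String("kyverno")),
		resource.WithSchemaURL(semconv.SchemaURL),
	)
	if err != nil {
		log.Error(err, "failed creating resource")
		return err
	}
	// Batch spans and install the provider globally so otel.Tracer() works.
	tp := sdktrace.NewTracerProvider(
		sdktrace.WithBatcher(exporter),
		sdktrace.WithResource(res),
	)
	otel.SetTracerProvider(tp)
	return nil
}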

go.mod

@@ -18,7 +18,7 @@ require (
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/go-git/go-billy/v5 v5.0.0
github.com/go-git/go-git/v5 v5.2.0
github.com/go-logr/logr v1.2.2
github.com/go-logr/logr v1.2.3
github.com/google/go-containerregistry v0.9.0
github.com/google/go-containerregistry/pkg/authn/kubernetes v0.0.0-20220301182634-bfe2ffc6b6bd
github.com/googleapis/gnostic v0.5.5
@@ -35,8 +35,7 @@ require (
github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.12.1
github.com/robfig/cron/v3 v3.0.1
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/sigstore/cosign v1.9.1-0.20220615165628-e4bc4a95743b
github.com/sigstore/sigstore v1.2.1-0.20220614141825-9c0e2e247545
github.com/spf13/cobra v1.4.0
@@ -59,7 +58,20 @@ require (
sigs.k8s.io/yaml v1.3.0
)
require github.com/jmoiron/jsonq v0.0.0-20150511023944-e874b168d07e
require (
github.com/jmoiron/jsonq v0.0.0-20150511023944-e874b168d07e
go.opentelemetry.io/otel v1.7.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.30.0
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.30.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0
go.opentelemetry.io/otel/exporters/prometheus v0.30.0
go.opentelemetry.io/otel/metric v0.30.0
go.opentelemetry.io/otel/sdk v1.7.0
go.opentelemetry.io/otel/sdk/metric v0.30.0
go.opentelemetry.io/otel/trace v1.7.0
google.golang.org/grpc v1.47.0
)
require (
bitbucket.org/creachadair/shell v0.0.6 // indirect
@@ -104,11 +116,12 @@ require (
github.com/aws/aws-sdk-go-v2/service/sso v1.11.8 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.16.7 // indirect
github.com/aws/smithy-go v1.11.3 // indirect
github.com/benbjohnson/clock v1.1.0 // indirect
github.com/benbjohnson/clock v1.3.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bgentry/speakeasy v0.1.0 // indirect
github.com/blang/semver v3.5.1+incompatible // indirect
github.com/cenkalti/backoff/v3 v3.2.2 // indirect
github.com/cenkalti/backoff/v4 v4.1.3 // indirect
github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect
@@ -136,6 +149,7 @@ require (
github.com/go-chi/chi v4.1.2+incompatible // indirect
github.com/go-errors/errors v1.0.1 // indirect
github.com/go-git/gcfg v1.5.0 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-openapi/analysis v0.21.2 // indirect
github.com/go-openapi/errors v0.20.2 // indirect
github.com/go-openapi/jsonpointer v0.19.5 // indirect
@@ -172,6 +186,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/hashicorp/errwrap v1.1.0 // indirect
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
github.com/hashicorp/go-hclog v1.2.1 // indirect
@@ -282,14 +297,11 @@ require (
go.opencensus.io v0.23.0 // indirect
go.opentelemetry.io/contrib v1.3.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0 // indirect
go.opentelemetry.io/otel v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp v0.20.0 // indirect
go.opentelemetry.io/otel/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk v0.20.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 // indirect
go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 // indirect
go.opentelemetry.io/otel/sdk/metric v0.20.0 // indirect
go.opentelemetry.io/otel/trace v0.20.0 // indirect
go.opentelemetry.io/proto/otlp v0.12.0 // indirect
go.opentelemetry.io/proto/otlp v0.16.0 // indirect
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 // indirect
go.uber.org/atomic v1.9.0 // indirect
go.uber.org/multierr v1.7.0 // indirect
@@ -308,7 +320,6 @@ require (
google.golang.org/api v0.84.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac // indirect
google.golang.org/grpc v1.47.0 // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/cheggaaa/pb.v1 v1.0.28 // indirect
gopkg.in/ini.v1 v1.66.4 // indirect

go.sum

@@ -375,8 +375,9 @@ github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U
github.com/beeker1121/goque v1.0.3-0.20191103205551-d618510128af/go.mod h1:84CWnaDz4g1tEVnFLnuBigmGK15oPohy0RfvSN8d4eg=
github.com/beevik/etree v1.1.0/go.mod h1:r8Aw8JqVegEf0w2fDnATrX9VpkMcyFeM0FhwO62wh+A=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
@@ -413,6 +414,8 @@ github.com/cenkalti/backoff/v3 v3.0.0/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4r
github.com/cenkalti/backoff/v3 v3.2.2 h1:cfUAAO3yvKMYKPrvhDuHSwQnhZNk/RMHKdZqKTxfm6M=
github.com/cenkalti/backoff/v3 v3.2.2/go.mod h1:cIeZDE3IrqwwJl6VUwCN6trj1oXrTS4rc0ij+ULvLYs=
github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4=
github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk=
@@ -717,8 +720,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI=
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
@@ -1140,6 +1146,8 @@ github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t
github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/hanwen/go-fuse v1.0.0/go.mod h1:unqXarDXqzAk0rt98O2tVndEPIpUgLD9+rwFisZH3Ok=
github.com/hanwen/go-fuse/v2 v2.1.0/go.mod h1:oRyA5eK+pvJyv5otpO/DgccS8y/RvYMaO00GgRLGryc=
github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q=
@@ -1756,8 +1764,6 @@ github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqn
github.com/remyoudompheng/bigfft v0.0.0-20170806203942-52369c62f446/go.mod h1:uYEyJGbgTkfkS4+E/PavXkNJcbFIpEtjt2B0KDQ5+9M=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/robfig/cron/v3 v3.0.1 h1:WdRxkvbJztn8LMz/QEvLN5sBU+xKpSqwwUO1Pjr4qDs=
github.com/robfig/cron/v3 v3.0.1/go.mod h1:eQICP3HwyT7UooqI/z+Ov+PtYAWygg1TEWWzGIFLtro=
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
@@ -2117,28 +2123,51 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:
go.opentelemetry.io/contrib/propagators v0.19.0 h1:HrixVNZYFjUl/Db+Tr3DhqzLsVW9GeVf/Gye+C5dNUY=
go.opentelemetry.io/contrib/propagators v0.19.0/go.mod h1:4QOdZClXISU5S43xZxk5tYaWcpb+lehqfKtE6PK6msE=
go.opentelemetry.io/otel v0.19.0/go.mod h1:j9bF567N9EfomkSidSfmMwIwIBuP37AMAIzVW85OxSg=
go.opentelemetry.io/otel v0.20.0 h1:eaP0Fqu7SXHwvjiqDq83zImeehOHX8doTvU9AwXON8g=
go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo=
go.opentelemetry.io/otel v1.4.0/go.mod h1:jeAqMFKy2uLIxCtKxoFj0FAL5zAPKQagc3+GtBWakzk=
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel/exporters/otlp v0.20.0 h1:PTNgq9MRmQqqJY0REVbZFvwkYOA85vbdQU/nVfxDyqg=
go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0 h1:7Yxsak1q4XrJ5y7XBnNwqWx9amMZvoidCctv62XOQ6Y=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.7.0/go.mod h1:M1hVZHNxcbkAlcvrOMlpQ4YOO3Awf+4N2dxkZL3xm04=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.30.0 h1:Os0ds8fJp2AUa9DNraFWIycgUzevz47i6UvnSh+8LQ0=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric v0.30.0/go.mod h1:8Lz1GGcrx1kPGE3zqDrK7ZcPzABEfIQqBjq7roQa5ZA=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.30.0 h1:7E8znQuiqnaFDDl1zJYUpoqHteZI6u2rrcxH3Gwoiis=
go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc v0.30.0/go.mod h1:RejW0QAFotPIixlFZKZka4/70S5UaFOqDO9DYOgScIs=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0 h1:cMDtmgJ5FpRvqx9x2Aq+Mm0O6K/zcUkH73SFz20TuBw=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.7.0/go.mod h1:ceUgdyfNv4h4gLxHR0WNfDiiVmZFodZhZSbOLhpxqXE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0 h1:MFAyzUPrTwLOwCi+cltN0ZVyy4phU41lwH+lyMyQTS4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.7.0/go.mod h1:E+/KKhwOSw8yoPxSSuUHG6vKppkvhN+S1Jc7Nib3k3o=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs=
go.opentelemetry.io/otel/internal/metric v0.27.0 h1:9dAVGAfFiiEq5NVB9FUJ5et+btbDQAUIJehJ+ikyryk=
go.opentelemetry.io/otel/internal/metric v0.27.0/go.mod h1:n1CVxRqKqYZtqyTh9U/onvKapPGv7y/rpyOTI+LFNzw=
go.opentelemetry.io/otel/metric v0.19.0/go.mod h1:8f9fglJPRnXuskQmKpnad31lcLJ2VmNNqIsx/uIwBSc=
go.opentelemetry.io/otel/metric v0.20.0 h1:4kzhXFP+btKm4jwxpjIqjs41A7MakRFUS86bqLHTIw8=
go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU=
go.opentelemetry.io/otel/metric v0.27.0/go.mod h1:raXDJ7uP2/Jc0nVZWQjJtzoyssOYWu/+pjZqRzfvZ7g=
go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c=
go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
go.opentelemetry.io/otel/oteltest v0.19.0/go.mod h1:tI4yxwh8U21v7JD6R3BcA/2+RBoTKFexE/PJ/nSO7IA=
go.opentelemetry.io/otel/oteltest v0.20.0 h1:HiITxCawalo5vQzdHfKeZurV8x7ljcqAgiWzF6Vaeaw=
go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw=
go.opentelemetry.io/otel/sdk v0.20.0 h1:JsxtGXd06J8jrnya7fdI/U/MR6yXA5DtbZy+qoHQlr8=
go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc=
go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0 h1:c5VRjxCXdQlx1HjzwGdQHzZaVI82b5EbBgOu2ljD92g=
go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE=
go.opentelemetry.io/otel/sdk/metric v0.20.0 h1:7ao1wpzHRVKf0OQ7GIxiQJA6X7DLX9o14gmVon7mMK8=
go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE=
go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME=
go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8=
go.opentelemetry.io/otel/trace v0.19.0/go.mod h1:4IXiNextNOpPnRlI4ryK69mn5iC84bjBWZQA5DXz/qg=
go.opentelemetry.io/otel/trace v0.20.0 h1:1DL6EXUdcg95gukhuRRvLDO/4X5THh/5dIV52lqtnbw=
go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw=
go.opentelemetry.io/otel/trace v1.4.0/go.mod h1:uc3eRsqDfWs9R7b92xbQbU42/eTNz4N+gLP8qJCi4aE=
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.12.0 h1:CMJ/3Wp7iOWES+CYLfnBv+DVmPbB+kmy9PJ92XvlR6c=
go.opentelemetry.io/proto/otlp v0.12.0/go.mod h1:TsIjwGWIx5VFYv9KGVlOpxoBl5Dy+63SUguV7GGvlSQ=
go.opentelemetry.io/proto/otlp v0.16.0 h1:WHzDWdXUvbc5bG2ObdrGfaNpQz7ft7QN9HHmJlbiB1E=
go.opentelemetry.io/proto/otlp v0.16.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5 h1:+FNtrFTmVw0YZGpBGX56XDee331t6JAXeK2bcyhLOOc=
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
go.step.sm/crypto v0.14.0/go.mod h1:3G0yQr5lQqfEG0CMYz8apC/qMtjLRQlzflL2AxkcN+g=


@@ -14,6 +14,7 @@ import (
"github.com/in-toto/in-toto-golang/in_toto"
wildcard "github.com/kyverno/go-wildcard"
"github.com/kyverno/kyverno/pkg/registryclient"
"github.com/kyverno/kyverno/pkg/tracing"
"github.com/kyverno/kyverno/pkg/utils"
"github.com/pkg/errors"
"github.com/sigstore/cosign/cmd/cosign/cli/fulcio"
@@ -72,7 +73,15 @@ func verifySignature(opts Options) (*Response, error) {
return nil, err
}
signatures, bundleVerified, err := client.VerifyImageSignatures(context.Background(), ref, cosignOpts)
var (
signatures []oci.Signature
bundleVerified bool
)
tracing.DoInSpan(context.Background(), "cosign", "verify_image_signatures", func(ctx context.Context) {
signatures, bundleVerified, err = client.VerifyImageSignatures(ctx, ref, cosignOpts)
})
if err != nil {
logger.Info("image verification failed", "error", err.Error())
return nil, err
@@ -255,7 +264,13 @@ func fetchAttestations(opts Options) (*Response, error) {
return nil, errors.Wrap(err, "failed to parse image")
}
signatures, bundleVerified, err := client.VerifyImageAttestations(context.Background(), ref, cosignOpts)
var signatures []oci.Signature
var bundleVerified bool
tracing.DoInSpan(context.Background(), "cosign_operations", "verify_image_signatures", func(ctx context.Context) {
signatures, bundleVerified, err = client.VerifyImageAttestations(context.Background(), ref, cosignOpts)
})
if err != nil {
msg := err.Error()
logger.Info("failed to fetch attestations", "error", msg)


@@ -6,32 +6,29 @@ import (
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/utils"
prom "github.com/prometheus/client_golang/prometheus"
)
func registerAdmissionRequestsMetric(
pc *metrics.PromConfig,
m *metrics.MetricsConfig,
resourceKind, resourceNamespace string,
resourceRequestOperation metrics.ResourceRequestOperation,
) error {
includeNamespaces, excludeNamespaces := pc.Config.GetIncludeNamespaces(), pc.Config.GetExcludeNamespaces()
includeNamespaces, excludeNamespaces := m.Config.GetIncludeNamespaces(), m.Config.GetExcludeNamespaces()
if (resourceNamespace != "" && resourceNamespace != "-") && utils.ContainsString(excludeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_admission_requests_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_admission_requests_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
return nil
}
if (resourceNamespace != "" && resourceNamespace != "-") && len(includeNamespaces) > 0 && !utils.ContainsString(includeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_admission_requests_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_admission_requests_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
return nil
}
pc.Metrics.AdmissionRequests.With(prom.Labels{
"resource_kind": resourceKind,
"resource_namespace": resourceNamespace,
"resource_request_operation": string(resourceRequestOperation),
}).Inc()
m.RecordAdmissionRequests(resourceKind, resourceNamespace, resourceRequestOperation)
return nil
}
func ProcessEngineResponses(pc *metrics.PromConfig, engineResponses []*response.EngineResponse, resourceRequestOperation metrics.ResourceRequestOperation) error {
func ProcessEngineResponses(m *metrics.MetricsConfig, engineResponses []*response.EngineResponse, resourceRequestOperation metrics.ResourceRequestOperation) error {
if len(engineResponses) == 0 {
return nil
}
@@ -52,5 +49,5 @@ func ProcessEngineResponses(pc *metrics.PromConfig, engineResponses []*response.
if validateRulesCount == 0 && mutateRulesCount == 0 && generateRulesCount == 0 {
return nil
}
return registerAdmissionRequestsMetric(pc, resourceKind, resourceNamespace, resourceRequestOperation)
return registerAdmissionRequestsMetric(m, resourceKind, resourceNamespace, resourceRequestOperation)
}


@@ -6,33 +6,30 @@ import (
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/utils"
prom "github.com/prometheus/client_golang/prometheus"
)
func registerAdmissionReviewDurationMetric(
pc *metrics.PromConfig,
m *metrics.MetricsConfig,
resourceKind, resourceNamespace string,
resourceRequestOperation metrics.ResourceRequestOperation,
admissionRequestLatency float64,
) error {
includeNamespaces, excludeNamespaces := pc.Config.GetIncludeNamespaces(), pc.Config.GetExcludeNamespaces()
includeNamespaces, excludeNamespaces := m.Config.GetIncludeNamespaces(), m.Config.GetExcludeNamespaces()
if (resourceNamespace != "" && resourceNamespace != "-") && utils.ContainsString(excludeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_admission_review_duration_seconds metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_admission_review_duration_seconds metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
return nil
}
if (resourceNamespace != "" && resourceNamespace != "-") && len(includeNamespaces) > 0 && !utils.ContainsString(includeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_admission_review_duration_seconds metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_admission_review_duration_seconds metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
return nil
}
pc.Metrics.AdmissionReviewDuration.With(prom.Labels{
"resource_kind": resourceKind,
"resource_namespace": resourceNamespace,
"resource_request_operation": string(resourceRequestOperation),
}).Observe(admissionRequestLatency)
m.RecordAdmissionReviewDuration(resourceKind, resourceNamespace, string(resourceRequestOperation), admissionRequestLatency)
return nil
}
func ProcessEngineResponses(pc *metrics.PromConfig, engineResponses []*response.EngineResponse, admissionReviewLatencyDuration int64, resourceRequestOperation metrics.ResourceRequestOperation) error {
func ProcessEngineResponses(m *metrics.MetricsConfig, engineResponses []*response.EngineResponse, admissionReviewLatencyDuration int64, resourceRequestOperation metrics.ResourceRequestOperation) error {
if len(engineResponses) == 0 {
return nil
}
@@ -54,5 +51,5 @@ func ProcessEngineResponses(pc *metrics.PromConfig, engineResponses []*response.
return nil
}
admissionReviewLatencyDurationInSeconds := float64(admissionReviewLatencyDuration) / float64(1000*1000*1000)
return registerAdmissionReviewDurationMetric(pc, resourceKind, resourceNamespace, resourceRequestOperation, admissionReviewLatencyDurationInSeconds)
return registerAdmissionReviewDurationMetric(m, resourceKind, resourceNamespace, resourceRequestOperation, admissionReviewLatencyDurationInSeconds)
}
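For context, a hedged sketch of how a webhook handler might feed this metric. The helper below is illustrative, not part of the commit: the package import path, the CREATE operation, and the variable names are assumptions, and the policy engine invocation is elided.

package webhooks // illustrative package

import (
	"time"

	"github.com/go-logr/logr"
	"github.com/kyverno/kyverno/pkg/engine/response"
	"github.com/kyverno/kyverno/pkg/metrics"
	admissionreviewduration "github.com/kyverno/kyverno/pkg/metrics/admissionreviewduration" // assumed path
)

// reportReviewDuration times an admission review and records its latency.
func reportReviewDuration(m *metrics.MetricsConfig, logger logr.Logger, engineResponses []*response.EngineResponse, start time.Time) {
	latency := time.Since(start).Nanoseconds() // ProcessEngineResponses expects nanoseconds
	if err := admissionreviewduration.ProcessEngineResponses(m, engineResponses, latency, metrics.ResourceRequestOperation("CREATE")); err != nil {
		logger.Error(err, "failed to register kyverno_admission_review_duration_seconds")
	}
}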

pkg/metrics/init.go (new file)

@@ -0,0 +1,49 @@
package metrics
import (
"net/http"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/config"
"k8s.io/client-go/kubernetes"
)
func InitMetrics(
disableMetricsExport bool,
otel string,
metricsAddr string,
otelCollector string,
metricsConfigData *config.MetricsConfigData,
transportCreds string,
kubeClient kubernetes.Interface,
log logr.Logger) (*MetricsConfig, *http.ServeMux, error) {
var metricsConfig *MetricsConfig
var err error
var metricsServerMux *http.ServeMux
if !disableMetricsExport {
if otel == "grpc" {
// OTLP gRPC metrics are pushed to the collector endpoint; 4317 is the conventional OTLP gRPC port
log.Info("Enabling Metrics for Kyverno", "address", metricsAddr)
endpoint := otelCollector + metricsAddr
metricsConfig, err = NewOTLPGRPCConfig(
endpoint,
metricsConfigData,
transportCreds,
kubeClient,
log,
)
if err != nil {
return nil, nil, err
}
} else if otel == "prometheus" {
// Prometheus Server will serve metrics on metrics-port
metricsConfig, metricsServerMux, err = NewPrometheusConfig(metricsConfigData, log)
if err != nil {
return nil, nil, err
}
}
}
return metricsConfig, metricsServerMux, nil
}
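A compact usage sketch for the new InitMetrics entry point in its Prometheus mode; the wrapper function and variable names are illustrative (the main.go hunk above shows the real wiring), and error handling is abbreviated:

package main // illustrative

import (
	"net/http"

	"github.com/go-logr/logr"
	"github.com/kyverno/kyverno/pkg/config"
	"github.com/kyverno/kyverno/pkg/metrics"
	"k8s.io/client-go/kubernetes"
)

func setupMetrics(metricsConfigData *config.MetricsConfigData, kubeClient kubernetes.Interface, logger logr.Logger) (*metrics.MetricsConfig, error) {
	metricsConfig, serverMux, err := metrics.InitMetrics(
		false,        // disableMetricsExport
		"prometheus", // serve a scrape endpoint instead of pushing over gRPC
		":8000",      // metricsAddr
		"opentelemetrycollector.kyverno.svc.cluster.local", // unused in prometheus mode
		metricsConfigData,
		"", // transportCreds: empty string means an insecure connection
		kubeClient,
		logger,
	)
	if err != nil {
		return nil, err
	}
	if serverMux != nil {
		// In prometheus mode, InitMetrics returns a mux exposing /metrics.
		go func() { _ = http.ListenAndServe(":8000", serverMux) }()
	}
	return metricsConfig, nil
}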


@@ -1,143 +1,316 @@
package metrics
import (
"fmt"
"context"
"net/http"
"time"
"github.com/kyverno/kyverno/pkg/config"
prom "github.com/prometheus/client_golang/prometheus"
"github.com/robfig/cron/v3"
"github.com/go-logr/logr"
kconfig "github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/utils/kube"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric"
"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc"
"go.opentelemetry.io/otel/exporters/prometheus"
"go.opentelemetry.io/otel/metric/global"
"go.opentelemetry.io/otel/metric/instrument"
"go.opentelemetry.io/otel/metric/instrument/asyncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"go.opentelemetry.io/otel/metric/instrument/syncint64"
controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
"go.opentelemetry.io/otel/sdk/metric/selector/simple"
"go.opentelemetry.io/otel/sdk/resource"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"k8s.io/client-go/kubernetes"
)
type PromConfig struct {
MetricsRegistry *prom.Registry
Metrics *PromMetrics
Config *config.MetricsConfigData
cron *cron.Cron
const (
meterName = "kyverno"
)
type MetricsConfig struct {
// instruments
policyChangesMetric syncint64.Counter
policyResultsMetric syncint64.Counter
policyRuleInfoMetric asyncfloat64.Gauge
policyExecutionDurationMetric syncfloat64.Histogram
admissionRequestsMetric syncint64.Counter
admissionReviewDurationMetric syncfloat64.Histogram
// config
Config *kconfig.MetricsConfigData
Log logr.Logger
}
type PromMetrics struct {
PolicyResults *prom.CounterVec
PolicyRuleInfo *prom.GaugeVec
PolicyChanges *prom.CounterVec
PolicyExecutionDuration *prom.HistogramVec
AdmissionReviewDuration *prom.HistogramVec
AdmissionRequests *prom.CounterVec
func initializeMetrics(m *MetricsConfig) (*MetricsConfig, error) {
var err error
meter := global.MeterProvider().Meter(meterName)
m.policyResultsMetric, err = meter.SyncInt64().Counter("kyverno_policy_results_total", instrument.WithDescription("can be used to track the results associated with the policies applied in the users cluster, at the level from rule to policy to admission requests"))
if err != nil {
m.Log.Error(err, "Failed to create instrument")
return nil, err
}
m.policyChangesMetric, err = meter.SyncInt64().Counter("kyverno_policy_changes_total", instrument.WithDescription("can be used to track all the changes associated with the Kyverno policies present on the cluster such as creation, updates and deletions"))
if err != nil {
m.Log.Error(err, "Failed to create instrument")
return nil, err
}
m.admissionRequestsMetric, err = meter.SyncInt64().Counter("kyverno_admission_requests_total", instrument.WithDescription("can be used to track the number of admission requests encountered by Kyverno in the cluster"))
if err != nil {
m.Log.Error(err, "Failed to create instrument")
return nil, err
}
m.policyExecutionDurationMetric, err = meter.SyncFloat64().Histogram("kyverno_policy_execution_duration_seconds", instrument.WithDescription("can be used to track the latencies (in seconds) associated with the execution/processing of the individual rules under Kyverno policies whenever they evaluate incoming resource requests"))
if err != nil {
m.Log.Error(err, "Failed to create instrument")
return nil, err
}
m.admissionReviewDurationMetric, err = meter.SyncFloat64().Histogram("kyverno_admission_review_duration_seconds", instrument.WithDescription("can be used to track the latencies (in seconds) associated with the entire individual admission review. For example, if an incoming request triggers, say, five policies, this metric will track the e2e latency associated with the execution of all those policies"))
if err != nil {
m.Log.Error(err, "Failed to create instrument")
return nil, err
}
// register the asynchronous policy rule info gauge (observed directly; no callback is registered)
m.policyRuleInfoMetric, err = meter.AsyncFloat64().Gauge("kyverno_policy_rule_info_total", instrument.WithDescription("can be used to track the info of the rules and/or policies present in the cluster. 0 means the rule doesn't exist and has been deleted, 1 means the rule is currently existent in the cluster"))
if err != nil {
m.Log.Error(err, "Failed to create instrument")
return nil, err
}
return m, nil
}
func NewPromConfig(metricsConfigData *config.MetricsConfigData) (*PromConfig, error) {
pc := new(PromConfig)
pc.Config = metricsConfigData
pc.cron = cron.New()
pc.MetricsRegistry = prom.NewRegistry()
func NewOTLPGRPCConfig(endpoint string,
metricsConfigData *kconfig.MetricsConfigData,
certs string,
kubeClient kubernetes.Interface,
log logr.Logger,
) (*MetricsConfig, error) {
ctx := context.Background()
var client otlpmetric.Client
policyResultsLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_name", "policy_namespace",
"resource_kind", "resource_namespace", "resource_request_operation",
"rule_name", "rule_result", "rule_type", "rule_execution_cause",
}
policyResultsMetric := prom.NewCounterVec(
prom.CounterOpts{
Name: "kyverno_policy_results_total",
Help: "can be used to track the results associated with the policies applied in the users cluster, at the level from rule to policy to admission requests.",
},
policyResultsLabels,
)
policyRuleInfoLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_namespace", "policy_name", "rule_name", "rule_type", "status_ready",
}
policyRuleInfoMetric := prom.NewGaugeVec(
prom.GaugeOpts{
Name: "kyverno_policy_rule_info_total",
Help: "can be used to track the info of the rules or/and policies present in the cluster. 0 means the rule doesn't exist and has been deleted, 1 means the rule is currently existent in the cluster.",
},
policyRuleInfoLabels,
)
policyChangesLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_namespace", "policy_name", "policy_change_type",
}
policyChangesMetric := prom.NewCounterVec(
prom.CounterOpts{
Name: "kyverno_policy_changes_total",
Help: "can be used to track all the changes associated with the Kyverno policies present on the cluster such as creation, updates and deletions.",
},
policyChangesLabels,
)
policyExecutionDurationLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_name", "policy_namespace",
"resource_kind", "resource_namespace", "resource_request_operation",
"rule_name", "rule_result", "rule_type", "rule_execution_cause", "generate_rule_latency_type",
}
policyExecutionDurationMetric := prom.NewHistogramVec(
prom.HistogramOpts{
Name: "kyverno_policy_execution_duration_seconds",
Help: "can be used to track the latencies (in seconds) associated with the execution/processing of the individual rules under Kyverno policies whenever they evaluate incoming resource requests.",
},
policyExecutionDurationLabels,
)
admissionReviewDurationLabels := []string{
"resource_kind", "resource_namespace", "resource_request_operation",
}
admissionReviewDurationMetric := prom.NewHistogramVec(
prom.HistogramOpts{
Name: "kyverno_admission_review_duration_seconds",
Help: "can be used to track the latencies (in seconds) associated with the entire individual admission review. For example, if an incoming request trigger, say, five policies, this metric will track the e2e latency associated with the execution of all those policies.",
},
admissionReviewDurationLabels,
)
admissionRequestsLabels := []string{
"resource_kind", "resource_namespace", "resource_request_operation",
}
admissionRequestsMetric := prom.NewCounterVec(
prom.CounterOpts{
Name: "kyverno_admission_requests_total",
Help: "can be used to track the number of admission requests encountered by Kyverno in the cluster.",
},
admissionRequestsLabels,
)
pc.Metrics = &PromMetrics{
PolicyResults: policyResultsMetric,
PolicyRuleInfo: policyRuleInfoMetric,
PolicyChanges: policyChangesMetric,
PolicyExecutionDuration: policyExecutionDurationMetric,
AdmissionReviewDuration: admissionReviewDurationMetric,
AdmissionRequests: admissionRequestsMetric,
}
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyResults)
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyRuleInfo)
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyChanges)
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyExecutionDuration)
pc.MetricsRegistry.MustRegister(pc.Metrics.AdmissionReviewDuration)
pc.MetricsRegistry.MustRegister(pc.Metrics.AdmissionRequests)
// configuring metrics periodic refresh
if pc.Config.GetMetricsRefreshInterval() != 0 {
if len(pc.cron.Entries()) > 0 {
logger.Info("Skipping the configuration of metrics refresh. Already found cron expiration to be set.")
} else {
_, err := pc.cron.AddFunc(fmt.Sprintf("@every %s", pc.Config.GetMetricsRefreshInterval()), func() {
logger.Info("Resetting the metrics as per their periodic refresh")
pc.Metrics.PolicyResults.Reset()
pc.Metrics.PolicyRuleInfo.Reset()
pc.Metrics.PolicyChanges.Reset()
pc.Metrics.PolicyExecutionDuration.Reset()
pc.Metrics.AdmissionReviewDuration.Reset()
pc.Metrics.AdmissionRequests.Reset()
})
if err != nil {
return nil, err
}
logger.Info(fmt.Sprintf("Configuring metrics refresh at a periodic rate of %s", pc.Config.GetMetricsRefreshInterval()))
pc.cron.Start()
if certs != "" {
// the CA certificate used for TLS credentials is read from a secret
transportCreds, err := kube.FetchCert(ctx, certs, kubeClient)
if err != nil {
log.Error(err, "Error fetching certificate from secret")
return nil, err
}
client = otlpmetricgrpc.NewClient(
otlpmetricgrpc.WithEndpoint(endpoint),
otlpmetricgrpc.WithTLSCredentials(transportCreds),
)
} else {
logger.Info("Skipping the configuration of metrics refresh as 'metricsRefreshInterval' wasn't specified in values.yaml at the time of installing kyverno")
client = otlpmetricgrpc.NewClient(
otlpmetricgrpc.WithEndpoint(endpoint),
otlpmetricgrpc.WithInsecure(),
)
}
return pc, nil
// create a new exporter for pushing metrics to the collector
metricExp, err := otlpmetric.New(ctx, client)
if err != nil {
log.Error(err, "Failed to create the collector exporter")
return nil, err
}
res, err := resource.New(context.Background(),
resource.WithAttributes(semconv.ServiceNameKey.String("kyverno_metrics")),
resource.WithSchemaURL(semconv.SchemaURL),
)
if err != nil {
log.Error(err, "failed creating resource")
return nil, err
}
// create the controller and bind the exporter to it
pusher := controller.New(
processor.NewFactory(
simple.NewWithHistogramDistribution(),
aggregation.CumulativeTemporalitySelector(),
processor.WithMemory(true),
),
controller.WithExporter(metricExp),
controller.WithResource(res),
controller.WithCollectPeriod(2*time.Second),
)
global.SetMeterProvider(pusher)
m := new(MetricsConfig)
m.Log = log
m.Config = metricsConfigData
m, err = initializeMetrics(m)
if err != nil {
log.Error(err, "Failed initializing metrics")
return nil, err
}
if err := pusher.Start(ctx); err != nil {
log.Error(err, "could not start metric exporter")
return nil, err
}
return m, nil
}
func NewPrometheusConfig(metricsConfigData *kconfig.MetricsConfigData,
log logr.Logger,
) (*MetricsConfig, *http.ServeMux, error) {
config := prometheus.Config{}
res, err := resource.New(context.Background(),
resource.WithAttributes(semconv.ServiceNameKey.String("kyverno-svc-metrics")),
resource.WithAttributes(semconv.ServiceNamespaceKey.String(kconfig.KyvernoNamespace())),
resource.WithSchemaURL(semconv.SchemaURL),
)
if err != nil {
log.Error(err, "failed creating resource")
return nil, nil, err
}
c := controller.New(
processor.NewFactory(
simple.NewWithHistogramDistribution(),
aggregation.CumulativeTemporalitySelector(),
processor.WithMemory(true),
),
controller.WithResource(res),
)
exporter, err := prometheus.New(config, c)
if err != nil {
log.Error(err, "failed to initialize prometheus exporter")
return nil, nil, err
}
global.SetMeterProvider(exporter.MeterProvider())
// Create new config object and attach metricsConfig to it
m := new(MetricsConfig)
m.Config = metricsConfigData
// Initialize metrics logger
m.Log = log
m, err = initializeMetrics(m)
if err != nil {
log.Error(err, "failed to initialize metrics config")
return nil, nil, err
}
metricsServerMux := http.NewServeMux()
metricsServerMux.HandleFunc("/metrics", exporter.ServeHTTP)
return m, metricsServerMux, nil
}
func (m *MetricsConfig) RecordPolicyResults(policyValidationMode PolicyValidationMode, policyType PolicyType, policyBackgroundMode PolicyBackgroundMode, policyNamespace string, policyName string,
resourceKind string, resourceNamespace string, resourceRequestOperation ResourceRequestOperation, ruleName string, ruleResult RuleResult, ruleType RuleType,
ruleExecutionCause RuleExecutionCause) {
ctx := context.Background()
commonLabels := []attribute.KeyValue{
attribute.String("policy_validation_mode", string(policyValidationMode)),
attribute.String("policy_type", string(policyType)),
attribute.String("policy_background_mode", string(policyBackgroundMode)),
attribute.String("policy_namespace", policyNamespace),
attribute.String("policy_name", policyName),
attribute.String("resource_kind", resourceKind),
attribute.String("resource_namespace", resourceNamespace),
attribute.String("resource_request_operation", string(resourceRequestOperation)),
attribute.String("rule_name", ruleName),
attribute.String("rule_result", string(ruleResult)),
attribute.String("rule_type", string(ruleType)),
attribute.String("rule_execution_cause", string(ruleExecutionCause)),
}
m.policyResultsMetric.Add(ctx, 1, commonLabels...)
}
func (m *MetricsConfig) RecordPolicyChanges(policyValidationMode PolicyValidationMode, policyType PolicyType, policyBackgroundMode PolicyBackgroundMode, policyNamespace string, policyName string, policyChangeType string) {
ctx := context.Background()
commonLabels := []attribute.KeyValue{
attribute.String("policy_validation_mode", string(policyValidationMode)),
attribute.String("policy_type", string(policyType)),
attribute.String("policy_background_mode", string(policyBackgroundMode)),
attribute.String("policy_namespace", policyNamespace),
attribute.String("policy_name", policyName),
attribute.String("policy_change_type", policyChangeType),
}
m.policyChangesMetric.Add(ctx, 1, commonLabels...)
}
func (m *MetricsConfig) RecordPolicyRuleInfo(policyValidationMode PolicyValidationMode, policyType PolicyType, policyBackgroundMode PolicyBackgroundMode, policyNamespace string, policyName string,
ruleName string, ruleType RuleType, status string, metricValue float64) {
ctx := context.Background()
commonLabels := []attribute.KeyValue{
attribute.String("policy_validation_mode", string(policyValidationMode)),
attribute.String("policy_type", string(policyType)),
attribute.String("policy_background_mode", string(policyBackgroundMode)),
attribute.String("policy_namespace", policyNamespace),
attribute.String("policy_name", policyName),
attribute.String("rule_name", ruleName),
attribute.String("rule_type", string(ruleType)),
attribute.String("status_ready", status),
}
m.policyRuleInfoMetric.Observe(ctx, metricValue, commonLabels...)
}
func (m *MetricsConfig) RecordAdmissionRequests(resourceKind string, resourceNamespace string, resourceRequestOperation ResourceRequestOperation) {
ctx := context.Background()
commonLabels := []attribute.KeyValue{
attribute.String("resource_kind", resourceKind),
attribute.String("resource_namespace", resourceNamespace),
attribute.String("resource_request_operation", string(resourceRequestOperation)),
}
m.admissionRequestsMetric.Add(ctx, 1, commonLabels...)
}
func (m *MetricsConfig) RecordPolicyExecutionDuration(policyValidationMode PolicyValidationMode, policyType PolicyType, policyBackgroundMode PolicyBackgroundMode, policyNamespace string, policyName string,
resourceKind string, resourceNamespace string, resourceRequestOperation ResourceRequestOperation, ruleName string, ruleResult RuleResult, ruleType RuleType,
ruleExecutionCause RuleExecutionCause, generateRuleLatencyType string, ruleExecutionLatency float64) {
ctx := context.Background()
commonLabels := []attribute.KeyValue{
attribute.String("policy_validation_mode", string(policyValidationMode)),
attribute.String("policy_type", string(policyType)),
attribute.String("policy_background_mode", string(policyBackgroundMode)),
attribute.String("policy_namespace", policyNamespace),
attribute.String("policy_name", policyName),
attribute.String("resource_kind", resourceKind),
attribute.String("resource_namespace", resourceNamespace),
attribute.String("resource_request_operation", string(resourceRequestOperation)),
attribute.String("rule_name", ruleName),
attribute.String("rule_result", string(ruleResult)),
attribute.String("rule_type", string(ruleType)),
attribute.String("rule_execution_cause", string(ruleExecutionCause)),
attribute.String("general_rule_latency_type", generalRuleLatencyType),
}
m.policyExecutionDurationMetric.Record(ctx, ruleExecutionLatency, commonLabels...)
}
func (m *MetricsConfig) RecordAdmissionReviewDuration(resourceKind string, resourceNamespace string, resourceRequestOperation string, admissionRequestLatency float64) {
ctx := context.Background()
commonLabels := []attribute.KeyValue{
attribute.String("resource_kind", resourceKind),
attribute.String("resource_namespace", resourceNamespace),
attribute.String("resource_request_operation", resourceRequestOperation),
}
m.admissionReviewDurationMetric.Record(ctx, admissionRequestLatency, commonLabels...)
}
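NewOTLPGRPCConfig above depends on kube.FetchCert to turn a CA secret into gRPC transport credentials; that helper is not in this excerpt. A minimal sketch of the assumed behavior follows, under the assumptions that the secret lives in Kyverno's namespace and stores the PEM CA under a key such as "ca.pem" (both are guesses, not the commit's code):

package kube

import (
	"context"
	"crypto/x509"
	"fmt"

	kconfig "github.com/kyverno/kyverno/pkg/config"
	"google.golang.org/grpc/credentials"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// FetchCert reads a CA certificate from a secret and builds client-side TLS
// transport credentials for the OTLP gRPC exporter.
func FetchCert(ctx context.Context, certName string, kubeClient kubernetes.Interface) (credentials.TransportCredentials, error) {
	secret, err := kubeClient.CoreV1().Secrets(kconfig.KyvernoNamespace()).Get(ctx, certName, metav1.GetOptions{})
	if err != nil {
		return nil, err
	}
	pool := x509.NewCertPool()
	// "ca.pem" is an assumed key; the real secret layout may differ.
	if !pool.AppendCertsFromPEM(secret.Data["ca.pem"]) {
		return nil, fmt.Errorf("failed to parse CA certificate from secret %s", certName)
	}
	return credentials.NewClientTLSFromCert(pool, ""), nil
}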


@@ -6,11 +6,10 @@ import (
kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/utils"
prom "github.com/prometheus/client_golang/prometheus"
)
func registerPolicyChangesMetric(
pc *metrics.PromConfig,
m *metrics.MetricsConfig,
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
@@ -20,32 +19,27 @@ func registerPolicyChangesMetric(
if policyType == metrics.Cluster {
policyNamespace = "-"
}
includeNamespaces, excludeNamespaces := pc.Config.GetIncludeNamespaces(), pc.Config.GetExcludeNamespaces()
includeNamespaces, excludeNamespaces := m.Config.GetIncludeNamespaces(), m.Config.GetExcludeNamespaces()
if (policyNamespace != "" && policyNamespace != "-") && utils.ContainsString(excludeNamespaces, policyNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_changes_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", policyNamespace, excludeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_changes_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", policyNamespace, excludeNamespaces))
return nil
}
if (policyNamespace != "" && policyNamespace != "-") && len(includeNamespaces) > 0 && !utils.ContainsString(includeNamespaces, policyNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_changes_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", policyNamespace, includeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_changes_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", policyNamespace, includeNamespaces))
return nil
}
pc.Metrics.PolicyChanges.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"policy_change_type": string(policyChangeType),
}).Inc()
m.RecordPolicyChanges(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, string(policyChangeType))
return nil
}
func RegisterPolicy(pc *metrics.PromConfig, policy kyvernov1.PolicyInterface, policyChangeType PolicyChangeType) error {
func RegisterPolicy(m *metrics.MetricsConfig, policy kyvernov1.PolicyInterface, policyChangeType PolicyChangeType) error {
name, namespace, policyType, backgroundMode, validationMode, err := metrics.GetPolicyInfos(policy)
if err != nil {
return err
}
if err = registerPolicyChangesMetric(pc, validationMode, policyType, backgroundMode, namespace, name, policyChangeType); err != nil {
if err = registerPolicyChangesMetric(m, validationMode, policyType, backgroundMode, namespace, name, policyChangeType); err != nil {
return err
}
return nil


@@ -7,11 +7,10 @@ import (
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/utils"
prom "github.com/prometheus/client_golang/prometheus"
)
func registerPolicyExecutionDurationMetric(
pc *metrics.PromConfig,
m *metrics.MetricsConfig,
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
@@ -31,36 +30,24 @@ func registerPolicyExecutionDurationMetric(
if ruleType != metrics.Generate || generateRuleLatencyType == "" {
generateRuleLatencyType = "-"
}
includeNamespaces, excludeNamespaces := pc.Config.GetIncludeNamespaces(), pc.Config.GetExcludeNamespaces()
includeNamespaces, excludeNamespaces := m.Config.GetIncludeNamespaces(), m.Config.GetExcludeNamespaces()
if (resourceNamespace != "" && resourceNamespace != "-") && utils.ContainsString(excludeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_execution_duration_seconds metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_execution_duration_seconds metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
return nil
}
if (resourceNamespace != "" && resourceNamespace != "-") && len(includeNamespaces) > 0 && !utils.ContainsString(includeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_execution_duration_seconds metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_execution_duration_seconds metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
return nil
}
pc.Metrics.PolicyExecutionDuration.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"resource_kind": resourceKind,
"resource_namespace": resourceNamespace,
"resource_request_operation": string(resourceRequestOperation),
"rule_name": ruleName,
"rule_result": string(ruleResult),
"rule_type": string(ruleType),
"rule_execution_cause": string(ruleExecutionCause),
"generate_rule_latency_type": generateRuleLatencyType,
}).Observe(ruleExecutionLatency)
m.RecordPolicyExecutionDuration(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, resourceKind, resourceNamespace, resourceRequestOperation, ruleName, ruleResult, ruleType, ruleExecutionCause, generateRuleLatencyType, ruleExecutionLatency)
return nil
}
// policy - policy related data
// engineResponse - resource and rule related data
func ProcessEngineResponse(pc *metrics.PromConfig, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse, executionCause metrics.RuleExecutionCause, generateRuleLatencyType string, resourceRequestOperation metrics.ResourceRequestOperation) error {
//policy - policy related data
//engineResponse - resource and rule related data
func ProcessEngineResponse(m *metrics.MetricsConfig, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse, executionCause metrics.RuleExecutionCause, generateRuleLatencyType string, resourceRequestOperation metrics.ResourceRequestOperation) error {
name, namespace, policyType, backgroundMode, validationMode, err := metrics.GetPolicyInfos(policy)
if err != nil {
return err
@@ -89,7 +76,7 @@ func ProcessEngineResponse(pc *metrics.PromConfig, policy kyvernov1.PolicyInterf
}
ruleExecutionLatencyInSeconds := float64(rule.RuleStats.ProcessingTime) / float64(1000*1000*1000)
if err := registerPolicyExecutionDurationMetric(
pc,
m,
validationMode,
policyType,
backgroundMode,


@@ -7,11 +7,10 @@ import (
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/utils"
prom "github.com/prometheus/client_golang/prometheus"
)
func registerPolicyResultsMetric(
pc *metrics.PromConfig,
m *metrics.MetricsConfig,
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
@@ -26,35 +25,24 @@ func registerPolicyResultsMetric(
if policyType == metrics.Cluster {
policyNamespace = "-"
}
includeNamespaces, excludeNamespaces := pc.Config.GetIncludeNamespaces(), pc.Config.GetExcludeNamespaces()
includeNamespaces, excludeNamespaces := m.Config.GetIncludeNamespaces(), m.Config.GetExcludeNamespaces()
if (resourceNamespace != "" && resourceNamespace != "-") && utils.ContainsString(excludeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_results_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_results_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", resourceNamespace, excludeNamespaces))
return nil
}
if (resourceNamespace != "" && resourceNamespace != "-") && len(includeNamespaces) > 0 && !utils.ContainsString(includeNamespaces, resourceNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_results_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_results_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", resourceNamespace, includeNamespaces))
return nil
}
pc.Metrics.PolicyResults.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"resource_kind": resourceKind,
"resource_namespace": resourceNamespace,
"resource_request_operation": string(resourceRequestOperation),
"rule_name": ruleName,
"rule_result": string(ruleResult),
"rule_type": string(ruleType),
"rule_execution_cause": string(ruleExecutionCause),
}).Inc()
m.RecordPolicyResults(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, resourceKind, resourceNamespace, resourceRequestOperation, ruleName, ruleResult, ruleType, ruleExecutionCause)
return nil
}
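The hand-rolled prom.Labels block above is replaced by a single call on MetricsConfig. The helper's actual implementation lives in pkg/metrics and is not shown in this hunk; purely as a hedged illustration, recording an equivalent counter with today's stable OpenTelemetry metric API (not necessarily the API vendored by this commit) could look like the sketch below, with the function name and label subset being illustrative:

package metricssketch

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
)

// recordPolicyResult is a hypothetical stand-in for MetricsConfig.RecordPolicyResults:
// it increments a counter carrying (a subset of) the labels the prom.Labels block used.
func recordPolicyResult(ctx context.Context, policyName, policyNamespace, ruleName, ruleResult string) error {
	meter := otel.Meter("kyverno")
	counter, err := meter.Int64Counter("kyverno_policy_results_total")
	if err != nil {
		return err
	}
	counter.Add(ctx, 1, metric.WithAttributes(
		attribute.String("policy_name", policyName),
		attribute.String("policy_namespace", policyNamespace),
		attribute.String("rule_name", ruleName),
		attribute.String("rule_result", ruleResult),
	))
	return nil
}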
// policy - policy related data
// engineResponse - resource and rule related data
func ProcessEngineResponse(pc *metrics.PromConfig, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse, executionCause metrics.RuleExecutionCause, resourceRequestOperation metrics.ResourceRequestOperation) error {
// policy - policy related data
// engineResponse - resource and rule related data
func ProcessEngineResponse(m *metrics.MetricsConfig, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse, executionCause metrics.RuleExecutionCause, resourceRequestOperation metrics.ResourceRequestOperation) error {
name, namespace, policyType, backgroundMode, validationMode, err := metrics.GetPolicyInfos(policy)
if err != nil {
return err
@ -82,7 +70,7 @@ func ProcessEngineResponse(pc *metrics.PromConfig, policy kyvernov1.PolicyInterf
ruleResult = metrics.Fail
}
if err := registerPolicyResultsMetric(
pc,
m,
validationMode,
policyType,
backgroundMode,

View file

@ -7,11 +7,10 @@ import (
"github.com/kyverno/kyverno/pkg/autogen"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/utils"
prom "github.com/prometheus/client_golang/prometheus"
)
func registerPolicyRuleInfoMetric(
pc *metrics.PromConfig,
m *metrics.MetricsConfig,
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
@ -29,13 +28,13 @@ func registerPolicyRuleInfoMetric(
default:
return fmt.Errorf("unknown metric change type found: %s", metricChangeType)
}
includeNamespaces, excludeNamespaces := pc.Config.GetIncludeNamespaces(), pc.Config.GetExcludeNamespaces()
includeNamespaces, excludeNamespaces := m.Config.GetIncludeNamespaces(), m.Config.GetExcludeNamespaces()
if (policyNamespace != "" && policyNamespace != "-") && utils.ContainsString(excludeNamespaces, policyNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_rule_info_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", policyNamespace, excludeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_rule_info_total metric as the operation belongs to the namespace '%s' which is one of 'namespaces.exclude' %+v in values.yaml", policyNamespace, excludeNamespaces))
return nil
}
if (policyNamespace != "" && policyNamespace != "-") && len(includeNamespaces) > 0 && !utils.ContainsString(includeNamespaces, policyNamespace) {
metrics.Logger().Info(fmt.Sprintf("Skipping the registration of kyverno_policy_rule_info_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", policyNamespace, includeNamespaces))
m.Log.Info(fmt.Sprintf("Skipping the registration of kyverno_policy_rule_info_total metric as the operation belongs to the namespace '%s' which is not one of 'namespaces.include' %+v in values.yaml", policyNamespace, includeNamespaces))
return nil
}
if policyType == metrics.Cluster {
@ -45,20 +44,12 @@ func registerPolicyRuleInfoMetric(
if ready {
status = "true"
}
pc.Metrics.PolicyRuleInfo.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"rule_name": ruleName,
"rule_type": string(ruleType),
"status_ready": status,
}).Set(metricValue)
m.RecordPolicyRuleInfo(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, ruleName, ruleType, status, metricValue)
return nil
}
func AddPolicy(pc *metrics.PromConfig, policy kyvernov1.PolicyInterface) error {
func AddPolicy(m *metrics.MetricsConfig, policy kyvernov1.PolicyInterface) error {
name, namespace, policyType, backgroundMode, validationMode, err := metrics.GetPolicyInfos(policy)
if err != nil {
return err
@ -67,14 +58,14 @@ func AddPolicy(pc *metrics.PromConfig, policy kyvernov1.PolicyInterface) error {
for _, rule := range autogen.ComputeRules(policy) {
ruleName := rule.Name
ruleType := metrics.ParseRuleType(rule)
if err = registerPolicyRuleInfoMetric(pc, validationMode, policyType, backgroundMode, namespace, name, ruleName, ruleType, PolicyRuleCreated, ready); err != nil {
if err = registerPolicyRuleInfoMetric(m, validationMode, policyType, backgroundMode, namespace, name, ruleName, ruleType, PolicyRuleCreated, ready); err != nil {
return err
}
}
return nil
}
func RemovePolicy(pc *metrics.PromConfig, policy kyvernov1.PolicyInterface) error {
func RemovePolicy(m *metrics.MetricsConfig, policy kyvernov1.PolicyInterface) error {
name, namespace, policyType, backgroundMode, validationMode, err := metrics.GetPolicyInfos(policy)
if err != nil {
return err
@ -83,7 +74,7 @@ func RemovePolicy(pc *metrics.PromConfig, policy kyvernov1.PolicyInterface) erro
for _, rule := range autogen.ComputeRules(policy) {
ruleName := rule.Name
ruleType := metrics.ParseRuleType(rule)
if err = registerPolicyRuleInfoMetric(pc, validationMode, policyType, backgroundMode, namespace, name, ruleName, ruleType, PolicyRuleDeleted, ready); err != nil {
if err = registerPolicyRuleInfoMetric(m, validationMode, policyType, backgroundMode, namespace, name, ruleName, ruleType, PolicyRuleDeleted, ready); err != nil {
return err
}
}

View file

@ -62,13 +62,13 @@ func (pc *PolicyController) applyAndReportPerNamespace(policy kyvernov1.PolicyIn
}
func (pc *PolicyController) registerPolicyResultsMetricValidation(logger logr.Logger, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
if err := policyResults.ProcessEngineResponse(pc.promConfig, policy, engineResponse, metrics.BackgroundScan, metrics.ResourceCreated); err != nil {
if err := policyResults.ProcessEngineResponse(pc.metricsConfig, policy, engineResponse, metrics.BackgroundScan, metrics.ResourceCreated); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_results_total metrics for the above policy", "name", policy.GetName())
}
}
func (pc *PolicyController) registerPolicyExecutionDurationMetricValidate(logger logr.Logger, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
if err := policyExecutionDuration.ProcessEngineResponse(pc.promConfig, policy, engineResponse, metrics.BackgroundScan, "", metrics.ResourceCreated); err != nil {
if err := policyExecutionDuration.ProcessEngineResponse(pc.metricsConfig, policy, engineResponse, metrics.BackgroundScan, "", metrics.ResourceCreated); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_execution_duration_seconds metrics for the above policy", "name", policy.GetName())
}
}

View file

@ -10,7 +10,7 @@ import (
)
func (pc *PolicyController) registerPolicyRuleInfoMetricAddPolicy(logger logr.Logger, p kyvernov1.PolicyInterface) {
err := policyRuleInfoMetric.AddPolicy(pc.promConfig, p)
err := policyRuleInfoMetric.AddPolicy(pc.metricsConfig, p)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's creation", "name", p.GetName())
}
@ -18,26 +18,26 @@ func (pc *PolicyController) registerPolicyRuleInfoMetricAddPolicy(logger logr.Lo
func (pc *PolicyController) registerPolicyRuleInfoMetricUpdatePolicy(logger logr.Logger, oldP, curP kyvernov1.PolicyInterface) {
// removing the metrics associated with the old rules
err := policyRuleInfoMetric.RemovePolicy(pc.promConfig, oldP)
err := policyRuleInfoMetric.RemovePolicy(pc.metricsConfig, oldP)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's updation", "name", oldP.GetName())
}
// adding the metrics associated with the new rules
err = policyRuleInfoMetric.AddPolicy(pc.promConfig, curP)
err = policyRuleInfoMetric.AddPolicy(pc.metricsConfig, curP)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's updation", "name", oldP.GetName())
}
}
func (pc *PolicyController) registerPolicyRuleInfoMetricDeletePolicy(logger logr.Logger, p kyvernov1.PolicyInterface) {
err := policyRuleInfoMetric.RemovePolicy(pc.promConfig, p)
err := policyRuleInfoMetric.RemovePolicy(pc.metricsConfig, p)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's deletion", "name", p.GetName())
}
}
func (pc *PolicyController) registerPolicyChangesMetricAddPolicy(logger logr.Logger, p kyvernov1.PolicyInterface) {
err := policyChangesMetric.RegisterPolicy(pc.promConfig, p, policyChangesMetric.PolicyCreated)
err := policyChangesMetric.RegisterPolicy(pc.metricsConfig, p, policyChangesMetric.PolicyCreated)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_total metrics for the above policy's creation", "name", p.GetName())
}
@ -49,13 +49,13 @@ func (pc *PolicyController) registerPolicyChangesMetricUpdatePolicy(logger logr.
if reflect.DeepEqual(oldSpec, curSpec) {
return
}
err := policyChangesMetric.RegisterPolicy(pc.promConfig, oldP, policyChangesMetric.PolicyUpdated)
err := policyChangesMetric.RegisterPolicy(pc.metricsConfig, oldP, policyChangesMetric.PolicyUpdated)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_total metrics for the above policy's updation", "name", oldP.GetName())
}
// curP will require a new kyverno_policy_changes_total metric if the above update involved change in the following fields:
if curSpec.BackgroundProcessingEnabled() != oldSpec.BackgroundProcessingEnabled() || curSpec.GetValidationFailureAction() != oldSpec.GetValidationFailureAction() {
err = policyChangesMetric.RegisterPolicy(pc.promConfig, curP, policyChangesMetric.PolicyUpdated)
err = policyChangesMetric.RegisterPolicy(pc.metricsConfig, curP, policyChangesMetric.PolicyUpdated)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_total metrics for the above policy's updation", "name", curP.GetName())
}
@ -63,7 +63,7 @@ func (pc *PolicyController) registerPolicyChangesMetricUpdatePolicy(logger logr.
}
func (pc *PolicyController) registerPolicyChangesMetricDeletePolicy(logger logr.Logger, p kyvernov1.PolicyInterface) {
err := policyChangesMetric.RegisterPolicy(pc.promConfig, p, policyChangesMetric.PolicyDeleted)
err := policyChangesMetric.RegisterPolicy(pc.metricsConfig, p, policyChangesMetric.PolicyDeleted)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_total metrics for the above policy's deletion", "name", p.GetName())
}

View file

@ -95,7 +95,7 @@ type PolicyController struct {
log logr.Logger
promConfig *metrics.PromConfig
metricsConfig *metrics.MetricsConfig
}
// NewPolicyController create a new PolicyController
@ -113,7 +113,7 @@ func NewPolicyController(
namespaces corev1informers.NamespaceInformer,
log logr.Logger,
reconcilePeriod time.Duration,
promConfig *metrics.PromConfig,
metricsConfig *metrics.MetricsConfig,
) (*PolicyController, error) {
// Event broadcaster
eventBroadcaster := record.NewBroadcaster()
@ -136,7 +136,7 @@ func NewPolicyController(
prGenerator: prGenerator,
policyReportEraser: policyReportEraser,
reconcilePeriod: reconcilePeriod,
promConfig: promConfig,
metricsConfig: metricsConfig,
log: log,
}

pkg/tracing/tracing.go Normal file
View file

@ -0,0 +1,85 @@
package tracing
import (
"context"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/utils/kube"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
"go.opentelemetry.io/otel/propagation"
"go.opentelemetry.io/otel/sdk/resource"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
"go.opentelemetry.io/otel/trace"
"k8s.io/client-go/kubernetes"
)
// NewTraceConfig configures the global tracer provider, using 'endpoint' as the address of the OpenTelemetry Collector that spans are exported to
func NewTraceConfig(endpoint string, certs string, kubeClient kubernetes.Interface, log logr.Logger) error {
ctx := context.Background()
var client otlptrace.Client
if certs != "" {
// the CA certificate is stored in a secret in the Kyverno namespace
transportCreds, err := kube.FetchCert(ctx, certs, kubeClient)
if err != nil {
log.Error(err, "Error fetching certificate from secret")
return err
}
client = otlptracegrpc.NewClient(
otlptracegrpc.WithEndpoint(endpoint),
otlptracegrpc.WithTLSCredentials(transportCreds),
)
} else {
client = otlptracegrpc.NewClient(
otlptracegrpc.WithEndpoint(endpoint),
otlptracegrpc.WithInsecure(),
)
}
// create a new exporter for exporting traces
traceExp, err := otlptrace.New(ctx, client)
if err != nil {
log.Error(err, "Failed to create the collector exporter")
return err
}
res, err := resource.New(ctx,
resource.WithAttributes(semconv.ServiceNameKey.String("kyverno_traces")),
resource.WithSchemaURL(semconv.SchemaURL),
)
if err != nil {
log.Error(err, "failed creating resource")
return err
}
bsp := sdktrace.NewBatchSpanProcessor(traceExp)
// create a tracer provider and register the batch span processor with it
tp := sdktrace.NewTracerProvider(
sdktrace.WithSampler(sdktrace.AlwaysSample()),
sdktrace.WithSpanProcessor(bsp),
sdktrace.WithResource(res),
)
// set global propagator to tracecontext (the default is no-op).
otel.SetTextMapPropagator(propagation.TraceContext{})
otel.SetTracerProvider(tp)
return nil
}
// DoInSpan executes doFn inside a new span named `operationName`, attached as a child of any span found in the given context.
func DoInSpan(ctx context.Context, tracerName string, operationName string, doFn func(context.Context)) {
newCtx, span := otel.Tracer(tracerName).Start(ctx, operationName)
defer span.End()
doFn(newCtx)
}
// StartSpan starts a span named `operationName` from the given context, recording the supplied attributes; the caller is responsible for ending the returned span.
func StartSpan(ctx context.Context, tracerName string, operationName string, attributes []attribute.KeyValue) trace.Span {
_, span := otel.Tracer(tracerName).Start(ctx, operationName, trace.WithAttributes(attributes...))
return span
}
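Taken together, a caller might wire these helpers up as follows. This is a minimal sketch rather than code from this commit: the collector address (including the default OTLP gRPC port 4317) and the tracer and function names are illustrative, and passing an empty certs argument selects the insecure branch above.

package tracingsketch

import (
	"context"

	"github.com/go-logr/logr"
	"github.com/kyverno/kyverno/pkg/tracing"
	"k8s.io/client-go/kubernetes"
)

// setupTracing is a hypothetical caller of the helpers above.
func setupTracing(kubeClient kubernetes.Interface, log logr.Logger) error {
	// Configure the global tracer provider; "" means no CA secret, i.e. an insecure gRPC connection.
	if err := tracing.NewTraceConfig("opentelemetrycollector.kyverno.svc.cluster.local:4317", "", kubeClient, log); err != nil {
		return err
	}
	// Work run through DoInSpan is now exported to the collector as a span.
	tracing.DoInSpan(context.Background(), "kyverno", "example_operation", func(ctx context.Context) {
		// traced work goes here; ctx carries the new span
	})
	return nil
}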

pkg/utils/kube/cert.go Normal file
View file

@ -0,0 +1,30 @@
package kube
import (
"context"
"crypto/x509"
"fmt"
"github.com/kyverno/kyverno/pkg/config"
"google.golang.org/grpc/credentials"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
func FetchCert(
ctx context.Context,
certs string,
kubeClient kubernetes.Interface) (credentials.TransportCredentials, error) {
secret, err := kubeClient.CoreV1().Secrets(config.KyvernoNamespace()).Get(ctx, certs, v1.GetOptions{})
if err != nil {
return nil, fmt.Errorf("error fetching certificate from secret")
}
cp := x509.NewCertPool()
if !cp.AppendCertsFromPEM(secret.Data["ca.pem"]) {
return nil, fmt.Errorf("credentials: failed to append certificates")
}
transportCreds := credentials.NewClientTLSFromCert(cp, "")
return transportCreds, nil
}
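The secret is expected to live in the Kyverno namespace and to carry the PEM-encoded CA bundle under the "ca.pem" key. As a hedged sketch of how the returned credentials feed into an OTLP trace client (the secret name "otel-ca" is hypothetical):

package certsketch

import (
	"context"

	"github.com/kyverno/kyverno/pkg/utils/kube"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
	"k8s.io/client-go/kubernetes"
)

// newTLSTraceClient builds an OTLP gRPC client secured with the CA fetched
// from the named secret; "otel-ca" is an illustrative secret name.
func newTLSTraceClient(ctx context.Context, endpoint string, kubeClient kubernetes.Interface) (otlptrace.Client, error) {
	creds, err := kube.FetchCert(ctx, "otel-ca", kubeClient)
	if err != nil {
		return nil, err
	}
	return otlptracegrpc.NewClient(
		otlptracegrpc.WithEndpoint(endpoint),
		otlptracegrpc.WithTLSCredentials(creds),
	), nil
}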

View file

@ -9,8 +9,10 @@ import (
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/tracing"
admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
"github.com/kyverno/kyverno/pkg/webhookconfig"
"go.opentelemetry.io/otel/attribute"
admissionv1 "k8s.io/api/admission/v1"
)
@ -18,6 +20,7 @@ type AdmissionHandler func(logr.Logger, *admissionv1.AdmissionRequest) *admissio
func Admission(logger logr.Logger, inner AdmissionHandler) http.HandlerFunc {
return func(writer http.ResponseWriter, request *http.Request) {
ctx := request.Context()
startTime := time.Now()
if request.Body == nil {
logger.Info("empty body", "req", request.URL.String())
@ -63,6 +66,18 @@ func Admission(logger logr.Logger, inner AdmissionHandler) http.HandlerFunc {
http.Error(writer, fmt.Sprintf("Could not encode response: %v", err), http.StatusInternalServerError)
return
}
// start span from request context
attributes := []attribute.KeyValue{
attribute.String("kind", admissionReview.Request.Kind.Kind),
attribute.String("namespace", admissionReview.Request.Namespace),
attribute.String("name", admissionReview.Request.Name),
attribute.String("operation", string(admissionReview.Request.Operation)),
attribute.String("uid", string(admissionReview.Request.UID)),
}
span := tracing.StartSpan(ctx, "admission_webhook_operations", string(admissionReview.Request.Operation), attributes)
defer span.End()
writer.Header().Set("Content-Type", "application/json; charset=utf-8")
if _, err := writer.Write(responseJSON); err != nil {
http.Error(writer, fmt.Sprintf("could not write response: %v", err), http.StatusInternalServerError)
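Because NewTraceConfig registers the W3C TraceContext propagator globally, a natural extension (not part of this commit) is to extract any incoming traceparent header before starting the span, so admission spans join the caller's trace instead of becoming new roots. A minimal sketch:

package admissionsketch

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

// extractTraceContext is a hypothetical helper: it pulls W3C trace headers off
// the request so that spans started from the returned context become children
// of the caller's trace when such headers are present.
func extractTraceContext(request *http.Request) context.Context {
	return otel.GetTextMapPropagator().Extract(request.Context(), propagation.HeaderCarrier(request.Header))
}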

View file

@ -44,7 +44,7 @@ type handlers struct {
// config
configuration config.Configuration
promConfig *metrics.PromConfig
metricsConfig *metrics.MetricsConfig
// cache
pCache policycache.Cache
@ -66,7 +66,7 @@ func NewHandlers(
client dclient.Interface,
kyvernoClient kyvernoclient.Interface,
configuration config.Configuration,
promConfig *metrics.PromConfig,
metricsConfig *metrics.MetricsConfig,
pCache policycache.Cache,
nsLister corev1listers.NamespaceLister,
rbLister rbacv1listers.RoleBindingLister,
@ -82,7 +82,7 @@ func NewHandlers(
client: client,
kyvernoClient: kyvernoClient,
configuration: configuration,
promConfig: promConfig,
metricsConfig: metricsConfig,
pCache: pCache,
nsLister: nsLister,
rbLister: rbLister,
@ -179,7 +179,7 @@ func (h *handlers) Validate(logger logr.Logger, request *admissionv1.AdmissionRe
prGenerator: h.prGenerator,
}
ok, msg := vh.handleValidation(h.promConfig, request, policies, policyContext, namespaceLabels, requestTime)
ok, msg := vh.handleValidation(h.metricsConfig, request, policies, policyContext, namespaceLabels, requestTime)
if !ok {
logger.Info("admission request denied")
return admissionutils.ResponseFailure(false, msg)

View file

@ -29,7 +29,7 @@ func registerMetric(logger logr.Logger, m string, requestOperation string, r rep
func (h *handlers) registerAdmissionReviewDurationMetricMutate(logger logr.Logger, requestOperation string, engineResponses []*response.EngineResponse, admissionReviewLatencyDuration int64) {
registerMetric(logger, "kyverno_admission_review_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
return admissionReviewDuration.ProcessEngineResponses(h.promConfig, engineResponses, admissionReviewLatencyDuration, op)
return admissionReviewDuration.ProcessEngineResponses(h.metricsConfig, engineResponses, admissionReviewLatencyDuration, op)
})
}
@ -37,13 +37,13 @@ func (h *handlers) registerAdmissionReviewDurationMetricGenerate(logger logr.Log
defer close(*latencyReceiver)
defer close(*engineResponsesReceiver)
registerMetric(logger, "kyverno_admission_review_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
return admissionReviewDuration.ProcessEngineResponses(h.promConfig, <-(*engineResponsesReceiver), <-(*latencyReceiver), op)
return admissionReviewDuration.ProcessEngineResponses(h.metricsConfig, <-(*engineResponsesReceiver), <-(*latencyReceiver), op)
})
}
func registerAdmissionReviewDurationMetricValidate(logger logr.Logger, promConfig *metrics.PromConfig, requestOperation string, engineResponses []*response.EngineResponse, admissionReviewLatencyDuration int64) {
func registerAdmissionReviewDurationMetricValidate(logger logr.Logger, metricsConfig *metrics.MetricsConfig, requestOperation string, engineResponses []*response.EngineResponse, admissionReviewLatencyDuration int64) {
registerMetric(logger, "kyverno_admission_review_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
return admissionReviewDuration.ProcessEngineResponses(promConfig, engineResponses, admissionReviewLatencyDuration, op)
return admissionReviewDuration.ProcessEngineResponses(metricsConfig, engineResponses, admissionReviewLatencyDuration, op)
})
}
@ -51,20 +51,20 @@ func registerAdmissionReviewDurationMetricValidate(logger logr.Logger, promConfi
func (h *handlers) registerAdmissionRequestsMetricMutate(logger logr.Logger, requestOperation string, engineResponses []*response.EngineResponse) {
registerMetric(logger, "kyverno_admission_requests_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
return admissionRequests.ProcessEngineResponses(h.promConfig, engineResponses, op)
return admissionRequests.ProcessEngineResponses(h.metricsConfig, engineResponses, op)
})
}
func (h *handlers) registerAdmissionRequestsMetricGenerate(logger logr.Logger, requestOperation string, engineResponsesReceiver *chan []*response.EngineResponse) {
defer close(*engineResponsesReceiver)
registerMetric(logger, "kyverno_admission_requests_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
return admissionRequests.ProcessEngineResponses(h.promConfig, <-(*engineResponsesReceiver), op)
return admissionRequests.ProcessEngineResponses(h.metricsConfig, <-(*engineResponsesReceiver), op)
})
}
func registerAdmissionRequestsMetricValidate(logger logr.Logger, promConfig *metrics.PromConfig, requestOperation string, engineResponses []*response.EngineResponse) {
func registerAdmissionRequestsMetricValidate(logger logr.Logger, metricsConfig *metrics.MetricsConfig, requestOperation string, engineResponses []*response.EngineResponse) {
registerMetric(logger, "kyverno_admission_requests_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
return admissionRequests.ProcessEngineResponses(promConfig, engineResponses, op)
return admissionRequests.ProcessEngineResponses(metricsConfig, engineResponses, op)
})
}
@ -72,19 +72,19 @@ func registerAdmissionRequestsMetricValidate(logger logr.Logger, promConfig *met
func (h *handlers) registerPolicyResultsMetricMutation(logger logr.Logger, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
registerMetric(logger, "kyverno_policy_results_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
return policyResults.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, op)
return policyResults.ProcessEngineResponse(h.metricsConfig, policy, engineResponse, metrics.AdmissionRequest, op)
})
}
func registerPolicyResultsMetricValidation(logger logr.Logger, promConfig *metrics.PromConfig, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
func registerPolicyResultsMetricValidation(logger logr.Logger, metricsConfig *metrics.MetricsConfig, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
registerMetric(logger, "kyverno_policy_results_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
return policyResults.ProcessEngineResponse(promConfig, policy, engineResponse, metrics.AdmissionRequest, op)
return policyResults.ProcessEngineResponse(metricsConfig, policy, engineResponse, metrics.AdmissionRequest, op)
})
}
func (h *handlers) registerPolicyResultsMetricGeneration(logger logr.Logger, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
registerMetric(logger, "kyverno_policy_results_total", requestOperation, func(op metrics.ResourceRequestOperation) error {
return policyResults.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, op)
return policyResults.ProcessEngineResponse(h.metricsConfig, policy, engineResponse, metrics.AdmissionRequest, op)
})
}
@ -92,18 +92,18 @@ func (h *handlers) registerPolicyResultsMetricGeneration(logger logr.Logger, req
func (h *handlers) registerPolicyExecutionDurationMetricMutate(logger logr.Logger, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
registerMetric(logger, "kyverno_policy_execution_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
return policyExecutionDuration.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
return policyExecutionDuration.ProcessEngineResponse(h.metricsConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
})
}
func registerPolicyExecutionDurationMetricValidate(logger logr.Logger, promConfig *metrics.PromConfig, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
func registerPolicyExecutionDurationMetricValidate(logger logr.Logger, metricsConfig *metrics.MetricsConfig, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
registerMetric(logger, "kyverno_policy_execution_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
return policyExecutionDuration.ProcessEngineResponse(promConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
return policyExecutionDuration.ProcessEngineResponse(metricsConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
})
}
func (h *handlers) registerPolicyExecutionDurationMetricGenerate(logger logr.Logger, requestOperation string, policy kyvernov1.PolicyInterface, engineResponse response.EngineResponse) {
registerMetric(logger, "kyverno_policy_execution_duration_seconds", requestOperation, func(op metrics.ResourceRequestOperation) error {
return policyExecutionDuration.ProcessEngineResponse(h.promConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
return policyExecutionDuration.ProcessEngineResponse(h.metricsConfig, policy, engineResponse, metrics.AdmissionRequest, "", op)
})
}

View file

@ -57,7 +57,7 @@ type auditHandler struct {
log logr.Logger
configHandler config.Configuration
promConfig *metrics.PromConfig
metricsConfig *metrics.MetricsConfig
}
// NewValidateAuditHandler returns a new instance of audit policy handler
@ -70,7 +70,7 @@ func NewValidateAuditHandler(pCache policycache.Cache,
log logr.Logger,
dynamicConfig config.Configuration,
client dclient.Interface,
promConfig *metrics.PromConfig,
metricsConfig *metrics.MetricsConfig,
) AuditHandler {
c := &auditHandler{
pCache: pCache,
@ -83,7 +83,7 @@ func NewValidateAuditHandler(pCache policycache.Cache,
prGenerator: prGenerator,
configHandler: dynamicConfig,
client: client,
promConfig: promConfig,
metricsConfig: metricsConfig,
}
c.informersSynced = []cache.InformerSynced{rbInformer.Informer().HasSynced, crbInformer.Informer().HasSynced, namespaces.Informer().HasSynced}
return c
@ -198,7 +198,7 @@ func (h *auditHandler) process(request *admissionv1.AdmissionRequest) error {
prGenerator: h.prGenerator,
}
vh.handleValidation(h.promConfig, request, policies, policyContext, namespaceLabels, admissionRequestTimestamp)
vh.handleValidation(h.metricsConfig, request, policies, policyContext, namespaceLabels, admissionRequestTimestamp)
return nil
}

View file

@ -27,7 +27,7 @@ type validationHandler struct {
// If there are no errors in validating rule we apply generation rules
// patchedResource is the (resource + patches) after applying mutation rules
func (v *validationHandler) handleValidation(
promConfig *metrics.PromConfig,
metricsConfig *metrics.MetricsConfig,
request *admissionv1.AdmissionRequest,
policies []kyvernov1.PolicyInterface,
policyContext *engine.PolicyContext,
@ -65,9 +65,9 @@ func (v *validationHandler) handleValidation(
}
// registering the kyverno_policy_results_total metric concurrently
go registerPolicyResultsMetricValidation(logger, promConfig, string(request.Operation), policyContext.Policy, *engineResponse)
go registerPolicyResultsMetricValidation(logger, metricsConfig, string(request.Operation), policyContext.Policy, *engineResponse)
// registering the kyverno_policy_execution_duration_seconds metric concurrently
go registerPolicyExecutionDurationMetricValidate(logger, promConfig, string(request.Operation), policyContext.Policy, *engineResponse)
go registerPolicyExecutionDurationMetricValidate(logger, metricsConfig, string(request.Operation), policyContext.Policy, *engineResponse)
engineResponses = append(engineResponses, engineResponse)
if !engineResponse.IsSuccessful() {
@ -103,9 +103,9 @@ func (v *validationHandler) handleValidation(
logger.V(4).Info("resource blocked")
// registering the kyverno_admission_review_duration_seconds metric concurrently
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
go registerAdmissionReviewDurationMetricValidate(logger, promConfig, string(request.Operation), engineResponses, admissionReviewLatencyDuration)
// registering the kyverno_admission_requests_total metric concurrently
go registerAdmissionRequestsMetricValidate(logger, promConfig, string(request.Operation), engineResponses)
go registerAdmissionReviewDurationMetricValidate(logger, metricsConfig, string(request.Operation), engineResponses, admissionReviewLatencyDuration)
// registering the kyverno_admission_requests_total metric concurrently
go registerAdmissionRequestsMetricValidate(logger, metricsConfig, string(request.Operation), engineResponses)
return false, getEnforceFailureErrorMsg(engineResponses)
}
@ -132,9 +132,8 @@ func (v *validationHandler) handleValidation(
// registering the kyverno_admission_review_duration_seconds metric concurrently
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
go registerAdmissionReviewDurationMetricValidate(logger, promConfig, string(request.Operation), engineResponses, admissionReviewLatencyDuration)
// registering the kyverno_admission_requests_total metric concurrently
go registerAdmissionRequestsMetricValidate(logger, promConfig, string(request.Operation), engineResponses)
go registerAdmissionReviewDurationMetricValidate(logger, metricsConfig, string(request.Operation), engineResponses, admissionReviewLatencyDuration)
// registering the kyverno_admission_requests_total metric concurrently
go registerAdmissionRequestsMetricValidate(logger, metricsConfig, string(request.Operation), engineResponses)
return true, ""
}