
Merge pull request #1877 from yashvardhan-kukreja/prometheus-integration-setup

feat: Prometheus metrics integration
Commit cd4d738667 by shuting, 2021-05-26 12:31:21 -07:00, committed by GitHub
GPG key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
45 changed files with 1383 additions and 46 deletions


@ -82,6 +82,8 @@ jobs:
kubectl get pods -n kyverno
${GITHUB_WORKSPACE}/scripts/verify-deployment.sh -n kyverno kyverno
sleep 20
echo ">>> Expose the Kyverno's service's metric server to the host"
kubectl port-forward svc/kyverno-svc -n kyverno 8000:8000 &
echo ">>> Run Kyverno e2e test"
make test-e2e
kubectl delete -f ${GITHUB_WORKSPACE}/definitions/install.yaml


@ -171,6 +171,7 @@ code-cov-report: $(CODE_COVERAGE_FILE_TXT)
# Test E2E
test-e2e:
$(eval export E2E="ok")
go test ./test/e2e/metrics -v
go test ./test/e2e/mutate -v
go test ./test/e2e/generate -v
$(eval export E2E="")


@ -91,6 +91,9 @@ spec:
- containerPort: 9443
name: https
protocol: TCP
- containerPort: 8000
name: metrics-port
protocol: TCP
env:
- name: INIT_CONFIG
value: {{ template "kyverno.configMapName" . }}


@ -16,5 +16,12 @@ spec:
{{- if and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort)) }}
nodePort: {{ .Values.service.nodePort }}
{{- end }}
- port: {{ .Values.service.metricsPort }}
targetPort: 8000
protocol: TCP
name: metrics-port
{{- if and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort)) }}
nodePort: {{ .Values.service.metricsNodePort }}
{{- end }}
selector: {{ include "kyverno.matchLabels" . | nindent 4 }}
type: {{ .Values.service.type }}


@ -145,6 +145,10 @@ service:
type: ClusterIP
# Only used if service.type is NodePort
nodePort:
## Kyverno's metrics server will be exposed at this port
metricsPort: 8000
## The Node's port which will allow access to Kyverno's metrics at the host level. Only used if service.type is NodePort.
metricsNodePort: 8000
## Provide any additional annotations which may be required. This can be used to
## set the LoadBalancer service type to internal only.
## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer


@ -34,6 +34,9 @@ import (
"k8s.io/klog/v2"
"k8s.io/klog/v2/klogr"
log "sigs.k8s.io/controller-runtime/pkg/log"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
const resyncPeriod = 15 * time.Minute
@ -47,12 +50,13 @@ var (
excludeGroupRole string
excludeUsername string
profilePort string
metricsPort string
webhookTimeout int
genWorkers int
profile bool
policyReport bool
profile bool
disableMetricsExport bool
policyControllerResyncPeriod time.Duration
setupLog = log.Log.WithName("setup")
@ -70,6 +74,8 @@ func main() {
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.BoolVar(&profile, "profile", false, "Set this flag to 'true', to enable profiling.")
flag.StringVar(&profilePort, "profile-port", "6060", "Enable profiling at given port, default to 6060.")
flag.BoolVar(&disableMetricsExport, "disable-metrics", false, "Set this flag to 'true', to enable exposing the metrics.")
flag.StringVar(&metricsPort, "metrics-port", "8000", "Expose prometheus metrics at the given port, default to 8000.")
flag.DurationVar(&policyControllerResyncPeriod, "background-scan", time.Hour, "Perform background scan every given interval, e.g., 30s, 15m, 1h.")
if err := flag.Set("v", "2"); err != nil {
setupLog.Error(err, "failed to set log level")
@ -87,11 +93,16 @@ func main() {
os.Exit(1)
}
var profilingServerMux *http.ServeMux
var metricsServerMux *http.ServeMux
var promConfig *metrics.PromConfig
if profile {
profilingServerMux = http.NewServeMux()
addr := ":" + profilePort
setupLog.Info("Enable profiling, see details at https://github.com/kyverno/kyverno/wiki/Profiling-Kyverno-on-Kubernetes", "port", profilePort)
go func() {
if err := http.ListenAndServe(addr, nil); err != nil {
if err := http.ListenAndServe(addr, profilingServerMux); err != nil {
setupLog.Error(err, "Failed to enable profiling")
os.Exit(1)
}
@ -99,6 +110,20 @@ func main() {
}
if !disableMetricsExport {
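// metrics.NewPromConfig (defined in pkg/metrics/metrics.go below) builds a dedicated Prometheus registry and the Kyverno gauge vectors registered on it.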
promConfig = metrics.NewPromConfig()
metricsServerMux = http.NewServeMux()
metricsServerMux.Handle("/metrics", promhttp.HandlerFor(promConfig.MetricsRegistry, promhttp.HandlerOpts{Timeout: 10 * time.Second}))
metricsAddr := ":" + metricsPort
setupLog.Info("Enable exposure of metrics, see details at https://github.com/kyverno/kyverno/wiki/Metrics-Kyverno-on-Kubernetes", "port", metricsPort)
go func() {
if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
setupLog.Error(err, "Failed to enable exposure of metrics")
os.Exit(1)
}
}()
}
// KYVERNO CRD CLIENT
// access CRD resources
// - ClusterPolicy, Policy
@ -224,6 +249,7 @@ func main() {
log.Log.WithName("PolicyController"),
rCache,
policyControllerResyncPeriod,
promConfig,
)
if err != nil {
@ -286,6 +312,7 @@ func main() {
configData,
rCache,
client,
promConfig,
)
certRenewer := ktls.NewCertRenewer(client, clientConfig, ktls.CertRenewalInterval, ktls.CertValidityDuration, log.Log.WithName("CertRenewer"))
@ -361,6 +388,7 @@ func main() {
rCache,
grc,
debug,
promConfig,
)
if err != nil {


@ -15,7 +15,11 @@ metadata:
spec:
ports:
- port: 443
name: https
targetPort: https
- port: 8000
name: metrics-port
targetPort: metrics-port
selector:
app: kyverno
app.kubernetes.io/name: kyverno


@ -58,6 +58,9 @@ spec:
- containerPort: 9443
name: https
protocol: TCP
- containerPort: 8000
name: metrics-port
protocol: TCP
env:
- name: INIT_CONFIG
value: init-config

go.mod (2 changed lines)

@ -19,7 +19,6 @@ require (
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
github.com/julienschmidt/httprouter v1.3.0
github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23
github.com/konsorten/go-windows-terminal-sequences v1.0.3 // indirect
github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7
github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a
github.com/mattn/go-isatty v0.0.12 // indirect
@ -30,6 +29,7 @@ require (
github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.9.1
github.com/prometheus/client_golang v1.7.1
github.com/spf13/cobra v1.1.1
github.com/stretchr/testify v1.6.1
gopkg.in/yaml.v2 v2.4.0

go.sum (26 changed lines)

@ -102,6 +102,7 @@ github.com/bcicen/jstream v0.0.0-20190220045926-16c1f8af81c2/go.mod h1:RDu/qcrnp
github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
@ -113,7 +114,9 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.28/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@ -238,7 +241,6 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
@ -319,7 +321,6 @@ github.com/gogo/protobuf v0.0.0-20171007142547-342cbe0a0415/go.mod h1:r8qH/GZQm5
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4=
github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
@ -519,8 +520,6 @@ github.com/klauspost/pgzip v1.2.1/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ
github.com/klauspost/readahead v1.3.1/go.mod h1:AH9juHzNH7xqdqFHrMRSHeH2Ps+vFf+kblDqzPFiLJg=
github.com/klauspost/reedsolomon v1.9.3/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ/pcNv7fu+8Un4=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJKvHsGVBgn1xWYf1NbHQhywc8=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
@ -573,6 +572,7 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@ -692,23 +692,27 @@ github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4
github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs=
github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1 h1:NTGy1Ja9pByO+xAeH/qiWnLrKtr3hJPNjaVUwnjpdpA=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0 h1:RyRA7RzGXQZiW+tGMr7sxa85G1z0yOpM1qq5c8lNawc=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3 h1:F0+tqvhOksq22sc6iCHF5WGlWjdwj92p0udFh1VFBS8=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/qri-io/starlib v0.4.2-0.20200213133954-ff2e8cd5ef8d/go.mod h1:7DPO4domFU579Ga6E61sB9VFNaniPVwJP5C4bBCu3wA=
@ -871,7 +875,6 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83 h1:/ZScEX8SfEmUGRHs0gxpqteO5nfNW6axyZbBdw9A12g=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
@ -944,7 +947,6 @@ golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7 h1:OgUuv8lsRpBibGNbSizVwKWlysjaNzmC9gYMhPVfqFM=
golang.org/x/net v0.0.0-20210224082022-3d97a244fca7/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@ -1012,7 +1014,6 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073 h1:8qxJSnu+7dRq6upnbntrmriWByIakBuct5OM/MdQC1M=
@ -1034,7 +1035,6 @@ golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1203,7 +1203,6 @@ gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
@ -1224,15 +1223,12 @@ k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0H
k8s.io/api v0.16.4/go.mod h1:AtzMnsR45tccQss5q8RnF+W8L81DH6XwXwo/joEx9u0=
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw=
k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
k8s.io/api v0.21.0 h1:gu5iGF4V6tfVCQ/R+8Hc0h7H1JuEhzyEi9S4R5LM8+Y=
k8s.io/api v0.21.0/go.mod h1:+YbrhBBGgsxbF6o6Kj4KJPJnBmAKuXDeS3E18bgHNVU=
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
k8s.io/apiextensions-apiserver v0.16.4/go.mod h1:HYQwjujEkXmQNhap2C9YDdIVOSskGZ3et0Mvjcyjbto=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
k8s.io/apiextensions-apiserver v0.20.2 h1:rfrMWQ87lhd8EzQWRnbQ4gXrniL/yTRBgYH1x1+BLlo=
k8s.io/apiextensions-apiserver v0.20.2/go.mod h1:F6TXp389Xntt+LUq3vw6HFOLttPa0V8821ogLGwb6Zs=
k8s.io/apiextensions-apiserver v0.21.0 h1:Nd4uBuweg6ImzbxkC1W7xUNZcCV/8Vt10iTdTIVF3hw=
k8s.io/apiextensions-apiserver v0.21.0/go.mod h1:gsQGNtGkc/YoDG9loKI0V+oLZM4ljRPjc/sql5tmvzc=
k8s.io/apimachinery v0.0.0-20190612125636-6a5db36e93ad/go.mod h1:I4A+glKBHiTgiEjQiCCQfCAIcIMFGt291SmsvcrFzJA=
@ -1240,14 +1236,12 @@ k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT
k8s.io/apimachinery v0.16.4/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ=
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg=
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.21.0 h1:3Fx+41if+IRavNcKOz09FwEXDBG6ORh6iMsTSelhkMA=
k8s.io/apimachinery v0.21.0/go.mod h1:jbreFvJo3ov9rj7eWT7+sYiRx+qZuCYXwWT1bcDswPY=
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
k8s.io/apiserver v0.16.4/go.mod h1:kbLJOak655g6W7C+muqu1F76u9wnEycfKMqbVaXIdAc=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.2/go.mod h1:2nKd93WyMhZx4Hp3RfgH2K5PhwyTrprrkWYnI7id7jA=
k8s.io/apiserver v0.21.0/go.mod h1:w2YSn4/WIwYuxG5zJmcqtRdtqgW/J2JRgFAqps3bBpg=
k8s.io/cli-runtime v0.20.2 h1:W0/FHdbApnl9oB7xdG643c/Zaf7TZT+43I+zKxwqvhU=
k8s.io/cli-runtime v0.20.2/go.mod h1:FjH6uIZZZP3XmwrXWeeYCbgxcrD6YXxoAykBaWH0VdM=
@ -1255,7 +1249,6 @@ k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53
k8s.io/client-go v0.16.4/go.mod h1:ZgxhFDxSnoKY0J0U2/Y1C8obKDdlhGPZwA7oHH863Ok=
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ=
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
k8s.io/client-go v0.21.0 h1:n0zzzJsAQmJngpC0IhgFcApZyoGXPrDIAD601HD09ag=
k8s.io/client-go v0.21.0/go.mod h1:nNBytTF9qPFDEhoqgEPaarobC8QPae13bElIVHzIglA=
@ -1273,7 +1266,6 @@ k8s.io/klog v0.4.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.4.0 h1:7+X0fUguPyrKEC4WjH8iGDg3laWgMo5tMnRTIGTTxGQ=
k8s.io/klog/v2 v2.4.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.8.0 h1:Q3gmuM9hKEjefWFFYF0Mat+YyFJvsUyYuwyNNJ5C9Ts=
k8s.io/klog/v2 v2.8.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
@ -1281,7 +1273,6 @@ k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7 h1:vEx13qjvaZ4yfObSSXW7BrMc/KQBBT/Jyee8XtLf4x0=
k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7/go.mod h1:wXW5VT87nVfh/iLV8FpR2uDvrFyomxbtb1KivDbvPTE=
@ -1314,7 +1305,6 @@ sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:I
sigs.k8s.io/structured-merge-diff v1.0.1 h1:LOs1LZWMsz1xs77Phr/pkB4LFaavH7IVq/3+WTN9XTA=
sigs.k8s.io/structured-merge-diff v1.0.1/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0 h1:C4r9BgJ98vrKnnVCjwCSXcWjWe0NKcUQkmzDXZXGwH8=
sigs.k8s.io/structured-merge-diff/v4 v4.1.0/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=


@ -14,10 +14,11 @@ import (
// - the caller has to check the ruleResponse to determine whether the path exist
// 2. returns the list of rules that are applicable on this policy and resource, if 1 succeed
func Generate(policyContext *PolicyContext) (resp *response.EngineResponse) {
return filterRules(policyContext)
policyStartTime := time.Now()
return filterRules(policyContext, policyStartTime)
}
func filterRules(policyContext *PolicyContext) *response.EngineResponse {
func filterRules(policyContext *PolicyContext, startTime time.Time) *response.EngineResponse {
kind := policyContext.NewResource.GetKind()
name := policyContext.NewResource.GetName()
namespace := policyContext.NewResource.GetNamespace()
@ -25,6 +26,9 @@ func filterRules(policyContext *PolicyContext) *response.EngineResponse {
resp := &response.EngineResponse{
PolicyResponse: response.PolicyResponse{
Policy: policyContext.Policy.Name,
PolicyStats: response.PolicyStats{
PolicyExecutionTimestamp: startTime.Unix(),
},
Resource: response.ResourceSpec{
Kind: kind,
Name: name,
@ -76,7 +80,8 @@ func filterRule(rule kyverno.Rule, policyContext *PolicyContext) *response.RuleR
Type: "Generation",
Success: false,
RuleStats: response.RuleStats{
ProcessingTime: time.Since(startTime),
ProcessingTime: time.Since(startTime),
RuleExecutionTimestamp: startTime.Unix(),
},
}
}
@ -111,7 +116,8 @@ func filterRule(rule kyverno.Rule, policyContext *PolicyContext) *response.RuleR
Type: "Generation",
Success: true,
RuleStats: response.RuleStats{
ProcessingTime: time.Since(startTime),
ProcessingTime: time.Since(startTime),
RuleExecutionTimestamp: startTime.Unix(),
},
}
}


@ -28,6 +28,7 @@ func ProcessOverlay(log logr.Logger, ruleName string, overlay interface{}, resou
resp.Type = utils.Mutation.String()
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
resp.RuleStats.RuleExecutionTimestamp = startTime.Unix()
logger.V(4).Info("finished applying overlay rule", "processingTime", resp.RuleStats.ProcessingTime.String())
}()


@ -21,6 +21,7 @@ func ProcessPatchJSON6902(ruleName string, patchesJSON6902 []byte, resource unst
resp.Type = utils.Mutation.String()
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
resp.RuleStats.RuleExecutionTimestamp = startTime.Unix()
logger.V(4).Info("applied JSON6902 patch", "processingTime", resp.RuleStats.ProcessingTime.String())
}()


@ -28,6 +28,7 @@ func ProcessPatches(log logr.Logger, ruleName string, mutation kyverno.Mutation,
resp.Type = utils.Mutation.String()
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
resp.RuleStats.RuleExecutionTimestamp = startTime.Unix()
logger.V(4).Info("applied JSON patch", "processingTime", resp.RuleStats.ProcessingTime.String())
}()


@ -26,6 +26,7 @@ func ProcessStrategicMergePatch(ruleName string, overlay interface{}, resource u
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
resp.RuleStats.RuleExecutionTimestamp = startTime.Unix()
logger.V(4).Info("finished applying strategicMerge patch", "processingTime", resp.RuleStats.ProcessingTime.String())
}()


@ -147,5 +147,6 @@ func endMutateResultResponse(logger logr.Logger, resp *response.EngineResponse,
}
resp.PolicyResponse.ProcessingTime = time.Since(startTime)
resp.PolicyResponse.PolicyExecutionTimestamp = startTime.Unix()
logger.V(5).Info("finished processing policy", "processingTime", resp.PolicyResponse.ProcessingTime.String(), "mutationRulesApplied", resp.PolicyResponse.RulesAppliedCount)
}


@ -52,6 +52,8 @@ type PolicyStats struct {
ProcessingTime time.Duration `json:"processingTime"`
// Count of rules that were applied successfully
RulesAppliedCount int `json:"rulesAppliedCount"`
// Timestamp of the instant the Policy was triggered
PolicyExecutionTimestamp int64 `json:"policyExecutionTimestamp"`
}
//RuleResponse details for each rule application
@ -79,6 +81,8 @@ func (rr RuleResponse) ToString() string {
type RuleStats struct {
// time required to apply the rule on the resource
ProcessingTime time.Duration `json:"processingTime"`
// Timestamp of the instant the rule got triggered
RuleExecutionTimestamp int64 `json:"ruleExecutionTimestamp"`
}
//IsSuccessful checks if any rule has failed or not


@ -67,6 +67,7 @@ func buildResponse(logger logr.Logger, ctx *PolicyContext, resp *response.Engine
resp.PolicyResponse.Resource.APIVersion = resp.PatchedResource.GetAPIVersion()
resp.PolicyResponse.ValidationFailureAction = ctx.Policy.Spec.ValidationFailureAction
resp.PolicyResponse.ProcessingTime = time.Since(startTime)
resp.PolicyResponse.PolicyExecutionTimestamp = startTime.Unix()
}
func incrementAppliedCount(resp *response.EngineResponse) {
@ -229,6 +230,7 @@ func validatePatterns(log logr.Logger, ctx context.EvalInterface, resource unstr
resp.Type = utils.Validation.String()
defer func() {
resp.RuleStats.ProcessingTime = time.Since(startTime)
resp.RuleStats.RuleExecutionTimestamp = startTime.Unix()
logger.V(4).Info("finished processing rule", "processingTime", resp.RuleStats.ProcessingTime.String())
}()


@ -0,0 +1,68 @@
package admissionreviewlatency
import (
"fmt"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
prom "github.com/prometheus/client_golang/prometheus"
)
func (pm PromMetrics) registerAdmissionReviewLatencyMetric(
clusterPoliciesCount, namespacedPoliciesCount int,
validateRulesCount, mutateRulesCount, generateRulesCount int,
resourceName, resourceKind, resourceNamespace string,
resourceRequestOperation metrics.ResourceRequestOperation,
admissionRequestLatency float64,
) error {
pm.AdmissionReviewLatency.With(prom.Labels{
"cluster_policies_count": fmt.Sprintf("%d", clusterPoliciesCount),
"namespaced_policies_count": fmt.Sprintf("%d", namespacedPoliciesCount),
"validate_rules_count": fmt.Sprintf("%d", validateRulesCount),
"mutate_rules_count": fmt.Sprintf("%d", mutateRulesCount),
"generate_rules_count": fmt.Sprintf("%d", generateRulesCount),
"resource_name": resourceName,
"resource_kind": resourceKind,
"resource_namespace": resourceNamespace,
"resource_request_operation": string(resourceRequestOperation),
}).Set(admissionRequestLatency)
return nil
}
func (pm PromMetrics) ProcessEngineResponses(engineResponses []*response.EngineResponse, triggeredPolicies []kyverno.ClusterPolicy, admissionReviewLatencyDuration int64, resourceRequestOperation metrics.ResourceRequestOperation) error {
if len(engineResponses) == 0 {
return nil
}
resourceName, resourceNamespace, resourceKind := engineResponses[0].PolicyResponse.Resource.Name, engineResponses[0].PolicyResponse.Resource.Namespace, engineResponses[0].PolicyResponse.Resource.Kind
clusterPoliciesCount, namespacedPoliciesCount, totalValidateRulesCount, totalMutateRulesCount, totalGenerateRulesCount := 0, 0, 0, 0, 0
for i, e := range engineResponses {
validateRulesCount, mutateRulesCount, generateRulesCount := 0, 0, 0
for _, rule := range e.PolicyResponse.Rules {
switch rule.Type {
case "Validation":
validateRulesCount++
case "Mutation":
mutateRulesCount++
case "Generation":
generateRulesCount++
}
}
// no rules triggered
if validateRulesCount+mutateRulesCount+generateRulesCount == 0 {
continue
}
if triggeredPolicies[i].Namespace == "" {
clusterPoliciesCount++
} else {
namespacedPoliciesCount++
}
totalValidateRulesCount += validateRulesCount
totalMutateRulesCount += mutateRulesCount
totalGenerateRulesCount += generateRulesCount
}
if totalValidateRulesCount+totalMutateRulesCount+totalGenerateRulesCount == 0 {
return nil
}
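// admissionReviewLatencyDuration is expected in nanoseconds; dividing by 1000*1000 converts it to milliseconds for the metric.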
admissionReviewLatencyDurationInMs := float64(admissionReviewLatencyDuration) / float64(1000*1000)
return pm.registerAdmissionReviewLatencyMetric(clusterPoliciesCount, namespacedPoliciesCount, totalValidateRulesCount, totalMutateRulesCount, totalGenerateRulesCount, resourceName, resourceKind, resourceNamespace, resourceRequestOperation, admissionReviewLatencyDurationInMs)
}
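
For context, a hedged sketch of how a caller (for example the admission webhook handler, which is outside this excerpt) could feed one admission review into this metric; the package, function name, variables, and import path are assumptions, while the signatures of ProcessEngineResponses, ParsePromMetrics, and ParseResourceRequestOperation come from the code above.

package webhooks // hypothetical caller

import (
	"time"

	kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
	"github.com/kyverno/kyverno/pkg/engine/response"
	"github.com/kyverno/kyverno/pkg/metrics"
	admissionReviewLatency "github.com/kyverno/kyverno/pkg/metrics/admissionreviewlatency"
)

// registerAdmissionReviewLatency converts the elapsed wall-clock time of one
// admission review into nanoseconds and hands it to the latency metric.
func registerAdmissionReviewLatency(
	promMetrics metrics.PromMetrics,
	engineResponses []*response.EngineResponse,
	triggeredPolicies []kyverno.ClusterPolicy,
	admissionReviewStart time.Time,
	operation string, // e.g. "CREATE"
) error {
	op, err := admissionReviewLatency.ParseResourceRequestOperation(operation)
	if err != nil {
		return err
	}
	latencyInNs := time.Since(admissionReviewStart).Nanoseconds()
	return admissionReviewLatency.ParsePromMetrics(promMetrics).
		ProcessEngineResponses(engineResponses, triggeredPolicies, latencyInNs, op)
}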


@ -0,0 +1,25 @@
package admissionreviewlatency
import (
"fmt"
"github.com/kyverno/kyverno/pkg/metrics"
)
func ParsePromMetrics(pm metrics.PromMetrics) PromMetrics {
return PromMetrics(pm)
}
func ParseResourceRequestOperation(requestOperationStr string) (metrics.ResourceRequestOperation, error) {
switch requestOperationStr {
case "CREATE":
return metrics.ResourceCreated, nil
case "UPDATE":
return metrics.ResourceUpdated, nil
case "DELETE":
return metrics.ResourceDeleted, nil
case "CONNECT":
return metrics.ResourceConnected, nil
default:
return "", fmt.Errorf("Unknown request operation made by resource: %s. Allowed requests: 'CREATE', 'UPDATE', 'DELETE', 'CONNECT'", requestOperationStr)
}
}


@ -0,0 +1,7 @@
package admissionreviewlatency
import (
"github.com/kyverno/kyverno/pkg/metrics"
)
type PromMetrics metrics.PromMetrics


@ -0,0 +1,57 @@
package metrics
type PolicyValidationMode string
const (
Enforce PolicyValidationMode = "enforce"
Audit PolicyValidationMode = "audit"
)
type PolicyType string
const (
Cluster PolicyType = "cluster"
Namespaced PolicyType = "namespaced"
)
type PolicyBackgroundMode string
const (
BackgroundTrue PolicyBackgroundMode = "true"
BackgroundFalse PolicyBackgroundMode = "false"
)
type RuleType string
const (
Validate RuleType = "validate"
Mutate RuleType = "mutate"
Generate RuleType = "generate"
EmptyRuleType RuleType = "-"
)
type RuleResult string
const (
Pass RuleResult = "pass"
Fail RuleResult = "fail"
Warn RuleResult = "warn"
Error RuleResult = "error"
Skip RuleResult = "skip"
)
type RuleExecutionCause string
const (
AdmissionRequest RuleExecutionCause = "admission_request"
BackgroundScan RuleExecutionCause = "background_scan"
)
type ResourceRequestOperation string
const (
ResourceCreated ResourceRequestOperation = "create"
ResourceUpdated ResourceRequestOperation = "update"
ResourceDeleted ResourceRequestOperation = "delete"
ResourceConnected ResourceRequestOperation = "connect"
)

pkg/metrics/metrics.go (new file, 103 lines)

@ -0,0 +1,103 @@
package metrics
import (
prom "github.com/prometheus/client_golang/prometheus"
)
type PromConfig struct {
MetricsRegistry *prom.Registry
Metrics *PromMetrics
}
type PromMetrics struct {
PolicyRuleResults *prom.GaugeVec
PolicyRuleInfo *prom.GaugeVec
PolicyChanges *prom.GaugeVec
PolicyRuleExecutionLatency *prom.GaugeVec
AdmissionReviewLatency *prom.GaugeVec
}
func NewPromConfig() *PromConfig {
pc := new(PromConfig)
pc.MetricsRegistry = prom.NewRegistry()
policyRuleResultsLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_name", "policy_namespace",
"resource_name", "resource_kind", "resource_namespace", "resource_request_operation",
"rule_name", "rule_result", "rule_type", "rule_execution_cause", "rule_response",
"main_request_trigger_timestamp", "policy_execution_timestamp", "rule_execution_timestamp",
}
policyRuleResultsMetric := prom.NewGaugeVec(
prom.GaugeOpts{
Name: "kyverno_policy_rule_results_info",
Help: "can be used to track the results associated with the policies applied in the users cluster, at the level from rule to policy to admission requests.",
},
policyRuleResultsLabels,
)
policyRuleInfoLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_namespace", "policy_name", "rule_name", "rule_type",
}
policyRuleInfoMetric := prom.NewGaugeVec(
prom.GaugeOpts{
Name: "kyverno_policy_rule_info_total",
Help: "can be used to track the info of the rules or/and policies present in the cluster. 0 means the rule doesn't exist and has been deleted, 1 means the rule is currently existent in the cluster.",
},
policyRuleInfoLabels,
)
policyChangesLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_namespace", "policy_name", "policy_change_type", "timestamp",
}
policyChangesMetric := prom.NewGaugeVec(
prom.GaugeOpts{
Name: "kyverno_policy_changes_info",
Help: "can be used to track all the Kyverno policies which have been created, updated or deleted.",
},
policyChangesLabels,
)
policyRuleExecutionLatencyLabels := []string{
"policy_validation_mode", "policy_type", "policy_background_mode", "policy_name", "policy_namespace",
"resource_name", "resource_kind", "resource_namespace", "resource_request_operation",
"rule_name", "rule_result", "rule_type", "rule_execution_cause", "rule_response", "generate_rule_latency_type",
"main_request_trigger_timestamp", "policy_execution_timestamp", "rule_execution_timestamp",
}
policyRuleExecutionLatencyMetric := prom.NewGaugeVec(
prom.GaugeOpts{
Name: "kyverno_policy_rule_execution_latency_milliseconds",
Help: "can be used to track the latencies (in milliseconds) associated with the execution/processing of the individual rules under Kyverno policies whenever they evaluate incoming resource requests.",
},
policyRuleExecutionLatencyLabels,
)
admissionReviewLatency := []string{
"cluster_policies_count", "namespaced_policies_count",
"validate_rules_count", "mutate_rules_count", "generate_rules_count",
"resource_name", "resource_kind", "resource_namespace", "resource_request_operation",
}
admissionReviewLatencyMetric := prom.NewGaugeVec(
prom.GaugeOpts{
Name: "kyverno_admission_review_latency_milliseconds",
Help: "can be used to track the latencies associated with the entire individual admission review. For example, if an incoming request trigger, say, five policies, this metric will track the e2e latency associated with the execution of all those policies.",
},
admissionReviewLatency,
)
pc.Metrics = &PromMetrics{
PolicyRuleResults: policyRuleResultsMetric,
PolicyRuleInfo: policyRuleInfoMetric,
PolicyChanges: policyChangesMetric,
PolicyRuleExecutionLatency: policyRuleExecutionLatencyMetric,
AdmissionReviewLatency: admissionReviewLatencyMetric,
}
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyRuleResults)
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyRuleInfo)
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyChanges)
pc.MetricsRegistry.MustRegister(pc.Metrics.PolicyRuleExecutionLatency)
pc.MetricsRegistry.MustRegister(pc.Metrics.AdmissionReviewLatency)
return pc
}
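
As a standalone illustration (not part of this change), here is a minimal sketch of how this registry is served and how one of the gauges above is written, mirroring the promhttp wiring in the main() changes above and the register helpers in the metrics sub-packages; the label values below are hypothetical.

package main

import (
	"net/http"
	"time"

	"github.com/kyverno/kyverno/pkg/metrics"
	prom "github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

func main() {
	// Build the Prometheus registry and the Kyverno gauge vectors.
	promConfig := metrics.NewPromConfig()

	// Record a sample kyverno_policy_changes_info data point (hypothetical labels).
	promConfig.Metrics.PolicyChanges.With(prom.Labels{
		"policy_validation_mode": "audit",
		"policy_type":            "cluster",
		"policy_background_mode": "true",
		"policy_namespace":       "-",
		"policy_name":            "require-labels",
		"policy_change_type":     "created",
		"timestamp":              time.Now().String(),
	}).Set(1)

	// Serve the registry on /metrics, as the main() changes above do on the metrics port.
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(promConfig.MetricsRegistry, promhttp.HandlerOpts{Timeout: 10 * time.Second}))
	_ = http.ListenAndServe(":8000", mux)
}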

pkg/metrics/parsers.go (new file, 38 lines)

@ -0,0 +1,38 @@
package metrics
import (
"fmt"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"reflect"
)
func ParsePolicyValidationMode(validationFailureAction string) (PolicyValidationMode, error) {
switch validationFailureAction {
case "enforce":
return Enforce, nil
case "audit":
return Audit, nil
default:
return "", fmt.Errorf("wrong validation failure action found %s. Allowed: '%s', '%s'", validationFailureAction, "enforce", "audit")
}
}
func ParsePolicyBackgroundMode(backgroundMode bool) PolicyBackgroundMode {
if backgroundMode {
return BackgroundTrue
}
return BackgroundFalse
}
func ParseRuleType(rule kyverno.Rule) RuleType {
if !reflect.DeepEqual(rule.Validation, kyverno.Validation{}) {
return Validate
}
if !reflect.DeepEqual(rule.Mutation, kyverno.Mutation{}) {
return Mutate
}
if !reflect.DeepEqual(rule.Generation, kyverno.Generation{}) {
return Generate
}
return EmptyRuleType
}


@ -0,0 +1,9 @@
package policychanges
import (
"github.com/kyverno/kyverno/pkg/metrics"
)
func ParsePromMetrics(pm metrics.PromMetrics) PromMetrics {
return PromMetrics(pm)
}


@ -0,0 +1,65 @@
package policychanges
import (
"fmt"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/metrics"
prom "github.com/prometheus/client_golang/prometheus"
"time"
)
func (pm PromMetrics) registerPolicyChangesMetric(
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
policyNamespace, policyName string,
policyChangeType PolicyChangeType,
policyChangeTimestamp int64,
) error {
if policyType == metrics.Cluster {
policyNamespace = "-"
}
pm.PolicyChanges.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"policy_change_type": string(policyChangeType),
"timestamp": fmt.Sprintf("%+v", time.Unix(policyChangeTimestamp, 0)),
}).Set(1)
return nil
}
func (pm PromMetrics) RegisterPolicy(policy interface{}, policyChangeType PolicyChangeType, policyChangeTimestamp int64) error {
switch inputPolicy := policy.(type) {
case *kyverno.ClusterPolicy:
policyValidationMode, err := metrics.ParsePolicyValidationMode(inputPolicy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*inputPolicy.Spec.Background)
policyType := metrics.Cluster
policyNamespace := "" // doesn't matter for cluster policy
policyName := inputPolicy.ObjectMeta.Name
if err = pm.registerPolicyChangesMetric(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, policyChangeType, policyChangeTimestamp); err != nil {
return err
}
return nil
case *kyverno.Policy:
policyValidationMode, err := metrics.ParsePolicyValidationMode(inputPolicy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*inputPolicy.Spec.Background)
policyType := metrics.Namespaced
policyNamespace := inputPolicy.ObjectMeta.Namespace
policyName := inputPolicy.ObjectMeta.Name
if err = pm.registerPolicyChangesMetric(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, policyChangeType, policyChangeTimestamp); err != nil {
return err
}
return nil
default:
return fmt.Errorf("wrong input type provided %T. Only kyverno.Policy and kyverno.ClusterPolicy allowed", inputPolicy)
}
}


@ -0,0 +1,15 @@
package policychanges
import (
"github.com/kyverno/kyverno/pkg/metrics"
)
type PolicyChangeType string
const (
PolicyCreated PolicyChangeType = "created"
PolicyUpdated PolicyChangeType = "updated"
PolicyDeleted PolicyChangeType = "deleted"
)
type PromMetrics metrics.PromMetrics


@ -0,0 +1,40 @@
package policyruleexecutionlatency
import (
"fmt"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
)
func ParsePromMetrics(pm metrics.PromMetrics) PromMetrics {
return PromMetrics(pm)
}
func ParseRuleTypeFromEngineRuleResponse(rule response.RuleResponse) metrics.RuleType {
switch rule.Type {
case "Validation":
return metrics.Validate
case "Mutation":
return metrics.Mutate
case "Generation":
return metrics.Generate
default:
return metrics.EmptyRuleType
}
}
func ParseResourceRequestOperation(requestOperationStr string) (metrics.ResourceRequestOperation, error) {
switch requestOperationStr {
case "CREATE":
return metrics.ResourceCreated, nil
case "UPDATE":
return metrics.ResourceUpdated, nil
case "DELETE":
return metrics.ResourceDeleted, nil
case "CONNECT":
return metrics.ResourceConnected, nil
default:
return "", fmt.Errorf("Unknown request operation made by resource: %s. Allowed requests: 'CREATE', 'UPDATE', 'DELETE', 'CONNECT'", requestOperationStr)
}
}


@ -0,0 +1,117 @@
package policyruleexecutionlatency
import (
"fmt"
"time"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
prom "github.com/prometheus/client_golang/prometheus"
)
func (pm PromMetrics) registerPolicyRuleResultsMetric(
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
policyNamespace, policyName string,
resourceName, resourceKind, resourceNamespace string,
resourceRequestOperation metrics.ResourceRequestOperation,
ruleName string,
ruleResult metrics.RuleResult,
ruleType metrics.RuleType,
ruleExecutionCause metrics.RuleExecutionCause,
ruleResponse string,
mainRequestTriggerTimestamp, policyExecutionTimestamp, ruleExecutionTimestamp int64,
generateRuleLatencyType string,
ruleExecutionLatencyInMs float64,
) error {
if policyType == metrics.Cluster {
policyNamespace = "-"
}
if ruleType != metrics.Generate || generateRuleLatencyType == "" {
generateRuleLatencyType = "-"
}
pm.PolicyRuleExecutionLatency.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"resource_name": resourceName,
"resource_kind": resourceKind,
"resource_namespace": resourceNamespace,
"resource_request_operation": string(resourceRequestOperation),
"rule_name": ruleName,
"rule_result": string(ruleResult),
"rule_type": string(ruleType),
"rule_execution_cause": string(ruleExecutionCause),
"rule_response": ruleResponse,
"main_request_trigger_timestamp": fmt.Sprintf("%+v", time.Unix(mainRequestTriggerTimestamp, 0)),
"policy_execution_timestamp": fmt.Sprintf("%+v", time.Unix(policyExecutionTimestamp, 0)),
"rule_execution_timestamp": fmt.Sprintf("%+v", time.Unix(ruleExecutionTimestamp, 0)),
"generate_rule_latency_type": generateRuleLatencyType,
}).Set(ruleExecutionLatencyInMs)
return nil
}
//policy - policy related data
//engineResponse - resource and rule related data
func (pm PromMetrics) ProcessEngineResponse(policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, executionCause metrics.RuleExecutionCause, generateRuleLatencyType string, resourceRequestOperation metrics.ResourceRequestOperation, mainRequestTriggerTimestamp int64) error {
policyValidationMode, err := metrics.ParsePolicyValidationMode(policy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyType := metrics.Namespaced
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*policy.Spec.Background)
policyNamespace := policy.ObjectMeta.Namespace
if policyNamespace == "" {
policyNamespace = "-"
policyType = metrics.Cluster
}
policyName := policy.ObjectMeta.Name
policyExecutionTimestamp := engineResponse.PolicyResponse.PolicyExecutionTimestamp
resourceSpec := engineResponse.PolicyResponse.Resource
resourceName := resourceSpec.Name
resourceKind := resourceSpec.Kind
resourceNamespace := resourceSpec.Namespace
ruleResponses := engineResponse.PolicyResponse.Rules
for _, rule := range ruleResponses {
ruleName := rule.Name
ruleType := ParseRuleTypeFromEngineRuleResponse(rule)
ruleResponse := rule.Message
ruleResult := metrics.Fail
if rule.Success {
ruleResult = metrics.Pass
}
ruleExecutionTimestamp := rule.RuleStats.RuleExecutionTimestamp
ruleExecutionLatencyInMs := float64(rule.RuleStats.ProcessingTime) / float64(1000*1000)
if err := pm.registerPolicyRuleResultsMetric(
policyValidationMode,
policyType,
policyBackgroundMode,
policyNamespace, policyName,
resourceName, resourceKind, resourceNamespace,
resourceRequestOperation,
ruleName,
ruleResult,
ruleType,
executionCause,
ruleResponse,
mainRequestTriggerTimestamp, policyExecutionTimestamp, ruleExecutionTimestamp,
generateRuleLatencyType,
ruleExecutionLatencyInMs,
); err != nil {
return err
}
}
return nil
}


@ -0,0 +1,7 @@
package policyruleexecutionlatency
import (
"github.com/kyverno/kyverno/pkg/metrics"
)
type PromMetrics metrics.PromMetrics


@ -0,0 +1,21 @@
package policyruleinfo
import (
"fmt"
"github.com/kyverno/kyverno/pkg/metrics"
)
func ParsePolicyRuleInfoMetricChangeType(change string) (PolicyRuleInfoMetricChangeType, error) {
if change == "created" {
return PolicyRuleCreated, nil
}
if change == "deleted" {
return PolicyRuleDeleted, nil
}
return "", fmt.Errorf("wrong policy rule count metric change type found %s. Allowed: '%s', '%s'", change, "created", "deleted")
}
func ParsePromMetrics(pm metrics.PromMetrics) PromMetrics {
return PromMetrics(pm)
}


@ -0,0 +1,133 @@
package policyruleinfo
import (
"fmt"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/metrics"
prom "github.com/prometheus/client_golang/prometheus"
)
func (pm PromMetrics) registerPolicyRuleInfoMetric(
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
policyNamespace, policyName, ruleName string,
ruleType metrics.RuleType,
metricChangeType PolicyRuleInfoMetricChangeType,
) error {
var metricValue float64
switch metricChangeType {
case PolicyRuleCreated:
metricValue = float64(1)
case PolicyRuleDeleted:
metricValue = float64(0)
default:
return fmt.Errorf("unknown metric change type found: %s", metricChangeType)
}
if policyType == metrics.Cluster {
policyNamespace = "-"
}
pm.PolicyRuleInfo.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"rule_name": ruleName,
"rule_type": string(ruleType),
}).Set(metricValue)
return nil
}
func (pm PromMetrics) AddPolicy(policy interface{}) error {
switch inputPolicy := policy.(type) {
case *kyverno.ClusterPolicy:
policyValidationMode, err := metrics.ParsePolicyValidationMode(inputPolicy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*inputPolicy.Spec.Background)
policyType := metrics.Cluster
policyNamespace := "" // doesn't matter for cluster policy
policyName := inputPolicy.ObjectMeta.Name
// registering the metrics on a per-rule basis
for _, rule := range inputPolicy.Spec.Rules {
ruleName := rule.Name
ruleType := metrics.ParseRuleType(rule)
if err = pm.registerPolicyRuleInfoMetric(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, ruleName, ruleType, PolicyRuleCreated); err != nil {
return err
}
}
return nil
case *kyverno.Policy:
policyValidationMode, err := metrics.ParsePolicyValidationMode(inputPolicy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*inputPolicy.Spec.Background)
policyType := metrics.Namespaced
policyNamespace := inputPolicy.ObjectMeta.Namespace
policyName := inputPolicy.ObjectMeta.Name
// registering the metrics on a per-rule basis
for _, rule := range inputPolicy.Spec.Rules {
ruleName := rule.Name
ruleType := metrics.ParseRuleType(rule)
if err = pm.registerPolicyRuleInfoMetric(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, ruleName, ruleType, PolicyRuleCreated); err != nil {
return err
}
}
return nil
default:
return fmt.Errorf("wrong input type provided %T. Only kyverno.Policy and kyverno.ClusterPolicy allowed", inputPolicy)
}
}
func (pm PromMetrics) RemovePolicy(policy interface{}) error {
switch inputPolicy := policy.(type) {
case *kyverno.ClusterPolicy:
for _, rule := range inputPolicy.Spec.Rules {
policyValidationMode, err := metrics.ParsePolicyValidationMode(inputPolicy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*inputPolicy.Spec.Background)
policyType := metrics.Cluster
policyNamespace := "" // doesn't matter for cluster policy
policyName := inputPolicy.ObjectMeta.Name
ruleName := rule.Name
ruleType := metrics.ParseRuleType(rule)
if err = pm.registerPolicyRuleInfoMetric(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, ruleName, ruleType, PolicyRuleDeleted); err != nil {
return err
}
}
return nil
case *kyverno.Policy:
for _, rule := range inputPolicy.Spec.Rules {
policyValidationMode, err := metrics.ParsePolicyValidationMode(inputPolicy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*inputPolicy.Spec.Background)
policyType := metrics.Namespaced
policyNamespace := inputPolicy.ObjectMeta.Namespace
policyName := inputPolicy.ObjectMeta.Name
ruleName := rule.Name
ruleType := metrics.ParseRuleType(rule)
if err = pm.registerPolicyRuleInfoMetric(policyValidationMode, policyType, policyBackgroundMode, policyNamespace, policyName, ruleName, ruleType, PolicyRuleDeleted); err != nil {
return err
}
}
return nil
default:
return fmt.Errorf("wrong input type provided %T. Only kyverno.Policy and kyverno.ClusterPolicy allowed", inputPolicy)
}
}


@ -0,0 +1,14 @@
package policyruleinfo
import (
"github.com/kyverno/kyverno/pkg/metrics"
)
type PolicyRuleInfoMetricChangeType string
const (
PolicyRuleCreated PolicyRuleInfoMetricChangeType = "created"
PolicyRuleDeleted PolicyRuleInfoMetricChangeType = "deleted"
)
type PromMetrics metrics.PromMetrics


@ -0,0 +1,39 @@
package policyruleresults
import (
"fmt"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
)
func ParsePromMetrics(pm metrics.PromMetrics) PromMetrics {
return PromMetrics(pm)
}
func ParseRuleTypeFromEngineRuleResponse(rule response.RuleResponse) metrics.RuleType {
switch rule.Type {
case "Validation":
return metrics.Validate
case "Mutation":
return metrics.Mutate
case "Generation":
return metrics.Generate
default:
return metrics.EmptyRuleType
}
}
func ParseResourceRequestOperation(requestOperationStr string) (metrics.ResourceRequestOperation, error) {
switch requestOperationStr {
case "CREATE":
return metrics.ResourceCreated, nil
case "UPDATE":
return metrics.ResourceUpdated, nil
case "DELETE":
return metrics.ResourceDeleted, nil
case "CONNECT":
return metrics.ResourceConnected, nil
default:
return "", fmt.Errorf("Unknown request operation made by resource: %s. Allowed requests: 'CREATE', 'UPDATE', 'DELETE', 'CONNECT'", requestOperationStr)
}
}


@ -0,0 +1,107 @@
package policyruleresults
import (
"fmt"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
prom "github.com/prometheus/client_golang/prometheus"
"time"
)
func (pm PromMetrics) registerPolicyRuleResultsMetric(
policyValidationMode metrics.PolicyValidationMode,
policyType metrics.PolicyType,
policyBackgroundMode metrics.PolicyBackgroundMode,
policyNamespace, policyName string,
resourceName, resourceKind, resourceNamespace string,
resourceRequestOperation metrics.ResourceRequestOperation,
ruleName string,
ruleResult metrics.RuleResult,
ruleType metrics.RuleType,
ruleExecutionCause metrics.RuleExecutionCause,
ruleResponse string,
mainRequestTriggerTimestamp, policyExecutionTimestamp, ruleExecutionTimestamp int64,
) error {
if policyType == metrics.Cluster {
policyNamespace = "-"
}
pm.PolicyRuleResults.With(prom.Labels{
"policy_validation_mode": string(policyValidationMode),
"policy_type": string(policyType),
"policy_background_mode": string(policyBackgroundMode),
"policy_namespace": policyNamespace,
"policy_name": policyName,
"resource_name": resourceName,
"resource_kind": resourceKind,
"resource_namespace": resourceNamespace,
"resource_request_operation": string(resourceRequestOperation),
"rule_name": ruleName,
"rule_result": string(ruleResult),
"rule_type": string(ruleType),
"rule_execution_cause": string(ruleExecutionCause),
"rule_response": ruleResponse,
"main_request_trigger_timestamp": fmt.Sprintf("%+v", time.Unix(mainRequestTriggerTimestamp, 0)),
"policy_execution_timestamp": fmt.Sprintf("%+v", time.Unix(policyExecutionTimestamp, 0)),
"rule_execution_timestamp": fmt.Sprintf("%+v", time.Unix(ruleExecutionTimestamp, 0)),
}).Set(1)
return nil
}
//policy - policy related data
//engineResponse - resource and rule related data
func (pm PromMetrics) ProcessEngineResponse(policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, executionCause metrics.RuleExecutionCause, resourceRequestOperation metrics.ResourceRequestOperation, mainRequestTriggerTimestamp int64) error {
policyValidationMode, err := metrics.ParsePolicyValidationMode(policy.Spec.ValidationFailureAction)
if err != nil {
return err
}
policyType := metrics.Namespaced
policyBackgroundMode := metrics.ParsePolicyBackgroundMode(*policy.Spec.Background)
policyNamespace := policy.ObjectMeta.Namespace
if policyNamespace == "" {
policyNamespace = "-"
policyType = metrics.Cluster
}
policyName := policy.ObjectMeta.Name
policyExecutionTimestamp := engineResponse.PolicyResponse.PolicyExecutionTimestamp
resourceSpec := engineResponse.PolicyResponse.Resource
resourceName := resourceSpec.Name
resourceKind := resourceSpec.Kind
resourceNamespace := resourceSpec.Namespace
ruleResponses := engineResponse.PolicyResponse.Rules
for _, rule := range ruleResponses {
ruleName := rule.Name
ruleType := ParseRuleTypeFromEngineRuleResponse(rule)
ruleResponse := rule.Message
ruleResult := metrics.Fail
if rule.Success {
ruleResult = metrics.Pass
}
ruleExecutionTimestamp := rule.RuleStats.RuleExecutionTimestamp
if err := pm.registerPolicyRuleResultsMetric(
policyValidationMode,
policyType,
policyBackgroundMode,
policyNamespace, policyName,
resourceName, resourceKind, resourceNamespace,
resourceRequestOperation,
ruleName,
ruleResult,
ruleType,
executionCause,
ruleResponse,
mainRequestTriggerTimestamp, policyExecutionTimestamp, ruleExecutionTimestamp,
); err != nil {
return err
}
}
return nil
}
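For reference, pm.PolicyRuleResults used above is a Prometheus gauge vector keyed by the labels passed to With(). Its declaration is not part of this hunk, so the following is only a sketch of the assumed shape; the field layout, metric name, and help text here are assumptions:

package metrics

import prom "github.com/prometheus/client_golang/prometheus"

// Assumed shape of the metrics container referenced as pm in the file above.
type PromMetrics struct {
	PolicyRuleResults *prom.GaugeVec
	// ... other metric vectors omitted
}

// newPolicyRuleResultsVec builds the gauge vector with the same label names
// that registerPolicyRuleResultsMetric sets.
func newPolicyRuleResultsVec() *prom.GaugeVec {
	return prom.NewGaugeVec(
		prom.GaugeOpts{
			Name: "kyverno_policy_rule_results_info",
			Help: "Per-rule results of policy executions.",
		},
		[]string{
			"policy_validation_mode", "policy_type", "policy_background_mode",
			"policy_namespace", "policy_name",
			"resource_name", "resource_kind", "resource_namespace", "resource_request_operation",
			"rule_name", "rule_result", "rule_type", "rule_execution_cause", "rule_response",
			"main_request_trigger_timestamp", "policy_execution_timestamp", "rule_execution_timestamp",
		},
	)
}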

View file

@ -0,0 +1,7 @@
package policyruleresults
import (
"github.com/kyverno/kyverno/pkg/metrics"
)
type PromMetrics metrics.PromMetrics
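The ParsePromMetrics helper called throughout the controller and webhook code is not shown in this hunk; given the alias above, it is presumably a trivial type conversion along these lines (a sketch, not the actual source):

func ParsePromMetrics(pm metrics.PromMetrics) PromMetrics {
	return PromMetrics(pm)
}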

View file

@ -11,10 +11,13 @@ import (
"github.com/kyverno/kyverno/pkg/common"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/metrics"
policyRuleExecutionLatency "github.com/kyverno/kyverno/pkg/metrics/policyruleexecutionlatency"
policyRuleResults "github.com/kyverno/kyverno/pkg/metrics/policyruleresults"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func (pc *PolicyController) processExistingResources(policy *kyverno.ClusterPolicy) {
func (pc *PolicyController) processExistingResources(policy *kyverno.ClusterPolicy, backgroundScanTimestamp int64) {
logger := pc.log.WithValues("policy", policy.Name)
logger.V(4).Info("applying policy to existing resources")
@ -38,8 +41,11 @@ func (pc *PolicyController) processExistingResources(policy *kyverno.ClusterPoli
namespaced, _ = pc.rm.GetScope(k)
}
// this tracker ensures that, even across multiple namespaces, duplicate metrics are not generated
metricRegisteredTracker := false
if !namespaced {
pc.applyAndReportPerNamespace(policy, k, "", rule, logger.WithValues("kind", k))
pc.applyAndReportPerNamespace(policy, k, "", rule, logger.WithValues("kind", k), backgroundScanTimestamp, &metricRegisteredTracker)
continue
}
@ -48,7 +54,7 @@ func (pc *PolicyController) processExistingResources(policy *kyverno.ClusterPoli
// for kind: Policy, consider only the namespace which the policy belongs to.
// for kind: ClusterPolicy, consider all the namespaces.
if policy.Namespace == ns || policy.Namespace == "" {
pc.applyAndReportPerNamespace(policy, k, ns, rule, logger.WithValues("kind", k).WithValues("ns", ns))
pc.applyAndReportPerNamespace(policy, k, ns, rule, logger.WithValues("kind", k).WithValues("ns", ns), backgroundScanTimestamp, &metricRegisteredTracker)
}
}
}
@ -66,7 +72,7 @@ func (pc *PolicyController) registerResource(gvk string) (err error) {
return nil
}
func (pc *PolicyController) applyAndReportPerNamespace(policy *kyverno.ClusterPolicy, kind string, ns string, rule kyverno.Rule, logger logr.Logger) {
func (pc *PolicyController) applyAndReportPerNamespace(policy *kyverno.ClusterPolicy, kind string, ns string, rule kyverno.Rule, logger logr.Logger, backgroundScanTimestamp int64, metricAlreadyRegistered *bool) {
rMap := pc.getResourcesPerNamespace(kind, ns, rule, logger)
excludeAutoGenResources(*policy, rMap, logger)
if len(rMap) == 0 {
@ -79,9 +85,31 @@ func (pc *PolicyController) applyAndReportPerNamespace(policy *kyverno.ClusterPo
engineResponses = append(engineResponses, responses...)
}
if !*metricAlreadyRegistered && len(engineResponses) > 0 {
for _, engineResponse := range engineResponses {
// registering the kyverno_policy_rule_results_info metric concurrently
go pc.registerPolicyRuleResultsMetricValidationBS(logger, *policy, *engineResponse, backgroundScanTimestamp)
// registering the kyverno_policy_rule_execution_latency_milliseconds metric concurrently
go pc.registerPolicyRuleExecutionLatencyMetricValidateBS(logger, *policy, *engineResponse, backgroundScanTimestamp)
}
*metricAlreadyRegistered = true
}
pc.report(policy.Name, engineResponses, logger)
}
func (pc *PolicyController) registerPolicyRuleResultsMetricValidationBS(logger logr.Logger, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, backgroundScanTimestamp int64) {
if err := policyRuleResults.ParsePromMetrics(*pc.promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.BackgroundScan, metrics.ResourceCreated, backgroundScanTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_results_info metrics for the above policy", "name", policy.Name)
}
}
func (pc *PolicyController) registerPolicyRuleExecutionLatencyMetricValidateBS(logger logr.Logger, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, backgroundScanTimestamp int64) {
if err := policyRuleExecutionLatency.ParsePromMetrics(*pc.promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.BackgroundScan, "", metrics.ResourceCreated, backgroundScanTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
}
}
func (pc *PolicyController) applyPolicy(policy *kyverno.ClusterPolicy, resource unstructured.Unstructured, logger logr.Logger) (engineResponses []*response.EngineResponse) {
// pre-processing, check if the policy and resource version has been processed before
if !pc.rm.ProcessResource(policy.Name, policy.ResourceVersion, resource.GetKind(), resource.GetNamespace(), resource.GetName(), resource.GetResourceVersion()) {

View file

@ -19,6 +19,8 @@ import (
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/kyverno/common"
"github.com/kyverno/kyverno/pkg/metrics"
policyRuleInfoMetric "github.com/kyverno/kyverno/pkg/metrics/policyruleinfo"
pm "github.com/kyverno/kyverno/pkg/policymutation"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/resourcecache"
@ -36,6 +38,8 @@ import (
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/client-go/util/workqueue"
policyChangesMetric "github.com/kyverno/kyverno/pkg/metrics/policychanges"
)
const (
@ -105,6 +109,8 @@ type PolicyController struct {
reconcilePeriod time.Duration
log logr.Logger
promConfig *metrics.PromConfig
}
// NewPolicyController create a new PolicyController
@ -120,7 +126,8 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
namespaces informers.NamespaceInformer,
log logr.Logger,
resCache resourcecache.ResourceCache,
reconcilePeriod time.Duration) (*PolicyController, error) {
reconcilePeriod time.Duration,
promConfig *metrics.PromConfig) (*PolicyController, error) {
// Event broad caster
eventBroadcaster := record.NewBroadcaster()
@ -143,6 +150,7 @@ func NewPolicyController(kyvernoClient *kyvernoclient.Clientset,
log: log,
resCache: resCache,
reconcilePeriod: reconcilePeriod,
promConfig: promConfig,
}
pInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
@ -191,12 +199,75 @@ func (pc *PolicyController) canBackgroundProcess(p *kyverno.ClusterPolicy) bool
return true
}
func (pc *PolicyController) registerPolicyRuleInfoMetricAddPolicy(logger logr.Logger, p *kyverno.ClusterPolicy) {
err := policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).AddPolicy(p)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's creation", "name", p.Name)
}
}
func (pc *PolicyController) registerPolicyRuleInfoMetricUpdatePolicy(logger logr.Logger, oldP, curP *kyverno.ClusterPolicy) {
// removing the old rules associated metrics
err := policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).RemovePolicy(oldP)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's updation", "name", oldP.Name)
}
// adding the new rules associated metrics
err = policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).AddPolicy(curP)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's updation", "name", oldP.Name)
}
}
func (pc *PolicyController) registerPolicyRuleInfoMetricDeletePolicy(logger logr.Logger, p *kyverno.ClusterPolicy) {
err := policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).RemovePolicy(p)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's deletion", "name", p.Name)
}
}
func (pc *PolicyController) registerPolicyChangesMetricAddPolicy(logger logr.Logger, p *kyverno.ClusterPolicy, policyChangeTimestamp int64) {
err := policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(p, policyChangesMetric.PolicyCreated, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's creation", "name", p.Name)
}
}
func (pc *PolicyController) registerPolicyChangesMetricUpdatePolicy(logger logr.Logger, oldP, curP *kyverno.ClusterPolicy, policyChangeTimestamp int64) {
if reflect.DeepEqual((*oldP).Spec, (*curP).Spec) {
return
}
err := policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(oldP, policyChangesMetric.PolicyUpdated, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's updation", "name", oldP.Name)
}
// curP will require a new kyverno_policy_changes_info metric if the above update involved a change in any of the following fields:
if curP.Spec.Background != oldP.Spec.Background || curP.Spec.ValidationFailureAction != oldP.Spec.ValidationFailureAction {
err = policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(curP, policyChangesMetric.PolicyUpdated, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's updation", "name", curP.Name)
}
}
}
func (pc *PolicyController) registerPolicyChangesMetricDeletePolicy(logger logr.Logger, p *kyverno.ClusterPolicy, policyChangeTimestamp int64) {
err := policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(p, policyChangesMetric.PolicyDeleted, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's deletion", "name", p.Name)
}
}
func (pc *PolicyController) addPolicy(obj interface{}) {
logger := pc.log
p := obj.(*kyverno.ClusterPolicy)
logger.Info("policy created", "uid", p.UID, "kind", "ClusterPolicy", "name", p.Name)
// register kyverno_policy_rule_info_total metric concurrently
go pc.registerPolicyRuleInfoMetricAddPolicy(logger, p)
// register kyverno_policy_changes_info metric concurrently
go pc.registerPolicyChangesMetricAddPolicy(logger, p, time.Now().Unix())
if p.Spec.Background == nil || p.Spec.ValidationFailureAction == "" || missingAutoGenRules(p, logger) {
pol, _ := common.MutatePolicy(p, logger)
pol.SetGroupVersionKind(schema.GroupVersionKind{Group: "kyverno.io", Version: "v1", Kind: "ClusterPolicy"})
@ -219,6 +290,11 @@ func (pc *PolicyController) updatePolicy(old, cur interface{}) {
oldP := old.(*kyverno.ClusterPolicy)
curP := cur.(*kyverno.ClusterPolicy)
// register kyverno_policy_rule_info_total metric concurrently
go pc.registerPolicyRuleInfoMetricUpdatePolicy(logger, oldP, curP)
// register kyverno_policy_changes_info metric concurrently
go pc.registerPolicyChangesMetricUpdatePolicy(logger, oldP, curP, time.Now().Unix())
if curP.Spec.Background == nil || curP.Spec.ValidationFailureAction == "" || missingAutoGenRules(curP, logger) {
pol, _ := common.MutatePolicy(curP, logger)
pol.SetGroupVersionKind(schema.GroupVersionKind{Group: "kyverno.io", Version: "v1", Kind: "ClusterPolicy"})
@ -260,6 +336,11 @@ func (pc *PolicyController) deletePolicy(obj interface{}) {
}
}
// register kyverno_policy_rule_info_total metric concurrently
go pc.registerPolicyRuleInfoMetricDeletePolicy(logger, p)
// register kyverno_policy_changes_info metric concurrently
go pc.registerPolicyChangesMetricDeletePolicy(logger, p, time.Now().Unix())
logger.Info("policy deleted", "uid", p.UID, "kind", "ClusterPolicy", "name", p.Name)
// we process policies that are not set of background processing
@ -268,10 +349,73 @@ func (pc *PolicyController) deletePolicy(obj interface{}) {
pc.enqueueRCRDeletedPolicy(p.Name)
}
func (pc *PolicyController) registerPolicyRuleInfoMetricAddNsPolicy(logger logr.Logger, p *kyverno.Policy) {
err := policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).AddPolicy(p)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's creation", "name", p.Name)
}
}
func (pc *PolicyController) registerPolicyRuleInfoMetricUpdateNsPolicy(logger logr.Logger, oldP, curP *kyverno.Policy) {
// removing the old rules associated metrics
err := policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).RemovePolicy(oldP)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's updation", "name", oldP.Name)
}
// adding the new rules associated metrics
err = policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).AddPolicy(curP)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's updation", "name", oldP.Name)
}
}
func (pc *PolicyController) registerPolicyRuleInfoMetricDeleteNsPolicy(logger logr.Logger, p *kyverno.Policy) {
err := policyRuleInfoMetric.ParsePromMetrics(*pc.promConfig.Metrics).RemovePolicy(p)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_info_total metrics for the above policy's deletion", "name", p.Name)
}
}
func (pc *PolicyController) registerPolicyChangesMetricAddNsPolicy(logger logr.Logger, p *kyverno.Policy, policyChangeTimestamp int64) {
err := policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(p, policyChangesMetric.PolicyCreated, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's creation", "name", p.Name)
}
}
func (pc *PolicyController) registerPolicyChangesMetricUpdateNsPolicy(logger logr.Logger, oldP, curP *kyverno.Policy, policyChangeTimestamp int64) {
if reflect.DeepEqual((*oldP).Spec, (*curP).Spec) {
return
}
err := policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(oldP, policyChangesMetric.PolicyUpdated, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's updation", "name", oldP.Name)
}
// curP will require a new kyverno_policy_changes_info metric if the above update involved a change in any of the following fields:
if curP.Spec.Background != oldP.Spec.Background || curP.Spec.ValidationFailureAction != oldP.Spec.ValidationFailureAction {
err = policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(curP, policyChangesMetric.PolicyUpdated, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's updation", "name", curP.Name)
}
}
}
func (pc *PolicyController) registerPolicyChangesMetricDeleteNsPolicy(logger logr.Logger, p *kyverno.Policy, policyChangeTimestamp int64) {
err := policyChangesMetric.ParsePromMetrics(*pc.promConfig.Metrics).RegisterPolicy(p, policyChangesMetric.PolicyDeleted, policyChangeTimestamp)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_changes_info metrics for the above policy's deletion", "name", p.Name)
}
}
func (pc *PolicyController) addNsPolicy(obj interface{}) {
logger := pc.log
p := obj.(*kyverno.Policy)
// register kyverno_policy_rule_info_total metric concurrently
go pc.registerPolicyRuleInfoMetricAddNsPolicy(logger, p)
// register kyverno_policy_changes_info metric concurrently
go pc.registerPolicyChangesMetricAddNsPolicy(logger, p, time.Now().Unix())
logger.Info("policy created", "uid", p.UID, "kind", "Policy", "name", p.Name, "namespaces", p.Namespace)
pol := ConvertPolicyToClusterPolicy(p)
@ -294,6 +438,12 @@ func (pc *PolicyController) updateNsPolicy(old, cur interface{}) {
logger := pc.log
oldP := old.(*kyverno.Policy)
curP := cur.(*kyverno.Policy)
// register kyverno_policy_rule_info_total metric concurrently
go pc.registerPolicyRuleInfoMetricUpdateNsPolicy(logger, oldP, curP)
// register kyverno_policy_changes_info metric concurrently
go pc.registerPolicyChangesMetricUpdateNsPolicy(logger, oldP, curP, time.Now().Unix())
ncurP := ConvertPolicyToClusterPolicy(curP)
if ncurP.Spec.Background == nil || ncurP.Spec.ValidationFailureAction == "" || missingAutoGenRules(ncurP, logger) {
@ -336,6 +486,11 @@ func (pc *PolicyController) deleteNsPolicy(obj interface{}) {
}
}
// register kyverno_policy_rule_info_total metric concurrently
go pc.registerPolicyRuleInfoMetricDeleteNsPolicy(logger, p)
// register kyverno_policy_changes_info metric concurrently
go pc.registerPolicyChangesMetricDeleteNsPolicy(logger, p, time.Now().Unix())
logger.Info("policy deleted event", "uid", p.UID, "kind", "Policy", "policy_name", p.Name, "namespaces", p.Namespace)
pol := ConvertPolicyToClusterPolicy(p)
@ -469,7 +624,7 @@ func (pc *PolicyController) syncPolicy(key string) error {
}
updateGR(pc.kyvernoClient, policy.Name, grList, logger)
pc.processExistingResources(policy)
pc.processExistingResources(policy, startTime.Unix())
return nil
}

View file

@ -24,6 +24,9 @@ import (
"github.com/kyverno/kyverno/pkg/engine/variables"
"github.com/kyverno/kyverno/pkg/event"
gen "github.com/kyverno/kyverno/pkg/generate"
"github.com/kyverno/kyverno/pkg/metrics"
policyRuleExecutionLatency "github.com/kyverno/kyverno/pkg/metrics/policyruleexecutionlatency"
policyRuleResults "github.com/kyverno/kyverno/pkg/metrics/policyruleresults"
kyvernoutils "github.com/kyverno/kyverno/pkg/utils"
"github.com/kyverno/kyverno/pkg/webhooks/generate"
v1beta1 "k8s.io/api/admission/v1beta1"
@ -33,7 +36,7 @@ import (
)
//HandleGenerate handles admission-requests for policies with generate rules
func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo, dynamicConfig config.Interface) {
func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo, dynamicConfig config.Interface, admissionRequestTimestamp int64, latencySender *chan int64) {
logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
logger.V(4).Info("incoming request")
var engineResponses []*response.EngineResponse
@ -81,6 +84,11 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
resp: engineResponse,
})
}
// registering the kyverno_policy_rule_results_info metric concurrently
go ws.registerPolicyRuleResultsMetricGeneration(logger, string(request.Operation), *policy, *engineResponse, admissionRequestTimestamp)
// registering the kyverno_policy_rule_execution_latency_milliseconds metric concurrently
go ws.registerPolicyRuleExecutionLatencyMetricGenerate(logger, string(request.Operation), *policy, *engineResponse, admissionRequestTimestamp)
}
// Adds Generate Request to a channel(queue size 1000) to generators
@ -96,6 +104,30 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
if request.Operation == v1beta1.Update {
ws.handleUpdate(request, policies)
}
// sending the admission review latency to the other goroutine (which reports the metric) over the channel
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
*latencySender <- admissionReviewLatencyDuration
}
func (ws *WebhookServer) registerPolicyRuleResultsMetricGeneration(logger logr.Logger, resourceRequestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
resourceRequestOperationPromAlias, err := policyRuleResults.ParseResourceRequestOperation(resourceRequestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_results_info metrics for the above policy", "name", policy.Name)
}
if err := policyRuleResults.ParsePromMetrics(*ws.promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.AdmissionRequest, resourceRequestOperationPromAlias, admissionRequestTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_results_info metrics for the above policy", "name", policy.Name)
}
}
func (ws *WebhookServer) registerPolicyRuleExecutionLatencyMetricGenerate(logger logr.Logger, resourceRequestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
resourceRequestOperationPromAlias, err := policyRuleExecutionLatency.ParseResourceRequestOperation(resourceRequestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
}
if err := policyRuleExecutionLatency.ParsePromMetrics(*ws.promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.AdmissionRequest, "", resourceRequestOperationPromAlias, admissionRequestTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
}
}
//handleUpdate handles admission-requests for update

View file

@ -6,6 +6,7 @@ import (
"sort"
"time"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/common"
@ -13,21 +14,25 @@ import (
"github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/response"
engineutils "github.com/kyverno/kyverno/pkg/engine/utils"
"github.com/kyverno/kyverno/pkg/metrics"
policyRuleExecutionLatency "github.com/kyverno/kyverno/pkg/metrics/policyruleexecutionlatency"
policyRuleResults "github.com/kyverno/kyverno/pkg/metrics/policyruleresults"
v1beta1 "k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
// HandleMutation handles mutating webhook admission request
// return value: generated patches
// return value: generated patches, triggered policies, engine responses corresponding to the triggered policies
func (ws *WebhookServer) HandleMutation(
request *v1beta1.AdmissionRequest,
resource unstructured.Unstructured,
policies []*kyverno.ClusterPolicy,
ctx *context.Context,
userRequestInfo kyverno.RequestInfo) []byte {
userRequestInfo kyverno.RequestInfo,
admissionRequestTimestamp int64) ([]byte, []kyverno.ClusterPolicy, []*response.EngineResponse) {
if len(policies) == 0 {
return nil
return nil, nil, nil
}
resourceName := request.Kind.Kind + "/" + request.Name
@ -39,6 +44,7 @@ func (ws *WebhookServer) HandleMutation(
var patches [][]byte
var engineResponses []*response.EngineResponse
var triggeredPolicies []kyverno.ClusterPolicy
policyContext := &engine.PolicyContext{
NewResource: resource,
AdmissionInfo: userRequestInfo,
@ -87,6 +93,14 @@ func (ws *WebhookServer) HandleMutation(
policyContext.NewResource = engineResponse.PatchedResource
engineResponses = append(engineResponses, engineResponse)
// registering the kyverno_policy_rule_results_info metric concurrently
go ws.registerPolicyRuleResultsMetricMutation(logger, string(request.Operation), *policy, *engineResponse, admissionRequestTimestamp)
// registering the kyverno_policy_rule_execution_latency_milliseconds metric concurrently
go ws.registerPolicyRuleExecutionLatencyMetricMutate(logger, string(request.Operation), *policy, *engineResponse, admissionRequestTimestamp)
triggeredPolicies = append(triggeredPolicies, *policy)
}
// generate annotations
@ -118,7 +132,27 @@ func (ws *WebhookServer) HandleMutation(
}()
// patches holds all the successful patches, if no patch is created, it returns nil
return engineutils.JoinPatches(patches)
return engineutils.JoinPatches(patches), triggeredPolicies, engineResponses
}
func (ws *WebhookServer) registerPolicyRuleResultsMetricMutation(logger logr.Logger, resourceRequestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
resourceRequestOperationPromAlias, err := policyRuleResults.ParseResourceRequestOperation(resourceRequestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_results_info metrics for the above policy", "name", policy.Name)
}
if err := policyRuleResults.ParsePromMetrics(*ws.promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.AdmissionRequest, resourceRequestOperationPromAlias, admissionRequestTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_results_info metrics for the above policy", "name", policy.Name)
}
}
func (ws *WebhookServer) registerPolicyRuleExecutionLatencyMetricMutate(logger logr.Logger, resourceRequestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
resourceRequestOperationPromAlias, err := policyRuleExecutionLatency.ParseResourceRequestOperation(resourceRequestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
}
if err := policyRuleExecutionLatency.ParsePromMetrics(*ws.promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.AdmissionRequest, "", resourceRequestOperationPromAlias, admissionRequestTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
}
}
type mutateStats struct {

View file

@ -19,8 +19,11 @@ import (
"github.com/kyverno/kyverno/pkg/config"
client "github.com/kyverno/kyverno/pkg/dclient"
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/generate"
"github.com/kyverno/kyverno/pkg/metrics"
admissionReviewLatency "github.com/kyverno/kyverno/pkg/metrics/admissionreviewlatency"
"github.com/kyverno/kyverno/pkg/openapi"
"github.com/kyverno/kyverno/pkg/policycache"
"github.com/kyverno/kyverno/pkg/policyreport"
@ -130,6 +133,8 @@ type WebhookServer struct {
grController *generate.Controller
debug bool
promConfig *metrics.PromConfig
}
// NewWebhookServer creates new instance of WebhookServer accordingly to given configuration
@ -161,6 +166,7 @@ func NewWebhookServer(
resCache resourcecache.ResourceCache,
grc *generate.Controller,
debug bool,
promConfig *metrics.PromConfig,
) (*WebhookServer, error) {
if tlsPair == nil {
@ -208,6 +214,7 @@ func NewWebhookServer(
openAPIController: openAPIController,
resCache: resCache,
debug: debug,
promConfig: promConfig,
}
mux := httprouter.New()
@ -308,6 +315,8 @@ func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1
}
logger.V(6).Info("received an admission request in mutating webhook")
// timestamp at which this admission request got triggered
admissionRequestTimestamp := time.Now().Unix()
mutatePolicies := ws.pCache.GetPolicyObject(policycache.Mutate, request.Kind.Kind, "")
generatePolicies := ws.pCache.GetPolicyObject(policycache.Generate, request.Kind.Kind, "")
@ -355,18 +364,30 @@ func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1
patchedResource := request.Object.Raw
// MUTATION
patches = ws.HandleMutation(request, resource, mutatePolicies, ctx, userRequestInfo)
var triggeredMutatePolicies []v1.ClusterPolicy
var mutateEngineResponses []*response.EngineResponse
patches, triggeredMutatePolicies, mutateEngineResponses = ws.HandleMutation(request, resource, mutatePolicies, ctx, userRequestInfo, admissionRequestTimestamp)
logger.V(6).Info("", "generated patches", string(patches))
// patch the resource with patches before handling validation rules
patchedResource = processResourceWithPatches(patches, request.Object.Raw, logger)
logger.V(6).Info("", "patchedResource", string(patchedResource))
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
// registering the kyverno_admission_review_latency_milliseconds metric concurrently
go registerAdmissionReviewLatencyMetricMutate(logger, *ws.promConfig.Metrics, string(request.Operation), mutateEngineResponses, triggeredMutatePolicies, admissionReviewLatencyDuration)
// GENERATE
newRequest := request.DeepCopy()
newRequest.Object.Raw = patchedResource
go ws.HandleGenerate(newRequest, generatePolicies, ctx, userRequestInfo, ws.configHandler)
// this channel will be used to transmit the admission review latency from the ws.HandleGenerate(...) goroutine to the registerAdmissionReviewLatencyMetricGenerate(...) goroutine
admissionReviewCompletionLatencyChannel := make(chan int64, 1)
go ws.HandleGenerate(newRequest, generatePolicies, ctx, userRequestInfo, ws.configHandler, admissionRequestTimestamp, &admissionReviewCompletionLatencyChannel)
// registering the kyverno_admission_review_latency_milliseconds metric concurrently
go registerAdmissionReviewLatencyMetricGenerate(logger, *ws.promConfig.Metrics, string(request.Operation), mutateEngineResponses, triggeredMutatePolicies, &admissionReviewCompletionLatencyChannel)
patchType := v1beta1.PatchTypeJSONPatch
return &v1beta1.AdmissionResponse{
Allowed: true,
@ -378,6 +399,29 @@ func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1
}
}
func registerAdmissionReviewLatencyMetricMutate(logger logr.Logger, promMetrics metrics.PromMetrics, requestOperation string, engineResponses []*response.EngineResponse, triggeredPolicies []v1.ClusterPolicy, admissionReviewLatencyDuration int64) {
resourceRequestOperationPromAlias, err := admissionReviewLatency.ParseResourceRequestOperation(requestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
if err := admissionReviewLatency.ParsePromMetrics(promMetrics).ProcessEngineResponses(engineResponses, triggeredPolicies, admissionReviewLatencyDuration, resourceRequestOperationPromAlias); err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
}
func registerAdmissionReviewLatencyMetricGenerate(logger logr.Logger, promMetrics metrics.PromMetrics, requestOperation string, engineResponses []*response.EngineResponse, triggeredPolicies []v1.ClusterPolicy, latencyReceiver *chan int64) {
defer close(*latencyReceiver)
resourceRequestOperationPromAlias, err := admissionReviewLatency.ParseResourceRequestOperation(requestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
// this goroutine waits here until it receives the admission review latency (int64) from the other goroutine, i.e. ws.HandleGenerate
admissionReviewLatencyDuration := <-(*latencyReceiver)
if err := admissionReviewLatency.ParsePromMetrics(promMetrics).ProcessEngineResponses(engineResponses, triggeredPolicies, admissionReviewLatencyDuration, resourceRequestOperationPromAlias); err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
}
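The latency handoff between ws.HandleGenerate and the function above is a small producer/consumer pattern over a buffered channel: the producer sends the measured latency once it finishes, and the consumer blocks on the receive, records the metric, and closes the channel. A standalone sketch of the same pattern (all names here are illustrative, not taken from this change):

package main

import (
	"fmt"
	"time"
)

func main() {
	// Buffered with capacity 1 so the sender never blocks waiting for the receiver.
	latencyCh := make(chan int64, 1)

	// Producer: stands in for ws.HandleGenerate reporting how long it took.
	go func(out *chan int64) {
		start := time.Now()
		time.Sleep(50 * time.Millisecond) // placeholder for real work
		*out <- int64(time.Since(start))
	}(&latencyCh)

	// Consumer: stands in for the metric-registering goroutine.
	func(in *chan int64) {
		defer close(*in)
		latency := <-(*in)
		fmt.Println("observed latency:", time.Duration(latency))
	}(&latencyCh)
}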
func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
logger := ws.log.WithName("Validate").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
if request.Operation == v1beta1.Delete {
@ -394,6 +438,8 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
}
logger.V(6).Info("received an admission request in validating webhook")
// timestamp at which this admission request got triggered
admissionRequestTimestamp := time.Now().Unix()
policies := ws.pCache.GetPolicyObject(policycache.ValidateEnforce, request.Kind.Kind, "")
// Get namespace policies from the cache for the requested resource namespace
@ -445,7 +491,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
namespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, ws.nsLister, logger)
}
ok, msg := HandleValidation(request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.prGenerator, ws.log, ws.configHandler, ws.resCache, ws.client, namespaceLabels)
ok, msg := HandleValidation(ws.promConfig, request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.prGenerator, ws.log, ws.configHandler, ws.resCache, ws.client, namespaceLabels, admissionRequestTimestamp)
if !ok {
logger.Info("admission request denied")
return &v1beta1.AdmissionResponse{

View file

@ -11,6 +11,7 @@ import (
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/metrics"
"github.com/kyverno/kyverno/pkg/policycache"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
@ -60,6 +61,7 @@ type auditHandler struct {
log logr.Logger
configHandler config.Interface
resCache resourcecache.ResourceCache
promConfig *metrics.PromConfig
}
// NewValidateAuditHandler returns a new instance of audit policy handler
@ -73,7 +75,8 @@ func NewValidateAuditHandler(pCache policycache.Interface,
log logr.Logger,
dynamicConfig config.Interface,
resCache resourcecache.ResourceCache,
client *client.Client) AuditHandler {
client *client.Client,
promConfig *metrics.PromConfig) AuditHandler {
return &auditHandler{
pCache: pCache,
@ -91,6 +94,7 @@ func NewValidateAuditHandler(pCache policycache.Interface,
configHandler: dynamicConfig,
resCache: resCache,
client: client,
promConfig: promConfig,
}
}
@ -147,7 +151,8 @@ func (h *auditHandler) processNextWorkItem() bool {
func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
var roles, clusterRoles []string
var err error
// time at which the corresponding admission request's processing was initiated
admissionRequestTimestamp := time.Now().Unix()
logger := h.log.WithName("process")
policies := h.pCache.GetPolicyObject(policycache.ValidateAudit, request.Kind.Kind, "")
// Get namespace policies from the cache for the requested resource namespace
@ -176,7 +181,7 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
namespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, logger)
}
HandleValidation(request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.prGenerator, logger, h.configHandler, h.resCache, h.client, namespaceLabels)
HandleValidation(h.promConfig, request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.prGenerator, logger, h.configHandler, h.resCache, h.client, namespaceLabels, admissionRequestTimestamp)
return nil
}

View file

@ -5,16 +5,19 @@ import (
"sort"
"time"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/config"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/engine"
"github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/response"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/metrics"
admissionReviewLatency "github.com/kyverno/kyverno/pkg/metrics/admissionreviewlatency"
policyRuleExecutionLatency "github.com/kyverno/kyverno/pkg/metrics/policyruleexecutionlatency"
policyRuleResults "github.com/kyverno/kyverno/pkg/metrics/policyruleresults"
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/resourcecache"
@ -28,6 +31,7 @@ import (
// If there are no errors in validating rule we apply generation rules
// patchedResource is the (resource + patches) after applying mutation rules
func HandleValidation(
promConfig *metrics.PromConfig,
request *v1beta1.AdmissionRequest,
policies []*kyverno.ClusterPolicy,
patchedResource []byte,
@ -40,7 +44,8 @@ func HandleValidation(
dynamicConfig config.Interface,
resCache resourcecache.ResourceCache,
client *client.Client,
namespaceLabels map[string]string) (bool, string) {
namespaceLabels map[string]string,
admissionRequestTimestamp int64) (bool, string) {
if len(policies) == 0 {
return true, ""
@ -88,6 +93,7 @@ func HandleValidation(
}
var engineResponses []*response.EngineResponse
var triggeredPolicies []kyverno.ClusterPolicy
for _, policy := range policies {
logger.V(3).Info("evaluating policy", "policy", policy.Name)
policyContext.Policy = *policy
@ -99,7 +105,13 @@ func HandleValidation(
continue
}
// registering the kyverno_policy_rule_results_info metric concurrently
go registerPolicyRuleResultsMetricValidation(promConfig, logger, string(request.Operation), policyContext.Policy, *engineResponse, admissionRequestTimestamp)
// registering the kyverno_policy_rule_execution_latency_milliseconds metric concurrently
go registerPolicyRuleExecutionLatencyMetricValidate(promConfig, logger, string(request.Operation), policyContext.Policy, *engineResponse, admissionRequestTimestamp)
engineResponses = append(engineResponses, engineResponse)
triggeredPolicies = append(triggeredPolicies, *policy)
statusListener.Update(validateStats{
resp: engineResponse,
namespace: policy.Namespace,
@ -133,6 +145,9 @@ func HandleValidation(
eventGen.Add(events...)
if blocked {
logger.V(4).Info("resource blocked")
// registering the kyverno_admission_review_latency_milliseconds metric concurrently
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
go registerAdmissionReviewLatencyMetricValidate(promConfig, logger, string(request.Operation), engineResponses, triggeredPolicies, admissionReviewLatencyDuration)
return false, getEnforceFailureErrorMsg(engineResponses)
}
@ -144,9 +159,43 @@ func HandleValidation(
prInfos := policyreport.GeneratePRsFromEngineResponse(engineResponses, logger)
prGenerator.Add(prInfos...)
// registering the kyverno_admission_review_latency_milliseconds metric concurrently
admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
go registerAdmissionReviewLatencyMetricValidate(promConfig, logger, string(request.Operation), engineResponses, triggeredPolicies, admissionReviewLatencyDuration)
return true, ""
}
func registerPolicyRuleResultsMetricValidation(promConfig *metrics.PromConfig, logger logr.Logger, requestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
resourceRequestOperationPromAlias, err := policyRuleResults.ParseResourceRequestOperation(requestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_results_info metrics for the above policy", "name", policy.Name)
}
if err := policyRuleResults.ParsePromMetrics(*promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.AdmissionRequest, resourceRequestOperationPromAlias, admissionRequestTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_results_info metrics for the above policy", "name", policy.Name)
}
}
func registerPolicyRuleExecutionLatencyMetricValidate(promConfig *metrics.PromConfig, logger logr.Logger, requestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
resourceRequestOperationPromAlias, err := policyRuleExecutionLatency.ParseResourceRequestOperation(requestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
}
if err := policyRuleExecutionLatency.ParsePromMetrics(*promConfig.Metrics).ProcessEngineResponse(policy, engineResponse, metrics.AdmissionRequest, "", resourceRequestOperationPromAlias, admissionRequestTimestamp); err != nil {
logger.Error(err, "error occurred while registering kyverno_policy_rule_execution_latency_milliseconds metrics for the above policy", "name", policy.Name)
}
}
func registerAdmissionReviewLatencyMetricValidate(promConfig *metrics.PromConfig, logger logr.Logger, requestOperation string, engineResponses []*response.EngineResponse, triggeredPolicies []kyverno.ClusterPolicy, admissionReviewLatencyDuration int64) {
resourceRequestOperationPromAlias, err := admissionReviewLatency.ParseResourceRequestOperation(requestOperation)
if err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
if err := admissionReviewLatency.ParsePromMetrics(*promConfig.Metrics).ProcessEngineResponses(engineResponses, triggeredPolicies, admissionReviewLatencyDuration, resourceRequestOperationPromAlias); err != nil {
logger.Error(err, "error occurred while registering kyverno_admission_review_latency_milliseconds metrics")
}
}
func buildDeletionPrInfo(oldR unstructured.Unstructured) policyreport.Info {
return policyreport.Info{
Namespace: oldR.GetNamespace(),

View file

@ -0,0 +1,22 @@
package metrics
import (
"github.com/kyverno/kyverno/test/e2e"
. "github.com/onsi/gomega"
"os"
"testing"
)
func Test_MetricsServerAvailability(t *testing.T) {
RegisterTestingT(t)
if os.Getenv("E2E") == "" {
t.Skip("Skipping E2E Test")
}
requestObj := e2e.APIRequest{
URL: "http://localhost:8000/metrics",
Type: "GET",
}
response, err := e2e.CallAPI(requestObj)
Expect(err).NotTo(HaveOccurred())
Expect(response.StatusCode).To(Equal(200))
}
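Beyond the availability check above, the same test could also scrape the response body and assert that a Kyverno metric family is present. A sketch of such an extension; the extra io/strings imports and the chosen metric name are assumptions, not part of this file:

	defer response.Body.Close()
	body, err := io.ReadAll(response.Body)
	Expect(err).NotTo(HaveOccurred())
	// kyverno_policy_rule_info_total is one of the metric families introduced by this change.
	Expect(strings.Contains(string(body), "kyverno_policy_rule_info_total")).To(BeTrue())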

View file

@ -2,6 +2,9 @@ package e2e
import (
"context"
"fmt"
"io"
"net/http"
"os"
"time"
@ -18,6 +21,12 @@ type E2EClient struct {
Client dynamic.Interface
}
type APIRequest struct {
URL string
Type string
Body io.Reader
}
// NewE2EClient returns a new instance of E2EClient
func NewE2EClient() (*E2EClient, error) {
kubeconfig := os.Getenv("KUBECONFIG")
@ -124,3 +133,29 @@ func (e2e *E2EClient) CreateClusteredResourceYaml(gvr schema.GroupVersionResourc
result, err := e2e.CreateClusteredResource(gvr, &resource)
return result, err
}
func CallAPI(request APIRequest) (*http.Response, error) {
var response *http.Response
switch request.Type {
case "GET":
resp, err := http.Get(request.URL)
if err != nil {
return nil, fmt.Errorf("error occurred while calling %s: %w", request.URL, err)
}
response = resp
case "POST", "PUT", "DELETE", "PATCH":
req, err := http.NewRequest(string(request.Type), request.URL, request.Body)
if err != nil {
return nil, fmt.Errorf("error occurred while calling %s: %w", request.URL, err)
}
resp, err := http.DefaultClient.Do(req)
if err != nil {
return nil, fmt.Errorf("error occurred while calling %s: %w", request.URL, err)
}
response = resp
default:
return nil, fmt.Errorf("error occurred while calling %s: wrong request type found", request.URL)
}
return response, nil
}
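A usage sketch for the helper above, e.g. issuing a POST with a JSON body; the URL, payload, and the strings import are purely illustrative:

	req := e2e.APIRequest{
		URL:  "http://localhost:8000/some-endpoint",
		Type: "POST",
		Body: strings.NewReader(`{"key":"value"}`),
	}
	resp, err := e2e.CallAPI(req)
	if err != nil {
		// handle the error, e.g. fail the test
	}
	defer resp.Body.Close()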