Feature/cosign (#2078)
* add image verification
* inline policy list Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* cosign version and dependencies updates Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* add registry initialization Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* add build tag to exclude k8schain for cloud providers Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* add build tag to exclude k8schain for cloud providers Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* generate deep copy and other fixtures Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix deep copy issues Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* mutate images to add digest Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* add certificates to Kyverno container for HTTPS lookups Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* align flag syntax Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* update docs Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* update dependencies Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* update dependencies Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* patch image with digest and fix checks Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* hardcode image for demos Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* add default registry (docker.io) before calling reference.Parse Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix definition Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* increase webhook timeout Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix args Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* run gofmt Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* rename for clarity Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix HasImageVerify check Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* align make test commands Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* align make test commands Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* align make test commands Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix linter error Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* format Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* handle API conflict and retry Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* format Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix reviewdog issues Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix make for unit tests Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* improve error message Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix durations Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* handle errors in tests Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* print policy name Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* update tests Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* add retries and duration to error log Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix time check in tests Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* round creation times in test Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix retry loop Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* remove timing check for policy creation Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix e2e error - policy not found Signed-off-by: Shuting Zhao <shutting06@gmail.com>
* update string comparison method Signed-off-by: Shuting Zhao <shutting06@gmail.com>
* fix test Generate_Namespace_Label_Actions Signed-off-by: Shuting Zhao <shutting06@gmail.com>
* add debug info for e2e tests Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix error Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix generate bug Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix format Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* add check for update operations Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* increase time for deleting a resource Signed-off-by: Jim Bugwadia <jim@nirmata.com>
* fix check Signed-off-by: Jim Bugwadia <jim@nirmata.com>

Co-authored-by: Shuting Zhao <shutting06@gmail.com>
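One of the changes listed above, "add default registry (docker.io) before calling reference.Parse", handles bare image names that carry no registry host. The sketch below shows the general idea only: addDefaultRegistry is a hypothetical helper, not the commit's actual code, though the reference package is the one declared as a dependency in go.mod.

package main

import (
	"fmt"
	"strings"

	"github.com/distribution/distribution/reference"
)

// addDefaultRegistry prepends docker.io when the first path segment does not
// look like a registry host (no dot or port, and not "localhost").
func addDefaultRegistry(image string) string {
	parts := strings.SplitN(image, "/", 2)
	if len(parts) == 1 || (!strings.ContainsAny(parts[0], ".:") && parts[0] != "localhost") {
		return "docker.io/" + image
	}
	return image
}

func main() {
	for _, img := range []string{"nginx:1.21", "ghcr.io/kyverno/kyverno:v1.4.1"} {
		normalized := addDefaultRegistry(img)
		if _, err := reference.Parse(normalized); err != nil {
			fmt.Println("parse failed:", err)
			continue
		}
		fmt.Println(img, "->", normalized)
	}
}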
This commit is contained in:
parent 20ac2a6556
commit 13caaed8b7
71 changed files with 6762 additions and 984 deletions
.github/workflows/build.yaml (vendored) - 12 changes
@@ -20,7 +20,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -60,7 +60,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -93,7 +93,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -126,7 +126,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -159,7 +159,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -172,7 +172,7 @@ jobs:
       - name: Kyverno unit test
         run: |
           export PROJECT_PATH=$(pwd)
-          make test-all
+          make test-unit

   helm-tests:
     runs-on: ubuntu-latest
.github/workflows/e2e.yaml (vendored) - 4 changes
@@ -28,7 +28,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Set up Helm
         uses: azure/setup-helm@v1
@@ -48,7 +48,7 @@ jobs:

       - name: Test Policy
         run: |
-          make run_testcmd_policy
+          make test-cmd

       - name: gofmt check
         run: |
.github/workflows/fossa.yml (vendored) - 2 changes
@@ -12,7 +12,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.14.x"
+          go-version: "^1.16.x"
       - run: go version
       # Runs a set of commands to initialize and analyze with FOSSA
       - name: run FOSSA analysis
.github/workflows/image.yaml (vendored) - 6 changes
@@ -16,7 +16,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: login to GitHub Container Registry
         run: echo ${{ secrets.CR_PAT }} | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin
@@ -43,7 +43,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: login to GitHub Container Registry
         run: echo ${{ secrets.CR_PAT }} | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin
@@ -70,7 +70,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: login to GitHub Container Registry
         run: echo ${{ secrets.CR_PAT }} | docker login ghcr.io -u ${{ github.repository_owner }} --password-stdin
.github/workflows/release.yaml (vendored) - 16 changes
@@ -15,7 +15,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -28,7 +28,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16
       - uses: creekorful/goreportcard-action@v1.0

       - name: login to GitHub Container Registry
@@ -55,7 +55,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -68,7 +68,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16
       - uses: creekorful/goreportcard-action@v1.0

       - name: login to GitHub Container Registry
@@ -95,7 +95,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -108,7 +108,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16
       - uses: creekorful/goreportcard-action@v1.0

       - name: login to GitHub Container Registry
@@ -139,7 +139,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16

       - name: Cache Go modules
         uses: actions/cache@v1
@@ -152,7 +152,7 @@ jobs:
       - name: Set up Go
         uses: actions/setup-go@v2
         with:
-          go-version: 1.14
+          go-version: 1.16
       - uses: creekorful/goreportcard-action@v1.0

       - name: login to GitHub Container Registry
Makefile - 30 changes
@@ -7,6 +7,7 @@ GIT_VERSION := $(shell git describe --always --tags)
 GIT_BRANCH := $(shell git branch | grep \* | cut -d ' ' -f2)
 GIT_HASH := $(GIT_BRANCH)/$(shell git log -1 --pretty=format:"%H")
 TIMESTAMP := $(shell date '+%Y-%m-%d_%I:%M:%S%p')
 CONTROLLER_GEN=controller-gen
 CONTROLLER_GEN_REQ_VERSION := v0.4.0

 REGISTRY?=ghcr.io
@@ -16,6 +17,10 @@ GOOS ?= $(shell go env GOOS)
 PACKAGE ?=github.com/kyverno/kyverno
 LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKAGE)/pkg/version.BuildHash=$(GIT_HASH) -X $(PACKAGE)/pkg/version.BuildTime=$(TIMESTAMP)"

+# Used to disable inclusion of cloud provider code in k8schain
+# https://github.com/google/go-containerregistry/tree/main/pkg/authn/k8schain
+TAGS=disable_aws,disable_azure,disable_gcp
+
 ##################################
 # KYVERNO
 ##################################
@@ -64,28 +69,28 @@ KYVERNO_PATH := cmd/kyverno
 KYVERNO_IMAGE := kyverno

 local:
-	go build -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)
-	go build -ldflags=$(LD_FLAGS) $(PWD)/$(CLI_PATH)
+	go build -tags $(TAGS) -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)
+	go build -tags $(TAGS) -ldflags=$(LD_FLAGS) $(PWD)/$(CLI_PATH)

 kyverno: fmt vet
-	GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
+	GOOS=$(GOOS) go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -tags $(TAGS) -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go

 docker-publish-kyverno: docker-build-kyverno docker-push-kyverno

 docker-build-kyverno:
-	@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS)
+	@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TAGS=$(TAGS)

-docker-build-local-kyverno:
-	CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
+docker-build-kyverno-local:
+	CGO_ENABLED=0 GOOS=linux go build -o $(PWD)/$(KYVERNO_PATH)/kyverno -tags $(TAGS) -ldflags=$(LD_FLAGS) $(PWD)/$(KYVERNO_PATH)/main.go
 	@docker build -f $(PWD)/$(KYVERNO_PATH)/localDockerfile -t $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(PWD)/$(KYVERNO_PATH)
 	@docker tag $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) $(REPO)/$(KYVERNO_IMAGE):latest

 docker-build-kyverno-amd64:
-	@docker build -f $(PWD)/$(KYVERNO_PATH)/Dockerfile -t $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TARGETPLATFORM="linux/amd64"
+	@docker build -f $(PWD)/$(KYVERNO_PATH)/Dockerfile -t $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TARGETPLATFORM="linux/amd64" --build-arg TAGS=$(TAGS)

 docker-push-kyverno:
-	@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS)
-	@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):latest . --build-arg LD_FLAGS=$(LD_FLAGS)
+	@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TAGS=$(TAGS)
+	@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):latest . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TAGS=$(TAGS)

 ##################################
@@ -156,9 +161,12 @@ $(GO_ACC):
 # go-acc merges the result for pks so that it be used by
 # go tool cover for reporting

+test: test-unit test-e2e test-cmd
+
 # go get downloads and installs the binary
 # we temporarily add the GO_ACC to the path
-test-all: $(GO_ACC)
+test-unit: $(GO_ACC)
 	@echo "	running unit tests"
 	go-acc ./... -o $(CODE_COVERAGE_FILE_TXT)
@@ -177,7 +185,7 @@ test-e2e:
 	$(eval export E2E="")

 #Test TestCmd Policy
-run_testcmd_policy: cli
+test-cmd: cli
 	$(PWD)/$(CLI_PATH)/kyverno test https://github.com/kyverno/policies/main
 	$(PWD)/$(CLI_PATH)/kyverno test ./test/cli/test
 	$(PWD)/$(CLI_PATH)/kyverno test ./test/cli/test-fail/missing-policy && exit 1 || exit 0
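The TAGS list that the Makefile now threads into every go build and docker build works through Go build constraints. Below is a minimal sketch of how one of those tags excludes a file at compile time; the file and function are hypothetical, not part of this repository (the real constrained files live in go-containerregistry's k8schain package).

// +build !disable_aws

// Hypothetical example file: compiled only when the disable_aws tag is NOT set,
// i.e. a plain `go build`. With `go build -tags disable_aws`, as TAGS arranges,
// the whole file is dropped from the build along with its dependencies.
package k8schain

// awsCredentialHelper stands in for the AWS-specific code the tag excludes.
func awsCredentialHelper() string {
	return "aws keychain enabled"
}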
@@ -1,6 +1,6 @@
 # Multi-stage docker build
 # Build stage
-FROM golang:1.14 AS builder
+FROM golang:1.16 AS builder

 LABEL maintainer="Kyverno"
@@ -1,6 +1,6 @@
 # Multi-stage docker build
 # Build stage
-FROM golang:1.14 AS builder
+FROM golang:1.16 AS builder

 LABEL maintainer="Kyverno"
@@ -1,12 +1,13 @@
 # Multi-stage docker build
 # Build stage
-FROM golang:1.14 AS builder
+FROM golang:1.16 AS builder

 LABEL maintainer="Kyverno"

 # LD_FLAGS is passed as argument from Makefile. It will be empty, if no argument passed
 ARG LD_FLAGS
 ARG TARGETPLATFORM
+ARG TAGS

 ADD . /kyverno
 WORKDIR /kyverno
@@ -16,15 +17,15 @@ RUN export GOOS=$(echo ${TARGETPLATFORM} | cut -d / -f1) && \

 RUN go env

-RUN CGO_ENABLED=0 go build -o /output/kyverno -ldflags="${LD_FLAGS}" -v ./cmd/kyverno/
+RUN CGO_ENABLED=0 go build -o /output/kyverno -tags "${TAGS}" -ldflags="${LD_FLAGS}" -v ./cmd/kyverno/

 # Packaging stage
 FROM scratch

 LABEL maintainer="Kyverno"

 COPY --from=builder /output/kyverno /
 COPY --from=builder /etc/passwd /etc/passwd
 COPY --from=builder /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/

 USER 10001
cmd/kyverno/ca-certificates.crt (new file) - 3293 additions
File diff suppressed because it is too large.
@@ -1,7 +1,5 @@
 FROM scratch

 ADD kyverno /kyverno

 ADD ca-certificates.crt /etc/ssl/certs/
 USER 10001

 ENTRYPOINT ["/kyverno"]
@@ -4,9 +4,11 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"github.com/kyverno/kyverno/pkg/cosign"
 	"net/http"
 	_ "net/http/pprof"
 	"os"
+	"strings"
 	"time"

 	"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -46,21 +48,19 @@ const resyncPeriod = 15 * time.Minute
 var (
 	//TODO: this has been added to backward support command line arguments
 	// will be removed in future and the configuration will be set only via configmaps
-	filterK8sResources string
-	kubeconfig string
-	serverIP string
-	excludeGroupRole string
-	excludeUsername string
-	profilePort string
-	metricsPort string
-
-	webhookTimeout int
-	genWorkers int
-
-	profile bool
-	disableMetricsExport bool
-
+	filterK8sResources string
+	kubeconfig string
+	serverIP string
+	excludeGroupRole string
+	excludeUsername string
+	profilePort string
+	metricsPort string
+	webhookTimeout int
+	genWorkers int
+	profile bool
+	disableMetricsExport bool
+	policyControllerResyncPeriod time.Duration
+	imagePullSecrets string
 	setupLog = log.Log.WithName("setup")
 )
@@ -75,10 +75,12 @@ func main() {
 	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
 	flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
 	flag.BoolVar(&profile, "profile", false, "Set this flag to 'true', to enable profiling.")
-	flag.StringVar(&profilePort, "profile-port", "6060", "Enable profiling at given port, default to 6060.")
+	flag.StringVar(&profilePort, "profile-port", "6060", "Enable profiling at given port, defaults to 6060.")
 	flag.BoolVar(&disableMetricsExport, "disable-metrics", false, "Set this flag to 'true', to enable exposing the metrics.")
 	flag.StringVar(&metricsPort, "metrics-port", "8000", "Expose prometheus metrics at the given port, default to 8000.")
+	flag.DurationVar(&policyControllerResyncPeriod, "background-scan", time.Hour, "Perform background scan every given interval, e.g., 30s, 15m, 1h.")
+	flag.StringVar(&imagePullSecrets, "imagePullSecrets", "", "Secret resource names for image registry access credentials")

 	if err := flag.Set("v", "2"); err != nil {
 		setupLog.Error(err, "failed to set log level")
 		os.Exit(1)
@@ -109,7 +111,6 @@ func main() {
 			os.Exit(1)
 		}
 		}()
-
 	}

 	if !disableMetricsExport {
@@ -117,10 +118,10 @@ func main() {
 		metricsServerMux = http.NewServeMux()
 		metricsServerMux.Handle("/metrics", promhttp.HandlerFor(promConfig.MetricsRegistry, promhttp.HandlerOpts{Timeout: 10 * time.Second}))
 		metricsAddr := ":" + metricsPort
-		setupLog.Info("Enable exposure of metrics, see details at https://github.com/kyverno/kyverno/wiki/Metrics-Kyverno-on-Kubernetes", "port", metricsPort)
 		go func() {
+			setupLog.Info("enabling metrics service", "address", metricsAddr)
 			if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
-				setupLog.Error(err, "Failed to enable exposure of metrics")
+				setupLog.Error(err, "failed to enable metrics service", "address", metricsAddr)
 				os.Exit(1)
 			}
 		}()
@@ -164,6 +165,17 @@ func main() {
 	rCache, err := resourcecache.NewResourceCache(client, kubedynamicInformer, log.Log.WithName("resourcecache"))
 	if err != nil {
 		setupLog.Error(err, "ConfigMap lookup disabled: failed to create resource cache")
 		os.Exit(1)
 	}

+	// load image registry secrets
+	secrets := strings.Split(imagePullSecrets, ",")
+	if imagePullSecrets != "" && len(secrets) > 0 {
+		setupLog.Info("initializing registry credentials", "secrets", secrets)
+		if err := cosign.Initialize(kubeClient, config.KyvernoNamespace, "", secrets); err != nil {
+			setupLog.Error(err, "failed to initialize image pull secrets")
+			os.Exit(1)
+		}
+	}
+
 	// KYVERNO CRD INFORMER
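A subtlety in the registry-secrets wiring above: strings.Split never returns an empty slice, so len(secrets) > 0 is always true, and the imagePullSecrets != "" check is what actually guards cosign.Initialize. A runnable illustration:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// Splitting the empty string still yields one element: [""].
	fmt.Println(len(strings.Split("", ",")))            // 1
	fmt.Println(strings.Split("regcred,gcr-cred", ",")) // [regcred gcr-cred]
	// So the guard `imagePullSecrets != "" && len(secrets) > 0` relies on the
	// first condition; the second alone would admit an empty secret name.
}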
@@ -600,14 +600,12 @@ spec:
     maxLength: 63
   type: string
 preconditions:
-  description: AnyAllConditions enable variable-based conditional
-    rule execution. This is useful for finer control of when an
-    rule is applied. A condition can reference object data using
-    JMESPath notation. This too can be made to happen in a logical-manner
-    where in some situation all the conditions need to pass and
-    in some other situation, atleast one condition is enough to
-    pass. For the sake of backwards compatibility, it can be populated
-    with []kyverno.Condition.
+  description: 'Preconditions are used to determine if a policy
+    rule should be applied by evaluating a set of conditions.
+    The declaration can contain nested `any` or `all` statements.
+    A direct list of conditions (without `any` or `all` statements
+    is supported for backwards compatibility but will be deprecated
+    in the next major release. See: https://kyverno.io/docs/writing-policies/preconditions/'
   x-kubernetes-preserve-unknown-fields: true
 validate:
   description: Validation is used to validate matching resources.
@@ -618,13 +616,15 @@ spec:
     validation rule to succeed.
   x-kubernetes-preserve-unknown-fields: true
   deny:
-    description: Deny defines conditions to fail the validation
-      rule.
+    description: Deny defines conditions used to pass or fail
+      a validation rule.
     properties:
       conditions:
-        description: specifies the set of conditions to deny
-          in a logical manner For the sake of backwards compatibility,
-          it can be populated with []kyverno.Condition.
+        description: 'Multiple conditions can be declared under
+          an `any` or `all` statement. A direct list of conditions
+          (without `any` or `all` statements) is also supported
+          for backwards compatibility but will be deprecated
+          in the next major release. See: https://kyverno.io/docs/writing-policies/validate/#deny-rules'
        x-kubernetes-preserve-unknown-fields: true
     type: object
   message:
@@ -636,6 +636,26 @@ spec:
     used to check resources.
   x-kubernetes-preserve-unknown-fields: true
   type: object
+verifyImages:
+  description: VerifyImages is used to verify image signatures
+    and mutate them to add a digest
+  items:
+    description: ImageVerification validates that images that
+      match the specified pattern are signed with the supplied
+      public key. Once the image is verified it is mutated to
+      include the SHA digest retrieved during the registration.
+    properties:
+      image:
+        description: 'Image is the image name consisting of the
+          registry address, repository, image, and tag. Wildcards
+          (''*'' and ''?'') are allowed. See: https://kubernetes.io/docs/concepts/containers/images.'
+        type: string
+      key:
+        description: Key is the PEM encoded public key that the
+          image is signed with.
+        type: string
+    type: object
+  type: array
 type: object
 type: array
 validationFailureAction:
@@ -601,14 +601,12 @@ spec:
     maxLength: 63
   type: string
 preconditions:
-  description: AnyAllConditions enable variable-based conditional
-    rule execution. This is useful for finer control of when an
-    rule is applied. A condition can reference object data using
-    JMESPath notation. This too can be made to happen in a logical-manner
-    where in some situation all the conditions need to pass and
-    in some other situation, atleast one condition is enough to
-    pass. For the sake of backwards compatibility, it can be populated
-    with []kyverno.Condition.
+  description: 'Preconditions are used to determine if a policy
+    rule should be applied by evaluating a set of conditions.
+    The declaration can contain nested `any` or `all` statements.
+    A direct list of conditions (without `any` or `all` statements
+    is supported for backwards compatibility but will be deprecated
+    in the next major release. See: https://kyverno.io/docs/writing-policies/preconditions/'
   x-kubernetes-preserve-unknown-fields: true
 validate:
   description: Validation is used to validate matching resources.
@@ -619,13 +617,15 @@ spec:
     validation rule to succeed.
   x-kubernetes-preserve-unknown-fields: true
   deny:
-    description: Deny defines conditions to fail the validation
-      rule.
+    description: Deny defines conditions used to pass or fail
+      a validation rule.
     properties:
      conditions:
-        description: specifies the set of conditions to deny
-          in a logical manner For the sake of backwards compatibility,
-          it can be populated with []kyverno.Condition.
+        description: 'Multiple conditions can be declared under
+          an `any` or `all` statement. A direct list of conditions
+          (without `any` or `all` statements) is also supported
+          for backwards compatibility but will be deprecated
+          in the next major release. See: https://kyverno.io/docs/writing-policies/validate/#deny-rules'
        x-kubernetes-preserve-unknown-fields: true
     type: object
   message:
@@ -637,6 +637,26 @@ spec:
     used to check resources.
   x-kubernetes-preserve-unknown-fields: true
   type: object
+verifyImages:
+  description: VerifyImages is used to verify image signatures
+    and mutate them to add a digest
+  items:
+    description: ImageVerification validates that images that
+      match the specified pattern are signed with the supplied
+      public key. Once the image is verified it is mutated to
+      include the SHA digest retrieved during the registration.
+    properties:
+      image:
+        description: 'Image is the image name consisting of the
+          registry address, repository, image, and tag. Wildcards
+          (''*'' and ''?'') are allowed. See: https://kubernetes.io/docs/concepts/containers/images.'
+        type: string
+      key:
+        description: Key is the PEM encoded public key that the
+          image is signed with.
+        type: string
+    type: object
+  type: array
 type: object
 type: array
 validationFailureAction:
@@ -654,6 +654,19 @@ spec:
     used to check resources.
   x-kubernetes-preserve-unknown-fields: true
   type: object
+verifyImages:
+  description: VerifyImages is used to verify image signatures and mutate them to add a digest
+  items:
+    description: ImageVerification validates that images that match the specified pattern are signed with the supplied public key. Once the image is verified it is mutated to include the SHA digest retrieved during the registration.
+    properties:
+      image:
+        description: 'Image is the image name consisting of the registry address, repository, image, and tag. Wildcards (''*'' and ''?'') are allowed. See: https://kubernetes.io/docs/concepts/containers/images.'
+        type: string
+      key:
+        description: Key is the PEM encoded public key that the image is signed with.
+        type: string
+    type: object
+  type: array
 type: object
 type: array
 validationFailureAction:
@@ -2269,6 +2282,19 @@ spec:
     used to check resources.
   x-kubernetes-preserve-unknown-fields: true
   type: object
+verifyImages:
+  description: VerifyImages is used to verify image signatures and mutate them to add a digest
+  items:
+    description: ImageVerification validates that images that match the specified pattern are signed with the supplied public key. Once the image is verified it is mutated to include the SHA digest retrieved during the registration.
+    properties:
+      image:
+        description: 'Image is the image name consisting of the registry address, repository, image, and tag. Wildcards (''*'' and ''?'') are allowed. See: https://kubernetes.io/docs/concepts/containers/images.'
+        type: string
+      key:
+        description: Key is the PEM encoded public key that the image is signed with.
+        type: string
+    type: object
+  type: array
 type: object
 type: array
 validationFailureAction:
@@ -3559,6 +3585,7 @@ spec:
       - args:
         - --filterK8sResources=[Event,*,*][*,kube-system,*][*,kube-public,*][*,kube-node-lease,*][Node,*,*][APIService,*,*][TokenReview,*,*][SubjectAccessReview,*,*][*,kyverno,*][Binding,*,*][ReplicaSet,*,*][ReportChangeRequest,*,*][ClusterReportChangeRequest,*,*][PolicyReport,*,*][ClusterPolicyReport,*,*]
         - -v=2
+        - --webhooktimeout=30
         env:
         - name: INIT_CONFIG
           value: init-config
@@ -3568,7 +3595,7 @@ spec:
              fieldPath: metadata.namespace
         - name: KYVERNO_SVC
           value: kyverno-svc
-        image: ghcr.io/kyverno/kyverno:v1.4.1
+        image: ghcr.io/kyverno/kyverno:cosign-demo-v1
         imagePullPolicy: IfNotPresent
         livenessProbe:
           failureThreshold: 2
go.mod - 52 changes
@@ -1,67 +1,59 @@
 module github.com/kyverno/kyverno

-go 1.14
+go 1.16

 require (
 	github.com/cenkalti/backoff v2.2.1+incompatible
 	github.com/cornelk/hashmap v1.0.1
 	github.com/dchest/siphash v1.2.1 // indirect
 	github.com/distribution/distribution v2.7.1+incompatible
 	github.com/docker/distribution v2.7.1+incompatible // indirect
-	github.com/evanphx/json-patch/v5 v5.3.0
+	github.com/evanphx/json-patch/v5 v5.2.0
 	github.com/fatih/color v1.12.0
 	github.com/gardener/controller-manager-library v0.2.0
 	github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
 	github.com/go-git/go-billy/v5 v5.0.0
 	github.com/go-git/go-git/v5 v5.2.0
 	github.com/go-logr/logr v0.4.0
+	github.com/google/go-containerregistry v0.5.1
+	github.com/google/go-containerregistry/pkg/authn/k8schain v0.0.0-20210216200643-d81088d9983e
 	github.com/googleapis/gnostic v0.5.4
 	github.com/jmespath/go-jmespath v0.4.0
 	github.com/json-iterator/go v1.1.11 // indirect
 	github.com/julienschmidt/httprouter v1.3.0
 	github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23
 	github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a
 	github.com/mattn/go-runewidth v0.0.7 // indirect
-	github.com/minio/pkg v1.0.4
 	github.com/mitchellh/mapstructure v1.3.2 // indirect
-	github.com/onsi/ginkgo v1.14.1
-	github.com/onsi/gomega v1.10.2
 	github.com/opencontainers/go-digest v1.0.0 // indirect
+	github.com/minio/minio v0.0.0-20200114012931-30922148fbb5
+	github.com/minio/pkg v1.0.7
+	github.com/onsi/ginkgo v1.15.0
+	github.com/onsi/gomega v1.11.0
 	github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6
 	github.com/ory/go-acc v0.2.6 // indirect
 	github.com/patrickmn/go-cache v2.1.0+incompatible
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.8.0
 	github.com/prometheus/procfs v0.6.0 // indirect
-	github.com/spf13/cobra v1.1.1
+	github.com/prometheus/client_golang v1.10.0
+	github.com/sigstore/cosign v0.5.0
+	github.com/sigstore/sigstore v0.0.0-20210530211317-99216b8b86a6
+	github.com/spf13/cobra v1.1.3
 	github.com/stretchr/testify v1.7.0
 	golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b // indirect
 	golang.org/x/net v0.0.0-20210421230115-4e50805a0758 // indirect
 	golang.org/x/sys v0.0.0-20210510120138-977fb7262007 // indirect
 	golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72 // indirect
 	gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b // indirect
 	gopkg.in/yaml.v2 v2.4.0
-	gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
+	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b
 	gotest.tools v2.2.0+incompatible
-	k8s.io/api v0.21.0
-	k8s.io/apiextensions-apiserver v0.21.0
-	k8s.io/apimachinery v0.21.0
-	k8s.io/cli-runtime v0.20.2
-	k8s.io/client-go v0.21.0
-	k8s.io/klog/v2 v2.8.0
+	k8s.io/api v0.21.1
+	k8s.io/apiextensions-apiserver v0.21.1
+	k8s.io/apimachinery v0.21.1
+	k8s.io/cli-runtime v0.21.1
+	k8s.io/client-go v0.21.1
+	k8s.io/klog/v2 v2.9.0
 	k8s.io/kube-openapi v0.0.0-20210305001622-591a79e4bda7
 	sigs.k8s.io/controller-runtime v0.8.1
-	sigs.k8s.io/kustomize/api v0.7.0
-	sigs.k8s.io/kustomize/kyaml v0.10.3
+	sigs.k8s.io/kustomize/api v0.8.8
+	sigs.k8s.io/kustomize/kyaml v0.10.17
 	sigs.k8s.io/yaml v1.2.0
 )

 // Added for go1.13 migration https://github.com/golang/go/issues/32805
 replace (
 	github.com/evanphx/json-patch/v5 => github.com/kacejot/json-patch/v5 v5.3.1-0.20210513152033-7395b4a9e87f
 	github.com/gorilla/rpc v1.2.0+incompatible => github.com/gorilla/rpc v1.2.0
 	github.com/jmespath/go-jmespath => github.com/kyverno/go-jmespath v0.4.1-0.20210511164400-a1d46efa2ed6
 	k8s.io/code-generator => k8s.io/code-generator v0.0.0-20200306081859-6a048a382944
 	k8s.io/component-base => k8s.io/component-base v0.0.0-20190612130303-4062e14deebe
 )
|
|||
By(fmt.Sprintf("Deleting Namespace : %s", nspace))
|
||||
e2eClient.DeleteClusteredResource(nsGVR, nspace) // Clear Namespace
|
||||
e2eClient.DeleteNamespacedResource(dcsmPolGVR, nspace, resource.testResourceName)
|
||||
e2e.GetWithRetry(time.Duration(1), 15, func() error { // Wait Till Deletion of Namespace
|
||||
e2e.GetWithRetry(1*time.Second, 15, func() error { // Wait Till Deletion of Namespace
|
||||
_, err := e2eClient.GetClusteredResource(nsGVR, nspace)
|
||||
if err != nil {
|
||||
return nil
|
||||
|
@ -71,7 +71,7 @@ func Test_Pod_CPU_Hog(t *testing.T) {
|
|||
By(fmt.Sprintf("Creating Namespace %s", saGVR))
|
||||
_, err = e2eClient.CreateClusteredResourceYaml(nsGVR, LitmusChaosnamespaceYaml)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
e2e.GetWithRetry(time.Duration(1), 15, func() error { // Wait Till Creation of Namespace
|
||||
e2e.GetWithRetry(1*time.Second, 15, func() error { // Wait Till Creation of Namespace
|
||||
_, err := e2eClient.GetClusteredResource(nsGVR, resource.namespace)
|
||||
if err != nil {
|
||||
return err
|
||||
|
@ -101,7 +101,7 @@ func Test_Pod_CPU_Hog(t *testing.T) {
|
|||
|
||||
By(fmt.Sprintf("\nMonitoring status from ChaosResult in %s", nspace))
|
||||
|
||||
e2e.GetWithRetry(time.Duration(30), 5, func() error { // Wait Till preparing Chaos engine
|
||||
e2e.GetWithRetry(1*time.Second, 120, func() error { // Wait Till preparing Chaos engine
|
||||
chaosresult, err := e2eClient.GetNamespacedResource(crGVR, nspace, "kind-chaos-pod-cpu-hog")
|
||||
if err != nil {
|
||||
return fmt.Errorf("Unable to fatch ChaosResult: %v", err)
|
||||
|
@ -132,8 +132,8 @@ func Test_Pod_CPU_Hog(t *testing.T) {
|
|||
//CleanUp Resources
|
||||
e2eClient.CleanClusterPolicies(clPolGVR) //Clean Cluster Policy
|
||||
e2eClient.CleanClusterPolicies(saGVR)
|
||||
e2eClient.DeleteClusteredResource(nsGVR, nspace) // Clear Namespace
|
||||
e2e.GetWithRetry(time.Duration(1), 15, func() error { // Wait Till Deletion of Namespace
|
||||
e2eClient.DeleteClusteredResource(nsGVR, nspace) // Clear Namespace
|
||||
e2e.GetWithRetry(1*time.Second, 15, func() error { // Wait Till Deletion of Namespace
|
||||
_, err := e2eClient.GetClusteredResource(nsGVR, nspace)
|
||||
if err != nil {
|
||||
return nil
|
||||
|
|
|
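The change from time.Duration(1) to 1*time.Second in this test fixes a real unit bug: a bare integer converted to time.Duration counts nanoseconds, so the old retry interval was one nanosecond, effectively zero. A minimal check:

package main

import (
	"fmt"
	"time"
)

func main() {
	fmt.Println(time.Duration(1)) // 1ns - the old, effectively-zero interval
	fmt.Println(1 * time.Second)  // 1s  - the corrected interval
}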
@@ -80,12 +80,11 @@ type Rule struct {
 	// +optional
 	ExcludeResources ExcludeResources `json:"exclude,omitempty" yaml:"exclude,omitempty"`

-	// AnyAllConditions enable variable-based conditional rule execution. This is useful for
-	// finer control of when an rule is applied. A condition can reference object data
-	// using JMESPath notation.
-	// This too can be made to happen in a logical-manner where in some situation all the conditions need to pass
-	// and in some other situation, atleast one condition is enough to pass.
-	// For the sake of backwards compatibility, it can be populated with []kyverno.Condition.
+	// Preconditions are used to determine if a policy rule should be applied by evaluating a
+	// set of conditions. The declaration can contain nested `any` or `all` statements. A direct list
+	// of conditions (without `any` or `all` statements is supported for backwards compatibility but
+	// will be deprecated in the next major release.
+	// See: https://kyverno.io/docs/writing-policies/preconditions/
 	// +kubebuilder:validation:XPreserveUnknownFields
 	// +optional
 	AnyAllConditions apiextensions.JSON `json:"preconditions,omitempty" yaml:"preconditions,omitempty"`
@@ -101,6 +100,10 @@ type Rule struct {
 	// Generation is used to create new resources.
 	// +optional
 	Generation Generation `json:"generate,omitempty" yaml:"generate,omitempty"`
+
+	// VerifyImages is used to verify image signatures and mutate them to add a digest
+	// +optional
+	VerifyImages []*ImageVerification `json:"verifyImages,omitempty" yaml:"verifyImages,omitempty"`
 }

 // AnyAllCondition consists of conditions wrapped denoting a logical criteria to be fulfilled.
@@ -110,7 +113,7 @@ type AnyAllConditions struct {
 	// AnyConditions enable variable-based conditional rule execution. This is useful for
 	// finer control of when an rule is applied. A condition can reference object data
 	// using JMESPath notation.
-	// Here, atleast one of the conditions need to pass
+	// Here, at least one of the conditions need to pass
 	// +optional
 	AnyConditions []Condition `json:"any,omitempty" yaml:"any,omitempty"`
@@ -363,20 +366,34 @@ type Validation struct {
 	// +optional
 	AnyPattern apiextensions.JSON `json:"anyPattern,omitempty" yaml:"anyPattern,omitempty"`

-	// Deny defines conditions to fail the validation rule.
+	// Deny defines conditions used to pass or fail a validation rule.
 	// +optional
 	Deny *Deny `json:"deny,omitempty" yaml:"deny,omitempty"`
 }

-// Deny specifies a list of conditions. The validation rule fails, if any Condition
-// evaluates to "false".
+// Deny specifies a list of conditions used to pass or fail a validation rule.
 type Deny struct {
-	// specifies the set of conditions to deny in a logical manner
-	// For the sake of backwards compatibility, it can be populated with []kyverno.Condition.
+	// Multiple conditions can be declared under an `any` or `all` statement. A direct list
+	// of conditions (without `any` or `all` statements) is also supported for backwards compatibility
+	// but will be deprecated in the next major release.
+	// See: https://kyverno.io/docs/writing-policies/validate/#deny-rules
 	// +kubebuilder:validation:XPreserveUnknownFields
 	AnyAllConditions apiextensions.JSON `json:"conditions,omitempty" yaml:"conditions,omitempty"`
 }

+// ImageVerification validates that images that match the specified pattern
+// are signed with the supplied public key. Once the image is verified it is
+// mutated to include the SHA digest retrieved during the registration.
+type ImageVerification struct {
+
+	// Image is the image name consisting of the registry address, repository, image, and tag.
+	// Wildcards ('*' and '?') are allowed. See: https://kubernetes.io/docs/concepts/containers/images.
+	Image string `json:"image,omitempty" yaml:"image,omitempty"`
+
+	// Key is the PEM encoded public key that the image is signed with.
+	Key string `json:"key,omitempty" yaml:"key,omitempty"`
+}
+
 // Generation defines how new resources should be created and managed.
 type Generation struct {
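Putting the new types together: a hedged sketch of constructing a rule that uses VerifyImages in Go. The import path is assumed from the repository layout (pkg/api/kyverno/v1), the Name field is assumed from the surrounding Rule struct, and the image pattern and key are placeholders; HasVerifyImages is the helper added in the next file.

package main

import (
	"fmt"

	kyvernov1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
)

func main() {
	// Placeholder image pattern and PEM key, for illustration only.
	rule := kyvernov1.Rule{
		Name: "verify-image",
		VerifyImages: []*kyvernov1.ImageVerification{{
			Image: "ghcr.io/myorg/*",
			Key:   "-----BEGIN PUBLIC KEY-----\n...\n-----END PUBLIC KEY-----",
		}},
	}
	fmt.Println(rule.HasVerifyImages()) // true
}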
@@ -27,6 +27,28 @@ func (p *ClusterPolicy) HasMutateOrValidateOrGenerate() bool {
 	return false
 }

+//HasMutate checks for mutate rule types
+func (p *ClusterPolicy) HasMutate() bool {
+	for _, rule := range p.Spec.Rules {
+		if rule.HasMutate() {
+			return true
+		}
+	}
+
+	return false
+}
+
+//HasVerifyImages checks for image verification rule types
+func (p *ClusterPolicy) HasVerifyImages() bool {
+	for _, rule := range p.Spec.Rules {
+		if rule.HasVerifyImages() {
+			return true
+		}
+	}
+
+	return false
+}
+
 // BackgroundProcessingEnabled checks if background is set to true
 func (p *ClusterPolicy) BackgroundProcessingEnabled() bool {
 	if p.Spec.Background == nil {
@@ -41,6 +63,11 @@ func (r Rule) HasMutate() bool {
 	return !reflect.DeepEqual(r.Mutation, Mutation{})
 }

+// HasVerifyImages checks for verifyImages rule
+func (r Rule) HasVerifyImages() bool {
+	return r.VerifyImages != nil && !reflect.DeepEqual(r.VerifyImages, ImageVerification{})
+}
+
 // HasValidate checks for validate rule
 func (r Rule) HasValidate() bool {
 	return !reflect.DeepEqual(r.Validation, Validation{})
@@ -78,37 +105,68 @@ func (in *Mutation) DeepCopyInto(out *Mutation) {
 	}
 }

-// DeepCopyInto is declared because k8s:deepcopy-gen is
-// not able to generate this method for interface{} member
+// TODO - the DeepCopyInto methods are added here to work-around
+// codegen issues with handling DeepCopy of the apiextensions.JSON
+// type. We need to update to apiextensions/v1.JSON which works
+// with DeepCopy and remove these methods, or re-write them to
+// actually perform a deep copy.
+// Also see: https://github.com/kyverno/kyverno/pull/2000
+
 func (pp *Patch) DeepCopyInto(out *Patch) {
 	if out != nil {
 		*out = *pp
 	}
 }

 // DeepCopyInto is declared because k8s:deepcopy-gen is
 // not able to generate this method for interface{} member
 func (in *Validation) DeepCopyInto(out *Validation) {
 	if out != nil {
 		*out = *in
 	}
 }

 // DeepCopyInto is declared because k8s:deepcopy-gen is
 // not able to generate this method for interface{} member
 func (gen *Generation) DeepCopyInto(out *Generation) {
 	if out != nil {
 		*out = *gen
 	}
 }

 // DeepCopyInto is declared because k8s:deepcopy-gen is
 // not able to generate this method for interface{} member
 func (cond *Condition) DeepCopyInto(out *Condition) {
 	if out != nil {
 		*out = *cond
 	}
 }
+
+func (in *Deny) DeepCopyInto(out *Deny) {
+	*out = *in
+	if in.AnyAllConditions != nil {
+		out.AnyAllConditions = in.AnyAllConditions
+	}
+}
+
+func (in *Rule) DeepCopyInto(out *Rule) {
+	*out = *in
+	if in.Context != nil {
+		in, out := &in.Context, &out.Context
+		*out = make([]ContextEntry, len(*in))
+		for i := range *in {
+			(*in)[i].DeepCopyInto(&(*out)[i])
+		}
+	}
+	in.MatchResources.DeepCopyInto(&out.MatchResources)
+	in.ExcludeResources.DeepCopyInto(&out.ExcludeResources)
+	if in.AnyAllConditions != nil {
+		out.AnyAllConditions = in.AnyAllConditions
+	}
+	in.Mutation.DeepCopyInto(&out.Mutation)
+	in.Validation.DeepCopyInto(&out.Validation)
+	in.Generation.DeepCopyInto(&out.Generation)
+	if in.VerifyImages != nil {
+		in, out := &in.VerifyImages, &out.VerifyImages
+		*out = make([]*ImageVerification, len(*in))
+		for i := range *in {
+			if (*in)[i] != nil {
+				in, out := &(*in)[i], &(*out)[i]
+				*out = new(ImageVerification)
+				**out = **in
+			}
+		}
+	}
+}
+
 //ToKey generates the key string used for adding label to polivy violation
 func (rs ResourceSpec) ToKey() string {
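The TODO above points out that these hand-written methods alias the apiextensions.JSON members rather than copying them. One way to re-write them "to actually perform a deep copy", sketched under the assumption that the values are JSON-serializable; deepCopyJSON is a hypothetical helper, not code from this PR:

package main

import (
	"encoding/json"
	"fmt"
)

// deepCopyJSON round-trips an arbitrary JSON-compatible value through
// encoding/json, producing a structurally independent copy.
func deepCopyJSON(in interface{}) (interface{}, error) {
	b, err := json.Marshal(in)
	if err != nil {
		return nil, err
	}
	var out interface{}
	if err := json.Unmarshal(b, &out); err != nil {
		return nil, err
	}
	return out, nil
}

func main() {
	orig := map[string]interface{}{"any": []interface{}{map[string]interface{}{"key": "request.object"}}}
	copied, _ := deepCopyJSON(orig)
	copied.(map[string]interface{})["any"] = nil
	fmt.Println(orig["any"] != nil) // true: mutating the copy leaves the original intact
}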
@@ -1,6 +1,8 @@
+// +build !ignore_autogenerated
+
 /*
 Copyright The Kubernetes Authors.

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.
 You may obtain a copy of the License at
@@ -14,19 +16,21 @@ See the License for the specific language governing permissions and
 limitations under the License.
 */

-// Code generated by controller-gen. DO NOT EDIT.
+// Code generated by deepcopy-gen. DO NOT EDIT.

 package v1

 import (
 	rbacv1 "k8s.io/api/rbac/v1"
+	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/runtime"
+	runtime "k8s.io/apimachinery/pkg/runtime"
 )

 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *APICall) DeepCopyInto(out *APICall) {
 	*out = *in
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new APICall.
@@ -56,6 +60,7 @@ func (in *AnyAllConditions) DeepCopyInto(out *AnyAllConditions) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AnyAllConditions.
@@ -68,9 +73,18 @@ func (in *AnyAllConditions) DeepCopy() *AnyAllConditions {
 	return out
 }

+// DeepCopyJSON is an autogenerated deepcopy function, copying the receiver, creating a new apiextensions.JSON.
+func (in *AnyAllConditions) DeepCopyJSON() apiextensions.JSON {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *CloneFrom) DeepCopyInto(out *CloneFrom) {
 	*out = *in
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CloneFrom.
@@ -90,6 +104,7 @@ func (in *ClusterPolicy) DeepCopyInto(out *ClusterPolicy) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicy.
@@ -122,6 +137,7 @@ func (in *ClusterPolicyList) DeepCopyInto(out *ClusterPolicyList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicyList.
@@ -155,6 +171,7 @@ func (in *Condition) DeepCopy() *Condition {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ConfigMapReference) DeepCopyInto(out *ConfigMapReference) {
 	*out = *in
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapReference.
@@ -180,6 +197,7 @@ func (in *ContextEntry) DeepCopyInto(out *ContextEntry) {
 		*out = new(APICall)
 		**out = **in
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContextEntry.
@@ -192,11 +210,6 @@ func (in *ContextEntry) DeepCopy() *ContextEntry {
 	return out
 }

-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Deny) DeepCopyInto(out *Deny) {
-	*out = *in
-}
-
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Deny.
 func (in *Deny) DeepCopy() *Deny {
 	if in == nil {
@@ -207,11 +220,20 @@ func (in *Deny) DeepCopy() *Deny {
 	return out
 }

+// DeepCopyJSON is an autogenerated deepcopy function, copying the receiver, creating a new apiextensions.JSON.
+func (in *Deny) DeepCopyJSON() apiextensions.JSON {
+	if c := in.DeepCopy(); c != nil {
+		return c
+	}
+	return nil
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ExcludeResources) DeepCopyInto(out *ExcludeResources) {
 	*out = *in
 	in.UserInfo.DeepCopyInto(&out.UserInfo)
 	in.ResourceDescription.DeepCopyInto(&out.ResourceDescription)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ExcludeResources.
@@ -231,6 +253,7 @@ func (in *GenerateRequest) DeepCopyInto(out *GenerateRequest) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequest.
@@ -255,6 +278,7 @@ func (in *GenerateRequest) DeepCopyObject() runtime.Object {
 func (in *GenerateRequestContext) DeepCopyInto(out *GenerateRequestContext) {
 	*out = *in
 	in.UserRequestInfo.DeepCopyInto(&out.UserRequestInfo)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestContext.
@@ -279,6 +303,7 @@ func (in *GenerateRequestList) DeepCopyInto(out *GenerateRequestList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestList.
@@ -304,6 +329,7 @@ func (in *GenerateRequestSpec) DeepCopyInto(out *GenerateRequestSpec) {
 	*out = *in
 	out.Resource = in.Resource
 	in.Context.DeepCopyInto(&out.Context)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestSpec.
@@ -324,6 +350,7 @@ func (in *GenerateRequestStatus) DeepCopyInto(out *GenerateRequestStatus) {
 		*out = make([]ResourceSpec, len(*in))
 		copy(*out, *in)
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new GenerateRequestStatus.
@@ -346,11 +373,28 @@ func (in *Generation) DeepCopy() *Generation {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *ImageVerification) DeepCopyInto(out *ImageVerification) {
+	*out = *in
+	return
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageVerification.
+func (in *ImageVerification) DeepCopy() *ImageVerification {
+	if in == nil {
+		return nil
+	}
+	out := new(ImageVerification)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *MatchResources) DeepCopyInto(out *MatchResources) {
 	*out = *in
 	in.UserInfo.DeepCopyInto(&out.UserInfo)
 	in.ResourceDescription.DeepCopyInto(&out.ResourceDescription)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MatchResources.
@@ -380,6 +424,7 @@ func (in *Policy) DeepCopyInto(out *Policy) {
 	in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
 	in.Spec.DeepCopyInto(&out.Spec)
 	in.Status.DeepCopyInto(&out.Status)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Policy.
@@ -412,6 +457,7 @@ func (in *PolicyList) DeepCopyInto(out *PolicyList) {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyList.
@@ -440,6 +486,7 @@ func (in *PolicyStatus) DeepCopyInto(out *PolicyStatus) {
 		*out = make([]RuleStats, len(*in))
 		copy(*out, *in)
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyStatus.
@@ -466,6 +513,7 @@ func (in *RequestInfo) DeepCopyInto(out *RequestInfo) {
 		copy(*out, *in)
 	}
 	in.AdmissionUserInfo.DeepCopyInto(&out.AdmissionUserInfo)
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RequestInfo.
@@ -508,6 +556,7 @@ func (in *ResourceDescription) DeepCopyInto(out *ResourceDescription) {
 		*out = new(metav1.LabelSelector)
 		(*in).DeepCopyInto(*out)
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDescription.
@@ -523,6 +572,7 @@ func (in *ResourceDescription) DeepCopy() *ResourceDescription {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ResourceSpec) DeepCopyInto(out *ResourceSpec) {
 	*out = *in
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceSpec.
@@ -535,20 +585,6 @@ func (in *ResourceSpec) DeepCopy() *ResourceSpec {
 	return out
 }

-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *Rule) DeepCopyInto(out *Rule) {
-	*out = *in
-	if in.Context != nil {
-		in, out := &in.Context, &out.Context
-		*out = make([]ContextEntry, len(*in))
-		for i := range *in {
-			(*in)[i].DeepCopyInto(&(*out)[i])
-		}
-	}
-	in.MatchResources.DeepCopyInto(&out.MatchResources)
-	in.ExcludeResources.DeepCopyInto(&out.ExcludeResources)
-}
-
 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Rule.
 func (in *Rule) DeepCopy() *Rule {
 	if in == nil {
@@ -562,6 +598,7 @@ func (in *Rule) DeepCopy() *Rule {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *RuleStats) DeepCopyInto(out *RuleStats) {
 	*out = *in
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new RuleStats.
@@ -589,6 +626,7 @@ func (in *Spec) DeepCopyInto(out *Spec) {
 		*out = new(bool)
 		**out = **in
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Spec.
@@ -619,6 +657,7 @@ func (in *UserInfo) DeepCopyInto(out *UserInfo) {
 		*out = make([]rbacv1.Subject, len(*in))
 		copy(*out, *in)
 	}
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new UserInfo.
@@ -644,6 +683,7 @@ func (in *Validation) DeepCopy() *Validation {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *ViolatedRule) DeepCopyInto(out *ViolatedRule) {
 	*out = *in
+	return
 }

 // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ViolatedRule.
@@ -1,6 +1,8 @@
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

@@ -14,13 +16,13 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.
// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha1

import (
    policyreportv1alpha1 "github.com/kyverno/kyverno/pkg/api/policyreport/v1alpha1"
    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -52,6 +54,7 @@ func (in *ClusterReportChangeRequest) DeepCopyInto(out *ClusterReportChangeReque
            }
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReportChangeRequest.

@@ -84,6 +87,7 @@ func (in *ClusterReportChangeRequestList) DeepCopyInto(out *ClusterReportChangeR
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterReportChangeRequestList.

@@ -131,6 +135,7 @@ func (in *ReportChangeRequest) DeepCopyInto(out *ReportChangeRequest) {
            }
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportChangeRequest.

@@ -163,6 +168,7 @@ func (in *ReportChangeRequestList) DeepCopyInto(out *ReportChangeRequestList) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ReportChangeRequestList.
@@ -1,6 +1,8 @@
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

@@ -14,12 +16,12 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by controller-gen. DO NOT EDIT.
// Code generated by deepcopy-gen. DO NOT EDIT.

package v1alpha1

import (
    "k8s.io/api/core/v1"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    runtime "k8s.io/apimachinery/pkg/runtime"
)

@@ -51,6 +53,7 @@ func (in *ClusterPolicyReport) DeepCopyInto(out *ClusterPolicyReport) {
            }
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicyReport.

@@ -83,6 +86,7 @@ func (in *ClusterPolicyReportList) DeepCopyInto(out *ClusterPolicyReportList) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ClusterPolicyReportList.

@@ -130,6 +134,7 @@ func (in *PolicyReport) DeepCopyInto(out *PolicyReport) {
            }
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyReport.

@@ -162,6 +167,7 @@ func (in *PolicyReportList) DeepCopyInto(out *PolicyReportList) {
            (*in)[i].DeepCopyInto(&(*out)[i])
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyReportList.

@@ -208,6 +214,7 @@ func (in *PolicyReportResult) DeepCopyInto(out *PolicyReportResult) {
            (*out)[key] = val
        }
    }
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyReportResult.

@@ -223,6 +230,7 @@ func (in *PolicyReportResult) DeepCopy() *PolicyReportResult {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PolicyReportSummary) DeepCopyInto(out *PolicyReportSummary) {
    *out = *in
    return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PolicyReportSummary.
@@ -31,7 +31,7 @@ import (

var scheme = runtime.NewScheme()
var codecs = serializer.NewCodecFactory(scheme)
var parameterCodec = runtime.NewParameterCodec(scheme)

var localSchemeBuilder = runtime.SchemeBuilder{
    kyvernov1.AddToScheme,
    kyvernov1alpha1.AddToScheme,
@@ -26,10 +26,13 @@ import (
)

// ClusterPolicyLister helps list ClusterPolicies.
// All objects returned here must be treated as read-only.
type ClusterPolicyLister interface {
    // List lists all ClusterPolicies in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1.ClusterPolicy, err error)
    // Get retrieves the ClusterPolicy from the index for a given name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1.ClusterPolicy, error)
    ClusterPolicyListerExpansion
}
8 pkg/client/listers/kyverno/v1/expansion_custom.go Normal file
@@ -0,0 +1,8 @@
package v1

import kyvernov1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"

type GenerateRequestNamespaceListerExpansion interface {
    GetGenerateRequestsForClusterPolicy(policy string) ([]*kyvernov1.GenerateRequest, error)
    GetGenerateRequestsForResource(kind, namespace, name string) ([]*kyvernov1.GenerateRequest, error)
}
@@ -18,28 +18,14 @@ limitations under the License.

package v1

import (
    kyvernov1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
    "k8s.io/apimachinery/pkg/labels"
)

// ClusterPolicyListerExpansion allows custom methods to be added to
// ClusterPolicyLister.
type ClusterPolicyListerExpansion interface {
    ListResources(selector labels.Selector) (ret []*kyvernov1.ClusterPolicy, err error)
}
type ClusterPolicyListerExpansion interface{}

// GenerateRequestListerExpansion allows custom methods to be added to
// GenerateRequestLister.
type GenerateRequestListerExpansion interface{}

// GenerateRequestNamespaceListerExpansion allows custom methods to be added to
// GenerateRequestNamespaceLister.
type GenerateRequestNamespaceListerExpansion interface {
    GetGenerateRequestsForClusterPolicy(policy string) ([]*kyvernov1.GenerateRequest, error)
    GetGenerateRequestsForResource(kind, namespace, name string) ([]*kyvernov1.GenerateRequest, error)
}

// PolicyListerExpansion allows custom methods to be added to
// PolicyLister.
type PolicyListerExpansion interface{}
@@ -26,8 +26,10 @@ import (
)

// GenerateRequestLister helps list GenerateRequests.
// All objects returned here must be treated as read-only.
type GenerateRequestLister interface {
    // List lists all GenerateRequests in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1.GenerateRequest, err error)
    // GenerateRequests returns an object that can list and get GenerateRequests.
    GenerateRequests(namespace string) GenerateRequestNamespaceLister

@@ -58,10 +60,13 @@ func (s *generateRequestLister) GenerateRequests(namespace string) GenerateReque
}

// GenerateRequestNamespaceLister helps list and get GenerateRequests.
// All objects returned here must be treated as read-only.
type GenerateRequestNamespaceLister interface {
    // List lists all GenerateRequests in the indexer for a given namespace.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1.GenerateRequest, err error)
    // Get retrieves the GenerateRequest from the indexer for a given namespace and name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1.GenerateRequest, error)
    GenerateRequestNamespaceListerExpansion
}
@@ -26,8 +26,10 @@ import (
)

// PolicyLister helps list Policies.
// All objects returned here must be treated as read-only.
type PolicyLister interface {
    // List lists all Policies in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1.Policy, err error)
    // Policies returns an object that can list and get Policies.
    Policies(namespace string) PolicyNamespaceLister

@@ -58,10 +60,13 @@ func (s *policyLister) Policies(namespace string) PolicyNamespaceLister {
}

// PolicyNamespaceLister helps list and get Policies.
// All objects returned here must be treated as read-only.
type PolicyNamespaceLister interface {
    // List lists all Policies in the indexer for a given namespace.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1.Policy, err error)
    // Get retrieves the Policy from the indexer for a given namespace and name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1.Policy, error)
    PolicyNamespaceListerExpansion
}
@@ -26,10 +26,13 @@ import (
)

// ClusterReportChangeRequestLister helps list ClusterReportChangeRequests.
// All objects returned here must be treated as read-only.
type ClusterReportChangeRequestLister interface {
    // List lists all ClusterReportChangeRequests in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.ClusterReportChangeRequest, err error)
    // Get retrieves the ClusterReportChangeRequest from the index for a given name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1alpha1.ClusterReportChangeRequest, error)
    ClusterReportChangeRequestListerExpansion
}
@@ -26,8 +26,10 @@ import (
)

// ReportChangeRequestLister helps list ReportChangeRequests.
// All objects returned here must be treated as read-only.
type ReportChangeRequestLister interface {
    // List lists all ReportChangeRequests in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.ReportChangeRequest, err error)
    // ReportChangeRequests returns an object that can list and get ReportChangeRequests.
    ReportChangeRequests(namespace string) ReportChangeRequestNamespaceLister

@@ -58,10 +60,13 @@ func (s *reportChangeRequestLister) ReportChangeRequests(namespace string) Repor
}

// ReportChangeRequestNamespaceLister helps list and get ReportChangeRequests.
// All objects returned here must be treated as read-only.
type ReportChangeRequestNamespaceLister interface {
    // List lists all ReportChangeRequests in the indexer for a given namespace.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.ReportChangeRequest, err error)
    // Get retrieves the ReportChangeRequest from the indexer for a given namespace and name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1alpha1.ReportChangeRequest, error)
    ReportChangeRequestNamespaceListerExpansion
}
@@ -26,10 +26,13 @@ import (
)

// ClusterPolicyReportLister helps list ClusterPolicyReports.
// All objects returned here must be treated as read-only.
type ClusterPolicyReportLister interface {
    // List lists all ClusterPolicyReports in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.ClusterPolicyReport, err error)
    // Get retrieves the ClusterPolicyReport from the index for a given name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1alpha1.ClusterPolicyReport, error)
    ClusterPolicyReportListerExpansion
}
@@ -26,8 +26,10 @@ import (
)

// PolicyReportLister helps list PolicyReports.
// All objects returned here must be treated as read-only.
type PolicyReportLister interface {
    // List lists all PolicyReports in the indexer.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.PolicyReport, err error)
    // PolicyReports returns an object that can list and get PolicyReports.
    PolicyReports(namespace string) PolicyReportNamespaceLister

@@ -58,10 +60,13 @@ func (s *policyReportLister) PolicyReports(namespace string) PolicyReportNamespa
}

// PolicyReportNamespaceLister helps list and get PolicyReports.
// All objects returned here must be treated as read-only.
type PolicyReportNamespaceLister interface {
    // List lists all PolicyReports in the indexer for a given namespace.
    // Objects returned here must be treated as read-only.
    List(selector labels.Selector) (ret []*v1alpha1.PolicyReport, err error)
    // Get retrieves the PolicyReport from the indexer for a given namespace and name.
    // Objects returned here must be treated as read-only.
    Get(name string) (*v1alpha1.PolicyReport, error)
    PolicyReportNamespaceListerExpansion
}
@@ -3,6 +3,7 @@ package common
import (
    "encoding/json"
    "fmt"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "strings"
    "time"

@@ -142,43 +143,61 @@ func RetryFunc(retryInterval, timeout time.Duration, run func() error, logger lo
func ProcessDeletePolicyForCloneGenerateRule(rules []kyverno.Rule, client *dclient.Client, pName string, logger logr.Logger) bool {
    generatePolicyWithClone := false
    for _, rule := range rules {
        if rule.Generation.Clone.Name != "" {
            logger.V(4).Info("generate policy with clone, skipping deletion of generate request")
            generatePolicyWithClone = true
            obj, err := client.GetResource("", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name)
        if rule.Generation.Clone.Name == "" {
            continue
        }

        logger.V(4).Info("generate policy with clone, remove policy name from label of source resource")
        generatePolicyWithClone = true

        retryCount := 0
        for retryCount < 5 {
            err := updateSourceResource(pName, rule, client, logger)
            if err != nil {
                logger.Error(err, fmt.Sprintf("source resource %s/%s/%s not found.", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name))
                continue
            }

            updateSource := true
            label := obj.GetLabels()
            logger.V(4).Info("removing policy name from label of source resource")

            if len(label) != 0 {
                if label["generate.kyverno.io/clone-policy-name"] != "" {
                    policyNames := label["generate.kyverno.io/clone-policy-name"]
                    if strings.Contains(policyNames, pName) {
                        updatedPolicyNames := strings.Replace(policyNames, pName, "", -1)
                        label["generate.kyverno.io/clone-policy-name"] = updatedPolicyNames
                    } else {
                        updateSource = false
                    }
                logger.Error(err, "failed to update generate source resource labels")
                if apierrors.IsConflict(err) {
                    retryCount++
                } else {
                    break
                }
            }

            if updateSource {
                logger.V(4).Info("updating existing clone source")
                obj.SetLabels(label)
                _, err = client.UpdateResource(obj.GetAPIVersion(), rule.Generation.Kind, rule.Generation.Clone.Namespace, obj, false)
                if err != nil {
                    logger.Error(err, "failed to update source", "kind", obj.GetKind(), "name", obj.GetName(), "namespace", obj.GetNamespace())
                    continue
                }
                logger.V(4).Info("updated source", "kind", obj.GetKind(), "name", obj.GetName(), "namespace", obj.GetNamespace())
            }
        }
    }

    return generatePolicyWithClone
}

func updateSourceResource(pName string, rule kyverno.Rule, client *dclient.Client, log logr.Logger) error {
    obj, err := client.GetResource("", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name)
    if err != nil {
        return errors.Wrapf(err, "source resource %s/%s/%s not found", rule.Generation.Kind, rule.Generation.Clone.Namespace, rule.Generation.Clone.Name)
    }

    update := false
    labels := obj.GetLabels()
    update, labels = removePolicyFromLabels(pName, labels)
    // nothing to update if the policy name was not present in the labels
    if !update {
        return nil
    }

    obj.SetLabels(labels)
    _, err = client.UpdateResource(obj.GetAPIVersion(), rule.Generation.Kind, rule.Generation.Clone.Namespace, obj, false)
    return err
}

func removePolicyFromLabels(pName string, labels map[string]string) (bool, map[string]string) {
    if len(labels) == 0 {
        return false, labels
    }

    if labels["generate.kyverno.io/clone-policy-name"] != "" {
        policyNames := labels["generate.kyverno.io/clone-policy-name"]
        if strings.Contains(policyNames, pName) {
            updatedPolicyNames := strings.Replace(policyNames, pName, "", -1)
            labels["generate.kyverno.io/clone-policy-name"] = updatedPolicyNames
            return true, labels
        }
    }

    return false, labels
}
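
A quick standalone illustration of the label rewrite above (not part of the change itself; the policy names are made up): the policy name is removed from the comma-separated generate.kyverno.io/clone-policy-name value with a plain strings.Replace, so a leftover comma can remain in the value.

    package main

    import (
        "fmt"
        "strings"
    )

    func main() {
        labels := map[string]string{"generate.kyverno.io/clone-policy-name": "pol-a,pol-b"}
        names := labels["generate.kyverno.io/clone-policy-name"]
        if strings.Contains(names, "pol-a") {
            // same call removePolicyFromLabels uses to drop the policy name
            labels["generate.kyverno.io/clone-policy-name"] = strings.Replace(names, "pol-a", "", -1)
        }
        fmt.Println(labels["generate.kyverno.io/clone-policy-name"]) // prints ",pol-b" - the comma is left behind
    }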

116 pkg/cosign/cosign.go Normal file
@@ -0,0 +1,116 @@
package cosign

import (
    "context"
    "crypto"
    "encoding/json"
    "fmt"
    "github.com/go-logr/logr"
    "github.com/google/go-containerregistry/pkg/authn"
    "github.com/google/go-containerregistry/pkg/authn/k8schain"
    "github.com/google/go-containerregistry/pkg/name"
    "github.com/pkg/errors"
    "github.com/sigstore/cosign/pkg/cosign"
    "github.com/sigstore/sigstore/pkg/signature"
    "k8s.io/client-go/kubernetes"
)

// Initialize loads the image pull secrets and initializes the default auth method for container registry API calls
func Initialize(client kubernetes.Interface, namespace, serviceAccount string, imagePullSecrets []string) error {
    var kc authn.Keychain
    kcOpts := &k8schain.Options{
        Namespace:          namespace,
        ServiceAccountName: serviceAccount,
        ImagePullSecrets:   imagePullSecrets,
    }

    kc, err := k8schain.New(context.Background(), client, *kcOpts)
    if err != nil {
        return errors.Wrap(err, "failed to initialize registry keychain")
    }

    authn.DefaultKeychain = kc
    return nil
}

func Verify(imageRef string, key []byte, log logr.Logger) (digest string, err error) {
    pubKey, err := decodePEM(key)
    if err != nil {
        return "", errors.Wrapf(err, "failed to decode PEM %v", string(key))
    }

    cosignOpts := &cosign.CheckOpts{
        Annotations: map[string]interface{}{},
        Claims:      false,
        Tlog:        false,
        Roots:       nil,
        PubKey:      pubKey,
    }

    ref, err := name.ParseReference(imageRef)
    if err != nil {
        return "", errors.Wrap(err, "failed to parse image")
    }

    verified, err := cosign.Verify(context.Background(), ref, cosignOpts, "https://rekor.sigstore.dev")
    if err != nil {
        return "", errors.Wrap(err, "failed to verify image")
    }

    digest, err = extractDigest(imageRef, verified, log)
    if err != nil {
        return "", errors.Wrap(err, "failed to get digest")
    }

    return digest, nil
}

func decodePEM(raw []byte) (pub cosign.PublicKey, err error) {
    // PEM encoded file.
    ed, err := cosign.PemToECDSAKey(raw)
    if err != nil {
        return nil, errors.Wrap(err, "pem to ecdsa")
    }

    return signature.ECDSAVerifier{Key: ed, HashAlg: crypto.SHA256}, nil
}

func extractDigest(imgRef string, verified []cosign.SignedPayload, log logr.Logger) (string, error) {
    var jsonMap map[string]interface{}
    for _, vp := range verified {
        if err := json.Unmarshal(vp.Payload, &jsonMap); err != nil {
            return "", err
        }

        log.V(4).Info("image verification response", "image", imgRef, "payload", jsonMap)

        // The cosign response is in the JSON format:
        // {
        //   "critical": {
        //     "identity": {
        //       "docker-reference": "registry-v2.nirmata.io/pause"
        //     },
        //     "image": {
        //       "docker-manifest-digest": "sha256:4a1c4b21597c1b4415bdbecb28a3296c6b5e23ca4f9feeb599860a1dac6a0108"
        //     },
        //     "type": "cosign container image signature"
        //   },
        //   "optional": null
        // }
        // use checked type assertions so a malformed payload cannot panic
        critical, _ := jsonMap["critical"].(map[string]interface{})
        if critical != nil {
            typeStr, _ := critical["type"].(string)
            if typeStr == "cosign container image signature" {
                identity, _ := critical["identity"].(map[string]interface{})
                if identity != nil {
                    image, _ := critical["image"].(map[string]interface{})
                    if image != nil {
                        digest, _ := image["docker-manifest-digest"].(string)
                        return digest, nil
                    }
                }
            }
        }
    }

    return "", fmt.Errorf("digest not found for %s", imgRef)
}
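
A hedged usage sketch of the package as wired above (kubeClient, pemPublicKey, the namespace, and the secret name are made-up placeholders, not part of the change):

    // Initialize the default keychain once at startup so private registries work,
    // then verify an image against a PEM-encoded public key.
    if err := cosign.Initialize(kubeClient, "kyverno", "kyverno-service-account", []string{"regcred"}); err != nil {
        log.Error(err, "registry auth setup failed")
    }
    digest, err := cosign.Verify("ghcr.io/example/app:v1", []byte(pemPublicKey), log)
    if err != nil {
        log.Error(err, "signature check failed")
    } else {
        log.Info("image verified", "digest", digest)
    }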

@@ -46,6 +46,7 @@ type Context struct {
    jsonRaw           []byte
    jsonRawCheckpoint []byte
    builtInVars       []string
    images            *Images
    log               logr.Logger
}
@@ -222,15 +223,19 @@ func (ctx *Context) AddImageInfo(resource *unstructured.Unstructured) error {
        return nil
    }

    resourceImg := newResourceImage(initContainersImgs, containersImgs)

    images := struct {
        Images interface{} `json:"images"`
    }{
        Images: resourceImg,
    images := newImages(initContainersImgs, containersImgs)
    if images == nil {
        return nil
    }

    objRaw, err := json.Marshal(images)
    ctx.images = images
    imagesTag := struct {
        Images interface{} `json:"images"`
    }{
        Images: images,
    }

    objRaw, err := json.Marshal(imagesTag)
    if err != nil {
        return err
    }

@@ -238,6 +243,7 @@ func (ctx *Context) AddImageInfo(resource *unstructured.Unstructured) error {
    return ctx.AddJSON(objRaw)
}

func (ctx *Context) ImageInfo() *Images {
    return ctx.images
}

// Checkpoint creates a copy of the internal state.
// Prior checkpoints will be overridden.
func (ctx *Context) Checkpoint() {

@@ -42,9 +42,13 @@ func (ctx *Context) Query(query string) (interface{}, error) {

    result, err := queryPath.Search(data)
    if err != nil {
        ctx.log.Error(err, "failed to search query", "query", query)
        if !strings.HasPrefix(err.Error(), "Unknown key") {
            ctx.log.Error(err, "JMESPath search failed", "query", query)
        }

        return emptyResult, err
    }

    return result, nil
}

@@ -1,6 +1,7 @@
package context

import (
    "strconv"
    "strings"

    "github.com/distribution/distribution/reference"

@@ -9,92 +10,93 @@ import (
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

type imageInfo struct {
type ImageInfo struct {

    // Registry is the URL address of the image registry e.g. `docker.io`
    Registry string `json:"registry,omitempty"`
    Name string `json:"name"`
    Tag string `json:"tag,omitempty"`
    Digest string `json:"digest,omitempty"`

    // Name is the image name portion e.g. `busybox`
    Name string `json:"name"`

    // Path is the repository path and image name e.g. `some-repository/busybox`
    Path string `json:"path"`

    // Tag is the image tag e.g. `v2`
    Tag string `json:"tag,omitempty"`

    // Digest is the image digest portion e.g. `sha256:128c6e3534b842a2eec139999b8ce8aa9a2af9907e2b9269550809d18cd832a3`
    Digest string `json:"digest,omitempty"`

    // JSONPath is the full JSON path to this image e.g. `/spec/containers/0/image`
    JSONPath string `json:"jsonPath,omitempty"`
}

type containerImage struct {
func (i *ImageInfo) String() string {
    image := i.Registry + "/" + i.Path + ":" + i.Tag
    if i.Digest != "" {
        image = image + "@" + i.Digest
    }

    return image
}

type ContainerImage struct {
    Name  string
    Image imageInfo
    Image *ImageInfo
}

type resourceImage struct {
    Containers map[string]interface{} `json:"containers"`
    InitContainers map[string]interface{} `json:"initContainers,omitempty"`
type Images struct {
    InitContainers map[string]*ImageInfo `json:"initContainers,omitempty"`
    Containers     map[string]*ImageInfo `json:"containers"`
}

func newResourceImage(initContainersImgs, containersImgs []*containerImage) resourceImage {
    initContainers := make(map[string]interface{})
    containers := make(map[string]interface{})

func newImages(initContainersImgs, containersImgs []*ContainerImage) *Images {
    initContainers := make(map[string]*ImageInfo)
    for _, resource := range initContainersImgs {
        initContainers[resource.Name] = resource.Image
    }

    containers := make(map[string]*ImageInfo)
    for _, resource := range containersImgs {
        containers[resource.Name] = resource.Image
    }

    return resourceImage{
        Containers: containers,
    return &Images{
        InitContainers: initContainers,
        Containers:     containers,
    }
}
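
For reference, a resource with one init container and one container would serialize (via AddImageInfo above, following the struct tags) into context JSON shaped roughly like the following, which is what JMESPath queries and policy variables such as images.containers.nginx.tag resolve against:

    {
      "images": {
        "initContainers": {
          "init": {"registry": "index.docker.io", "name": "busybox", "path": "busybox", "tag": "v1.2.3", "jsonPath": "/spec/initContainers/0/image"}
        },
        "containers": {
          "nginx": {"registry": "docker.io", "name": "nginx", "path": "nginx", "tag": "latest", "jsonPath": "/spec/containers/0/image"}
        }
      }
    }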

func extractImageInfo(resource *unstructured.Unstructured, log logr.Logger) (initContainersImgs, containersImgs []*containerImage) {
func extractImageInfo(resource *unstructured.Unstructured, log logr.Logger) (initContainersImgs, containersImgs []*ContainerImage) {
    logger := log.WithName("extractImageInfo").WithValues("kind", resource.GetKind(), "ns", resource.GetNamespace(), "name", resource.GetName())

    switch resource.GetKind() {
    case "Pod":
        for i, tag := range []string{"initContainers", "containers"} {
            if initContainers, ok, _ := unstructured.NestedSlice(resource.UnstructuredContent(), "spec", tag); ok {
                img, err := convertToImageInfo(initContainers)
                if err != nil {
                    logger.WithName(tag).Error(err, "failed to extract image info")
                    continue
                }

                if i == 0 {
                    initContainersImgs = append(initContainersImgs, img...)
    for _, tag := range []string{"initContainers", "containers"} {
        switch resource.GetKind() {
        case "Pod":
            if containers, ok, _ := unstructured.NestedSlice(resource.UnstructuredContent(), "spec", tag); ok {
                if tag == "initContainers" {
                    initContainersImgs = extractImageInfos(containers, initContainersImgs, "/spec/initContainers", logger)
                } else {
                    containersImgs = append(containersImgs, img...)
                    containersImgs = extractImageInfos(containers, containersImgs, "/spec/containers", logger)
                }
            }
        }

    case "Deployment", "DaemonSet", "Job", "StatefulSet":
        for i, tag := range []string{"initContainers", "containers"} {
            if initContainers, ok, _ := unstructured.NestedSlice(resource.UnstructuredContent(), "spec", "template", "spec", tag); ok {
                img, err := convertToImageInfo(initContainers)
                if err != nil {
                    logger.WithName(tag).Error(err, "failed to extract image info")
                    continue
                }

                if i == 0 {
                    initContainersImgs = append(initContainersImgs, img...)
        case "CronJob":
            if containers, ok, _ := unstructured.NestedSlice(resource.UnstructuredContent(), "spec", "jobTemplate", "spec", "template", "spec", tag); ok {
                if tag == "initContainers" {
                    initContainersImgs = extractImageInfos(containers, initContainersImgs, "/spec/jobTemplate/spec/template/spec/initContainers", logger)
                } else {
                    containersImgs = append(containersImgs, img...)
                    containersImgs = extractImageInfos(containers, containersImgs, "/spec/jobTemplate/spec/template/spec/containers", logger)
                }
            }
        }

    case "CronJob":
        for i, tag := range []string{"initContainers", "containers"} {
            if initContainers, ok, _ := unstructured.NestedSlice(resource.UnstructuredContent(), "spec", "jobTemplate", "spec", "template", "spec", tag); ok {
                img, err := convertToImageInfo(initContainers)
                if err != nil {
                    logger.WithName(tag).Error(err, "failed to extract image info")
                    continue
                }

                if i == 0 {
                    initContainersImgs = append(initContainersImgs, img...)
        // handles "Deployment", "DaemonSet", "Job", "StatefulSet", and custom controllers with the same pattern
        default:
            if containers, ok, _ := unstructured.NestedSlice(resource.UnstructuredContent(), "spec", "template", "spec", tag); ok {
                if tag == "initContainers" {
                    initContainersImgs = extractImageInfos(containers, initContainersImgs, "/spec/template/spec/initContainers", logger)
                } else {
                    containersImgs = append(containersImgs, img...)
                    containersImgs = extractImageInfos(containers, containersImgs, "/spec/template/spec/containers", logger)
                }
            }
        }

@@ -103,49 +105,36 @@ func extractImageInfo(resource *unstructured.Unstructured, log logr.Logger) (ini
    return
}

func convertToImageInfo(containers []interface{}) (images []*containerImage, err error) {
    var errs []string
func extractImageInfos(containers []interface{}, images []*ContainerImage, jsonPath string, log logr.Logger) []*ContainerImage {
    img, err := convertToImageInfo(containers, jsonPath)
    if err != nil {
        log.Error(err, "failed to extract image info", "element", containers)
    }

    return append(images, img...)
}

func convertToImageInfo(containers []interface{}, jsonPath string) (images []*ContainerImage, err error) {
    var errs []string
    var index = 0
    for _, ctr := range containers {
        if container, ok := ctr.(map[string]interface{}); ok {
            repo, err := reference.Parse(container["image"].(string))
            name := container["name"].(string)
            image := container["image"].(string)
            jp := strings.Join([]string{jsonPath, strconv.Itoa(index), "image"}, "/")
            imageInfo, err := newImageInfo(image, jp)
            if err != nil {
                errs = append(errs, errors.Wrapf(err, "bad image: %s", container["image"].(string)).Error())
                errs = append(errs, err.Error())
                continue
            }

            var registry, name, tag, digest string
            if named, ok := repo.(reference.Named); ok {
                registry, name = reference.SplitHostname(named)
            }

            if tagged, ok := repo.(reference.Tagged); ok {
                tag = tagged.Tag()
            }

            if digested, ok := repo.(reference.Digested); ok {
                digest = digested.Digest().String()
            }

            // set default registry and tag
            if registry == "" {
                registry = "docker.io"
            }

            if tag == "" {
                tag = "latest"
            }

            images = append(images, &containerImage{
                Name: container["name"].(string),
                Image: imageInfo{
                    Registry: registry,
                    Name: name,
                    Tag: tag,
                    Digest: digest,
                },
            images = append(images, &ContainerImage{
                Name:  name,
                Image: imageInfo,
            })
        }

        index++
    }

    if len(errs) == 0 {

@@ -154,3 +143,49 @@ func convertToImageInfo(containers []interface{}) (images []*containerImage, err

    return images, errors.Errorf("%s", strings.Join(errs, ";"))
}

func newImageInfo(image, jsonPath string) (*ImageInfo, error) {
    image = addDefaultDomain(image)
    ref, err := reference.Parse(image)
    if err != nil {
        return nil, errors.Wrapf(err, "bad image: %s", image)
    }

    var registry, path, name, tag, digest string
    if named, ok := ref.(reference.Named); ok {
        registry = reference.Domain(named)
        path = reference.Path(named)
        name = path[strings.LastIndex(path, "/")+1:]
    }

    if tagged, ok := ref.(reference.Tagged); ok {
        tag = tagged.Tag()
    }

    if digested, ok := ref.(reference.Digested); ok {
        digest = digested.Digest().String()
    }

    // set default tag - the domain is set via addDefaultDomain before parsing
    if tag == "" {
        tag = "latest"
    }

    return &ImageInfo{
        Registry: registry,
        Name:     name,
        Path:     path,
        Tag:      tag,
        Digest:   digest,
        JSONPath: jsonPath,
    }, nil
}

func addDefaultDomain(name string) string {
    i := strings.IndexRune(name, '/')
    if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost" && strings.ToLower(name[:i]) == name[:i]) {
        return "docker.io/" + name
    }

    return name
}
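
A small standalone sketch of what the parsing above yields, using the same reference library (a bare name like "test/nginx" would first get "docker.io/" prepended by addDefaultDomain before this call):

    package main

    import (
        "fmt"
        "strings"

        "github.com/distribution/distribution/reference"
    )

    func main() {
        ref, err := reference.Parse("docker.io/test/nginx:v10.3")
        if err != nil {
            panic(err)
        }
        named := ref.(reference.Named)
        path := reference.Path(named)
        fmt.Println(reference.Domain(named))               // docker.io  -> ImageInfo.Registry
        fmt.Println(path)                                  // test/nginx -> ImageInfo.Path
        fmt.Println(path[strings.LastIndex(path, "/")+1:]) // nginx      -> ImageInfo.Name
        if tagged, ok := ref.(reference.Tagged); ok {
            fmt.Println(tagged.Tag()) // v10.3 -> ImageInfo.Tag
        }
    }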

@@ -11,21 +11,26 @@ import (
func Test_extractImageInfo(t *testing.T) {
    tests := []struct {
        raw            []byte
        containers     []*containerImage
        initContainers []*containerImage
        containers     []*ContainerImage
        initContainers []*ContainerImage
    }{
        {
            raw: []byte(`{"apiVersion": "v1","kind": "Pod","metadata": {"name": "myapp"},"spec": {"initContainers": [{"name": "init","image": "index.docker.io/busybox:v1.2.3"}],"containers": [{"name": "nginx","image": "nginx:latest"}]}}`),
            initContainers: []*containerImage{{Name: "init", Image: imageInfo{Registry: "index.docker.io", Name: "busybox", Tag: "v1.2.3"}}},
            containers: []*containerImage{{Name: "nginx", Image: imageInfo{Registry: "docker.io", Name: "nginx", Tag: "latest"}}},
            initContainers: []*ContainerImage{{Name: "init", Image: &ImageInfo{Registry: "index.docker.io", Name: "busybox", Path: "busybox", Tag: "v1.2.3", JSONPath: "/spec/initContainers/0/image"}}},
            containers: []*ContainerImage{{Name: "nginx", Image: &ImageInfo{Registry: "docker.io", Name: "nginx", Path: "nginx", Tag: "latest", JSONPath: "/spec/containers/0/image"}}},
        },
        {
            raw: []byte(`{"apiVersion": "v1","kind": "Pod","metadata": {"name": "myapp"},"spec": {"containers": [{"name": "nginx","image": "test/nginx:latest"}]}}`),
            initContainers: []*ContainerImage{},
            containers: []*ContainerImage{{Name: "nginx", Image: &ImageInfo{Registry: "docker.io", Name: "nginx", Path: "test/nginx", Tag: "latest", JSONPath: "/spec/containers/0/image"}}},
        },
        {
            raw: []byte(`{"apiVersion": "apps/v1","kind": "Deployment","metadata": {"name": "myapp"},"spec": {"selector": {"matchLabels": {"app": "myapp"}},"template": {"metadata": {"labels": {"app": "myapp"}},"spec": {"initContainers": [{"name": "init","image": "fictional.registry.example:10443/imagename:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}],"containers": [{"name": "myapp","image": "fictional.registry.example:10443/imagename"}]}}}}`),
            initContainers: []*containerImage{{Name: "init", Image: imageInfo{Registry: "fictional.registry.example:10443", Name: "imagename", Tag: "tag", Digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}}},
            containers: []*containerImage{{Name: "myapp", Image: imageInfo{Registry: "fictional.registry.example:10443", Name: "imagename", Tag: "latest"}}}},
            initContainers: []*ContainerImage{{Name: "init", Image: &ImageInfo{Registry: "fictional.registry.example:10443", Name: "imagename", Path: "imagename", Tag: "tag", JSONPath: "/spec/template/spec/initContainers/0/image", Digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}}},
            containers: []*ContainerImage{{Name: "myapp", Image: &ImageInfo{Registry: "fictional.registry.example:10443", Name: "imagename", Path: "imagename", Tag: "latest", JSONPath: "/spec/template/spec/containers/0/image"}}}},
        {
            raw: []byte(`{"apiVersion": "batch/v1beta1","kind": "CronJob","metadata": {"name": "hello"},"spec": {"schedule": "*/1 * * * *","jobTemplate": {"spec": {"template": {"spec": {"containers": [{"name": "hello","image": "b.gcr.io/test.example.com/my-app:test.example.com"}]}}}}}}`),
            containers: []*containerImage{{Name: "hello", Image: imageInfo{Registry: "b.gcr.io", Name: "test.example.com/my-app", Tag: "test.example.com"}}},
            raw: []byte(`{"apiVersion": "batch/v1beta1","kind": "CronJob","metadata": {"name": "hello"},"spec": {"schedule": "*/1 * * * *","jobTemplate": {"spec": {"template": {"spec": {"containers": [{"name": "hello","image": "test.example.com/test/my-app:v2"}]}}}}}}`),
            containers: []*ContainerImage{{Name: "hello", Image: &ImageInfo{Registry: "test.example.com", Name: "my-app", Path: "test/my-app", Tag: "v2", JSONPath: "/spec/jobTemplate/spec/template/spec/containers/0/image"}}},
        },
    }

@@ -35,11 +40,78 @@ func Test_extractImageInfo(t *testing.T) {

        init, container := extractImageInfo(resource, log.Log.WithName("TestExtractImageInfo"))
        if len(test.initContainers) > 0 {
            assert.Equal(t, test.initContainers, init, "unexpected initContainers", resource.GetName())
            assert.Equal(t, test.initContainers, init, "unexpected initContainers %s", resource.GetName())
        }

        if len(test.containers) > 0 {
            assert.Equal(t, test.containers, container, "unexpected containers", resource.GetName())
            assert.Equal(t, test.containers, container, "unexpected containers %s", resource.GetName())
        }
    }
}

func Test_ImageInfo_String(t *testing.T) {
    validateImageInfo(t,
        "registry.test.io/test/myapp:v1.2-21.g5523e95@sha256:31aaf12480bd08c54e7990c6b0e43d775a7a84603d2921a6de4abbc317b2fd10",
        "myapp",
        "test/myapp",
        "registry.test.io",
        "v1.2-21.g5523e95",
        "sha256:31aaf12480bd08c54e7990c6b0e43d775a7a84603d2921a6de4abbc317b2fd10",
        "registry.test.io/test/myapp:v1.2-21.g5523e95@sha256:31aaf12480bd08c54e7990c6b0e43d775a7a84603d2921a6de4abbc317b2fd10")

    validateImageInfo(t,
        "nginx",
        "nginx",
        "nginx",
        "docker.io",
        "latest",
        "",
        "docker.io/nginx:latest")

    validateImageInfo(t,
        "nginx:v10.3",
        "nginx",
        "nginx",
        "docker.io",
        "v10.3",
        "",
        "docker.io/nginx:v10.3")

    validateImageInfo(t,
        "docker.io/test/nginx:v10.3",
        "nginx",
        "test/nginx",
        "docker.io",
        "v10.3",
        "",
        "docker.io/test/nginx:v10.3")

    validateImageInfo(t,
        "test/nginx",
        "nginx",
        "test/nginx",
        "docker.io",
        "latest",
        "",
        "docker.io/test/nginx:latest")

    validateImageInfo(t,
        "localhost:4443/test/nginx",
        "nginx",
        "test/nginx",
        "localhost:4443",
        "latest",
        "",
        "localhost:4443/test/nginx:latest")
}

func validateImageInfo(t *testing.T, raw, name, path, registry, tag, digest, str string) {
    i1, err := newImageInfo(raw, "/spec/containers/0/image")
    assert.Nil(t, err)
    assert.Equal(t, name, i1.Name)
    assert.Equal(t, path, i1.Path)
    assert.Equal(t, registry, i1.Registry)
    assert.Equal(t, tag, i1.Tag)
    assert.Equal(t, digest, i1.Digest)
    assert.Equal(t, str, i1.String())
}

116 pkg/engine/imageVerify.go Normal file
@@ -0,0 +1,116 @@
package engine

import (
    "encoding/json"
    "fmt"
    "github.com/go-logr/logr"
    v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
    "github.com/kyverno/kyverno/pkg/cosign"
    "github.com/kyverno/kyverno/pkg/engine/context"
    "github.com/kyverno/kyverno/pkg/engine/response"
    "github.com/kyverno/kyverno/pkg/engine/utils"
    "github.com/minio/minio/pkg/wildcard"
    "sigs.k8s.io/controller-runtime/pkg/log"
    "time"
)

func VerifyAndPatchImages(policyContext *PolicyContext) (resp *response.EngineResponse) {
    resp = &response.EngineResponse{}
    images := policyContext.JSONContext.ImageInfo()
    if images == nil {
        return
    }

    policy := policyContext.Policy
    patchedResource := policyContext.NewResource
    logger := log.Log.WithName("EngineVerifyImages").WithValues("policy", policy.Name,
        "kind", patchedResource.GetKind(), "namespace", patchedResource.GetNamespace(), "name", patchedResource.GetName())

    if ManagedPodResource(policy, patchedResource) {
        logger.V(4).Info("container images for pods managed by workload controllers are already verified", "policy", policy.GetName())
        resp.PatchedResource = patchedResource
        return
    }

    startTime := time.Now()
    defer func() {
        buildResponse(logger, policyContext, resp, startTime)
        logger.V(4).Info("finished policy processing", "processingTime", resp.PolicyResponse.ProcessingTime.String(), "rulesApplied", resp.PolicyResponse.RulesAppliedCount)
    }()

    policyContext.JSONContext.Checkpoint()
    defer policyContext.JSONContext.Restore()

    for i := range policyContext.Policy.Spec.Rules {
        rule := policyContext.Policy.Spec.Rules[i]
        if len(rule.VerifyImages) == 0 {
            continue
        }

        if !matches(logger, rule, policyContext) {
            continue
        }

        policyContext.JSONContext.Restore()
        for _, imageVerify := range rule.VerifyImages {
            verifyAndPatchImages(logger, &rule, imageVerify, images.Containers, resp)
            verifyAndPatchImages(logger, &rule, imageVerify, images.InitContainers, resp)
        }
    }

    return
}

func verifyAndPatchImages(logger logr.Logger, rule *v1.Rule, imageVerify *v1.ImageVerification, images map[string]*context.ImageInfo, resp *response.EngineResponse) {
    imagePattern := imageVerify.Image
    key := imageVerify.Key

    for _, imageInfo := range images {
        image := imageInfo.String()
        if !wildcard.Match(imagePattern, image) {
            logger.V(4).Info("image does not match pattern", "image", image, "pattern", imagePattern)
            continue
        }

        logger.Info("verifying image", "image", image)
        incrementAppliedCount(resp)

        ruleResp := response.RuleResponse{
            Name: rule.Name,
            Type: utils.Validation.String(),
        }

        start := time.Now()
        digest, err := cosign.Verify(image, []byte(key), logger)
        if err != nil {
            logger.Info("failed to verify image", "image", image, "key", key, "error", err, "duration", time.Since(start).Seconds())
            ruleResp.Success = false
            ruleResp.Message = fmt.Sprintf("image verification failed for %s: %v", image, err)
        } else {
            logger.V(3).Info("verified image", "image", image, "digest", digest, "duration", time.Since(start).Seconds())
            ruleResp.Success = true
            ruleResp.Message = fmt.Sprintf("image %s verified", image)

            // add digest to image
            if imageInfo.Digest == "" {
                patch, err := makeAddDigestPatch(imageInfo, digest)
                if err != nil {
                    logger.Error(err, "failed to patch image with digest", "image", imageInfo.String(), "jsonPath", imageInfo.JSONPath)
                } else {
                    logger.V(4).Info("patching verified image with digest", "patch", string(patch))
                    ruleResp.Patches = [][]byte{patch}
                }
            }
        }

        resp.PolicyResponse.Rules = append(resp.PolicyResponse.Rules, ruleResp)
    }
}

func makeAddDigestPatch(imageInfo *context.ImageInfo, digest string) ([]byte, error) {
    var patch = make(map[string]interface{})
    patch["op"] = "replace"
    patch["path"] = imageInfo.JSONPath
    patch["value"] = imageInfo.String() + "@" + digest
    return json.Marshal(patch)
}
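
For a verified image at /spec/containers/0/image that carries no digest, makeAddDigestPatch above would marshal a JSON patch along these lines (the digest value is illustrative):

    {"op":"replace","path":"/spec/containers/0/image","value":"docker.io/nginx:latest@sha256:4a1c4b21597c1b4415bdbecb28a3296c6b5e23ca4f9feeb599860a1dac6a0108"}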

@@ -19,7 +19,7 @@ func CreateMutateHandler(ruleName string, mutate *kyverno.Mutation, patchedResou

    switch {
    case isPatchStrategicMerge(mutate):
        return newpatchStrategicMergeHandler(ruleName, mutate, patchedResource, context, logger)
        return newPatchStrategicMergeHandler(ruleName, mutate, patchedResource, context, logger)
    case isPatchesJSON6902(mutate):
        return newPatchesJSON6902Handler(ruleName, mutate, patchedResource, logger)
    case isOverlay(mutate):

@@ -27,9 +27,9 @@ func CreateMutateHandler(ruleName string, mutate *kyverno.Mutation, patchedResou
        mutate.PatchStrategicMerge = mutate.Overlay
        var a interface{}
        mutate.Overlay = a
        return newpatchStrategicMergeHandler(ruleName, mutate, patchedResource, context, logger)
        return newPatchStrategicMergeHandler(ruleName, mutate, patchedResource, context, logger)
    case isPatches(mutate):
        return newpatchesHandler(ruleName, mutate, patchedResource, context, logger)
        return newPatchesHandler(ruleName, mutate, patchedResource, context, logger)
    default:
        return newEmptyHandler(patchedResource)
    }

@@ -44,7 +44,7 @@ type patchStrategicMergeHandler struct {
    logger logr.Logger
}

func newpatchStrategicMergeHandler(ruleName string, mutate *kyverno.Mutation, patchedResource unstructured.Unstructured, context context.EvalInterface, logger logr.Logger) Handler {
func newPatchStrategicMergeHandler(ruleName string, mutate *kyverno.Mutation, patchedResource unstructured.Unstructured, context context.EvalInterface, logger logr.Logger) Handler {
    return patchStrategicMergeHandler{
        ruleName: ruleName,
        mutation: mutate,

@@ -123,7 +123,7 @@ type patchesHandler struct {
    logger logr.Logger
}

func newpatchesHandler(ruleName string, mutate *kyverno.Mutation, patchedResource unstructured.Unstructured, context context.EvalInterface, logger logr.Logger) Handler {
func newPatchesHandler(ruleName string, mutate *kyverno.Mutation, patchedResource unstructured.Unstructured, context context.EvalInterface, logger logr.Logger) Handler {
    return patchesHandler{
        ruleName: ruleName,
        mutation: mutate,

@@ -41,7 +41,7 @@ func Mutate(policyContext *PolicyContext) (resp *response.EngineResponse) {
    defer endMutateResultResponse(logger, resp, startTime)

    if ManagedPodResource(policy, patchedResource) {
        logger.V(5).Info("skip applying policy as direct changes to pods managed by workload controllers are not allowed", "policy", policy.GetName())
        logger.V(5).Info("changes to pods managed by workload controllers are not permitted", "policy", policy.GetName())
        resp.PatchedResource = patchedResource
        return
    }

@@ -50,15 +50,13 @@ func Mutate(policyContext *PolicyContext) (resp *response.EngineResponse) {
    defer policyContext.JSONContext.Restore()

    for _, rule := range policy.Spec.Rules {
        var ruleResponse response.RuleResponse
        logger := logger.WithValues("rule", rule.Name)
        if !rule.HasMutate() {
            continue
        }

        // check if the resource satisfies the filter conditions defined in the rule
        //TODO: this needs to be extracted, to filter the resource so that we can avoid passing resources that
        // don't satisfy a policy rule resource description
        var ruleResponse response.RuleResponse
        logger := logger.WithValues("rule", rule.Name)

        excludeResource := []string{}
        if len(policyContext.ExcludeGroupRole) > 0 {
            excludeResource = policyContext.ExcludeGroupRole

@@ -25,7 +25,7 @@ type PolicyResponse struct {
    PolicyStats `json:",inline"`
    // rule response
    Rules []RuleResponse `json:"rules"`
    // ValidationFailureAction: audit(default if not set),enforce
    // ValidationFailureAction: audit (default) or enforce
    ValidationFailureAction string
}
@@ -80,6 +80,7 @@ func JoinPatches(patches [][]byte) []byte {
            result = append(result, []byte(",\n")...)
        }
    }

    result = append(result, []byte("\n]")...)
    return result
}
@@ -113,6 +113,7 @@ func validateResource(log logr.Logger, ctx *PolicyContext) *response.EngineRespo
            log.V(2).Info("wrongfully configured data", "reason", err.Error())
            continue
        }

        // evaluate pre-conditions
        if !variables.EvaluateConditions(log, ctx.JSONContext, preconditionsCopy, true) {
            log.V(4).Info("resource fails the preconditions")

@@ -158,14 +158,14 @@ func (c *Controller) deletePolicy(obj interface{}) {

    for _, gr := range grList {
        for _, generatedResource := range gr.Status.GeneratedResources {
            logger.Info("retaining resource", "APIVersion", generatedResource.APIVersion, "Kind", generatedResource.Kind, "Name", generatedResource.Name, "Nmaespace", generatedResource.Namespace)
            logger.V(4).Info("retaining resource", "apiVersion", generatedResource.APIVersion, "kind", generatedResource.Kind, "name", generatedResource.Name, "namespace", generatedResource.Namespace)
        }
    }

    if !generatePolicyWithClone {
        grs, err := c.grLister.GetGenerateRequestsForClusterPolicy(p.Name)
        if err != nil {
            logger.Error(err, "failed to generate request CR for the policy", "name", p.Name)
            logger.Error(err, "failed to generate request for the policy", "name", p.Name)
            return
        }

@@ -361,7 +361,7 @@ func applyRule(log logr.Logger, client *dclient.Client, rule kyverno.Rule, resou
    logger.V(3).Info("applying generate rule", "mode", mode)

    if rdata == nil && mode == Update {
        logger.V(4).Info("no changes required for target resource")
        logger.V(4).Info("no changes required for generate target resource")
        return newGenResource, nil
    }

@@ -402,7 +402,7 @@ func applyRule(log logr.Logger, client *dclient.Client, rule kyverno.Rule, resou
            return noGenResource, err
        }

        logger.V(2).Info("generated target resource")
        logger.V(2).Info("created generate target resource")

    } else if mode == Update {
        if rule.Generation.Synchronize {

@@ -418,7 +418,7 @@ func applyRule(log logr.Logger, client *dclient.Client, rule kyverno.Rule, resou
            logger.Error(err, "failed to update resource")
            return noGenResource, err
        }
        logger.V(2).Info("updated target resource")
        logger.V(2).Info("updated generate target resource")
    }

    return newGenResource, nil

@@ -341,7 +341,7 @@ func printReportOrViolation(policyReport bool, validateEngineResponses []*respon
    resps := buildPolicyReports(validateEngineResponses, skippedPolicies)
    if len(resps) > 0 || resourcesLen == 0 {
        fmt.Println("----------------------------------------------------------------------\nPOLICY REPORT:\n----------------------------------------------------------------------")
        report, _ := generateCLIraw(resps)
        report, _ := generateCLIRaw(resps)
        yamlReport, _ := yaml1.Marshal(report)
        fmt.Println(string(yamlReport))
    } else {

@@ -11,8 +11,8 @@ import (
    "sigs.k8s.io/controller-runtime/pkg/log"
)

// generateCLIraw merges all policy reports to a singe cluster policy report
func generateCLIraw(reports []*unstructured.Unstructured) (*unstructured.Unstructured, error) {
// generateCLIRaw merges all policy reports to a single cluster policy report
func generateCLIRaw(reports []*unstructured.Unstructured) (*unstructured.Unstructured, error) {
    for _, report := range reports {
        if report.GetNamespace() != "" {
            report.SetNamespace("")

@@ -616,7 +616,7 @@ func PrintMutatedOutput(mutateLogPath string, mutateLogPathIsDir bool, yaml stri
    yaml = yaml + ("\n---\n\n")

    if !mutateLogPathIsDir {
        // truncation for the case when mutateLogPath is a file (not a directory) is handled under pkg/kyverno/apply/command.go
        // truncation for the case when mutateLogPath is a file (not a directory) is handled under pkg/kyverno/apply/test_command.go
        f, err = os.OpenFile(mutateLogPath, os.O_APPEND|os.O_WRONLY, 0644)
    } else {
        f, err = os.OpenFile(mutateLogPath+"/"+fileName+".yaml", os.O_CREATE|os.O_WRONLY, 0644)

@@ -321,34 +321,41 @@ func applyPoliciesFromPath(fs billy.Filesystem, policyBytes []byte, valuesFile s
        fmt.Printf("Error: failed to load policies\nCause: %s\n", err)
        os.Exit(1)
    }

    mutatedPolicies, err := common.MutatePolices(policies)
    if err != nil {
        if !sanitizederror.IsErrorSanitized(err) {
            return sanitizederror.NewWithError("failed to mutate policy", err)
        }
    }

    resources, err := common.GetResourceAccordingToResourcePath(fs, fullResourcePath, false, mutatedPolicies, dClient, "", false, isGit, policyResourcePath)
    if err != nil {
        fmt.Printf("Error: failed to load resources\nCause: %s\n", err)
        os.Exit(1)
    }

    msgPolicies := "1 policy"
    if len(mutatedPolicies) > 1 {
        msgPolicies = fmt.Sprintf("%d policies", len(policies))
    }

    msgResources := "1 resource"
    if len(resources) > 1 {
        msgResources = fmt.Sprintf("%d resources", len(resources))
    }

    if len(mutatedPolicies) > 0 && len(resources) > 0 {
        fmt.Printf("\napplying %s to %s... \n", msgPolicies, msgResources)
    }

    for _, policy := range mutatedPolicies {
        err := policy2.Validate(policy, nil, true, openAPIController)
        if err != nil {
            log.Log.V(3).Info(fmt.Sprintf("skipping policy %v as it is not valid", policy.Name), "error", err)
            log.Log.Error(err, "skipping invalid policy", "name", policy.Name)
            continue
        }

        matches := common.PolicyHasVariables(*policy)
        variable := common.RemoveDuplicateVariables(matches)
        if len(matches) > 0 && variablesString == "" && values.Variables == "" {

@@ -633,7 +633,7 @@ func validateUniqueRuleName(p kyverno.ClusterPolicy) (string, error) {

// validateRuleType checks only one type of rule is defined per rule
func validateRuleType(r kyverno.Rule) error {
    ruleTypes := []bool{r.HasMutate(), r.HasValidate(), r.HasGenerate()}
    ruleTypes := []bool{r.HasMutate(), r.HasValidate(), r.HasGenerate(), r.HasVerifyImages()}

    operationCount := func() int {
        count := 0

@@ -646,9 +646,9 @@ func validateRuleType(r kyverno.Rule) error {
    }()

    if operationCount == 0 {
        return fmt.Errorf("no operation defined in the rule '%s'.(supported operations: mutation,validation,generation)", r.Name)
        return fmt.Errorf("no operation defined in the rule '%s'.(supported operations: mutate,validate,generate,verifyImages)", r.Name)
    } else if operationCount != 1 {
        return fmt.Errorf("multiple operations defined in the rule '%s', only one type of operation is allowed per rule", r.Name)
        return fmt.Errorf("multiple operations defined in the rule '%s', only one operation (mutate,validate,generate,verifyImages) is allowed per rule", r.Name)
    }
    return nil
}
@@ -38,9 +38,17 @@ type policyCache struct {
 // Interface ...
-// Interface get method use for to get policy names and mostly use to test cache testcases
 type Interface interface {
+	// Add adds a policy to the cache
 	Add(policy *kyverno.ClusterPolicy)
+
+	// Remove removes a policy from the cache
 	Remove(policy *kyverno.ClusterPolicy)
-	GetPolicyObject(pkey PolicyType, kind string, nspace string) []*kyverno.ClusterPolicy
+
+	// GetPolicies returns all policies that apply to a namespace, including cluster-wide policies
+	// If the namespace is empty, only cluster-wide policies are returned
+	GetPolicies(pkey PolicyType, kind string, nspace string) []*kyverno.ClusterPolicy
+
 	get(pkey PolicyType, kind string, nspace string) []string
 }
@@ -51,6 +59,7 @@ func newPolicyCache(log logr.Logger, pLister kyvernolister.ClusterPolicyLister,
 		ValidateEnforce: make(map[string]bool),
 		ValidateAudit:   make(map[string]bool),
 		Generate:        make(map[string]bool),
+		VerifyImages:    make(map[string]bool),
 	}

 	return &policyCache{
@@ -74,8 +83,14 @@ func (pc *policyCache) Add(policy *kyverno.ClusterPolicy) {
 func (pc *policyCache) get(pkey PolicyType, kind, nspace string) []string {
 	return pc.pMap.get(pkey, kind, nspace)
 }
-func (pc *policyCache) GetPolicyObject(pkey PolicyType, kind, nspace string) []*kyverno.ClusterPolicy {
-	return pc.getPolicyObject(pkey, kind, nspace)
+func (pc *policyCache) GetPolicies(pkey PolicyType, kind, nspace string) []*kyverno.ClusterPolicy {
+	policies := pc.getPolicyObject(pkey, kind, "")
+	if nspace == "" {
+		return policies
+	}
+
+	nsPolicies := pc.getPolicyObject(pkey, kind, nspace)
+	return append(policies, nsPolicies...)
 }

 // Remove a policy from cache
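To make the new GetPolicies contract concrete, here is a minimal, self-contained sketch of a cache whose lookup merges cluster-wide entries with namespaced ones, the same shape as the change above. The type and data are illustrative only, not the Kyverno types.

package main

import "fmt"

// fakeCache stands in for policyCache; the "" key holds cluster-wide
// entries and each namespace key holds entries scoped to that namespace.
type fakeCache struct {
	entries map[string][]string // namespace ("" = cluster-wide) -> policy names
}

// GetPolicies mirrors the merge above: cluster-wide policies first, then
// the namespaced policies appended when a namespace is given.
func (c *fakeCache) GetPolicies(nspace string) []string {
	policies := c.entries[""]
	if nspace == "" {
		return policies
	}
	return append(policies, c.entries[nspace]...)
}

func main() {
	c := &fakeCache{entries: map[string][]string{
		"":     {"require-labels"},   // cluster-wide
		"apps": {"apps/add-sidecar"}, // namespaced
	}}
	fmt.Println(c.GetPolicies("apps")) // [require-labels apps/add-sidecar]
	fmt.Println(c.GetPolicies(""))     // [require-labels]
}

One caveat of this pattern: append can write into the backing array of the cluster-wide slice when it has spare capacity, so callers that mutate the returned slice may corrupt shared state; copying into a fresh slice avoids that.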
@@ -93,11 +108,14 @@ func (m *pMap) add(policy *kyverno.ClusterPolicy) {
 	validateEnforceMap := m.nameCacheMap[ValidateEnforce]
 	validateAuditMap := m.nameCacheMap[ValidateAudit]
 	generateMap := m.nameCacheMap[Generate]
+	imageVerifyMap := m.nameCacheMap[VerifyImages]

 	var pName = policy.GetName()
+	pSpace := policy.GetNamespace()
+	if pSpace != "" {
+		pName = pSpace + "/" + pName
+	}
+
 	for _, rule := range policy.Spec.Rules {

 		for _, gvk := range rule.MatchResources.Kinds {
@@ -115,6 +133,7 @@ func (m *pMap) add(policy *kyverno.ClusterPolicy) {
 			}
 			continue
 		}

 		if rule.HasValidate() {
 			if enforcePolicy {
 				if !validateEnforceMap[kind+"/"+pName] {
@@ -142,12 +161,23 @@ func (m *pMap) add(policy *kyverno.ClusterPolicy) {
 			}
 			continue
 		}

+			if rule.HasVerifyImages() {
+				if !imageVerifyMap[kind+"/"+pName] {
+					imageVerifyMap[kind+"/"+pName] = true
+					imageVerifyMapPolicy := m.kindDataMap[kind][VerifyImages]
+					m.kindDataMap[kind][VerifyImages] = append(imageVerifyMapPolicy, pName)
+				}
+				continue
+			}
 		}
 	}

 	m.nameCacheMap[Mutate] = mutateMap
 	m.nameCacheMap[ValidateEnforce] = validateEnforceMap
 	m.nameCacheMap[ValidateAudit] = validateAuditMap
 	m.nameCacheMap[Generate] = generateMap
+	m.nameCacheMap[VerifyImages] = imageVerifyMap
 }

 func (pc *pMap) get(key PolicyType, gvk, namespace string) (names []string) {
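The imageVerifyMap bookkeeping above follows a common Go idiom: a boolean "seen" set keyed by kind+"/"+policyName guards the per-kind slice so a policy with several matching rules is indexed only once. A minimal sketch of the idiom with illustrative data, not the Kyverno maps:

package main

import "fmt"

func main() {
	seen := map[string]bool{}       // kind/policyName pairs already indexed
	byKind := map[string][]string{} // kind -> policy names

	add := func(kind, pName string) {
		key := kind + "/" + pName
		if seen[key] {
			return // a second rule in the same policy must not duplicate the entry
		}
		seen[key] = true
		byKind[kind] = append(byKind[kind], pName)
	}

	add("Pod", "verify-image-sig")
	add("Pod", "verify-image-sig") // ignored: already recorded
	add("Deployment", "verify-image-sig")
	fmt.Println(byKind["Pod"], byKind["Deployment"]) // [verify-image-sig] [verify-image-sig]
}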
@@ -9,4 +9,5 @@ const (
 	ValidateEnforce
 	ValidateAudit
 	Generate
+	VerifyImages
 )
@@ -412,13 +412,14 @@ func generateRulePatches(policy kyverno.ClusterPolicy, controllers string, log l
 // https://github.com/kyverno/kyverno/issues/568

 type kyvernoRule struct {
-	Name             string                    `json:"name"`
-	MatchResources   *kyverno.MatchResources   `json:"match"`
-	ExcludeResources *kyverno.ExcludeResources `json:"exclude,omitempty"`
-	Context          *[]kyverno.ContextEntry   `json:"context,omitempty"`
-	AnyAllConditions *apiextensions.JSON       `json:"preconditions,omitempty"`
-	Mutation         *kyverno.Mutation         `json:"mutate,omitempty"`
-	Validation       *kyverno.Validation       `json:"validate,omitempty"`
+	Name             string                       `json:"name"`
+	MatchResources   *kyverno.MatchResources      `json:"match"`
+	ExcludeResources *kyverno.ExcludeResources    `json:"exclude,omitempty"`
+	Context          *[]kyverno.ContextEntry      `json:"context,omitempty"`
+	AnyAllConditions *apiextensions.JSON          `json:"preconditions,omitempty"`
+	Mutation         *kyverno.Mutation            `json:"mutate,omitempty"`
+	Validation       *kyverno.Validation          `json:"validate,omitempty"`
+	VerifyImages     []*kyverno.ImageVerification `json:"verifyImages,omitempty" yaml:"verifyImages,omitempty"`
 }

 func generateRuleForControllers(rule kyverno.Rule, controllers string, log logr.Logger) kyvernoRule {
@@ -467,6 +468,7 @@ func generateRuleForControllers(rule kyverno.Rule, controllers string, log logr.
 	if len(name) > 63 {
 		name = name[:63]
 	}

 	controllerRule := &kyvernoRule{
 		Name:           name,
 		MatchResources: match.DeepCopy(),
@@ -561,6 +563,16 @@ func generateRuleForControllers(rule kyverno.Rule, controllers string, log logr.
 		return *controllerRule
 	}

+	if rule.VerifyImages != nil {
+		newVerifyImages := make([]*kyverno.ImageVerification, len(rule.VerifyImages))
+		for i, vi := range rule.VerifyImages {
+			newVerifyImages[i] = vi.DeepCopy()
+		}
+
+		controllerRule.VerifyImages = newVerifyImages
+		return *controllerRule
+	}
+
 	return kyvernoRule{}
 }
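The copy above allocates a new slice and DeepCopy-ies each element rather than assigning rule.VerifyImages directly: with a slice of pointers, a shallow assignment would leave the generated controller rule aliasing the original rule's objects. A small sketch of the difference, using a stand-in type rather than the real ImageVerification struct:

package main

import "fmt"

// imageVerification is a stand-in for the real API type.
type imageVerification struct{ Image, Key string }

func (iv *imageVerification) DeepCopy() *imageVerification {
	out := *iv // the stand-in has no nested pointers, so a value copy suffices
	return &out
}

func main() {
	src := []*imageVerification{{Image: "ghcr.io/app", Key: "k1"}}

	dst := make([]*imageVerification, len(src))
	for i, vi := range src {
		dst[i] = vi.DeepCopy()
	}

	dst[0].Key = "k2"                   // mutating the copy...
	fmt.Println(src[0].Key, dst[0].Key) // ...leaves the source intact: k1 k2
}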
@@ -87,7 +87,7 @@ func generateAnnotationPatches(engineResponses []*response.EngineResponse, log l
 	// check the patch
 	_, err := jsonpatch.DecodePatch([]byte("[" + string(patchByte) + "]"))
 	if err != nil {
-		log.Error(err, "failed o build JSON patch for annotation", "patch", string(patchByte))
+		log.Error(err, "failed to build JSON patch for annotation", "patch", string(patchByte))
 	}

 	return patchByte
@@ -34,6 +34,7 @@ func toBlockResource(engineReponses []*response.EngineResponse, log logr.Logger)
 			return true
 		}
 	}

 	log.V(4).Info("spec.ValidationFailureAction set to audit for all applicable policies, won't block resource operation")
 	return false
 }
@@ -50,8 +51,8 @@ func getEnforceFailureErrorMsg(engineResponses []*response.EngineResponse) strin
 				ruleToReason[rule.Name] = rule.Message
 			}
 		}
-		resourceName = fmt.Sprintf("%s/%s/%s", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)

+		resourceName = fmt.Sprintf("%s/%s/%s", er.PolicyResponse.Resource.Kind, er.PolicyResponse.Resource.Namespace, er.PolicyResponse.Resource.Name)
 		policyToRule[er.PolicyResponse.Policy.Name] = ruleToReason
 	}
 }
@@ -107,10 +108,12 @@ func processResourceWithPatches(patch []byte, resource []byte, log logr.Logger)
 		log.Error(err, "failed to patch resource:", "patch", string(patch), "resource", string(resource))
 		return nil
 	}

+	log.V(6).Info("", "patchedResource", string(resource))
 	return resource
 }

-func containRBACInfo(policies ...[]*kyverno.ClusterPolicy) bool {
+func containsRBACInfo(policies ...[]*kyverno.ClusterPolicy) bool {
 	for _, policySlice := range policies {
 		for _, policy := range policySlice {
 			for _, rule := range policy.Spec.Rules {
@@ -35,10 +35,28 @@ import (
 	"k8s.io/apimachinery/pkg/labels"
 )

-//HandleGenerate handles admission-requests for policies with generate rules
-func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy, ctx *context.Context, userRequestInfo kyverno.RequestInfo, dynamicConfig config.Interface, admissionRequestTimestamp int64, latencySender *chan int64, triggeredGeneratePoliciesSender *chan []kyverno.ClusterPolicy, generateEngineResponsesSender *chan []*response.EngineResponse) {
+func (ws *WebhookServer) applyGeneratePolicies(request *v1beta1.AdmissionRequest, policyContext *engine.PolicyContext, policies []*v1.ClusterPolicy, ts int64, logger logr.Logger) {
+	admissionReviewCompletionLatencyChannel := make(chan int64, 1)
+	triggeredGeneratePoliciesChannel := make(chan []v1.ClusterPolicy, 1)
+	generateEngineResponsesChannel := make(chan []*response.EngineResponse, 1)
+	go ws.handleGenerate(request, policies, policyContext.JSONContext, policyContext.AdmissionInfo, ws.configHandler, ts, &admissionReviewCompletionLatencyChannel, &triggeredGeneratePoliciesChannel, &generateEngineResponsesChannel)
+	go registerAdmissionReviewLatencyMetricGenerate(logger, *ws.promConfig.Metrics, string(request.Operation), ts, &admissionReviewCompletionLatencyChannel, &triggeredGeneratePoliciesChannel, &generateEngineResponsesChannel)
+}
+
+//handleGenerate handles admission-requests for policies with generate rules
+func (ws *WebhookServer) handleGenerate(
+	request *v1beta1.AdmissionRequest,
+	policies []*kyverno.ClusterPolicy,
+	ctx *context.Context,
+	userRequestInfo kyverno.RequestInfo,
+	dynamicConfig config.Interface,
+	admissionRequestTimestamp int64,
+	latencySender *chan int64,
+	triggeredGeneratePoliciesSender *chan []kyverno.ClusterPolicy,
+	generateEngineResponsesSender *chan []*response.EngineResponse) {

 	logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
-	logger.V(4).Info("incoming request")
+	logger.V(6).Info("generate request")

 	var engineResponses []*response.EngineResponse
 	var triggeredGeneratePolicies []kyverno.ClusterPolicy
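applyGeneratePolicies above wires two goroutines together with buffered channels of capacity one: handleGenerate produces the latency, triggered policies, and engine responses, and the metrics goroutine consumes them without blocking the webhook response path. A reduced sketch of that handoff, with the payload types simplified:

package main

import (
	"fmt"
	"time"
)

func main() {
	latencyCh := make(chan int64, 1) // capacity 1: the producer never blocks
	policiesCh := make(chan []string, 1)

	// producer: stands in for handleGenerate
	go func() {
		start := time.Now()
		policiesCh <- []string{"generate-netpol"}
		latencyCh <- int64(time.Since(start))
	}()

	// consumer: stands in for the metrics registration goroutine
	done := make(chan struct{})
	go func() {
		defer close(done)
		fmt.Println("triggered:", <-policiesCh)
		fmt.Println("latency(ns):", <-latencyCh)
	}()

	<-done // only for the sketch; the webhook itself does not wait here
}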
@@ -102,7 +120,7 @@ func (ws *WebhookServer) HandleGenerate(request *v1beta1.AdmissionRequest, polic
 	}

 	if request.Operation == v1beta1.Update {
-		ws.handleUpdate(request, policies)
+		ws.handleUpdatesForGenerateRules(request, policies)
 	}

 	// sending the admission request latency to other goroutine (reporting the metrics) over the channel
@@ -132,9 +150,13 @@ func (ws *WebhookServer) registerPolicyRuleExecutionLatencyMetricGenerate(logger
 	}
 }

-//handleUpdate handles admission-requests for update
-func (ws *WebhookServer) handleUpdate(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy) {
-	logger := ws.log.WithValues("action", "generation", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
+//handleUpdatesForGenerateRules handles admission-requests for update
+func (ws *WebhookServer) handleUpdatesForGenerateRules(request *v1beta1.AdmissionRequest, policies []*kyverno.ClusterPolicy) {
+	if request.Operation != v1beta1.Update {
+		return
+	}
+
+	logger := ws.log.WithValues("action", "generate", "uid", request.UID, "kind", request.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
 	resource, err := enginutils.ConvertToUnstructured(request.OldObject.Raw)
 	if err != nil {
 		logger.Error(err, "failed to convert object resource to unstructured format")
@@ -142,16 +164,16 @@ func (ws *WebhookServer) handleUpdate(request *v1beta1.AdmissionRequest, policie

 	resLabels := resource.GetLabels()
 	if resLabels["generate.kyverno.io/clone-policy-name"] != "" {
-		ws.handleUpdateCloneSourceResource(resLabels, logger)
+		ws.handleUpdateGenerateSourceResource(resLabels, logger)
 	}

 	if resLabels["app.kubernetes.io/managed-by"] == "kyverno" && resLabels["policy.kyverno.io/synchronize"] == "enable" && request.Operation == v1beta1.Update {
-		ws.handleUpdateTargetResource(request, policies, resLabels, logger)
+		ws.handleUpdateGenerateTargetResource(request, policies, resLabels, logger)
 	}
 }

-//handleUpdateCloneSourceResource - handles update of clone source for generate policy
-func (ws *WebhookServer) handleUpdateCloneSourceResource(resLabels map[string]string, logger logr.Logger) {
+//handleUpdateGenerateSourceResource - handles update of clone source for generate policy
+func (ws *WebhookServer) handleUpdateGenerateSourceResource(resLabels map[string]string, logger logr.Logger) {
 	policyNames := strings.Split(resLabels["generate.kyverno.io/clone-policy-name"], ",")
 	for _, policyName := range policyNames {
@@ -159,7 +181,7 @@ func (ws *WebhookServer) handleUpdateCloneSourceResource(resLabels map[string]st
 		_, err := ws.kyvernoClient.KyvernoV1().ClusterPolicies().Get(contextdefault.TODO(), policyName, metav1.GetOptions{})
 		if err != nil {
 			if strings.Contains(err.Error(), "not found") {
-				logger.V(4).Info("skipping updation of generate request as policy is deleted")
+				logger.V(4).Info("skipping update of generate request as policy is deleted")
 			} else {
 				logger.Error(err, "failed to get generate policy", "Name", policyName)
 			}
@@ -198,8 +220,8 @@ func (ws *WebhookServer) updateAnnotationInGR(gr *v1.GenerateRequest, logger log
 	}
 }

-//handleUpdateTargetResource - handles update of target resource for generate policy
-func (ws *WebhookServer) handleUpdateTargetResource(request *v1beta1.AdmissionRequest, policies []*v1.ClusterPolicy, resLabels map[string]string, logger logr.Logger) {
+//handleUpdateGenerateTargetResource - handles update of target resource for generate policy
+func (ws *WebhookServer) handleUpdateGenerateTargetResource(request *v1beta1.AdmissionRequest, policies []*v1.ClusterPolicy, resLabels map[string]string, logger logr.Logger) {
 	enqueueBool := false
 	newRes, err := enginutils.ConvertToUnstructured(request.Object.Raw)
 	if err != nil {
@@ -1,37 +1,48 @@
 package webhooks

 import (
-	"errors"
+	"fmt"
 	"reflect"
 	"sort"
 	"time"

-	"github.com/kyverno/kyverno/pkg/common"
-
 	"github.com/go-logr/logr"
 	kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
+	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
+	"github.com/kyverno/kyverno/pkg/common"
 	"github.com/kyverno/kyverno/pkg/engine"
-	"github.com/kyverno/kyverno/pkg/engine/context"
 	"github.com/kyverno/kyverno/pkg/engine/response"
 	engineutils "github.com/kyverno/kyverno/pkg/engine/utils"
 	"github.com/kyverno/kyverno/pkg/metrics"
 	policyRuleExecutionLatency "github.com/kyverno/kyverno/pkg/metrics/policyruleexecutionlatency"
 	policyRuleResults "github.com/kyverno/kyverno/pkg/metrics/policyruleresults"
 	"github.com/kyverno/kyverno/pkg/utils"
+	"github.com/pkg/errors"
 	v1beta1 "k8s.io/api/admission/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )

-// HandleMutation handles mutating webhook admission request
+func (ws *WebhookServer) applyMutatePolicies(request *v1beta1.AdmissionRequest, policyContext *engine.PolicyContext, policies []*v1.ClusterPolicy, ts int64, logger logr.Logger) []byte {
+	var triggeredMutatePolicies []v1.ClusterPolicy
+	var mutateEngineResponses []*response.EngineResponse
+
+	mutatePatches, triggeredMutatePolicies, mutateEngineResponses := ws.handleMutation(request, policyContext, policies, ts)
+	logger.V(6).Info("", "generated patches", string(mutatePatches))
+
+	admissionReviewLatencyDuration := int64(time.Since(time.Unix(ts, 0)))
+	go registerAdmissionReviewLatencyMetricMutate(logger, *ws.promConfig.Metrics, string(request.Operation), mutateEngineResponses, triggeredMutatePolicies, admissionReviewLatencyDuration, ts)
+
+	return mutatePatches
+}
+
+// handleMutation handles mutating webhook admission request
+// return value: generated patches, triggered policies, engine responses corresponding to the triggered policies
-func (ws *WebhookServer) HandleMutation(
+func (ws *WebhookServer) handleMutation(
 	request *v1beta1.AdmissionRequest,
-	resource unstructured.Unstructured,
+	policyContext *engine.PolicyContext,
 	policies []*kyverno.ClusterPolicy,
-	ctx *context.Context,
-	patchedResource []byte,
-	userRequestInfo kyverno.RequestInfo,
 	admissionRequestTimestamp int64) ([]byte, []kyverno.ClusterPolicy, []*response.EngineResponse) {

 	if len(policies) == 0 {
@@ -44,6 +55,8 @@ func (ws *WebhookServer) HandleMutation(
 	}

 	logger := ws.log.WithValues("action", "mutate", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind.String())

+	patchedResource := request.Object.Raw
 	newR, oldR, err := utils.ExtractResources(patchedResource, request)
 	if err != nil {
 		// as resource cannot be parsed, we skip processing
@@ -63,43 +76,18 @@ func (ws *WebhookServer) HandleMutation(
 	var patches [][]byte
 	var engineResponses []*response.EngineResponse
 	var triggeredPolicies []kyverno.ClusterPolicy
-	policyContext := &engine.PolicyContext{
-		NewResource:         resource,
-		AdmissionInfo:       userRequestInfo,
-		ExcludeGroupRole:    ws.configHandler.GetExcludeGroupRole(),
-		ExcludeResourceFunc: ws.configHandler.ToFilter,
-		ResourceCache:       ws.resCache,
-		JSONContext:         ctx,
-		Client:              ws.client,
-	}
-
-	if request.Operation == v1beta1.Update {
-		// set OldResource to inform engine of operation type
-		policyContext.OldResource = resource
-	}

 	for _, policy := range policies {
-		logger.V(3).Info("evaluating policy", "policy", policy.Name)
-
-		policyContext.Policy = *policy
-		if request.Kind.Kind != "Namespace" && request.Namespace != "" {
-			policyContext.NamespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, ws.nsLister, logger)
-		}
-		engineResponse := engine.Mutate(policyContext)
-		policyPatches := engineResponse.GetPatches()
-
-		if engineResponse.PolicyResponse.RulesAppliedCount > 0 && len(policyPatches) > 0 {
-			ws.statusListener.Update(mutateStats{resp: engineResponse, namespace: policy.Namespace})
-		}
-
-		if !engineResponse.IsSuccessful() && len(engineResponse.GetFailedRules()) > 0 {
-			logger.Error(errors.New("some rules failed"), "failed to apply policy", "policy", policy.Name, "failed rules", engineResponse.GetFailedRules())
+		if !policy.HasMutate() {
 			continue
 		}

-		err := ws.openAPIController.ValidateResource(*engineResponse.PatchedResource.DeepCopy(), engineResponse.PatchedResource.GetAPIVersion(), engineResponse.PatchedResource.GetKind())
+		logger.V(3).Info("applying policy mutate rules", "policy", policy.Name)
+		policyContext.Policy = *policy
+		engineResponse, policyPatches, err := ws.applyMutation(request, policyContext, logger)
 		if err != nil {
-			logger.Info("validation error", "policy", policy.Name, "error", err.Error())
+			// TODO report errors in engineResponse and record in metrics
+			logger.Error(err, "mutate error")
 			continue
 		}
@@ -153,6 +141,31 @@ func (ws *WebhookServer) HandleMutation(
 	return engineutils.JoinPatches(patches), triggeredPolicies, engineResponses
 }

+func (ws *WebhookServer) applyMutation(request *v1beta1.AdmissionRequest, policyContext *engine.PolicyContext, logger logr.Logger) (*response.EngineResponse, [][]byte, error) {
+	if request.Kind.Kind != "Namespace" && request.Namespace != "" {
+		policyContext.NamespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(
+			request.Kind.Kind, request.Namespace, ws.nsLister, logger)
+	}
+
+	engineResponse := engine.Mutate(policyContext)
+	policyPatches := engineResponse.GetPatches()
+
+	if engineResponse.PolicyResponse.RulesAppliedCount > 0 && len(policyPatches) > 0 {
+		ws.statusListener.Update(mutateStats{resp: engineResponse, namespace: policyContext.Policy.Namespace})
+	}
+
+	if !engineResponse.IsSuccessful() && len(engineResponse.GetFailedRules()) > 0 {
+		return nil, nil, fmt.Errorf("failed to apply policy %s rules %v", policyContext.Policy.Name, engineResponse.GetFailedRules())
+	}
+
+	err := ws.openAPIController.ValidateResource(*engineResponse.PatchedResource.DeepCopy(), engineResponse.PatchedResource.GetAPIVersion(), engineResponse.PatchedResource.GetKind())
+	if err != nil {
+		return nil, nil, errors.Wrapf(err, "failed to validate resource mutated by policy %s", policyContext.Policy.Name)
+	}
+
+	return engineResponse, policyPatches, nil
+}
+
 func (ws *WebhookServer) registerPolicyRuleResultsMetricMutation(logger logr.Logger, resourceRequestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
 	resourceRequestOperationPromAlias, err := policyRuleResults.ParseResourceRequestOperation(resourceRequestOperation)
 	if err != nil {
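The refactor above pulls the per-policy body of the mutation loop into applyMutation, turning the mixed logging-and-continue flow into a single error return that the caller handles uniformly. In miniature, the shape is as follows; the types are illustrative:

package main

import (
	"errors"
	"fmt"
)

type policy struct {
	name    string
	mutable bool
}

// applyOne mirrors applyMutation: do the work for one policy and surface
// any failure as an error instead of logging at several call sites.
func applyOne(p policy) (string, error) {
	if !p.mutable {
		return "", errors.New("policy has no mutate rule")
	}
	return "patch-for-" + p.name, nil
}

func main() {
	for _, p := range []policy{{"add-labels", true}, {"broken", false}} {
		patch, err := applyOne(p)
		if err != nil {
			fmt.Println("skipping", p.name+":", err) // the caller decides: log and continue
			continue
		}
		fmt.Println("applied", patch)
	}
}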
@@ -19,7 +19,6 @@ func (ws *WebhookServer) policyMutation(request *v1beta1.AdmissionRequest) *v1be
 	var policy *kyverno.ClusterPolicy
 	raw := request.Object.Raw

-	//TODO: can this happen? wont this be picked by OpenAPI spec schema ?
 	if err := json.Unmarshal(raw, &policy); err != nil {
 		logger.Error(err, "failed to unmarshal policy admission request")
 		return &v1beta1.AdmissionResponse{
@@ -9,6 +9,8 @@ import (
 	"net/http"
 	"time"

+	"github.com/kyverno/kyverno/pkg/engine"
+
 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
 	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
@@ -209,7 +211,7 @@ func NewWebhookServer(
 	}

 	mux := httprouter.New()
-	mux.HandlerFunc("POST", config.MutatingWebhookServicePath, ws.handlerFunc(ws.ResourceMutation, true))
+	mux.HandlerFunc("POST", config.MutatingWebhookServicePath, ws.handlerFunc(ws.resourceMutation, true))
 	mux.HandlerFunc("POST", config.ValidatingWebhookServicePath, ws.handlerFunc(ws.resourceValidation, true))
 	mux.HandlerFunc("POST", config.PolicyMutatingWebhookServicePath, ws.handlerFunc(ws.policyMutation, true))
 	mux.HandlerFunc("POST", config.PolicyValidatingWebhookServicePath, ws.handlerFunc(ws.policyValidation, true))
@@ -292,102 +294,143 @@ func writeResponse(rw http.ResponseWriter, admissionReview *v1beta1.AdmissionRev
 	}
 }

-// ResourceMutation mutates resource
-func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
-	logger := ws.log.WithName("ResourceMutation").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())
+// resourceMutation mutates resource
+func (ws *WebhookServer) resourceMutation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
+	logger := ws.log.WithName("MutateWebhook").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation, "gvk", request.Kind.String())

 	if excludeKyvernoResources(request.Kind.Kind) {
-		return &v1beta1.AdmissionResponse{
-			Allowed: true,
-			Result: &metav1.Status{
-				Status: "Success",
-			},
-		}
+		return successResponse(nil)
 	}

-	logger.V(6).Info("received an admission request in mutating webhook")
-	// timestamp at which this admission request got triggered
-	admissionRequestTimestamp := time.Now().Unix()
-	mutatePolicies := ws.pCache.GetPolicyObject(policycache.Mutate, request.Kind.Kind, "")
-	generatePolicies := ws.pCache.GetPolicyObject(policycache.Generate, request.Kind.Kind, "")
+	logger.V(4).Info("received an admission request in mutating webhook")
+	requestTime := time.Now().Unix()

-	// Get namespace policies from the cache for the requested resource namespace
-	nsMutatePolicies := ws.pCache.GetPolicyObject(policycache.Mutate, request.Kind.Kind, request.Namespace)
-	mutatePolicies = append(mutatePolicies, nsMutatePolicies...)
+	mutatePolicies := ws.pCache.GetPolicies(policycache.Mutate, request.Kind.Kind, request.Namespace)
+	generatePolicies := ws.pCache.GetPolicies(policycache.Generate, request.Kind.Kind, request.Namespace)
+	verifyImagesPolicies := ws.pCache.GetPolicies(policycache.VerifyImages, request.Kind.Kind, request.Namespace)

-	// convert RAW to unstructured
-	resource, err := utils.ConvertResource(request.Object.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
+	if len(mutatePolicies) == 0 && len(generatePolicies) == 0 && len(verifyImagesPolicies) == 0 {
+		logger.V(4).Info("no policies matched admission request")
+		if request.Operation == v1beta1.Update {
+			// handle generate source resource updates
+			go ws.handleUpdatesForGenerateRules(request, []*v1.ClusterPolicy{})
+		}
+
+		return successResponse(nil)
+	}
+
+	addRoles := containsRBACInfo(mutatePolicies, generatePolicies)
+	policyContext, err := ws.buildPolicyContext(request, addRoles)
 	if err != nil {
-		logger.Error(err, "failed to convert RAW resource to unstructured format")
-		return &v1beta1.AdmissionResponse{
-			Allowed: false,
-			Result: &metav1.Status{
-				Status:  "Failure",
-				Message: err.Error(),
-			},
-		}
+		logger.Error(err, "failed to build policy context")
+		return failureResponse(err.Error())
 	}

-	var roles, clusterRoles []string
-	// getRoleRef only if policy has roles/clusterroles defined
-	if containRBACInfo(mutatePolicies, generatePolicies) {
-		if roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler); err != nil {
-			logger.Error(err, "failed to get RBAC information for request")
-		}
+	mutatePatches := ws.applyMutatePolicies(request, policyContext, mutatePolicies, requestTime, logger)
+
+	newRequest := patchRequest(mutatePatches, request, logger)
+	imagePatches, err := ws.applyImageVerifyPolicies(newRequest, policyContext, verifyImagesPolicies, logger)
+	if err != nil {
+		logger.Error(err, "image verification failed")
+		return failureResponse(err.Error())
 	}
+
+	newRequest = patchRequest(imagePatches, newRequest, logger)
+	ws.applyGeneratePolicies(newRequest, policyContext, generatePolicies, requestTime, logger)
+
+	var patches = append(mutatePatches, imagePatches...)
+	return successResponse(patches)
+}
+
+// patchRequest applies patches to the request.Object and returns a new copy of the request
+func patchRequest(patches []byte, request *v1beta1.AdmissionRequest, logger logr.Logger) *v1beta1.AdmissionRequest {
+	patchedResource := processResourceWithPatches(patches, request.Object.Raw, logger)
+	newRequest := request.DeepCopy()
+	newRequest.Object.Raw = patchedResource
+	return newRequest
+}
+
+func (ws *WebhookServer) buildPolicyContext(request *v1beta1.AdmissionRequest, addRoles bool) (*engine.PolicyContext, error) {
 	userRequestInfo := v1.RequestInfo{
-		Roles:             roles,
-		ClusterRoles:      clusterRoles,
 		AdmissionUserInfo: *request.UserInfo.DeepCopy(),
 	}

+	if addRoles {
+		if roles, clusterRoles, err := userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler); err != nil {
+			return nil, errors.Wrap(err, "failed to fetch RBAC information for request")
+		} else {
+			userRequestInfo.Roles = roles
+			userRequestInfo.ClusterRoles = clusterRoles
+		}
+	}
+
 	ctx, err := newVariablesContext(request, &userRequestInfo)
 	if err != nil {
-		logger.Error(err, "unable to build variable context")
+		return nil, errors.Wrap(err, "failed to create policy rule context")
 	}

+	// convert RAW to unstructured
+	resource, err := utils.ConvertResource(request.Object.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
+	if err != nil {
+		return nil, errors.Wrap(err, "failed to convert raw resource to unstructured format")
+	}
+
 	if err := ctx.AddImageInfo(&resource); err != nil {
-		logger.Error(err, "unable to add image info to variables context")
+		return nil, errors.Wrap(err, "failed to add image information to the policy rule context")
 	}

-	var patches []byte
-	patchedResource := request.Object.Raw
 	policyContext := &engine.PolicyContext{
 		NewResource:         resource,
 		AdmissionInfo:       userRequestInfo,
 		ExcludeGroupRole:    ws.configHandler.GetExcludeGroupRole(),
 		ExcludeResourceFunc: ws.configHandler.ToFilter,
 		ResourceCache:       ws.resCache,
 		JSONContext:         ctx,
 		Client:              ws.client,
 	}

-	// MUTATION
-	var triggeredMutatePolicies []v1.ClusterPolicy
-	var mutateEngineResponses []*response.EngineResponse
 	if request.Operation == v1beta1.Update {
 		policyContext.OldResource = resource
 	}

-	patches, triggeredMutatePolicies, mutateEngineResponses = ws.HandleMutation(request, resource, mutatePolicies, ctx, patchedResource, userRequestInfo, admissionRequestTimestamp)
-	logger.V(6).Info("", "generated patches", string(patches))
+	return policyContext, nil
+}

-	// patch the resource with patches before handling validation rules
-	patchedResource = processResourceWithPatches(patches, request.Object.Raw, logger)
-	logger.V(6).Info("", "patchedResource", string(patchedResource))
-	admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
-	// registering the kyverno_admission_review_latency_milliseconds metric concurrently
-	go registerAdmissionReviewLatencyMetricMutate(logger, *ws.promConfig.Metrics, string(request.Operation), mutateEngineResponses, triggeredMutatePolicies, admissionReviewLatencyDuration, admissionRequestTimestamp)
-
-	// GENERATE
-	newRequest := request.DeepCopy()
-	newRequest.Object.Raw = patchedResource
-
-	// this channel will be used to transmit the admissionReviewLatency from ws.HandleGenerate(..,) goroutine to registeGeneraterPolicyAdmissionReviewLatencyMetric(...) goroutine
-	admissionReviewCompletionLatencyChannel := make(chan int64, 1)
-	triggeredGeneratePoliciesChannel := make(chan []v1.ClusterPolicy, 1)
-	generateEngineResponsesChannel := make(chan []*response.EngineResponse, 1)
-
-	go ws.HandleGenerate(newRequest, generatePolicies, ctx, userRequestInfo, ws.configHandler, admissionRequestTimestamp, &admissionReviewCompletionLatencyChannel, &triggeredGeneratePoliciesChannel, &generateEngineResponsesChannel)
-	// registering the kyverno_admission_review_latency_milliseconds metric concurrently
-	go registerAdmissionReviewLatencyMetricGenerate(logger, *ws.promConfig.Metrics, string(newRequest.Operation), admissionRequestTimestamp, &admissionReviewCompletionLatencyChannel, &triggeredGeneratePoliciesChannel, &generateEngineResponsesChannel)
-	patchType := v1beta1.PatchTypeJSONPatch
-	return &v1beta1.AdmissionResponse{
+func successResponse(patch []byte) *v1beta1.AdmissionResponse {
+	r := &v1beta1.AdmissionResponse{
 		Allowed: true,
 		Result: &metav1.Status{
 			Status: "Success",
 		},
-		Patch:     patches,
-		PatchType: &patchType,
 	}

+	if len(patch) > 0 {
+		patchType := v1beta1.PatchTypeJSONPatch
+		r.PatchType = &patchType
+		r.Patch = patch
+	}
+
+	return r
+}
+
+func errorResponse(logger logr.Logger, err error, message string) *v1beta1.AdmissionResponse {
+	logger.Error(err, message)
+	return &v1beta1.AdmissionResponse{
+		Allowed: false,
+		Result: &metav1.Status{
+			Status:  "Failure",
+			Message: message + ": " + err.Error(),
+		},
+	}
+}
+
+func failureResponse(message string) *v1beta1.AdmissionResponse {
+	return &v1beta1.AdmissionResponse{
+		Allowed: false,
+		Result: &metav1.Status{
+			Status:  "Failure",
+			Message: message,
+		},
+	}
+}
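The control flow above threads one admission request through three engines by re-patching a deep copy at each step: mutate patches are applied to request.Object.Raw, the patched request feeds image verification, and that result feeds generation. A compact sketch of the chaining, with JSON patching reduced to byte concatenation purely for illustration:

package main

import "fmt"

type admissionRequest struct {
	object []byte
}

// deepCopy stands in for request.DeepCopy().
func (r *admissionRequest) deepCopy() *admissionRequest {
	obj := append([]byte(nil), r.object...)
	return &admissionRequest{object: obj}
}

// patchRequest mirrors the helper above: never mutate the caller's request.
func patchRequest(patch []byte, r *admissionRequest) *admissionRequest {
	n := r.deepCopy()
	n.object = append(n.object, patch...) // a real implementation applies a JSON patch
	return n
}

func main() {
	req := &admissionRequest{object: []byte("pod")}

	afterMutate := patchRequest([]byte("+labels"), req)
	afterVerify := patchRequest([]byte("+digest"), afterMutate)

	fmt.Println(string(req.object))         // pod (original untouched)
	fmt.Println(string(afterVerify.object)) // pod+labels+digest
}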
@@ -421,42 +464,30 @@ func registerAdmissionReviewLatencyMetricGenerate(logger logr.Logger, promMetric
 }

 func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *v1beta1.AdmissionResponse {
-	logger := ws.log.WithName("Validate").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
+	logger := ws.log.WithName("ValidateWebhook").WithValues("uid", request.UID, "kind", request.Kind.Kind, "namespace", request.Namespace, "name", request.Name, "operation", request.Operation)
 	if request.Operation == v1beta1.Delete {
 		ws.handleDelete(request)
 	}

 	if excludeKyvernoResources(request.Kind.Kind) {
-		return &v1beta1.AdmissionResponse{
-			Allowed: true,
-			Result: &metav1.Status{
-				Status: "Success",
-			},
-		}
+		return successResponse(nil)
 	}

 	logger.V(6).Info("received an admission request in validating webhook")
 	// timestamp at which this admission request got triggered
 	admissionRequestTimestamp := time.Now().Unix()

-	policies := ws.pCache.GetPolicyObject(policycache.ValidateEnforce, request.Kind.Kind, "")
+	policies := ws.pCache.GetPolicies(policycache.ValidateEnforce, request.Kind.Kind, "")
 	// Get namespace policies from the cache for the requested resource namespace
-	nsPolicies := ws.pCache.GetPolicyObject(policycache.ValidateEnforce, request.Kind.Kind, request.Namespace)
+	nsPolicies := ws.pCache.GetPolicies(policycache.ValidateEnforce, request.Kind.Kind, request.Namespace)
 	policies = append(policies, nsPolicies...)

 	var roles, clusterRoles []string
-	var err error
 	// getRoleRef only if policy has roles/clusterroles defined
-	if containRBACInfo(policies) {
+	if containsRBACInfo(policies) {
+		var err error
 		roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler)
 		if err != nil {
-			return &v1beta1.AdmissionResponse{
-				Allowed: false,
-				Result: &metav1.Status{
-					Status:  "Failure",
-					Message: err.Error(),
-				},
-			}
+			return errorResponse(logger, err, "failed to fetch RBAC data")
 		}
 	}
@@ -468,13 +499,7 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *

 	ctx, err := newVariablesContext(request, &userRequestInfo)
 	if err != nil {
-		return &v1beta1.AdmissionResponse{
-			Allowed: false,
-			Result: &metav1.Status{
-				Status:  "Failure",
-				Message: err.Error(),
-			},
-		}
+		return errorResponse(logger, err, "failed create policy rule context")
 	}

 	namespaceLabels := make(map[string]string)
@@ -482,7 +507,34 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
 		namespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, ws.nsLister, logger)
 	}

-	ok, msg := HandleValidation(ws.promConfig, request, policies, nil, ctx, userRequestInfo, ws.statusListener, ws.eventGen, ws.prGenerator, ws.log, ws.configHandler, ws.resCache, ws.client, namespaceLabels, admissionRequestTimestamp)
+	newResource, oldResource, err := utils.ExtractResources(nil, request)
+	if err != nil {
+		return errorResponse(logger, err, "failed create parse resource")
+	}
+
+	if err := ctx.AddImageInfo(&newResource); err != nil {
+		return errorResponse(logger, err, "failed add image information to policy rule context")
+	}
+
+	policyContext := &engine.PolicyContext{
+		NewResource:         newResource,
+		OldResource:         oldResource,
+		AdmissionInfo:       userRequestInfo,
+		ExcludeGroupRole:    ws.configHandler.GetExcludeGroupRole(),
+		ExcludeResourceFunc: ws.configHandler.ToFilter,
+		ResourceCache:       ws.resCache,
+		JSONContext:         ctx,
+		Client:              ws.client,
+	}
+
+	vh := &validationHandler{
+		log:            ws.log,
+		statusListener: ws.statusListener,
+		eventGen:       ws.eventGen,
+		prGenerator:    ws.prGenerator,
+	}
+
+	ok, msg := vh.handleValidation(ws.promConfig, request, policies, policyContext, namespaceLabels, admissionRequestTimestamp)
 	if !ok {
 		logger.Info("admission request denied")
 		return &v1beta1.AdmissionResponse{
@@ -1,9 +1,13 @@
 package webhooks

 import (
+	"github.com/kyverno/kyverno/pkg/engine"
+	"github.com/kyverno/kyverno/pkg/utils"
 	"strings"
 	"time"

+	"github.com/pkg/errors"
+
 	"github.com/kyverno/kyverno/pkg/common"
 	client "github.com/kyverno/kyverno/pkg/dclient"
@@ -153,12 +157,11 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
 	// time at which the corresponding the admission request's processing got initiated
 	admissionRequestTimestamp := time.Now().Unix()
 	logger := h.log.WithName("process")
-	policies := h.pCache.GetPolicyObject(policycache.ValidateAudit, request.Kind.Kind, "")
-	// Get namespace policies from the cache for the requested resource namespace
-	nsPolicies := h.pCache.GetPolicyObject(policycache.ValidateAudit, request.Kind.Kind, request.Namespace)
-	policies = append(policies, nsPolicies...)
+
+	policies := h.pCache.GetPolicies(policycache.ValidateAudit, request.Kind.Kind, request.Namespace)

 	// getRoleRef only if policy has roles/clusterroles defined
-	if containRBACInfo(policies) {
+	if containsRBACInfo(policies) {
 		roles, clusterRoles, err = userinfo.GetRoleRef(h.rbLister, h.crbLister, request, h.configHandler)
 		if err != nil {
 			logger.Error(err, "failed to get RBAC information for request")
@@ -172,7 +175,7 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {

 	ctx, err := newVariablesContext(request, &userRequestInfo)
 	if err != nil {
-		logger.Error(err, "unable to build variable context")
+		return errors.Wrap(err, "unable to build variable context")
 	}

 	namespaceLabels := make(map[string]string)
@@ -180,7 +183,34 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
 		namespaceLabels = common.GetNamespaceSelectorsFromNamespaceLister(request.Kind.Kind, request.Namespace, h.nsLister, logger)
 	}

-	HandleValidation(h.promConfig, request, policies, nil, ctx, userRequestInfo, h.statusListener, h.eventGen, h.prGenerator, logger, h.configHandler, h.resCache, h.client, namespaceLabels, admissionRequestTimestamp)
+	newResource, oldResource, err := utils.ExtractResources(nil, request)
+	if err != nil {
+		return errors.Wrap(err, "failed create parse resource")
+	}
+
+	if err := ctx.AddImageInfo(&newResource); err != nil {
+		return errors.Wrap(err, "failed add image information to policy rule context")
+	}
+
+	policyContext := &engine.PolicyContext{
+		NewResource:         newResource,
+		OldResource:         oldResource,
+		AdmissionInfo:       userRequestInfo,
+		ExcludeGroupRole:    h.configHandler.GetExcludeGroupRole(),
+		ExcludeResourceFunc: h.configHandler.ToFilter,
+		ResourceCache:       h.resCache,
+		JSONContext:         ctx,
+		Client:              h.client,
+	}
+
+	vh := &validationHandler{
+		log:            h.log,
+		statusListener: h.statusListener,
+		eventGen:       h.eventGen,
+		prGenerator:    h.prGenerator,
+	}
+
+	vh.handleValidation(h.promConfig, request, policies, policyContext, namespaceLabels, admissionRequestTimestamp)
 	return nil
 }
@@ -1,6 +1,8 @@
 package webhooks

 import (
+	"github.com/kyverno/kyverno/pkg/event"
+	"github.com/kyverno/kyverno/pkg/policystatus"
 	"reflect"
 	"sort"
 	"time"
@@ -8,42 +10,33 @@ import (
 	"github.com/go-logr/logr"
 	kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
+	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
-	"github.com/kyverno/kyverno/pkg/config"
-	client "github.com/kyverno/kyverno/pkg/dclient"
 	"github.com/kyverno/kyverno/pkg/engine"
-	"github.com/kyverno/kyverno/pkg/engine/context"
 	"github.com/kyverno/kyverno/pkg/engine/response"
-	"github.com/kyverno/kyverno/pkg/event"
 	"github.com/kyverno/kyverno/pkg/metrics"
 	admissionReviewLatency "github.com/kyverno/kyverno/pkg/metrics/admissionreviewlatency"
 	policyRuleExecutionLatency "github.com/kyverno/kyverno/pkg/metrics/policyruleexecutionlatency"
 	policyRuleResults "github.com/kyverno/kyverno/pkg/metrics/policyruleresults"
 	"github.com/kyverno/kyverno/pkg/policyreport"
-	"github.com/kyverno/kyverno/pkg/policystatus"
-	"github.com/kyverno/kyverno/pkg/resourcecache"
 	"github.com/kyverno/kyverno/pkg/utils"
 	v1beta1 "k8s.io/api/admission/v1beta1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 )

-// HandleValidation handles validating webhook admission request
+type validationHandler struct {
+	log            logr.Logger
+	statusListener policystatus.Listener
+	eventGen       event.Interface
+	prGenerator    policyreport.GeneratorInterface
+}
+
+// handleValidation handles validating webhook admission request
 // If there are no errors in validating rule we apply generation rules
 // patchedResource is the (resource + patches) after applying mutation rules
-func HandleValidation(
+func (v *validationHandler) handleValidation(
 	promConfig *metrics.PromConfig,
 	request *v1beta1.AdmissionRequest,
 	policies []*kyverno.ClusterPolicy,
-	patchedResource []byte,
-	ctx *context.Context,
-	userRequestInfo kyverno.RequestInfo,
-	statusListener policystatus.Listener,
-	eventGen event.Interface,
-	prGenerator policyreport.GeneratorInterface,
-	log logr.Logger,
-	dynamicConfig config.Interface,
-	resCache resourcecache.ResourceCache,
-	client *client.Client,
+	policyContext *engine.PolicyContext,
 	namespaceLabels map[string]string,
 	admissionRequestTimestamp int64) (bool, string) {
@@ -51,47 +44,20 @@ func HandleValidation(
 		return true, ""
 	}

-	resourceName := request.Kind.Kind + "/" + request.Name
-	if request.Namespace != "" {
-		resourceName = request.Namespace + "/" + resourceName
-	}
-
-	logger := log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind.String())
-
-	// Get new and old resource
-	newR, oldR, err := utils.ExtractResources(patchedResource, request)
-	if err != nil {
-		// as resource cannot be parsed, we skip processing
-		logger.Error(err, "failed to extract resource")
-		return true, ""
-	}
+	resourceName := getResourceName(request)
+	logger := v.log.WithValues("action", "validate", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind.String())

 	var deletionTimeStamp *metav1.Time
-	if reflect.DeepEqual(newR, unstructured.Unstructured{}) {
-		deletionTimeStamp = newR.GetDeletionTimestamp()
+	if reflect.DeepEqual(policyContext.NewResource, unstructured.Unstructured{}) {
+		deletionTimeStamp = policyContext.NewResource.GetDeletionTimestamp()
 	} else {
-		deletionTimeStamp = oldR.GetDeletionTimestamp()
+		deletionTimeStamp = policyContext.OldResource.GetDeletionTimestamp()
 	}

 	if deletionTimeStamp != nil && request.Operation == v1beta1.Update {
 		return true, ""
 	}

-	if err := ctx.AddImageInfo(&newR); err != nil {
-		logger.Error(err, "unable to add image info to variables context")
-	}
-
-	policyContext := &engine.PolicyContext{
-		NewResource:         newR,
-		OldResource:         oldR,
-		AdmissionInfo:       userRequestInfo,
-		ExcludeGroupRole:    dynamicConfig.GetExcludeGroupRole(),
-		ExcludeResourceFunc: dynamicConfig.ToFilter,
-		ResourceCache:       resCache,
-		JSONContext:         ctx,
-		Client:              client,
-	}
-
 	var engineResponses []*response.EngineResponse
 	var triggeredPolicies []kyverno.ClusterPolicy
 	for _, policy := range policies {
@@ -112,7 +78,7 @@ func HandleValidation(

 		engineResponses = append(engineResponses, engineResponse)
 		triggeredPolicies = append(triggeredPolicies, *policy)
-		statusListener.Update(validateStats{
+		v.statusListener.Update(validateStats{
 			resp:      engineResponse,
 			namespace: policy.Namespace,
 		})
@@ -142,7 +108,7 @@ func HandleValidation(
 	// all policies were applied successfully.
 	// create an event on the resource
 	events := generateEvents(engineResponses, blocked, (request.Operation == v1beta1.Update), logger)
-	eventGen.Add(events...)
+	v.eventGen.Add(events...)
 	if blocked {
 		logger.V(4).Info("resource blocked")
 		//registering the kyverno_admission_review_latency_milliseconds metric concurrently
@@ -152,12 +118,12 @@ func HandleValidation(
 	}

 	if request.Operation == v1beta1.Delete {
-		prGenerator.Add(buildDeletionPrInfo(oldR))
+		v.prGenerator.Add(buildDeletionPrInfo(policyContext.OldResource))
 		return true, ""
 	}

 	prInfos := policyreport.GeneratePRsFromEngineResponse(engineResponses, logger)
-	prGenerator.Add(prInfos...)
+	v.prGenerator.Add(prInfos...)

 	//registering the kyverno_admission_review_latency_milliseconds metric concurrently
 	admissionReviewLatencyDuration := int64(time.Since(time.Unix(admissionRequestTimestamp, 0)))
@@ -166,6 +132,15 @@ func HandleValidation(
 	return true, ""
 }

+func getResourceName(request *v1beta1.AdmissionRequest) string {
+	resourceName := request.Kind.Kind + "/" + request.Name
+	if request.Namespace != "" {
+		resourceName = request.Namespace + "/" + resourceName
+	}
+
+	return resourceName
+}
+
 func registerPolicyRuleResultsMetricValidation(promConfig *metrics.PromConfig, logger logr.Logger, requestOperation string, policy kyverno.ClusterPolicy, engineResponse response.EngineResponse, admissionRequestTimestamp int64) {
 	resourceRequestOperationPromAlias, err := policyRuleResults.ParseResourceRequestOperation(requestOperation)
 	if err != nil {
pkg/webhooks/verify_images.go (new file, 50 lines)

@@ -0,0 +1,50 @@
+package webhooks
+
+import (
+	"errors"
+
+	"github.com/go-logr/logr"
+	v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
+	"github.com/kyverno/kyverno/pkg/engine"
+	"github.com/kyverno/kyverno/pkg/engine/response"
+	engineutils "github.com/kyverno/kyverno/pkg/engine/utils"
+	"k8s.io/api/admission/v1beta1"
+)
+
+func (ws *WebhookServer) applyImageVerifyPolicies(request *v1beta1.AdmissionRequest, policyContext *engine.PolicyContext, policies []*v1.ClusterPolicy, logger logr.Logger) ([]byte, error) {
+	ok, message, imagePatches := ws.handleVerifyImages(request, policyContext, policies)
+	if !ok {
+		return nil, errors.New(message)
+	}
+
+	logger.V(6).Info("images verified", "patches", string(imagePatches))
+	return imagePatches, nil
+}
+
+func (ws *WebhookServer) handleVerifyImages(request *v1beta1.AdmissionRequest,
+	policyContext *engine.PolicyContext,
+	policies []*v1.ClusterPolicy) (bool, string, []byte) {
+
+	if len(policies) == 0 {
+		return true, "", nil
+	}
+
+	resourceName := getResourceName(request)
+	logger := ws.log.WithValues("action", "verifyImages", "resource", resourceName, "operation", request.Operation, "gvk", request.Kind.String())
+
+	var engineResponses []*response.EngineResponse
+	var patches [][]byte
+	for _, p := range policies {
+		policyContext.Policy = *p
+		resp := engine.VerifyAndPatchImages(policyContext)
+		engineResponses = append(engineResponses, resp)
+		patches = append(patches, resp.GetPatches()...)
+	}
+
+	blocked := toBlockResource(engineResponses, logger)
+	if blocked {
+		logger.V(4).Info("resource blocked")
+		return false, getEnforceFailureErrorMsg(engineResponses), nil
+	}
+
+	return true, "", engineutils.JoinPatches(patches)
+}
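handleVerifyImages collects one patch set per policy and returns them through engineutils.JoinPatches. Joining works because each engine response carries individual JSON Patch operations; concatenating them into one array yields a single patch document the API server can apply. A hedged sketch of that joining step, using a hypothetical helper rather than the engineutils implementation:

package main

import (
	"bytes"
	"fmt"
)

// joinPatches concatenates individual JSON Patch operations into one
// JSON Patch array. This mimics what a join helper must produce; the
// real engineutils.JoinPatches may differ in details.
func joinPatches(patches [][]byte) []byte {
	if len(patches) == 0 {
		return nil
	}
	var b bytes.Buffer
	b.WriteString("[")
	for i, p := range patches {
		if i > 0 {
			b.WriteString(",")
		}
		b.Write(p)
	}
	b.WriteString("]")
	return b.Bytes()
}

func main() {
	patches := [][]byte{
		[]byte(`{"op":"replace","path":"/spec/containers/0/image","value":"ghcr.io/app@sha256:abc"}`),
		[]byte(`{"op":"add","path":"/metadata/annotations/verified","value":"true"}`),
	}
	fmt.Println(string(joinPatches(patches)))
}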
@@ -2,6 +2,7 @@ package common

 import (
 	"bytes"
+	"fmt"
 	"strings"
 	"time"

@@ -30,42 +31,50 @@ func CallMetrics() (string, error) {
 }

 // ProcessMetrics checks the metrics log and identify if the policy is added in cache or not
-func ProcessMetrics(newStr, e2ePolicyName string, e2eTime time.Time) (bool, error) {
-	var action, policyName string
-	var timeInTimeFormat time.Time
-	var err error
+func ProcessMetrics(newStr, e2ePolicyName string) error {
 	splitByNewLine := strings.Split(newStr, "\n")
-	for _, lineSplitedByNewLine := range splitByNewLine {
-		if strings.HasPrefix(lineSplitedByNewLine, "kyverno_policy_changes_info{") {
-			splitByComma := strings.Split(lineSplitedByNewLine, ",")
-			for _, lineSplitedByComma := range splitByComma {
-				if strings.HasPrefix(lineSplitedByComma, "policy_change_type=") {
-					splitByQuote := strings.Split(lineSplitedByComma, "\"")
-					action = splitByQuote[1]
-				}
-				if strings.HasPrefix(lineSplitedByComma, "policy_name=") {
-					splitByQuote := strings.Split(lineSplitedByComma, "\"")
-					policyName = splitByQuote[1]
-				}
-				if strings.HasPrefix(lineSplitedByComma, "timestamp=") {
-					splitByQuote := strings.Split(lineSplitedByComma, "\"")
-					layout := "2006-01-02 15:04:05 -0700 MST"
-					timeInTimeFormat, err = time.Parse(layout, splitByQuote[1])
-					if err != nil {
-						return false, err
-					}
-				}
-			}
-		}
-
-		if policyName == e2ePolicyName {
-			diff := e2eTime.Sub(timeInTimeFormat)
-			if diff < time.Second {
-				if action == "created" {
-					return true, nil
-				}
-			}
-		}
-	}
-	return false, nil
+	for _, lineSplitByNewLine := range splitByNewLine {
+		// kyverno_policy_rule_info_total{policy_background_mode=\"false\",policy_name=\"gen-cluster-policy\",policy_namespace=\"-\",policy_type=\"cluster\",policy_validation_mode=\"audit\",rule_name=\"gen-cluster-role\",rule_type=\"generate\"} 1
+		if !strings.HasPrefix(lineSplitByNewLine, "kyverno_policy_rule_info_total{") {
+			continue
+		}
+
+		if !strings.HasSuffix(lineSplitByNewLine, "} 1") {
+			continue
+		}
+
+		splitByComma := strings.Split(lineSplitByNewLine, ",")
+		for _, lineSplitByComma := range splitByComma {
+			if strings.HasPrefix(lineSplitByComma, "policy_name=") {
+				splitByQuote := strings.Split(lineSplitByComma, "\"")
+				policyName := splitByQuote[1]
+				if policyName == e2ePolicyName {
+					return nil
+				}
+			}
+		}
+	}
+
+	return fmt.Errorf("policy %s not found in metrics %s", e2ePolicyName, newStr)
+}
+
+func PolicyCreated(policyName string) error {
+	return e2e.GetWithRetry(1*time.Second, 60, checkPolicyCreated(policyName))
+}
+
+func checkPolicyCreated(policyName string) func() error {
+	return func() error {
+		var metricsString string
+		metricsString, err := CallMetrics()
+		if err != nil {
+			return fmt.Errorf("failed to get metrics: %v", err)
+		}
+
+		err = ProcessMetrics(metricsString, policyName)
+		if err != nil {
+			return fmt.Errorf("policy not created: %v", err)
+		}
+
+		return nil
+	}
+}
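The rewritten ProcessMetrics no longer compares timestamps; it only checks that a kyverno_policy_rule_info_total sample with value 1 names the expected policy. That check can be exercised against canned scrape output, as in this sketch; it is a trimmed reimplementation of the same scan, not the e2e package itself:

package main

import (
	"fmt"
	"strings"
)

// policyInMetrics scans Prometheus text-format output for a
// kyverno_policy_rule_info_total sample with value 1 whose policy_name
// label matches the given name.
func policyInMetrics(metrics, policy string) bool {
	for _, line := range strings.Split(metrics, "\n") {
		if !strings.HasPrefix(line, "kyverno_policy_rule_info_total{") ||
			!strings.HasSuffix(line, "} 1") {
			continue
		}
		if strings.Contains(line, `policy_name="`+policy+`"`) {
			return true
		}
	}
	return false
}

func main() {
	scrape := `kyverno_policy_rule_info_total{policy_name="gen-cluster-policy",rule_type="generate"} 1`
	fmt.Println(policyInMetrics(scrape, "gen-cluster-policy")) // true
	fmt.Println(policyInMetrics(scrape, "missing-policy"))     // false
}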
|
File diff suppressed because it is too large
Load diff
|
@@ -13,6 +13,7 @@ func Test_MetricsServerAvailability(t *testing.T) {
 	if os.Getenv("E2E") == "" {
 		t.Skip("Skipping E2E Test")
 	}

 	requestObj := e2e.APIRequest{
 		URL:  "http://localhost:8000/metrics",
 		Type: "GET",
@@ -2,7 +2,6 @@ package mutate

 import (
 	"encoding/json"
-	"errors"
 	"fmt"
 	"os"
 	"testing"
@@ -44,19 +43,21 @@ func Test_Mutate_Sets(t *testing.T) {

 	// Clean up Resources
 	By("Cleaning Cluster Policies")
-	e2eClient.CleanClusterPolicies(clPolGVR)
+	_ = e2eClient.CleanClusterPolicies(clPolGVR)

 	// Clear Namespace
 	By(fmt.Sprintf("Deleting Namespace : %s", tests.ResourceNamespace))
-	e2eClient.DeleteClusteredResource(nsGVR, tests.ResourceNamespace)
+	_ = e2eClient.DeleteClusteredResource(nsGVR, tests.ResourceNamespace)

 	// Wait Till Deletion of Namespace
-	e2e.GetWithRetry(time.Duration(1), 15, func() error {
+	err = e2e.GetWithRetry(1*time.Second, 15, func() error {
 		_, err := e2eClient.GetClusteredResource(nsGVR, tests.ResourceNamespace)
 		if err != nil {
 			return nil
 		}
-		return errors.New("Deleting Namespace")
+		return fmt.Errorf("failed to delete namespace: %v", err)
 	})
+	Expect(err).NotTo(HaveOccurred())

 	// Create Namespace
 	By(fmt.Sprintf("Creating Namespace %s", clPolNS))
@@ -64,7 +65,7 @@ func Test_Mutate_Sets(t *testing.T) {
 	Expect(err).NotTo(HaveOccurred())

 	// Wait Till Creation of Namespace
-	e2e.GetWithRetry(time.Duration(1), 15, func() error {
+	err = e2e.GetWithRetry(1*time.Second, 15, func() error {
 		_, err := e2eClient.GetClusteredResource(nsGVR, tests.ResourceNamespace)
 		if err != nil {
 			return err
@@ -72,6 +73,7 @@ func Test_Mutate_Sets(t *testing.T) {

 		return nil
 	})
+	Expect(err).NotTo(HaveOccurred())

 	// Create source CM
 	By(fmt.Sprintf("\nCreating source ConfigMap in %s", tests.ResourceNamespace))
@@ -80,25 +82,11 @@ func Test_Mutate_Sets(t *testing.T) {

 	// Create CM Policy
 	By(fmt.Sprintf("\nCreating Mutate ConfigMap Policy in %s", clPolNS))
-	loc, _ := time.LoadLocation("UTC")
-	timeBeforePolicyCreation := time.Now().In(loc)
 	_, err = e2eClient.CreateNamespacedResourceYaml(clPolGVR, clPolNS, tests.Data)
 	Expect(err).NotTo(HaveOccurred())

-	// check policy in metrics
-	policySyncBool := false
-	e2e.GetWithRetry(time.Duration(2), 10, func() error {
-		metricsString, err := commonE2E.CallMetrics()
-		if err != nil {
-			return err
-		}
-		policySyncBool, err = commonE2E.ProcessMetrics(metricsString, tests.PolicyName, timeBeforePolicyCreation)
-		if policySyncBool == false || err != nil {
-			return errors.New("policy not created")
-		}
-		return nil
-	})
-	Expect(policySyncBool).To(Equal(true))
+	err = commonE2E.PolicyCreated(tests.PolicyName)
+	Expect(err).NotTo(HaveOccurred())

 	// Create target CM
 	By(fmt.Sprintf("\nCreating target ConfigMap in %s", tests.ResourceNamespace))
@@ -108,7 +96,7 @@ func Test_Mutate_Sets(t *testing.T) {
 		// Verify created ConfigMap
 		By(fmt.Sprintf("Verifying ConfigMap in the Namespace : %s", tests.ResourceNamespace))
 		// Wait Till Creation of ConfigMap
-		err = e2e.GetWithRetry(time.Duration(1), 15, func() error {
+		err = e2e.GetWithRetry(1*time.Second, 15, func() error {
 			_, err := e2eClient.GetNamespacedResource(cmGVR, tests.ResourceNamespace, "target")
 			if err != nil {
 				return err
@@ -125,18 +113,20 @@ func Test_Mutate_Sets(t *testing.T) {
 		Expect(cmRes.GetLabels()["kyverno.key/copy-me"]).To(Equal("sample-value"))

 		//CleanUp Resources
-		e2eClient.CleanClusterPolicies(clPolGVR)
+		_ = e2eClient.CleanClusterPolicies(clPolGVR)

 		// Clear Namespace
-		e2eClient.DeleteClusteredResource(nsGVR, tests.ResourceNamespace)
+		_ = e2eClient.DeleteClusteredResource(nsGVR, tests.ResourceNamespace)

 		// Wait Till Deletion of Namespace
-		e2e.GetWithRetry(time.Duration(1), 15, func() error {
+		err = e2e.GetWithRetry(1*time.Second, 15, func() error {
 			_, err := e2eClient.GetClusteredResource(nsGVR, tests.ResourceNamespace)
 			if err != nil {
 				return nil
 			}
-			return errors.New("Deleting Namespace")
+			return fmt.Errorf("failed to delete namespace: %v", err)
 		})
+		Expect(err).NotTo(HaveOccurred())

 		By(fmt.Sprintf("Test %s Completed \n\n\n", tests.TestName))
 	}
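The cleanup blocks above all use the same "wait until deleted" idiom: poll the resource and treat a lookup failure as confirmation it is gone. Here is a generic, self-contained sketch of that idiom; waitUntilGone and the simulated fetch are illustrative names, not repository APIs.

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntilGone polls fetch; a fetch error means the object no longer
// exists, so the wait succeeds. Success on every poll means it is stuck.
func waitUntilGone(interval time.Duration, attempts int, fetch func() error) error {
	for i := 0; i < attempts; i++ {
		if err := fetch(); err != nil {
			return nil // lookup failed: the resource is gone
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("resource still present after %d attempts", attempts)
}

func main() {
	deleted := false
	fetch := func() error {
		if deleted {
			return errors.New("not found")
		}
		deleted = true // simulate deletion completing after one poll
		return nil
	}
	fmt.Println(waitUntilGone(10*time.Millisecond, 15, fetch)) // <nil>
}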
@@ -154,41 +144,27 @@ func Test_Mutate_Ingress(t *testing.T) {

 	nspace := ingressTests.testNamesapce
 	By(fmt.Sprintf("Cleaning Cluster Policies"))
-	e2eClient.CleanClusterPolicies(clPolGVR)
+	_ = e2eClient.CleanClusterPolicies(clPolGVR)

 	By(fmt.Sprintf("Deleting Namespace : %s", nspace))
-	e2eClient.DeleteClusteredResource(nsGVR, nspace)
+	_ = e2eClient.DeleteClusteredResource(nsGVR, nspace)

 	// Wait Till Deletion of Namespace
-	err = e2e.GetWithRetry(time.Duration(1), 15, func() error {
+	err = e2e.GetWithRetry(1*time.Second, 15, func() error {
 		_, err := e2eClient.GetClusteredResource(nsGVR, nspace)
 		if err != nil {
 			return nil
 		}
-		return errors.New("Deleting Namespace")
+		return fmt.Errorf("failed to delete namespace: %v", err)
 	})
 	Expect(err).To(BeNil())

 	By(fmt.Sprintf("Creating mutate ClusterPolicy "))
-	loc, _ := time.LoadLocation("UTC")
-	timeBeforePolicyCreation := time.Now().In(loc)
 	_, err = e2eClient.CreateClusteredResourceYaml(clPolGVR, ingressTests.cpol)
 	Expect(err).NotTo(HaveOccurred())

 	// check policy in metrics
-	policySyncBool := false
-	e2e.GetWithRetry(time.Duration(2), 10, func() error {
-		metricsString, err := commonE2E.CallMetrics()
-		if err != nil {
-			return err
-		}
-		policySyncBool, err = commonE2E.ProcessMetrics(metricsString, ingressTests.policyName, timeBeforePolicyCreation)
-		if policySyncBool == false || err != nil {
-			return errors.New("policy not created")
-		}
-		return nil
-	})
-	Expect(policySyncBool).To(Equal(true))
+	err = commonE2E.PolicyCreated(ingressTests.policyName)
+	Expect(err).NotTo(HaveOccurred())

 	By(fmt.Sprintf("Creating Namespace %s", nspace))
 	_, err = e2eClient.CreateClusteredResourceYaml(nsGVR, newNamespaceYaml(nspace))
@@ -204,7 +180,7 @@ func Test_Mutate_Ingress(t *testing.T) {

 			By(fmt.Sprintf("Verifying Ingress %v in the Namespace : %s", gvr, nspace))
 			var mutatedResource *unstructured.Unstructured
-			err = e2e.GetWithRetry(time.Duration(1), 15, func() error {
+			err = e2e.GetWithRetry(1*time.Second, 15, func() error {
 				mutatedResource, err = e2eClient.GetNamespacedResource(gvr, nspace, test.resourceName)
 				if err != nil {
 					return err
@@ -79,14 +79,14 @@ func GetWithRetry(sleepInterval time.Duration, retryCount int, retryFunc func()
 	var err error
 	for i := 0; i < retryCount; i++ {
 		err = retryFunc()
-		if err != nil {
-			time.Sleep(sleepInterval * time.Second)
-			continue
-		} else {
-			break
+		if err == nil {
+			return nil
 		}
+
+		time.Sleep(sleepInterval)
 	}
-	return err
+
+	return fmt.Errorf("operation failed, retries=%v, duration=%v: %v", retryCount, sleepInterval, err)
 }

 // DeleteNamespacedResource ...
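For readability, here is the revised helper as reconstructed from the hunk above, in a self-contained sketch with a toy usage: return on the first success, otherwise sleep the caller's interval and wrap the last error with the retry parameters.

package main

import (
	"errors"
	"fmt"
	"time"
)

// GetWithRetry runs retryFunc up to retryCount times, sleeping
// sleepInterval between attempts, and wraps the final error.
func GetWithRetry(sleepInterval time.Duration, retryCount int, retryFunc func() error) error {
	var err error
	for i := 0; i < retryCount; i++ {
		err = retryFunc()
		if err == nil {
			return nil
		}
		time.Sleep(sleepInterval)
	}
	return fmt.Errorf("operation failed, retries=%v, duration=%v: %v", retryCount, sleepInterval, err)
}

func main() {
	calls := 0
	err := GetWithRetry(10*time.Millisecond, 5, func() error {
		calls++
		if calls < 3 {
			return errors.New("not ready")
		}
		return nil
	})
	fmt.Println(err, "after", calls, "calls") // <nil> after 3 calls
}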
@@ -122,9 +122,6 @@ func (e2e *E2EClient) CreateNamespacedResourceYaml(gvr schema.GroupVersionResour
 		return nil, err
 	}
 	result, err := e2e.Client.Resource(gvr).Namespace(namespace).Create(context.TODO(), &resource, metav1.CreateOptions{})
-	if gvr.Resource == "clusterpolicies" {
-		time.Sleep(1 * time.Second)
-	}
 	return result, err
 }
@@ -54,7 +54,7 @@ func Test_Validate_Sets(t *testing.T) {
 	e2eClient.DeleteClusteredResource(crdGVR, crdName)

 	// Wait Till Deletion of Namespace
-	e2e.GetWithRetry(time.Duration(1), 15, func() error {
+	e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
 		_, err := e2eClient.GetClusteredResource(nsGVR, nspace)
 		if err != nil {
 			return nil

@@ -78,7 +78,7 @@ func Test_Validate_Sets(t *testing.T) {
 	Expect(err).NotTo(HaveOccurred())

 	// Wait till CRD is created
-	e2e.GetWithRetry(time.Duration(1), 15, func() error {
+	e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
 		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
 		if err == nil {
 			return nil

@@ -104,7 +104,7 @@ func Test_Validate_Sets(t *testing.T) {
 	// Clear Namespace
 	e2eClient.DeleteClusteredResource(nsGVR, nspace)
 	// Wait Till Deletion of Namespace
-	e2e.GetWithRetry(time.Duration(1), 15, func() error {
+	e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
 		_, err := e2eClient.GetClusteredResource(nsGVR, nspace)
 		if err != nil {
 			return nil