1
0
Fork 0
mirror of https://github.com/kyverno/kyverno.git synced 2025-03-31 03:45:17 +00:00

merge and resolve conflicts

Signed-off-by: Jim Bugwadia <jim@nirmata.com>
This commit is contained in:
Jim Bugwadia 2021-03-25 16:43:12 -07:00
commit 6dff9e0ab9
174 changed files with 1613 additions and 3373 deletions

View file

@ -41,6 +41,9 @@ them, don't hesitate to ask. We're here to help! This is simply a reminder of wh
- [ ] I have read the [contributing guidelines](https://github.com/kyverno/kyverno/blob/main/CONTRIBUTING.md).
- [ ] I have added tests that prove my fix is effective or that my feature works.
- [ ] I have added or changed [the documentation](https://github.com/kyverno/website).
- If not, I have raised an issue in [kyverno/website](https://github.com/kyverno/website) to track the doc update:
<!-- Uncomment to link to the issue -->
<!-- https://github.com/kyverno/website/issues/1 -->
## Further comments

View file

@ -43,6 +43,10 @@ jobs:
- name: golangci-lint
uses: reviewdog/action-golangci-lint@v1
- name: Checking unused pkgs using go mod tidy
run: |
make unused-package-check
build-init-kyverno:
runs-on: ubuntu-latest
needs: pre-checks

View file

@ -5,7 +5,6 @@ on:
- 'main'
paths-ignore:
- 'README.md'
- 'samples/**'
- 'charts/**'
- 'docs/**'
pull_request:
@ -13,7 +12,6 @@ on:
- 'main'
paths-ignore:
- 'README.md'
- 'samples/**'
- 'charts/**'
- 'docs/**'
@ -27,7 +25,7 @@ jobs:
- name: Unshallow
run: git fetch --prune --unshallow
- name: Set up Go
- name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.14
@ -39,6 +37,10 @@ jobs:
key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
restore-keys: |
${{ runner.os }}-go-
- name: Test Policy
run: |
make run_testcmd_policy
- name: gofmt check
run: |

View file

@ -1,40 +0,0 @@
name: gosum
on:
pull_request:
branches:
- 'master'
paths:
- '.github/workflows/gosum.yml'
- 'go.mod'
- 'go.sum'
jobs:
fix:
runs-on: ubuntu-latest
steps:
-
name: Checkout
uses: actions/checkout@v2
-
name: Set up Go
uses: actions/setup-go@v2
with:
go-version: 1.14
-
name: Tidy
run: |
rm -f go.sum
go mod tidy
-
name: Create Pull Request
uses: peter-evans/create-pull-request@v2
with:
token: ${{ secrets.GITHUB_TOKEN }}
commit-message: "fix: go mod tidy"
title: "fix: go mod tidy"
body: |
Current `go.mod` and `go.sum` don't match the source code.
branch: go-mod-tidy
branch-suffix: timestamp
labels: automerge

View file

@ -20,6 +20,16 @@ LD_FLAGS="-s -w -X $(PACKAGE)/pkg/version.BuildVersion=$(GIT_VERSION) -X $(PACKA
# KYVERNO
##################################
.PHONY: unused-package-check
unused-package-check:
@echo "------------------"
@echo "--> Check unused packages for the all kyverno components"
@echo "------------------"
@tidy=$$(go mod tidy); \
if [ -n "$${tidy}" ]; then \
echo "go mod tidy checking failed!"; echo "$${tidy}"; echo; \
fi
KYVERNO_PATH:= cmd/kyverno
build: kyverno
PWD := $(CURDIR)
@ -43,8 +53,8 @@ docker-build-initContainer-amd64:
@docker build -f $(PWD)/$(INITC_PATH)/Dockerfile -t $(REPO)/$(INITC_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TARGETPLATFORM="linux/amd64"
docker-push-initContainer:
@docker buildx build --file $(PWD)/$(INITC_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(INITC_IMAGE):$(IMAGE_TAG) .
@docker buildx build --file $(PWD)/$(INITC_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(INITC_IMAGE):latest .
@docker buildx build --file $(PWD)/$(INITC_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(INITC_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS)
@docker buildx build --file $(PWD)/$(INITC_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(INITC_IMAGE):latest . --build-arg LD_FLAGS=$(LD_FLAGS)
##################################
# KYVERNO CONTAINER
@ -74,8 +84,8 @@ docker-build-kyverno-amd64:
@docker build -f $(PWD)/$(KYVERNO_PATH)/Dockerfile -t $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TARGETPLATFORM="linux/amd64"
docker-push-kyverno:
@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) .
@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):latest .
@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS)
@docker buildx build --file $(PWD)/$(KYVERNO_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_IMAGE):latest . --build-arg LD_FLAGS=$(LD_FLAGS)
##################################
@ -105,9 +115,9 @@ docker-build-cli-amd64:
@docker build -f $(PWD)/$(CLI_PATH)/Dockerfile -t $(REPO)/$(KYVERNO_CLI_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS) --build-arg TARGETPLATFORM="linux/amd64"
docker-push-cli:
@docker buildx build --file $(PWD)/$(CLI_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_CLI_IMAGE):$(IMAGE_TAG) .
@docker buildx build --file $(PWD)/$(CLI_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_CLI_IMAGE):latest .
@docker buildx build --file $(PWD)/$(CLI_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_CLI_IMAGE):$(IMAGE_TAG) . --build-arg LD_FLAGS=$(LD_FLAGS)
@docker buildx build --file $(PWD)/$(CLI_PATH)/Dockerfile --progress plane --push --platform linux/arm64,linux/amd64 --tag $(REPO)/$(KYVERNO_CLI_IMAGE):latest . --build-arg LD_FLAGS=$(LD_FLAGS)
##################################
docker-publish-all: docker-publish-initContainer docker-publish-kyverno docker-publish-cli
@ -164,6 +174,11 @@ test-e2e:
go test ./test/e2e/... -v
$(eval export E2E="")
#Test TestCmd Policy
run_testcmd_policy:
go build -o kyvernoctl cmd/cli/kubectl-kyverno/main.go
./kyvernoctl test https://github.com/kyverno/policies/main
# godownloader create downloading script for kyverno-cli
godownloader:
godownloader .goreleaser.yml --repo kyverno/kyverno -o ./scripts/install-cli.sh --source="raw"

View file

@ -30,4 +30,13 @@ Check out the Kyverno <a href="https://kyverno.io/community">Community</a> page
- For feature requests and bugs, file an [issue](https://github.com/kyverno/kyverno/issues).
- For discussions or questions, join the **#kyverno** channel on the [Kubernetes Slack](https://kubernetes.slack.com/) or the [mailing list](https://groups.google.com/g/kyverno).
## Spread The Love
We built Kyverno to make it easy to secure and manage Kubernetes configurations. If you like the project, [let us know](https://github.com/kyverno/kyverno/stargazers)!
[![Stargazers over time](https://starchart.cc/kyverno/kyverno.svg)](https://starchart.cc/kyverno/kyverno)

View file

@ -2,11 +2,11 @@
[Kyverno](https://kyverno.io) is a Kubernetes Native Policy Management engine. It allows you to:
* Manage policies as Kubernetes resources (no new language required.)
* Validate, mutate, and generate resource configurations.
* Select resources based on labels and wildcards.
* View policy enforcement as events.
* Scan existing resources for violations.
- Manage policies as Kubernetes resources (no new language required.)
- Validate, mutate, and generate resource configurations.
- Select resources based on labels and wildcards.
- View policy enforcement as events.
- Scan existing resources for violations.
Access the complete user documentation and guides at: https://kyverno.io.
@ -62,48 +62,49 @@ The command removes all the Kubernetes components associated with the chart and
The following table lists the configurable parameters of the kyverno chart and their default values.
Parameter | Description | Default
--- | --- | ---
`affinity` | node/pod affinities | `nil`
`createSelfSignedCert` | generate a self signed cert and certificate authority. Kyverno defaults to using kube-controller-manager CA-signed certificate or existing cert secret if false. | `false`
`config.existingConfig` | existing Kubernetes configmap to use for the resource filters configuration | `nil`
`config.resourceFilters` | list of filter of resource types to be skipped by kyverno policy engine. See [documentation](https://github.com/kyverno/kyverno/blob/master/documentation/installation.md#filter-kubernetes-resources-that-admission-webhook-should-not-process) for details | `["[Event,*,*]","[*,kube-system,*]","[*,kube-public,*]","[*,kube-node-lease,*]","[Node,*,*]","[APIService,*,*]","[TokenReview,*,*]","[SubjectAccessReview,*,*]","[*,kyverno,*]"]`
`dnsPolicy` | Sets the DNS Policy which determines the manner in which DNS resolution happens across the cluster. For further reference, see [the official docs](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) | `ClusterFirst`
`extraArgs` | list of extra arguments to give the binary | `[]`
`fullnameOverride` | override the expanded name of the chart | `nil`
`generatecontrollerExtraResources` | extra resource type Kyverno is allowed to generate | `[]`
`hostNetwork` | Use the host network's namespace. Set it to `true` when dealing with a custom CNI over Amazon EKS | `false`
`image.pullPolicy` | Image pull policy | `IfNotPresent`
`image.pullSecrets` | Specify image pull secrets | `[]` (does not add image pull secrets to deployed pods)
`image.repository` | Image repository | `ghcr.io/kyverno/kyverno`
`image.tag` | Image tag | `nil`
`initImage.pullPolicy` | Init image pull policy | `nil`
`initImage.repository` | Init image repository | `ghcr.io/kyverno/kyvernopre`
`initImage.tag` | Init image tag | `nil`
`livenessProbe` | liveness probe configuration | `{}`
`nameOverride` | override the name of the chart | `nil`
`namespace` | namespace the chart deploy to | `nil`
`nodeSelector` | node labels for pod assignment | `{}`
`podAnnotations` | annotations to add to each pod | `{}`
`podLabels` | additional labels to add to each pod | `{}`
`podSecurityContext` | security context for the pod | `{}`
`priorityClassName` | priorityClassName | `nil`
`rbac.create` | create cluster roles, cluster role bindings, and service account | `true`
`rbac.serviceAccount.create` | create a service account | `true`
`rbac.serviceAccount.name` | the service account name | `nil`
`rbac.serviceAccount.annotations` | annotations for the service account | `{}`
`readinessProbe` | readiness probe configuration | `{}`
`replicaCount` | desired number of pods | `1`
`resources` | pod resource requests & limits | `{}`
`service.annotations` | annotations to add to the service | `{}`
`service.nodePort` | node port | `nil`
`service.port` | port for the service | `443`
`service.type` | type of service | `ClusterIP`
`tolerations` | list of node taints to tolerate | `[]`
`securityContext` | security context configuration | `{}`
`podSecurityStandard` | set desired pod security level `privileged`, `default`, `restricted`, `custom`. Set to `restricted` for maximum security for your cluster. See: https://kyverno.io/policies/pod-security/ | `default`
`podSecurityPolicies` | Policies to include when `podSecurityStandard` is set to `custom` | `[]`
`validationFailureAction` | set to get response in failed validation check. Supported values- `audit`, `enforce`. See: https://kyverno.io/docs/writing-policies/validate/ | `audit`
| Parameter | Description | Default |
| ---------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `affinity` | node/pod affinities | `nil` |
| `createSelfSignedCert` | generate a self signed cert and certificate authority. Kyverno defaults to using kube-controller-manager CA-signed certificate or existing cert secret if false. | `false` |
| `config.existingConfig` | existing Kubernetes configmap to use for the resource filters configuration | `nil` |
| `config.resourceFilters` | list of filter of resource types to be skipped by kyverno policy engine. See [documentation](https://github.com/kyverno/kyverno/blob/master/documentation/installation.md#filter-kubernetes-resources-that-admission-webhook-should-not-process) for details | `["[Event,*,*]","[*,kube-system,*]","[*,kube-public,*]","[*,kube-node-lease,*]","[Node,*,*]","[APIService,*,*]","[TokenReview,*,*]","[SubjectAccessReview,*,*]","[*,kyverno,*]"]` |
| `dnsPolicy` | Sets the DNS Policy which determines the manner in which DNS resolution happens across the cluster. For further reference, see [the official docs](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy) | `ClusterFirst` |
| envVars | Extra environment variables to pass to kyverno | {} |
| `extraArgs` | list of extra arguments to give the binary | `[]` |
| `fullnameOverride` | override the expanded name of the chart | `nil` |
| `generatecontrollerExtraResources` | extra resource type Kyverno is allowed to generate | `[]` |
| `hostNetwork` | Use the host network's namespace. Set it to `true` when dealing with a custom CNI over Amazon EKS | `false` |
| `image.pullPolicy` | Image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify image pull secrets | `[]` (does not add image pull secrets to deployed pods) |
| `image.repository` | Image repository | `ghcr.io/kyverno/kyverno` |
| `image.tag` | Image tag | `nil` |
| `initImage.pullPolicy` | Init image pull policy | `nil` |
| `initImage.repository` | Init image repository | `ghcr.io/kyverno/kyvernopre` |
| `initImage.tag` | Init image tag | `nil` |
| `livenessProbe` | liveness probe configuration | `{}` |
| `nameOverride` | override the name of the chart | `nil` |
| `namespace` | namespace the chart deploy to | `nil` |
| `nodeSelector` | node labels for pod assignment | `{}` |
| `podAnnotations` | annotations to add to each pod | `{}` |
| `podLabels` | additional labels to add to each pod | `{}` |
| `podSecurityContext` | security context for the pod | `{}` |
| `priorityClassName` | priorityClassName | `nil` |
| `rbac.create` | create cluster roles, cluster role bindings, and service account | `true` |
| `rbac.serviceAccount.create` | create a service account | `true` |
| `rbac.serviceAccount.name` | the service account name | `nil` |
| `rbac.serviceAccount.annotations` | annotations for the service account | `{}` |
| `readinessProbe` | readiness probe configuration | `{}` |
| `replicaCount` | desired number of pods | `1` |
| `resources` | pod resource requests & limits | `{}` |
| `service.annotations` | annotations to add to the service | `{}` |
| `service.nodePort` | node port | `nil` |
| `service.port` | port for the service | `443` |
| `service.type` | type of service | `ClusterIP` |
| `tolerations` | list of node taints to tolerate | `[]` |
| `securityContext` | security context configuration | `{}` |
| `podSecurityStandard` | set desired pod security level `privileged`, `default`, `restricted`, `custom`. Set to `restricted` for maximum security for your cluster. See: https://kyverno.io/policies/pod-security/ | `default` |
| `podSecurityPolicies` | Policies to include when `podSecurityStandard` is set to `custom` | `[]` |
| `validationFailureAction` | set to get response in failed validation check. Supported values- `audit`, `enforce`. See: https://kyverno.io/docs/writing-policies/validate/ | `audit` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
@ -128,4 +129,5 @@ If `createSelfSignedCert` is `false`, Kyverno will generate a self-signed CA and
## Kyverno CLI
See: https://kyverno.io/docs/kyverno-cli/
See: https://kyverno.io/docs/kyverno-cli/

View file

@ -61,6 +61,10 @@ spec:
valueFrom:
fieldRef:
fieldPath: metadata.namespace
{{- with .Values.envVars }}
env:
{{- toYaml . | nindent 14 }}
{{- end }}
containers:
- name: kyverno
image: {{ .Values.image.repository }}:{{ default .Chart.AppVersion .Values.image.tag }}

View file

@ -57,6 +57,8 @@ hostNetwork: false
# for further reference: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy
dnsPolicy: "ClusterFirst"
envVars: {}
extraArgs: []
# - --webhooktimeout=4

View file

@ -18,8 +18,6 @@ RUN go env
RUN CGO_ENABLED=0 go build -o /output/kyverno -ldflags="${LD_FLAGS}" -v ./cmd/cli/kubectl-kyverno/
RUN useradd -u 10001 kyverno
# Packaging stage
FROM scratch
@ -28,6 +26,6 @@ LABEL maintainer="Kyverno"
COPY --from=builder /output/kyverno /
COPY --from=builder /etc/passwd /etc/passwd
USER kyverno
USER 10001
ENTRYPOINT ["./kyverno"]

View file

@ -18,8 +18,6 @@ RUN go env
RUN CGO_ENABLED=0 go build -o /output/kyvernopre -ldflags="${LD_FLAGS}" -v ./cmd/initContainer/
RUN useradd -u 10001 kyverno
# Packaging stage
FROM scratch
@ -28,6 +26,6 @@ LABEL maintainer="Kyverno"
COPY --from=builder /output/kyvernopre /
COPY --from=builder /etc/passwd /etc/passwd
USER kyverno
USER 10001
ENTRYPOINT ["./kyvernopre"]

View file

@ -10,7 +10,6 @@ import (
"sync"
"time"
"github.com/kyverno/kyverno/pkg/config"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/signal"
"github.com/kyverno/kyverno/pkg/utils"
@ -28,8 +27,6 @@ var (
)
const (
mutatingWebhookConfigKind string = "MutatingWebhookConfiguration"
validatingWebhookConfigKind string = "ValidatingWebhookConfiguration"
policyReportKind string = "PolicyReport"
clusterPolicyReportKind string = "ClusterPolicyReport"
reportChangeRequestKind string = "ReportChangeRequest"
@ -72,16 +69,6 @@ func main() {
}
requests := []request{
{validatingWebhookConfigKind, config.ValidatingWebhookConfigurationName},
{validatingWebhookConfigKind, config.ValidatingWebhookConfigurationDebugName},
{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationName},
{mutatingWebhookConfigKind, config.MutatingWebhookConfigurationDebugName},
{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationName},
{validatingWebhookConfigKind, config.PolicyValidatingWebhookConfigurationDebugName},
{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationName},
{mutatingWebhookConfigKind, config.PolicyMutatingWebhookConfigurationDebugName},
{policyReportKind, ""},
{clusterPolicyReportKind, ""},
@ -120,8 +107,6 @@ func main() {
func executeRequest(client *client.Client, req request) error {
switch req.kind {
case mutatingWebhookConfigKind, validatingWebhookConfigKind:
return removeWebhookIfExists(client, req.kind, req.name)
case policyReportKind:
return removePolicyReport(client, req.kind)
case clusterPolicyReportKind:
@ -236,29 +221,6 @@ func merge(done <-chan struct{}, stopCh <-chan struct{}, processes ...<-chan err
return out
}
func removeWebhookIfExists(client *client.Client, kind string, name string) error {
logger := log.Log.WithName("removeExistingWebhook").WithValues("kind", kind, "name", name)
var err error
// Get resource
_, err = client.GetResource("", kind, "", name)
if errors.IsNotFound(err) {
logger.V(4).Info("resource not found")
return nil
}
if err != nil {
logger.Error(err, "failed to get resource")
return err
}
// Delete resource
err = client.DeleteResource("", kind, "", name, false)
if err != nil {
logger.Error(err, "failed to delete resource")
return err
}
logger.Info("removed the resource")
return nil
}
func removeClusterPolicyReport(client *client.Client, kind string) error {
logger := log.Log.WithName("removeClusterPolicyReport")

View file

@ -18,8 +18,6 @@ RUN go env
RUN CGO_ENABLED=0 go build -o /output/kyverno -ldflags="${LD_FLAGS}" -v ./cmd/kyverno/
RUN useradd -u 10001 kyverno
# Packaging stage
FROM scratch
@ -28,6 +26,6 @@ LABEL maintainer="Kyverno"
COPY --from=builder /output/kyverno /
COPY --from=builder /etc/passwd /etc/passwd
USER kyverno
USER 10001
ENTRYPOINT ["./kyverno"]

View file

@ -1,3 +1,7 @@
FROM scratch
ADD kyverno /kyverno
USER 10001
ENTRYPOINT ["/kyverno"]

View file

@ -24,6 +24,7 @@ import (
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/signal"
ktls "github.com/kyverno/kyverno/pkg/tls"
"github.com/kyverno/kyverno/pkg/utils"
"github.com/kyverno/kyverno/pkg/version"
"github.com/kyverno/kyverno/pkg/webhookconfig"
@ -49,6 +50,7 @@ var (
profilePort string
webhookTimeout int
genWorkers int
profile bool
policyReport bool
@ -62,6 +64,7 @@ func main() {
flag.StringVar(&excludeGroupRole, "excludeGroupRole", "", "")
flag.StringVar(&excludeUsername, "excludeUsername", "", "")
flag.IntVar(&webhookTimeout, "webhooktimeout", 3, "timeout for webhook configurations")
flag.IntVar(&genWorkers, "gen-workers", 20, "workers for generate controller")
flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
flag.StringVar(&serverIP, "serverIP", "", "IP address where Kyverno controller runs. Only required if out-of-cluster.")
flag.StringVar(&runValidationInMutatingWebhook, "runValidationInMutatingWebhook", "", "Validation will also be done using the mutation webhook, set to 'true' to enable. Older kubernetes versions do not work properly when a validation webhook is registered.")
@ -144,7 +147,7 @@ func main() {
log.Log)
// Resource Mutating Webhook Watcher
webhookMonitor := webhookconfig.NewMonitor(log.Log.WithName("WebhookMonitor"))
webhookMonitor := webhookconfig.NewMonitor(rCache, log.Log.WithName("WebhookMonitor"))
// KYVERNO CRD INFORMER
// watches CRD resources:
@ -283,8 +286,9 @@ func main() {
client,
)
certRenewer := ktls.NewCertRenewer(client, clientConfig, ktls.CertRenewalInterval, ktls.CertValidityDuration, log.Log.WithName("CertRenewer"))
// Configure certificates
tlsPair, err := client.InitTLSPemPair(clientConfig, serverIP)
tlsPair, err := certRenewer.InitTLSPemPair(serverIP)
if err != nil {
setupLog.Error(err, "Failed to initialize TLS key/certificate pair")
os.Exit(1)
@ -305,8 +309,6 @@ func main() {
// Sync openAPI definitions of resources
openAPISync := openapi.NewCRDSync(client, openAPIController)
supportMutateValidate := utils.HigherThanKubernetesVersion(client, log.Log, 1, 14, 0)
// WEBHOOK
// - https server to provide endpoints called based on rules defined in Mutating & Validation webhook configuration
// - reports the results based on the response from the policy engine:
@ -329,12 +331,12 @@ func main() {
pCacheController.Cache,
webhookCfg,
webhookMonitor,
certRenewer,
statusSync.Listener,
configData,
reportReqGen,
grgen,
auditHandler,
supportMutateValidate,
cleanUp,
log.Log.WithName("WebhookServer"),
openAPIController,
@ -355,11 +357,11 @@ func main() {
go reportReqGen.Run(2, stopCh)
go prgen.Run(1, stopCh)
go grgen.Run(1, stopCh)
go configData.Run(stopCh)
go policyCtrl.Run(2, stopCh)
go eventGenerator.Run(3, stopCh)
go grc.Run(1, stopCh)
go grgen.Run(10, stopCh)
go grc.Run(genWorkers, stopCh)
go grcc.Run(1, stopCh)
go statusSync.Run(1, stopCh)
go pCacheController.Run(1, stopCh)

View file

@ -2480,7 +2480,6 @@ spec:
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
initContainers:
- image: ghcr.io/kyverno/kyvernopre:v1.3.4
imagePullPolicy: IfNotPresent
@ -2493,7 +2492,6 @@ spec:
privileged: false
readOnlyRootFilesystem: true
runAsNonRoot: true
runAsUser: 1000
securityContext:
runAsNonRoot: true
serviceAccountName: kyverno-service-account

View file

@ -24,7 +24,6 @@ spec:
image: ghcr.io/kyverno/kyvernopre:latest
imagePullPolicy: IfNotPresent
securityContext:
runAsUser: 1000
runAsNonRoot: true
privileged: false
allowPrivilegeEscalation: false
@ -42,6 +41,8 @@ spec:
#- "--webhooktimeout=4"
# enable profiling
# - "--profile"
# configure the workers for generate controller
# - --gen-workers=20
- "-v=2"
ports:
- containerPort: 9443
@ -57,7 +58,6 @@ spec:
- name: KYVERNO_SVC
value: kyverno-svc
securityContext:
runAsUser: 1000
runAsNonRoot: true
privileged: false
allowPrivilegeEscalation: false

10
go.mod
View file

@ -5,35 +5,33 @@ go 1.14
require (
github.com/cenkalti/backoff v2.2.1+incompatible
github.com/cornelk/hashmap v1.0.1
github.com/distribution/distribution v2.7.1+incompatible
github.com/docker/distribution v2.7.1+incompatible // indirect
github.com/evanphx/json-patch/v5 v5.2.0
github.com/fatih/color v1.9.0
github.com/fsnotify/fsnotify v1.4.9 // indirect
github.com/gardener/controller-manager-library v0.2.0
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32
github.com/go-git/go-billy/v5 v5.0.0
github.com/go-git/go-git/v5 v5.2.0
github.com/go-logr/logr v0.3.0
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
github.com/googleapis/gnostic v0.5.4
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af
github.com/julienschmidt/httprouter v1.3.0
github.com/kataras/tablewriter v0.0.0-20180708051242-e063d29b7c23
github.com/kr/pretty v0.2.0 // indirect
github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7
github.com/mattbaird/jsonpatch v0.0.0-20171005235357-81af80346b1a
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/minio/minio v0.0.0-20200114012931-30922148fbb5
github.com/onsi/ginkgo v1.14.1
github.com/onsi/gomega v1.10.2
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.1 // indirect
github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/pkg/errors v0.9.1
github.com/sirupsen/logrus v1.6.0 // indirect
github.com/spf13/cobra v1.1.1
github.com/stretchr/testify v1.6.1
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gopkg.in/yaml.v2 v2.3.0
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776
gotest.tools v2.2.0+incompatible

128
go.sum
View file

@ -70,17 +70,17 @@ github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUW
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM=
github.com/ahmetb/gen-crd-api-reference-docs v0.1.5/go.mod h1:P/XzJ+c2+khJKNKABcm2biRwk2QAuwbLf8DlXuaL7WM=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7 h1:uSoVVbwJiQipAclBbw+8quDsfcvFjOpI5iCf4p/cqCs=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/participle v0.2.1/go.mod h1:SW6HZGeZgSIpcUWX3fXpfZhuaWHnmoD5KCVaqSaNTkk=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc h1:cAKDfWh5VpdgMhJosfJnn5/FoN2SRZ4p7fJNX58YPaU=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf h1:qet1QNfXsQxTZqLG4oE62mJzwPIB8+Tee4RNCL9ulrY=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/aliyun/aliyun-oss-go-sdk v0.0.0-20190307165228-86c17b95fcd5/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8=
github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8=
github.com/andybalholm/cascadia v1.0.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239 h1:kFOfPq6dUM1hTo4JG6LR5AXSUEsOjtdm0kw0FtQtMJA=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ=
github.com/appscode/jsonpatch v0.0.0-20190108182946-7c0e3b262f30/go.mod h1:4AJxUpXUhv4N+ziTvIcWWXgeorXpxPZOfk9HdEVr96M=
@ -89,9 +89,9 @@ github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-metrics v0.0.0-20190430140413-ec5e00d3c878/go.mod h1:3AMJUQhVx52RsWOnlkpikZr01T/yAVN2gn0861vByNg=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.20.21/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo=
github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc=
@ -103,7 +103,6 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r
github.com/bgentry/go-netrc v0.0.0-20140422174119-9fd32a8b3d3d/go.mod h1:6QX/PXZ00z/TKoufEY6K/a0k6AhaJrQKdFe6OfVXsa4=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs=
github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/bombsimon/wsl v1.2.5/go.mod h1:43lEF/i0kpXbLCeDXL9LMT8c92HyBywXb0AsgMHYngM=
@ -111,7 +110,6 @@ github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEe
github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM=
github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.28/go.mod h1:pQciLPpbU0oxA0h+VJYYLxO+XeDQb5pZijXscXHm81s=
@ -120,7 +118,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag=
github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I=
github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8=
github.com/coredns/coredns v1.4.0/go.mod h1:zASH/MVDgR6XZTbxvOnsZfffS+31vg6Ackf/wo1+AM0=
@ -156,7 +153,11 @@ github.com/dchest/siphash v1.1.0/go.mod h1:q+IRvb2gOSrUnYoPqHiyHXS0FOBBOdl6tONBl
github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/distribution/distribution v2.7.1+incompatible h1:aGFx4EvJWKEh//lHPLwFhFgwFHKH06TzNVPamrMn04M=
github.com/distribution/distribution v2.7.1+incompatible/go.mod h1:EgLm2NgWtdKgzF9NpMzUKgzmR7AMmb0VQi2B+ZzDRjc=
github.com/djherbis/atime v1.0.0/go.mod h1:5W+KBIuTwVGcqjIfaTwt+KSYX1o6uep8dtevevQP/f8=
github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v0.7.3-0.20190327010347-be7ac8be2ae0/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
@ -184,7 +185,6 @@ github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7
github.com/evanphx/json-patch v0.0.0-20190203023257-5858425f7550/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.0.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.5.0+incompatible h1:ouOWdg56aJriqS0huScTkVXPC5IcNrDCXZ6OoTAWu7M=
github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses=
github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
@ -194,6 +194,7 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
@ -208,6 +209,7 @@ github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2H
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 h1:Mn26/9ZMNWSw9C9ERFA1PUxfmGpolnw2v0bKOREu5ew=
github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/gliderlabs/ssh v0.2.2 h1:6zsha5zo/TWhRhwqCD3+EarCAgZ2yN28ipRnGPnwkI0=
github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q=
@ -216,12 +218,9 @@ github.com/go-errors/errors v1.0.1 h1:LUHzmkK3GUKUrL/1gfBUxAHzcev3apQlezX/+O7ma6
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4=
github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E=
github.com/go-git/go-billy v1.0.0 h1:bXR6Zu3opPSg0R4dDxqaLglY4rxw7ja7wS16qSpOKL4=
github.com/go-git/go-billy v4.2.0+incompatible h1:Z6QtVXd5tjxUtcODLugkJg4WaZnGg13CD8qB9pr+7q0=
github.com/go-git/go-billy/v5 v5.0.0 h1:7NQHvd9FVid8VL4qVUMm8XifBK+2xCoZ2lSk0agRrHM=
github.com/go-git/go-billy/v5 v5.0.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0=
github.com/go-git/go-git v1.0.0 h1:YcN9iDGDoXuIw0vHls6rINwV416HYa0EB2X+RBsyYp4=
github.com/go-git/go-git v4.7.0+incompatible h1:+W9rgGY4DOKKdX2x6HxSR7HNeTxqiKrOvKnuittYVdA=
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12 h1:PbKy9zOy4aAKrJ5pibIRpVO2BXnK1Tlcg+caKI7Ox5M=
github.com/go-git/go-git-fixtures/v4 v4.0.2-0.20200613231340-f56387b50c12/go.mod h1:m+ICp2rF3jDhFgEZ/8yziagdT1C+ZpZcrJjappBCDSw=
github.com/go-git/go-git/v5 v5.2.0 h1:YPBLG/3UK1we1ohRkncLjaXWLW+HKp5QNM/jTli2JgI=
github.com/go-git/go-git/v5 v5.2.0/go.mod h1:kh02eMX+wdqqxgNMEyq8YgwlIOsDOa9homkUq1PoTMs=
@ -234,14 +233,10 @@ github.com/go-ldap/ldap v3.0.2+incompatible/go.mod h1:qfd9rJvER9Q0/D/Sqn1DfHRoBp
github.com/go-lintpack/lintpack v0.5.2/go.mod h1:NwZuYi2nUHho8XEIZ6SIxihrnPoqBTDqfpXvXAN0sXM=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logr/logr v0.1.0 h1:M1Tv3VzNlEHg6uyACnRdtrploV2P7wZqH8BoQMtz0cg=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.3.0 h1:q4c+kbcR0d5rSurhBR8dIgieOaYpXtsdTYfx22Cu6rs=
github.com/go-logr/logr v0.3.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/zapr v0.1.0 h1:h+WVe9j6HAA01niTJPA/kKH0i7e0rLZBCwauQFcRE54=
github.com/go-logr/zapr v0.1.0/go.mod h1:tabnROwaDl0UNxkVeFRbY8bwB37GwRv0P8lg6aAiEnk=
github.com/go-logr/zapr v0.2.0/go.mod h1:qhKdvif7YF5GI9NWEpyxTSSBdGmzkNguibrdCNVPunU=
github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8=
github.com/go-ole/go-ole v1.2.4 h1:nNBDSCOigTSiarFpYE9J/KtEA1IOW4CNeqT9TQDqCxI=
@ -250,11 +245,9 @@ github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70t
github.com/go-openapi/analysis v0.17.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.18.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik=
github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk=
github.com/go-openapi/analysis v0.19.5 h1:8b2ZgKfKIUTVQpTb77MoRDIMEIwvDVw40o3aOXdfYzI=
github.com/go-openapi/analysis v0.19.5/go.mod h1:hkEAkxagaIvIP7VTn8ygJNkd4kAYON2rCu0v0ObL0AU=
github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0=
github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY=
github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94=
github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0=
github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M=
@ -272,11 +265,9 @@ github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf
github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU=
github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs=
github.com/go-openapi/loads v0.19.4 h1:5I4CCSqoWzT+82bBkNIvmLc0UOsoKKQ4Fz+3VxOB7SY=
github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk=
github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA=
github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64=
github.com/go-openapi/runtime v0.19.4 h1:csnOgcgAiuGoM/Po7PEpKDoNulCcF3FGbSnbHfxgjMI=
github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4=
github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc=
github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI=
@ -289,7 +280,6 @@ github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pL
github.com/go-openapi/strfmt v0.18.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU=
github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY=
github.com/go-openapi/strfmt v0.19.3/go.mod h1:0yX7dbo8mKIvc3XSKp7MNfxw4JytCfCD6+bY1AVL9LU=
github.com/go-openapi/strfmt v0.19.5 h1:0utjKrw+BAh8s57XE9Xz8DUBsVvPmRUB6styvl9wWIM=
github.com/go-openapi/strfmt v0.19.5/go.mod h1:eftuHTlB/dI8Uq8JJOyRlieZf+WkkxUuk0dgdHXr2Qk=
github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I=
github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg=
@ -299,11 +289,8 @@ github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tF
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4=
github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA=
github.com/go-openapi/validate v0.19.5/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-openapi/validate v0.19.8 h1:YFzsdWIDfVuLvIOF+ZmKjVg1MbPJ1QgY9PihMwei1ys=
github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/go-test/deep v1.0.2-0.20181118220953-042da051cf31/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
github.com/go-toolsmith/astcast v1.0.0/go.mod h1:mt2OdQTeAQcY4DQgPSArJjHCcOwlX+Wl/kwN+LbLGQ4=
@ -331,7 +318,6 @@ github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20180513044358-24b0969c4cb7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -346,7 +332,6 @@ github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+
github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
@ -382,14 +367,12 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.2 h1:X2ev0eStA3AbceY54o37/0PQ/UWqKEiiO2dKL5OPaFM=
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -402,17 +385,13 @@ github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hf
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510/go.mod h1:pupxD2MaaD3pAXIBCelhxNneeOaAeabZDe5s4K6zSpQ=
github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.3.1 h1:WeAefnSUHlBb0iJKwxFDZdbfGwkd7xRNuV+IpXMJhYk=
github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU=
github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I=
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.4 h1:ynbQIWjLw7iv6HAFdixb30U7Uvcmx+f4KlLJpmhkTK0=
@ -471,7 +450,6 @@ github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc=
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64=
github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ=
@ -483,14 +461,11 @@ github.com/hashicorp/vault/api v1.0.4/go.mod h1:gDcqh3WGcR1cpF5AJz/B1UFheUEneMoI
github.com/hashicorp/vault/sdk v0.1.13/go.mod h1:B+hVj7TpuQY1Y/GPbCpffmgd+tSEwvhkWnjtSYCaS2M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hashicorp/yamux v0.0.0-20181012175058-2f1d1f20f75d/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.8 h1:CGgOkSJeqMRmt0D9XLWExdT4m4F1vd3FV3VPt+0VxkQ=
github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/imdario/mergo v0.3.10 h1:6q5mVkdH/vYmqngx7kZQTjJ5HRsx+ImorDIEQ+beJgc=
github.com/imdario/mergo v0.3.10/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA=
@ -509,8 +484,6 @@ github.com/json-iterator/go v0.0.0-20180701071628-ab8a2e0c74be/go.mod h1:+SdeFBv
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
@ -542,13 +515,12 @@ github.com/konsorten/go-windows-terminal-sequences v1.0.3 h1:CE8S1cTafDpPvMhIxNJ
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kurin/blazer v0.5.4-0.20190613185654-cf2f27cc0be3/go.mod h1:4FCXMUWo9DllR2Do4TtBd377ezyAJ51vB5uTBjt0pGU=
github.com/lensesio/tableprinter v0.0.0-20201125135848-89e81fc956e7 h1:k/1ku0yehLCPqERCHkIHMDqDg1R02AcCScRuHbamU3s=
@ -591,7 +563,6 @@ github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp
github.com/mattn/go-runewidth v0.0.4 h1:2BvfKmzob6Bmd4YsL0zygOqfdFnK7GR4QL06Do4/p7Y=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/goveralls v0.0.2/go.mod h1:8d1ZMHsd7fW6IRPKQh46F2WRpyib5/X4FOpevwGNQEw=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/dns v1.1.8/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
@ -621,7 +592,6 @@ github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUb
github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS42BGNg=
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -655,6 +625,7 @@ github.com/nats-io/stan.go v0.4.5/go.mod h1:Ji7mK6gRZJSH1nc3ZJH6vi7zn/QnZhpR9Arm
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/ncw/directio v1.0.5 h1:JSUBhdjEvVaJvOoyPAbcW0fnd0tvRXD76wEfZ1KcQz4=
github.com/ncw/directio v1.0.5/go.mod h1:rX/pKEYkOXBGOggmcyJeJGloCkleSvphPx2eV3t6ROk=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nsqio/go-nsq v1.0.7/go.mod h1:XP5zaUs3pqf+Q71EqUJs3HYfBIqfK6G83WQMdNN+Ito=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
@ -666,7 +637,6 @@ github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.1 h1:jMU0WaQrP0a/YAEq8eJmJKjBoMs+pClEr1vDMlM/Do4=
@ -676,21 +646,22 @@ github.com/onsi/gomega v0.0.0-20190113212917-5533ce8a0da3/go.mod h1:ex+gbHU/CVuB
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.2 h1:aY/nuoWlKJud2J6U0E3NWsjlg+0GtwXxgEqthRdzlcs=
github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1 h1:JMemWkRwHx4Zj+fVxWoMCFm/8sYGGrUVojFA6h/TRcI=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6 h1:lNCW6THrCKBiJBpz8kbVGjC7MgdCGKwuvBgc7LoD6sw=
github.com/orcaman/concurrent-map v0.0.0-20190826125027-8c72a8bb44f6/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/patrickmn/go-cache v1.0.0 h1:3gD5McaYs9CxjyK5AXGcq8gdeCARtd/9gJDUvVeaZ0Y=
github.com/patrickmn/go-cache v2.1.0+incompatible h1:HRMgzkcYKYpi3C8ajMPV8OFXaaRUnok+kx1WdO15EQc=
github.com/patrickmn/go-cache v2.1.0+incompatible/go.mod h1:3Qf8kWWT7OJRJbdiICTKqZju1ZixQ/KpMGzzAfe6+WQ=
github.com/paulmach/orb v0.1.3/go.mod h1:VFlX/8C+IQ1p6FTRRKzKoOPJnvEtA5G0Veuqwbu//Vk=
github.com/pborman/getopt v0.0.0-20180729010549-6fdd0a2c7117/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o=
github.com/pborman/uuid v1.2.0 h1:J7Q5mO4ysT1dv8hyrUGHb9+ooztCXu1D8MY8DZYsu3g=
github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI=
@ -716,13 +687,11 @@ github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:
github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.4.1 h1:K0MGApIoQvMw27RTdJkPbr3JZ7DNbtxQNyi5STVM6Kw=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
@ -762,7 +731,6 @@ github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxr
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0 h1:UBcNElsrwanuuMsnGSlYmtmgbb23qDR5dG+6X6Oo89I=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
@ -777,12 +745,10 @@ github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4k
github.com/sourcegraph/go-diff v0.5.1/go.mod h1:j2dHj3m8aZgQO8lMTcTnBcXkRRRqi34cd2MNlA9u1mE=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU=
github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8=
github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE=
github.com/spf13/cobra v1.1.1 h1:KfztREH0tPxJJ+geloSLaAkaPkr4ki2Er5quFV1TDo4=
github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI=
@ -805,9 +771,7 @@ github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRci
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.3-0.20181224173747-660f15d67dbb/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
@ -848,11 +812,9 @@ github.com/yujunz/go-getter v1.5.1-lite.0.20201201013212-6d9c071adddf/go.mod h1:
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.etcd.io/etcd v0.5.0-alpha.5.0.20200910180754-dd1b699fc489/go.mod h1:yVHk9ub3CSBatqGNg7GRmsnfLWtoW60w4eDYfh7vHDg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.mongodb.org/mongo-driver v1.1.2 h1:jxcFYjlkl8xaERsgLo+RNquI0epW6zuy/ZRQs6jnrFA=
go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@ -862,18 +824,15 @@ go.starlark.net v0.0.0-20190528202925-30ae18b8564f/go.mod h1:c1/X6cHgvdXj6pUlmWK
go.starlark.net v0.0.0-20200306205701-8dd3e2ee1dd5/go.mod h1:nmDLcffg48OtT/PSW0Hg7FvpRQsQh5OSqIylirxKC7o=
go.uber.org/atomic v0.0.0-20181018215023-8dc6146f7569/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0 h1:cxzIVoETapQEqDhQu3QfnvXAV4AlzcvUCxkVUFw3+EU=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ=
go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A=
go.uber.org/multierr v0.0.0-20180122172545-ddea229ff1df/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.1.0 h1:HoEmRHQPVSqub6w2z2d2EOVs2fjyFRGyofhKuyDq0QI=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU=
go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA=
go.uber.org/zap v0.0.0-20180814183419-67bc79d13d15/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.8.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0 h1:ORx85nbTijNz8ljznvCMR1ZBIPKFn3jQrag10X2AsuM=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@ -896,10 +855,7 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975 h1:/Tl7pH94bvbAAHBdZJT947M/+gp0+CqQXDtMRC0fseo=
golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0 h1:hb9wdF1z5waM+dSIICn1l0DkLVDT3hqhhQsDNUmHPRE=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
@ -970,8 +926,6 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
golang.org/x/net v0.0.0-20201021035429-f5854403a974 h1:IX6qOQeG5uLjB/hjjwjedwfjND0hgjPMMyO1RoIXQNI=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b h1:uwuIcX0g4Yl1NC5XAz37xsr2lTtcqevgzYNVt49waME=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -1021,14 +975,12 @@ golang.org/x/sys v0.0.0-20190922100055-0a153f010e69/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191002063906-3421d5a6bb1c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg=
golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1038,7 +990,6 @@ golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd h1:5CtCZbICpIOFdgO940moixOPjc0178IU44m4EjOO5IY=
golang.org/x/sys v0.0.0-20201112073958-5cba982894dd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1047,16 +998,13 @@ golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fq
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20181227161524-e6919f6577db/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4 h1:0YWbFKbhXG/wIiuHDSKpS0Iy7FSA+u45VtBMfQcFTTc=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0 h1:/5xXl8Y5W96D+TtHSlonuFqGHIWVuyCkGJLwGh9JJFs=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e h1:EHBhcS0mlXEAVwNyO2dLfjToGsyY4j24pTs2ScHnX7s=
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -1115,11 +1063,9 @@ golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapK
golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gomodules.xyz/jsonpatch/v2 v2.0.1/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
gomodules.xyz/jsonpatch/v2 v2.1.0/go.mod h1:IhYNNY4jnS53ZnfE4PAmpKtDpTCj1JFXc+3mwe7XcUU=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@ -1137,8 +1083,8 @@ google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6 h1:lMO5rYAqUxkmaj76jAkRUvt5JZgFymx/+Q5Mzfivuhc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -1171,8 +1117,6 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
google.golang.org/grpc v1.22.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
@ -1187,18 +1131,16 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/asn1-ber.v1 v1.0.0-20181015200546-f715ec2f112d/go.mod h1:cuepJuh7vyXfUyUwEgHQXw849cJrilpS5NeIjOWESAw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f h1:BLraFXnmrev5lT+xlilqcH8XK9/i0At2xKjWk4p6zsU=
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
@ -1232,9 +1174,7 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20190905181640-827449938966/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71 h1:Xe2gvTZUJpsvOWUnvmL/tmhVBZUmHSvLbMjRj6NUUKo=
gopkg.in/yaml.v3 v3.0.0-20200121175148-a6ecf24a6d71/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776 h1:tQIYjPdBoyREyB9XMu+nnTclpTYkz2zFM+lzLJFO4gQ=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
@ -1249,17 +1189,11 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9
k8s.io/api v0.0.0-20190918155943-95b840bb6a1f/go.mod h1:uWuOHnjmNrtQomJrvEBg0c0HRNyQ+8KTEERVsK0PW48=
k8s.io/api v0.16.4/go.mod h1:AtzMnsR45tccQss5q8RnF+W8L81DH6XwXwo/joEx9u0=
k8s.io/api v0.17.0/go.mod h1:npsyOePkeP0CPwyGfXDHxvypiYMJxBWAMpQxCaJ4ZxI=
k8s.io/api v0.17.2/go.mod h1:BS9fjjLc4CMuqfSO8vgbHPKMt5+SF0ET6u/RVDihTo4=
k8s.io/api v0.18.12 h1:97X6znOXMVgCKivTAgpBXGBGlCe3gbM++yFdldgBCaE=
k8s.io/api v0.18.12/go.mod h1:3sS78jmUoGHwERyMbEhxP6owcQ77UxGo+Yy+dKNWrh0=
k8s.io/api v0.20.1/go.mod h1:KqwcCVogGxQY3nBlRpwt+wpAMF/KjaCc7RpywacvqUo=
k8s.io/api v0.20.2 h1:y/HR22XDZY3pniu9hIFDLpUCPq2w5eQ6aV/VFQ7uJMw=
k8s.io/api v0.20.2/go.mod h1:d7n6Ehyzx+S+cE3VhTGfVNNqtGc/oL9DCdYYahlurV8=
k8s.io/apiextensions-apiserver v0.0.0-20190918161926-8f644eb6e783/go.mod h1:xvae1SZB3E17UpV59AWc271W/Ph25N+bjPyR63X6tPY=
k8s.io/apiextensions-apiserver v0.16.4/go.mod h1:HYQwjujEkXmQNhap2C9YDdIVOSskGZ3et0Mvjcyjbto=
k8s.io/apiextensions-apiserver v0.17.2/go.mod h1:4KdMpjkEjjDI2pPfBA15OscyNldHWdBCfsWMDWAmSTs=
k8s.io/apiextensions-apiserver v0.18.12 h1:b0jTgW/qwqZBMIJTMxkLvvAtNRDZboG5yZiIbOFgQv8=
k8s.io/apiextensions-apiserver v0.18.12/go.mod h1:nihADkPed1L37Vxpz2/BrtxO9mCtINH23aNtUe/CRLo=
k8s.io/apiextensions-apiserver v0.20.1/go.mod h1:ntnrZV+6a3dB504qwC5PN/Yg9PBiDNt1EVqbW2kORVk=
k8s.io/apiextensions-apiserver v0.20.2 h1:rfrMWQ87lhd8EzQWRnbQ4gXrniL/yTRBgYH1x1+BLlo=
k8s.io/apiextensions-apiserver v0.20.2/go.mod h1:F6TXp389Xntt+LUq3vw6HFOLttPa0V8821ogLGwb6Zs=
@ -1267,33 +1201,22 @@ k8s.io/apimachinery v0.0.0-20190612125636-6a5db36e93ad/go.mod h1:I4A+glKBHiTgiEj
k8s.io/apimachinery v0.0.0-20190913080033-27d36303b655/go.mod h1:nL6pwRT8NgfF8TT68DBI8uEePRt89cSvoXUVqbkWHq4=
k8s.io/apimachinery v0.16.4/go.mod h1:llRdnznGEAqC3DcNm6yEj472xaFVfLM7hnYofMb12tQ=
k8s.io/apimachinery v0.17.0/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.17.2/go.mod h1:b9qmWdKlLuU9EBh+06BtLcSf/Mu89rWL33naRxs1uZg=
k8s.io/apimachinery v0.18.12 h1:bLFXU4IxOu06F6Z6PV7eqtapXFb1G2q0ni0XBNFtJH8=
k8s.io/apimachinery v0.18.12/go.mod h1:PF5taHbXgTEJLU+xMypMmYTXTWPJ5LaW8bfsisxnEXk=
k8s.io/apimachinery v0.20.1/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apimachinery v0.20.2 h1:hFx6Sbt1oG0n6DZ+g4bFt5f6BoMkOjKWsQFu077M3Vg=
k8s.io/apimachinery v0.20.2/go.mod h1:WlLqWAHZGg07AeltaI0MV5uk1Omp8xaN0JGLY6gkRpU=
k8s.io/apiserver v0.0.0-20190918160949-bfa5e2e684ad/go.mod h1:XPCXEwhjaFN29a8NldXA901ElnKeKLrLtREO9ZhFyhg=
k8s.io/apiserver v0.16.4/go.mod h1:kbLJOak655g6W7C+muqu1F76u9wnEycfKMqbVaXIdAc=
k8s.io/apiserver v0.17.2/go.mod h1:lBmw/TtQdtxvrTk0e2cgtOxHizXI+d0mmGQURIHQZlo=
k8s.io/apiserver v0.18.12/go.mod h1:uFOeW4LlxS6KDgLWy3n3gh0DhC6m41QIFgL33ouk+4w=
k8s.io/apiserver v0.20.1/go.mod h1:ro5QHeQkgMS7ZGpvf4tSMx6bBOgPfE+f52KwvXfScaU=
k8s.io/apiserver v0.20.2/go.mod h1:2nKd93WyMhZx4Hp3RfgH2K5PhwyTrprrkWYnI7id7jA=
k8s.io/cli-runtime v0.18.12 h1:gVWbvntlEttCIvy1jc5UUr2cG/4TmmCM1MY/PGeENBo=
k8s.io/cli-runtime v0.18.12/go.mod h1:wTj8W8za8NDWe505mrlckiZ5H2cZA0YEuv0E7WC+Srs=
k8s.io/cli-runtime v0.20.2 h1:W0/FHdbApnl9oB7xdG643c/Zaf7TZT+43I+zKxwqvhU=
k8s.io/cli-runtime v0.20.2/go.mod h1:FjH6uIZZZP3XmwrXWeeYCbgxcrD6YXxoAykBaWH0VdM=
k8s.io/client-go v0.0.0-20190918160344-1fbdaa4c8d90/go.mod h1:J69/JveO6XESwVgG53q3Uz5OSfgsv4uxpScmmyYOOlk=
k8s.io/client-go v0.16.4/go.mod h1:ZgxhFDxSnoKY0J0U2/Y1C8obKDdlhGPZwA7oHH863Ok=
k8s.io/client-go v0.17.0/go.mod h1:TYgR6EUHs6k45hb6KWjVD6jFZvJV4gHDikv/It0xz+k=
k8s.io/client-go v0.17.2/go.mod h1:QAzRgsa0C2xl4/eVpeVAZMvikCn8Nm81yqVx3Kk9XYI=
k8s.io/client-go v0.18.12 h1:MDGRE2tGidz29g45dI4kfelJo+aRmDqWx0Way8mD88A=
k8s.io/client-go v0.18.12/go.mod h1:0aC8XkA09dX/goYqHQJ/kVv0zL1t+weOZt3pmz9LpxA=
k8s.io/client-go v0.20.1/go.mod h1:/zcHdt1TeWSd5HoUe6elJmHSQ6uLLgp4bIJHVEuy+/Y=
k8s.io/client-go v0.20.2 h1:uuf+iIAbfnCSw8IGAv/Rg0giM+2bOzHLOsbbrwrdhNQ=
k8s.io/client-go v0.20.2/go.mod h1:kH5brqWqp7HDxUFKoEgiI4v8G1xzbe9giaCenUWJzgE=
k8s.io/code-generator v0.0.0-20200306081859-6a048a382944/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/component-base v0.0.0-20190612130303-4062e14deebe h1:GHRdxwv4/80MA+Yy/YVyfc9n6VyOhEGzyM09mEXsIAU=
k8s.io/component-base v0.0.0-20190612130303-4062e14deebe/go.mod h1:MmIDXnint3qMN0cqXHKrSiJ2XQKo3J1BPIz7in7NvO0=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@ -1313,18 +1236,12 @@ k8s.io/kube-openapi v0.0.0-20190228160746-b3a7cee44a30/go.mod h1:BXM9ceUBTj2QnfH
k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd h1:sOHNzJIkytDF6qadMNKhhDRpc6ODik8lVC6nOur7B2c=
k8s.io/kube-openapi v0.0.0-20201113171705-d219536bb9fd/go.mod h1:WOJ3KddDSol4tAGcJo0Tvi+dK12EcqSLqcWsryKMpfM=
k8s.io/metrics v0.16.4/go.mod h1:dckkfqvaASo+NrzEmp8ST8yCc9hGt7lx9ABAILyDHx8=
k8s.io/utils v0.0.0-20190221042446-c2654d5206da/go.mod h1:8k8uAuAQ0rXslZKaEWd0c3oVhZz7sSzSiPnVZayjIX0=
k8s.io/utils v0.0.0-20190801114015-581e00157fb1/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f h1:GiPwtSzdP43eI1hpPCbROQCCIgCuiMMNF8YUVLF3vJo=
k8s.io/utils v0.0.0-20191114184206-e782cd3c129f/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920 h1:CbnUZsM497iRC5QMVkHwyl8s2tB3g7yaSHkYPkpgelw=
k8s.io/utils v0.0.0-20201110183641-67b214c5f920/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009 h1:0T5IaWHO3sJTEmCP6mUlBvMukxPKUQWqiI/YuiBNMiQ=
k8s.io/utils v0.0.0-20210111153108-fddb29f9d009/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
@ -1334,11 +1251,7 @@ mvdan.cc/unparam v0.0.0-20190720180237-d51796306d8f/go.mod h1:4G1h5nDURzA3bwVMZI
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7 h1:uuHDyjllyzRyCIvvn0OBjiRB0SgBZGqHNYAmjR7fO50=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT7lCHcxMU+mDHEm+nx46H4zuuHZkDP6icnhu0=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.14/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/controller-runtime v0.5.0 h1:CbqIy5fbUX+4E9bpnBFd204YAzRYlM9SWW77BbrcDQo=
sigs.k8s.io/controller-runtime v0.5.0/go.mod h1:REiJzC7Y00U+2YkMbT8wxgrsX5USpXKGhb2sCtAXiT8=
sigs.k8s.io/controller-runtime v0.8.1 h1:O0K2CJ2JavK8/Tf4LfcpAwRxOFBhv8DjyrbmE6Qw59s=
sigs.k8s.io/controller-runtime v0.8.1/go.mod h1:U/l+DUopBc1ecfRZ5aviA9JDmGFQKvLf5YkZNx2e0sU=
sigs.k8s.io/controller-tools v0.2.4/go.mod h1:m/ztfQNocGYBgTTCmFdnK94uVvgxeZeE3LtJvd/jIzA=
@ -1350,12 +1263,9 @@ sigs.k8s.io/kustomize/kyaml v0.10.3 h1:ARSJUMN/c3k31DYxRfZ+vp/UepUQjg9zCwny7Oj90
sigs.k8s.io/kustomize/kyaml v0.10.3/go.mod h1:RA+iCHA2wPCOfv6uG6TfXXWhYsHpgErq/AljxWKuxtg=
sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI=
sigs.k8s.io/structured-merge-diff v0.0.0-20190817042607-6149e4549fca/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
sigs.k8s.io/structured-merge-diff v1.0.1-0.20191108220359-b1b620dd3f06/go.mod h1:/ULNhyfzRopfcjskuui0cTITekDduZ7ycKN3oUT9R18=
sigs.k8s.io/structured-merge-diff v1.0.1 h1:LOs1LZWMsz1xs77Phr/pkB4LFaavH7IVq/3+WTN9XTA=
sigs.k8s.io/structured-merge-diff v1.0.1/go.mod h1:IIgPezJWb76P0hotTxzDbWsMYB8APh18qZnxkomBpxA=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2 h1:YHQV7Dajm86OuqnIR6zAelnDWBRjo+YhYV9PmGrh1s8=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=

View file

@ -3,13 +3,18 @@ package v1
import (
"encoding/json"
"reflect"
"strings"
)
// HasAutoGenAnnotation checks if a policy has auto-gen annotation
func (p *ClusterPolicy) HasAutoGenAnnotation() bool {
annotations := p.GetAnnotations()
_, ok := annotations["pod-policies.kyverno.io/autogen-controllers"]
return ok
val, ok := annotations["pod-policies.kyverno.io/autogen-controllers"]
if ok && strings.ToLower(val) != "none" {
return true
}
return false
}
//HasMutateOrValidateOrGenerate checks for rule types

View file

@ -1,243 +0,0 @@
package client
import (
"encoding/base64"
"fmt"
"net/url"
"github.com/kyverno/kyverno/pkg/config"
tls "github.com/kyverno/kyverno/pkg/tls"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/rest"
)
// InitTLSPemPair Loads or creates PEM private key and TLS certificate for webhook server.
// Created pair is stored in cluster's secret.
// Returns struct with key/certificate pair.
func (c *Client) InitTLSPemPair(configuration *rest.Config, serverIP string) (*tls.PemPair, error) {
logger := c.log
certProps, err := c.GetTLSCertProps(configuration)
if err != nil {
return nil, err
}
logger.Info("Building key/certificate pair for TLS")
tlsPair, err := c.buildTLSPemPair(certProps, serverIP)
if err != nil {
return nil, err
}
if err = c.WriteTLSPairToSecret(certProps, tlsPair); err != nil {
return nil, fmt.Errorf("Unable to save TLS pair to the cluster: %v", err)
}
return tlsPair, nil
}
// buildTLSPemPair Issues TLS certificate for webhook server using self-signed CA cert
// Returns signed and approved TLS certificate in PEM format
func (c *Client) buildTLSPemPair(props tls.CertificateProps, serverIP string) (*tls.PemPair, error) {
caCert, caPEM, err := tls.GenerateCACert()
if err != nil {
return nil, err
}
if err := c.WriteCACertToSecret(caPEM, props); err != nil {
return nil, fmt.Errorf("failed to write CA cert to secret: %v", err)
}
return tls.GenerateCertPem(caCert, props, serverIP)
}
//ReadRootCASecret returns the RootCA from the pre-defined secret
func (c *Client) ReadRootCASecret() (result []byte) {
logger := c.log.WithName("ReadRootCASecret")
certProps, err := c.GetTLSCertProps(c.clientConfig)
if err != nil {
logger.Error(err, "failed to get TLS Cert Properties")
return result
}
sname := generateRootCASecretName(certProps)
stlsca, err := c.GetResource("", Secrets, certProps.Namespace, sname)
if err != nil {
return result
}
tlsca, err := convertToSecret(stlsca)
if err != nil {
logger.Error(err, "failed to convert secret", "name", sname, "namespace", certProps.Namespace)
return result
}
result = tlsca.Data[rootCAKey]
if len(result) == 0 {
logger.Info("root CA certificate not found in secret", "name", tlsca.Name, "namespace", certProps.Namespace)
return result
}
logger.V(4).Info("using CA bundle defined in secret to validate the webhook's server certificate", "name", tlsca.Name, "namespace", certProps.Namespace)
return result
}
const selfSignedAnnotation string = "self-signed-cert"
const rootCAKey string = "rootCA.crt"
// ReadTLSPair Reads the pair of TLS certificate and key from the specified secret.
func (c *Client) ReadTLSPair(props tls.CertificateProps) *tls.PemPair {
logger := c.log.WithName("ReadTLSPair")
sname := generateTLSPairSecretName(props)
unstrSecret, err := c.GetResource("", Secrets, props.Namespace, sname)
if err != nil {
logger.Error(err, "Failed to get secret", "name", sname, "namespace", props.Namespace)
return nil
}
// If secret contains annotation 'self-signed-cert', then it's created using helper scripts to setup self-signed certificates.
// As the root CA used to sign the certificate is required for webhook cnofiguration, check if the corresponding secret is created
annotations := unstrSecret.GetAnnotations()
if _, ok := annotations[selfSignedAnnotation]; ok {
sname := generateRootCASecretName(props)
_, err := c.GetResource("", Secrets, props.Namespace, sname)
if err != nil {
logger.Error(err, "Root CA secret is required while using self-signed certificates TLS pair, defaulting to generating new TLS pair", "name", sname, "namespace", props.Namespace)
return nil
}
}
secret, err := convertToSecret(unstrSecret)
if err != nil {
return nil
}
pemPair := tls.PemPair{
Certificate: secret.Data[v1.TLSCertKey],
PrivateKey: secret.Data[v1.TLSPrivateKeyKey],
}
if len(pemPair.Certificate) == 0 {
logger.Info("TLS Certificate not found in secret", "name", sname, "namespace", props.Namespace)
return nil
}
if len(pemPair.PrivateKey) == 0 {
logger.Info("TLS PrivateKey not found in secret", "name", sname, "namespace", props.Namespace)
return nil
}
return &pemPair
}
// WriteCACertToSecret stores the CA cert in secret
func (c *Client) WriteCACertToSecret(caPEM *tls.PemPair, props tls.CertificateProps) error {
logger := c.log.WithName("CAcert")
name := generateRootCASecretName(props)
secretUnstr, err := c.GetResource("", Secrets, props.Namespace, name)
if err != nil {
secret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: props.Namespace,
Annotations: map[string]string{
selfSignedAnnotation: "true",
},
},
Data: map[string][]byte{
rootCAKey: caPEM.Certificate,
},
Type: v1.SecretTypeOpaque,
}
_, err := c.CreateResource("", Secrets, props.Namespace, secret, false)
if err == nil {
logger.Info("secret created", "name", name, "namespace", props.Namespace)
}
return err
}
if _, ok := secretUnstr.GetAnnotations()[selfSignedAnnotation]; !ok {
secretUnstr.SetAnnotations(map[string]string{selfSignedAnnotation: "true"})
}
dataMap := map[string]interface{}{
rootCAKey: base64.StdEncoding.EncodeToString(caPEM.Certificate)}
if err := unstructured.SetNestedMap(secretUnstr.Object, dataMap, "data"); err != nil {
return err
}
_, err = c.UpdateResource("", Secrets, props.Namespace, secretUnstr, false)
if err != nil {
return err
}
logger.Info("secret updated", "name", name, "namespace", props.Namespace)
return nil
}
// WriteTLSPairToSecret Writes the pair of TLS certificate and key to the specified secret.
// Updates existing secret or creates new one.
func (c *Client) WriteTLSPairToSecret(props tls.CertificateProps, pemPair *tls.PemPair) error {
logger := c.log.WithName("WriteTLSPair")
name := generateTLSPairSecretName(props)
secretUnstr, err := c.GetResource("", Secrets, props.Namespace, name)
if err != nil {
secret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: props.Namespace,
},
Data: map[string][]byte{
v1.TLSCertKey: pemPair.Certificate,
v1.TLSPrivateKeyKey: pemPair.PrivateKey,
},
Type: v1.SecretTypeTLS,
}
_, err := c.CreateResource("", Secrets, props.Namespace, secret, false)
if err == nil {
logger.Info("secret created", "name", name, "namespace", props.Namespace)
}
return err
}
dataMap := map[string]interface{}{
v1.TLSCertKey: base64.StdEncoding.EncodeToString(pemPair.Certificate),
v1.TLSPrivateKeyKey: base64.StdEncoding.EncodeToString(pemPair.PrivateKey),
}
if err := unstructured.SetNestedMap(secretUnstr.Object, dataMap, "data"); err != nil {
return err
}
_, err = c.UpdateResource("", Secrets, props.Namespace, secretUnstr, false)
if err != nil {
return err
}
logger.Info("secret updated", "name", name, "namespace", props.Namespace)
return nil
}
func generateTLSPairSecretName(props tls.CertificateProps) string {
return tls.GenerateInClusterServiceName(props) + ".kyverno-tls-pair"
}
func generateRootCASecretName(props tls.CertificateProps) string {
return tls.GenerateInClusterServiceName(props) + ".kyverno-tls-ca"
}
//GetTLSCertProps provides the TLS Certificate Properties
func (c *Client) GetTLSCertProps(configuration *rest.Config) (certProps tls.CertificateProps, err error) {
apiServerURL, err := url.Parse(configuration.Host)
if err != nil {
return certProps, err
}
certProps = tls.CertificateProps{
Service: config.KyvernoServiceName,
Namespace: config.KyvernoNamespace,
APIServerHost: apiServerURL.Hostname(),
}
return certProps, nil
}

View file

@ -9,7 +9,6 @@ import (
"github.com/go-logr/logr"
openapiv2 "github.com/googleapis/gnostic/openapiv2"
certificates "k8s.io/api/certificates/v1beta1"
v1 "k8s.io/api/core/v1"
helperv1 "k8s.io/apimachinery/pkg/apis/meta/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
@ -198,15 +197,6 @@ func convertToUnstructured(obj interface{}) *unstructured.Unstructured {
return &unstructured.Unstructured{Object: unstructuredObj}
}
//To-Do remove this to use unstructured type
func convertToSecret(obj *unstructured.Unstructured) (v1.Secret, error) {
secret := v1.Secret{}
if err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &secret); err != nil {
return secret, err
}
return secret, nil
}
//To-Do remove this to use unstructured type
func convertToCSR(obj *unstructured.Unstructured) (*certificates.CertificateSigningRequest, error) {
csr := certificates.CertificateSigningRequest{}

View file

@ -15,17 +15,6 @@ import (
kubernetesfake "k8s.io/client-go/kubernetes/fake"
)
const (
// CSRs CertificateSigningRequest
CSRs string = "CertificateSigningRequest"
// Secrets Secret
Secrets string = "Secret"
// ConfigMaps ConfigMap
ConfigMaps string = "ConfigMap"
// Namespaces Namespace
Namespaces string = "Namespace"
)
//NewMockClient ---testing utilities
func NewMockClient(scheme *runtime.Scheme, gvrToListKind map[schema.GroupVersionResource]string, objects ...runtime.Object) (*Client, error) {
client := fake.NewSimpleDynamicClientWithCustomListKinds(scheme, gvrToListKind, objects...)

View file

@ -189,13 +189,20 @@ func (eh ExistenceHandler) Handle(handler resourceElementHandler, resourceMap ma
if !ok {
return currentPath, fmt.Errorf("Invalid pattern type %T: Pattern has to be of list to compare against resource", eh.pattern)
}
// get the first item in the pattern array
patternMap := typedPattern[0]
typedPatternMap, ok := patternMap.(map[string]interface{})
if !ok {
return currentPath, fmt.Errorf("Invalid pattern type %T: Pattern has to be of type map to compare against items in resource", eh.pattern)
// loop all item in the pattern array
errorPath := ""
var err error
for _, patternMap := range typedPattern {
typedPatternMap, ok := patternMap.(map[string]interface{})
if !ok {
return currentPath, fmt.Errorf("Invalid pattern type %T: Pattern has to be of type map to compare against items in resource", eh.pattern)
}
errorPath, err = validateExistenceListResource(handler, typedResource, typedPatternMap, originPattern, currentPath, ac)
if err != nil {
return errorPath, err
}
}
return validateExistenceListResource(handler, typedResource, typedPatternMap, originPattern, currentPath, ac)
return errorPath, err
default:
return currentPath, fmt.Errorf("Invalid resource type %T: Existence ^ () anchor can be used only on list/array type resource", value)
}
@ -204,7 +211,7 @@ func (eh ExistenceHandler) Handle(handler resourceElementHandler, resourceMap ma
}
func validateExistenceListResource(handler resourceElementHandler, resourceList []interface{}, patternMap map[string]interface{}, originPattern interface{}, path string, ac *common.AnchorKey) (string, error) {
// the idea is atleast on the elements in the array should satisfy the pattern
// the idea is all the element in the pattern array should be present atleast once in the resource list
// if non satisfy then throw an error
for i, resourceElement := range resourceList {
currentPath := path + strconv.Itoa(i) + "/"

View file

@ -81,7 +81,7 @@ func NewAPIPath(path string) (*APIPath, error) {
return &APIPath{
Root: paths[0],
Group: paths[1],
Version: paths[2],
Version: paths[1] + "/" + paths[2],
ResourceType: paths[3],
}, nil
}
@ -91,7 +91,7 @@ func NewAPIPath(path string) (*APIPath, error) {
return &APIPath{
Root: paths[0],
Group: paths[1],
Version: paths[2],
Version: paths[1] + "/" + paths[2],
ResourceType: paths[3],
Name: paths[4],
}, nil
@ -102,7 +102,7 @@ func NewAPIPath(path string) (*APIPath, error) {
return &APIPath{
Root: paths[0],
Group: paths[1],
Version: paths[2],
Version: paths[1] + "/" + paths[2],
Namespace: paths[4],
ResourceType: paths[5],
}, nil
@ -113,7 +113,7 @@ func NewAPIPath(path string) (*APIPath, error) {
return &APIPath{
Root: paths[0],
Group: paths[1],
Version: paths[2],
Version: paths[1] + "/" + paths[2],
Namespace: paths[4],
ResourceType: paths[5],
Name: paths[6],
@ -125,17 +125,33 @@ func NewAPIPath(path string) (*APIPath, error) {
func (a *APIPath) String() string {
var paths []string
if a.Namespace != "" {
if a.Name == "" {
paths = []string{a.Root, a.Group, a.Version, "namespaces", a.Namespace, a.ResourceType}
if a.Root == "apis" {
if a.Namespace != "" {
if a.Name == "" {
paths = []string{a.Root, a.Version, "namespaces", a.Namespace, a.ResourceType}
} else {
paths = []string{a.Root, a.Version, "namespaces", a.Namespace, a.ResourceType, a.Name}
}
} else {
paths = []string{a.Root, a.Group, a.Version, "namespaces", a.Namespace, a.ResourceType, a.Name}
if a.Name != "" {
paths = []string{a.Root, a.Version, a.ResourceType, a.Name}
} else {
paths = []string{a.Root, a.Version, a.ResourceType}
}
}
} else {
if a.Name != "" {
paths = []string{a.Root, a.Group, a.Version, a.ResourceType, a.Name}
if a.Namespace != "" {
if a.Name == "" {
paths = []string{a.Root, a.Group, "namespaces", a.Namespace, a.ResourceType}
} else {
paths = []string{a.Root, a.Group, "namespaces", a.Namespace, a.ResourceType, a.Name}
}
} else {
paths = []string{a.Root, a.Group, a.Version, a.ResourceType}
if a.Name != "" {
paths = []string{a.Root, a.Group, a.ResourceType, a.Name}
} else {
paths = []string{a.Root, a.Group, a.ResourceType}
}
}
}

View file

@ -26,3 +26,26 @@ func Test_Paths(t *testing.T) {
f("/apis/gloo.solo.io/v1/namespaces/gloo-system/upstreams/ ", "/apis/gloo.solo.io/v1/namespaces/gloo-system/upstreams")
f(" /apis/gloo.solo.io/v1/namespaces/gloo-system/upstreams", "/apis/gloo.solo.io/v1/namespaces/gloo-system/upstreams")
}
// Test_GroupVersions verifies where NewAPIPath stores the group/version
// segments: for legacy core paths ("/api/...") the version is kept in
// p.Group, while for grouped paths ("/apis/...") p.Version carries the
// combined "group/version" string.
func Test_GroupVersions(t *testing.T) {
f := func(path, expected string) {
p, err := NewAPIPath(path)
if err != nil {
t.Error(err)
return
}
// core API ("api" root) has no group, so the version lands in Group
if p.Root == "api" {
if p.Group != expected {
t.Errorf("expected %s got %s", expected, p.Group)
}
} else {
// grouped API ("apis" root): Version holds "group/version"
if p.Version != expected {
t.Errorf("expected %s got %s", expected, p.Version)
}
}
}
f("/api/v1/namespace/{{ request.namespace }}", "v1")
f("/apis/extensions/v1beta1/namespaces/example/ingresses", "extensions/v1beta1")
}

View file

@ -10,6 +10,7 @@ import (
"github.com/go-logr/logr"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"k8s.io/api/admission/v1beta1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
)
@ -214,6 +215,28 @@ func (ctx *Context) AddNamespace(namespace string) error {
return ctx.AddJSON(objRaw)
}
// AddImageInfo extracts container image details from the resource's pod spec
// and adds them to the context as a top-level "images" object, holding
// "containers" and "initContainers" maps keyed by container name.
// Resources that declare no images leave the context unchanged.
func (ctx *Context) AddImageInfo(resource *unstructured.Unstructured) error {
initContainersImgs, containersImgs := extractImageInfo(resource, ctx.log)
// nothing to record — avoid adding an empty "images" entry to the context
if len(initContainersImgs) == 0 && len(containersImgs) == 0 {
return nil
}
resourceImg := newResourceImage(initContainersImgs, containersImgs)
// wrap under the "images" key so policies can reference the data as images.*
images := struct {
Images interface{} `json:"images"`
}{
Images: resourceImg,
}
objRaw, err := json.Marshal(images)
if err != nil {
return err
}
return ctx.AddJSON(objRaw)
}
// Checkpoint creates a copy of the internal state.
// Prior checkpoints will be overridden.
func (ctx *Context) Checkpoint() {

View file

@ -0,0 +1,147 @@
package context
import (
"strings"
"github.com/distribution/distribution/reference"
"github.com/go-logr/logr"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
type imageInfo struct {
Registry string `json:"registry,omitempty"`
Name string `json:"name"`
Tag string `json:"tag,omitempty"`
Digest string `json:"digest,omitempty"`
}
type containerImage struct {
Name string
Image imageInfo
}
type resourceImage struct {
Containers map[string]interface{} `json:"containers"`
InitContainers map[string]interface{} `json:"initContainers,omitempty"`
}
func newResourceImage(initContainersImgs, containersImgs []*containerImage) resourceImage {
initContainers := make(map[string]interface{})
containers := make(map[string]interface{})
for _, resource := range initContainersImgs {
initContainers[resource.Name] = resource.Image
}
for _, resource := range containersImgs {
containers[resource.Name] = resource.Image
}
return resourceImage{
Containers: containers,
InitContainers: initContainers,
}
}
// extractImageInfo returns the init-container and container images declared
// by the resource's pod spec. Kinds without a recognized pod-spec location
// return nil slices. Parse failures are logged and that container list is
// skipped, mirroring best-effort extraction.
//
// Fix: the original duplicated the same loop body three times (once per
// kind) and reused a variable named "initContainers" for both container
// lists; the spec path is now selected once and a single loop handles both
// fields.
func extractImageInfo(resource *unstructured.Unstructured, log logr.Logger) (initContainersImgs, containersImgs []*containerImage) {
	logger := log.WithName("extractImageInfo").WithValues("kind", resource.GetKind(), "ns", resource.GetNamespace(), "name", resource.GetName())

	// location of the pod spec varies by workload kind
	var specPath []string
	switch resource.GetKind() {
	case "Pod":
		specPath = []string{"spec"}
	case "Deployment", "DaemonSet", "Job", "StatefulSet":
		specPath = []string{"spec", "template", "spec"}
	case "CronJob":
		specPath = []string{"spec", "jobTemplate", "spec", "template", "spec"}
	default:
		// unsupported kind: no images to extract
		return nil, nil
	}

	for _, field := range []string{"initContainers", "containers"} {
		// copy specPath before appending so the two iterations don't share a backing array
		path := append(append([]string{}, specPath...), field)
		containers, ok, _ := unstructured.NestedSlice(resource.UnstructuredContent(), path...)
		if !ok {
			continue
		}
		img, err := convertToImageInfo(containers)
		if err != nil {
			logger.WithName(field).Error(err, "failed to extract image info")
			continue
		}
		if field == "initContainers" {
			initContainersImgs = append(initContainersImgs, img...)
		} else {
			containersImgs = append(containersImgs, img...)
		}
	}
	return initContainersImgs, containersImgs
}
// convertToImageInfo parses each container entry's "image" field into
// structured image details (registry, name, tag, digest). Entries that fail
// to parse are skipped and their errors are joined (";"-separated) into the
// returned error; successfully parsed entries are still returned.
//
// Fix: the original used unchecked type assertions on container["name"] and
// container["image"], which panic when a field is missing or not a string;
// those cases are now reported as errors instead.
func convertToImageInfo(containers []interface{}) (images []*containerImage, err error) {
	var errs []string
	for _, ctr := range containers {
		container, ok := ctr.(map[string]interface{})
		if !ok {
			continue
		}

		// guard the field lookups — a malformed entry must not panic
		ctrName, ok := container["name"].(string)
		if !ok {
			errs = append(errs, errors.Errorf("invalid or missing container name: %v", container["name"]).Error())
			continue
		}
		image, ok := container["image"].(string)
		if !ok {
			errs = append(errs, errors.Errorf("invalid or missing image for container %s: %v", ctrName, container["image"]).Error())
			continue
		}

		repo, err := reference.Parse(image)
		if err != nil {
			errs = append(errs, errors.Wrapf(err, "bad image: %s", image).Error())
			continue
		}

		var registry, name, tag, digest string
		if named, ok := repo.(reference.Named); ok {
			// split "host/repository" into registry host and repository name
			registry, name = reference.SplitHostname(named)
		}
		if tagged, ok := repo.(reference.Tagged); ok {
			tag = tagged.Tag()
		}
		if digested, ok := repo.(reference.Digested); ok {
			digest = digested.Digest().String()
		}

		images = append(images, &containerImage{
			Name: ctrName,
			Image: imageInfo{
				Registry: registry,
				Name:     name,
				Tag:      tag,
				Digest:   digest,
			},
		})
	}
	if len(errs) == 0 {
		return images, nil
	}
	return images, errors.Errorf("%s", strings.Join(errs, ";"))
}

View file

@ -0,0 +1,45 @@
package context
import (
"testing"
"github.com/kyverno/kyverno/pkg/engine/utils"
"github.com/stretchr/testify/assert"
"sigs.k8s.io/controller-runtime/pkg/log"
)
// Test_extractImageInfo checks image extraction across the supported
// workload kinds: Pod (spec), Deployment (spec.template.spec), and
// CronJob (spec.jobTemplate.spec.template.spec).
func Test_extractImageInfo(t *testing.T) {
tests := []struct {
raw []byte
containers []*containerImage
initContainers []*containerImage
}{
// Pod: containers and initContainers live directly under spec
{
raw: []byte(`{"apiVersion": "v1","kind": "Pod","metadata": {"name": "myapp"},"spec": {"initContainers": [{"name": "init","image": "index.docker.io/busybox:v1.2.3"}],"containers": [{"name": "nginx","image": "nginx:latest"}]}}`),
initContainers: []*containerImage{{Name: "init", Image: imageInfo{Registry: "index.docker.io", Name: "busybox", Tag: "v1.2.3"}}},
containers: []*containerImage{{Name: "nginx", Image: imageInfo{Name: "nginx", Tag: "latest"}}},
},
// Deployment: covers registry with port, tag+digest, and a bare repository
{
raw: []byte(`{"apiVersion": "apps/v1","kind": "Deployment","metadata": {"name": "myapp"},"spec": {"selector": {"matchLabels": {"app": "myapp"}},"template": {"metadata": {"labels": {"app": "myapp"}},"spec": {"initContainers": [{"name": "init","image": "fictional.registry.example:10443/imagename:tag@sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}],"containers": [{"name": "myapp","image": "fictional.registry.example:10443/imagename"}]}}}}`),
initContainers: []*containerImage{{Name: "init", Image: imageInfo{Registry: "fictional.registry.example:10443", Name: "imagename", Tag: "tag", Digest: "sha256:ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff"}}},
containers: []*containerImage{{Name: "myapp", Image: imageInfo{Registry: "fictional.registry.example:10443", Name: "imagename"}}}},
// CronJob: containers nested under spec.jobTemplate.spec.template.spec
{
raw: []byte(`{"apiVersion": "batch/v1beta1","kind": "CronJob","metadata": {"name": "hello"},"spec": {"schedule": "*/1 * * * *","jobTemplate": {"spec": {"template": {"spec": {"containers": [{"name": "hello","image": "b.gcr.io/test.example.com/my-app:test.example.com"}]}}}}}}`),
containers: []*containerImage{{Name: "hello", Image: imageInfo{Registry: "b.gcr.io", Name: "test.example.com/my-app", Tag: "test.example.com"}}},
},
}
for _, test := range tests {
resource, err := utils.ConvertToUnstructured(test.raw)
assert.Nil(t, err)
init, container := extractImageInfo(resource, log.Log.WithName("TestExtractImageInfo"))
// only assert on the slices a test case actually specifies
if len(test.initContainers) > 0 {
assert.Equal(t, test.initContainers, init, "unexpected initContainers", resource.GetName())
}
if len(test.containers) > 0 {
assert.Equal(t, test.containers, container, "unexpected containers", resource.GetName())
}
}
}

View file

@ -5,6 +5,8 @@ import (
"errors"
"fmt"
"strings"
"github.com/go-logr/logr"
"github.com/jmespath/go-jmespath"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
@ -60,7 +62,12 @@ func loadAPIData(logger logr.Logger, entry kyverno.ContextEntry, ctx *PolicyCont
return nil
}
results, err := applyJMESPath(entry.APICall.JMESPath, jsonData)
path, err := variables.SubstituteAll(logger, ctx.JSONContext, entry.APICall.JMESPath)
if err != nil {
return fmt.Errorf("failed to substitute variables in context entry %s %s: %v", entry.Name, entry.APICall.JMESPath, err)
}
results, err := applyJMESPath(path.(string), jsonData)
if err != nil {
return fmt.Errorf("failed to apply JMESPath for context entry %v: %v", entry, err)
}
@ -193,6 +200,9 @@ func fetchConfigMap(logger logr.Logger, entry kyverno.ContextEntry, lister dynam
return nil, fmt.Errorf("failed to convert configmap %s/%s: %v", namespace, name, err)
}
// update the unstructuredObj["data"] to delimit and split the string value (containing "\n") with "\n"
unstructuredObj["data"] = parseMultilineBlockBody(unstructuredObj["data"].(map[string]interface{}))
// extract configmap data
contextData["data"] = unstructuredObj["data"]
contextData["metadata"] = unstructuredObj["metadata"]
@ -205,3 +215,25 @@ func fetchConfigMap(logger logr.Logger, entry kyverno.ContextEntry, lister dynam
return data, nil
}
// parseMultilineBlockBody recursively iterates through a map and updates its values in the following way
// whenever it encounters a string value containing "\n",
// it converts it into a []string by splitting it by "\n"
func parseMultilineBlockBody(m map[string]interface{}) map[string]interface{} {
for k, v := range m {
switch typedValue := v.(type) {
case string:
trimmedTypedValue := strings.Trim(typedValue, "\n")
if strings.Contains(trimmedTypedValue, "\n") {
m[k] = strings.Split(trimmedTypedValue, "\n")
} else {
m[k] = trimmedTypedValue // trimming a str if it has trailing newline characters
}
case map[string]interface{}:
m[k] = parseMultilineBlockBody(typedValue)
default:
continue
}
}
return m
}

View file

@ -0,0 +1,71 @@
package engine
import (
"bytes"
"encoding/json"
"gotest.tools/assert"
"testing"
)
// Test_parseMultilineBlockBody verifies that multi-line string values are
// split into arrays, trailing newlines are trimmed from single-line values,
// and JSON-looking strings without embedded newlines are left untouched.
// The expectedErr flag marks cases where the parsed output must NOT equal
// the expected bytes.
func Test_parseMultilineBlockBody(t *testing.T) {
tcs := []struct {
multilineBlockRaw []byte
expectedMultilineBlockRaw []byte
expectedErr bool
}{
{
multilineBlockRaw: []byte(`{
"key1": "value",
"key2": "value2",
"key3": "word1\nword2\nword3",
"key4": "word4\n"
}`),
expectedMultilineBlockRaw: []byte(`{"key1":"value","key2":"value2","key3":["word1","word2","word3"],"key4":"word4"}`),
expectedErr: false,
},
{
multilineBlockRaw: []byte(`{
"key1": "value",
"key2": "value2",
"key3": "word1\nword2\nword3",
"key4": "word4"
}`),
expectedMultilineBlockRaw: []byte(`{"key1":"value","key2":"value2","key3":["word1","word2","word3"],"key4":"word4"}`),
expectedErr: false,
},
{
multilineBlockRaw: []byte(`{
"key1": "value1",
"key2": "value2\n",
"key3": "word1",
"key4": "word2"
}`),
expectedMultilineBlockRaw: []byte(`{"key1":"value1","key2":["value2",""]}`),
expectedErr: true,
},
{
multilineBlockRaw: []byte(`{
"key1": "value1",
"key2": "[\"cluster-admin\", \"cluster-operator\", \"tenant-admin\"]"
}`),
expectedMultilineBlockRaw: []byte(`{"key1":"value1","key2":"[\"cluster-admin\", \"cluster-operator\", \"tenant-admin\"]"}`),
expectedErr: false,
},
}
for _, tc := range tcs {
var multilineBlock map[string]interface{}
err := json.Unmarshal(tc.multilineBlockRaw, &multilineBlock)
assert.NilError(t, err)
parsedMultilineBlock := parseMultilineBlockBody(multilineBlock)
parsedMultilineBlockRaw, err := json.Marshal(parsedMultilineBlock)
assert.NilError(t, err)
// use bytes.Equal for byte-slice equality instead of
// bytes.Compare(...) == 0 / != 0 (clearer; flagged by gosimple S1004)
if tc.expectedErr {
assert.Assert(t, !bytes.Equal(parsedMultilineBlockRaw, tc.expectedMultilineBlockRaw))
} else {
assert.Assert(t, bytes.Equal(parsedMultilineBlockRaw, tc.expectedMultilineBlockRaw))
}
}
}

View file

@ -64,6 +64,7 @@ func (h patchStrategicMergeHandler) Handle() (response.RuleResponse, unstructure
var err error
if PatchStrategicMerge, err = variables.SubstituteAll(log, h.evalCtx, PatchStrategicMerge); err != nil {
// variable subsitution failed
ruleResponse.Name = h.ruleName
ruleResponse.Success = false
ruleResponse.Message = err.Error()
return ruleResponse, h.patchedResource
@ -121,16 +122,6 @@ func (h patchesJSON6902Handler) Handle() (resp response.RuleResponse, patchedRes
return resp, h.patchedResource
}
skip, err := preProcessJSONPatches(patchesJSON6902, h.patchedResource, h.logger)
if err != nil {
h.logger.Error(err, "failed to preProcessJSONPatches")
}
if skip {
resp.Success = true
return resp, h.patchedResource
}
return ProcessPatchJSON6902(h.ruleName, patchesJSON6902, h.patchedResource, h.logger)
}
@ -173,25 +164,6 @@ func (h patchesHandler) Handle() (resp response.RuleResponse, patchedResource un
resp.Name = h.ruleName
resp.Type = utils.Mutation.String()
// patches is already converted to patchesJSON6902
patchesJSON6902, err := convertPatchesToJSON(h.mutation.PatchesJSON6902)
if err != nil {
resp.Success = false
h.logger.Error(err, "error in type conversion")
resp.Message = err.Error()
return resp, h.patchedResource
}
skip, err := preProcessJSONPatches(patchesJSON6902, h.patchedResource, h.logger)
if err != nil {
h.logger.Error(err, "failed to preProcessJSONPatches")
}
if skip {
resp.Success = true
return resp, h.patchedResource
}
return ProcessPatches(h.logger, h.ruleName, *h.mutation, h.patchedResource)
}

View file

@ -1,18 +1,12 @@
package mutate
import (
"fmt"
"path/filepath"
"reflect"
"regexp"
"strconv"
"strings"
evanjsonpatch "github.com/evanphx/json-patch/v5"
"github.com/go-logr/logr"
"github.com/mattbaird/jsonpatch"
"github.com/minio/minio/pkg/wildcard"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)
func generatePatches(src, dst []byte) ([][]byte, error) {
@ -152,135 +146,3 @@ func ignorePatch(path string) bool {
return false
}
// preProcessJSONPatches deals with the JsonPatch when reinvocation
// policy is set in webhook, to avoid generating duplicate values.
// This duplicate error only occurs on type array, if it's adding to a map
// the value will be added to the map if nil, otherwise it overwrites the old value
// return skip == true to skip the json patch application
func preProcessJSONPatches(patchesJSON6902 []byte, resource unstructured.Unstructured,
log logr.Logger) (skip bool, err error) {
var patches evanjsonpatch.Patch
log = log.WithName("preProcessJSONPatches")
patches, err = evanjsonpatch.DecodePatch(patchesJSON6902)
if err != nil {
return false, fmt.Errorf("cannot decode patches as an RFC 6902 patch: %v", err)
}
// only "add" operations can create duplicate array entries on re-invocation
for _, patch := range patches {
if patch.Kind() != "add" {
continue
}
path, err := patch.Path()
if err != nil {
return false, fmt.Errorf("failed to get path in JSON Patch: %v", err)
}
// check if the target is the list
// a numeric tail (or "-") means the add targets an array element;
// map-targeted adds are safe and are skipped here
if tail := filepath.Base(path); tail != "-" {
_, err := strconv.Atoi(tail)
if err != nil {
log.V(4).Info("JSON patch does not add to the list, skipping", "path", path)
continue
}
}
// best-effort lookups: if the path or value can't be resolved,
// fall through and let the normal patch application handle it
resourceObj, err := getObject(path, resource.UnstructuredContent())
if err != nil {
log.V(4).Info("unable to get object by the given path, proceed patchesJson6902 without preprocessing", "path", path, "error", err.Error())
continue
}
val, err := patch.ValueInterface()
if err != nil {
log.V(4).Info("unable to get value by the given path, proceed patchesJson6902 without preprocessing", "path", path, "error", err.Error())
continue
}
// if there's one patch exist in the resource, which indicates
// this is re-invoked JSON patches, skip application
if isSubsetObject(val, resourceObj) {
return true, nil
}
}
return false, nil
}
// - insert to the end of the list
// {"op": "add", "path": "/spec/containers/-", {"value": "{"name":"busyboxx","image":"busybox:latest"}"}
// - insert value to the certain element of the list
// {"op": "add", "path": "/spec/containers/1", {"value": "{"name":"busyboxx","image":"busybox:latest"}"}
func getObject(path string, resource map[string]interface{}) (interface{}, error) {
var strippedResource interface{}
strippedResource = resource
var ok bool
if strings.HasPrefix(path, "/") {
path = path[1:]
}
paths := strings.Split(path, "/")
for i, key := range paths {
switch strippedResource.(type) {
case map[string]interface{}:
strippedResource, ok = strippedResource.(map[string]interface{})[key]
if !ok {
return nil, fmt.Errorf("referenced value does not exist at %s", strings.Join(paths[:i+1], "/"))
}
case []interface{}:
var idx int
if key == "-" {
idx = len(strippedResource.([]interface{})) - 1
} else {
var err error
idx, err = strconv.Atoi(key)
if err != nil {
return nil, fmt.Errorf("cannot parse index in JSON Patch at %s: %v", strings.Join(paths[:i+1], "/"), err)
}
if idx < 0 {
idx = len(strippedResource.([]interface{})) - 1
}
}
if len(strippedResource.([]interface{})) <= idx {
return nil, nil
}
strippedResource = strippedResource.([]interface{})[idx]
}
}
return strippedResource, nil
}
// isSubsetObject reports whether object is a subset of resource: every key
// in object must exist in resource with a deeply-equal value. If either
// argument is not a map[string]interface{} (e.g. a list element of another
// type), the result is false.
func isSubsetObject(object, resource interface{}) bool {
	objMap, objIsMap := object.(map[string]interface{})
	if !objIsMap {
		return false
	}
	resMap, resIsMap := resource.(map[string]interface{})
	if !resIsMap {
		return false
	}
	for key, want := range objMap {
		got, present := resMap[key]
		if !present || !reflect.DeepEqual(want, got) {
			return false
		}
	}
	return true
}

View file

@ -1,16 +1,12 @@
package mutate
import (
"encoding/json"
"fmt"
"testing"
"github.com/mattbaird/jsonpatch"
assertnew "github.com/stretchr/testify/assert"
"gotest.tools/assert"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"sigs.k8s.io/controller-runtime/pkg/log"
yaml "sigs.k8s.io/yaml"
)
func Test_GeneratePatches(t *testing.T) {
@ -139,120 +135,6 @@ var overlayBytes = []byte(`
var expectBytes = []byte(`{"apiVersion": "apps/v1","kind": "Deployment","metadata": {"name": "wordpress","labels": {"app": "wordpress"}},"spec": {"selector": {"matchLabels": {"app": "wordpress"}},"strategy": {"type": "Recreate"},"template": {"metadata": {"labels": {"app": "wordpress"}},"spec": {"containers": [{"name": "nginx","image": "nginx"},{"image": "wordpress:4.8-apache","name": "wordpress","ports": [{"containerPort": 80,"name": "wordpress"}],"volumeMounts": [{"name": "wordpress-persistent-storage","mountPath": "/var/www/html"}],"env": [{"name": "WORDPRESS_DB_HOST","value": "$(MYSQL_SERVICE)"},{"name": "WORDPRESS_DB_PASSWORD","valueFrom": {"secretKeyRef": {"name": "mysql-pass","key": "password"}}}]}],"volumes": [{"name": "wordpress-persistent-storage"}],"initContainers": [{"name": "init-command","image": "debian","command": ["echo $(WORDPRESS_SERVICE)","echo $(MYSQL_SERVICE)"]}]}}}}`)
var podBytes = []byte(`
{
"kind": "Pod",
"apiVersion": "v1",
"metadata": {
"name": "nginx"
},
"spec": {
"containers": [
{
"name": "nginx",
"image": "nginx:latest"
},
{
"name": "nginx-new",
"image": "nginx:latest"
}
]
}
}
`)
// Test_preProcessJSONPatches_skip: the patch adds a container that already
// exists in the pod (see podBytes), so re-invocation pre-processing must
// report skip=true.
func Test_preProcessJSONPatches_skip(t *testing.T) {
patchesJSON6902 := []byte(`
- op: add
path: /spec/containers/1
value: {"name":"nginx-new","image":"nginx:latest"}
`)
var pod unstructured.Unstructured
assertnew.Nil(t, json.Unmarshal(podBytes, &pod))
patches, err := yaml.YAMLToJSON(patchesJSON6902)
assertnew.Nil(t, err)
skip, err := preProcessJSONPatches(patches, pod, log.Log)
assertnew.Nil(t, err)
assertnew.Equal(t, true, skip)
}
// Test_preProcessJSONPatches_not_skip: the patch adds a container that is
// NOT present in the pod, so pre-processing must not skip the application.
func Test_preProcessJSONPatches_not_skip(t *testing.T) {
patchesJSON6902 := []byte(`
- op: add
path: /spec/containers/1
value: {"name":"my-new-container","image":"nginx:latest"}
`)
patches, err := yaml.YAMLToJSON(patchesJSON6902)
assertnew.Nil(t, err)
var pod unstructured.Unstructured
assertnew.Nil(t, json.Unmarshal(podBytes, &pod))
skip, err := preProcessJSONPatches(patches, pod, log.Log)
assertnew.Nil(t, err)
assertnew.Equal(t, false, skip)
}
// Test_isSubsetObject_true exercises isSubsetObject both ways: a differing
// "name" field means the object is not a subset; with identical fields it
// is. (Despite the name, both outcomes are checked.)
func Test_isSubsetObject_true(t *testing.T) {
var object, resource interface{}
objectRaw := []byte(`{"image": "nginx:latest","name": "nginx-new"}`)
resourceRaw := []byte(`{"image": "nginx:latest","name": "random-name"}`)
assertnew.Nil(t, json.Unmarshal(objectRaw, &object))
assertnew.Nil(t, json.Unmarshal(resourceRaw, &resource))
// "name" differs -> not a subset
assertnew.Equal(t, false, isSubsetObject(object, resource))
resourceRawNew := []byte(`{"image": "nginx:latest","name": "nginx-new"}`)
assertnew.Nil(t, json.Unmarshal(resourceRawNew, &resource))
// all fields match -> subset
assertnew.Equal(t, true, isSubsetObject(object, resource))
}
// Test_getObject_notPresent: a path through a missing map key must surface
// a "referenced value does not exist" error.
func Test_getObject_notPresent(t *testing.T) {
path := "/spec/random/1"
var pod unstructured.Unstructured
assertnew.Nil(t, json.Unmarshal(podBytes, &pod))
_, err := getObject(path, pod.UnstructuredContent())
expectedErr := "referenced value does not exist at spec/random"
assertnew.Equal(t, err.Error(), expectedErr)
}
// Test_getObject_outOfIndex: an index past the end of the containers list
// returns (nil, nil) rather than an error.
func Test_getObject_outOfIndex(t *testing.T) {
path := "/spec/containers/2"
var pod unstructured.Unstructured
assertnew.Nil(t, json.Unmarshal(podBytes, &pod))
object, err := getObject(path, pod.UnstructuredContent())
assertnew.Nil(t, err)
assertnew.Nil(t, object)
}
// Test_getObject_success: a numeric index selects the matching element of
// the containers list.
func Test_getObject_success(t *testing.T) {
path := "/spec/containers/1"
var pod unstructured.Unstructured
expectedObject := map[string]interface{}{"image": "nginx:latest", "name": "nginx-new"}
assertnew.Nil(t, json.Unmarshal(podBytes, &pod))
object, err := getObject(path, pod.UnstructuredContent())
assertnew.Nil(t, err)
assertnew.Equal(t, expectedObject, object)
}
// Test_getObject_get_last_element: the JSON-Patch "-" index addresses the
// last element of the list.
func Test_getObject_get_last_element(t *testing.T) {
path := "/spec/containers/-"
var pod unstructured.Unstructured
expectedObject := map[string]interface{}{"image": "nginx:latest", "name": "nginx-new"}
assertnew.Nil(t, json.Unmarshal(podBytes, &pod))
object, err := getObject(path, pod.UnstructuredContent())
assertnew.Nil(t, err)
assertnew.Equal(t, expectedObject, object)
}
func Test_ignorePath(t *testing.T) {
tests := []struct {
path string

View file

@ -2,13 +2,13 @@ package engine
import (
"encoding/json"
"reflect"
"testing"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/utils"
"gotest.tools/assert"
"reflect"
"testing"
)
func Test_VariableSubstitutionOverlay(t *testing.T) {

View file

@ -4,13 +4,12 @@ import (
"encoding/json"
"testing"
"k8s.io/api/admission/v1beta1"
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/engine/utils"
utils2 "github.com/kyverno/kyverno/pkg/utils"
"gotest.tools/assert"
"k8s.io/api/admission/v1beta1"
)
func TestGetAnchorsFromMap_ThereAreAnchors(t *testing.T) {

View file

@ -1066,10 +1066,10 @@ func Test_Eval_In_String_Set_Fail(t *testing.T) {
}
}
// test passes if ALL of the values in "key" are NOT in "value" ("key" is not a subset of "value")
// test passes if ONE of the values in "key" is NOT in "value" ("key" is not a subset of "value")
func Test_Eval_NotIn_String_Set_Pass(t *testing.T) {
ctx := context.NewContext()
key := [2]string{"1.1.1.1", "4.4.4.4"}
key := [3]string{"1.1.1.1", "4.4.4.4", "5.5.5.5"}
keyInterface := make([]interface{}, len(key), len(key))
for i := range key {
keyInterface[i] = key[i]

View file

@ -110,19 +110,20 @@ func keyExistsInArray(key string, value interface{}, log logr.Logger) (invalidTy
}
func (in InHandler) validateValueWithStringSetPattern(key []string, value interface{}) (keyExists bool) {
invalidType, keyExists := setExistsInArray(key, value, in.log)
invalidType, isIn := setExistsInArray(key, value, in.log, false)
if invalidType {
in.log.Info("expected type []string", "value", value, "type", fmt.Sprintf("%T", value))
return false
}
return keyExists
return isIn
}
// setExistsInArray checks if the key is a subset of value
// The value can be a string, an array of strings, or a JSON format
// array of strings (e.g. ["val1", "val2", "val3"].
func setExistsInArray(key []string, value interface{}, log logr.Logger) (invalidType bool, keyExists bool) {
// notIn argument if set to true will check for NotIn
func setExistsInArray(key []string, value interface{}, log logr.Logger, notIn bool) (invalidType bool, keyExists bool) {
switch valuesAvailable := value.(type) {
case []interface{}:
@ -134,8 +135,10 @@ func setExistsInArray(key []string, value interface{}, log logr.Logger) (invalid
}
valueSlice = append(valueSlice, v)
}
return false, isSubset(key, valueSlice)
if notIn {
return false, isNotIn(key, valueSlice)
}
return false, isIn(key, valueSlice)
case string:
@ -148,36 +151,53 @@ func setExistsInArray(key []string, value interface{}, log logr.Logger) (invalid
log.Error(err, "failed to unmarshal value to JSON string array", "key", key, "value", value)
return true, false
}
if notIn {
return false, isNotIn(key, arr)
}
return false, isSubset(key, arr)
return false, isIn(key, arr)
default:
return true, false
}
}
// isSubset checks if S1 is a subset of S2 i.e. ALL values of S1 are in S2
func isSubset(key []string, value []string) bool {
set := make(map[string]int)
// isIn checks if all values in S1 are in S2
func isIn(key []string, value []string) bool {
set := make(map[string]bool)
for _, val := range value {
set[val]++
set[val] = true
}
for _, val := range key {
count, found := set[val]
_, found := set[val]
if !found {
return false
} else if count < 1 {
return false
} else {
set[val] = count - 1
}
}
return true
}
// isNotIn reports whether at least one element of key is absent from value.
func isNotIn(key []string, value []string) bool {
	present := make(map[string]bool, len(value))
	for _, item := range value {
		present[item] = true
	}
	for _, item := range key {
		if !present[item] {
			return true
		}
	}
	return false
}
// validateValueWithBoolPattern: a boolean key can never satisfy the In
// operator against any pattern, so this always evaluates to false.
func (in InHandler) validateValueWithBoolPattern(_ bool, _ interface{}) bool {
return false
}

View file

@ -64,13 +64,13 @@ func (nin NotInHandler) validateValueWithStringPattern(key string, value interfa
}
func (nin NotInHandler) validateValueWithStringSetPattern(key []string, value interface{}) bool {
invalidType, keyExists := setExistsInArray(key, value, nin.log)
invalidType, isNotIn := setExistsInArray(key, value, nin.log, true)
if invalidType {
nin.log.Info("expected type []string", "value", value, "type", fmt.Sprintf("%T", value))
return false
}
return !keyExists
return isNotIn
}
func (nin NotInHandler) validateValueWithBoolPattern(_ bool, _ interface{}) bool {

View file

@ -45,12 +45,13 @@ func compareByCondition(key float64, value float64, op kyverno.ConditionOperator
}
func (noh NumericOperatorHandler) Evaluate(key, value interface{}) bool {
if key, err := noh.subHandler(noh.log, noh.ctx, key); err != nil {
var err error
if key, err = noh.subHandler(noh.log, noh.ctx, key); err != nil {
// Failed to resolve the variable
noh.log.Error(err, "Failed to resolve variable", "variable", key)
return false
}
if value, err := noh.subHandler(noh.log, noh.ctx, value); err != nil {
if value, err = noh.subHandler(noh.log, noh.ctx, value); err != nil {
// Failed to resolve the variable
noh.log.Error(err, "Failed to resolve variable", "variable", value)
return false
@ -133,7 +134,7 @@ func (noh NumericOperatorHandler) validateValueWithStringPattern(key string, val
if err == nil {
return noh.validateValueWithIntPattern(int64key, value)
}
noh.log.Error(fmt.Errorf("Parse Error: "), "Failed to parse both float64 and int64 from the string keyt")
noh.log.Error(err, "Failed to parse both float64 and int64 from the string keyt")
return false
}

View file

@ -12,7 +12,7 @@ import (
kyverno "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/engine/anchor/common"
"github.com/kyverno/kyverno/pkg/engine/context"
ju "github.com/kyverno/kyverno/pkg/engine/json-utils"
jsonUtils "github.com/kyverno/kyverno/pkg/engine/json-utils"
"github.com/kyverno/kyverno/pkg/engine/operator"
)
@ -31,14 +31,53 @@ func IsReference(value string) bool {
return len(groups) != 0
}
//ReplaceAllVars replaces all variables with the value defined in the replacement function
// repl receives each raw regex match from regexVariables (presumably the
// full "{{...}}" expression including delimiters — confirm against the
// regexVariables definition, which is not visible here) and returns its
// replacement text.
func ReplaceAllVars(src string, repl func(string) string) string {
return regexVariables.ReplaceAllStringFunc(src, repl)
}
// SubstituteAll resolves references and then variables in the given untyped
// document against the evaluation context. Returns the substituted document,
// or nil and an error if reference substitution fails.
//
// Fix: the original returned kyverno.Rule{} on error even though the result
// is a plain interface{} — a copy-paste from SubstituteAllForceMutate. For an
// untyped document the idiomatic failure value is nil.
func SubstituteAll(log logr.Logger, ctx context.EvalInterface, document interface{}) (_ interface{}, err error) {
	document, err = substituteReferences(log, document)
	if err != nil {
		return nil, err
	}
	return substituteVars(log, ctx, document)
}
// SubstituteAllForceMutate substitutes references and then variables in the
// given rule. When no evaluation context is provided (ctx == nil), variables
// are passed through replaceSubstituteVariables instead of being resolved
// (presumably replacing them with placeholder values — confirm against that
// helper, which is not visible here). Returns the substituted rule, or a
// zero kyverno.Rule and an error on failure.
func SubstituteAllForceMutate(log logr.Logger, ctx context.EvalInterface, typedRule kyverno.Rule) (_ kyverno.Rule, err error) {
var rule interface{}
// convert to an untyped representation so the JSON traversal helpers can walk it
rule, err = RuleToUntyped(typedRule)
if err != nil {
return kyverno.Rule{}, err
}
rule, err = substituteReferences(log, rule)
if err != nil {
return kyverno.Rule{}, err
}
if ctx == nil {
rule = replaceSubstituteVariables(rule)
} else {
rule, err = substituteVars(log, ctx, rule)
if err != nil {
return kyverno.Rule{}, err
}
}
// convert the substituted untyped rule back into the typed form
return UntypedToRule(rule)
}
//SubstituteVars replaces the variables with the values defined in the context
// - if any variable is invalid or has nil value, it is considered as a failed variable substitution
func substituteVars(log logr.Logger, ctx context.EvalInterface, rule interface{}) (interface{}, error) {
return ju.NewTraversal(rule, substituteVariablesIfAny(log, ctx)).TraverseJSON()
return jsonUtils.NewTraversal(rule, substituteVariablesIfAny(log, ctx)).TraverseJSON()
}
func substituteReferences(log logr.Logger, rule interface{}) (interface{}, error) {
return ju.NewTraversal(rule, substituteReferencesIfAny(log)).TraverseJSON()
return jsonUtils.NewTraversal(rule, substituteReferencesIfAny(log)).TraverseJSON()
}
// NotFoundVariableErr is returned when it is impossible to resolve the variable
@ -61,8 +100,8 @@ func (n NotResolvedReferenceErr) Error() string {
return fmt.Sprintf("reference %s not resolved at path %s", n.reference, n.path)
}
func substituteReferencesIfAny(log logr.Logger) ju.Action {
return ju.OnlyForLeafs(func(data *ju.ActionData) (interface{}, error) {
func substituteReferencesIfAny(log logr.Logger) jsonUtils.Action {
return jsonUtils.OnlyForLeafs(func(data *jsonUtils.ActionData) (interface{}, error) {
value, ok := data.Element.(string)
if !ok {
return data.Element, nil
@ -73,9 +112,9 @@ func substituteReferencesIfAny(log logr.Logger) ju.Action {
if err != nil {
switch err.(type) {
case context.InvalidVariableErr:
return data.Element, err
return nil, err
default:
return data.Element, fmt.Errorf("failed to resolve %v at path %s", v, data.Path)
return nil, fmt.Errorf("failed to resolve %v at path %s", v, data.Path)
}
}
@ -100,8 +139,8 @@ func substituteReferencesIfAny(log logr.Logger) ju.Action {
})
}
func substituteVariablesIfAny(log logr.Logger, ctx context.EvalInterface) ju.Action {
return ju.OnlyForLeafs(func(data *ju.ActionData) (interface{}, error) {
func substituteVariablesIfAny(log logr.Logger, ctx context.EvalInterface) jsonUtils.Action {
return jsonUtils.OnlyForLeafs(func(data *jsonUtils.ActionData) (interface{}, error) {
value, ok := data.Element.(string)
if !ok {
return data.Element, nil
@ -114,6 +153,16 @@ func substituteVariablesIfAny(log logr.Logger, ctx context.EvalInterface) ju.Act
variable := strings.ReplaceAll(v, "{{", "")
variable = strings.ReplaceAll(variable, "}}", "")
variable = strings.TrimSpace(variable)
operation, err := ctx.Query("request.operation")
if err != nil {
return nil, fmt.Errorf("failed to check request.operation")
}
if operation == "DELETE" {
variable = strings.ReplaceAll(variable, "request.object", "request.oldObject")
}
substitutedVar, err := ctx.Query(variable)
if err != nil {
switch err.(type) {
@ -230,18 +279,43 @@ func formAbsolutePath(referencePath, absolutePath string) string {
func getValueFromReference(fullDocument interface{}, path string) (interface{}, error) {
var element interface{}
ju.NewTraversal(fullDocument, ju.OnlyForLeafs(
func(data *ju.ActionData) (interface{}, error) {
if _, err := jsonUtils.NewTraversal(fullDocument, jsonUtils.OnlyForLeafs(
func(data *jsonUtils.ActionData) (interface{}, error) {
if common.RemoveAnchorsFromPath(data.Path) == path {
element = data.Element
}
return data.Element, nil
})).TraverseJSON()
})).TraverseJSON(); err != nil {
return nil, err
}
return element, nil
}
func SubstituteAllInRule(log logr.Logger, ctx context.EvalInterface, typedRule kyverno.Rule) (_ kyverno.Rule, err error) {
var rule interface{}
rule, err = RuleToUntyped(typedRule)
if err != nil {
return typedRule, err
}
rule, err = substituteReferences(log, rule)
if err != nil {
return typedRule, err
}
rule, err = substituteVars(log, ctx, rule)
if err != nil {
return typedRule, err
}
return UntypedToRule(rule)
}
func RuleToUntyped(rule kyverno.Rule) (interface{}, error) {
jsonRule, err := json.Marshal(rule)
if err != nil {
@ -272,61 +346,6 @@ func UntypedToRule(untyped interface{}) (kyverno.Rule, error) {
return rule, nil
}
func SubstituteAllInRule(log logr.Logger, ctx context.EvalInterface, typedRule kyverno.Rule) (_ kyverno.Rule, err error) {
var rule interface{}
rule, err = RuleToUntyped(typedRule)
if err != nil {
return typedRule, err
}
rule, err = substituteReferences(log, rule)
if err != nil {
return typedRule, err
}
rule, err = substituteVars(log, ctx, rule)
if err != nil {
return typedRule, err
}
return UntypedToRule(rule)
}
func SubstituteAll(log logr.Logger, ctx context.EvalInterface, document interface{}) (_ interface{}, err error) {
document, err = substituteReferences(log, document)
if err != nil {
return kyverno.Rule{}, err
}
return substituteVars(log, ctx, document)
}
func SubstituteAllForceMutate(log logr.Logger, ctx context.EvalInterface, typedRule kyverno.Rule) (_ kyverno.Rule, err error) {
var rule interface{}
rule, err = RuleToUntyped(typedRule)
if err != nil {
return kyverno.Rule{}, err
}
rule, err = substituteReferences(log, rule)
if err != nil {
return kyverno.Rule{}, err
}
if ctx == nil {
rule = replaceSubstituteVariables(rule)
} else {
rule, err = substituteVars(log, ctx, rule)
if err != nil {
return kyverno.Rule{}, err
}
}
return UntypedToRule(rule)
}
func replaceSubstituteVariables(document interface{}) interface{} {
rawDocument, err := json.Marshal(document)
if err != nil {

View file

@ -3,6 +3,7 @@ package variables
import (
"encoding/json"
"strings"
"fmt"
"testing"
"github.com/kyverno/kyverno/pkg/engine/context"
@ -133,6 +134,86 @@ func Test_subVars_failed(t *testing.T) {
}
}
func Test_ReplacingPathWhenDeleting(t *testing.T) {
patternRaw := []byte(`"{{request.object.metadata.annotations.target}}"`)
var resourceRaw = []byte(`
{
"request": {
"operation": "DELETE",
"object": {
"metadata": {
"name": "curr",
"namespace": "ns",
"annotations": {
"target": "foo"
}
}
},
"oldObject": {
"metadata": {
"name": "old",
"annotations": {
"target": "bar"
}
}
}
}
}
`)
var pattern interface{}
var err error
err = json.Unmarshal(patternRaw, &pattern)
if err != nil {
t.Error(err)
}
ctx := context.NewContext()
err = ctx.AddJSON(resourceRaw)
assert.NilError(t, err)
pattern, err = SubstituteVars(log.Log, ctx, pattern)
assert.NilError(t, err)
assert.Equal(t, fmt.Sprintf("%v", pattern), "bar")
}
func Test_ReplacingNestedVariableWhenDeleting(t *testing.T) {
patternRaw := []byte(`"{{request.object.metadata.annotations.{{request.object.metadata.annotations.targetnew}}}}"`)
var resourceRaw = []byte(`
{
"request":{
"operation":"DELETE",
"oldObject":{
"metadata":{
"name":"current",
"namespace":"ns",
"annotations":{
"target":"nested_target",
"targetnew":"target"
}
}
}
}
}`)
var pattern interface{}
var err error
err = json.Unmarshal(patternRaw, &pattern)
if err != nil {
t.Error(err)
}
ctx := context.NewContext()
err = ctx.AddJSON(resourceRaw)
assert.NilError(t, err)
pattern, err = SubstituteVars(log.Log, ctx, pattern)
assert.NilError(t, err)
assert.Equal(t, fmt.Sprintf("%v", pattern), "nested_target")
}
var resourceRaw = []byte(`
{
"metadata": {

View file

@ -202,6 +202,11 @@ func (c *Controller) deleteGR(obj interface{}) {
}
func (c *Controller) enqueue(gr *kyverno.GenerateRequest) {
// skip enqueueing Pending requests
if gr.Status.State == kyverno.Pending {
return
}
logger := c.log
key, err := cache.MetaNamespaceKeyFunc(gr)
if err != nil {

View file

@ -123,6 +123,10 @@ func (c *Controller) applyGenerate(resource unstructured.Unstructured, gr kyvern
return nil, err
}
if err := ctx.AddImageInfo(&resource); err != nil {
logger.Error(err, "unable to add image info to variables context")
}
policyContext := &engine.PolicyContext{
NewResource: resource,
Policy: *policyObj,

View file

@ -1,6 +1,7 @@
package generate
import (
"reflect"
"time"
"github.com/go-logr/logr"
@ -175,6 +176,10 @@ func (c *Controller) updatePolicy(old, cur interface{}) {
}
}
if reflect.DeepEqual(curP.Spec, oldP.Spec) {
policyHasGenerate = false
}
if !policyHasGenerate {
return
}
@ -256,7 +261,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
defer utilruntime.HandleCrash()
defer c.queue.ShutDown()
logger.Info("starting")
logger.Info("starting", "workers", workers)
defer logger.Info("shutting down")
if !cache.WaitForCacheSync(stopCh, c.policySynced, c.grSynced) {
@ -274,7 +279,7 @@ func (c *Controller) Run(workers int, stopCh <-chan struct{}) {
// worker runs a worker thread that just dequeues items, processes them, and marks them done.
// It enforces that the syncHandler is never invoked concurrently with the same key.
func (c *Controller) worker() {
c.log.Info("starting new worker...")
c.log.V(3).Info("starting new worker...")
for c.processNextWorkItem() {
}
@ -342,7 +347,7 @@ func (c *Controller) syncGenerateRequest(key string) error {
return c.processGR(gr)
}
// EnqueueGenerateRequestFromWebhook - enqueing generate requests from webhook
// EnqueueGenerateRequestFromWebhook - enqueueing generate requests from webhook
func (c *Controller) EnqueueGenerateRequestFromWebhook(gr *kyverno.GenerateRequest) {
c.enqueueGenerateRequest(gr)
}

View file

@ -205,6 +205,18 @@ func applyCommandHelper(resourcePaths []string, cluster bool, policyReport bool,
return validateEngineResponses, rc, resources, skippedPolicies, err
}
// empty the previous contents of the file just in case if the file already existed before with some content(so as to perform overwrites)
// the truncation of files for the case when mutateLogPath is dir, is handled under pkg/kyverno/apply/common.go
if !mutateLogPathIsDir && mutateLogPath != "" {
_, err := os.OpenFile(mutateLogPath, os.O_TRUNC|os.O_WRONLY, 0644)
if err != nil {
if !sanitizederror.IsErrorSanitized(err) {
return validateEngineResponses, rc, resources, skippedPolicies, sanitizederror.NewWithError("failed to truncate the existing file at "+mutateLogPath, err)
}
return validateEngineResponses, rc, resources, skippedPolicies, err
}
}
mutatedPolicies, err := common.MutatePolices(policies)
if err != nil {
if !sanitizederror.IsErrorSanitized(err) {

View file

@ -16,7 +16,7 @@ func Test_Apply(t *testing.T) {
testcases := []TestCase{
{
PolicyPaths: []string{"../../../samples/best_practices/disallow_latest_tag.yaml"},
PolicyPaths: []string{"../../../test/best_practices/disallow_latest_tag.yaml"},
ResourcePaths: []string{"../../../test/resources/pod_with_version_tag.yaml"},
expectedPolicyReports: []preport.PolicyReport{
{
@ -31,7 +31,7 @@ func Test_Apply(t *testing.T) {
},
},
{
PolicyPaths: []string{"../../../samples/best_practices/require_pod_requests_limits.yaml"},
PolicyPaths: []string{"../../../test/best_practices/require_pod_requests_limits.yaml"},
ResourcePaths: []string{"../../../test/resources/pod_with_latest_tag.yaml"},
expectedPolicyReports: []preport.PolicyReport{
{

View file

@ -386,12 +386,24 @@ func ApplyPolicyOnResource(policy *v1.ClusterPolicy, resource *unstructured.Unst
rcError := false
engineResponses := make([]*response.EngineResponse, 0)
namespaceLabels := make(map[string]string)
resourceNamespace := resource.GetNamespace()
namespaceLabels = namespaceSelectorMap[resource.GetNamespace()]
if resourceNamespace != "default" && len(namespaceLabels) < 1 {
return engineResponses, &response.EngineResponse{}, responseError, rcError, sanitizederror.NewWithError(fmt.Sprintf("failed to get namesapce labels for resource %s. use --values-file flag to pass the namespace labels", resource.GetName()), nil)
policyWithNamespaceSelector := false
for _, p := range policy.Spec.Rules {
if p.MatchResources.ResourceDescription.NamespaceSelector != nil ||
p.ExcludeResources.ResourceDescription.NamespaceSelector != nil {
policyWithNamespaceSelector = true
break
}
}
if policyWithNamespaceSelector {
resourceNamespace := resource.GetNamespace()
namespaceLabels = namespaceSelectorMap[resource.GetNamespace()]
if resourceNamespace != "default" && len(namespaceLabels) < 1 {
return engineResponses, &response.EngineResponse{}, responseError, rcError, sanitizederror.NewWithError(fmt.Sprintf("failed to get namesapce labels for resource %s. use --values-file flag to pass the namespace labels", resource.GetName()), nil)
}
}
resPath := fmt.Sprintf("%s/%s/%s", resource.GetNamespace(), resource.GetKind(), resource.GetName())
log.Log.V(3).Info("applying policy on resource", "policy", policy.Name, "resource", resPath)
@ -525,9 +537,10 @@ func PrintMutatedOutput(mutateLogPath string, mutateLogPathIsDir bool, yaml stri
yaml = yaml + ("\n---\n\n")
if !mutateLogPathIsDir {
// truncation for the case when mutateLogPath is a file (not a directory) is handled under pkg/kyverno/apply/command.go
f, err = os.OpenFile(mutateLogPath, os.O_APPEND|os.O_WRONLY, 0644)
} else {
f, err = os.OpenFile(mutateLogPath+"/"+fileName+".yaml", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0644)
f, err = os.OpenFile(mutateLogPath+"/"+fileName+".yaml", os.O_CREATE|os.O_WRONLY, 0644)
}
if err != nil {

View file

@ -114,6 +114,7 @@ func testCommandExecute(dirPath []string, valuesFile string, fileName string) (r
var errors []error
fs := memfs.New()
rc = &resultCounts{}
var testYamlCount int
if len(dirPath) == 0 {
return rc, sanitizederror.NewWithError(fmt.Sprintf("a directory is required"), err)
}
@ -123,19 +124,20 @@ func testCommandExecute(dirPath []string, valuesFile string, fileName string) (r
return rc, sanitizederror.NewWithError("failed to parse URL", err)
}
pathElems := strings.Split(gitURL.Path[1:], "/")
if len(pathElems) != 3 {
if len(pathElems) <= 2 {
err := fmt.Errorf("invalid URL path %s - expected https://github.com/:owner/:repository/:branch", gitURL.Path)
fmt.Printf("Error: failed to parse URL \nCause: %s\n", err)
os.Exit(1)
}
gitURL.Path = strings.Join([]string{"/", pathElems[0], pathElems[1]}, "/")
gitURL.Path = strings.Join([]string{pathElems[0], pathElems[1]}, "/")
repoURL := gitURL.String()
cloneRepo, err := clone(repoURL, fs)
if err != nil {
fmt.Printf("Error: failed to clone repository \nCause: %s\n", err)
branch := strings.ReplaceAll(dirPath[0], repoURL+"/", "")
_, cloneErr := clone(repoURL, fs, branch)
if cloneErr != nil {
fmt.Printf("Error: failed to clone repository \nCause: %s\n", cloneErr)
log.Log.V(3).Info(fmt.Sprintf("failed to clone repository %v as it is not valid", repoURL), "error", cloneErr)
os.Exit(1)
}
log.Log.V(3).Info(" clone repository", cloneRepo)
policyYamls, err := listYAMLs(fs, "/")
if err != nil {
return rc, sanitizederror.NewWithError("failed to list YAMLs in repository", err)
@ -144,6 +146,7 @@ func testCommandExecute(dirPath []string, valuesFile string, fileName string) (r
for _, yamlFilePath := range policyYamls {
file, err := fs.Open(yamlFilePath)
if strings.Contains(file.Name(), fileName) {
testYamlCount++
policyresoucePath := strings.Trim(yamlFilePath, fileName)
bytes, err := ioutil.ReadAll(file)
if err != nil {
@ -169,7 +172,7 @@ func testCommandExecute(dirPath []string, valuesFile string, fileName string) (r
if err != nil {
errors = append(errors, err)
}
err := getLocalDirTestFiles(fs, path, fileName, valuesFile, rc)
err := getLocalDirTestFiles(fs, path, fileName, valuesFile, rc, testYamlCount)
if err != nil {
errors = append(errors, err)
}
@ -183,21 +186,25 @@ func testCommandExecute(dirPath []string, valuesFile string, fileName string) (r
if rc.fail > 0 {
os.Exit(1)
}
if testYamlCount == 0 {
fmt.Printf("\n No test yamls available \n")
}
os.Exit(0)
return rc, nil
}
func getLocalDirTestFiles(fs billy.Filesystem, path, fileName, valuesFile string, rc *resultCounts) error {
func getLocalDirTestFiles(fs billy.Filesystem, path, fileName, valuesFile string, rc *resultCounts, testYamlCount int) error {
files, err := ioutil.ReadDir(path)
if err != nil {
return fmt.Errorf("failed to read %v: %v", path, err.Error())
}
for _, file := range files {
if file.IsDir() {
getLocalDirTestFiles(fs, filepath.Join(path, file.Name()), fileName, valuesFile, rc)
getLocalDirTestFiles(fs, filepath.Join(path, file.Name()), fileName, valuesFile, rc, testYamlCount)
continue
}
if strings.Contains(file.Name(), fileName) {
testYamlCount++
yamlFile, err := ioutil.ReadFile(filepath.Join(path, file.Name()))
if err != nil {
sanitizederror.NewWithError("unable to read yaml", err)

View file

@ -1,18 +1,22 @@
package test
import (
"fmt"
"os"
"path/filepath"
"github.com/go-git/go-billy/v5"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
"github.com/go-git/go-git/v5/storage/memory"
)
func clone(path string, fs billy.Filesystem) (*git.Repository, error) {
func clone(path string, fs billy.Filesystem, branch string) (*git.Repository, error) {
return git.Clone(memory.NewStorage(), fs, &git.CloneOptions{
URL: path,
Progress: os.Stdout,
URL: path,
ReferenceName: plumbing.ReferenceName(fmt.Sprintf("refs/heads/%s", branch)),
Progress: os.Stdout,
SingleBranch: true,
})
}

View file

@ -50,9 +50,9 @@ func validateMap(patternMap map[string]interface{}, path string, supportedAnchor
if !ok {
return path + "/" + key, fmt.Errorf("Existence anchor should have value of type list")
}
// validate there is only one entry in the list
if len(typedValue) == 0 || len(typedValue) > 1 {
return path + "/" + key, fmt.Errorf("Existence anchor: single value expected, multiple specified")
// validate that there is atleast one entry in the list
if len(typedValue) == 0 {
return path + "/" + key, fmt.Errorf("Existence anchor: should have atleast one value")
}
}
}

View file

@ -26,7 +26,7 @@ func Test_Exclude(t *testing.T) {
dir, err := os.Getwd()
baseDir := filepath.Dir(filepath.Dir(dir))
assert.NilError(t, err)
file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
if err != nil {
t.Log(err)
}
@ -57,7 +57,7 @@ func Test_CronJobOnly(t *testing.T) {
dir, err := os.Getwd()
baseDir := filepath.Dir(filepath.Dir(dir))
assert.NilError(t, err)
file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
if err != nil {
t.Log(err)
}
@ -90,7 +90,7 @@ func Test_CronJob_hasExclude(t *testing.T) {
baseDir := filepath.Dir(filepath.Dir(dir))
assert.NilError(t, err)
file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
if err != nil {
t.Log(err)
}
@ -126,7 +126,7 @@ func Test_CronJobAndDeployment(t *testing.T) {
dir, err := os.Getwd()
baseDir := filepath.Dir(filepath.Dir(dir))
assert.NilError(t, err)
file, err := ioutil.ReadFile(baseDir + "/samples/best_practices/disallow_bind_mounts.yaml")
file, err := ioutil.ReadFile(baseDir + "/test/best_practices/disallow_bind_mounts.yaml")
if err != nil {
t.Log(err)
}

View file

@ -4,6 +4,7 @@ import (
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic/dynamiclister"
"k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
)
// GenericCache - allows operation on a single resource
@ -13,6 +14,7 @@ type GenericCache interface {
Lister() dynamiclister.Lister
NamespacedLister(namespace string) dynamiclister.NamespaceLister
GVR() schema.GroupVersionResource
GetInformer() cache.SharedIndexInformer
}
type genericCache struct {
@ -56,3 +58,8 @@ func (gc *genericCache) Lister() dynamiclister.Lister {
func (gc *genericCache) NamespacedLister(namespace string) dynamiclister.NamespaceLister {
return dynamiclister.New(gc.genericInformer.Informer().GetIndexer(), gc.GVR()).Namespace(namespace)
}
// GetInformer gets SharedIndexInformer
func (gc *genericCache) GetInformer() cache.SharedIndexInformer {
return gc.genericInformer.Informer()
}

View file

@ -10,11 +10,15 @@ import (
)
// ResourceCache - allows the creation, deletion and saving the resource informers as a cache
// the resource cache can be registered by gvks as follows:
// - group/version/kind
// - group/kind
// - kind
type ResourceCache interface {
CreateInformers(resources ...string) []error
CreateGVKInformer(kind string) (GenericCache, error)
StopResourceInformer(resource string)
GetGVRCache(resource string) (GenericCache, bool)
CreateInformers(gvks ...string) []error
CreateGVKInformer(gvk string) (GenericCache, error)
StopResourceInformer(gvk string)
GetGVRCache(gvk string) (GenericCache, bool)
}
type resourceCache struct {

View file

@ -10,10 +10,6 @@ func Test_Mutate_Validate_qos(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_mutate_validate_qos.yaml")
}
func Test_disallow_root_user(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_root_user.yaml")
}
func Test_disallow_priviledged(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_priviledged.yaml")
}
@ -22,18 +18,6 @@ func Test_validate_healthChecks(t *testing.T) {
testScenario(t, "/test/scenarios/other/scenario_validate_healthChecks.yaml")
}
func Test_validate_disallow_latest_tag(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_latest_tag.yaml")
}
func Test_validate_require_image_tag_not_latest_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_latest_tag_pass.yaml")
}
func Test_validate_disallow_default_namespace(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_default_namespace.yaml")
}
func Test_validate_host_network_port(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_host_network_port.yaml")
}
@ -42,10 +26,6 @@ func Test_validate_host_PID_IPC(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_host_pid_ipc.yaml")
}
func Test_validate_ro_rootfs(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/require_ro_rootfs.yaml")
}
//TODO: support generate
// func Test_add_ns_quota(t *testing.T) {
// testScenario(t, "test/scenarios/samples/best_practices/add_ns_quota.yaml")
@ -67,14 +47,6 @@ func Test_validate_volume_whitelist(t *testing.T) {
testScenario(t, "test/scenarios/other/scenario_validate_volume_whiltelist.yaml")
}
func Test_require_pod_requests_limits(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/require_pod_requests_limits.yaml")
}
func Test_require_probes(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/require_probes.yaml")
}
func Test_validate_disallow_bind_mounts_fail(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_bind_mounts_fail.yaml")
}
@ -83,22 +55,10 @@ func Test_validate_disallow_bind_mounts_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_bind_mounts_pass.yaml")
}
func Test_validate_disallow_new_capabilities(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/disallow_new_capabilities.yaml")
}
func Test_disallow_sysctls(t *testing.T) {
testScenario(t, "/test/scenarios/samples/best_practices/disallow_sysctls.yaml")
}
func Test_disallow_docker_sock_mount(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/disallow_docker_sock_mount.yaml")
}
func Test_validate_disallow_helm_tiller(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/scenario_validate_disallow_helm_tiller.yaml")
}
func Test_add_safe_to_evict(t *testing.T) {
testScenario(t, "test/scenarios/samples/best_practices/add_safe_to_evict.yaml")
}
@ -115,14 +75,6 @@ func Test_validate_restrict_automount_sa_token_pass(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/restrict_automount_sa_token.yaml")
}
func Test_restrict_node_port(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/restrict_node_port.yaml")
}
func Test_validate_restrict_image_registries(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/restrict_image_registries.yaml")
}
func Test_known_ingress(t *testing.T) {
testScenario(t, "test/scenarios/samples/more/restrict_ingress_classes.yaml")
}

348
pkg/tls/certRenewer.go Normal file
View file

@ -0,0 +1,348 @@
package tls
import (
"crypto/tls"
"crypto/x509"
"encoding/base64"
"encoding/pem"
"fmt"
"time"
"github.com/cenkalti/backoff"
"github.com/go-logr/logr"
"github.com/kyverno/kyverno/pkg/config"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/rest"
)
const (
// ManagedByLabel is added to Kyverno managed secrets
ManagedByLabel string = "cert.kyverno.io/managed-by"
SelfSignedAnnotation string = "self-signed-cert"
RootCAKey string = "rootCA.crt"
rollingUpdateAnnotation string = "update.kyverno.io/force-rolling-update"
)
// CertRenewer creates rootCA and pem pair to register
// webhook configurations and webhook server
// renews RootCA at the given interval
type CertRenewer struct {
client *client.Client
clientConfig *rest.Config
certRenewalInterval time.Duration
certValidityDuration time.Duration
log logr.Logger
}
// NewCertRenewer returns an instance of CertRenewer
func NewCertRenewer(client *client.Client, clientConfig *rest.Config, certRenewalInterval, certValidityDuration time.Duration, log logr.Logger) *CertRenewer {
return &CertRenewer{
client: client,
clientConfig: clientConfig,
certRenewalInterval: certRenewalInterval,
certValidityDuration: certValidityDuration,
log: log,
}
}
// InitTLSPemPair Loads or creates PEM private key and TLS certificate for webhook server.
// Created pair is stored in cluster's secret.
// Returns struct with key/certificate pair.
func (c *CertRenewer) InitTLSPemPair(serverIP string) (*PemPair, error) {
logger := c.log.WithName("InitTLSPemPair")
certProps, err := GetTLSCertProps(c.clientConfig)
if err != nil {
return nil, err
}
if valid, err := c.ValidCert(); err == nil && valid {
if tlsPair, err := ReadTLSPair(c.clientConfig, c.client); err == nil {
logger.Info("using existing TLS key/certificate pair")
return tlsPair, nil
}
} else {
logger.V(3).Info("unable to find TLS pair", "reason", err.Error())
}
logger.Info("building key/certificate pair for TLS")
return c.buildTLSPemPairAndWriteToSecrets(certProps, serverIP)
}
// buildTLSPemPairAndWriteToSecrets Issues TLS certificate for webhook server using self-signed CA cert
// Returns signed and approved TLS certificate in PEM format
func (c *CertRenewer) buildTLSPemPairAndWriteToSecrets(props CertificateProps, serverIP string) (*PemPair, error) {
caCert, caPEM, err := GenerateCACert(c.certValidityDuration)
if err != nil {
return nil, err
}
if err := c.WriteCACertToSecret(caPEM, props); err != nil {
return nil, fmt.Errorf("failed to write CA cert to secret: %v", err)
}
tlsPair, err := GenerateCertPem(caCert, props, serverIP, c.certValidityDuration)
if err != nil {
return nil, err
}
if err = c.WriteTLSPairToSecret(props, tlsPair); err != nil {
return nil, fmt.Errorf("unable to save TLS pair to the cluster: %v", err)
}
return tlsPair, nil
}
// ReadTLSPair Reads the pair of TLS certificate and key from the specified secret.
// WriteCACertToSecret stores the CA cert in secret
func (c *CertRenewer) WriteCACertToSecret(caPEM *PemPair, props CertificateProps) error {
logger := c.log.WithName("CAcert")
name := generateRootCASecretName(props)
secretUnstr, err := c.client.GetResource("", "Secret", props.Namespace, name)
if err != nil {
secret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: props.Namespace,
Annotations: map[string]string{
SelfSignedAnnotation: "true",
},
Labels: map[string]string{
ManagedByLabel: "kyverno",
},
},
Data: map[string][]byte{
RootCAKey: caPEM.Certificate,
},
Type: v1.SecretTypeOpaque,
}
_, err := c.client.CreateResource("", "Secret", props.Namespace, secret, false)
if err == nil {
logger.Info("secret created", "name", name, "namespace", props.Namespace)
}
return err
}
if _, ok := secretUnstr.GetAnnotations()[SelfSignedAnnotation]; !ok {
secretUnstr.SetAnnotations(map[string]string{SelfSignedAnnotation: "true"})
}
dataMap := map[string]interface{}{
RootCAKey: base64.StdEncoding.EncodeToString(caPEM.Certificate)}
if err := unstructured.SetNestedMap(secretUnstr.Object, dataMap, "data"); err != nil {
return err
}
_, err = c.client.UpdateResource("", "Secret", props.Namespace, secretUnstr, false)
if err != nil {
return err
}
logger.Info("secret updated", "name", name, "namespace", props.Namespace)
return nil
}
// WriteTLSPairToSecret Writes the pair of TLS certificate and key to the specified secret.
// Updates existing secret or creates new one.
func (c *CertRenewer) WriteTLSPairToSecret(props CertificateProps, pemPair *PemPair) error {
logger := c.log.WithName("WriteTLSPair")
name := generateTLSPairSecretName(props)
secretUnstr, err := c.client.GetResource("", "Secret", props.Namespace, name)
if err != nil {
secret := &v1.Secret{
TypeMeta: metav1.TypeMeta{
Kind: "Secret",
APIVersion: "v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: props.Namespace,
Labels: map[string]string{
ManagedByLabel: "kyverno",
},
},
Data: map[string][]byte{
v1.TLSCertKey: pemPair.Certificate,
v1.TLSPrivateKeyKey: pemPair.PrivateKey,
},
Type: v1.SecretTypeTLS,
}
_, err := c.client.CreateResource("", "Secret", props.Namespace, secret, false)
if err == nil {
logger.Info("secret created", "name", name, "namespace", props.Namespace)
}
return err
}
dataMap := map[string][]byte{
v1.TLSCertKey: pemPair.Certificate,
v1.TLSPrivateKeyKey: pemPair.PrivateKey,
}
secret, err := convertToSecret(secretUnstr)
if err != nil {
return err
}
secret.Data = dataMap
_, err = c.client.UpdateResource("", "Secret", props.Namespace, secret, false)
if err != nil {
return err
}
logger.Info("secret updated", "name", name, "namespace", props.Namespace)
return nil
}
// RollingUpdate triggers a rolling update of Kyverno pod.
// It is used when the rootCA is renewed, the restart of
// Kyverno pod will register webhook server with new cert
func (c *CertRenewer) RollingUpdate() error {
update := func() error {
deploy, err := c.client.GetResource("", "Deployment", config.KyvernoNamespace, config.KyvernoDeploymentName)
if err != nil {
return errors.Wrap(err, "failed to find Kyverno")
}
if IsKyvernoIsInRollingUpdate(deploy.UnstructuredContent(), c.log) {
return nil
}
annotations, ok, err := unstructured.NestedStringMap(deploy.UnstructuredContent(), "spec", "template", "metadata", "annotations")
if err != nil {
return errors.Wrap(err, "bad annotations")
}
if !ok {
annotations = map[string]string{}
}
annotations[rollingUpdateAnnotation] = time.Now().String()
if err = unstructured.SetNestedStringMap(deploy.UnstructuredContent(),
annotations,
"spec", "template", "metadata", "annotations",
); err != nil {
return errors.Wrapf(err, "set annotation %s", rollingUpdateAnnotation)
}
if _, err = c.client.UpdateResource("", "Deployment", config.KyvernoNamespace, deploy, false); err != nil {
return errors.Wrap(err, "update Kyverno deployment")
}
return nil
}
exbackoff := &backoff.ExponentialBackOff{
InitialInterval: 500 * time.Millisecond,
RandomizationFactor: 0.5,
Multiplier: 1.5,
MaxInterval: time.Second,
MaxElapsedTime: 3 * time.Second,
Clock: backoff.SystemClock,
}
exbackoff.Reset()
return backoff.Retry(update, exbackoff)
}
// ValidCert validates the CA Cert
func (c *CertRenewer) ValidCert() (bool, error) {
logger := c.log.WithName("ValidCert")
rootCA, err := ReadRootCASecret(c.clientConfig, c.client)
if err != nil {
return false, errors.Wrap(err, "unable to read CA from secret")
}
tlsPair, err := ReadTLSPair(c.clientConfig, c.client)
if err != nil {
// wait till next reconcile
logger.Info("unable to read TLS PEM Pair from secret", "reason", err.Error())
return false, errors.Wrap(err, "unable to read TLS PEM Pair from secret")
}
// build cert pool
pool := x509.NewCertPool()
caPem, _ := pem.Decode(rootCA)
if caPem == nil {
logger.Error(err, "bad certificate")
return false, nil
}
cac, err := x509.ParseCertificate(caPem.Bytes)
if err != nil {
logger.Error(err, "failed to parse CA cert")
return false, nil
}
pool.AddCert(cac)
// valid PEM pair
_, err = tls.X509KeyPair(tlsPair.Certificate, tlsPair.PrivateKey)
if err != nil {
logger.Error(err, "invalid PEM pair")
return false, nil
}
certPem, _ := pem.Decode(tlsPair.Certificate)
if certPem == nil {
logger.Error(err, "bad private key")
return false, nil
}
cert, err := x509.ParseCertificate(certPem.Bytes)
if err != nil {
logger.Error(err, "failed to parse cert")
return false, nil
}
if _, err = cert.Verify(x509.VerifyOptions{
Roots: pool,
CurrentTime: time.Now().Add(c.certRenewalInterval),
}); err != nil {
logger.Error(err, "invalid cert")
return false, nil
}
return true, nil
}
// IsKyvernoIsInRollingUpdate returns true if Kyverno is in rolling update
func IsKyvernoIsInRollingUpdate(deploy map[string]interface{}, logger logr.Logger) bool {
	desired, _, err := unstructured.NestedInt64(deploy, "spec", "replicas")
	if err != nil {
		logger.Error(err, "unable to fetch spec.replicas")
	}

	observed, _, err := unstructured.NestedInt64(deploy, "status", "replicas")
	if err != nil {
		logger.Error(err, "unable to fetch status.replicas")
	}

	// during a rolling update old and new pods coexist, so the observed
	// (non-terminated) replica count exceeds the desired count
	if observed <= desired {
		return false
	}

	logger.Info("detect Kyverno is in rolling update, won't trigger the update again")
	return true
}
// generateTLSPairSecretName returns the name of the secret that stores the
// TLS certificate/key pair: "<service>.<namespace>.svc.kyverno-tls-pair"
func generateTLSPairSecretName(props CertificateProps) string {
	return generateInClusterServiceName(props) + ".kyverno-tls-pair"
}
// generateRootCASecretName returns the name of the secret that stores the
// root CA certificate: "<service>.<namespace>.svc.kyverno-tls-ca"
func generateRootCASecretName(props CertificateProps) string {
	return generateInClusterServiceName(props) + ".kyverno-tls-ca"
}

106
pkg/tls/reader.go Normal file
View file

@ -0,0 +1,106 @@
package tls
import (
"fmt"
"net/url"
"github.com/kyverno/kyverno/pkg/config"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/pkg/errors"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/rest"
)
// ReadRootCASecret returns the RootCA from the pre-defined secret
func ReadRootCASecret(restConfig *rest.Config, client *client.Client) (result []byte, err error) {
	props, err := GetTLSCertProps(restConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get TLS Cert Properties")
	}

	// fetch the CA secret and convert it to a typed Secret
	secretName := generateRootCASecretName(props)
	unstrSecret, err := client.GetResource("", "Secret", props.Namespace, secretName)
	if err != nil {
		return nil, err
	}

	caSecret, err := convertToSecret(unstrSecret)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to convert secret %s/%s", props.Namespace, secretName)
	}

	// an empty CA entry is treated as an error, not as a valid empty cert
	result = caSecret.Data[RootCAKey]
	if len(result) == 0 {
		return nil, errors.Errorf("root CA certificate not found in secret %s/%s", props.Namespace, caSecret.Name)
	}

	return result, nil
}
// ReadTLSPair returns the pem pair from the pre-defined secret
func ReadTLSPair(restConfig *rest.Config, client *client.Client) (*PemPair, error) {
	certProps, err := GetTLSCertProps(restConfig)
	if err != nil {
		return nil, errors.Wrap(err, "failed to get TLS Cert Properties")
	}

	secretName := generateTLSPairSecretName(certProps)
	unstrSecret, err := client.GetResource("", "Secret", certProps.Namespace, secretName)
	if err != nil {
		return nil, fmt.Errorf("failed to get secret %s/%s: %v", certProps.Namespace, secretName, err)
	}

	// If secret contains annotation 'self-signed-cert', then it's created using helper scripts to setup self-signed certificates.
	// As the root CA used to sign the certificate is required for webhook configuration, check if the corresponding secret is created
	if _, selfSigned := unstrSecret.GetAnnotations()[SelfSignedAnnotation]; selfSigned {
		caSecretName := generateRootCASecretName(certProps)
		if _, err := client.GetResource("", "Secret", certProps.Namespace, caSecretName); err != nil {
			return nil, fmt.Errorf("rootCA secret is required while using self-signed certificate TLS pair, defaulting to generating new TLS pair %s/%s", certProps.Namespace, caSecretName)
		}
	}

	secret, err := convertToSecret(unstrSecret)
	if err != nil {
		return nil, err
	}

	pair := &PemPair{
		Certificate: secret.Data[v1.TLSCertKey],
		PrivateKey:  secret.Data[v1.TLSPrivateKeyKey],
	}

	// both halves of the pair must be present
	if len(pair.Certificate) == 0 {
		return nil, fmt.Errorf("TLS Certificate not found in secret %s/%s", certProps.Namespace, secretName)
	}
	if len(pair.PrivateKey) == 0 {
		return nil, fmt.Errorf("TLS PrivateKey not found in secret %s/%s", certProps.Namespace, secretName)
	}

	return pair, nil
}
//GetTLSCertProps provides the TLS Certificate Properties
func GetTLSCertProps(configuration *rest.Config) (certProps CertificateProps, err error) {
	// the API server host is derived from the kubeconfig host URL
	var apiServerURL *url.URL
	if apiServerURL, err = url.Parse(configuration.Host); err != nil {
		return certProps, err
	}

	certProps = CertificateProps{
		Service:       config.KyvernoServiceName,
		Namespace:     config.KyvernoNamespace,
		APIServerHost: apiServerURL.Hostname(),
	}
	return certProps, nil
}
// convertToSecret converts an unstructured object into a typed v1.Secret.
// On conversion failure the zero-value Secret is returned alongside the error.
func convertToSecret(obj *unstructured.Unstructured) (v1.Secret, error) {
	var secret v1.Secret
	err := runtime.DefaultUnstructuredConverter.FromUnstructured(obj.UnstructuredContent(), &secret)
	return secret, err
}

View file

@ -14,7 +14,11 @@ import (
"time"
)
const certValidityDuration = 10 * 365 * 24 * time.Hour
// CertRenewalInterval is the renewal interval for rootCA
const CertRenewalInterval time.Duration = 12 * time.Hour
// CertValidityDuration is the valid duration for a new cert
const CertValidityDuration time.Duration = 365 * 24 * time.Hour
// CertificateProps Properties of TLS certificate which should be issued for webhook server
type CertificateProps struct {
@ -63,7 +67,7 @@ func CertificateToPem(certificateDER []byte) []byte {
// GenerateCACert creates the self-signed CA cert and private key
// it will be used to sign the webhook server certificate
func GenerateCACert() (*KeyPair, *PemPair, error) {
func GenerateCACert(certValidityDuration time.Duration) (*KeyPair, *PemPair, error) {
now := time.Now()
begin := now.Add(-1 * time.Hour)
end := now.Add(certValidityDuration)
@ -110,7 +114,7 @@ func GenerateCACert() (*KeyPair, *PemPair, error) {
// GenerateCertPem takes the results of GenerateCACert and uses it to create the
// PEM-encoded public certificate and private key, respectively
func GenerateCertPem(caCert *KeyPair, props CertificateProps, serverIP string) (*PemPair, error) {
func GenerateCertPem(caCert *KeyPair, props CertificateProps, serverIP string, certValidityDuration time.Duration) (*PemPair, error) {
now := time.Now()
begin := now.Add(-1 * time.Hour)
end := now.Add(certValidityDuration)
@ -121,7 +125,7 @@ func GenerateCertPem(caCert *KeyPair, props CertificateProps, serverIP string) (
dnsNames[1] = fmt.Sprintf("%s.%s", props.Service, props.Namespace)
// The full service name is the CommonName for the certificate
commonName := GenerateInClusterServiceName(props)
commonName := generateInClusterServiceName(props)
dnsNames[2] = fmt.Sprintf("%s", commonName)
var ips []net.IP
@ -174,7 +178,7 @@ func GenerateCertPem(caCert *KeyPair, props CertificateProps, serverIP string) (
}
//GenerateInClusterServiceName The generated service name should be the common name for TLS certificate
func GenerateInClusterServiceName(props CertificateProps) string {
func generateInClusterServiceName(props CertificateProps) string {
return props.Service + "." + props.Namespace + ".svc"
}

View file

@ -127,7 +127,7 @@ func ConvertResource(raw []byte, group, version, kind, namespace string) (unstru
obj.SetGroupVersionKind(schema.GroupVersionKind{Group: group, Version: version, Kind: kind})
if namespace != "" {
if namespace != "" && kind != "Namespace" {
obj.SetNamespace(namespace)
}

View file

@ -4,28 +4,34 @@ import (
"io/ioutil"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/tls"
admregapi "k8s.io/api/admissionregistration/v1beta1"
apps "k8s.io/api/apps/v1"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/apimachinery/pkg/runtime"
rest "k8s.io/client-go/rest"
)
func (wrc *Register) readCaData() []byte {
logger := wrc.log
logger := wrc.log.WithName("readCaData")
var caData []byte
var err error
// Check if ca is defined in the secret tls-ca
// assume the key and signed cert have been defined in secret tls.kyverno
if caData = wrc.client.ReadRootCASecret(); len(caData) != 0 {
if caData, err = tls.ReadRootCASecret(wrc.clientConfig, wrc.client); err == nil {
logger.V(4).Info("read CA from secret")
return caData
}
logger.V(4).Info("failed to read CA from secret, reading from kubeconfig")
logger.V(4).Info("failed to read CA from secret, reading from kubeconfig", "reason", err.Error())
// load the CA from kubeconfig
if caData = extractCA(wrc.clientConfig); len(caData) != 0 {
logger.V(4).Info("read CA from kubeconfig")
return caData
}
logger.V(4).Info("failed to read CA from kubeconfig")
return nil
}
@ -49,8 +55,8 @@ func extractCA(config *rest.Config) (result []byte) {
func (wrc *Register) constructOwner() v1.OwnerReference {
logger := wrc.log
kubePolicyDeployment, err := wrc.getKubePolicyDeployment()
kubePolicyDeployment, _, err := wrc.GetKubePolicyDeployment()
if err != nil {
logger.Error(err, "failed to construct OwnerReference")
return v1.OwnerReference{}
@ -64,17 +70,18 @@ func (wrc *Register) constructOwner() v1.OwnerReference {
}
}
func (wrc *Register) getKubePolicyDeployment() (*apps.Deployment, error) {
// GetKubePolicyDeployment gets Kyverno deployment
func (wrc *Register) GetKubePolicyDeployment() (*apps.Deployment, *unstructured.Unstructured, error) {
lister, _ := wrc.resCache.GetGVRCache("Deployment")
kubePolicyDeployment, err := lister.NamespacedLister(config.KyvernoNamespace).Get(config.KyvernoDeploymentName)
if err != nil {
return nil, err
return nil, nil, err
}
deploy := apps.Deployment{}
if err = runtime.DefaultUnstructuredConverter.FromUnstructured(kubePolicyDeployment.UnstructuredContent(), &deploy); err != nil {
return nil, err
return nil, kubePolicyDeployment, err
}
return &deploy, nil
return &deploy, kubePolicyDeployment, nil
}
// debug mutating webhook

View file

@ -2,12 +2,18 @@ package webhookconfig
import (
"fmt"
"os"
"reflect"
"sync"
"time"
"github.com/go-logr/logr"
dclient "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/config"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/tls"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
"k8s.io/client-go/tools/cache"
)
//maxRetryCount defines the max deadline count
@ -28,17 +34,34 @@ const (
// like the webhook settings.
//
type Monitor struct {
t time.Time
mu sync.RWMutex
log logr.Logger
t time.Time
mu sync.RWMutex
secretQueue chan bool
log logr.Logger
}
//NewMonitor returns a new instance of LastRequestTime store
func NewMonitor(log logr.Logger) *Monitor {
return &Monitor{
t: time.Now(),
log: log,
//NewMonitor returns a new instance of webhook monitor
func NewMonitor(resCache resourcecache.ResourceCache, log logr.Logger) *Monitor {
monitor := &Monitor{
t: time.Now(),
secretQueue: make(chan bool, 1),
log: log,
}
var err error
secretCache, ok := resCache.GetGVRCache("Secret")
if !ok {
if secretCache, err = resCache.CreateGVKInformer("Secret"); err != nil {
log.Error(err, "unable to start Secret's informer")
}
}
secretCache.GetInformer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: monitor.addSecretFunc,
UpdateFunc: monitor.updateSecretFunc,
})
return monitor
}
//Time returns the last request time
@ -56,18 +79,59 @@ func (t *Monitor) SetTime(tm time.Time) {
t.t = tm
}
// addSecretFunc signals the monitor when a self-signed certificate secret is
// created in the Kyverno namespace, so webhook configurations get reconciled.
func (t *Monitor) addSecretFunc(obj interface{}) {
	secret := obj.(*unstructured.Unstructured)

	// only react to Kyverno-namespace secrets carrying the self-signed annotation
	inNamespace := secret.GetNamespace() == config.KyvernoNamespace
	selfSigned := secret.GetAnnotations()[tls.SelfSignedAnnotation] == "true"
	if inNamespace && selfSigned {
		t.secretQueue <- true
	}
}
// updateSecretFunc signals the monitor when the data of a self-signed
// certificate secret in the Kyverno namespace changes.
func (t *Monitor) updateSecretFunc(oldObj interface{}, newObj interface{}) {
	oldSecret := oldObj.(*unstructured.Unstructured)
	newSecret := newObj.(*unstructured.Unstructured)

	if newSecret.GetNamespace() != config.KyvernoNamespace {
		return
	}

	if newSecret.GetAnnotations()[tls.SelfSignedAnnotation] != "true" {
		return
	}

	// ignore updates that do not change the certificate data
	if reflect.DeepEqual(oldSecret.UnstructuredContent()["data"], newSecret.UnstructuredContent()["data"]) {
		return
	}

	t.secretQueue <- true
	t.log.V(4).Info("secret updated, reconciling webhook configurations")
}
//Run runs the checker and verify the resource update
func (t *Monitor) Run(register *Register, eventGen event.Interface, client *dclient.Client, stopCh <-chan struct{}) {
func (t *Monitor) Run(register *Register, certRenewer *tls.CertRenewer, eventGen event.Interface, stopCh <-chan struct{}) {
logger := t.log
logger.V(4).Info("starting webhook monitor", "interval", idleCheckInterval)
status := newStatusControl(client, eventGen, logger.WithName("WebhookStatusControl"))
status := newStatusControl(register.client, eventGen, logger.WithName("WebhookStatusControl"))
ticker := time.NewTicker(tickerInterval)
defer ticker.Stop()
certsRenewalTicker := time.NewTicker(tls.CertRenewalInterval)
defer certsRenewalTicker.Stop()
for {
select {
case <-ticker.C:
if skipWebhookCheck(register, logger.WithName("statusCheck/skipWebhookCheck")) {
logger.Info("skip validating webhook status, Kyverno is in rolling update")
continue
}
if err := register.Check(); err != nil {
t.log.Error(err, "missing webhooks")
@ -95,6 +159,10 @@ func (t *Monitor) Run(register *Register, eventGen event.Interface, client *dcli
if timeDiff > idleCheckInterval {
logger.V(1).Info("webhook idle time exceeded", "deadline", idleCheckInterval)
if skipWebhookCheck(register, logger.WithName("skipWebhookCheck")) {
logger.Info("skip validating webhook status, Kyverno is in rolling update")
continue
}
// send request to update the Kyverno deployment
if err := status.IncrementAnnotation(); err != nil {
@ -110,6 +178,40 @@ func (t *Monitor) Run(register *Register, eventGen event.Interface, client *dcli
logger.Error(err, "failed to annotate deployment webhook status to success")
}
case <-certsRenewalTicker.C:
valid, err := certRenewer.ValidCert()
if err != nil {
logger.Error(err, "failed to validate cert")
continue
}
if valid {
continue
}
logger.Info("rootCA is about to expire, trigger a rolling update to renew the cert")
if err := certRenewer.RollingUpdate(); err != nil {
logger.Error(err, "unable to trigger a rolling update to renew rootCA, force restarting")
os.Exit(1)
}
case <-t.secretQueue:
valid, err := certRenewer.ValidCert()
if err != nil {
logger.Error(err, "failed to validate cert")
continue
}
if valid {
continue
}
logger.Info("rootCA has changed, updating webhook configurations")
if err := certRenewer.RollingUpdate(); err != nil {
logger.Error(err, "unable to trigger a rolling update to re-register webhook server, force restarting")
os.Exit(1)
}
case <-stopCh:
// handler termination signal
logger.V(2).Info("stopping webhook monitor")
@ -117,3 +219,14 @@ func (t *Monitor) Run(register *Register, eventGen event.Interface, client *dcli
}
}
}
// skipWebhookCheck returns true if Kyverno is in rolling update
func skipWebhookCheck(register *Register, logger logr.Logger) bool {
	_, deployObj, fetchErr := register.GetKubePolicyDeployment()
	if fetchErr != nil {
		// without the deployment we cannot tell; do not skip the check
		logger.Info("unable to get Kyverno deployment", "reason", fetchErr.Error())
		return false
	}
	return tls.IsKyvernoIsInRollingUpdate(deployObj.UnstructuredContent(), logger)
}

View file

@ -11,9 +11,11 @@ import (
"github.com/kyverno/kyverno/pkg/config"
client "github.com/kyverno/kyverno/pkg/dclient"
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/tls"
admregapi "k8s.io/api/admissionregistration/v1beta1"
errorsapi "k8s.io/apimachinery/pkg/api/errors"
v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
rest "k8s.io/client-go/rest"
)
@ -64,24 +66,29 @@ func (wrc *Register) Register() error {
wrc.removeWebhookConfigurations()
caData := wrc.readCaData()
if caData == nil {
return errors.New("Unable to extract CA data from configuration")
}
errors := make([]string, 0)
if err := wrc.createVerifyMutatingWebhookConfiguration(); err != nil {
if err := wrc.createVerifyMutatingWebhookConfiguration(caData); err != nil {
errors = append(errors, err.Error())
}
if err := wrc.createPolicyValidatingWebhookConfiguration(); err != nil {
if err := wrc.createPolicyValidatingWebhookConfiguration(caData); err != nil {
errors = append(errors, err.Error())
}
if err := wrc.createPolicyMutatingWebhookConfiguration(); err != nil {
if err := wrc.createPolicyMutatingWebhookConfiguration(caData); err != nil {
errors = append(errors, err.Error())
}
if err := wrc.createResourceValidatingWebhookConfiguration(); err != nil {
if err := wrc.createResourceValidatingWebhookConfiguration(caData); err != nil {
errors = append(errors, err.Error())
}
if err := wrc.createResourceMutatingWebhookConfiguration(); err != nil {
if err := wrc.createResourceMutatingWebhookConfiguration(caData); err != nil {
errors = append(errors, err.Error())
}
@ -122,19 +129,46 @@ func (wrc *Register) Check() error {
// Remove removes all webhook configurations
func (wrc *Register) Remove(cleanUp chan<- struct{}) {
defer close(cleanUp)
if !wrc.cleanupKyvernoResource() {
return
}
wrc.removeWebhookConfigurations()
close(cleanUp)
wrc.removeSecrets()
}
func (wrc *Register) createResourceMutatingWebhookConfiguration() error {
var caData []byte
var config *admregapi.MutatingWebhookConfiguration
if caData = wrc.readCaData(); caData == nil {
return errors.New("Unable to extract CA data from configuration")
// cleanupKyvernoResource returns true if Kyverno deployment is terminating
// or scaled to zero, i.e. when it is safe to clean up webhook configurations
// and managed secrets; it returns false for an in-place pod update.
func (wrc *Register) cleanupKyvernoResource() bool {
	logger := wrc.log.WithName("cleanupKyvernoResource")
	deploy, err := wrc.client.GetResource("", "Deployment", deployNamespace, deployName)
	if err != nil {
		// cannot determine the deployment state; err on the safe side and keep resources
		logger.Error(err, "failed to get deployment")
		return false
	}
	// a deletion timestamp on the deployment means Kyverno is being uninstalled
	if deploy.GetDeletionTimestamp() != nil {
		logger.Info("Kyverno is terminating, clean up Kyverno resources")
		return true
	}
	replicas, _, err := unstructured.NestedInt64(deploy.UnstructuredContent(), "spec", "replicas")
	if err != nil {
		// non-fatal: replicas stays at its zero value on error — NOTE(review):
		// a read failure is then indistinguishable from scale-to-zero; confirm intended
		logger.Error(err, "unable to fetch spec.replicas of Kyverno deployment")
	}
	// scale-to-zero is treated the same as termination
	if replicas == 0 {
		logger.Info("Kyverno is scaled to zero, clean up Kyverno resources")
		return true
	}
	logger.Info("updating Kyverno Pod, won't clean up Kyverno resources")
	return false
}
func (wrc *Register) createResourceMutatingWebhookConfiguration(caData []byte) error {
var config *admregapi.MutatingWebhookConfiguration
if wrc.serverIP != "" {
config = wrc.constructDebugMutatingWebhookConfig(caData)
} else {
@ -158,13 +192,9 @@ func (wrc *Register) createResourceMutatingWebhookConfiguration() error {
return nil
}
func (wrc *Register) createResourceValidatingWebhookConfiguration() error {
var caData []byte
func (wrc *Register) createResourceValidatingWebhookConfiguration(caData []byte) error {
var config *admregapi.ValidatingWebhookConfiguration
if caData = wrc.readCaData(); caData == nil {
return errors.New("Unable to extract CA data from configuration")
}
if wrc.serverIP != "" {
config = wrc.constructDebugValidatingWebhookConfig(caData)
} else {
@ -189,15 +219,9 @@ func (wrc *Register) createResourceValidatingWebhookConfiguration() error {
}
//registerPolicyValidatingWebhookConfiguration create a Validating webhook configuration for Policy CRD
func (wrc *Register) createPolicyValidatingWebhookConfiguration() error {
var caData []byte
func (wrc *Register) createPolicyValidatingWebhookConfiguration(caData []byte) error {
var config *admregapi.ValidatingWebhookConfiguration
// read certificate data
if caData = wrc.readCaData(); caData == nil {
return errors.New("Unable to extract CA data from configuration")
}
if wrc.serverIP != "" {
config = wrc.contructDebugPolicyValidatingWebhookConfig(caData)
} else {
@ -217,14 +241,9 @@ func (wrc *Register) createPolicyValidatingWebhookConfiguration() error {
return nil
}
func (wrc *Register) createPolicyMutatingWebhookConfiguration() error {
var caData []byte
func (wrc *Register) createPolicyMutatingWebhookConfiguration(caData []byte) error {
var config *admregapi.MutatingWebhookConfiguration
if caData = wrc.readCaData(); caData == nil {
return errors.New("Unable to extract CA data from configuration")
}
if wrc.serverIP != "" {
config = wrc.contructDebugPolicyMutatingWebhookConfig(caData)
} else {
@ -245,14 +264,9 @@ func (wrc *Register) createPolicyMutatingWebhookConfiguration() error {
return nil
}
func (wrc *Register) createVerifyMutatingWebhookConfiguration() error {
var caData []byte
func (wrc *Register) createVerifyMutatingWebhookConfiguration(caData []byte) error {
var config *admregapi.MutatingWebhookConfiguration
if caData = wrc.readCaData(); caData == nil {
return errors.New("Unable to extract CA data from configuration")
}
if wrc.serverIP != "" {
config = wrc.constructDebugVerifyMutatingWebhookConfig(caData)
} else {
@ -435,3 +449,24 @@ func (wrc *Register) getVerifyWebhookMutatingWebhookName() string {
func (wrc *Register) GetWebhookTimeOut() time.Duration {
return time.Duration(wrc.timeoutSeconds)
}
// removeSecrets removes Kyverno managed secrets (those labeled
// tls.ManagedByLabel=kyverno) from the Kyverno namespace. Best-effort:
// individual delete failures are logged and the loop continues.
func (wrc *Register) removeSecrets() {
	selector := &v1.LabelSelector{
		MatchLabels: map[string]string{
			tls.ManagedByLabel: "kyverno",
		},
	}

	secretList, err := wrc.client.ListResource("", "Secret", config.KyvernoNamespace, selector)
	if err != nil {
		// return on ANY list error: the original code only returned when the
		// error was NotFound, so any other error fell through and dereferenced
		// a nil secretList. NotFound simply means there is nothing to clean up,
		// so only the unexpected errors are logged.
		if !errorsapi.IsNotFound(err) {
			wrc.log.Error(err, "failed to clean up Kyverno managed secrets")
		}
		return
	}

	for _, secret := range secretList.Items {
		if err := wrc.client.DeleteResource("", "Secret", secret.GetNamespace(), secret.GetName(), false); err != nil {
			wrc.log.Error(err, "failed to delete secret", "ns", secret.GetNamespace(), "name", secret.GetName())
		}
	}
}

View file

@ -112,7 +112,7 @@ func (ws *WebhookServer) handleUpdate(request *v1beta1.AdmissionRequest, policie
}
}
//handleUpdateCloneSourceResource - handles updation of clone source for generate policy
//handleUpdateCloneSourceResource - handles update of clone source for generate policy
func (ws *WebhookServer) handleUpdateCloneSourceResource(resLabels map[string]string, logger logr.Logger) {
policyNames := strings.Split(resLabels["generate.kyverno.io/clone-policy-name"], ",")
for _, policyName := range policyNames {
@ -131,7 +131,7 @@ func (ws *WebhookServer) handleUpdateCloneSourceResource(resLabels map[string]st
}
}
//handleUpdateTargetResource - handles updation of target resource for generate policy
//handleUpdateTargetResource - handles update of target resource for generate policy
func (ws *WebhookServer) handleUpdateTargetResource(request *v1beta1.AdmissionRequest, policies []*v1.ClusterPolicy, resLabels map[string]string, logger logr.Logger) {
enqueueBool := false
newRes, err := enginutils.ConvertToUnstructured(request.Object.Raw)

View file

@ -1,6 +1,7 @@
package webhooks
import (
"errors"
"reflect"
"sort"
"time"
@ -68,7 +69,7 @@ func (ws *WebhookServer) HandleMutation(
}
if !engineResponse.IsSuccessful() && len(engineResponse.GetFailedRules()) > 0 {
logger.Info("failed to apply policy", "policy", policy.Name, "failed rules", engineResponse.GetFailedRules())
logger.Error(errors.New("some rules failed"), "failed to apply policy", "policy", policy.Name, "failed rules", engineResponse.GetFailedRules())
continue
}
@ -112,7 +113,7 @@ func (ws *WebhookServer) HandleMutation(
// if any of the policies fails, print out the error
if !isResponseSuccessful(engineResponses) {
logger.Info("failed to apply mutation rules on the resource, reporting policy violation", "errors", getErrorMsg(engineResponses))
logger.Error(errors.New(getErrorMsg(engineResponses)), "failed to apply mutation rules on the resource, reporting policy violation")
}
}()

View file

@ -4,7 +4,6 @@ import (
"context"
"crypto/tls"
"encoding/json"
"errors"
"fmt"
"io/ioutil"
"net/http"
@ -27,11 +26,13 @@ import (
"github.com/kyverno/kyverno/pkg/policyreport"
"github.com/kyverno/kyverno/pkg/policystatus"
"github.com/kyverno/kyverno/pkg/resourcecache"
ktls "github.com/kyverno/kyverno/pkg/tls"
tlsutils "github.com/kyverno/kyverno/pkg/tls"
userinfo "github.com/kyverno/kyverno/pkg/userinfo"
"github.com/kyverno/kyverno/pkg/utils"
"github.com/kyverno/kyverno/pkg/webhookconfig"
webhookgenerate "github.com/kyverno/kyverno/pkg/webhooks/generate"
"github.com/pkg/errors"
v1beta1 "k8s.io/api/admission/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
informers "k8s.io/client-go/informers/core/v1"
@ -104,6 +105,8 @@ type WebhookServer struct {
// last request time
webhookMonitor *webhookconfig.Monitor
certRenewer *ktls.CertRenewer
// policy report generator
prGenerator policyreport.GeneratorInterface
@ -121,8 +124,6 @@ type WebhookServer struct {
openAPIController *openapi.Controller
supportMutateValidate bool
// resCache - controls creation and fetching of resource informer cache
resCache resourcecache.ResourceCache
@ -148,12 +149,12 @@ func NewWebhookServer(
pCache policycache.Interface,
webhookRegistrationClient *webhookconfig.Register,
webhookMonitor *webhookconfig.Monitor,
certRenewer *ktls.CertRenewer,
statusSync policystatus.Listener,
configHandler config.Interface,
prGenerator policyreport.GeneratorInterface,
grGenerator *webhookgenerate.Generator,
auditHandler AuditHandler,
supportMutateValidate bool,
cleanUp chan<- struct{},
log logr.Logger,
openAPIController *openapi.Controller,
@ -187,26 +188,26 @@ func NewWebhookServer(
nsLister: namespace.Lister(),
nsListerSynced: namespace.Informer().HasSynced,
crbLister: crbInformer.Lister(),
crLister: crInformer.Lister(),
crbSynced: crbInformer.Informer().HasSynced,
crSynced: crInformer.Informer().HasSynced,
eventGen: eventGen,
pCache: pCache,
webhookRegister: webhookRegistrationClient,
statusListener: statusSync,
configHandler: configHandler,
cleanUp: cleanUp,
webhookMonitor: webhookMonitor,
prGenerator: prGenerator,
grGenerator: grGenerator,
grController: grc,
auditHandler: auditHandler,
log: log,
openAPIController: openAPIController,
supportMutateValidate: supportMutateValidate,
resCache: resCache,
debug: debug,
crbLister: crbInformer.Lister(),
crLister: crInformer.Lister(),
crbSynced: crbInformer.Informer().HasSynced,
crSynced: crInformer.Informer().HasSynced,
eventGen: eventGen,
pCache: pCache,
webhookRegister: webhookRegistrationClient,
statusListener: statusSync,
configHandler: configHandler,
cleanUp: cleanUp,
webhookMonitor: webhookMonitor,
certRenewer: certRenewer,
prGenerator: prGenerator,
grGenerator: grGenerator,
grController: grc,
auditHandler: auditHandler,
log: log,
openAPIController: openAPIController,
resCache: resCache,
debug: debug,
}
mux := httprouter.New()
@ -302,25 +303,15 @@ func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1
},
}
}
logger.V(6).Info("received an admission request in mutating webhook")
mutatePolicies := ws.pCache.Get(policycache.Mutate, nil)
validatePolicies := ws.pCache.Get(policycache.ValidateEnforce, nil)
generatePolicies := ws.pCache.Get(policycache.Generate, nil)
// Get namespace policies from the cache for the requested resource namespace
nsMutatePolicies := ws.pCache.Get(policycache.Mutate, &request.Namespace)
mutatePolicies = append(mutatePolicies, nsMutatePolicies...)
// getRoleRef only if policy has roles/clusterroles defined
var roles, clusterRoles []string
var err error
if containRBACInfo(mutatePolicies, validatePolicies, generatePolicies) {
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler)
if err != nil {
logger.Error(err, "failed to get RBAC information for request")
}
}
// convert RAW to unstructured
resource, err := utils.ConvertResource(request.Object.Raw, request.Kind.Group, request.Kind.Version, request.Kind.Kind, request.Namespace)
if err != nil {
@ -334,50 +325,44 @@ func (ws *WebhookServer) ResourceMutation(request *v1beta1.AdmissionRequest) *v1
}
}
var roles, clusterRoles []string
// getRoleRef only if policy has roles/clusterroles defined
if containRBACInfo(mutatePolicies, generatePolicies) {
if roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler); err != nil {
logger.Error(err, "failed to get RBAC information for request")
}
}
userRequestInfo := v1.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: *request.UserInfo.DeepCopy()}
// build context
ctx := enginectx.NewContext()
err = ctx.AddRequest(request)
if err != nil {
logger.Error(err, "failed to load incoming request in context")
AdmissionUserInfo: *request.UserInfo.DeepCopy(),
}
err = ctx.AddUserInfo(userRequestInfo)
ctx, err := newVariablesContext(request, &userRequestInfo)
if err != nil {
logger.Error(err, "failed to load userInfo in context")
logger.Error(err, "unable to build variable context")
}
err = ctx.AddServiceAccount(userRequestInfo.AdmissionUserInfo.Username)
if err != nil {
logger.Error(err, "failed to load service account in context")
if err := ctx.AddImageInfo(&resource); err != nil {
logger.Error(err, "unable to add image info to variables context")
}
var patches []byte
patchedResource := request.Object.Raw
// MUTATION
if ws.supportMutateValidate {
if resource.GetDeletionTimestamp() == nil {
patches = ws.HandleMutation(request, resource, mutatePolicies, ctx, userRequestInfo)
logger.V(6).Info("", "generated patches", string(patches))
patches = ws.HandleMutation(request, resource, mutatePolicies, ctx, userRequestInfo)
logger.V(6).Info("", "generated patches", string(patches))
// patch the resource with patches before handling validation rules
patchedResource = processResourceWithPatches(patches, request.Object.Raw, logger)
logger.V(6).Info("", "patchedResource", string(patchedResource))
}
} else {
logger.Info("mutate rules are not supported prior to Kubernetes 1.14.0")
}
// patch the resource with patches before handling validation rules
patchedResource = processResourceWithPatches(patches, request.Object.Raw, logger)
logger.V(6).Info("", "patchedResource", string(patchedResource))
// GENERATE
if request.Operation == v1beta1.Create || request.Operation == v1beta1.Update {
newRequest := request.DeepCopy()
newRequest.Object.Raw = patchedResource
go ws.HandleGenerate(newRequest, generatePolicies, ctx, userRequestInfo, ws.configHandler)
}
newRequest := request.DeepCopy()
newRequest.Object.Raw = patchedResource
go ws.HandleGenerate(newRequest, generatePolicies, ctx, userRequestInfo, ws.configHandler)
patchType := v1beta1.PatchTypeJSONPatch
return &v1beta1.AdmissionResponse{
@ -396,16 +381,6 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
ws.handleDelete(request)
}
if !ws.supportMutateValidate {
logger.Info("mutate and validate rules are not supported prior to Kubernetes 1.14.0")
return &v1beta1.AdmissionResponse{
Allowed: true,
Result: &metav1.Status{
Status: "Success",
},
}
}
if excludeKyvernoResources(request.Kind.Kind) {
return &v1beta1.AdmissionResponse{
Allowed: true,
@ -435,7 +410,6 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
if containRBACInfo(policies) {
roles, clusterRoles, err = userinfo.GetRoleRef(ws.rbLister, ws.crbLister, request, ws.configHandler)
if err != nil {
logger.Error(err, "failed to get RBAC information for request")
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
@ -444,30 +418,23 @@ func (ws *WebhookServer) resourceValidation(request *v1beta1.AdmissionRequest) *
},
}
}
logger = logger.WithValues("username", request.UserInfo.Username,
"groups", request.UserInfo.Groups, "roles", roles, "clusterRoles", clusterRoles)
}
userRequestInfo := v1.RequestInfo{
Roles: roles,
ClusterRoles: clusterRoles,
AdmissionUserInfo: request.UserInfo}
// build context
ctx := enginectx.NewContext()
err = ctx.AddRequest(request)
if err != nil {
logger.Error(err, "failed to load incoming request in context")
AdmissionUserInfo: *request.UserInfo.DeepCopy(),
}
err = ctx.AddUserInfo(userRequestInfo)
ctx, err := newVariablesContext(request, &userRequestInfo)
if err != nil {
logger.Error(err, "failed to load userInfo in context")
}
err = ctx.AddServiceAccount(userRequestInfo.AdmissionUserInfo.Username)
if err != nil {
logger.Error(err, "failed to load service account in context")
return &v1beta1.AdmissionResponse{
Allowed: false,
Result: &metav1.Status{
Status: "Failure",
Message: err.Error(),
},
}
}
namespaceLabels := make(map[string]string)
@ -512,7 +479,7 @@ func (ws *WebhookServer) RunAsync(stopCh <-chan struct{}) {
logger.Info("starting service")
if !ws.debug {
go ws.webhookMonitor.Run(ws.webhookRegister, ws.eventGen, ws.client, stopCh)
go ws.webhookMonitor.Run(ws.webhookRegister, ws.certRenewer, ws.eventGen, stopCh)
}
}
@ -565,3 +532,20 @@ func (ws *WebhookServer) bodyToAdmissionReview(request *http.Request, writer htt
return admissionReview
}
// newVariablesContext builds an engine variables context pre-populated with
// the incoming admission request, the resolved user info, and the requesting
// service account, wrapping any loading failure with a descriptive message.
func newVariablesContext(request *v1beta1.AdmissionRequest, userRequestInfo *v1.RequestInfo) (*enginectx.Context, error) {
	variablesCtx := enginectx.NewContext()

	err := variablesCtx.AddRequest(request)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load incoming request in context")
	}

	err = variablesCtx.AddUserInfo(*userRequestInfo)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load userInfo in context")
	}

	err = variablesCtx.AddServiceAccount(userRequestInfo.AdmissionUserInfo.Username)
	if err != nil {
		return nil, errors.Wrap(err, "failed to load service account in context")
	}

	return variablesCtx, nil
}

View file

@ -10,7 +10,6 @@ import (
"github.com/go-logr/logr"
v1 "github.com/kyverno/kyverno/pkg/api/kyverno/v1"
"github.com/kyverno/kyverno/pkg/config"
enginectx "github.com/kyverno/kyverno/pkg/engine/context"
"github.com/kyverno/kyverno/pkg/event"
"github.com/kyverno/kyverno/pkg/policycache"
"github.com/kyverno/kyverno/pkg/policyreport"
@ -18,7 +17,6 @@ import (
"github.com/kyverno/kyverno/pkg/resourcecache"
"github.com/kyverno/kyverno/pkg/userinfo"
"github.com/minio/minio/cmd/logger"
"github.com/pkg/errors"
"k8s.io/api/admission/v1beta1"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/wait"
@ -168,20 +166,9 @@ func (h *auditHandler) process(request *v1beta1.AdmissionRequest) error {
ClusterRoles: clusterRoles,
AdmissionUserInfo: request.UserInfo}
// build context
ctx := enginectx.NewContext()
err = ctx.AddRequest(request)
ctx, err := newVariablesContext(request, &userRequestInfo)
if err != nil {
return errors.Wrap(err, "failed to load incoming request in context")
}
err = ctx.AddUserInfo(userRequestInfo)
if err != nil {
return errors.Wrap(err, "failed to load userInfo in context")
}
err = ctx.AddServiceAccount(userRequestInfo.AdmissionUserInfo.Username)
if err != nil {
return errors.Wrap(err, "failed to load service account in context")
logger.Error(err, "unable to build variable context")
}
namespaceLabels := make(map[string]string)

View file

@ -72,6 +72,10 @@ func HandleValidation(
return true, ""
}
if err := ctx.AddImageInfo(&newR); err != nil {
logger.Error(err, "unable to add image info to variables context")
}
policyContext := &engine.PolicyContext{
NewResource: newR,
OldResource: oldR,

View file

@ -1,31 +0,0 @@
# Add default labels to objects
Labels are important pieces of metadata that can be attached to just about anything in Kubernetes. They are often used to tag various resources as being associated in some way. Kubernetes has no ability to assign a series of "default" labels to incoming objects. This sample policy shows you how to assign one or multiple labels by default to any object you wish. Here it shows adding a label called `custom-foo-label` with value `my-bar-default` to resources of type `Pod`, `Service`, and `Namespace` but others can be added or removed as desired.
Alternatively, you may wish to only add the `custom-foo-label` if it is not already present in the creation request. For example, if a user/process submits a request for a new `Namespace` object and the manifest already includes the label `custom-foo-label` with a value of `custom-value`, Kyverno can leave this label untouched which results in the newly-created object having the label `custom-foo-label=custom-value` instead of `my-bar-default`. In order to do this, enclose the label in the sample manifest in `+()` so the key name becomes `+(custom-foo-label)`. This conditional instructs Kyverno to only add the label if absent.
## Policy YAML
[add_default_labels.yaml](more/add_default_labels.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-default-labels
spec:
background: false
rules:
- name: add-default-labels
match:
resources:
kinds:
- Pod
- Service
- Namespace
mutate:
patchStrategicMerge:
metadata:
labels:
custom-foo-label: my-bar-default
```

View file

@ -1,42 +0,0 @@
# Default deny all ingress traffic
By default, Kubernetes allows communications across all pods within a cluster. Network policies and, a CNI that supports network policies, must be used to restrict communications.
A default `NetworkPolicy` should be configured for each namespace to default deny all ingress traffic to the pods in the namespace. Application teams can then configure additional `NetworkPolicy` resources to allow desired traffic to application pods from select sources.
## Policy YAML
[add_network_policy.yaml](best_practices/add_network_policy.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-networkpolicy
spec:
rules:
- name: default-deny-ingress
match:
resources:
kinds:
- Namespace
name: "*"
exclude:
resources:
namespaces:
- "kube-system"
- "default"
- "kube-public"
- "kyverno"
generate:
kind: NetworkPolicy
name: default-deny-ingress
namespace: "{{request.object.metadata.name}}"
data:
spec:
# select all pods in the namespace
podSelector: {}
policyTypes:
- Ingress
````

View file

@ -1,62 +0,0 @@
# Configure namespace limits and quotas
To limit the number of resources like CPU and memory, as well as objects that may be consumed by workloads in a namespace, it is important to configure resource limits and quotas for each namespace. The generated default limitrange sets the default quotas for a container.
## Additional Information
* [Resource Quotas](https://kubernetes.io/docs/concepts/policy/resource-quotas/)
## Policy YAML
[add_ns_quota.yaml](best_practices/add_ns_quota.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-ns-quota
spec:
rules:
- name: generate-resourcequota
match:
resources:
kinds:
- Namespace
exclude:
resources:
namespaces:
- "kube-system"
- "default"
- "kube-public"
- "kyverno"
generate:
kind: ResourceQuota
name: default-resourcequota
namespace: "{{request.object.metadata.name}}"
data:
spec:
hard:
requests.cpu: '4'
requests.memory: '16Gi'
limits.cpu: '4'
limits.memory: '16Gi'
- name: generate-limitrange
match:
resources:
kinds:
- Namespace
generate:
kind: LimitRange
name: default-limitrange
namespace: "{{request.object.metadata.name}}"
data:
spec:
limits:
- default:
cpu: 500m
memory: 1Gi
defaultRequest:
cpu: 200m
memory: 256Mi
type: Container
````

View file

@ -1,50 +0,0 @@
# Mutate pods with `emptyDir` and `hostPath` with `safe-to-evict`
The Kubernetes cluster autoscaler does not evict pods that use `hostPath` or `emptyDir` volumes. To allow eviction of these pods, the following annotation must be added to the pods:
````yaml
cluster-autoscaler.kubernetes.io/safe-to-evict: true
````
This policy matches and mutates pods with `emptyDir` and `hostPath` volumes to add the `safe-to-evict` annotation if it is not specified.
## Policy YAML
[add_safe_to_evict_annotation.yaml](best_practices/add_safe_to_evict.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-safe-to-evict
spec:
rules:
- name: "annotate-empty-dir"
match:
resources:
kinds:
- Pod
mutate:
patchStrategicMerge:
metadata:
annotations:
+(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
spec:
volumes:
- (emptyDir): {}
- name: annotate-host-path
match:
resources:
kinds:
- Pod
mutate:
patchStrategicMerge:
metadata:
annotations:
+(cluster-autoscaler.kubernetes.io/safe-to-evict): "true"
spec:
volumes:
- (hostPath):
path: "*"
````

View file

@ -1,39 +0,0 @@
# Add RuntimeDefault Seccomp Profile Security Context to pods
Seccomp Profiles restrict the system calls that can be made from a process. The Linux kernel has a few hundred system calls, but most of them are not needed by any given process. If a process can be compromised and tricked into making other system calls, though, it may lead to a security vulnerability that could result in the compromise of the whole system. By restricting what system calls can be made, seccomp is a key component for building application sandboxes. https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp
## Policy YAML
[add_pod_default_seccompprofile.yaml](more/add_pod_default_seccompprofile.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-pod-default-seccompprofile
annotations:
policies.kyverno.io/category: Security
spec:
background: false
validationFailureAction: audit
rules:
- name: add-pod-default-seccompprofile
match:
resources:
kinds:
- Pod
exclude:
resources:
namespaces:
- "kube-system"
- "kube-public"
- "default"
- "kyverno"
mutate:
patchStrategicMerge:
spec:
securityContext:
seccompProfile:
type: RuntimeDefault
```

View file

@ -1,49 +0,0 @@
# Check userID, groupID & fsGroup
All processes inside the pod can be made to run with a specific user and groupID by setting `runAsUser` and `runAsGroup`, respectively. `fsGroup` can be specified to make sure any file created in the volume will have the specified groupID. These options can be used to validate the IDs used for user and group.
## Policy YAML
[policy_validate_user_group_fsgroup_id.yaml](more/restrict_usergroup_fsgroup_id.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: validate-userid-groupid-fsgroup
spec:
rules:
- name: validate-userid
match:
resources:
kinds:
- Pod
validate:
message: "User ID should be 1000"
pattern:
spec:
securityContext:
runAsUser: '1000'
- name: validate-groupid
match:
resources:
kinds:
- Pod
validate:
message: "Group ID should be 3000"
pattern:
spec:
securityContext:
runAsGroup: '3000'
- name: validate-fsgroup
match:
resources:
kinds:
- Pod
validate:
message: "fsgroup should be 2000"
pattern:
spec:
securityContext:
fsGroup: '2000'
````

View file

@ -1,78 +0,0 @@
# Create Pod Anti-Affinity
In cases where you wish to run applications with multiple replicas, it may be required to ensure those Pods are separated from each other for availability purposes. While a `DaemonSet` resource would accomplish similar goals, your `Deployment` object may need fewer replicas than there are nodes. Pod anti-affinity rules ensure that Pods are separated from each other. Inversely, affinity rules ensure they are co-located.
This sample policy configures all Deployments with Pod anti-affinity rules with the `preferredDuringSchedulingIgnoredDuringExecution` option. It requires the topology key exists on all nodes with the key name of `kubernetes.io/hostname` and requires that the label `app` is applied to the Deployment.
In order to test the policy, you can use this sample Deployment manifest below.
```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: busybox
distributed: required
name: busybox
spec:
replicas: 2
selector:
matchLabels:
app: busybox
distributed: required
template:
metadata:
labels:
app: busybox
distributed: required
spec:
containers:
- image: busybox:1.28
name: busybox
command: ["sleep", "9999"]
```
## More Information
* [Inter-pod affinity and anti-affinity](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity)
## Policy YAML
[create_pod_antiaffinity.yaml](more/create_pod_antiaffinity.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: insert-podantiaffinity
spec:
rules:
- name: insert-podantiaffinity
match:
resources:
kinds:
- Deployment
preconditions:
# This precondition ensures that the label `app` is applied to Pods within the Deployment resource.
- key: "{{request.object.metadata.labels.app}}"
operator: NotEquals
value: ""
mutate:
patchStrategicMerge:
spec:
template:
spec:
# Add the `affinity` key and others if not already specified in the Deployment manifest.
+(affinity):
+(podAntiAffinity):
+(preferredDuringSchedulingIgnoredDuringExecution):
- weight: 1
podAffinityTerm:
topologyKey: "kubernetes.io/hostname"
labelSelector:
matchExpressions:
- key: app
operator: In
values:
- "{{request.object.metadata.labels.app}}"
```

View file

@ -1,28 +0,0 @@
# Disallow use of bind mounts (`hostPath` volumes)
The volume of type `hostPath` allows pods to use host bind mounts (i.e. directories and volumes mounted to a host path) in containers. Using host resources can be used to access shared data or escalate privileges. Also, this couples pods to a specific host and data persisted in the `hostPath` volume is coupled to the life of the node leading to potential pod scheduling failures. It is highly recommended that applications are designed to be decoupled from the underlying infrastructure (in this case, nodes).
## Policy YAML
[disallow_bind_mounts.yaml](best_practices/disallow_bind_mounts.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-bind-mounts
spec:
validationFailureAction: audit
rules:
- name: validate-hostPath
match:
resources:
kinds:
- Pod
validate:
message: "Host path volumes are not allowed"
pattern:
spec:
=(volumes):
- X(hostPath): "null"
````

View file

@ -1,71 +0,0 @@
# Disallow use of default namespace
Kubernetes namespaces are an optional feature that provide a way to segment and isolate cluster resources across multiple applications and users. As a best practice, workloads should be isolated with namespaces. Namespaces should be required and the default (empty) namespace should not be used.
## Policy YAML
[disallow_default_namespace.yaml](best_practices/disallow_default_namespace.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-default-namespace
annotations:
pod-policies.kyverno.io/autogen-controllers: none
policies.kyverno.io/category: Workload Isolation
policies.kyverno.io/description: Kubernetes namespaces are an optional feature
that provide a way to segment and isolate cluster resources across multiple
applications and users. As a best practice, workloads should be isolated with
namespaces. Namespaces should be required and the default (empty) namespace
should not be used.
spec:
validationFailureAction: audit
rules:
- name: validate-namespace
match:
resources:
kinds:
- Pod
validate:
message: "Using 'default' namespace is not allowed"
pattern:
metadata:
namespace: "!default"
- name: require-namespace
match:
resources:
kinds:
- Pod
validate:
message: "A namespace is required"
pattern:
metadata:
namespace: "?*"
- name: validate-podcontroller-namespace
match:
resources:
kinds:
- DaemonSet
- Deployment
- Job
- StatefulSet
validate:
message: "Using 'default' namespace is not allowed for podcontrollers"
pattern:
metadata:
namespace: "!default"
- name: require-podcontroller-namespace
match:
resources:
kinds:
- DaemonSet
- Deployment
- Job
- StatefulSet
validate:
message: "A namespace is required for podcontrollers"
pattern:
metadata:
namespace: "?*"
````

View file

@ -1,29 +0,0 @@
# Disallow Docker socket bind mount
The Docker socket bind mount allows access to the Docker daemon on the node. This access can be used for privilege escalation and to manage containers outside of Kubernetes, and hence should not be allowed.
## Policy YAML
[disallow_docker_sock_mount.yaml](best_practices/disallow_docker_sock_mount.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-docker-sock-mount
spec:
validationFailureAction: audit
rules:
- name: validate-docker-sock-mount
match:
resources:
kinds:
- Pod
validate:
message: "Use of the Docker Unix socket is not allowed"
pattern:
spec:
=(volumes):
- =(hostPath):
path: "!/var/run/docker.sock"
````

View file

@ -1,29 +0,0 @@
# Disallow Helm Tiller
Tiller, in the [now-deprecated Helm v2](https://helm.sh/blog/helm-v2-deprecation-timeline/), has known security challenges. It requires administrative privileges and acts as a shared resource accessible to any authenticated user. Tiller can lead to privilege escalation as restricted users can impact other users.
## Policy YAML
[disallow_helm_tiller.yaml](best_practices/disallow_helm_tiller.yaml)
````yaml
apiVersion : kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-helm-tiller
spec:
validationFailureAction: audit
rules:
- name: validate-helm-tiller
match:
resources:
kinds:
- Pod
validate:
message: "Helm Tiller is not allowed"
pattern:
spec:
containers:
- name: "*"
image: "!*tiller*"
````

View file

@ -1,41 +0,0 @@
# Disallow `hostNetwork` and `hostPort`
Using `hostPort` and `hostNetwork` allows pods to share the host networking stack allowing potential snooping of network traffic across application pods.
## Policy YAML
[disallow_host_network_port.yaml](best_practices/disallow_host_network_port.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-host-network-port
spec:
validationFailureAction: audit
rules:
- name: validate-host-network
match:
resources:
kinds:
- Pod
validate:
message: "Use of hostNetwork is not allowed"
pattern:
spec:
=(hostNetwork): false
- name: validate-host-port
match:
resources:
kinds:
- Pod
validate:
message: "Use of hostPort is not allowed"
pattern:
spec:
containers:
- name: "*"
=(ports):
- X(hostPort): "null"
````

View file

@ -1,30 +0,0 @@
# Disallow `hostPID` and `hostIPC`
Sharing the host's PID namespace allows an application pod to gain visibility of processes on the host, potentially exposing sensitive information. Sharing the host's IPC namespace also allows the container process to communicate with processes on the host.
To avoid the pod container from having visibility to the host process space, validate that `hostPID` and `hostIPC` are set to `false`.
## Policy YAML
[disallow_host_pid_ipc.yaml](best_practices/disallow_host_pid_ipc.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-host-pid-ipc
spec:
validationFailureAction: audit
rules:
- name: validate-hostPID-hostIPC
match:
resources:
kinds:
- Pod
validate:
message: "Use of host PID and IPC namespaces is not allowed"
pattern:
spec:
=(hostPID): "false"
=(hostIPC): "false"
````

View file

@ -1,39 +0,0 @@
# Disallow latest image tag
The `:latest` tag is mutable and can lead to unexpected errors if the upstream image changes. A best practice is to use an immutable tag that maps to a specific and tested version of an application image.
## Policy YAML
[disallow_latest_tag.yaml](best_practices/disallow_latest_tag.yaml)
````yaml
apiVersion : kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-latest-tag
spec:
validationFailureAction: audit
rules:
- name: require-image-tag
match:
resources:
kinds:
- Pod
validate:
message: "An image tag is required"
pattern:
spec:
containers:
- image: "*:*"
- name: validate-image-tag
match:
resources:
kinds:
- Pod
validate:
message: "Using a mutable image tag e.g. 'latest' is not allowed"
pattern:
spec:
containers:
- image: "!*:latest"
````

View file

@ -1,33 +0,0 @@
# Disallow new capabilities
Linux allows defining fine-grained permissions using capabilities. With Kubernetes, it is possible to add capabilities that escalate the level of kernel access and allow other potentially dangerous behaviors. This policy enforces that containers cannot add new capabilities. Other policies can be used to set default capabilities.
## Policy YAML
[disallow_new_capabilities.yaml](best_practices/disallow_new_capabilities.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-new-capabilities
annotations:
pod-policies.kyverno.io/autogen-controllers: none
spec:
validationFailureAction: audit
rules:
- name: validate-add-capabilities
match:
resources:
kinds:
- Pod
validate:
message: "New capabilities cannot be added"
pattern:
spec:
containers:
- name: "*"
=(securityContext):
=(capabilities):
X(add): null
````

View file

@ -1,46 +0,0 @@
# Disallow privileged containers
Privileged containers are defined as any container where the container uid 0 is mapped to the host's uid 0. A process within a privileged container can get unrestricted host access. With `securityContext.allowPrivilegeEscalation` enabled, a process can gain privileges from its parent.
To disallow privileged containers and privilege escalation, run pod containers with `securityContext.privileged` set to `false` and `securityContext.allowPrivilegeEscalation` set to `false`.
## Policy YAML
[disallow_privileged.yaml](best_practices/disallow_privileged.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-privileged
spec:
validationFailureAction: audit
rules:
- name: validate-privileged
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set privileged to false"
pattern:
spec:
containers:
- =(securityContext):
# https://github.com/kubernetes/api/blob/7dc09db16fb8ff2eee16c65dc066c85ab3abb7ce/core/v1/types.go#L5707-L5711
# k8s default to false
=(privileged): false
- name: validate-allowPrivilegeEscalation
match:
resources:
kinds:
- Pod
validate:
message: "Privileged mode is not allowed. Set allowPrivilegeEscalation to false"
pattern:
spec:
containers:
- securityContext:
# https://github.com/kubernetes/api/blob/7dc09db16fb8ff2eee16c65dc066c85ab3abb7ce/core/v1/types.go#L5754
allowPrivilegeEscalation: false
````

View file

@ -1,43 +0,0 @@
# Run as non-root user
By default, all processes in a container run as the root user (uid 0). To prevent potential compromise of container hosts, specify a non-root user and least privileged user ID when building the container image and require that application containers run as non-root users (i.e. set `runAsNonRoot` to `true`).
## Additional Information
* [Pod Security Context](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/)
## Policy YAML
[disallow_root_user.yaml](best_practices/disallow_root_user.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-root-user
spec:
validationFailureAction: audit
rules:
- name: validate-runAsNonRoot
match:
resources:
kinds:
- Pod
validate:
message: "Running as root user is not allowed. Set runAsNonRoot to true"
anyPattern:
- spec:
securityContext:
runAsNonRoot: true
- spec:
securityContext:
runAsUser: ">0"
- spec:
containers:
- securityContext:
runAsNonRoot: true
- spec:
containers:
- securityContext:
runAsUser: ">0"
````

View file

@ -1,38 +0,0 @@
# Disallow Secrets from environment variables
Secrets in Kubernetes are often sensitive pieces of information whose content should be protected. Although they can be used in many ways, when mounting them as environment variables, some applications can write their values to STDOUT revealing this sensitive information in log files and potentially other exposure. As a best practice, Kubernetes Secrets should be mounted instead as volumes.
This sample policy checks any incoming Pod manifests and ensures that Secrets are not mounted as environment variables.
## More Information
* [Kubernetes Secrets](https://kubernetes.io/docs/concepts/configuration/secret/)
## Policy YAML
[disallow_secrets_from_env_vars.yaml](more/disallow_secrets_from_env_vars.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: secrets-not-from-env-vars
spec:
background: false
validationFailureAction: audit
rules:
- name: secrets-not-from-env-vars
match:
resources:
kinds:
- Pod
validate:
message: "Secrets must be mounted as volumes, not as environment variables."
pattern:
spec:
containers:
- name: "*"
=(env):
- =(valueFrom):
X(secretKeyRef): "null"
```

View file

@ -1,32 +0,0 @@
# Disallow changes to kernel parameters
The Sysctl interface allows modifications to kernel parameters at runtime. In a Kubernetes pod, these parameters can be specified under `securityContext.sysctls`. Kernel parameter modifications can be used for exploits and should be restricted.
## Additional Information
* [List of supported namespaced sysctl interfaces](https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/)
## Policy YAML
[disallow_sysctls.yaml](best_practices/disallow_sysctls.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: disallow-sysctls
spec:
validationFailureAction: audit
rules:
- name: validate-sysctls
match:
resources:
kinds:
- Pod
validate:
message: "Changes to kernel parameters are not allowed"
pattern:
spec:
securityContext:
X(sysctls): null
````

View file

@ -1,88 +0,0 @@
# Require `livenessProbe` and `readinessProbe` are different
Pod liveness and readiness probes are often used as a check to ensure either the health of an already running Pod or when one is ready to receive traffic. For a sample policy with more information and which contains a validation rule that both are present, see [require_probes.yaml](RequirePodProbes.md).
This sample checks to ensure that `livenessProbe` and `readinessProbe` are configured differently. When these two probes are configured but are set up the same way, race conditions can result as Kubernetes continues to kill and recreate a Pod never letting it enter a running state. This sample satisfies a common best practice in which these probes, if present, do not overlap and potentially cause this condition.
In this sample policy, a series of `deny` rules exist, one per container, to compare the `livenessProbe` map to the `readinessProbe`. If any container in a Pod potentially having multiple is found to have identical probes, its creation will be blocked. Note that in this sample policy the `validationFailureAction` is set to `enforce` due to the use of a `deny` rule rather than a `validate` rule. By using the annotation `pod-policies.kyverno.io/autogen-controllers`, it modifies the default behavior and ensures that only Pods originating from DaemonSet, Deployment, and StatefulSet objects are validated.
If you may potentially have more than four containers in a Pod against which this policy should operate, duplicate one of the rules found within and change the array member of the `containers` key in fields `key` and `value`. For example, to match against a potential fifth container, duplicate a rule and change `containers[3]` to `containers[4]`.
## More Information
* [Kyverno Deny Rules](https://kyverno.io/docs/writing-policies/validate/#deny-rules)
* [Kyverno Auto-Gen Rules for Pod Controllers](https://kyverno.io/docs/writing-policies/autogen/)
* [Configure Liveness, Readiness and Startup Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)
## Policy YAML
[ensure_probes_different.yaml](more/ensure_probes_different.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: validate-probes
annotations:
# Only applies to pods originating from DaemonSet, Deployment, or StatefulSet.
pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,StatefulSet
spec:
validationFailureAction: enforce
background: false
rules:
# Checks the first container in a Pod.
- name: validate-probes-c0
match:
resources:
kinds:
- Pod
validate:
message: "Liveness and readiness probes cannot be the same."
# A `deny` rule is different in structure than a `validate` rule and inverts the check. It uses `conditions` written in JMESPath notation upon which to base its decisions.
deny:
conditions:
# In this condition, it checks the entire map structure of the `readinessProbe` against that of the `livenessProbe`. If both are found to be equal, the Pod creation
# request will be denied.
- key: "{{ request.object.spec.containers[0].readinessProbe }}"
operator: Equals
value: "{{ request.object.spec.containers[0].livenessProbe }}"
# Checks the second container in a Pod.
- name: validate-probes-c1
match:
resources:
kinds:
- Pod
validate:
message: "Liveness and readiness probes cannot be the same."
deny:
conditions:
- key: "{{ request.object.spec.containers[1].readinessProbe }}"
operator: Equals
value: "{{ request.object.spec.containers[1].livenessProbe }}"
# Checks the third container in a Pod.
- name: validate-probes-c2
match:
resources:
kinds:
- Pod
validate:
message: "Liveness and readiness probes cannot be the same."
deny:
conditions:
- key: "{{ request.object.spec.containers[2].readinessProbe }}"
operator: Equals
value: "{{ request.object.spec.containers[2].livenessProbe }}"
# Checks the fourth container in a Pod.
- name: validate-probes-c3
match:
resources:
kinds:
- Pod
validate:
message: "Liveness and readiness probes cannot be the same."
deny:
conditions:
- key: "{{ request.object.spec.containers[3].readinessProbe }}"
operator: Equals
value: "{{ request.object.spec.containers[3].livenessProbe }}"
```

View file

@ -1,88 +0,0 @@
# Sample Policies
Sample policies are designed to be applied to your Kubernetes clusters with minimal changes.
The policies are mostly validation rules in `audit` mode (i.e. your existing workloads will not be impacted, but will be audited for policy compliance). It is recommended that all policies be tested and observed in a non-production environment before setting `enforce` mode.
## Best Practice Policies
These policies are highly recommended.
1. [Disallow root user](DisallowRootUser.md)
1. [Disallow privileged containers](DisallowPrivilegedContainers.md)
1. [Disallow new capabilities](DisallowNewCapabilities.md)
1. [Disallow kernel parameter changes](DisallowSysctls.md)
1. [Disallow use of bind mounts (`hostPath` volumes)](DisallowBindMounts.md)
1. [Disallow docker socket bind mount](DisallowDockerSockMount.md)
1. [Disallow `hostNetwork` and `hostPort`](DisallowHostNetworkPort.md)
1. [Disallow `hostPID` and `hostIPC`](DisallowHostPIDIPC.md)
1. [Disallow use of default namespace](DisallowDefaultNamespace.md)
1. [Disallow latest image tag](DisallowLatestTag.md)
1. [Disallow Helm Tiller](DisallowHelmTiller.md)
1. [Require read-only root filesystem](RequireReadOnlyRootFS.md)
1. [Require pod resource requests and limits](RequirePodRequestsLimits.md)
1. [Require pod `livenessProbe` and `readinessProbe`](RequirePodProbes.md)
1. [Add default network policy](AddDefaultNetworkPolicy.md)
1. [Add namespace quotas](AddNamespaceQuotas.md)
1. [Add `safe-to-evict` for pods with `emptyDir` and `hostPath` volumes](AddSafeToEvict.md)
## Additional Policies
These policies provide additional best practices and are worthy of close consideration. These policies may require specific changes for your workloads and environments.
1. [Restrict image registries](RestrictImageRegistries.md)
1. [Restrict `NodePort` services](RestrictNodePort.md)
1. [Restrict `LoadBalancer` services](RestrictLoadBalancer.md)
1. [Restrict auto-mount of service account credentials](RestrictAutomountSAToken.md)
1. [Restrict ingress classes](RestrictIngressClasses.md)
1. [Restrict User Group](CheckUserGroup.md)
1. [Require pods are labeled](RequireLabels.md)
1. [Require pods have certain labels](RequireCertainLabels.md)
1. [Require Deployments have multiple replicas](RequireDeploymentsHaveReplicas.md)
1. [Spread Pods across topology](SpreadPodsAcrossTopology.md)
1. [Create Pod Anti-Affinity](CreatePodAntiAffinity.md)
1. [Ensure Pod `livenessProbe` and `readinessProbe` are different](EnsurePodProbesDifferent.md)
1. [Disallow mounting Secrets as environment variables](DisallowSecretsFromEnvVars.md)
1. [Add default labels](AddDefaultLabels.md)
1. [Require all Pods drop all capabilities](RequirePodsDropAll.md)
1. [Add seccompProfile securityContext ](AddSeccompProfile.md)
## Miscellaneous Policies
Policies in this group are either highly-specific, involve third-party CRDs, or may be variations on standard Best Practice or Additional policies.
1. [Require `imagePullPolicy` of `Always` for images not using `latest` tags](RequireImagePullPolicyAlways.md)
1. [Require images using `latest` tag not use `imagePullPolicy` of `Always`](RequireLatestImagesNotUseAlways.md)
## Applying the sample policies
To apply these policies to your cluster, install Kyverno and import the policies as follows:
### Install Kyverno
````sh
kubectl create -f https://raw.githubusercontent.com/kyverno/kyverno/main/definitions/release/install.yaml
````
<small>[(installation docs)](../documentation/installation.md)</small>
### Apply Kyverno Policies
To start applying policies to your cluster, first clone the repo:
````bash
git clone https://github.com/kyverno/kyverno.git
cd kyverno
````
Import best practices from [here](best_practices):
````bash
kubectl create -f samples/best_practices
````
Import additional policies from [here](more):
````bash
kubectl create -f samples/more/
````

View file

@ -1,31 +0,0 @@
# Require certain labels
In many cases, you may require that at least a certain number of labels are assigned to each Pod from a select list of approved labels. This sample policy demonstrates the [`anyPattern`](https://kyverno.io/docs/writing-policies/validate/#anypattern---logical-or-across-multiple-validation-patterns) option in a policy by requiring any of the two possible labels defined within. A pod must either have the label `app.kubernetes.io/name` or `app.kubernetes.io/component` defined. If you would rather validate that all Pods have multiple labels in an AND fashion rather than OR, check out the [require_labels](RequireLabels.md) example.
## Policy YAML
[require_certain_labels.yaml](best_practices/require_certain_labels.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-certain-labels
spec:
validationFailureAction: audit
rules:
- name: validate-certain-labels
match:
resources:
kinds:
- Pod
validate:
message: "The label `app.kubernetes.io/name` or `app.kubernetes.io/component` is required."
anyPattern:
- metadata:
labels:
app.kubernetes.io/name: "?*"
- metadata:
labels:
app.kubernetes.io/component: "?*"
```

View file

@ -1,40 +0,0 @@
# Require deployments have multiple replicas
Deployments with only a single replica present availability concerns if that single replica fails. In most cases, you would want Deployment objects to have more than one replica to ensure continued availability if not scale.
This sample policy requires that Deployments have more than one replica excluding a list of system namespaces.
## More Information
* [Kubernetes Deployments](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/)
## Policy YAML
[require_deployments_have_multiple_replicas.yaml](more/require_deployments_have_multiple_replicas.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: deployment-has-multiple-replicas
spec:
validationFailureAction: audit
rules:
- name: deployment-has-multiple-replicas
match:
resources:
kinds:
- Deployment
exclude:
resources:
namespaces:
- kyverno
- kube-system
- kube-node-lease
- kube-public
validate:
message: "Deployments must have more than one replica to ensure availability."
pattern:
spec:
replicas: ">1"
```

View file

@ -1,29 +0,0 @@
# Require `imagePullPolicy` is set to `Always` for images not using `latest` tags
By default, Kubernetes sets the `imagePullPolicy` for images which specify a tag to be `IfNotPresent`. In some cases, this may not be desired where the image could be rebuilt upstream. This sample policy ensures that all containers have their `imagePullPolicy` set to `Always`.
## Policy YAML
[imagepullpolicy-always.yaml](misc/imagepullpolicy-always.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: imagepullpolicy-always
spec:
validationFailureAction: audit
background: false
rules:
- name: imagepullpolicy-always
match:
resources:
kinds:
- Pod
validate:
message: "The imagePullPolicy must be set to `Always` for all containers when a tag other than `latest` is used."
pattern:
spec:
containers:
- imagePullPolicy: Always
```

View file

@ -1,36 +0,0 @@
# Require labels
Labels are a fundamental and important way to assign descriptive metadata to Kubernetes resources, especially Pods. Labels are especially important as the number of applications grow and are composed in different ways.
This sample policy requires that the label `app.kubernetes.io/name` be defined on all Pods. If you wish to require that all Pods have multiple labels defined (as opposed to [any labels from an approved list](RequireCertainLabels.md)), this policy can be altered by adding more labels.
## More Information
* [Common labels](https://kubernetes.io/docs/concepts/overview/working-with-objects/common-labels/)
## Policy YAML
[require_labels.yaml](best_practices/require_labels.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-labels
spec:
validationFailureAction: audit
rules:
- name: check-for-labels
match:
resources:
kinds:
- Pod
validate:
message: "The label `app.kubernetes.io/name` is required."
pattern:
metadata:
labels:
app.kubernetes.io/name: "?*"
# You can add more labels if you wish the policy to validate more than just one is present. Uncomment the below line, or add new ones.
#app.kubernetes.io/component: "?*"
```

View file

@ -1,32 +0,0 @@
# Require images using the `latest` tag to set `imagePullPolicy` to a value other than `Always`
When using the `latest` tag for images, although generally [not a best practice](DisallowLatestTag.md), Kubernetes defaults its `imagePullPolicy` to `Always`. Since Docker Hub has instituted a [rate-limiting policy](https://www.docker.com/blog/what-you-need-to-know-about-upcoming-docker-hub-rate-limiting/), this could result in reaching that limit faster than anticipated, which could mean errors for other Pods in the cluster or across the enterprise. Ensuring those `latest`-tagged images do not use the default of `Always` is one way to ensure pulls are only when needed.
This sample policy checks the `image` value and ensures that if `:latest` is defined that the `imagePullPolicy` must use something other than the value of `Always`. Note that if no tag is defined, Kyverno will not see that as a violation of the policy.
## Policy YAML
[latestimage-notalways.yaml](misc/latestimage-notalways.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: latestimage-notalways
spec:
validationFailureAction: audit
background: false
rules:
- name: latestimage-notalways
match:
resources:
kinds:
- Pod
validate:
message: "When using the `latest` tag, the `imagePullPolicy` must not use `Always`."
pattern:
spec:
containers:
- (image): "*:latest"
imagePullPolicy: "!Always"
```

View file

@ -1,42 +0,0 @@
# Require `livenessProbe` and `readinessProbe`
Liveness and readiness probes need to be configured to correctly manage a pod's lifecycle during deployments, restarts, and upgrades.
For each pod, a periodic `livenessProbe` is performed by the kubelet to determine if the pod's containers are running or need to be restarted. A `readinessProbe` is used by services and deployments to determine if the pod is ready to receive network traffic.
In this sample policy, a validation rule checks to ensure that all Pods have both a liveness and a readiness probe defined by looking at the `periodSeconds` field. By using the annotation `pod-policies.kyverno.io/autogen-controllers`, it modifies the default behavior and ensures that only Pods originating from DaemonSet, Deployment, and StatefulSet objects are validated.
## More Information
* [Kyverno Auto-Gen Rules for Pod Controllers](https://kyverno.io/docs/writing-policies/autogen/)
* [Configure Liveness, Readiness and Startup Probes](https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/)
## Policy YAML
[require_probes.yaml](best_practices/require_probes.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-pod-probes
annotations:
pod-policies.kyverno.io/autogen-controllers: DaemonSet,Deployment,StatefulSet
spec:
validationFailureAction: audit
rules:
- name: validate-livenessProbe-readinessProbe
match:
resources:
kinds:
- Pod
validate:
message: "Liveness and readiness probes are required"
pattern:
spec:
containers:
- livenessProbe:
periodSeconds: ">0"
readinessProbe:
periodSeconds: ">0"
```

View file

@ -1,35 +0,0 @@
# Require pod resource requests and limits
Application workloads share cluster resources. Hence, it is important to manage resources assigned to each pod. It is recommended that `resources.requests.cpu`, `resources.requests.memory` and `resources.limits.memory` are configured per pod. Other resources such as GPUs may also be specified as needed.
If a namespace level request or limit is specified, defaults will automatically be applied to each pod based on the `LimitRange` configuration.
## Policy YAML
[require_pod_requests_limits.yaml](best_practices/require_pod_requests_limits.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-pod-requests-limits
spec:
validationFailureAction: audit
rules:
- name: validate-resources
match:
resources:
kinds:
- Pod
validate:
message: "CPU and memory resource requests and limits are required"
pattern:
spec:
containers:
- resources:
requests:
memory: "?*"
cpu: "?*"
limits:
memory: "?*"
````

View file

@ -1,49 +0,0 @@
# Require Pods Drop All Capabilities
Containers may optionally ask for specific Linux capabilities without requiring root on the node. As a security best practice, containers should only specify exactly which capabilities they need. This starts with dropping all capabilities and only selectively adding ones back.
This example policy requires that all containers drop all capabilities.
## More information
* [Set Capabilities for a Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-capabilities-for-a-container)
## Policy YAML
[require_drop_all.yaml](more/require_drop_all.yaml)
```yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: drop-all-capabilities
spec:
validationFailureAction: audit
rules:
- name: drop-all-containers
match:
resources:
kinds:
- Pod
validate:
message: "Drop all must be defined for every container in the Pod."
pattern:
spec:
containers:
- securityContext:
capabilities:
drop: ["ALL"]
- name: drop-all-initcontainers
match:
resources:
kinds:
- Pod
validate:
message: "Drop all must be defined for every container in the Pod."
pattern:
spec:
initContainers:
- securityContext:
capabilities:
drop: ["ALL"]
```

View file

@ -1,29 +0,0 @@
# Require read-only root filesystem
A read-only root filesystem helps to enforce an immutable infrastructure strategy; the container only needs to write to mounted volumes that can persist state even if the container exits. An immutable root filesystem can also prevent malicious binaries from writing to the host system.
## Policy YAML
[require_ro_rootfs.yaml](best_practices/require_ro_rootfs.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-ro-rootfs
spec:
validationFailureAction: audit
rules:
- name: validate-readOnlyRootFilesystem
match:
resources:
kinds:
- Pod
validate:
message: "Root filesystem must be read-only"
pattern:
spec:
containers:
- securityContext:
readOnlyRootFilesystem: true
````

View file

@ -1,27 +0,0 @@
# Restrict auto-mount of Service Account tokens
Kubernetes automatically mounts service account credentials in each pod. The service account may be assigned roles allowing pods to access API resources. To restrict access, opt out of auto-mounting tokens by setting `automountServiceAccountToken` to `false`.
## Policy YAML
[restrict_automount_sa_token.yaml](more/restrict_automount_sa_token.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: restrict-automount-sa-token
spec:
validationFailureAction: audit
rules:
- name: validate-automountServiceAccountToken
match:
resources:
kinds:
- Pod
validate:
message: "Auto-mounting of Service Account tokens is not allowed"
pattern:
spec:
automountServiceAccountToken: false
````

View file

@ -1,31 +0,0 @@
# Disallow unknown image registries
Images from unknown registries may not be scanned and secured. Requiring the use of trusted registries helps reduce threat exposure and is considered a common Kubernetes best practice.
This sample policy requires that all images come from either `k8s.gcr.io` or `gcr.io`. You can customize this policy to allow other or different image registries that you trust. Alternatively, you can invert the check to allow images from all other registries except one (or a list) by changing the `image` field to `image: "!k8s.gcr.io"`.
## Policy YAML
[restrict_image_registries.yaml](more/restrict_image_registries.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: restrict-image-registries
spec:
validationFailureAction: audit
rules:
- name: validate-registries
match:
resources:
kinds:
- Pod
validate:
message: "Unknown image registry."
pattern:
spec:
containers:
# Allows images from either k8s.gcr.io or gcr.io.
- image: "k8s.gcr.io/* | gcr.io/*"
````

View file

@ -1,28 +0,0 @@
# Restrict ingress classes
It can be useful to restrict Ingress resources to a set of known ingress classes that are allowed in the cluster. You can customize this policy to allow ingress classes that are configured in the cluster.
## Policy YAML
[restrict_ingress_classes.yaml](more/restrict_ingress_classes.yaml)
````yaml
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: restrict-ingress-classes
spec:
validationFailureAction: audit
rules:
- name: validate-ingress
match:
resources:
kinds:
- Ingress
validate:
message: "Unknown ingress class"
pattern:
metadata:
annotations:
kubernetes.io/ingress.class: "F5 | nginx"
````

Some files were not shown because too many files have changed in this diff Show more