
add kuttl tests (#5204)

- add kuttl tests
- try rekor: {url: "https://rekor.sigstore.dev"}
- add rekor{} object to last two policies (see the sketch below)

Signed-off-by: Chip Zoller <chipzoller@gmail.com>
Chip Zoller 2022-11-04 10:00:31 -04:00 committed by GitHub
parent 42322bae09
commit da18305015
166 changed files with 1727 additions and 13 deletions
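
For context, the rekor object referenced in the commit message sits under the key-based attestor of a verifyImages rule. A minimal sketch of that shape follows; the policy name, image reference, and public key are placeholders and are not taken from this commit:

apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: verify-image-signature   # hypothetical policy name
spec:
  rules:
    - name: check-image
      match:
        any:
          - resources:
              kinds:
                - Pod
      verifyImages:
        - imageReferences:
            - "ghcr.io/example/app:*"   # placeholder image reference
          attestors:
            - entries:
                - keys:
                    publicKeys: |-
                      -----BEGIN PUBLIC KEY-----
                      <signer public key>
                      -----END PUBLIC KEY-----
                    rekor:
                      url: "https://rekor.sigstore.dev"   # the rekor{} object added by this commit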

View file

@ -6,16 +6,24 @@ on:
- 'release*'
jobs:
run-conformace:
run-conformance:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@2541b1294d2704b0964813337f33b291d3f8596b # pin@v3
uses: actions/checkout@93ea575cb5d8a053eaa0ac8fa3b40d7e05a33cc8 # pin@v3.1.0
- name: Unshallow
run: git fetch --prune --unshallow
- name: Setup go
uses: actions/setup-go@268d8c0ca0432bb2cf416faae41297df9d262d7f # pin@v3
uses: actions/setup-go@c4a742cab115ed795e34d4513e2cf7d472deb55f # pin@v3.3.1
with:
go-version: ~1.18.6
- name: Kyverno conformance tests
run: go run ./test/conformance/main.go
- name: Prep environment
run: make kind-create-cluster kind-deploy-kyverno
- name: Wait for Kyverno to start
run: sleep 60
- name: Install kuttl
run: curl -sL https://github.com/kudobuilder/kuttl/releases/download/v0.13.0/kubectl-kuttl_0.13.0_linux_x86_64 -o kuttl && chmod +x kuttl
- name: Test with kuttl
run: ./kuttl test --config ./test/conformance/kuttl/kuttl-test.yaml
# - name: Kyverno conformance tests
# run: go run ./test/conformance/main.go
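
The same flow can be reproduced locally with the repository Makefile targets and the kuttl binary the workflow downloads (a sketch mirroring the steps above; adjust versions and paths as needed):

# Create a kind cluster and deploy Kyverno into it
make kind-create-cluster kind-deploy-kyverno
# Give Kyverno time to become ready (the workflow waits 60 seconds)
sleep 60
# Fetch kuttl v0.13.0 and run the conformance suite
curl -sL https://github.com/kudobuilder/kuttl/releases/download/v0.13.0/kubectl-kuttl_0.13.0_linux_x86_64 -o kuttl
chmod +x kuttl
./kuttl test --config ./test/conformance/kuttl/kuttl-test.yaml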

View file

@ -0,0 +1,8 @@
# An assert file can be a partial representation of an object. What is specified MUST be present for the check to pass and the test to proceed.
# If the specified timeout is reached and the assert does not evaluate to true, the test fails and halts.
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-labels
status:
ready: true

View file

@ -0,0 +1,20 @@
# A file whose name does not contain the reserved words "assert" or "errors" has its contents created in the cluster. A single file may contain multiple YAML documents.
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-labels
spec:
rules:
- name: add-labels
match:
resources:
kinds:
- Pod
- Service
- ConfigMap
- Secret
mutate:
patchStrategicMerge:
metadata:
labels:
foo: bar

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: testingsecret
namespace: default
labels:
foo: bar

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: testingsecret
namespace: default
type: Opaque

View file

@ -0,0 +1,8 @@
# Specifying the kind as `TestStep` enables additional behaviors, such as this delete operation.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: Secret
name: regcred
namespace: bar

View file

@ -0,0 +1,6 @@
### If this resource is found, an error is raised and the test fails. Since no timeout is set for this step, it adopts the global timeout defined in the TestSuite.
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: bar

View file

@ -0,0 +1,5 @@
# A clean-up step is presently required because kuttl does not reliably clean up cluster-scoped objects.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-secret.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,6 @@
# Some Best Practices
* Don't put anything in index `00` so it can be reserved for future use.
* Put clean-up at index `99` so it is always last, no matter how many steps there are.
* The `*-errors.yaml` file, like an `*-assert.yaml` file, only performs an existence check, not a creation check.
* One test can contain both positive and negative cases by extending the test case; there is no need to write separate tests. See the example layout below.
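
As an illustration of these conventions, a typical test directory might look like the following (file names are hypothetical; only the numeric prefixes and the reserved `assert`/`errors` suffixes carry meaning):

01-manifests.yaml   # policy plus any source resources it needs
01-assert.yaml      # waits until the policy reports ready
02-resource.yaml    # trigger resource, e.g. a Namespace or Secret
02-assert.yaml      # positive check: this object MUST exist
03-errors.yaml      # negative check: this object must NOT exist
99-cleanup.yaml     # TestStep that force-deletes everything created above
README.md           # issue link and a short description of the test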

View file

@ -0,0 +1,5 @@
# Title
Issue: 1234
This is a description of your test.

View file

@ -0,0 +1,5 @@
# A `command` entry can only run a single command, not a pipeline or a script. The program called must exist on the system where the test is run. See the sketch below for the `script` alternative.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3
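
When a pipeline or multi-line logic is needed, a `script` entry can be used in place of `command`, as the negative-test steps elsewhere in this commit do; a minimal sketch:

apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
  - script: |
      # pipelines and other shell constructs are allowed inside a script
      kubectl get cm -n default | grep -q some-configmap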

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-secret.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,13 @@
## Checks that the resources in manifests.yaml CANNOT be created successfully. If they can, the test fails because that is incorrect behavior.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- script: |
if kubectl apply -f manifests.yaml
then
echo "Tested failed. Policy was created when it shouldn't have been."
exit 1
else
echo "Test succeeded. Policy was not created as intended."
exit 0
fi

View file

@ -0,0 +1,13 @@
## Checks that there is specific output when creating a manifest
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- script: |
if kubectl apply -f manifests.yaml 2>&1 | grep -q 'clusterRoles'
then
echo "Has clusterRoles."
exit 0
else
echo "Does not have clusterRoles."
exit 1
fi

View file

@ -0,0 +1,3 @@
# Title
Tests in the `cornercases` directory should typically correspond either to a specific Kyverno issue (please provide issue number or link) or a Slack conversation if no issue is logged. These are NOT standard tests for basic functionality but outliers or highly specific/esoteric combinations that have exposed a bug in the past.

View file

@ -0,0 +1,3 @@
# Title
Tests in the `standard` directory should only cover basic functionality of a feature. For testing of specific corner cases addressed as acknowledged bugs, please use the `cornercases` directory.

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-nosync-clone
status:
ready: true

View file

@ -0,0 +1,30 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-nosync-clone
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- Namespace
generate:
apiVersion: v1
kind: Secret
name: regcred
namespace: "{{request.object.metadata.name}}"
synchronize: false
clone:
namespace: default
name: regcred
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: regcred
namespace: default
type: Opaque

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a generate test to ensure a cloned secret shows properly in the new Namespace.

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-nosync-clone
status:
ready: true

View file

@ -0,0 +1,30 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-nosync-clone
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- Namespace
generate:
apiVersion: v1
kind: Secret
name: regcred
namespace: "{{request.object.metadata.name}}"
synchronize: false
clone:
namespace: default
name: regcred
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: regcred
namespace: default
type: Opaque

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,7 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: Secret
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,6 @@
### If this resource is found, an error is raised and the test fails. Since no timeout is set for this step, it adopts the global timeout defined in the TestSuite.
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,7 @@
# Title
This test ensures that deletion of a downstream resource created by a ClusterPolicy `generate` rule with sync disabled using a clone declaration does NOT cause it to be regenerated. If the downstream resource is regenerated, the test fails. If it is not regenerated, the test succeeds.
### Tests that, for a clone rule with sync not enabled, deleting a downstream resource shows it is not recreated.
### Because https://github.com/kyverno/kyverno/issues/4457 is not yet fixed for this type, the test will fail.
### Expected result: fail

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-sync-clone
status:
ready: true

View file

@ -0,0 +1,30 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-sync-clone
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- Namespace
generate:
apiVersion: v1
kind: Secret
name: regcred
namespace: "{{request.object.metadata.name}}"
synchronize: true
clone:
namespace: default
name: regcred
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: regcred
namespace: default
type: Opaque

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a generate test to ensure a cloned secret shows properly in the new Namespace.

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-sync-clone
status:
ready: true

View file

@ -0,0 +1,30 @@
apiVersion: kyverno.io/v2beta1
kind: ClusterPolicy
metadata:
name: cpol-sync-clone
spec:
rules:
- name: clone-secret
match:
any:
- resources:
kinds:
- Namespace
generate:
apiVersion: v1
kind: Secret
name: regcred
namespace: "{{request.object.metadata.name}}"
synchronize: true
clone:
namespace: default
name: regcred
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: regcred
namespace: default
type: Opaque

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,7 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: Secret
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: sleep 3

View file

@ -0,0 +1,6 @@
### If this resource is found, the step should pass. We expect the downstream resource to be recreated.
apiVersion: v1
kind: Secret
metadata:
name: regcred
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,7 @@
# Title
This test ensures that deletion of a downstream resource created by a ClusterPolicy `generate` rule with sync enabled using a clone declaration causes it to be recreated. If the downstream resource is recreated, the test succeeds. If it is not recreated, the test fails.
### Tests that, for a clone rule with sync enabled, deleting a downstream resource shows it is recreated.
### Because https://github.com/kyverno/kyverno/issues/4457 is not yet fixed for this type, the test will fail.
### Expected result: fail

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
status:
ready: true

View file

@ -0,0 +1,35 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
spec:
generateExistingOnPolicyUpdate: true
rules:
- name: k-kafka-address
match:
any:
- resources:
kinds:
- Namespace
exclude:
any:
- resources:
namespaces:
- kube-system
- default
- kube-public
- kyverno
generate:
synchronize: false
apiVersion: v1
kind: ConfigMap
name: zk-kafka-address
namespace: "{{request.object.metadata.name}}"
data:
kind: ConfigMap
metadata:
labels:
somekey: somevalue
data:
ZK_ADDRESS: "192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181"
KAFKA_ADDRESS: "192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092"

View file

@ -0,0 +1,10 @@
apiVersion: v1
data:
KAFKA_ADDRESS: 192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092
ZK_ADDRESS: 192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181
kind: ConfigMap
metadata:
labels:
somekey: somevalue
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,7 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: v1
kind: ConfigMap
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a generate test to ensure that a generate policy using a data declaration with sync disabled creates a downstream ConfigMap for a new Namespace, and that deleting the downstream ConfigMap does not cause it to be recreated.

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
status:
ready: true

View file

@ -0,0 +1,35 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
spec:
generateExistingOnPolicyUpdate: true
rules:
- name: k-kafka-address
match:
any:
- resources:
kinds:
- Namespace
exclude:
any:
- resources:
namespaces:
- kube-system
- default
- kube-public
- kyverno
generate:
synchronize: true
apiVersion: v1
kind: ConfigMap
name: zk-kafka-address
namespace: "{{request.object.metadata.name}}"
data:
kind: ConfigMap
metadata:
labels:
somekey: somevalue
data:
ZK_ADDRESS: "192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181"
KAFKA_ADDRESS: "192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092"

View file

@ -0,0 +1,10 @@
apiVersion: v1
data:
KAFKA_ADDRESS: 192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092
ZK_ADDRESS: 192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181
kind: ConfigMap
metadata:
labels:
somekey: somevalue
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a generate test to ensure a generate policy using a data declaration with sync enabled creates a downstream ConfigMap when matching a new Namespace.

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
status:
ready: true

View file

@ -0,0 +1,35 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
spec:
generateExistingOnPolicyUpdate: true
rules:
- name: k-kafka-address
match:
any:
- resources:
kinds:
- Namespace
exclude:
any:
- resources:
namespaces:
- kube-system
- default
- kube-public
- kyverno
generate:
synchronize: true
apiVersion: v1
kind: ConfigMap
name: zk-kafka-address
namespace: "{{request.object.metadata.name}}"
data:
kind: ConfigMap
metadata:
labels:
somekey: somevalue
data:
ZK_ADDRESS: "192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181"
KAFKA_ADDRESS: "192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092"

View file

@ -0,0 +1,10 @@
apiVersion: v1
data:
KAFKA_ADDRESS: 192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092
ZK_ADDRESS: 192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181
kind: ConfigMap
metadata:
labels:
somekey: somevalue
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,10 @@
apiVersion: v1
data:
KAFKA_ADDRESS: 192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092
ZK_ADDRESS: 192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181
kind: ConfigMap
metadata:
labels:
somekey: somevalue
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,6 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
delete:
- apiVersion: kyverno.io/v1
kind: ClusterPolicy
name: zk-kafka-address

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a generate test to ensure that deleting a generate policy which uses a data declaration with sync enabled also deletes the downstream ConfigMap it created for a new Namespace.

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
status:
ready: true

View file

@ -0,0 +1,35 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
spec:
generateExistingOnPolicyUpdate: true
rules:
- name: k-kafka-address
match:
any:
- resources:
kinds:
- Namespace
exclude:
any:
- resources:
namespaces:
- kube-system
- default
- kube-public
- kyverno
generate:
synchronize: true
apiVersion: v1
kind: ConfigMap
name: zk-kafka-address
namespace: "{{request.object.metadata.name}}"
data:
kind: ConfigMap
metadata:
labels:
somekey: somevalue
data:
ZK_ADDRESS: "192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181"
KAFKA_ADDRESS: "192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092"

View file

@ -0,0 +1,10 @@
apiVersion: v1
data:
KAFKA_ADDRESS: 192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9092
ZK_ADDRESS: 192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181
kind: ConfigMap
metadata:
labels:
somekey: somevalue
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
name: bar

View file

@ -0,0 +1,10 @@
apiVersion: v1
data:
KAFKA_ADDRESS: 192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9999
ZK_ADDRESS: 192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181
kind: ConfigMap
metadata:
labels:
somekey: somevalue
name: zk-kafka-address
namespace: bar

View file

@ -0,0 +1,35 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: zk-kafka-address
spec:
generateExistingOnPolicyUpdate: true
rules:
- name: k-kafka-address
match:
any:
- resources:
kinds:
- Namespace
exclude:
any:
- resources:
namespaces:
- kube-system
- default
- kube-public
- kyverno
generate:
synchronize: true
apiVersion: v1
kind: ConfigMap
name: zk-kafka-address
namespace: "{{request.object.metadata.name}}"
data:
kind: ConfigMap
metadata:
labels:
somekey: somevalue
data:
ZK_ADDRESS: "192.168.10.10:2181,192.168.10.11:2181,192.168.10.12:2181"
KAFKA_ADDRESS: "192.168.10.13:9092,192.168.10.14:9092,192.168.10.15:9999"

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-ns.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a generate test to ensure that, for a generate policy using a data declaration with sync enabled, modifying the policy rule propagates those changes to the downstream ConfigMap.

View file

@ -0,0 +1,22 @@
apiVersion: kuttl.dev/v1beta1
kind: TestSuite
testDirs:
# Generate tests
# - ./generate/clusterpolicy/standard/clone/nosync
- ./test/conformance/kuttl/generate/clusterpolicy/standard/clone/sync
- ./test/conformance/kuttl/generate/clusterpolicy/standard/data/sync
- ./test/conformance/kuttl/generate/clusterpolicy/standard/data/nosync
# Mutate tests
- ./test/conformance/kuttl/mutate/clusterpolicy/standard
- ./test/conformance/kuttl/mutate/clusterpolicy/standard/existing
# Validate tests
- ./test/conformance/kuttl/validate/clusterpolicy/standard/audit
- ./test/conformance/kuttl/validate/clusterpolicy/standard/enforce
# verifyImages tests
- ./test/conformance/kuttl/verifyImages/clusterpolicy/standard
# Report tests
- ./test/conformance/kuttl/reports/admission
- ./test/conformance/kuttl/reports/background
startKIND: false
timeout: 15
parallel: 1
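
With the kuttl binary installed as in the CI workflow (or the `kubectl kuttl` plugin), the suite defined above can be run from the repository root; a sketch:

# Run every directory listed in the TestSuite
./kuttl test --config ./test/conformance/kuttl/kuttl-test.yaml
# Run a single test directory while iterating (kuttl accepts test directories as arguments)
./kuttl test ./test/conformance/kuttl/mutate/clusterpolicy/standard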

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-labels
status:
ready: true

View file

@ -0,0 +1,19 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: add-labels
spec:
rules:
- name: add-labels
match:
resources:
kinds:
- Pod
- Service
- ConfigMap
- Secret
mutate:
patchStrategicMerge:
metadata:
labels:
foo: bar

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: testingsecret
namespace: default
labels:
foo: bar

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: testingsecret
namespace: default
type: Opaque

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml,02-secret.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a basic mutation test.

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: mutate-existing-secret
status:
ready: true

View file

@ -0,0 +1,52 @@
apiVersion: v1
kind: Namespace
metadata:
name: staging
labels:
app-type: corp
annotations:
cloud.platformzero.com/serviceClass: "xl2"
---
apiVersion: v1
data:
foo: bar
kind: ConfigMap
metadata:
name: dictionary-1
namespace: staging
---
apiVersion: v1
data:
foo: YmFy
kind: Secret
metadata:
name: secret-1
namespace: staging
type: Opaque
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: "mutate-existing-secret"
spec:
rules:
- name: "mutate-secret-on-configmap-event"
match:
any:
- resources:
kinds:
- ConfigMap
names:
- dictionary-1
namespaces:
- staging
mutate:
targets:
- apiVersion: v1
kind: Secret
name: secret-1
namespace: "{{ request.object.metadata.namespace }}"
patchStrategicMerge:
metadata:
labels:
foo: bar

View file

@ -0,0 +1,8 @@
apiVersion: v1
data:
foo: bar
dog: dory
kind: ConfigMap
metadata:
name: dictionary-1
namespace: staging

View file

@ -0,0 +1,7 @@
apiVersion: v1
kind: Secret
metadata:
name: secret-1
namespace: staging
labels:
foo: bar

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This is a test for mutation of existing resources.

View file

@ -0,0 +1,89 @@
apiVersion: v1
kind: Namespace
metadata:
name: qa
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: chip
rules:
- apiGroups:
- ""
resources:
- configmaps
verbs:
- get
- list
- watch
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: chip-qa-rolebinding
namespace: qa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: chip
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: chip
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: chip-special-role
namespace: qa
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- create
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: chip-qa-specialrb
namespace: qa
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: chip-special-role
subjects:
- apiGroup: rbac.authorization.k8s.io
kind: User
name: chip
---
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: record-creation-details
spec:
background: false
rules:
- name: add-userinfo
match:
any:
- resources:
kinds:
- ConfigMap
preconditions:
any:
- key: "{{request.operation || 'BACKGROUND'}}"
operator: Equals
value: CREATE
mutate:
patchStrategicMerge:
metadata:
annotations:
kyverno.io/created-by: "{{ request.userInfo | to_string(@) }}"
kyverno.io/roles: "{{ request.roles | sort(@) | to_string(@) }}"
kyverno.io/clusterroles: "{{ request.clusterRoles | sort(@) | to_string(@) }}"

View file

@ -0,0 +1,50 @@
## Runs the identity generation script. This assumes that there is only one entry in the kubeconfig.
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- script: |
#!/bin/bash
set -eu
export USERNAME=chip
export NAMESPACE=qa
export CA=ca.crt
####
#### Get CA certificate from kubeconfig assuming it's the first in the list.
kubectl config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}' | base64 --decode > ca.crt
#### Set CLUSTER_SERVER from kubeconfig assuming it's the first in the list.
CLUSTER_SERVER=$(kubectl config view --raw -o jsonpath='{.clusters[0].cluster.server}')
#### Set CLUSTER from kubeconfig assuming it's the first in the list.
CLUSTER=$(kubectl config view --raw -o jsonpath='{.clusters[0].name}')
#### Generate private key
openssl genrsa -out $USERNAME.key 2048
#### Create CSR
openssl req -new -key $USERNAME.key -out $USERNAME.csr -subj "/O=mygroup/CN=$USERNAME"
#### Send CSR to kube-apiserver for approval
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: $USERNAME
spec:
request: $(cat $USERNAME.csr | base64 | tr -d '\n')
signerName: kubernetes.io/kube-apiserver-client
usages:
- client auth
EOF
#### Approve CSR
kubectl certificate approve $USERNAME
#### Download certificate
kubectl get csr $USERNAME -o jsonpath='{.status.certificate}' | base64 --decode > $USERNAME.crt
####
#### Create the credential object and output the new kubeconfig file
kubectl --kubeconfig=$USERNAME-kubeconfig config set-credentials $USERNAME --client-certificate=$USERNAME.crt --client-key=$USERNAME.key --embed-certs
#### Set the cluster info
kubectl --kubeconfig=$USERNAME-kubeconfig config set-cluster $CLUSTER --server=$CLUSTER_SERVER --certificate-authority=$CA --embed-certs
#### Set the context
kubectl --kubeconfig=$USERNAME-kubeconfig config set-context $USERNAME-$NAMESPACE-$CLUSTER --user=$USERNAME --cluster=$CLUSTER --namespace=$NAMESPACE
#### Use the context
kubectl --kubeconfig=$USERNAME-kubeconfig config use-context $USERNAME-$NAMESPACE-$CLUSTER
### Clean up the approved CSR
kubectl delete certificatesigningrequest chip

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl -n qa create cm foo --from-literal=foo=bar --kubeconfig chip-kubeconfig

View file

@ -0,0 +1,11 @@
apiVersion: v1
data:
foo: bar
kind: ConfigMap
metadata:
annotations:
kyverno.io/clusterroles: '["chip","system:basic-user","system:discovery","system:public-info-viewer"]'
kyverno.io/created-by: '{"groups":["mygroup","system:authenticated"],"username":"chip"}'
kyverno.io/roles: '["qa:chip-special-role"]'
name: foo
namespace: qa

View file

@ -0,0 +1,4 @@
apiVersion: kuttl.dev/v1beta1
kind: TestStep
commands:
- command: kubectl delete -f 01-manifests.yaml --force --wait=true --ignore-not-found=true

View file

@ -0,0 +1,3 @@
# Title
This test verifies that Kyverno correctly picks up and writes the `request.userInfo` information from the AdmissionReview payload, as well as the predefined variables `request.roles` and `request.clusterRoles`, by creating a new user in the system and then performing an action as that user. The expectation is that the custom group and username are both reflected correctly in the mutation. Similar tests exist for validation flows.
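
For a manual spot check outside kuttl, the recorded values can be read back from the ConfigMap the test creates (a hypothetical check, not part of the test steps):

kubectl -n qa get configmap foo -o jsonpath='{.metadata.annotations}'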

View file

@ -0,0 +1,44 @@
#!/bin/bash
set -euo pipefail
export USERNAME=chip
export NAMESPACE=qa
export CA=ca.crt
####
#### Get CA certificate from kubeconfig assuming it's the first in the list.
kubectl config view --raw -o jsonpath='{.clusters[0].cluster.certificate-authority-data}' | base64 --decode > ca.crt
#### Set CLUSTER_SERVER from kubeconfig assuming it's the first in the list.
CLUSTER_SERVER=$(kubectl config view --raw -o jsonpath='{.clusters[0].cluster.server}')
#### Set CLUSTER from kubeconfig assuming it's the first in the list.
CLUSTER=$(kubectl config view --raw -o jsonpath='{.clusters[0].name}')
#### Generate private key
openssl genrsa -out $USERNAME.key 2048
#### Create CSR
openssl req -new -key $USERNAME.key -out $USERNAME.csr -subj "/O=mygroup/CN=$USERNAME"
#### Send CSR to kube-apiserver for approval
cat <<EOF | kubectl apply -f -
apiVersion: certificates.k8s.io/v1
kind: CertificateSigningRequest
metadata:
name: $USERNAME
spec:
request: $(cat $USERNAME.csr | base64 | tr -d '\n')
signerName: kubernetes.io/kube-apiserver-client
usages:
- client auth
EOF
#### Approve CSR
kubectl certificate approve $USERNAME
#### Download certificate
kubectl get csr $USERNAME -o jsonpath='{.status.certificate}' | base64 --decode > $USERNAME.crt
####
#### Create the credential object and output the new kubeconfig file
kubectl --kubeconfig=$USERNAME-kubeconfig config set-credentials $USERNAME --client-certificate=$USERNAME.crt --client-key=$USERNAME.key --embed-certs
#### Set the cluster info
kubectl --kubeconfig=$USERNAME-kubeconfig config set-cluster $CLUSTER --server=$CLUSTER_SERVER --certificate-authority=$CA --embed-certs
#### Set the context
kubectl --kubeconfig=$USERNAME-kubeconfig config set-context $USERNAME-$NAMESPACE-$CLUSTER --user=$USERNAME --cluster=$CLUSTER --namespace=$NAMESPACE
#### Use the context
kubectl --kubeconfig=$USERNAME-kubeconfig config use-context $USERNAME-$NAMESPACE-$CLUSTER
### Clean up the approved CSR
kubectl delete certificatesigningrequest chip

View file

@ -0,0 +1,6 @@
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
name: require-owner
status:
ready: true

Some files were not shown because too many files have changed in this diff.