mirror of https://github.com/kyverno/kyverno.git
synced 2024-12-14 11:57:48 +00:00
chore: remove e2e tests (#5742)
* remove e2e tests

Signed-off-by: ShutingZhao <shuting@nirmata.com>

* fix litmuschaos test

Signed-off-by: ShutingZhao <shuting@nirmata.com>
parent b1ea272567
commit 6dfcac53f2
32 changed files with 17 additions and 5420 deletions
91 .github/workflows/e2e.yaml vendored
@@ -1,91 +0,0 @@
name: e2e-autogen-internals
on:
  push:
    branches:
      - 'main'
      - 'release*'
    paths-ignore:
      - 'README.md'
      - 'docs/**'
      - '.github/config.yml'
  pull_request:
    branches:
      - 'main'
      - 'release*'
    paths-ignore:
      - 'README.md'
      - '.github/config.yml'

permissions: read-all

concurrency:
  group: ${{ github.workflow }}-${{ github.ref }}
  cancel-in-progress: true

jobs:
  e2e-test:
    strategy:
      fail-fast: false
      matrix:
        k8s-version: [v1.24.7, v1.25.3, v1.26.0]
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@755da8c3cf115ac066823e79a1e1788f8940201b # v3.2.0
      - name: Unshallow
        run: git fetch --prune --unshallow
      - name: Set up Go
        uses: actions/setup-go@6edd4406fa81c3da01a34fa6f6343087c207a568 # v3.5.0
        with:
          go-version: ~1.19.4
      - name: Set up Helm
        uses: azure/setup-helm@5119fcb9089d432beecbf79bb2c7915207344b78 # v3.5
        with:
          version: v3.5.0
      - name: Set up chart-testing
        uses: helm/chart-testing-action@afea100a513515fbd68b0e72a7bb0ae34cb62aec # v2.3.1
      - name: Cache Go modules
        uses: actions/cache@9b0c1fce7a93df8e3bb8926b0d6e9d89e92f20a7 # pin@v3
        with:
          path: |
            ~/.cache/go-build
            ~/go/pkg/mod
          key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }}
          restore-keys: |
            ${{ runner.os }}-go-
      - name: Create kind cluster
        run: |
          export KIND_IMAGE=kindest/node:${{ matrix.k8s-version }}
          make kind-create-cluster
      - name: Build and load dev images in kind cluster
        run: make kind-load-all
      - name: Run e2e tests
        run: |
          echo ">>> Install Kyverno"
          make codegen-manifest-e2e
          cat ${GITHUB_WORKSPACE}/.manifest/e2e.yaml \
            | sed -e 's/imagePullPolicy:.*$/imagePullPolicy: IfNotPresent/g' \
            | kubectl apply --server-side -f -
          kubectl apply -f ${GITHUB_WORKSPACE}/config/e2e/rbac.yaml
          echo ">>> Check kyverno"
          chmod a+x ${GITHUB_WORKSPACE}/scripts/verify-deployment.sh
          sleep 50
          kubectl get pods -n kyverno
          ${GITHUB_WORKSPACE}/scripts/verify-deployment.sh -n kyverno kyverno
          sleep 20
          echo ">>> Expose the Kyverno's service's metric server to the host"
          kubectl port-forward svc/kyverno-svc-metrics -n kyverno 8000:8000 &
          echo ">>> Run Kyverno e2e test"
          make test-e2e
          echo ">>> Cleanup"
          kubectl delete -f ${GITHUB_WORKSPACE}/.manifest/e2e.yaml
      - name: Debug failure
        if: failure()
        run: |
          kubectl get mutatingwebhookconfigurations,validatingwebhookconfigurations
          kubectl -n kyverno get pod
          kubectl -n kyverno describe pod | grep -i events -A10
          kubectl -n kyverno logs deploy/kyverno --all-containers -p || true
          kubectl -n kyverno logs deploy/kyverno --all-containers
          kubectl -n kyverno logs deploy/kyverno-cleanup-controller --all-containers -p || true
          kubectl -n kyverno logs deploy/kyverno-cleanup-controller --all-containers
37 Makefile
@@ -443,24 +443,9 @@ codegen-manifest-release: $(HELM) ## Create release manifest
 		| $(SED) -e '/^#.*/d' \
 		> ./.manifest/release.yaml
 
-.PHONY: codegen-manifest-e2e
-codegen-manifest-e2e: $(HELM) ## Create e2e manifest
-	@echo Create e2e manifest... >&2
-	@mkdir -p ./.manifest
-	@$(HELM) template kyverno --namespace kyverno --skip-tests ./charts/kyverno \
-		--set templating.enabled=true \
-		--set templating.version=$(IMAGE_TAG_DEV) \
-		--set cleanupController.image.repository=$(LOCAL_CLEANUP_IMAGE) \
-		--set cleanupController.image.tag=$(IMAGE_TAG_DEV) \
-		--set image.repository=$(LOCAL_KYVERNO_IMAGE) \
-		--set image.tag=$(IMAGE_TAG_DEV) \
-		--set initImage.repository=$(LOCAL_KYVERNOPRE_IMAGE) \
-		--set initImage.tag=$(IMAGE_TAG_DEV) \
-		| $(SED) -e '/^#.*/d' \
-		> ./.manifest/e2e.yaml
-
 .PHONY: codegen-manifest-all
-codegen-manifest-all: codegen-manifest-install codegen-manifest-debug codegen-manifest-release codegen-manifest-e2e ## Create all manifests
+codegen-manifest-all: codegen-manifest-install codegen-manifest-debug codegen-manifest-release ## Create all manifests
 
 .PHONY: codegen-quick
 codegen-quick: codegen-deepcopy-all codegen-crds-all codegen-api-docs codegen-helm-all codegen-manifest-all ## Generate all generated code except client
@@ -548,7 +533,7 @@ CODE_COVERAGE_FILE_TXT := $(CODE_COVERAGE_FILE).txt
 CODE_COVERAGE_FILE_HTML := $(CODE_COVERAGE_FILE).html
 
 .PHONY: test
-test: test-clean test-unit test-e2e ## Clean tests cache then run unit and e2e tests
+test: test-clean test-unit ## Clean tests cache then run unit tests
 
 .PHONY: test-clean
 test-clean: ## Clean tests cache
@@ -627,24 +612,6 @@ test-cli-registry: $(CLI_BIN)
 # Testing & Code-Coverage
 ##################################
 
-# Test E2E
-test-e2e:
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/verifyimages -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/metrics -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/mutate -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/generate -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/validate -v
-
-test-e2e-local:
-	kubectl apply -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/github/rbac.yaml
-	kubectl port-forward -n kyverno service/kyverno-svc-metrics 8000:8000 &
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/verifyimages -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/metrics -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/mutate -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/generate -v
-	E2E=ok K8S_VERSION=$(K8S_VERSION) go test ./test/e2e/validate -v
-	kill $!
-
 helm-test-values:
 	sed -i -e "s|nameOverride:.*|nameOverride: kyverno|g" charts/kyverno/values.yaml
 	sed -i -e "s|fullnameOverride:.*|fullnameOverride: kyverno|g" charts/kyverno/values.yaml
@@ -7,7 +7,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/kyverno/kyverno/test/e2e"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 
 	. "github.com/onsi/ginkgo"
@@ -16,23 +15,23 @@ import (
 
 var (
 	// Namespace GVR
-	nsGVR = e2e.GetGVR("", "v1", "namespaces")
+	nsGVR = GetGVR("", "v1", "namespaces")
 	// Chaos service account GVR
-	saGVR = e2e.GetGVR("", "v1", "serviceaccounts")
+	saGVR = GetGVR("", "v1", "serviceaccounts")
 	// Role GVR
-	rGVR = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "roles")
+	rGVR = GetGVR("rbac.authorization.k8s.io", "v1", "roles")
 	// RoleBinding GVR
-	rbGVR = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "rolebindings")
+	rbGVR = GetGVR("rbac.authorization.k8s.io", "v1", "rolebindings")
 	// PodCPUHogExperiment GVR
-	cpuGVR = e2e.GetGVR("litmuschaos.io", "v1alpha1", "chaosexperiments")
+	cpuGVR = GetGVR("litmuschaos.io", "v1alpha1", "chaosexperiments")
 	// ChaosEngine GVR
-	ceGVR = e2e.GetGVR("litmuschaos.io", "v1alpha1", "chaosengines")
+	ceGVR = GetGVR("litmuschaos.io", "v1alpha1", "chaosengines")
 	// Chaos Result GVR
-	crGVR = e2e.GetGVR("litmuschaos.io", "v1alpha1", "chaosresults")
+	crGVR = GetGVR("litmuschaos.io", "v1alpha1", "chaosresults")
 	// Cluster Policy GVR
-	clPolGVR = e2e.GetGVR("kyverno.io", "v1", "clusterpolicies")
+	clPolGVR = GetGVR("kyverno.io", "v1", "clusterpolicies")
 	// Kyverno disallow_cri_sock_mount Policy GVR
-	dcsmPolGVR = e2e.GetGVR("", "v1", "pods")
+	dcsmPolGVR = GetGVR("", "v1", "pods")
 
 	// ClusterPolicy Namespace
 	clPolNS = ""
@@ -48,7 +47,7 @@ func Test_Pod_CPU_Hog(t *testing.T) {
 	}
 
 	// Generate E2E Client
-	e2eClient, err := e2e.NewE2EClient()
+	e2eClient, err := NewE2EClient()
 	Expect(err).To(BeNil())
 
 	for _, resource := range PodCPUHogTest.TestData {
@@ -59,7 +58,7 @@ func Test_Pod_CPU_Hog(t *testing.T) {
 		By(fmt.Sprintf("Deleting Namespace : %s", nspace))
 		e2eClient.DeleteClusteredResource(nsGVR, nspace) // Clear Namespace
 		e2eClient.DeleteNamespacedResource(dcsmPolGVR, nspace, resource.testResourceName)
-		e2e.GetWithRetry(1*time.Second, 15, func() error { // Wait Till Deletion of Namespace
+		GetWithRetry(1*time.Second, 15, func() error { // Wait Till Deletion of Namespace
 			_, err := e2eClient.GetClusteredResource(nsGVR, nspace)
 			if err != nil {
 				return nil
@@ -71,7 +70,7 @@ func Test_Pod_CPU_Hog(t *testing.T) {
 		By(fmt.Sprintf("Creating Namespace %s", saGVR))
 		_, err = e2eClient.CreateClusteredResourceYaml(nsGVR, LitmusChaosnamespaceYaml)
 		Expect(err).NotTo(HaveOccurred())
-		e2e.GetWithRetry(1*time.Second, 15, func() error { // Wait Till Creation of Namespace
+		GetWithRetry(1*time.Second, 15, func() error { // Wait Till Creation of Namespace
 			_, err := e2eClient.GetClusteredResource(nsGVR, resource.namespace)
 			if err != nil {
 				return err
@@ -101,7 +100,7 @@ func Test_Pod_CPU_Hog(t *testing.T) {
 
 		By(fmt.Sprintf("\nMonitoring status from ChaosResult in %s", nspace))
 
-		e2e.GetWithRetry(1*time.Second, 120, func() error { // Wait Till preparing Chaos engine
+		GetWithRetry(1*time.Second, 120, func() error { // Wait Till preparing Chaos engine
 			chaosresult, err := e2eClient.GetNamespacedResource(crGVR, nspace, "kind-chaos-pod-cpu-hog")
 			if err != nil {
 				return fmt.Errorf("Unable to fatch ChaosResult: %v", err)
@@ -132,8 +131,8 @@ func Test_Pod_CPU_Hog(t *testing.T) {
 		//CleanUp Resources
 		e2eClient.CleanClusterPolicies(clPolGVR) //Clean Cluster Policy
 		e2eClient.CleanClusterPolicies(saGVR)
-		e2eClient.DeleteClusteredResource(nsGVR, nspace) // Clear Namespace
-		e2e.GetWithRetry(1*time.Second, 15, func() error { // Wait Till Deletion of Namespace
+		e2eClient.DeleteClusteredResource(nsGVR, nspace) // Clear Namespace
+		GetWithRetry(1*time.Second, 15, func() error { // Wait Till Deletion of Namespace
 			_, err := e2eClient.GetClusteredResource(nsGVR, nspace)
 			if err != nil {
 				return nil
@@ -1,101 +0,0 @@
package common

import (
	"bytes"
	"fmt"
	"os"
	"strings"
	"time"

	"github.com/blang/semver/v4"
	"github.com/kyverno/kyverno/test/e2e"
)

const defaultTestK8sVersion = "1.21.0"

func CallMetrics() (string, error) {
	requestObj := e2e.APIRequest{
		URL:  "http://localhost:8000/metrics",
		Type: "GET",
	}

	response, err := e2e.CallAPI(requestObj)
	if err != nil {
		return "", err
	}

	buf := new(bytes.Buffer)
	_, err = buf.ReadFrom(response.Body)
	if err != nil {
		return "", err
	}

	newStr := buf.String()
	return newStr, nil
}

func GetKubernetesVersion() semver.Version {
	ver, err := semver.Parse(os.Getenv("K8S_VERSION"))
	if err != nil {
		return semver.MustParse(defaultTestK8sVersion)
	}
	return ver
}

// ProcessMetrics checks the metrics log and identify if the policy is added in cache or not
func ProcessMetrics(newStr, e2ePolicyName string) error {
	splitByNewLine := strings.Split(newStr, "\n")
	for _, lineSplitByNewLine := range splitByNewLine {
		// kyverno_policy_rule_info_total{policy_background_mode=\"false\",policy_name=\"gen-cluster-policy\",policy_namespace=\"-\",policy_type=\"cluster\",policy_validation_mode=\"audit\",rule_name=\"gen-cluster-role\",rule_type=\"generate\",status_ready="false"} 1
		if !strings.HasPrefix(lineSplitByNewLine, "kyverno_policy_rule_info_total{") {
			continue
		}

		if !strings.HasSuffix(lineSplitByNewLine, "} 1") {
			continue
		}

		splitByComma := strings.Split(lineSplitByNewLine, ",")
		for _, lineSplitByComma := range splitByComma {
			if strings.HasPrefix(lineSplitByComma, "policy_name=") {
				splitByQuote := strings.Split(lineSplitByComma, "\"")
				policyName := splitByQuote[1]
				if policyName != e2ePolicyName {
					continue
				}
			}
			if strings.HasPrefix(lineSplitByComma, "status_ready=") {
				splitByQuote := strings.Split(lineSplitByComma, "\"")
				status := splitByQuote[1]
				if status == "true" {
					return nil
				}
			}
		}
	}

	return fmt.Errorf("policy %s not found in metrics %s", e2ePolicyName, newStr)
}

func PolicyCreated(policyName string) error {
	return e2e.GetWithRetry(1*time.Second, 60, checkPolicyCreated(policyName))
}

func checkPolicyCreated(policyName string) func() error {
	return func() error {
		var metricsString string
		metricsString, err := CallMetrics()
		if err != nil {
			return fmt.Errorf("failed to get metrics: %v", err)
		}

		err = ProcessMetrics(metricsString, policyName)
		if err != nil {
			return fmt.Errorf("policy not created: %v", err)
		}

		// Wait to make sure that the Policy is ready.
		time.Sleep(2 * time.Second)
		return nil
	}
}
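To make the metric-parsing contract concrete, here is a minimal sketch of how ProcessMetrics judges one exporter line. The sample line is adapted from the comment in the deleted code above, with status_ready flipped to "true"; the example itself is illustrative and not code from the repository:

package common

import "fmt"

// Example_processMetrics feeds ProcessMetrics a single line in the
// kyverno_policy_rule_info_total format. The line carries the matching
// policy_name and status_ready="true", so ProcessMetrics returns nil.
func Example_processMetrics() {
	sample := `kyverno_policy_rule_info_total{policy_background_mode="false",policy_name="gen-cluster-policy",policy_namespace="-",policy_type="cluster",policy_validation_mode="audit",rule_name="gen-cluster-role",rule_type="generate",status_ready="true"} 1`
	fmt.Println(ProcessMetrics(sample, "gen-cluster-policy") == nil)
	// Output: true
}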
@@ -1,93 +0,0 @@
package client

import (
	"fmt"
	"testing"
	"time"

	"github.com/kyverno/kyverno/test/e2e"
	"github.com/kyverno/kyverno/test/e2e/framework/id"
	"github.com/kyverno/kyverno/test/e2e/framework/resource"
	"github.com/kyverno/kyverno/test/e2e/framework/utils"
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

type Client interface {
	CreateResource(resource.Resource) *unstructured.Unstructured
	GetResource(id.Id) *unstructured.Unstructured
	DeleteResource(id.Id)
}

type client struct {
	t      *testing.T
	client *e2e.E2EClient
}

func New(t *testing.T) Client {
	t.Helper()
	c, err := e2e.NewE2EClient()
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	return &client{t, c}
}

func (c *client) CreateResource(resource resource.Resource) *unstructured.Unstructured {
	u := resource.Unstructured()
	ginkgo.By(fmt.Sprintf("Creating %s : %s", resource.Gvr(), utils.Key(u)))
	var err error
	if u.GetNamespace() != "" {
		u, err = c.client.CreateNamespacedResource(resource.Gvr(), u.GetNamespace(), u)
	} else {
		u, err = c.client.CreateClusteredResource(resource.Gvr(), u)
	}
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	c.t.Cleanup(func() {
		c.DeleteResource(id.New(resource.Gvr(), u.GetNamespace(), u.GetName()))
	})
	return u
}

func (c *client) DeleteResource(id id.Id) {
	ginkgo.By(fmt.Sprintf("Deleting %s : %s", id.GetGvr(), utils.Key(id)))
	var err error
	if id.IsClustered() {
		err = c.client.DeleteClusteredResource(id.GetGvr(), id.GetName())
	} else {
		err = c.client.DeleteNamespacedResource(id.GetGvr(), id.GetNamespace(), id.GetName())
	}
	if !apierrors.IsNotFound(err) {
		err = e2e.GetWithRetry(1*time.Second, 15, func() error {
			if id.IsClustered() {
				_, err = c.client.GetClusteredResource(id.GetGvr(), id.GetName())
			} else {
				_, err = c.client.GetNamespacedResource(id.GetGvr(), id.GetNamespace(), id.GetName())
			}
			if err == nil {
				return fmt.Errorf("resource still exists: %s, %s", id.GetGvr(), utils.Key(id))
			}
			if apierrors.IsNotFound(err) {
				return nil
			}
			return err
		})
	}
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
}

func (c *client) GetResource(id id.Id) *unstructured.Unstructured {
	ginkgo.By(fmt.Sprintf("Getting %s : %s", id.GetGvr(), utils.Key(id)))
	var u *unstructured.Unstructured
	err := e2e.GetWithRetry(1*time.Second, 15, func() error {
		var err error
		if id.IsClustered() {
			u, err = c.client.GetClusteredResource(id.GetGvr(), id.GetName())
		} else {
			u, err = c.client.GetNamespacedResource(id.GetGvr(), id.GetNamespace(), id.GetName())
		}
		return err
	})
	gomega.Expect(err).NotTo(gomega.HaveOccurred())
	return u
}
@@ -1,36 +0,0 @@
package framework

import (
	"os"
	"testing"

	"github.com/kyverno/kyverno/test/e2e/framework/client"
	"github.com/kyverno/kyverno/test/e2e/framework/step"
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func Setup(t *testing.T) {
	t.Helper()
	gomega.RegisterTestingT(t)
	if os.Getenv("E2E") == "" {
		t.Skip("Skipping E2E Test")
	}
}

func RunTest(t *testing.T, steps ...step.Step) {
	t.Helper()
	ginkgo.By("Creating client ...")
	client := client.New(t)
	for _, step := range steps {
		step(client)
	}
	ginkgo.By("Cleaning up ...")
}

func RunSubTest(t *testing.T, name string, steps ...step.Step) {
	t.Helper()
	t.Run(name, func(t *testing.T) {
		RunTest(t, steps...)
	})
}
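Taken together, Setup and RunTest let a test file stay declarative: Setup gates execution on the E2E environment variable, and RunTest threads one client through an ordered list of steps. A minimal sketch of how a caller might have composed the framework deleted above (the test name and policy manifest are placeholders, not repository code):

package example_test

import (
	"testing"

	"github.com/kyverno/kyverno/test/e2e/framework"
	"github.com/kyverno/kyverno/test/e2e/framework/id"
	"github.com/kyverno/kyverno/test/e2e/framework/step"
)

// policyYaml stands in for a real ClusterPolicy manifest.
var policyYaml = []byte(`...`)

func Test_Example(t *testing.T) {
	framework.Setup(t) // registers gomega and skips unless E2E is set
	framework.RunTest(t,
		step.CreateClusterPolicy(policyYaml),
		step.By("Waiting for generation ..."),
		step.ExpectResource(id.ClusterRole("generated-role")),
	)
}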
@@ -1,14 +0,0 @@
package gvr

import "github.com/kyverno/kyverno/test/e2e"

var (
	ClusterPolicy      = e2e.GetGVR("kyverno.io", "v1", "clusterpolicies")
	Namespace          = e2e.GetGVR("", "v1", "namespaces")
	ClusterRole        = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "clusterroles")
	ClusterRoleBinding = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "clusterrolebindings")
	Role               = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "roles")
	RoleBinding        = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "rolebindings")
	ConfigMap          = e2e.GetGVR("", "v1", "configmaps")
	NetworkPolicy      = e2e.GetGVR("networking.k8s.io", "v1", "networkpolicies")
)
@@ -1,26 +0,0 @@
package id

import (
	"github.com/kyverno/kyverno/test/e2e/framework/gvr"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func Clustered(gvr schema.GroupVersionResource, name string) Id {
	return New(gvr, "", name)
}

func ClusterPolicy(name string) Id {
	return Clustered(gvr.ClusterPolicy, name)
}

func ClusterRole(name string) Id {
	return Clustered(gvr.ClusterRole, name)
}

func ClusterRoleBinding(name string) Id {
	return Clustered(gvr.ClusterRoleBinding, name)
}

func Namespace(name string) Id {
	return Clustered(gvr.Namespace, name)
}
@@ -1,18 +0,0 @@
package id

import (
	"k8s.io/apimachinery/pkg/runtime/schema"
)

type Id struct {
	gvr  schema.GroupVersionResource
	ns   string
	name string
}

func New(gvr schema.GroupVersionResource, ns, name string) Id { return Id{gvr, ns, name} }
func (r Id) GetGvr() schema.GroupVersionResource              { return r.gvr }
func (r Id) GetNamespace() string                             { return r.ns }
func (r Id) GetName() string                                  { return r.name }
func (r Id) IsClustered() bool                                { return r.ns == "" }
func (r Id) IsNamespaced() bool                               { return !r.IsClustered() }
@@ -1,26 +0,0 @@
package resource

import (
	"github.com/kyverno/kyverno/test/e2e/framework/gvr"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func Clustered(gvr schema.GroupVersionResource, raw []byte) Resource {
	return Resource{gvr, "", raw}
}

func ClusterPolicy(raw []byte) Resource {
	return Clustered(gvr.ClusterPolicy, raw)
}

func ClusterRole(raw []byte) Resource {
	return Clustered(gvr.ClusterRole, raw)
}

func ClusterRoleBinding(raw []byte) Resource {
	return Clustered(gvr.ClusterRoleBinding, raw)
}

func Namespace(raw []byte) Resource {
	return Clustered(gvr.Namespace, raw)
}
@@ -1,22 +0,0 @@
package resource

import (
	"github.com/kyverno/kyverno/test/e2e/framework/gvr"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func Namespaced(gvr schema.GroupVersionResource, ns string, raw []byte) Resource {
	return Resource{gvr, ns, raw}
}

func Role(ns string, raw []byte) Resource {
	return Namespaced(gvr.Role, ns, raw)
}

func RoleBinding(ns string, raw []byte) Resource {
	return Namespaced(gvr.RoleBinding, ns, raw)
}

func ConfigMap(ns string, raw []byte) Resource {
	return Namespaced(gvr.ConfigMap, ns, raw)
}
@@ -1,32 +0,0 @@
package resource

import (
	"github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"k8s.io/apimachinery/pkg/runtime/schema"
	"sigs.k8s.io/yaml"
)

type Resource struct {
	gvr  schema.GroupVersionResource
	ns   string
	data []byte
}

func (r *Resource) Gvr() schema.GroupVersionResource { return r.gvr }
func (r *Resource) Namespace() string                { return r.ns }
func (r *Resource) Data() []byte                     { return r.data }
func (r *Resource) IsClustered() bool                { return r.ns == "" }
func (r *Resource) IsNamespaced() bool               { return !r.IsClustered() }

func (r *Resource) Unstructured() *unstructured.Unstructured {
	var u unstructured.Unstructured
	gomega.Expect(yaml.Unmarshal(r.data, &u)).To(gomega.Succeed())
	// TODO: set namespace ?
	// TODO: ensure GV(R/K) ?
	return &u
}

func Resources(resources ...Resource) []Resource {
	return resources
}
@@ -1,12 +0,0 @@
package step

import (
	"github.com/kyverno/kyverno/test/e2e/framework/client"
	"github.com/onsi/ginkgo"
)

func By(message string) Step {
	return func(client.Client) {
		ginkgo.By(message)
	}
}
@@ -1,23 +0,0 @@
package step

import (
	"fmt"

	"github.com/kyverno/kyverno/test/e2e/framework/client"
	"github.com/kyverno/kyverno/test/e2e/framework/id"
	"github.com/kyverno/kyverno/test/e2e/framework/utils"
	"github.com/onsi/ginkgo"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

type ResourceExpectation func(*unstructured.Unstructured)

func ExpectResource(id id.Id, expectations ...ResourceExpectation) Step {
	return func(client client.Client) {
		ginkgo.By(fmt.Sprintf("Checking resource expectations (%s : %s) ...", id.GetGvr(), utils.Key(id)))
		resource := client.GetResource(id)
		for _, expectation := range expectations {
			expectation(resource)
		}
	}
}
@@ -1,17 +0,0 @@
package step

import (
	"github.com/kyverno/kyverno/test/e2e/common"
	"github.com/kyverno/kyverno/test/e2e/framework/client"
	"github.com/kyverno/kyverno/test/e2e/framework/resource"
	"github.com/onsi/ginkgo"
	"github.com/onsi/gomega"
)

func CreateClusterPolicy(data []byte) Step {
	return func(client client.Client) {
		ginkgo.By("Creating cluster policy ...")
		policy := client.CreateResource(resource.ClusterPolicy(data))
		gomega.Expect(common.PolicyCreated(policy.GetName())).To(gomega.Succeed())
	}
}
@@ -1,7 +0,0 @@
package step

import (
	"github.com/kyverno/kyverno/test/e2e/framework/client"
)

type Step func(client.Client)
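Because Step is just func(client.Client), tests could also define ad-hoc steps inline. For example, a hypothetical pause step (a sketch under that assumption; the deleted framework did not ship such a helper):

package step

import (
	"time"

	"github.com/kyverno/kyverno/test/e2e/framework/client"
)

// sleep returns a Step that blocks for d before the next step runs.
// Illustrative only, not repository code.
func sleep(d time.Duration) Step {
	return func(client.Client) {
		time.Sleep(d)
	}
}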
@@ -1,14 +0,0 @@
package utils

type Named interface {
	GetNamespace() string
	GetName() string
}

func Key(obj Named) string {
	n, ns := obj.GetName(), obj.GetNamespace()
	if ns == "" {
		return n
	}
	return ns + "/" + n
}
@@ -1,317 +0,0 @@
package generate

import (
	"time"

	"github.com/kyverno/kyverno/test/e2e"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
	"sigs.k8s.io/yaml"
)

var (
	// Cluster Policy GVR
	clPolGVR = e2e.GetGVR("kyverno.io", "v1", "clusterpolicies")

	// Namespace GVR
	nsGVR = e2e.GetGVR("", "v1", "namespaces")

	// ClusterRole GVR
	crGVR = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "clusterroles")

	// ClusterRoleBinding GVR
	crbGVR = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "clusterrolebindings")

	// Role GVR
	rGVR = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "roles")

	// RoleBinding GVR
	rbGVR = e2e.GetGVR("rbac.authorization.k8s.io", "v1", "rolebindings")

	// ConfigMap GVR
	cmGVR = e2e.GetGVR("", "v1", "configmaps")

	// Secret GVR
	secretGVR = e2e.GetGVR("", "v1", "secrets")

	// NetworkPolicy GVR
	npGVR = e2e.GetGVR("networking.k8s.io", "v1", "networkpolicies")
)

type testCase struct {
	// TestName - Name of the Test
	TestName string
	// ClusterPolicy - ClusterPolicy yaml file
	ClusterPolicy resource
	// SourceResources - Source resources yaml files
	SourceResources []resource
	// TriggerResource - Trigger resource yaml files
	TriggerResource resource
	// ExpectedResources - Expected resources to pass the test
	ExpectedResources []expectedResource
	// Steps - Test case steps
	Steps []testCaseStep
}

// roleTests is E2E Test Config for Role and RoleBinding
// TODO:- Clone for Role and RoleBinding
var roleTests = []testCase{
	{
		TestName:        "test-role-rolebinding-without-clone",
		ClusterPolicy:   clusterPolicy(roleRoleBindingYamlWithSync),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idRole("test", "ns-role")),
			expectation(idRoleBinding("test", "ns-role-binding")),
		),
	},
	{
		TestName:        "test-role-rolebinding-withsync-without-clone",
		ClusterPolicy:   clusterPolicy(roleRoleBindingYamlWithSync),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idRole("test", "ns-role")),
			expectation(idRoleBinding("test", "ns-role-binding")),
		),
	},
	{
		TestName:      "test-role-rolebinding-with-clone",
		ClusterPolicy: clusterPolicy(roleRoleBindingYamlWithClone),
		SourceResources: resources(
			role("default", sourceRoleYaml),
			roleBinding("default", sourceRoleBindingYaml),
		),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idRole("test", "ns-role")),
			expectation(idRoleBinding("test", "ns-role-binding")),
		),
	},
}

// clusterRoleTests - E2E Test Config for ClusterRole and ClusterRoleBinding
var clusterRoleTests = []testCase{
	{
		TestName:        "test-clusterrole-clusterrolebinding-without-clone",
		ClusterPolicy:   clusterPolicy(genClusterRoleYamlWithSync),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idClusterRole("ns-cluster-role")),
			expectation(idClusterRoleBinding("ns-cluster-role-binding")),
		),
	},
	{
		TestName:        "test-clusterrole-clusterrolebinding-with-sync-without-clone",
		ClusterPolicy:   clusterPolicy(genClusterRoleYamlWithSync),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idClusterRole("ns-cluster-role")),
			expectation(idClusterRoleBinding("ns-cluster-role-binding")),
		),
	},
	{
		TestName:      "test-clusterrole-clusterrolebinding-with-sync-with-clone",
		ClusterPolicy: clusterPolicy(clusterRoleRoleBindingYamlWithClone),
		SourceResources: resources(
			clusterRole(baseClusterRoleData),
			clusterRoleBinding(baseClusterRoleBindingData),
		),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idClusterRole("cloned-cluster-role")),
			expectation(idClusterRoleBinding("cloned-cluster-role-binding")),
		),
	},
}

// networkPolicyGenerateTests - E2E Test Config for networkPolicyGenerateTests
var networkPolicyGenerateTests = []testCase{
	{
		TestName:        "test-generate-policy-for-namespace-with-label",
		ClusterPolicy:   clusterPolicy(genNetworkPolicyYaml),
		TriggerResource: namespace(namespaceWithLabelYaml),
		ExpectedResources: expectations(
			expectation(idNetworkPolicy("test", "allow-dns")),
		),
	},
}

var generateNetworkPolicyOnNamespaceWithoutLabelTests = []testCase{
	{
		TestName:        "test-generate-policy-for-namespace-label-actions",
		ClusterPolicy:   clusterPolicy(genNetworkPolicyYaml),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idNetworkPolicy("test", "allow-dns")),
		),
		Steps: []testCaseStep{
			stepResourceNotFound(npGVR, "test", "allow-dns"),
			stepUpateResource(nsGVR, "", "test", func(resource *unstructured.Unstructured) error {
				element, _, err := unstructured.NestedMap(resource.UnstructuredContent(), "metadata", "labels")
				if err != nil {
					return err
				}
				element["security"] = "standard"
				return unstructured.SetNestedMap(resource.UnstructuredContent(), element, "metadata", "labels")
			}),
			stepExpectResource(npGVR, "test", "allow-dns"),
			stepUpateResource(clPolGVR, "", "add-networkpolicy", func(resource *unstructured.Unstructured) error {
				return yaml.Unmarshal(updatGenNetworkPolicyYaml, resource)
			}),
			stepWaitResource(npGVR, "test", "allow-dns", time.Second, 30, func(resource *unstructured.Unstructured) bool {
				element, found, err := unstructured.NestedMap(resource.UnstructuredContent(), "spec")
				if err != nil || !found {
					return false
				}
				return loopElement(false, element)
			}),
		},
	},
}

// NetworkPolicyGenerateTests - E2E Test Config for NetworkPolicyGenerateTests
var generateSynchronizeFlagTests = []testCase{
	{
		TestName:        "test-generate-policy-for-namespace-with-label",
		ClusterPolicy:   clusterPolicy(genNetworkPolicyYaml),
		TriggerResource: namespace(namespaceWithLabelYaml),
		// expectation is resource should no longer exists once deleted
		// if sync is set to false

		Steps: []testCaseStep{
			stepBy("When synchronize flag is set to true in the policy and someone deletes the generated resource, kyverno generates back the resource"),
			stepDeleteResource(npGVR, "test", "allow-dns"),
			stepExpectResource(npGVR, "test", "allow-dns"),
			stepBy("Change synchronize to false in the policy, the label in generated resource should be updated to policy.kyverno.io/synchronize: disable"),
			stepUpateResource(clPolGVR, "", "add-networkpolicy", func(resource *unstructured.Unstructured) error {
				return yaml.Unmarshal(updateSynchronizeInGeneratePolicyYaml, resource)
			}),
			stepWaitResource(npGVR, "test", "allow-dns", time.Second, 30, func(resource *unstructured.Unstructured) bool {
				labels := resource.GetLabels()
				return labels["policy.kyverno.io/synchronize"] == "disable"
			}),
			stepBy("With synchronize is false, one should be able to delete the generated resource"),
			stepDeleteResource(npGVR, "test", "allow-dns"),
			stepResourceNotFound(npGVR, "test", "allow-dns"),
		},
	},
}

// ClusterRoleTests - E2E Test Config for ClusterRole and ClusterRoleBinding
var sourceResourceUpdateReplicationTests = []testCase{
	{
		TestName:      "test-clone-source-resource-update-replication",
		ClusterPolicy: clusterPolicy(genCloneConfigMapPolicyYaml),
		SourceResources: resources(
			configMap("default", cloneSourceResource),
		),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idConfigMap("test", "game-demo")),
		),
		Steps: []testCaseStep{
			stepBy("When a source clone resource is updated, the same changes should be replicated in the generated resource"),
			stepUpateResource(cmGVR, "default", "game-demo", func(resource *unstructured.Unstructured) error {
				element, _, err := unstructured.NestedMap(resource.UnstructuredContent(), "data")
				if err != nil {
					return err
				}
				element["initial_lives"] = "5"
				return unstructured.SetNestedMap(resource.UnstructuredContent(), element, "data")
			}),
			stepWaitResource(cmGVR, "test", "game-demo", time.Second, 15, func(resource *unstructured.Unstructured) bool {
				element, found, err := unstructured.NestedMap(resource.UnstructuredContent(), "data")
				if err != nil || !found {
					return false
				}
				return element["initial_lives"] == "5"
			}),
			stepBy("When a generated resource is edited with some conflicting changes (with respect to the clone source resource or generate data), kyverno will regenerate the resource"),
			stepUpateResource(cmGVR, "test", "game-demo", func(resource *unstructured.Unstructured) error {
				element, _, err := unstructured.NestedMap(resource.UnstructuredContent(), "data")
				if err != nil {
					return err
				}
				element["initial_lives"] = "15"
				return unstructured.SetNestedMap(resource.UnstructuredContent(), element, "data")
			}),
			stepWaitResource(cmGVR, "test", "game-demo", time.Second, 30, func(resource *unstructured.Unstructured) bool {
				element, found, err := unstructured.NestedMap(resource.UnstructuredContent(), "data")
				if err != nil || !found {
					return false
				}
				return element["initial_lives"] == "5"
			}),
		},
	},
}

var generatePolicyDeletionforCloneTests = []testCase{
	{
		TestName:      "test-clone-source-resource-update-replication",
		ClusterPolicy: clusterPolicy(genCloneConfigMapPolicyYaml),
		SourceResources: resources(
			configMap("default", cloneSourceResource),
		),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idConfigMap("test", "game-demo")),
		),
		Steps: []testCaseStep{
			stepBy("delete policy -> generated resource still exists"),
			stepDeleteResource(clPolGVR, "", "generate-policy"),
			stepExpectResource(cmGVR, "test", "game-demo"),
			stepBy("update source -> generated resource not updated"),
			stepUpateResource(cmGVR, "default", "game-demo", func(resource *unstructured.Unstructured) error {
				element, _, err := unstructured.NestedMap(resource.UnstructuredContent(), "data")
				if err != nil {
					return err
				}
				element["initial_lives"] = "5"
				return unstructured.SetNestedMap(resource.UnstructuredContent(), element, "data")
			}),
			stepExpectResource(cmGVR, "test", "game-demo", func(resource *unstructured.Unstructured) {
				element, _, err := unstructured.NestedMap(resource.UnstructuredContent(), "data")
				Expect(err).NotTo(HaveOccurred())
				Expect(element["initial_lives"]).To(Equal("2"))
			}),
			stepBy("deleted source -> generated resource not deleted"),
			stepDeleteResource(cmGVR, "default", "game-demo"),
			stepExpectResource(cmGVR, "test", "game-demo"),
		},
	},
}

var generatePolicyMultipleCloneTests = []testCase{
	{
		TestName:      "test-multiple-clone-resources",
		ClusterPolicy: clusterPolicy(genMultipleClonePolicyYaml),
		SourceResources: resources(
			configMap("default", cloneSourceResource),
			secret("default", cloneSecretSourceResource),
		),
		TriggerResource: namespace(namespaceYaml),
		ExpectedResources: expectations(
			expectation(idConfigMap("test", "game-demo")),
			expectation(idSecret("test", "secret-basic-auth")),
		),
		Steps: []testCaseStep{
			stepExpectResource(cmGVR, "test", "game-demo"),
			stepBy("verify generated resource data in configMap"),
			stepExpectResource(cmGVR, "test", "game-demo", func(resource *unstructured.Unstructured) {
				element, _, err := unstructured.NestedMap(resource.UnstructuredContent(), "data")
				Expect(err).NotTo(HaveOccurred())
				Expect(element["initial_lives"]).To(Equal("2"))
			}),

			stepBy("verify generated resource data in secret"),
			stepExpectResource(secretGVR, "test", "secret-basic-auth"),

			stepBy("deleted source -> generated resource not deleted"),
			stepDeleteResource(cmGVR, "default", "game-demo"),
			stepDeleteResource(secretGVR, "default", "secret-basic-auth"),
			stepExpectResource(cmGVR, "test", "game-demo"),
			stepExpectResource(secretGVR, "test", "secret-basic-auth"),
		},
	},
}
@@ -1,116 +0,0 @@
package generate

import (
	"fmt"
	"testing"
	"time"

	commonE2E "github.com/kyverno/kyverno/test/e2e/common"

	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
)

func runTestCases(t *testing.T, testCases ...testCase) {
	setup(t)

	for _, test := range testCases {
		t.Run(test.TestName, func(t *testing.T) {
			e2eClient := createClient()

			t.Cleanup(func() {
				deleteResources(e2eClient, test.ExpectedResources...)
			})

			// sanity check
			By("Verifying expected resources do not exist yet in the cluster ...")
			expectResourcesNotFound(e2eClient, test.ExpectedResources...)

			// create source resources
			if len(test.SourceResources) > 0 {
				By("Creating source resources ...")
				createResources(t, e2eClient, test.SourceResources...)
			}

			// create policy
			By("Creating cluster policy ...")
			policy := createResource(t, e2eClient, test.ClusterPolicy)
			Expect(commonE2E.PolicyCreated(policy.GetName())).To(Succeed())

			// create trigger
			By("Creating trigger resource ...")
			createResource(t, e2eClient, test.TriggerResource)

			time.Sleep(time.Second * 5)

			for _, step := range test.Steps {
				Expect(step(e2eClient)).To(Succeed())
			}

			// verify expected resources
			By("Verifying resource expectations ...")
			expectResources(e2eClient, test.ExpectedResources...)
		})
	}
}

func Test_ClusterRole_ClusterRoleBinding_Sets(t *testing.T) {
	runTestCases(t, clusterRoleTests...)
}

func Test_Role_RoleBinding_Sets(t *testing.T) {
	runTestCases(t, roleTests...)
}

func Test_Generate_NetworkPolicy(t *testing.T) {
	runTestCases(t, networkPolicyGenerateTests...)
}

func Test_Generate_Namespace_Label_Actions(t *testing.T) {
	runTestCases(t, generateNetworkPolicyOnNamespaceWithoutLabelTests...)
}

func loopElement(found bool, elementObj interface{}) bool {
	if found == true {
		return found
	}
	switch typedelementObj := elementObj.(type) {
	case map[string]interface{}:
		for k, v := range typedelementObj {
			if k == "protocol" {
				if v == "TCP" {
					found = true
					return found
				}
			} else {
				found = loopElement(found, v)
			}
		}
	case []interface{}:
		found = loopElement(found, typedelementObj[0])
	case string:
		return found
	case int64:
		return found
	default:
		fmt.Println("unexpected type :", fmt.Sprintf("%T", elementObj))
		return found
	}
	return found
}

func Test_Generate_Synchronize_Flag(t *testing.T) {
	runTestCases(t, generateSynchronizeFlagTests...)
}

func Test_Source_Resource_Update_Replication(t *testing.T) {
	runTestCases(t, sourceResourceUpdateReplicationTests...)
}

func Test_Generate_Policy_Deletion_for_Clone(t *testing.T) {
	runTestCases(t, generatePolicyDeletionforCloneTests...)
}

func Test_Generate_Multiple_Clone(t *testing.T) {
	runTestCases(t, generatePolicyMultipleCloneTests...)
}
@ -1,391 +0,0 @@
|
|||
package generate
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/kyverno/kyverno/test/e2e"
|
||||
. "github.com/onsi/ginkgo"
|
||||
. "github.com/onsi/gomega"
|
||||
apierrors "k8s.io/apimachinery/pkg/api/errors"
|
||||
"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
|
||||
"k8s.io/apimachinery/pkg/runtime/schema"
|
||||
"sigs.k8s.io/yaml"
|
||||
)
|
||||
|
||||
type resource struct {
|
||||
gvr schema.GroupVersionResource
|
||||
ns string
|
||||
raw []byte
|
||||
}
|
||||
|
||||
func clustered(gvr schema.GroupVersionResource, raw []byte) resource { return resource{gvr, "", raw} }
|
||||
func namespaced(gvr schema.GroupVersionResource, ns string, raw []byte) resource {
|
||||
return resource{gvr, ns, raw}
|
||||
}
|
||||
func resources(resources ...resource) []resource { return resources }
|
||||
func role(ns string, raw []byte) resource { return namespaced(rGVR, ns, raw) }
|
||||
func roleBinding(ns string, raw []byte) resource { return namespaced(rbGVR, ns, raw) }
|
||||
func configMap(ns string, raw []byte) resource { return namespaced(cmGVR, ns, raw) }
|
||||
func secret(ns string, raw []byte) resource { return namespaced(secretGVR, ns, raw) }
|
||||
func clusterPolicy(raw []byte) resource { return clustered(clPolGVR, raw) }
|
||||
func clusterRole(raw []byte) resource { return clustered(crGVR, raw) }
|
||||
func clusterRoleBinding(raw []byte) resource { return clustered(crbGVR, raw) }
|
||||
func namespace(raw []byte) resource { return clustered(nsGVR, raw) }
|
||||
|
||||
type _id struct {
|
||||
gvr schema.GroupVersionResource
|
||||
ns string
|
||||
name string
|
||||
}
|
||||
|
||||
func id(gvr schema.GroupVersionResource, ns string, name string) _id {
|
||||
return _id{gvr, ns, name}
|
||||
}
|
||||
|
||||
func idRole(ns, name string) _id { return id(rGVR, ns, name) }
|
||||
func idRoleBinding(ns, name string) _id { return id(rbGVR, ns, name) }
|
||||
func idConfigMap(ns, name string) _id { return id(cmGVR, ns, name) }
|
||||
func idSecret(ns, name string) _id { return id(secretGVR, ns, name) }
|
||||
func idNetworkPolicy(ns, name string) _id { return id(npGVR, ns, name) }
|
||||
func idClusterRole(name string) _id { return id(crGVR, "", name) }
|
||||
func idClusterRoleBinding(name string) _id { return id(crbGVR, "", name) }
|
||||
|
||||
type resourceExpectation func(resource *unstructured.Unstructured)
|
||||
|
||||
type expectedResource struct {
|
||||
_id
|
||||
validate []resourceExpectation
|
||||
}
|
||||
|
||||
func expected(gvr schema.GroupVersionResource, ns string, name string, validate ...resourceExpectation) expectedResource {
|
||||
return expectedResource{id(gvr, ns, name), validate}
|
||||
}
|
||||
|
||||
func expectations(expectations ...expectedResource) []expectedResource {
|
||||
return expectations
|
||||
}
|
||||
|
||||
func expectation(id _id, expectations ...resourceExpectation) expectedResource {
|
||||
return expectedResource{id, expectations}
|
||||
}
|
||||
|
||||
func setup(t *testing.T) {
|
||||
t.Helper()
|
||||
RegisterTestingT(t)
|
||||
if os.Getenv("E2E") == "" {
|
||||
t.Skip("Skipping E2E Test")
|
||||
}
|
||||
}
|
||||
|
||||
func createClient() *e2e.E2EClient {
|
||||
client, err := e2e.NewE2EClient()
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return client
|
||||
}
|
||||
|
||||
func deleteClusteredResource(client *e2e.E2EClient, resource expectedResource) {
|
||||
By(fmt.Sprintf("Deleting %s : %s", resource.gvr.String(), resource.name))
|
||||
_ = client.DeleteClusteredResource(resource.gvr, resource.name)
|
||||
err := e2e.GetWithRetry(1*time.Second, 15, func() error {
|
||||
_, err := client.GetClusteredResource(resource.gvr, resource.name)
|
||||
if err == nil {
|
||||
return fmt.Errorf("resource still exists: %s, %s", resource.gvr.String(), resource.name)
|
||||
}
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func deleteNamespacedResource(client *e2e.E2EClient, resource expectedResource) {
|
||||
By(fmt.Sprintf("Deleting %s : %s/%s", resource.gvr.String(), resource.ns, resource.name))
|
||||
_ = client.DeleteNamespacedResource(resource.gvr, resource.ns, resource.name)
|
||||
err := e2e.GetWithRetry(1*time.Second, 15, func() error {
|
||||
_, err := client.GetNamespacedResource(resource.gvr, resource.ns, resource.name)
|
||||
if err == nil {
|
||||
return fmt.Errorf("resource still exists: %s, %s/%s", resource.gvr.String(), resource.ns, resource.name)
|
||||
}
|
||||
if apierrors.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func deleteResource(client *e2e.E2EClient, resource expectedResource) {
|
||||
if resource.ns != "" {
|
||||
deleteNamespacedResource(client, resource)
|
||||
} else {
|
||||
deleteClusteredResource(client, resource)
|
||||
}
|
||||
}
|
||||
|
||||
func deleteResources(client *e2e.E2EClient, resources ...expectedResource) {
|
||||
for _, resource := range resources {
|
||||
deleteResource(client, resource)
|
||||
}
|
||||
}
|
||||
|
||||
func createClusteredResource(t *testing.T, client *e2e.E2EClient, resource resource) *unstructured.Unstructured {
|
||||
t.Helper()
|
||||
var u unstructured.Unstructured
|
||||
Expect(yaml.Unmarshal(resource.raw, &u)).To(Succeed())
|
||||
By(fmt.Sprintf("Creating %s : %s", resource.gvr.String(), u.GetName()))
|
||||
result, err := client.CreateClusteredResource(resource.gvr, &u)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
t.Cleanup(func() {
|
||||
deleteResources(client, expected(resource.gvr, result.GetNamespace(), result.GetName()))
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
func createNamespacedResource(t *testing.T, client *e2e.E2EClient, resource resource) *unstructured.Unstructured {
|
||||
t.Helper()
|
||||
var u unstructured.Unstructured
|
||||
Expect(yaml.Unmarshal(resource.raw, &u)).To(Succeed())
|
||||
By(fmt.Sprintf("Creating %s : %s/%s", resource.gvr.String(), resource.ns, u.GetName()))
|
||||
result, err := client.CreateNamespacedResource(resource.gvr, resource.ns, &u)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
t.Cleanup(func() {
|
||||
deleteResources(client, expected(resource.gvr, result.GetNamespace(), result.GetName()))
|
||||
})
|
||||
return result
|
||||
}
|
||||
|
||||
func createResource(t *testing.T, client *e2e.E2EClient, resource resource) *unstructured.Unstructured {
|
||||
t.Helper()
|
||||
if resource.ns != "" {
|
||||
return createNamespacedResource(t, client, resource)
|
||||
} else {
|
||||
return createClusteredResource(t, client, resource)
|
||||
}
|
||||
}
|
||||
|
||||
func createResources(t *testing.T, client *e2e.E2EClient, resources ...resource) {
|
||||
t.Helper()
|
||||
for _, resource := range resources {
|
||||
createResource(t, client, resource)
|
||||
}
|
||||
}
|
||||
|
||||
func getClusteredResource(client *e2e.E2EClient, gvr schema.GroupVersionResource, name string) *unstructured.Unstructured {
|
||||
By(fmt.Sprintf("Getting %s : %s", gvr.String(), name))
|
||||
r, err := client.GetClusteredResource(gvr, name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return r
|
||||
}
|
||||
|
||||
func getNamespacedResource(client *e2e.E2EClient, gvr schema.GroupVersionResource, ns, name string) *unstructured.Unstructured {
|
||||
By(fmt.Sprintf("Getting %s : %s/%s", gvr.String(), ns, name))
|
||||
r, err := client.GetNamespacedResource(gvr, ns, name)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
return r
|
||||
}
|
||||
|
||||
// func getResource(client *e2e.E2EClient, gvr schema.GroupVersionResource, ns, name string) *unstructured.Unstructured {
|
||||
// if ns != "" {
|
||||
// return getNamespacedResource(client, gvr, ns, name)
|
||||
// } else {
|
||||
// return getClusteredResource(client, gvr, name)
|
||||
// }
|
||||
// }
|
||||
|
||||
func updateClusteredResource(client *e2e.E2EClient, gvr schema.GroupVersionResource, name string, m func(*unstructured.Unstructured) error) {
|
||||
r := getClusteredResource(client, gvr, name)
|
||||
version := r.GetResourceVersion()
|
||||
Expect(m(r)).To(Succeed())
|
||||
By(fmt.Sprintf("Updating %s : %s", gvr.String(), name))
|
||||
r.SetResourceVersion(version)
|
||||
_, err := client.UpdateClusteredResource(gvr, r)
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func updateNamespacedResource(client *e2e.E2EClient, gvr schema.GroupVersionResource, ns, name string, m func(*unstructured.Unstructured) error) {
|
||||
err := e2e.GetWithRetry(1*time.Second, 15, func() error {
|
||||
r := getNamespacedResource(client, gvr, ns, name)
|
||||
version := r.GetResourceVersion()
|
||||
Expect(m(r)).To(Succeed())
|
||||
By(fmt.Sprintf("Updating %s : %s/%s", gvr.String(), ns, name))
|
||||
r.SetResourceVersion(version)
|
||||
_, err := client.UpdateNamespacedResource(gvr, ns, r)
|
||||
return err
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
}
|
||||
|
||||
func updateResource(client *e2e.E2EClient, gvr schema.GroupVersionResource, ns, name string, m func(*unstructured.Unstructured) error) {
|
||||
if ns != "" {
|
||||
updateNamespacedResource(client, gvr, ns, name, m)
|
||||
} else {
|
||||
updateClusteredResource(client, gvr, name, m)
|
||||
}
|
||||
}
|
||||
|
||||
func expectClusteredResource(client *e2e.E2EClient, resource expectedResource) {
|
||||
By(fmt.Sprintf("Expecting %s : %s", resource.gvr.String(), resource.name))
|
||||
var r *unstructured.Unstructured
|
||||
err := e2e.GetWithRetry(1*time.Second, 30, func() error {
|
||||
get, err := client.GetClusteredResource(resource.gvr, resource.name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r = get
|
||||
return nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(r).NotTo(BeNil())
|
||||
for _, v := range resource.validate {
|
||||
v(r)
|
||||
}
|
||||
}
|
||||
|
||||
func expectNamespacedResource(client *e2e.E2EClient, resource expectedResource) {
|
||||
By(fmt.Sprintf("Expecting %s : %s/%s", resource.gvr.String(), resource.ns, resource.name))
|
||||
var r *unstructured.Unstructured
|
||||
err := e2e.GetWithRetry(1*time.Second, 30, func() error {
|
||||
get, err := client.GetNamespacedResource(resource.gvr, resource.ns, resource.name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
r = get
|
||||
return nil
|
||||
})
|
||||
Expect(err).NotTo(HaveOccurred())
|
||||
Expect(r).NotTo(BeNil())
|
||||
for _, v := range resource.validate {
|
||||
v(r)
|
||||
}
|
||||
}
|
||||
|
||||
func expectResource(client *e2e.E2EClient, resource expectedResource) {
|
||||
if resource.ns != "" {
|
||||
expectNamespacedResource(client, resource)
|
||||
} else {
|
||||
expectClusteredResource(client, resource)
|
||||
}
|
||||
}
|
||||
|
||||
func expectResources(client *e2e.E2EClient, resources ...expectedResource) {
|
||||
for _, resource := range resources {
|
||||
expectResource(client, resource)
|
||||
}
|
||||
}
|
||||
func expectClusteredResourceNotExists(client *e2e.E2EClient, resource expectedResource) {
    By(fmt.Sprintf("Expecting not exists %s : %s", resource.gvr.String(), resource.name))
    err := e2e.GetWithRetry(1*time.Second, 15, func() error {
        _, err := client.GetClusteredResource(resource.gvr, resource.name)
        return err
    })
    Expect(err).To(HaveOccurred())
}

func expectNamespacedResourceNotExists(client *e2e.E2EClient, resource expectedResource) {
    By(fmt.Sprintf("Expecting not exists %s : %s/%s", resource.gvr.String(), resource.ns, resource.name))
    err := e2e.GetWithRetry(1*time.Second, 15, func() error {
        // use the namespaced getter so the lookup actually targets resource.ns
        _, err := client.GetNamespacedResource(resource.gvr, resource.ns, resource.name)
        return err
    })
    Expect(err).To(HaveOccurred())
}

func expectResourceNotExists(client *e2e.E2EClient, resource expectedResource) {
    if resource.ns != "" {
        expectNamespacedResourceNotExists(client, resource)
    } else {
        expectClusteredResourceNotExists(client, resource)
    }
}

// func expectResourcesNotExist(client *e2e.E2EClient, resources ...expectedResource) {
//     for _, resource := range resources {
//         expectResourceNotExists(client, resource)
//     }
// }

func expectClusteredResourceNotFound(client *e2e.E2EClient, resource expectedResource) {
    By(fmt.Sprintf("Expecting not found %s : %s", resource.gvr.String(), resource.name))
    _, err := client.GetClusteredResource(resource.gvr, resource.name)
    Expect(apierrors.IsNotFound(err)).To(BeTrue())
}

func expectNamespacedResourceNotFound(client *e2e.E2EClient, resource expectedResource) {
    By(fmt.Sprintf("Expecting not found %s : %s/%s", resource.gvr.String(), resource.ns, resource.name))
    // use the namespaced getter so the lookup actually targets resource.ns
    _, err := client.GetNamespacedResource(resource.gvr, resource.ns, resource.name)
    Expect(apierrors.IsNotFound(err)).To(BeTrue())
}

func expectResourceNotFound(client *e2e.E2EClient, resource expectedResource) {
    if resource.ns != "" {
        expectNamespacedResourceNotFound(client, resource)
    } else {
        expectClusteredResourceNotFound(client, resource)
    }
}

func expectResourcesNotFound(client *e2e.E2EClient, resources ...expectedResource) {
    for _, resource := range resources {
        expectResourceNotFound(client, resource)
    }
}

type testCaseStep func(*e2e.E2EClient) error

func stepBy(by string) testCaseStep {
    return func(*e2e.E2EClient) error {
        By(by)
        return nil
    }
}

func stepDeleteResource(gvr schema.GroupVersionResource, ns string, name string) testCaseStep {
    return func(client *e2e.E2EClient) error {
        deleteResource(client, expected(gvr, ns, name))
        return nil
    }
}

func stepExpectResource(gvr schema.GroupVersionResource, ns, name string, validate ...resourceExpectation) testCaseStep {
    return func(client *e2e.E2EClient) error {
        expectResource(client, expected(gvr, ns, name, validate...))
        return nil
    }
}

func stepWaitResource(gvr schema.GroupVersionResource, ns, name string, sleepInterval time.Duration, retryCount int, predicate func(*unstructured.Unstructured) bool) testCaseStep {
    return func(client *e2e.E2EClient) error {
        By(fmt.Sprintf("Waiting %s : %s/%s", gvr.String(), ns, name))
        err := e2e.GetWithRetry(sleepInterval, retryCount, func() error {
            get, err := client.GetNamespacedResource(gvr, ns, name)
            if err != nil {
                return err
            }
            if !predicate(get) {
                return fmt.Errorf("predicate didn't validate: %s, %s/%s", gvr.String(), ns, name)
            }
            return nil
        })
        Expect(err).NotTo(HaveOccurred())
        return nil
    }
}

func stepUpdateResource(gvr schema.GroupVersionResource, ns, name string, m func(*unstructured.Unstructured) error) testCaseStep {
    return func(client *e2e.E2EClient) error {
        updateResource(client, gvr, ns, name, m)
        return nil
    }
}

func stepResourceNotFound(gvr schema.GroupVersionResource, ns string, name string) testCaseStep {
    return func(client *e2e.E2EClient) error {
        expectResourceNotExists(client, expected(gvr, ns, name))
        return nil
    }
}
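Every step constructor above returns the same testCaseStep shape, so a test case is simply an ordered slice of steps. A minimal runner sketch (the runSteps name is hypothetical, not part of the deleted file):

func runSteps(client *e2e.E2EClient, steps ...testCaseStep) error {
    // execute steps in order, stopping at the first failure
    for _, step := range steps {
        if err := step(client); err != nil {
            return err
        }
    }
    return nil
}

A case could then read runSteps(client, stepBy("trigger deleted"), stepDeleteResource(gvr, ns, name), stepResourceNotFound(gvr, ns, name)).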
@ -1,450 +0,0 @@

package generate

// Namespace Description
var namespaceYaml = []byte(`
apiVersion: v1
kind: Namespace
metadata:
  name: test
`)

// Namespace With Label Description
var namespaceWithLabelYaml = []byte(`
apiVersion: v1
kind: Namespace
metadata:
  name: test
  labels:
    security: standard
`)

// Cluster Policy to generate Role and RoleBinding with synchronize=true
var roleRoleBindingYamlWithSync = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: "gen-role-policy"
spec:
  background: false
  rules:
  - name: "gen-role"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: Role
      name: "ns-role"
      namespace: "{{request.object.metadata.name}}"
      synchronize: true
      data:
        rules:
        - apiGroups: [""]
          resources: ["pods"]
          verbs: ["get", "watch", "list"]
  - name: "gen-role-binding"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: RoleBinding
      name: "ns-role-binding"
      namespace: "{{request.object.metadata.name}}"
      synchronize: true
      data:
        subjects:
        - apiGroup: rbac.authorization.k8s.io
          kind: User
          name: minikube-user
        roleRef:
          kind: Role
          name: ns-role
          namespace: "{{request.object.metadata.name}}"
          apiGroup: rbac.authorization.k8s.io
`)

// Cluster Policy to generate Role and RoleBinding with Clone
var roleRoleBindingYamlWithClone = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: "gen-role-policy"
spec:
  background: false
  rules:
  - name: "gen-role"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: Role
      name: "ns-role"
      namespace: "{{request.object.metadata.name}}"
      synchronize: true
      clone:
        name: "ns-role"
        namespace: "default"
  - name: "gen-role-binding"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: RoleBinding
      name: "ns-role-binding"
      namespace: "{{request.object.metadata.name}}"
      synchronize: true
      clone:
        name: "ns-role-binding"
        namespace: default
`)

// Source Role from which the Role is cloned by generate
var sourceRoleYaml = []byte(`
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: default
  name: ns-role
rules:
- apiGroups: [""]
  resources: ["configmaps"]
  verbs: ["get", "watch", "list", "delete", "create"]
`)

// Source RoleBinding from which the RoleBinding is cloned by generate
var sourceRoleBindingYaml = []byte(`
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: ns-role-binding
  namespace: default
subjects:
- apiGroup: rbac.authorization.k8s.io
  kind: User
  name: minikube-user
roleRef:
  kind: Role
  name: ns-role
  apiGroup: rbac.authorization.k8s.io
`)

// ClusterPolicy to generate ClusterRole and ClusterRoleBinding with synchronize = true
var genClusterRoleYamlWithSync = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: "gen-cluster-policy"
spec:
  background: false
  rules:
  - name: "gen-cluster-role"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: ClusterRole
      name: ns-cluster-role
      synchronize: true
      data:
        rules:
        - apiGroups: [""]
          resources: ["pods"]
          verbs: ["get", "watch", "list"]
  - name: "gen-cluster-role-binding"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: ClusterRoleBinding
      name: ns-cluster-role-binding
      synchronize: true
      data:
        roleRef:
          apiGroup: rbac.authorization.k8s.io
          kind: ClusterRole
          name: ns-cluster-role
        subjects:
        - kind: ServiceAccount
          name: "kyverno"
          namespace: "{{request.object.metadata.name}}"
`)

var baseClusterRoleData = []byte(`
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: base-cluster-role
rules:
- apiGroups:
  - "*"
  resources:
  - namespaces
  - networkpolicies
  - secrets
  - configmaps
  - resourcequotas
  - limitranges
  - roles
  - clusterroles
  - rolebindings
  - clusterrolebindings
  verbs:
  - create # generate new resources
  - get # check the contents of existing resources
  - update # update existing resource, if required configuration defined in policy is not present
  - delete # clean-up, if the generate trigger resource is deleted
`)

var baseClusterRoleBindingData = []byte(`
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: base-cluster-role-binding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: base-cluster-role
subjects:
- kind: ServiceAccount
  name: kyverno
  namespace: kyverno
`)

// Cluster Policy to clone ClusterRole and ClusterRoleBinding
var clusterRoleRoleBindingYamlWithClone = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: "gen-cluster-policy-3"
spec:
  background: false
  rules:
  - name: "gen-role"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: ClusterRole
      name: "cloned-cluster-role"
      synchronize: true
      clone:
        name: "base-cluster-role"
  - name: "gen-role-binding"
    match:
      resources:
        kinds:
        - Namespace
    generate:
      kind: ClusterRoleBinding
      name: "cloned-cluster-role-binding"
      synchronize: true
      clone:
        name: "base-cluster-role-binding"
`)

var genNetworkPolicyYaml = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-networkpolicy
spec:
  background: true
  generateExistingOnPolicyUpdate: true
  rules:
  - name: allow-dns
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            security: standard
    exclude:
      resources:
        namespaces:
        - "kube-system"
        - "default"
        - "kube-public"
        - "nova-kyverno"
    generate:
      synchronize: true
      kind: NetworkPolicy
      name: allow-dns
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          egress:
          - ports:
            - protocol: UDP
              port: 5353
          podSelector: {}
          policyTypes:
          - Egress
`)

var updatGenNetworkPolicyYaml = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-networkpolicy
spec:
  background: true
  generateExistingOnPolicyUpdate: true
  rules:
  - name: allow-dns
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            security: standard
    exclude:
      resources:
        namespaces:
        - "kube-system"
        - "default"
        - "kube-public"
        - "nova-kyverno"
    generate:
      synchronize: true
      kind: NetworkPolicy
      name: allow-dns
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          egress:
          - ports:
            - protocol: TCP
              port: 5353
          podSelector: {}
          policyTypes:
          - Egress
`)

var updateSynchronizeInGeneratePolicyYaml = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: add-networkpolicy
spec:
  background: true
  generateExistingOnPolicyUpdate: true
  rules:
  - name: allow-dns
    match:
      resources:
        kinds:
        - Namespace
        selector:
          matchLabels:
            security: standard
    exclude:
      resources:
        namespaces:
        - "kube-system"
        - "default"
        - "kube-public"
        - "nova-kyverno"
    generate:
      synchronize: false
      kind: NetworkPolicy
      name: allow-dns
      namespace: "{{request.object.metadata.name}}"
      data:
        spec:
          egress:
          - ports:
            - protocol: UDP
              port: 5353
          podSelector: {}
          policyTypes:
          - Egress
`)

var cloneSourceResource = []byte(`
apiVersion: v1
kind: ConfigMap
metadata:
  name: game-demo
data:
  initial_lives: "2"
`)

var cloneSecretSourceResource = []byte(`
apiVersion: v1
kind: Secret
metadata:
  name: secret-basic-auth
type: kubernetes.io/basic-auth
stringData:
  username: admin
  password: t0p-Secret
`)

var genCloneConfigMapPolicyYaml = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: generate-policy
spec:
  rules:
  - name: copy-game-demo
    match:
      resources:
        kinds:
        - Namespace
    exclude:
      resources:
        namespaces:
        - kube-system
        - default
        - kube-public
        - kyverno
    generate:
      kind: ConfigMap
      name: game-demo
      namespace: "{{request.object.metadata.name}}"
      synchronize: true
      clone:
        namespace: default
        name: game-demo
`)

var genMultipleClonePolicyYaml = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: sync-secret-with-multi-clone
spec:
  generateExistingOnPolicyUpdate: true
  rules:
  - name: sync-secret
    match:
      any:
      - resources:
          kinds:
          - Namespace
    exclude:
      any:
      - resources:
          namespaces:
          - kube-system
          - default
          - kube-public
          - kyverno
    generate:
      namespace: "{{request.object.metadata.name}}"
      synchronize: true
      cloneList:
        namespace: default
        kinds:
        - v1/Secret
        - v1/ConfigMap
`)
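These raw manifests are consumed through the e2e client; a hedged sketch using the same client methods that appear in the mutate tests later in this diff (GVR values are assumptions for illustration):

client, err := e2e.NewE2EClient()
if err != nil {
    panic(err)
}
// ClusterPolicy is cluster-scoped: no namespace argument
if _, err := client.CreateClusteredResourceYaml(e2e.GetGVR("kyverno.io", "v1", "clusterpolicies"), roleRoleBindingYamlWithSync); err != nil {
    panic(err)
}
// the clone source Role must exist in the default namespace first
if _, err := client.CreateNamespacedResourceYaml(e2e.GetGVR("rbac.authorization.k8s.io", "v1", "roles"), "default", "", sourceRoleYaml); err != nil {
    panic(err)
}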
@ -1,24 +0,0 @@

package metrics

import (
    "os"
    "testing"

    "github.com/kyverno/kyverno/test/e2e"
    . "github.com/onsi/gomega"
)

func Test_MetricsServerAvailability(t *testing.T) {
    RegisterTestingT(t)
    if os.Getenv("E2E") == "" {
        t.Skip("Skipping E2E Test")
    }

    requestObj := e2e.APIRequest{
        URL:  "http://localhost:8000/metrics",
        Type: "GET",
    }
    response, err := e2e.CallAPI(requestObj)
    Expect(err).NotTo(HaveOccurred())
    Expect(response.StatusCode).To(Equal(200))
}
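The availability check above only asserts a 200. A stricter, hypothetical variant could also assert that a known Kyverno metric family appears in the scrape output; kyverno_policy_results_total is assumed here as a representative metric name, and the helper is a sketch, not part of the original suite:

func metricsContain(metric string) (bool, error) {
    resp, err := http.Get("http://localhost:8000/metrics")
    if err != nil {
        return false, err
    }
    defer resp.Body.Close()
    body, err := io.ReadAll(resp.Body)
    if err != nil {
        return false, err
    }
    // Prometheus exposition format is plain text, so a substring check suffices
    return strings.Contains(string(body), metric), nil
}

(requires "io", "net/http", and "strings" in the import block)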
@ -1,284 +0,0 @@

package mutate

import (
    "github.com/blang/semver/v4"
    "github.com/kyverno/kyverno/test/e2e/common"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// MutateTests is E2E Test Config for mutation
var MutateTests = []struct {
    // TestName - Name of the Test
    TestName string
    // Data - The Yaml file of the ClusterPolicy
    Data []byte
    // ResourceNamespace - Namespace of the Resource
    ResourceNamespace string
    // PolicyName - Name of the Policy
    PolicyName string
}{
    {
        TestName:          "test-mutate-with-context",
        Data:              configMapMutationYaml,
        ResourceNamespace: "test-mutate",
        PolicyName:        "mutate-policy",
    },
    {
        TestName:          "test-mutate-with-logic-in-context",
        Data:              configMapMutationWithContextLogicYaml,
        ResourceNamespace: "test-mutate",
        PolicyName:        "mutate-policy",
    },
    {
        TestName:          "test-mutate-with-context-label-selection",
        Data:              configMapMutationWithContextLabelSelectionYaml,
        ResourceNamespace: "test-mutate",
        PolicyName:        "mutate-policy",
    },
}

// Note: sometimes deleting namespaces takes time.
// Using different names for namespaces prevents collisions.
var tests = []struct {
    // TestDescription - Description of the Test
    TestDescription string
    // PolicyName - Name of the Policy
    PolicyName string
    // PolicyRaw - The Yaml file of the ClusterPolicy
    PolicyRaw []byte
    // ResourceName - Name of the Resource
    ResourceName string
    // ResourceNamespace - Namespace of the Resource
    ResourceNamespace string
    // ResourceGVR - GVR of the Resource
    ResourceGVR schema.GroupVersionResource
    // ResourceRaw - The Yaml file of the Resource
    ResourceRaw []byte
    // ExpectedPatternRaw - The Yaml file that contains the validate pattern for the expected result.
    // This is not the final result. It is just used to validate the result from the engine.
    ExpectedPatternRaw []byte
}{
    {
        TestDescription:    "checks that runAsNonRoot is added to the pod security context and to each container's security context",
        PolicyName:         "set-runasnonroot-true",
        PolicyRaw:          setRunAsNonRootTrue,
        ResourceName:       "foo",
        ResourceNamespace:  "test-mutate",
        ResourceGVR:        podGVR,
        ResourceRaw:        podWithContainers,
        ExpectedPatternRaw: podWithContainersPattern,
    },
    {
        TestDescription:    "checks that runAsNonRoot is added to the pod security context and to each container's and initContainer's security context",
        PolicyName:         "set-runasnonroot-true",
        PolicyRaw:          setRunAsNonRootTrue,
        ResourceName:       "foo",
        ResourceNamespace:  "test-mutate1",
        ResourceGVR:        podGVR,
        ResourceRaw:        podWithContainersAndInitContainers,
        ExpectedPatternRaw: podWithContainersAndInitContainersPattern,
    },
    {
        TestDescription:    "checks that variables in the keys are working correctly",
        PolicyName:         "structured-logs-sidecar",
        PolicyRaw:          kyverno_2316_policy,
        ResourceName:       "busybox",
        ResourceNamespace:  "test-mutate2",
        ResourceGVR:        deploymentGVR,
        ResourceRaw:        kyverno_2316_resource,
        ExpectedPatternRaw: kyverno_2316_pattern,
    },
    {
        TestDescription:    "checks that the policy mutates env variables of an array at specific index numbers",
        PolicyName:         "add-image-as-env-var",
        PolicyRaw:          kyverno_mutate_json_patch,
        ResourceName:       "foo",
        ResourceNamespace:  "test-mutate-env-array",
        ResourceGVR:        podGVR,
        ResourceRaw:        podWithEnvVar,
        ExpectedPatternRaw: podWithEnvVarPattern,
    },
    {
        TestDescription:    "checks that preconditions are substituted correctly",
        PolicyName:         "replace-docker-hub",
        PolicyRaw:          kyverno_2971_policy,
        ResourceName:       "nginx",
        ResourceNamespace:  "test-mutate-img",
        ResourceGVR:        podGVR,
        ResourceRaw:        kyverno_2971_resource,
        ExpectedPatternRaw: kyverno_2971_pattern,
    },
    {
        TestDescription:    "checks whether imagePullSecrets is set",
        PolicyName:         "set-image-pull-secret",
        PolicyRaw:          setImagePullSecret,
        ResourceName:       "nginx",
        ResourceNamespace:  "test-run",
        ResourceGVR:        podGVR,
        ResourceRaw:        podWithNoSecrets,
        ExpectedPatternRaw: podWithNoSecretPattern,
    },
    {
        TestDescription:    "checks the global anchor variables for emptyDir",
        PolicyName:         "add-safe-to-evict",
        PolicyRaw:          annotate_host_path_policy,
        ResourceName:       "pod-with-emptydir",
        ResourceNamespace:  "emptydir",
        ResourceGVR:        podGVR,
        ResourceRaw:        podWithEmptyDirAsVolume,
        ExpectedPatternRaw: podWithVolumePattern,
    },
    {
        TestDescription:    "checks the global anchor variables for hostPath",
        PolicyName:         "add-safe-to-evict",
        PolicyRaw:          annotate_host_path_policy,
        ResourceName:       "pod-with-hostpath",
        ResourceNamespace:  "hostpath",
        ResourceGVR:        podGVR,
        ResourceRaw:        podWithHostPathAsVolume,
        ExpectedPatternRaw: podWithVolumePattern,
    },
}

var ingressTests = struct {
    testNamespace string
    cpol          []byte
    policyName    string
    tests         []struct {
        testName                          string
        group, version, rsc, resourceName string
        resource                          []byte
        skip                              bool
    }
}{
    testNamespace: "test-ingress",
    cpol:          mutateIngressCpol,
    policyName:    "mutate-ingress-host",
    tests: []struct {
        testName                          string
        group, version, rsc, resourceName string
        resource                          []byte
        skip                              bool
    }{
        {
            testName:     "test-networking-v1-ingress",
            group:        "networking.k8s.io",
            version:      "v1",
            rsc:          "ingresses",
            resourceName: "kuard-v1",
            resource:     ingressNetworkingV1,
            skip:         common.GetKubernetesVersion().LT(semver.MustParse("1.19.0")),
        },
        // the following test can be removed once all supported clusters are on
        // 1.22+, where networking.k8s.io/v1beta1 Ingress is no longer served
        {
            testName:     "test-networking-v1beta1-ingress",
            group:        "networking.k8s.io",
            version:      "v1beta1",
            rsc:          "ingresses",
            resourceName: "kuard-v1beta1",
            resource:     ingressNetworkingV1beta1,
            skip:         common.GetKubernetesVersion().GTE(semver.MustParse("1.22.0")),
        },
    },
}

type mutateExistingOperation string

const (
    createTrigger mutateExistingOperation = "createTrigger"
    deleteTrigger mutateExistingOperation = "deleteTrigger"
    createPolicy  mutateExistingOperation = "createPolicy"
)

// Note: sometimes deleting namespaces takes time.
// Using different names for namespaces prevents collisions.
var mutateExistingTests = []struct {
    // TestDescription - Description of the Test
    TestDescription string
    // Operation describes how to trigger the policy
    Operation mutateExistingOperation
    // PolicyName - Name of the Policy
    PolicyName string
    // PolicyRaw - The Yaml file of the ClusterPolicy
    PolicyRaw []byte
    // TriggerName - Name of the Trigger Resource
    TriggerName string
    // TriggerNamespace - Namespace of the Trigger Resource
    TriggerNamespace string
    // TriggerGVR - GVR of the Trigger Resource
    TriggerGVR schema.GroupVersionResource
    // TriggerRaw - The Yaml file of the Trigger Resource
    TriggerRaw []byte
    // TargetName - Name of the Target Resource
    TargetName string
    // TargetNamespace - Namespace of the Target Resource
    TargetNamespace string
    // TargetGVR - GVR of the Target Resource
    TargetGVR schema.GroupVersionResource
    // TargetRaw - The Yaml file of the Target Resource
    TargetRaw []byte
    // ExpectedTargetRaw - The Yaml file that contains the validate pattern for the expected result.
    // This is not the final result. It is just used to validate the result from the engine.
    ExpectedTargetRaw []byte
}{
    {
        TestDescription:   "mutate existing on resource creation",
        Operation:         createTrigger,
        PolicyName:        "test-post-mutation-create-trigger",
        PolicyRaw:         policyCreateTrigger,
        TriggerName:       "dictionary-1",
        TriggerNamespace:  "staging-1",
        TriggerGVR:        configmGVR,
        TriggerRaw:        triggerCreateTrigger,
        TargetName:        "test-secret-1",
        TargetNamespace:   "staging-1",
        TargetGVR:         secretGVR,
        TargetRaw:         targetCreateTrigger,
        ExpectedTargetRaw: expectedTargetCreateTrigger,
    },
    {
        TestDescription:   "mutate existing on resource deletion",
        Operation:         deleteTrigger,
        PolicyName:        "test-post-mutation-delete-trigger",
        PolicyRaw:         policyDeleteTrigger,
        TriggerName:       "dictionary-2",
        TriggerNamespace:  "staging-2",
        TriggerGVR:        configmGVR,
        TriggerRaw:        triggerDeleteTrigger,
        TargetName:        "test-secret-2",
        TargetNamespace:   "staging-2",
        TargetGVR:         secretGVR,
        TargetRaw:         targetDeleteTrigger,
        ExpectedTargetRaw: expectedTargetDeleteTrigger,
    },
    {
        TestDescription:   "mutate existing on policy creation",
        Operation:         createPolicy,
        PolicyName:        "test-post-mutation-create-policy",
        PolicyRaw:         policyCreatePolicy,
        TriggerName:       "dictionary-3",
        TriggerNamespace:  "staging-3",
        TriggerGVR:        configmGVR,
        TriggerRaw:        triggerCreatePolicy,
        TargetName:        "test-secret-3",
        TargetNamespace:   "staging-3",
        TargetGVR:         secretGVR,
        TargetRaw:         targetCreatePolicy,
        ExpectedTargetRaw: expectedTargetCreatePolicy,
    },
    {
        TestDescription:   "mutate existing (patchesJson6902) on resource creation",
        Operation:         createTrigger,
        PolicyName:        "test-post-mutation-json-patch-create-trigger",
        PolicyRaw:         policyCreateTriggerJsonPatch,
        TriggerName:       "dictionary-4",
        TriggerNamespace:  "staging-4",
        TriggerGVR:        configmGVR,
        TriggerRaw:        triggerCreateTriggerJsonPatch,
        TargetName:        "test-secret-4",
        TargetNamespace:   "staging-4",
        TargetGVR:         secretGVR,
        TargetRaw:         targetCreateTriggerJsonPatch,
        ExpectedTargetRaw: expectedCreateTriggerJsonPatch,
    },
}
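The skip fields above gate each ingress case on the cluster version at runtime. The same comparison logic in isolation, with a fixed version standing in for common.GetKubernetesVersion():

package main

import (
    "fmt"

    "github.com/blang/semver/v4"
)

func main() {
    v := semver.MustParse("1.21.3")
    // mirrors the two skip expressions in ingressTests
    fmt.Println("skip networking/v1 test:", v.LT(semver.MustParse("1.19.0")))       // false: v1 Ingress exists
    fmt.Println("skip networking/v1beta1 test:", v.GTE(semver.MustParse("1.22.0"))) // false: v1beta1 still served
}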
@ -1,645 +0,0 @@

package mutate

import (
    "encoding/json"
    "fmt"
    "os"
    "testing"
    "time"

    "github.com/kyverno/kyverno/pkg/engine/validate"
    "github.com/kyverno/kyverno/pkg/logging"
    "github.com/kyverno/kyverno/test/e2e"
    commonE2E "github.com/kyverno/kyverno/test/e2e/common"
    . "github.com/onsi/ginkgo"
    . "github.com/onsi/gomega"
    "gopkg.in/yaml.v2"
    "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

var (
    // Cluster Policy GVR
    policyGVR = e2e.GetGVR("kyverno.io", "v1", "clusterpolicies")
    // Namespace GVR
    namespaceGVR = e2e.GetGVR("", "v1", "namespaces")
    // ConfigMap GVR
    configMapGVR = e2e.GetGVR("", "v1", "configmaps")

    // ClusterPolicy Namespace
    policyNamespace = ""
)

func Test_Mutate_Sets(t *testing.T) {
    RegisterTestingT(t)
    if os.Getenv("E2E") == "" {
        t.Skip("Skipping E2E Test")
    }
    // Generate E2E Client
    e2eClient, err := e2e.NewE2EClient()
    Expect(err).To(BeNil())

    for _, tests := range MutateTests {
        By(fmt.Sprintf("Test to mutate objects : %s", tests.TestName))

        // Clean up Resources
        By("Cleaning Cluster Policies")
        e2eClient.CleanClusterPolicies(policyGVR)

        // Clear Namespace
        By(fmt.Sprintf("Deleting Namespace : %s", tests.ResourceNamespace))
        e2eClient.DeleteClusteredResource(namespaceGVR, tests.ResourceNamespace)

        // Wait Till Deletion of Namespace
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, tests.ResourceNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", tests.ResourceNamespace)
        })
        Expect(err).NotTo(HaveOccurred())

        // Create Namespace
        By(fmt.Sprintf("Creating Namespace %s", tests.ResourceNamespace))
        _, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml("test-mutate"))
        Expect(err).NotTo(HaveOccurred())

        // Wait Till Creation of Namespace
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, tests.ResourceNamespace)
            if err != nil {
                return err
            }
            return nil
        })
        Expect(err).NotTo(HaveOccurred())

        // Create source CM
        By(fmt.Sprintf("\nCreating source ConfigMap in %s", tests.ResourceNamespace))
        _, err = e2eClient.CreateNamespacedResourceYaml(configMapGVR, tests.ResourceNamespace, "", sourceConfigMapYaml)
        Expect(err).NotTo(HaveOccurred())

        // Create CM Policy
        By(fmt.Sprintf("\nCreating Mutate ConfigMap Policy in %s", policyNamespace))
        _, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, "", tests.Data)
        Expect(err).NotTo(HaveOccurred())

        err = commonE2E.PolicyCreated(tests.PolicyName)
        Expect(err).NotTo(HaveOccurred())

        // Create target CM
        By(fmt.Sprintf("\nCreating target ConfigMap in %s", tests.ResourceNamespace))
        _, err = e2eClient.CreateNamespacedResourceYaml(configMapGVR, tests.ResourceNamespace, "", targetConfigMapYaml)
        Expect(err).NotTo(HaveOccurred())

        // Verify created ConfigMap
        By(fmt.Sprintf("Verifying ConfigMap in the Namespace : %s", tests.ResourceNamespace))
        // Wait Till Creation of ConfigMap
        e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetNamespacedResource(configMapGVR, tests.ResourceNamespace, "target")
            if err != nil {
                return err
            }
            return nil
        })

        cmRes, err := e2eClient.GetNamespacedResource(configMapGVR, tests.ResourceNamespace, "target")
        c, _ := json.Marshal(cmRes)
        By(fmt.Sprintf("configMap : %s", string(c)))

        Expect(err).NotTo(HaveOccurred())
        Expect(cmRes.GetLabels()["kyverno.key/copy-me"]).To(Equal("sample-value"))

        // CleanUp Resources
        e2eClient.CleanClusterPolicies(policyGVR)

        // Clear Namespace
        e2eClient.DeleteClusteredResource(namespaceGVR, tests.ResourceNamespace)

        // Wait Till Deletion of Namespace
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, tests.ResourceNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", tests.ResourceNamespace)
        })
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Test %s Completed \n\n\n", tests.TestName))
    }
}

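The delete-then-poll dance above repeats in every test of this file; a hedged consolidation (the helper name is hypothetical) that keeps the same GetWithRetry semantics:

func waitForNamespaceDeletion(client *e2e.E2EClient, name string) error {
    return e2e.GetWithRetry(1*time.Second, 15, func() error {
        // once the Get fails, the namespace is gone and the retry loop ends
        if _, err := client.GetClusteredResource(namespaceGVR, name); err != nil {
            return nil
        }
        return fmt.Errorf("namespace %s still exists", name)
    })
}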
func Test_Mutate(t *testing.T) {
    RegisterTestingT(t)
    if os.Getenv("E2E") == "" {
        t.Skip("Skipping E2E Test")
    }

    e2eClient, err := e2e.NewE2EClient()
    Expect(err).To(BeNil())

    for _, test := range tests {
        By(fmt.Sprintf("Mutation Test: %s", test.TestDescription))

        By("Deleting Cluster Policies...")
        e2eClient.CleanClusterPolicies(policyGVR)

        By("Deleting Resource...")
        e2eClient.DeleteNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)

        By(fmt.Sprintf("Deleting Namespace: %s...", test.ResourceNamespace))
        e2eClient.DeleteClusteredResource(namespaceGVR, test.ResourceNamespace)

        By("Wait Till Deletion of Namespace...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", test.ResourceNamespace)
        })
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Creating Namespace: %s...", test.ResourceNamespace))
        _, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(test.ResourceNamespace))
        Expect(err).NotTo(HaveOccurred())

        By("Wait Till Creation of Namespace...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
            if err != nil {
                return err
            }
            return nil
        })
        Expect(err).NotTo(HaveOccurred())

        By("Creating Policy...")
        _, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
        Expect(err).NotTo(HaveOccurred())

        err = commonE2E.PolicyCreated(test.PolicyName)
        Expect(err).NotTo(HaveOccurred())

        By("Creating Resource...")
        _, err = e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, "", test.ResourceRaw)
        Expect(err).NotTo(HaveOccurred())

        By("Checking that resource is created...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)
            if err != nil {
                return err
            }
            return nil
        })
        Expect(err).NotTo(HaveOccurred())

        res, err := e2eClient.GetNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)
        Expect(err).NotTo(HaveOccurred())

        actualJSON, err := json.Marshal(res)
        Expect(err).NotTo(HaveOccurred())

        var actual interface{}
        err = json.Unmarshal(actualJSON, &actual)
        Expect(err).NotTo(HaveOccurred())

        expected, err := rawYAMLToJSONInterface(test.ExpectedPatternRaw)
        Expect(err).NotTo(HaveOccurred())

        By("Validating created resource with the expected pattern...")
        err = validate.MatchPattern(logging.GlobalLogger(), actual, expected)
        Expect(err).NotTo(HaveOccurred())

        By("Deleting Cluster Policies...")
        err = e2eClient.CleanClusterPolicies(policyGVR)
        Expect(err).NotTo(HaveOccurred())

        By("Deleting Resource...")
        err = e2eClient.DeleteNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)
        Expect(err).NotTo(HaveOccurred())

        By("Deleting Namespace...")
        err = e2eClient.DeleteClusteredResource(namespaceGVR, test.ResourceNamespace)
        Expect(err).NotTo(HaveOccurred())

        By("Wait Till Deletion of Namespace...")
        e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", test.ResourceNamespace)
        })

        // Do not fail if waiting fails. Sometimes the namespace needs time to be deleted.

        By("Done")
    }
}

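The pattern check at the heart of the loop above is plain data-to-data matching. A minimal illustration using the same validate.MatchPattern call (the values here are made up; "?*" is Kyverno's "any non-empty string" wildcard):

actual := map[string]interface{}{
    "spec": map[string]interface{}{"serviceAccountName": "dev-team"},
}
pattern := map[string]interface{}{
    "spec": map[string]interface{}{"serviceAccountName": "?*"},
}
// a nil error means the resource satisfies the pattern
err := validate.MatchPattern(logging.GlobalLogger(), actual, pattern)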
func Test_Mutate_Ingress(t *testing.T) {
    RegisterTestingT(t)
    if os.Getenv("E2E") == "" {
        t.Skip("Skipping E2E Test")
    }

    // Generate E2E Client
    e2eClient, err := e2e.NewE2EClient()
    Expect(err).To(BeNil())

    nspace := ingressTests.testNamespace
    By("Cleaning Cluster Policies")
    e2eClient.CleanClusterPolicies(policyGVR)

    By(fmt.Sprintf("Deleting Namespace : %s", nspace))
    e2eClient.DeleteClusteredResource(namespaceGVR, nspace)

    // Wait Till Deletion of Namespace
    err = e2e.GetWithRetry(1*time.Second, 15, func() error {
        _, err := e2eClient.GetClusteredResource(namespaceGVR, nspace)
        if err != nil {
            return nil
        }
        return fmt.Errorf("namespace %s still exists", nspace)
    })
    Expect(err).To(BeNil())

    By("Creating mutate ClusterPolicy")
    _, err = e2eClient.CreateClusteredResourceYaml(policyGVR, ingressTests.cpol)
    Expect(err).NotTo(HaveOccurred())

    err = commonE2E.PolicyCreated(ingressTests.policyName)
    Expect(err).NotTo(HaveOccurred())

    By(fmt.Sprintf("Creating Namespace %s", nspace))
    _, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(nspace))
    Expect(err).NotTo(HaveOccurred())

    for _, test := range ingressTests.tests {
        if test.skip {
            continue
        }
        By(fmt.Sprintf("\n\nStart testing %s", test.testName))
        gvr := e2e.GetGVR(test.group, test.version, test.rsc)
        By(fmt.Sprintf("Creating Ingress %v in %s", gvr, nspace))
        _, err = e2eClient.CreateNamespacedResourceYaml(gvr, nspace, "", test.resource)
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Verifying Ingress %v in the Namespace : %s", gvr, nspace))
        var mutatedResource *unstructured.Unstructured
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            mutatedResource, err = e2eClient.GetNamespacedResource(gvr, nspace, test.resourceName)
            if err != nil {
                return err
            }
            return nil
        })
        Expect(err).To(BeNil())

        By("Comparing patched field")
        rules, ok, err := unstructured.NestedSlice(mutatedResource.UnstructuredContent(), "spec", "rules")
        Expect(err).To(BeNil())
        Expect(ok).To(BeTrue())
        rule := rules[0].(map[string]interface{})
        host := rule["host"].(string)
        Expect(host).To(Equal("kuard.mycompany.com"))
    }
}

func Test_Mutate_Existing(t *testing.T) {
    RegisterTestingT(t)
    if os.Getenv("E2E") == "" {
        t.Skip("Skipping E2E Test")
    }

    e2eClient, err := e2e.NewE2EClient()
    Expect(err).To(BeNil())

    for _, test := range mutateExistingTests {
        By(fmt.Sprintf("\nStart Mutate Existing Tests: %s", test.TestDescription))

        By("\nCleaning up resources")
        By("Deleting Cluster Policies...")
        e2eClient.CleanClusterPolicies(policyGVR)

        By(fmt.Sprintf("Deleting Trigger Resource %v %s/%s...", test.TriggerGVR, test.TriggerNamespace, test.TriggerName))
        e2eClient.DeleteNamespacedResource(test.TriggerGVR, test.TriggerNamespace, test.TriggerName)

        By(fmt.Sprintf("Deleting Trigger Namespace: %s...", test.TriggerNamespace))
        e2eClient.DeleteClusteredResource(namespaceGVR, test.TriggerNamespace)

        By("Wait Till Deletion of Trigger Namespace...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.TriggerNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", test.TriggerNamespace)
        })
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Deleting Target Resource %v %s/%s...", test.TargetGVR, test.TargetNamespace, test.TargetName))
        e2eClient.DeleteNamespacedResource(test.TargetGVR, test.TargetNamespace, test.TargetName)

        By(fmt.Sprintf("Deleting Target Namespace: %s...", test.TargetNamespace))
        e2eClient.DeleteClusteredResource(namespaceGVR, test.TargetNamespace)

        By("Wait Till Deletion of Target Namespace...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.TargetNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", test.TargetNamespace)
        })
        Expect(err).NotTo(HaveOccurred())

        By("Done cleaning up resources\n")

        By(fmt.Sprintf("Creating target Namespace: %s...", test.TargetNamespace))
        _, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(test.TargetNamespace))
        Expect(err).NotTo(HaveOccurred())

        By("Wait Till Creation of Namespace...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.TargetNamespace)
            if err != nil {
                return err
            }
            return nil
        })
        Expect(err).NotTo(HaveOccurred())

        By(fmt.Sprintf("Creating Target Resource %v, %s/%s...", test.TargetGVR, test.TargetNamespace, test.TargetName))
        _, err = e2eClient.CreateNamespacedResourceYaml(test.TargetGVR, test.TargetNamespace, test.TargetName, test.TargetRaw)
        Expect(err).NotTo(HaveOccurred())

        By("Checking that resource is created...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetNamespacedResource(test.TargetGVR, test.TargetNamespace, test.TargetName)
            if err != nil {
                return err
            }
            return nil
        })
        Expect(err).NotTo(HaveOccurred())

        switch test.Operation {
        case createTrigger:
            By("Operation: createTrigger\n Creating Policy...")
            _, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
            Expect(err).NotTo(HaveOccurred())

            err = commonE2E.PolicyCreated(test.PolicyName)
            Expect(err).NotTo(HaveOccurred())

            By(fmt.Sprintf("Creating Trigger Resource %v, %s/%s...", test.TriggerGVR, test.TriggerNamespace, test.TriggerName))
            _, err = e2eClient.CreateNamespacedResourceYaml(test.TriggerGVR, test.TriggerNamespace, test.TriggerName, test.TriggerRaw)
            Expect(err).NotTo(HaveOccurred())

            By("Checking that resource is created...")
            err = e2e.GetWithRetry(1*time.Second, 15, func() error {
                _, err := e2eClient.GetNamespacedResource(test.TriggerGVR, test.TriggerNamespace, test.TriggerName)
                if err != nil {
                    return err
                }
                return nil
            })
            Expect(err).NotTo(HaveOccurred())

            // wait for UR to be completed
            // TODO: this should be changed to check the UR for the right state.
            // Any hard-coded timer may fail in some cases.
            time.Sleep(5 * time.Second)

            res, err := e2eClient.GetNamespacedResource(test.TargetGVR, test.TargetNamespace, test.TargetName)
            Expect(err).NotTo(HaveOccurred())

            actualJSON, err := json.Marshal(res)
            Expect(err).NotTo(HaveOccurred())

            var actual interface{}
            err = json.Unmarshal(actualJSON, &actual)
            Expect(err).NotTo(HaveOccurred())

            expected, err := rawYAMLToJSONInterface(test.ExpectedTargetRaw)
            Expect(err).NotTo(HaveOccurred())

            By("Validating created resource with the expected pattern...")
            err = validate.MatchPattern(logging.GlobalLogger(), actual, expected)
            Expect(err).NotTo(HaveOccurred())

        case deleteTrigger:
            By(fmt.Sprintf("Operation: deleteTrigger\n Creating Trigger Resource %v, %s/%s...", test.TriggerGVR, test.TriggerNamespace, test.TriggerName))
            _, err = e2eClient.CreateNamespacedResourceYaml(test.TriggerGVR, test.TriggerNamespace, test.TriggerName, test.TriggerRaw)
            Expect(err).NotTo(HaveOccurred())

            By("Checking that resource is created...")
            err = e2e.GetWithRetry(1*time.Second, 15, func() error {
                _, err := e2eClient.GetNamespacedResource(test.TriggerGVR, test.TriggerNamespace, test.TriggerName)
                if err != nil {
                    return err
                }
                return nil
            })
            Expect(err).NotTo(HaveOccurred())

            By("Creating Policy...")
            _, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
            Expect(err).NotTo(HaveOccurred())

            err = commonE2E.PolicyCreated(test.PolicyName)
            Expect(err).NotTo(HaveOccurred())

            By(fmt.Sprintf("Deleting Trigger Resource to Trigger Policy %v %s/%s...", test.TriggerGVR, test.TriggerNamespace, test.TriggerName))
            e2eClient.DeleteNamespacedResource(test.TriggerGVR, test.TriggerNamespace, test.TriggerName)

            // wait for UR to be completed
            time.Sleep(3 * time.Second)

            res, err := e2eClient.GetNamespacedResource(test.TargetGVR, test.TargetNamespace, test.TargetName)
            Expect(err).NotTo(HaveOccurred())

            actualJSON, err := json.Marshal(res)
            Expect(err).NotTo(HaveOccurred())

            var actual interface{}
            err = json.Unmarshal(actualJSON, &actual)
            Expect(err).NotTo(HaveOccurred())

            expected, err := rawYAMLToJSONInterface(test.ExpectedTargetRaw)
            Expect(err).NotTo(HaveOccurred())

            By("Validating created resource with the expected pattern...")
            err = validate.MatchPattern(logging.GlobalLogger(), actual, expected)
            Expect(err).NotTo(HaveOccurred())

        case createPolicy:
            By(fmt.Sprintf("Operation: createPolicy\n Creating Trigger Resource %v, %s/%s...", test.TriggerGVR, test.TriggerNamespace, test.TriggerName))
            _, err = e2eClient.CreateNamespacedResourceYaml(test.TriggerGVR, test.TriggerNamespace, test.TriggerName, test.TriggerRaw)
            Expect(err).NotTo(HaveOccurred())

            By("Checking that resource is created...")
            err = e2e.GetWithRetry(1*time.Second, 15, func() error {
                _, err := e2eClient.GetNamespacedResource(test.TriggerGVR, test.TriggerNamespace, test.TriggerName)
                if err != nil {
                    return err
                }
                return nil
            })
            Expect(err).NotTo(HaveOccurred())

            By("Creating Policy...")
            _, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
            Expect(err).NotTo(HaveOccurred())

            err = commonE2E.PolicyCreated(test.PolicyName)
            Expect(err).NotTo(HaveOccurred())

            // wait for UR to be completed
            time.Sleep(3 * time.Second)

            res, err := e2eClient.GetNamespacedResource(test.TargetGVR, test.TargetNamespace, test.TargetName)
            Expect(err).NotTo(HaveOccurred())

            actualJSON, err := json.Marshal(res)
            Expect(err).NotTo(HaveOccurred())

            var actual interface{}
            err = json.Unmarshal(actualJSON, &actual)
            Expect(err).NotTo(HaveOccurred())

            expected, err := rawYAMLToJSONInterface(test.ExpectedTargetRaw)
            Expect(err).NotTo(HaveOccurred())

            By("Validating created resource with the expected pattern...")
            err = validate.MatchPattern(logging.GlobalLogger(), actual, expected)
            Expect(err).NotTo(HaveOccurred())
        }

        By("Deleting Cluster Policies...")
        e2eClient.CleanClusterPolicies(policyGVR)

        By(fmt.Sprintf("Deleting Trigger Resource %v %s/%s...", test.TriggerGVR, test.TriggerNamespace, test.TriggerName))
        e2eClient.DeleteNamespacedResource(test.TriggerGVR, test.TriggerNamespace, test.TriggerName)

        By(fmt.Sprintf("Deleting Trigger Namespace: %s...", test.TriggerNamespace))
        e2eClient.DeleteClusteredResource(namespaceGVR, test.TriggerNamespace)

        By("Wait Till Deletion of Trigger Namespace...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.TriggerNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", test.TriggerNamespace)
        })

        // Do not fail if waiting fails. Sometimes the namespace needs time to be deleted.
        if err != nil {
            By(err.Error())
        }

        By(fmt.Sprintf("Deleting Target Resource %v %s/%s...", test.TargetGVR, test.TargetNamespace, test.TargetName))
        e2eClient.DeleteNamespacedResource(test.TargetGVR, test.TargetNamespace, test.TargetName)

        By(fmt.Sprintf("Deleting Target Namespace: %s...", test.TargetNamespace))
        e2eClient.DeleteClusteredResource(namespaceGVR, test.TargetNamespace)

        By("Wait Till Deletion of Target Namespace...")
        err = e2e.GetWithRetry(1*time.Second, 15, func() error {
            _, err := e2eClient.GetClusteredResource(namespaceGVR, test.TargetNamespace)
            if err != nil {
                return nil
            }
            return fmt.Errorf("namespace %s still exists", test.TargetNamespace)
        })

        // Do not fail if waiting fails. Sometimes the namespace needs time to be deleted.
        if err != nil {
            By(err.Error())
        }

        By("Done\n\n")
    }
}

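The hard-coded sleeps above are exactly what the TODO comments flag. A hedged replacement polls the target until it matches the expected pattern instead of assuming a fixed delay; this sketch reuses only helpers defined in this same file:

err = e2e.GetWithRetry(1*time.Second, 30, func() error {
    res, err := e2eClient.GetNamespacedResource(test.TargetGVR, test.TargetNamespace, test.TargetName)
    if err != nil {
        return err
    }
    actualJSON, err := json.Marshal(res)
    if err != nil {
        return err
    }
    var actual interface{}
    if err := json.Unmarshal(actualJSON, &actual); err != nil {
        return err
    }
    expected, err := rawYAMLToJSONInterface(test.ExpectedTargetRaw)
    if err != nil {
        return err
    }
    // retry until the mutation shows up on the target
    return validate.MatchPattern(logging.GlobalLogger(), actual, expected)
})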
func rawYAMLToJSONInterface(y []byte) (interface{}, error) {
    var temp, result interface{}
    var err error

    err = UnmarshalYAML(y, &temp)
    if err != nil {
        return nil, err
    }

    jsonRaw, err := json.Marshal(temp)
    if err != nil {
        return nil, err
    }

    // assign the error so a failed unmarshal is actually reported
    err = json.Unmarshal(jsonRaw, &result)
    if err != nil {
        return nil, err
    }

    return result, nil
}

// UnmarshalYAML unmarshals YAML to map[string]interface{} instead of map[interface{}]interface{}.
func UnmarshalYAML(in []byte, out interface{}) error {
    var res interface{}

    if err := yaml.Unmarshal(in, &res); err != nil {
        return err
    }
    *out.(*interface{}) = cleanupMapValue(res)

    return nil
}

func cleanupInterfaceArray(in []interface{}) []interface{} {
    res := make([]interface{}, len(in))
    for i, v := range in {
        res[i] = cleanupMapValue(v)
    }
    return res
}

func cleanupInterfaceMap(in map[interface{}]interface{}) map[string]interface{} {
    res := make(map[string]interface{})
    for k, v := range in {
        res[fmt.Sprintf("%v", k)] = cleanupMapValue(v)
    }
    return res
}

func cleanupMapValue(v interface{}) interface{} {
    switch v := v.(type) {
    case []interface{}:
        return cleanupInterfaceArray(v)
    case map[interface{}]interface{}:
        return cleanupInterfaceMap(v)
    case string:
        return v
    default:
        return fmt.Sprintf("%v", v)
    }
}
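The cleanup helpers above exist because gopkg.in/yaml.v2 decodes mappings as map[interface{}]interface{}, which encoding/json refuses to marshal. A self-contained demonstration of the failure mode they work around:

package main

import (
    "encoding/json"
    "fmt"

    "gopkg.in/yaml.v2"
)

func main() {
    var raw interface{}
    if err := yaml.Unmarshal([]byte("a:\n  b: 1\n"), &raw); err != nil {
        panic(err)
    }
    // fails: json cannot encode map[interface{}]interface{} keys
    if _, err := json.Marshal(raw); err != nil {
        fmt.Println("without cleanup:", err)
    }
}

Running cleanupMapValue over raw first converts every key to a string, after which json.Marshal succeeds.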
File diff suppressed because it is too large
@ -1,89 +0,0 @@

package validate

import (
    "github.com/kyverno/kyverno/test/e2e"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

var (
    podGVR       = e2e.GetGVR("", "v1", "pods")
    kustomizeGVR = e2e.GetGVR("kustomize.toolkit.fluxcd.io", "v1beta1", "kustomizations")
)

type ValidationTest struct {
    // TestDescription - Description of the Test
    TestDescription string
    // PolicyName - Name of the Policy
    PolicyName string
    // PolicyRaw - The Yaml file of the ClusterPolicy
    PolicyRaw []byte
    // ResourceName - Name of the Resource
    ResourceName string
    // ResourceNamespace - Namespace of the Resource
    ResourceNamespace string
    // ResourceGVR - GVR of the Resource
    ResourceGVR schema.GroupVersionResource
    // ResourceRaw - The Yaml file of the Resource
    ResourceRaw []byte
    // MustSucceed - indicates if validation must succeed
    MustSucceed bool
}

var FluxValidateTests = []ValidationTest{
    {
        TestDescription:   "test-validate-with-flux-and-variable-substitution-2043",
        PolicyName:        "flux-multi-tenancy",
        PolicyRaw:         kyverno2043Policy,
        ResourceName:      "dev-team",
        ResourceNamespace: "test-validate",
        ResourceGVR:       kustomizeGVR,
        ResourceRaw:       kyverno2043Fluxkustomization,
        MustSucceed:       false,
    },
    {
        TestDescription:   "test-validate-with-flux-and-variable-substitution-2241",
        PolicyName:        "flux-multi-tenancy-2",
        PolicyRaw:         kyverno2241Policy,
        ResourceName:      "tenants",
        ResourceNamespace: "test-validate",
        ResourceGVR:       kustomizeGVR,
        ResourceRaw:       kyverno2241Fluxkustomization,
        MustSucceed:       true,
    },
}

var ValidateTests = []ValidationTest{
    {
        // Case for https://github.com/kyverno/kyverno/issues/2345
        TestDescription:   "checks that the contains function works properly with a string list",
        PolicyName:        "drop-cap-net-raw",
        PolicyRaw:         kyverno2345Policy,
        ResourceName:      "test",
        ResourceNamespace: "test-validate1",
        ResourceGVR:       podGVR,
        ResourceRaw:       kyverno2345Resource,
        MustSucceed:       false,
    },
    {
        // Case for small image validation
        TestDescription:   "checks that images are small",
        PolicyName:        "check-small-images",
        PolicyRaw:         kyvernoSmallImagePolicy,
        ResourceName:      "pod-with-small-image",
        ResourceNamespace: "test-validate",
        ResourceGVR:       podGVR,
        ResourceRaw:       kyvernoPodWithSmallImage,
        MustSucceed:       true,
    },
    {
        // Case for large image rejection
        TestDescription:   "checks that large images are rejected",
        PolicyName:        "check-large-images",
        PolicyRaw:         kyvernoSmallImagePolicy,
        ResourceName:      "pod-with-large-image",
        ResourceNamespace: "test-validate",
        ResourceGVR:       podGVR,
        ResourceRaw:       kyvernoPodWithLargeImage,
        MustSucceed:       false,
    },
}
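A hedged sketch of how a test loop might drive these entries, mirroring the mutate tests elsewhere in this diff (the loop itself is illustrative, not the original runner; e2eClient and policyGVR are assumed to be set up as in the mutate package):

for _, test := range ValidateTests {
    _, err := e2eClient.CreateNamespacedResourceYaml(policyGVR, "", test.PolicyName, test.PolicyRaw)
    Expect(err).NotTo(HaveOccurred())
    // with validationFailureAction: enforce, admission itself accepts or rejects the resource
    _, err = e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, "", test.ResourceRaw)
    if test.MustSucceed {
        Expect(err).NotTo(HaveOccurred())
    } else {
        Expect(err).To(HaveOccurred())
    }
}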
@ -1,680 +0,0 @@

package validate

import (
    "fmt"
)

func newNamespaceYaml(name string) []byte {
    ns := fmt.Sprintf(`
apiVersion: v1
kind: Namespace
metadata:
  name: %s
`, name)
    return []byte(ns)
}

// Regression: https://github.com/kyverno/kyverno/issues/2043
// Policy: https://github.com/fluxcd/flux2-multi-tenancy/blob/main/infrastructure/kyverno-policies/flux-multi-tenancy.yaml
var kyverno2043Policy = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: flux-multi-tenancy
spec:
  validationFailureAction: enforce
  rules:
  - name: serviceAccountName
    exclude:
      resources:
        namespaces:
        - flux-system
    match:
      resources:
        kinds:
        - Kustomization
    validate:
      message: ".spec.serviceAccountName is required"
      pattern:
        spec:
          serviceAccountName: "?*"
  - name: sourceRefNamespace
    exclude:
      resources:
        namespaces:
        - flux-system
    match:
      resources:
        kinds:
        - Kustomization
    validate:
      message: "spec.sourceRef.namespace must be the same as metadata.namespace"
      deny:
        conditions:
        - key: "{{request.object.spec.sourceRef.namespace}}"
          operator: NotEquals
          value: "{{request.object.metadata.namespace}}"
`)

var kyverno2241Policy = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: flux-multi-tenancy-2
spec:
  validationFailureAction: enforce
  rules:
  - name: serviceAccountName
    exclude:
      resources:
        namespaces:
        - flux-system
    match:
      resources:
        kinds:
        - Kustomization
    validate:
      message: ".spec.serviceAccountName is required"
      pattern:
        spec:
          serviceAccountName: "?*"
  - name: sourceRefNamespace
    exclude:
      resources:
        namespaces:
        - flux-system
    match:
      resources:
        kinds:
        - Kustomization
    preconditions:
      any:
      - key: "{{request.object.spec.sourceRef.namespace || ''}}"
        operator: NotEquals
        value: ""
    validate:
      message: "spec.sourceRef.namespace must be the same as metadata.namespace"
      deny:
        conditions:
        - key: "{{request.object.spec.sourceRef.namespace}}"
          operator: NotEquals
          value: "{{request.object.metadata.namespace}}"
`)
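The precondition key in kyverno2241Policy uses JMESPath's || operator to default a missing spec.sourceRef.namespace to the empty string, so the deny condition only fires when the field is actually set. The same expression in isolation, using a JMESPath library equivalent to what Kyverno's variable substitution is built on:

package main

import (
    "fmt"

    "github.com/jmespath/go-jmespath"
)

func main() {
    obj := map[string]interface{}{
        "spec": map[string]interface{}{"sourceRef": map[string]interface{}{}},
    }
    // a missing field evaluates to null, which || replaces with ''
    v, err := jmespath.Search("spec.sourceRef.namespace || ''", obj)
    fmt.Println(v, err) // -> "" <nil>, so the NotEquals "" precondition skips the rule
}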
var kyverno2043Fluxcrd = []byte(`
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  annotations:
    controller-gen.kubebuilder.io/version: v0.5.0
  creationTimestamp: null
  name: kustomizations.kustomize.toolkit.fluxcd.io
spec:
  group: kustomize.toolkit.fluxcd.io
  names:
    kind: Kustomization
    listKind: KustomizationList
    plural: kustomizations
    shortNames:
    - ks
    singular: kustomization
  scope: Namespaced
  versions:
  - additionalPrinterColumns:
    - jsonPath: .status.conditions[?(@.type=="Ready")].status
      name: Ready
      type: string
    - jsonPath: .status.conditions[?(@.type=="Ready")].message
      name: Status
      type: string
    - jsonPath: .metadata.creationTimestamp
      name: Age
      type: date
    name: v1beta1
    schema:
      openAPIV3Schema:
        description: Kustomization is the Schema for the kustomizations API.
        properties:
          apiVersion:
            description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources'
            type: string
          kind:
            description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds'
            type: string
          metadata:
            type: object
          spec:
            description: KustomizationSpec defines the desired state of a kustomization.
            properties:
              decryption:
                description: Decrypt Kubernetes secrets before applying them on the cluster.
                properties:
                  provider:
                    description: Provider is the name of the decryption engine.
                    enum:
                    - sops
                    type: string
                  secretRef:
                    description: The secret name containing the private OpenPGP keys used for decryption.
                    properties:
                      name:
                        description: Name of the referent
                        type: string
                    required:
                    - name
                    type: object
                required:
                - provider
                type: object
              dependsOn:
                description: DependsOn may contain a dependency.CrossNamespaceDependencyReference slice with references to Kustomization resources that must be ready before this Kustomization can be reconciled.
                items:
                  description: CrossNamespaceDependencyReference holds the reference to a dependency.
                  properties:
                    name:
                      description: Name holds the name reference of a dependency.
                      type: string
                    namespace:
                      description: Namespace holds the namespace reference of a dependency.
                      type: string
                  required:
                  - name
                  type: object
                type: array
              force:
                default: false
                description: Force instructs the controller to recreate resources when patching fails due to an immutable field change.
                type: boolean
              healthChecks:
                description: A list of resources to be included in the health assessment.
                items:
                  description: NamespacedObjectKindReference contains enough information to let you locate the typed referenced object in any namespace
                  properties:
                    apiVersion:
                      description: API version of the referent, if not specified the Kubernetes preferred version will be used
                      type: string
                    kind:
                      description: Kind of the referent
                      type: string
                    name:
                      description: Name of the referent
                      type: string
                    namespace:
                      description: Namespace of the referent, when not specified it acts as LocalObjectReference
                      type: string
                  required:
                  - kind
                  - name
                  type: object
                type: array
              images:
                description: Images is a list of (image name, new name, new tag or digest) for changing image names, tags or digests. This can also be achieved with a patch, but this operator is simpler to specify.
                items:
                  description: Image contains an image name, a new name, a new tag or digest, which will replace the original name and tag.
                  properties:
                    digest:
                      description: Digest is the value used to replace the original image tag. If digest is present NewTag value is ignored.
                      type: string
                    name:
                      description: Name is a tag-less image name.
                      type: string
                    newName:
                      description: NewName is the value used to replace the original name.
                      type: string
                    newTag:
                      description: NewTag is the value used to replace the original tag.
                      type: string
                  required:
                  - name
                  type: object
                type: array
              interval:
                description: The interval at which to reconcile the Kustomization.
                type: string
              kubeConfig:
                description: The KubeConfig for reconciling the Kustomization on a remote cluster. When specified, KubeConfig takes precedence over ServiceAccountName.
                properties:
                  secretRef:
                    description: SecretRef holds the name to a secret that contains a 'value' key with the kubeconfig file as the value. It must be in the same namespace as the Kustomization. It is recommended that the kubeconfig is self-contained, and the secret is regularly updated if credentials such as a cloud-access-token expire.
                    properties:
                      name:
                        description: Name of the referent
                        type: string
                    required:
                    - name
                    type: object
                type: object
              patches:
                description: Patches (also called overlays), defined as inline YAML objects.
                items:
                  description: Patch contains either a StrategicMerge or a JSON6902 patch, either a file or inline, and the target the patch should be applied to.
                  properties:
                    patch:
                      description: Patch contains the JSON6902 patch document with an array of operation objects.
                      type: string
                    target:
                      description: Target points to the resources that the patch document should be applied to.
                      properties:
                        annotationSelector:
                          description: AnnotationSelector is a string that follows the label selection expression https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api It matches with the resource annotations.
                          type: string
                        group:
                          description: Group is the API group to select resources from. Together with Version and Kind it is capable of unambiguously identifying and/or selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
                          type: string
                        kind:
                          description: Kind of the API Group to select resources from. Together with Group and Version it is capable of unambiguously identifying and/or selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
                          type: string
                        labelSelector:
                          description: LabelSelector is a string that follows the label selection expression https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api It matches with the resource labels.
|
||||
type: string
|
||||
name:
|
||||
description: Name to match resources with.
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace to select resources from.
|
||||
type: string
|
||||
version:
|
||||
description: Version of the API Group to select resources from. Together with Group and Kind it is capable of unambiguously identifying and/or selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
|
||||
type: string
|
||||
type: object
|
||||
type: object
|
||||
type: array
|
||||
patchesJson6902:
|
||||
description: JSON 6902 patches, defined as inline YAML objects.
|
||||
items:
|
||||
description: JSON6902Patch contains a JSON6902 patch and the target the patch should be applied to.
|
||||
properties:
|
||||
patch:
|
||||
description: Patch contains the JSON6902 patch document with an array of operation objects.
|
||||
items:
|
||||
description: JSON6902 is a JSON6902 operation object. https://tools.ietf.org/html/rfc6902#section-4
|
||||
properties:
|
||||
from:
|
||||
type: string
|
||||
op:
|
||||
enum:
|
||||
- test
|
||||
- remove
|
||||
- add
|
||||
- replace
|
||||
- move
|
||||
- copy
|
||||
type: string
|
||||
path:
|
||||
type: string
|
||||
value:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
required:
|
||||
- op
|
||||
- path
|
||||
type: object
|
||||
type: array
|
||||
target:
|
||||
description: Target points to the resources that the patch document should be applied to.
|
||||
properties:
|
||||
annotationSelector:
|
||||
description: AnnotationSelector is a string that follows the label selection expression https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api It matches with the resource annotations.
|
||||
type: string
|
||||
group:
|
||||
description: Group is the API group to select resources from. Together with Version and Kind it is capable of unambiguously identifying and/or selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the API Group to select resources from. Together with Group and Version it is capable of unambiguously identifying and/or selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
|
||||
type: string
|
||||
labelSelector:
|
||||
description: LabelSelector is a string that follows the label selection expression https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#api It matches with the resource labels.
|
||||
type: string
|
||||
name:
|
||||
description: Name to match resources with.
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace to select resources from.
|
||||
type: string
|
||||
version:
|
||||
description: Version of the API Group to select resources from. Together with Group and Kind it is capable of unambiguously identifying and/or selecting resources. https://github.com/kubernetes/community/blob/master/contributors/design-proposals/api-machinery/api-group.md
|
||||
type: string
|
||||
type: object
|
||||
required:
|
||||
- patch
|
||||
- target
|
||||
type: object
|
||||
type: array
|
||||
patchesStrategicMerge:
|
||||
description: Strategic merge patches, defined as inline YAML objects.
|
||||
items:
|
||||
x-kubernetes-preserve-unknown-fields: true
|
||||
type: array
|
||||
path:
|
||||
description: Path to the directory containing the kustomization.yaml file, or the set of plain YAMLs a kustomization.yaml should be generated for. Defaults to 'None', which translates to the root path of the SourceRef.
|
||||
type: string
|
||||
postBuild:
|
||||
description: PostBuild describes which actions to perform on the YAML manifest generated by building the kustomize overlay.
|
||||
properties:
|
||||
substitute:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: Substitute holds a map of key/value pairs. The variables defined in your YAML manifests that match any of the keys defined in the map will be substituted with the set value. Includes support for bash string replacement functions e.g. ${var:=default}, ${var:position} and ${var/substring/replacement}.
|
||||
type: object
|
||||
substituteFrom:
|
||||
description: SubstituteFrom holds references to ConfigMaps and Secrets containing the variables and their values to be substituted in the YAML manifests. The ConfigMap and the Secret data keys represent the var names and they must match the vars declared in the manifests for the substitution to happen.
|
||||
items:
|
||||
description: SubstituteReference contains a reference to a resource containing the variables name and value.
|
||||
properties:
|
||||
kind:
|
||||
description: Kind of the values referent, valid values are ('Secret', 'ConfigMap').
|
||||
enum:
|
||||
- Secret
|
||||
- ConfigMap
|
||||
type: string
|
||||
name:
|
||||
description: Name of the values referent. Should reside in the same namespace as the referring resource.
|
||||
maxLength: 253
|
||||
minLength: 1
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
type: array
|
||||
type: object
|
||||
prune:
|
||||
description: Prune enables garbage collection.
|
||||
type: boolean
|
||||
retryInterval:
|
||||
description: The interval at which to retry a previously failed reconciliation. When not specified, the controller uses the KustomizationSpec.Interval value to retry failures.
|
||||
type: string
|
||||
serviceAccountName:
|
||||
description: The name of the Kubernetes service account to impersonate when reconciling this Kustomization.
|
||||
type: string
|
||||
sourceRef:
|
||||
description: Reference of the source where the kustomization file is.
|
||||
properties:
|
||||
apiVersion:
|
||||
description: API version of the referent
|
||||
type: string
|
||||
kind:
|
||||
description: Kind of the referent
|
||||
enum:
|
||||
- GitRepository
|
||||
- Bucket
|
||||
type: string
|
||||
name:
|
||||
description: Name of the referent
|
||||
type: string
|
||||
namespace:
|
||||
description: Namespace of the referent, defaults to the Kustomization namespace
|
||||
type: string
|
||||
required:
|
||||
- kind
|
||||
- name
|
||||
type: object
|
||||
suspend:
|
||||
description: This flag tells the controller to suspend subsequent kustomize executions, it does not apply to already started executions. Defaults to false.
|
||||
type: boolean
|
||||
targetNamespace:
|
||||
description: TargetNamespace sets or overrides the namespace in the kustomization.yaml file.
|
||||
maxLength: 63
|
||||
minLength: 1
|
||||
type: string
|
||||
timeout:
|
||||
description: Timeout for validation, apply and health checking operations. Defaults to 'Interval' duration.
|
||||
type: string
|
||||
validation:
|
||||
description: Validate the Kubernetes objects before applying them on the cluster. The validation strategy can be 'client' (local dry-run), 'server' (APIServer dry-run) or 'none'. When 'Force' is 'true', validation will fallback to 'client' if set to 'server' because server-side validation is not supported in this scenario.
|
||||
enum:
|
||||
- none
|
||||
- client
|
||||
- server
|
||||
type: string
|
||||
required:
|
||||
- interval
|
||||
- prune
|
||||
- sourceRef
|
||||
type: object
|
||||
status:
|
||||
description: KustomizationStatus defines the observed state of a kustomization.
|
||||
properties:
|
||||
conditions:
|
||||
items:
|
||||
description: "Condition contains details for one aspect of the current state of this API Resource. --- This struct is intended for direct use as an array at the field path .status.conditions."
|
||||
properties:
|
||||
lastTransitionTime:
|
||||
description: lastTransitionTime is the last time the condition transitioned from one status to another. This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
|
||||
format: date-time
|
||||
type: string
|
||||
message:
|
||||
description: message is a human readable message indicating details about the transition. This may be an empty string.
|
||||
maxLength: 32768
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: observedGeneration represents the .metadata.generation that the condition was set based upon. For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date with respect to the current state of the instance.
|
||||
format: int64
|
||||
minimum: 0
|
||||
type: integer
|
||||
reason:
|
||||
description: reason contains a programmatic identifier indicating the reason for the condition's last transition. Producers of specific condition types may define expected values and meanings for this field, and whether the values are considered a guaranteed API. The value should be a CamelCase string. This field may not be empty.
|
||||
maxLength: 1024
|
||||
minLength: 1
|
||||
pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
|
||||
type: string
|
||||
status:
|
||||
description: status of the condition, one of True, False, Unknown.
|
||||
enum:
|
||||
- "True"
|
||||
- "False"
|
||||
- Unknown
|
||||
type: string
|
||||
type:
|
||||
description: type of condition in CamelCase or in foo.example.com/CamelCase. --- Many .condition.type values are consistent across resources like Available, but because arbitrary conditions can be useful (see .node.status.conditions), the ability to deconflict is important. The regex it matches is (dns1123SubdomainFmt/)?(qualifiedNameFmt)
|
||||
maxLength: 316
|
||||
pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
|
||||
type: string
|
||||
required:
|
||||
- lastTransitionTime
|
||||
- message
|
||||
- reason
|
||||
- status
|
||||
- type
|
||||
type: object
|
||||
type: array
|
||||
lastAppliedRevision:
|
||||
description: The last successfully applied revision. The revision format for Git sources is <branch|tag>/<commit-sha>.
|
||||
type: string
|
||||
lastAttemptedRevision:
|
||||
description: LastAttemptedRevision is the revision of the last reconciliation attempt.
|
||||
type: string
|
||||
lastHandledReconcileAt:
|
||||
description: LastHandledReconcileAt holds the value of the most recent reconcile request value, so a change can be detected.
|
||||
type: string
|
||||
observedGeneration:
|
||||
description: ObservedGeneration is the last reconciled generation.
|
||||
format: int64
|
||||
type: integer
|
||||
snapshot:
|
||||
description: The last successfully applied revision metadata.
|
||||
properties:
|
||||
checksum:
|
||||
description: The manifests sha1 checksum.
|
||||
type: string
|
||||
entries:
|
||||
description: A list of Kubernetes kinds grouped by namespace.
|
||||
items:
|
||||
description: Snapshot holds the metadata of namespaced Kubernetes objects
|
||||
properties:
|
||||
kinds:
|
||||
additionalProperties:
|
||||
type: string
|
||||
description: The list of Kubernetes kinds.
|
||||
type: object
|
||||
namespace:
|
||||
description: The namespace of this entry.
|
||||
type: string
|
||||
required:
|
||||
- kinds
|
||||
type: object
|
||||
type: array
|
||||
required:
|
||||
- checksum
|
||||
- entries
|
||||
type: object
|
||||
type: object
|
||||
type: object
|
||||
served: true
|
||||
storage: true
|
||||
subresources:
|
||||
status: {}
|
||||
status:
|
||||
acceptedNames:
|
||||
kind: ""
|
||||
plural: ""
|
||||
conditions: []
|
||||
storedVersions: []
|
||||
`)
|
||||
|
||||
var kyverno2043Fluxkustomization = []byte(`
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: dev-team
|
||||
namespace: test-validate
|
||||
spec:
|
||||
serviceAccountName: dev-team
|
||||
interval: 5m
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: dev-team
|
||||
prune: true
|
||||
validation: client
|
||||
`)
|
||||
|
||||
var kyverno2241Fluxkustomization = []byte(`
|
||||
apiVersion: kustomize.toolkit.fluxcd.io/v1beta1
|
||||
kind: Kustomization
|
||||
metadata:
|
||||
name: tenants
|
||||
namespace: test-validate
|
||||
spec:
|
||||
serviceAccountName: dev-team
|
||||
interval: 5m
|
||||
sourceRef:
|
||||
kind: GitRepository
|
||||
name: flux-system
|
||||
path: ./tenants/production
|
||||
prune: true
|
||||
validation: client
|
||||
`)
|
||||
|
||||
var kyverno2345Policy = []byte(`
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: drop-cap-net-raw
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
background: false
|
||||
rules:
|
||||
- name: drop-cap-net-raw
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
validate:
|
||||
deny:
|
||||
conditions:
|
||||
any:
|
||||
- key: "{{ request.object.spec.containers[].securityContext.capabilities.drop[] | contains(@, 'NET_RAW') }}"
|
||||
operator: Equals
|
||||
value: false
|
||||
`)
|
||||
|
||||
var kyverno2345Resource = []byte(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: test
|
||||
namespace: test-validate1
|
||||
spec:
|
||||
initContainers:
|
||||
- name: jimmy
|
||||
image: defdasdabian:923
|
||||
command: ["/bin/sh", "-c", "sleep infinity"]
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- XXXNET_RAWYYY
|
||||
- SETUID
|
||||
containers:
|
||||
- name: test
|
||||
image: defdasdabian:923
|
||||
command: ["/bin/sh", "-c", "sleep infinity"]
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- XXXNET_RAWYYY
|
||||
- SETUID
|
||||
- CAP_FOO_BAR
|
||||
- name: asdf
|
||||
image: defdasdabian:923
|
||||
command: ["/bin/sh", "-c", "sleep infinity"]
|
||||
securityContext:
|
||||
capabilities:
|
||||
drop:
|
||||
- CAP_SOMETHING
|
||||
`)
|
||||
|
||||
var kyvernoSmallImagePolicy = []byte(`
|
||||
apiVersion: kyverno.io/v1
|
||||
kind: ClusterPolicy
|
||||
metadata:
|
||||
name: images
|
||||
spec:
|
||||
validationFailureAction: enforce
|
||||
rules:
|
||||
- name: only-allow-small-images
|
||||
match:
|
||||
resources:
|
||||
kinds:
|
||||
- Pod
|
||||
preconditions:
|
||||
- key: "{{request.operation}}"
|
||||
operator: NotEquals
|
||||
value: DELETE
|
||||
validate:
|
||||
message: "images with size greater than 2Gi not allowed"
|
||||
foreach:
|
||||
- list: "request.object.spec.containers"
|
||||
context:
|
||||
- name: imageSize
|
||||
imageRegistry:
|
||||
reference: "{{ element.image }}"
|
||||
# Note that we need to use "to_string" here to allow kyverno to treat it like a resource quantity of type memory
|
||||
# the total size of an image as calculated by docker is the total sum of its layer sizes
|
||||
jmesPath: "to_string(sum(manifest.layers[*].size))"
|
||||
deny:
|
||||
conditions:
|
||||
- key: "2Gi"
|
||||
operator: LessThan
|
||||
value: "{{imageSize}}"
|
||||
`)
|
||||
|
||||
var kyvernoPodWithSmallImage = []byte(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: small-image
|
||||
spec:
|
||||
containers:
|
||||
- name: small-image
|
||||
image: busybox:latest
|
||||
`)
|
||||
|
||||
var kyvernoPodWithLargeImage = []byte(`
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
name: large-image
|
||||
spec:
|
||||
containers:
|
||||
- name: large-image
|
||||
image: nvidia/cuda:11.6.0-devel-ubi8
|
||||
`)
|
|
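The JMESPath expression in kyvernoSmallImagePolicy carries the real logic of the size check, so it is worth seeing in isolation. Below is a minimal sketch of what it evaluates to, using the upstream github.com/jmespath/go-jmespath library and a hand-written stand-in for the registry manifest; both are assumptions, since Kyverno resolves the expression through its own JMESPath wrapper and the real manifest comes from the imageRegistry context entry.

package main

import (
	"fmt"

	"github.com/jmespath/go-jmespath"
)

func main() {
	// Hand-written stand-in for the OCI manifest that the imageRegistry
	// context entry would fetch for "{{ element.image }}".
	data := map[string]interface{}{
		"manifest": map[string]interface{}{
			"layers": []interface{}{
				map[string]interface{}{"size": 1024.0},
				map[string]interface{}{"size": 2048.0},
			},
		},
	}
	// Same expression as the policy: sum the layer sizes, then stringify the
	// number so it can be compared against "2Gi" as a memory quantity.
	out, err := jmespath.Search("to_string(sum(manifest.layers[*].size))", data)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // "3072"
}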
@@ -1,234 +0,0 @@
package validate

import (
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/kyverno/kyverno/test/e2e"
	commonE2E "github.com/kyverno/kyverno/test/e2e/common"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	k8sErrors "k8s.io/apimachinery/pkg/api/errors"
)

var (
	// ClusterPolicy GVR
	policyGVR = e2e.GetGVR("kyverno.io", "v1", "clusterpolicies")
	// Namespace GVR
	namespaceGVR = e2e.GetGVR("", "v1", "namespaces")

	crdGVR = e2e.GetGVR("apiextensions.k8s.io", "v1", "customresourcedefinitions")

	// ClusterPolicy Namespace
	policyNamespace = ""

	crdName = "kustomizations.kustomize.toolkit.fluxcd.io"
)

func Test_Validate_Flux_Sets(t *testing.T) {
	RegisterTestingT(t)
	if os.Getenv("E2E") == "" {
		t.Skip("Skipping E2E Test")
	}

	// Generate E2E Client
	e2eClient, err := e2e.NewE2EClient()
	Expect(err).To(BeNil())

	// Create Flux CRD
	err = createKustomizationCRD(e2eClient)
	Expect(err).NotTo(HaveOccurred())

	// A created CRD is no guarantee that new resources can already be created
	time.Sleep(10 * time.Second)

	for _, test := range FluxValidateTests {
		By(fmt.Sprintf("Validate Test: %s", test.TestDescription))

		err = deleteClusterPolicy(e2eClient)
		Expect(err).NotTo(HaveOccurred())

		err = deleteResource(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = deleteNamespace(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = createNamespace(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = createPolicy(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = createResource(e2eClient, test)

		if test.MustSucceed {
			Expect(err).NotTo(HaveOccurred())
		} else {
			Expect(err).To(HaveOccurred())
		}

		err = deleteClusterPolicy(e2eClient)
		Expect(err).NotTo(HaveOccurred())

		err = deleteResource(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = deleteNamespace(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		By("Test passed successfully: " + test.TestDescription)
	}

	err = deleteKustomizationCRD(e2eClient)
	Expect(err).NotTo(HaveOccurred())
}

func TestValidate(t *testing.T) {
	RegisterTestingT(t)
	if os.Getenv("E2E") == "" {
		t.Skip("Skipping E2E Test")
	}

	e2eClient, err := e2e.NewE2EClient()
	Expect(err).To(BeNil())

	for _, test := range ValidateTests {
		By(fmt.Sprintf("Validate Test: %s", test.TestDescription))

		err = deleteClusterPolicy(e2eClient)
		Expect(err).NotTo(HaveOccurred())

		err = deleteResource(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = deleteNamespace(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = createNamespace(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = createPolicy(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = createResource(e2eClient, test)

		statusErr, ok := err.(*k8sErrors.StatusError)
		validationError := ok && statusErr.ErrStatus.Code == 400 // a validation error is always Bad Request

		if test.MustSucceed || !validationError {
			Expect(err).NotTo(HaveOccurred())
		} else {
			Expect(err).To(HaveOccurred())
		}

		err = deleteClusterPolicy(e2eClient)
		Expect(err).NotTo(HaveOccurred())

		err = deleteResource(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		err = deleteNamespace(e2eClient, test)
		Expect(err).NotTo(HaveOccurred())

		By("Done")
	}
}

func createNamespace(e2eClient *e2e.E2EClient, test ValidationTest) error {
	By(fmt.Sprintf("Creating Namespace: %s...", test.ResourceNamespace))
	_, err := e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(test.ResourceNamespace))
	Expect(err).NotTo(HaveOccurred())

	By("Wait Till Creation of Namespace...")
	err = e2e.GetWithRetry(1*time.Second, 240, func() error {
		_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
		if err != nil {
			return err
		}

		return nil
	})
	return err
}

func createPolicy(e2eClient *e2e.E2EClient, test ValidationTest) error {
	By("Creating Policy...")
	_, err := e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
	Expect(err).NotTo(HaveOccurred())

	err = commonE2E.PolicyCreated(test.PolicyName)
	return err
}

func createResource(e2eClient *e2e.E2EClient, test ValidationTest) error {
	By("Creating Resource...")
	_, err := e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, test.ResourceName, test.ResourceRaw)
	return err
}

func createKustomizationCRD(e2eClient *e2e.E2EClient) error {
	By("Creating Flux CRD")
	_, err := e2eClient.CreateClusteredResourceYaml(crdGVR, kyverno2043Fluxcrd)
	Expect(err).NotTo(HaveOccurred())

	// Wait till CRD is created
	By("Wait Till Creation of CRD...")
	err = e2e.GetWithRetry(1*time.Second, 240, func() error {
		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
		if err == nil {
			return nil
		}
		return fmt.Errorf("failed to create CRD: %v", err)
	})
	return err
}

func deleteClusterPolicy(e2eClient *e2e.E2EClient) error {
	By("Deleting Cluster Policies...")
	err := e2eClient.CleanClusterPolicies(policyGVR)
	return err
}

func deleteResource(e2eClient *e2e.E2EClient, test ValidationTest) error {
	By("Deleting Resource...")
	err := e2eClient.DeleteNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)
	if k8sErrors.IsNotFound(err) {
		return nil
	}
	return err
}

func deleteNamespace(e2eClient *e2e.E2EClient, test ValidationTest) error {
	By(fmt.Sprintf("Deleting Namespace: %s...", test.ResourceNamespace))
	_ = e2eClient.DeleteClusteredResource(namespaceGVR, test.ResourceNamespace)

	By("Wait Till Deletion of Namespace...")
	err := e2e.GetWithRetry(1*time.Second, 240, func() error {
		_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
		if err != nil {
			return nil
		}
		return fmt.Errorf("failed to delete namespace: %v", err)
	})
	return err
}

func deleteKustomizationCRD(e2eClient *e2e.E2EClient) error {
	By("Deleting Flux CRD")
	_ = e2eClient.DeleteClusteredResource(crdGVR, crdName)

	// Wait till CRD is deleted
	By("Wait Till Deletion of CRD...")
	err := e2e.GetWithRetry(1*time.Second, 240, func() error {
		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
		if err != nil {
			return nil
		}
		return fmt.Errorf("failed to delete CRD: %v", err)
	})
	return err
}
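A side note on the 400 check in TestValidate: apimachinery already ships a helper for this comparison, so the test could have leaned on it instead of unwrapping the StatusError by hand. A hypothetical equivalent that could live in the same file (isValidationError is not part of the original source, and this assumes admission denials keep arriving with reason BadRequest):

// isValidationError reports whether err is the HTTP 400 StatusError that
// admission webhook denials surface as.
func isValidationError(err error) bool {
	return k8sErrors.IsBadRequest(err)
}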
@@ -1,108 +0,0 @@
package verifyimages

import (
	"github.com/kyverno/kyverno/test/e2e"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

var (
	taskGVR = e2e.GetGVR("tekton.dev", "v1beta1", "tasks")
	podGVR  = e2e.GetGVR("", "v1", "pods")
)

var VerifyImagesTests = []struct {
	// TestName - Name of the Test
	TestName string
	// PolicyName - Name of the Policy
	PolicyName string
	// PolicyRaw - The YAML of the ClusterPolicy
	PolicyRaw []byte
	// ResourceName - Name of the Resource
	ResourceName string
	// ResourceNamespace - Namespace of the Resource
	ResourceNamespace string
	// ResourceGVR - GVR of the Resource
	ResourceGVR schema.GroupVersionResource
	// ResourceRaw - The YAML of the Resource
	ResourceRaw []byte
	// MustSucceed - indicates if resource creation must succeed
	MustSucceed bool
}{
	{
		// Case for custom image extraction
		TestName:          "checks that custom images are populated with simple extractor",
		PolicyName:        "tasks-simple",
		PolicyRaw:         kyvernoTaskPolicyWithSimpleExtractor,
		ResourceName:      "example-task-name",
		ResourceNamespace: "test-verify-images",
		ResourceGVR:       taskGVR,
		ResourceRaw:       tektonTask,
		MustSucceed:       false,
	},
	{
		// Case for custom image extraction
		TestName:          "checks that custom images are populated with complex extractor",
		PolicyName:        "tasks-complex",
		PolicyRaw:         kyvernoTaskPolicyWithComplexExtractor,
		ResourceName:      "example-task-name",
		ResourceNamespace: "test-verify-images",
		ResourceGVR:       taskGVR,
		ResourceRaw:       tektonTask,
		MustSucceed:       false,
	},
	{
		// Case for custom image extraction
		TestName:          "checks that custom images are not populated",
		PolicyName:        "tasks-no-extractor",
		PolicyRaw:         kyvernoTaskPolicyWithoutExtractor,
		ResourceName:      "example-task-name",
		ResourceNamespace: "test-verify-images",
		ResourceGVR:       taskGVR,
		ResourceRaw:       tektonTask,
		MustSucceed:       true,
	},
	{
		// Case for custom image extraction
		TestName:          "checks that custom images are populated and verified",
		PolicyName:        "tasks-keyless",
		PolicyRaw:         kyvernoTaskPolicyKeyless,
		ResourceName:      "example-task-name",
		ResourceNamespace: "test-verify-images",
		ResourceGVR:       taskGVR,
		ResourceRaw:       tektonTaskVerified,
		MustSucceed:       true,
	},
	{
		// Case for custom image extraction
		TestName:          "checks that custom images are populated and verified for all images",
		PolicyName:        "tasks-keyless-required",
		PolicyRaw:         kyvernoTaskPolicyKeylessRequired,
		ResourceName:      "example-task-name",
		ResourceNamespace: "test-verify-images",
		ResourceGVR:       taskGVR,
		ResourceRaw:       tektonTaskVerified,
		MustSucceed:       true,
	},
	{
		// Success case to check secret in attestors.entries.keys
		TestName:          "secret-in-keys-success",
		PolicyName:        "secret-in-keys",
		PolicyRaw:         kyvernoPolicyWithSecretInKeys,
		ResourceName:      "test-secret-pod",
		ResourceNamespace: "test-verify-images",
		ResourceGVR:       podGVR,
		ResourceRaw:       secretPodResourceSuccess,
		MustSucceed:       true,
	},
	{
		// Failure case to check secret in attestors.entries.keys
		TestName:          "secret-in-keys-failure",
		PolicyName:        "secret-in-keys",
		PolicyRaw:         kyvernoPolicyWithSecretInKeys,
		ResourceName:      "test-secret-pod",
		ResourceNamespace: "test-verify-images",
		ResourceGVR:       podGVR,
		ResourceRaw:       secretPodResourceFailed,
		MustSucceed:       false,
	},
}
@@ -1,308 +0,0 @@
package verifyimages

import "fmt"

func newNamespaceYaml(name string) []byte {
	ns := fmt.Sprintf(`
apiVersion: v1
kind: Namespace
metadata:
  name: %s
`, name)
	return []byte(ns)
}

var tektonTaskCRD = []byte(`
apiVersion: apiextensions.k8s.io/v1
kind: CustomResourceDefinition
metadata:
  name: tasks.tekton.dev
spec:
  group: tekton.dev
  preserveUnknownFields: false
  versions:
    - name: v1beta1
      served: true
      storage: true
      schema:
        openAPIV3Schema:
          type: object
          x-kubernetes-preserve-unknown-fields: true
      subresources:
        status: {}
  names:
    kind: Task
    plural: tasks
    categories:
      - tekton
      - tekton-pipelines
  scope: Namespaced
`)

var tektonTask = []byte(`
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: example-task-name
spec:
  steps:
    - name: ubuntu-example
      image: ubuntu:bionic
`)

var tektonTaskVerified = []byte(`
apiVersion: tekton.dev/v1beta1
kind: Task
metadata:
  name: example-task-name
spec:
  steps:
    - name: cosign
      image: ghcr.io/sigstore/cosign/cosign
`)

// not adding cosign.key and cosign.password as we only need cosign.pub
var secretResource = []byte(`
apiVersion: v1
kind: Secret
metadata:
  name: testsecret
  namespace: test-verify-images
data:
  cosign.pub: LS0tLS1CRUdJTiBQVUJMSUMgS0VZLS0tLS0KTUZrd0V3WUhLb1pJemowQ0FRWUlLb1pJemowREFRY0RRZ0FFOG5YUmg5NTBJWmJSajhSYS9OOXNicU9QWnJmTQo1L0tBUU4wL0tqSGNvcm0vSjV5Y3RWZDdpRWNuZXNzUlFqVTkxN2htS082SldWR0hwRGd1SXlha1pBPT0KLS0tLS1FTkQgUFVCTElDIEtFWS0tLS0t
type: Opaque
`)

var secretPodResourceSuccess = []byte(`
apiVersion: v1
kind: Pod
metadata:
  name: test-secret-pod
  namespace: test-verify-images
spec:
  containers:
    - image: ghcr.io/kyverno/test-verify-image:signed
      name: test-secret
`)

var secretPodResourceFailed = []byte(`
apiVersion: v1
kind: Pod
metadata:
  name: test-secret-pod
  namespace: test-verify-images
spec:
  containers:
    - image: ghcr.io/kyverno/test-verify-image:unsigned
      name: test-secret
`)

var kyvernoPolicyWithSecretInKeys = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: secret-in-keys
spec:
  validationFailureAction: enforce
  background: false
  webhookTimeoutSeconds: 30
  failurePolicy: Fail
  rules:
    - name: check-secret-in-keys
      match:
        resources:
          kinds:
            - Pod
      verifyImages:
        - imageReferences:
            - "ghcr.io/kyverno/test-verify-image:*"
          attestors:
            - entries:
                - keys:
                    secret:
                      name: testsecret
                      namespace: test-verify-images
`)

var kyvernoTaskPolicyWithSimpleExtractor = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: tasks-simple
spec:
  validationFailureAction: enforce
  rules:
    - name: verify-images
      match:
        resources:
          kinds:
            - tekton.dev/v1beta1/Task
      preconditions:
        - key: '{{request.operation}}'
          operator: NotEquals
          value: DELETE
      imageExtractors:
        Task:
          - path: /spec/steps/*/image
      verifyImages:
        - image: "*"
          key: |-
            -----BEGIN PUBLIC KEY-----
            MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM
            5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==
            -----END PUBLIC KEY-----
`)

var kyvernoTaskPolicyWithComplexExtractor = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: tasks-complex
spec:
  validationFailureAction: enforce
  rules:
    - name: verify-images
      match:
        resources:
          kinds:
            - tekton.dev/v1beta1/Task
      preconditions:
        - key: '{{request.operation}}'
          operator: NotEquals
          value: DELETE
      imageExtractors:
        Task:
          - path: /spec/steps/*
            name: steps
            value: image
            key: name
      verifyImages:
        - image: "*"
          key: |-
            -----BEGIN PUBLIC KEY-----
            MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM
            5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==
            -----END PUBLIC KEY-----
`)

var kyvernoTaskPolicyKeyless = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: tasks-keyless
spec:
  validationFailureAction: enforce
  webhookTimeoutSeconds: 30
  rules:
    - name: verify-images
      match:
        resources:
          kinds:
            - tekton.dev/v1beta1/Task
      preconditions:
        - key: '{{request.operation}}'
          operator: NotEquals
          value: DELETE
      imageExtractors:
        Task:
          - path: /spec/steps/*/image
      verifyImages:
        - imageReferences:
            - "ghcr.io/*"
          attestors:
            - count: 1
              entries:
                - keyless:
                    issuer: "https://token.actions.githubusercontent.com"
                    subject: "https://github.com/*"
                    rekor:
                      url: https://rekor.sigstore.dev
          required: false
`)

var kyvernoTaskPolicyKeylessRequired = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: tasks-keyless-required
spec:
  validationFailureAction: enforce
  webhookTimeoutSeconds: 30
  rules:
    - name: verify-images
      match:
        resources:
          kinds:
            - tekton.dev/v1beta1/Task
      preconditions:
        - key: '{{request.operation}}'
          operator: NotEquals
          value: DELETE
      imageExtractors:
        Task:
          - path: /spec/steps/*/image
      verifyImages:
        - imageReferences:
            - "ghcr.io/*"
          attestors:
            - count: 1
              entries:
                - keyless:
                    issuer: "https://token.actions.githubusercontent.com"
                    subject: "https://github.com/*"
                    rekor:
                      url: https://rekor.sigstore.dev
          required: true
`)

var kyvernoTaskPolicyWithoutExtractor = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: tasks-no-extractor
spec:
  validationFailureAction: enforce
  rules:
    - name: verify-images
      match:
        resources:
          kinds:
            - tekton.dev/v1beta1/Task
      preconditions:
        - key: '{{request.operation}}'
          operator: NotEquals
          value: DELETE
      verifyImages:
        - image: "*"
          key: |-
            -----BEGIN PUBLIC KEY-----
            MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAE8nXRh950IZbRj8Ra/N9sbqOPZrfM
            5/KAQN0/KjHcorm/J5yctVd7iEcnessRQjU917hmKO6JWVGHpDguIyakZA==
            -----END PUBLIC KEY-----
`)

var cpolVerifyImages = []byte(`
apiVersion: kyverno.io/v1
kind: ClusterPolicy
metadata:
  name: verify-images
spec:
  validationFailureAction: enforce
  rules:
    - name: check-image-sig
      match:
        any:
          - resources:
              kinds:
                - Pod
      verifyImages:
        - image: "harbor2.zoller.com/cosign/*"
          mutateDigest: false
          verifyDigest: false
          required: false
          key: |-
            -----BEGIN PUBLIC KEY-----
            MFkwEwYHKoZIzj0CAQYIKoZIzj0DAQcDQgAEpNlOGZ323zMlhs4bcKSpAKQvbcWi
            5ZLRmijm6SqXDy0Fp0z0Eal+BekFnLzs8rUXUaXlhZ3hNudlgFJH+nFNMw==
            -----END PUBLIC KEY-----
`)
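The imageExtractors blocks above are the interesting part of these fixtures: they tell Kyverno where image references live in a custom resource. A purely illustrative sketch of what resolving the simple path /spec/steps/*/image amounts to for tektonTask (a hand-rolled walk over an untyped object, not Kyverno's actual extractor implementation):

package main

import "fmt"

func main() {
	// Minimal stand-in for the tektonTask object defined above.
	task := map[string]interface{}{
		"spec": map[string]interface{}{
			"steps": []interface{}{
				map[string]interface{}{"name": "ubuntu-example", "image": "ubuntu:bionic"},
			},
		},
	}
	// /spec/steps/*/image: descend into spec.steps, fan out over the list
	// with the wildcard, and collect each step's image field.
	steps := task["spec"].(map[string]interface{})["steps"].([]interface{})
	for _, s := range steps {
		fmt.Println(s.(map[string]interface{})["image"]) // ubuntu:bionic
	}
}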
@@ -1,186 +0,0 @@
package verifyimages

import (
	"errors"
	"fmt"
	"os"
	"testing"
	"time"

	"github.com/kyverno/kyverno/test/e2e"
	"github.com/kyverno/kyverno/test/e2e/framework"
	"github.com/kyverno/kyverno/test/e2e/framework/id"
	"github.com/kyverno/kyverno/test/e2e/framework/step"
	. "github.com/onsi/ginkgo"
	. "github.com/onsi/gomega"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
)

var (
	// ClusterPolicy GVR
	policyGVR = e2e.GetGVR("kyverno.io", "v1", "clusterpolicies")
	// Namespace GVR
	namespaceGVR = e2e.GetGVR("", "v1", "namespaces")
	// Secret GVR
	secretGVR = e2e.GetGVR("", "v1", "secrets")

	crdGVR = e2e.GetGVR("apiextensions.k8s.io", "v1", "customresourcedefinitions")

	// Namespace Name
	// Hardcoded in YAML Definition
	nspace  = "test-image-verify"
	crdName = "tasks.tekton.dev"
)

func TestImageVerify(t *testing.T) {
	RegisterTestingT(t)
	if os.Getenv("E2E") == "" {
		t.Skip("Skipping E2E Test")
	}

	// Generate E2E Client
	e2eClient, err := e2e.NewE2EClient()
	Expect(err).To(BeNil())

	By(fmt.Sprintf("Deleting CRD: %s...", crdName))
	e2eClient.DeleteClusteredResource(crdGVR, crdName)

	By("Wait Till Deletion of CRD...")
	err = e2e.GetWithRetry(1*time.Second, 15, func() error {
		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
		if err != nil {
			return nil
		}

		return fmt.Errorf("failed to delete CRD: %v", err)
	})
	Expect(err).NotTo(HaveOccurred())

	// Create Tekton CRD
	By("Creating Tekton CRD")
	_, err = e2eClient.CreateClusteredResourceYaml(crdGVR, tektonTaskCRD)
	Expect(err).NotTo(HaveOccurred())

	// Wait till CRD is created
	e2e.GetWithRetry(1*time.Second, 15, func() error {
		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
		if err == nil {
			return nil
		}
		return errors.New("waiting for CRD to be created...")
	})

	// A created CRD is no guarantee that new resources can already be created
	time.Sleep(15 * time.Second)

	for _, tcase := range VerifyImagesTests {
		test := tcase
		By("Deleting Cluster Policies...")
		_ = e2eClient.CleanClusterPolicies(policyGVR)

		By("Deleting Resource...")
		_ = e2eClient.DeleteNamespacedResource(test.ResourceGVR, test.ResourceNamespace, test.ResourceName)

		By(fmt.Sprintf("Deleting Namespace: %s...", test.ResourceNamespace))
		_ = e2eClient.DeleteClusteredResource(namespaceGVR, test.ResourceNamespace)

		By("Wait Till Deletion of Namespace...")
		// deleting test-secret-pod might take some time, hence the longer timeout period
		err = e2e.GetWithRetry(20*time.Second, 15, func() error {
			_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
			if err != nil {
				return nil
			}
			return fmt.Errorf("failed to delete namespace: %v", err)
		})
		Expect(err).NotTo(HaveOccurred())

		By(fmt.Sprintf("Creating Namespace: %s...", test.ResourceNamespace))
		_, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(test.ResourceNamespace))
		Expect(err).NotTo(HaveOccurred())

		By("Wait Till Creation of Namespace...")
		err = e2e.GetWithRetry(1*time.Second, 15, func() error {
			_, err := e2eClient.GetClusteredResource(namespaceGVR, test.ResourceNamespace)
			if err != nil {
				return err
			}

			return nil
		})
		Expect(err).NotTo(HaveOccurred())

		// Create policy
		By(fmt.Sprintf("Creating policy \"%s\"", test.PolicyName))
		err = e2e.GetWithRetry(1*time.Second, 30, func() error {
			_, err := e2eClient.CreateClusteredResourceYaml(policyGVR, test.PolicyRaw)
			if err != nil {
				return err
			}

			return nil
		})
		Expect(err).NotTo(HaveOccurred())

		if test.PolicyName == "secret-in-keys" {
			By("Creating testsecret...")
			_, err := e2eClient.CreateNamespacedResourceYaml(secretGVR, test.ResourceNamespace, "testsecret", secretResource)
			Expect(err).NotTo(HaveOccurred())
		}

		Expect(e2eClient.ClusterPolicyReady(test.PolicyName)).To(BeTrue())

		By("Creating Resource...")
		_, err := e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, test.ResourceName, test.ResourceRaw)
		if test.MustSucceed {
			Expect(err).NotTo(HaveOccurred())
		} else {
			Expect(err).To(HaveOccurred())
		}

		// Clean up policies
		By("Deleting Cluster Policies...")
		err = e2eClient.CleanClusterPolicies(policyGVR)
		Expect(err).NotTo(HaveOccurred())

		// Clear Namespace
		e2eClient.DeleteClusteredResource(namespaceGVR, nspace)
		// Wait Till Deletion of Namespace
		// deleting test-secret-pod might take some time, hence the longer timeout period
		e2e.GetWithRetry(20*time.Second, 15, func() error {
			_, err := e2eClient.GetClusteredResource(namespaceGVR, nspace)
			if err != nil {
				return nil
			}
			return errors.New("waiting for namespace deletion")
		})

		By(fmt.Sprintf("Test %s Completed \n\n\n", test.TestName))
	}
	// CleanUp CRDs
	e2eClient.DeleteClusteredResource(crdGVR, crdName)
}

func Test_BoolFields(t *testing.T) {
	framework.Setup(t)
	for _, field := range []string{"mutateDigest", "verifyDigest", "required"} {
		framework.RunSubTest(t, field,
			step.CreateClusterPolicy(cpolVerifyImages),
			step.By(fmt.Sprintf("Checking spec.rules[0].verifyImages[0].%s is false ...", field)),
			step.ExpectResource(id.ClusterPolicy("verify-images"), func(resource *unstructured.Unstructured) {
				rules, found, err := unstructured.NestedSlice(resource.UnstructuredContent(), "spec", "rules")
				Expect(err).NotTo(HaveOccurred())
				Expect(found).To(BeTrue())
				verifyImages, found, err := unstructured.NestedSlice(rules[0].(map[string]interface{}), "verifyImages")
				Expect(err).NotTo(HaveOccurred())
				Expect(found).To(BeTrue())
				boolValue, found, err := unstructured.NestedBool(verifyImages[0].(map[string]interface{}), field)
				Expect(err).NotTo(HaveOccurred())
				Expect(found).To(BeTrue())
				Expect(boolValue).To(BeFalse())
			}),
		)
	}
}
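Both test files poll the cluster through e2e.GetWithRetry. Its behaviour can be inferred from the call sites: probe repeatedly until the function returns nil, sleeping between attempts. A minimal sketch of such a helper, which may differ from the real implementation in test/e2e:

package e2e

import "time"

// GetWithRetry calls getFunc up to retryCount times, sleeping sleepInterval
// between attempts; it returns nil on the first success, otherwise the last error.
func GetWithRetry(sleepInterval time.Duration, retryCount int, getFunc func() error) error {
	var err error
	for i := 0; i < retryCount; i++ {
		if err = getFunc(); err == nil {
			return nil
		}
		time.Sleep(sleepInterval)
	}
	return err
}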