Enable tests in makefile (#3699)
commit dd0f6baa7d (parent 96b33f6200)
5 changed files with 99 additions and 85 deletions
Makefile

@@ -267,7 +267,7 @@ test-clean: ## Clean tests cache
 	go clean -testcache ./...
 
 .PHONY: test-cli
-test-cli: test-cli-policies test-cli-local test-cli-local-mutate test-cli-test-case-selector-flag
+test-cli: test-cli-policies test-cli-local test-cli-local-mutate test-cli-test-case-selector-flag test-cli-registry
 
 .PHONY: test-cli-policies
 test-cli-policies: cli

@@ -287,7 +287,7 @@ test-cli-test-case-selector-flag: cli
 
 .PHONY: test-cli-registry
 test-cli-registry: cli
-	cmd/cli/kubectl-kyverno/kyverno test ./test/cli/registry
+	cmd/cli/kubectl-kyverno/kyverno test ./test/cli/registry --registry
 
 test-unit: $(GO_ACC) ## Run unit tests
 	@echo "	running unit tests"

@@ -302,6 +302,7 @@ code-cov-report: ## Generate code coverage report
 # Test E2E
 test-e2e:
 	$(eval export E2E="ok")
+	go test ./test/e2e/verifyimages -v
 	go test ./test/e2e/metrics -v
 	go test ./test/e2e/mutate -v
 	go test ./test/e2e/generate -v

@@ -311,6 +312,7 @@ test-e2e-local:
 	$(eval export E2E="ok")
 	kubectl apply -f https://raw.githubusercontent.com/kyverno/kyverno/main/config/github/rbac.yaml
 	kubectl port-forward -n kyverno service/kyverno-svc-metrics 8000:8000 &
+	go test ./test/e2e/verifyimages -v
 	go test ./test/e2e/metrics -v
 	go test ./test/e2e/mutate -v
 	go test ./test/e2e/generate -v
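Both e2e targets export E2E=ok before invoking go test, and the e2e suites are expected to skip unless that variable is set. A minimal sketch of such a guard, assuming the suites gate on os.Getenv("E2E") (the check itself is outside this diff):

package e2e_test

import (
	"os"
	"testing"
)

// skipUnlessE2E skips the calling test unless the Makefile (or the caller)
// has exported E2E, mirroring `$(eval export E2E="ok")` in the targets above.
func skipUnlessE2E(t *testing.T) {
	if os.Getenv("E2E") == "" {
		t.Skip("skipping e2e test: set E2E=ok to enable")
	}
}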
@@ -8,6 +8,7 @@ import (
 	"os"
 	"time"
 
+	"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime/schema"

@@ -18,7 +19,8 @@ import (
 
 // E2EClient ...
 type E2EClient struct {
-	Client dynamic.Interface
+	Client  dynamic.Interface
+	KClient versioned.Interface
 }
 
 type APIRequest struct {
@@ -38,9 +40,17 @@ func NewE2EClient() (*E2EClient, error) {
 		return nil, err
 	}
 	e2eClient := new(E2EClient)
-	dClient, err := dynamic.NewForConfig(config)
-	e2eClient.Client = dClient
-	return e2eClient, err
+	if dClient, err := dynamic.NewForConfig(config); err != nil {
+		return nil, err
+	} else {
+		e2eClient.Client = dClient
+	}
+	if kclient, err := versioned.NewForConfig(config); err != nil {
+		return nil, err
+	} else {
+		e2eClient.KClient = kclient
+	}
+	return e2eClient, nil
 }
 
 // GetGVR :- gets GroupVersionResource for dynamic client
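With the typed clientset added alongside the dynamic client, one constructor now yields both. A minimal usage sketch (assumes a reachable cluster and the imports of this file plus "context"; the apiextensions GVR is an illustration, not taken from this diff):

// Build the combined client, then use the dynamic interface for arbitrary
// resources and the typed kyverno clientset for policies.
func exampleUsage() error {
	client, err := NewE2EClient()
	if err != nil {
		return err
	}
	crdGVR := GetGVR("apiextensions.k8s.io", "v1", "customresourcedefinitions")
	if _, err := client.Client.Resource(crdGVR).List(context.TODO(), metav1.ListOptions{}); err != nil {
		return err
	}
	_, err = client.KClient.KyvernoV1().ClusterPolicies().List(context.TODO(), metav1.ListOptions{})
	return err
}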
@@ -48,6 +58,34 @@ func GetGVR(group, version, resource string) schema.GroupVersionResource {
 	return schema.GroupVersionResource{Group: group, Version: version, Resource: resource}
 }
 
+func (e2e *E2EClient) ClusterPolicyReady(policyName string) bool {
+	return GetWithRetry(1*time.Second, 15, func() error {
+		if cpol, err := e2e.KClient.KyvernoV1().ClusterPolicies().Get(context.TODO(), policyName, metav1.GetOptions{}); err != nil {
+			return err
+		} else {
+			if !cpol.IsReady() {
+				return fmt.Errorf("cluster policy %s is not ready", policyName)
+			}
+			time.Sleep(2 * time.Second)
+			return nil
+		}
+	}) == nil
+}
+
+func (e2e *E2EClient) PolicyReady(namespace string, policyName string) bool {
+	return GetWithRetry(1*time.Second, 15, func() error {
+		if pol, err := e2e.KClient.KyvernoV1().Policies(namespace).Get(context.TODO(), policyName, metav1.GetOptions{}); err != nil {
+			return err
+		} else {
+			if !pol.IsReady() {
+				return fmt.Errorf("policy %s/%s is not ready", namespace, policyName)
+			}
+			time.Sleep(2 * time.Second)
+			return nil
+		}
+	}) == nil
+}
+
 // CleanClusterPolicies :- Deletes all the cluster policies
 func (e2e *E2EClient) CleanClusterPolicies(gvr schema.GroupVersionResource) error {
 	namespace := ""
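Both readiness helpers lean on GetWithRetry, whose definition is outside this diff. From the call sites its shape can be inferred; a sketch under that assumption, not the actual helper:

package e2e

import "time"

// GetWithRetry calls fn up to `retries` times, sleeping `sleep` between
// attempts, and returns nil on the first success or the last error otherwise.
func GetWithRetry(sleep time.Duration, retries int, fn func() error) error {
	var err error
	for i := 0; i < retries; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(sleep)
	}
	return err
}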
@@ -28,7 +28,7 @@ var VerifyImagesTests = []struct {
 	{
 		// Case for custom image extraction
 		TestName:          "checks that custom images are populated with simple extractor",
-		PolicyName:        "tasks",
+		PolicyName:        "tasks-simple",
 		PolicyRaw:         kyvernoTaskPolicyWithSimpleExtractor,
 		ResourceName:      "example-task-name",
 		ResourceNamespace: "test-verify-images",

@@ -39,7 +39,7 @@ var VerifyImagesTests = []struct {
 	{
 		// Case for custom image extraction
 		TestName:          "checks that custom images are populated with complex extractor",
-		PolicyName:        "tasks",
+		PolicyName:        "tasks-complex",
 		PolicyRaw:         kyvernoTaskPolicyWithComplexExtractor,
 		ResourceName:      "example-task-name",
 		ResourceNamespace: "test-verify-images",

@@ -50,7 +50,7 @@ var VerifyImagesTests = []struct {
 	{
 		// Case for custom image extraction
 		TestName:          "checks that custom images are not populated",
-		PolicyName:        "tasks",
+		PolicyName:        "tasks-no-extractor",
 		PolicyRaw:         kyvernoTaskPolicyWithoutExtractor,
 		ResourceName:      "example-task-name",
 		ResourceNamespace: "test-verify-images",

@@ -61,7 +61,7 @@ var VerifyImagesTests = []struct {
 	{
 		// Case for custom image extraction
 		TestName:          "checks that custom images are populated and verified",
-		PolicyName:        "tasks",
+		PolicyName:        "tasks-keyless",
 		PolicyRaw:         kyvernoTaskPolicyKeyless,
 		ResourceName:      "example-task-name",
 		ResourceNamespace: "test-verify-images",
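These entries belong to an anonymous struct slice whose declaration is outside the hunks. A sketch of its likely shape, assembled only from the fields this diff references (TestName, PolicyName, PolicyRaw, ResourceName, ResourceNamespace, ResourceGVR, ResourceRaw, MustSucceed); the real declaration and package name may differ:

package verifyimages

import "k8s.io/apimachinery/pkg/runtime/schema"

// VerifyImagesTests drives the table test in TestImageVerify below.
var VerifyImagesTests = []struct {
	TestName          string
	PolicyName        string
	PolicyRaw         []byte
	ResourceName      string
	ResourceNamespace string
	ResourceGVR       schema.GroupVersionResource
	ResourceRaw       []byte
	MustSucceed       bool
}{
	// entries as in the hunks above
}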
@@ -21,15 +21,6 @@ spec:
   group: tekton.dev
   preserveUnknownFields: false
   versions:
-  - name: v1alpha1
-    served: true
-    storage: false
-    schema:
-      openAPIV3Schema:
-        type: object
-        x-kubernetes-preserve-unknown-fields: true
-    subresources:
-      status: {}
   - name: v1beta1
     served: true
     storage: true

@@ -39,15 +30,6 @@ spec:
         x-kubernetes-preserve-unknown-fields: true
     subresources:
       status: {}
-  - name: v1
-    served: false
-    storage: false
-    schema:
-      openAPIV3Schema:
-        type: object
-        x-kubernetes-preserve-unknown-fields: true
-    subresources:
-      status: {}
   names:
     kind: Task
     plural: tasks

@@ -55,14 +37,6 @@ spec:
   - tekton
   - tekton-pipelines
   scope: Namespaced
-  conversion:
-    strategy: Webhook
-    webhook:
-      conversionReviewVersions: ["v1beta1"]
-      clientConfig:
-        service:
-          name: tekton-pipelines-webhook
-          namespace: tekton-pipelines
 `)
 
 var tektonTask = []byte(`

@@ -91,7 +65,7 @@ var kyvernoTaskPolicyWithSimpleExtractor = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
-  name: tasks
+  name: tasks-simple
 spec:
   validationFailureAction: enforce
   rules:

@@ -99,7 +73,7 @@ spec:
     match:
       resources:
         kinds:
-        - Task
+        - tekton.dev/v1beta1/Task
     preconditions:
     - key: '{{request.operation}}'
       operator: NotEquals

@@ -120,7 +94,7 @@ var kyvernoTaskPolicyWithComplexExtractor = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
-  name: tasks
+  name: tasks-complex
 spec:
   validationFailureAction: enforce
   rules:

@@ -128,7 +102,7 @@ spec:
     match:
      resources:
        kinds:
-       - Task
+       - tekton.dev/v1beta1/Task
    preconditions:
    - key: '{{request.operation}}'
      operator: NotEquals

@@ -152,7 +126,7 @@ var kyvernoTaskPolicyKeyless = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
-  name: tasks
+  name: tasks-keyless
 spec:
   validationFailureAction: enforce
   webhookTimeoutSeconds: 30

@@ -161,7 +135,7 @@ spec:
     match:
       resources:
         kinds:
-        - Task
+        - tekton.dev/v1beta1/Task
     preconditions:
     - key: '{{request.operation}}'
       operator: NotEquals

@@ -173,13 +147,14 @@ spec:
       - image: "ghcr.io/*"
         subject: "https://github.com/*"
         issuer: "https://token.actions.githubusercontent.com"
+        required: false
 `)
 
 var kyvernoTaskPolicyWithoutExtractor = []byte(`
 apiVersion: kyverno.io/v1
 kind: ClusterPolicy
 metadata:
-  name: tasks
+  name: tasks-no-extractor
 spec:
   validationFailureAction: enforce
   rules:

@@ -187,7 +162,7 @@ spec:
     match:
       resources:
         kinds:
-        - Task
+        - tekton.dev/v1beta1/Task
     preconditions:
     - key: '{{request.operation}}'
       operator: NotEquals
@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/kyverno/kyverno/test/e2e"
-	commonE2E "github.com/kyverno/kyverno/test/e2e/common"
 	. "github.com/onsi/ginkgo"
 	. "github.com/onsi/gomega"
 )
@@ -38,7 +37,40 @@ func TestImageVerify(t *testing.T) {
 	// Generate E2E Client
 	e2eClient, err := e2e.NewE2EClient()
 	Expect(err).To(BeNil())
-	for _, test := range VerifyImagesTests {
+
+	By(fmt.Sprintf("Deleting CRD: %s...", crdName))
+	e2eClient.DeleteClusteredResource(crdGVR, crdName)
+
+	By("Wait Till Deletion of CRD...")
+	err = e2e.GetWithRetry(1*time.Second, 15, func() error {
+		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
+		if err != nil {
+			return nil
+		}
+
+		return fmt.Errorf("failed to delete CRD: %v", err)
+	})
+	Expect(err).NotTo(HaveOccurred())
+
+	// Create Tekton CRD
+	By("Creating Tekton CRD")
+	_, err = e2eClient.CreateClusteredResourceYaml(crdGVR, tektonTaskCRD)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Wait till CRD is created
+	e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
+		_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
+		if err == nil {
+			return nil
+		}
+		return errors.New("Waiting for CRD to be created...")
+	})
+
+	// A created CRD is not a guarantee that we can already create new resources
+	time.Sleep(10 * time.Second)
+
+	for _, tcase := range VerifyImagesTests {
+		test := tcase
 
 		By("Deleting Cluster Policies...")
 		_ = e2eClient.CleanClusterPolicies(policyGVR)
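The rewritten loop copies the range variable (test := tcase) before using it, the standard pre-Go 1.22 guard against closures or deferred calls observing only the final loop value. A self-contained demonstration of why the copy matters:

package main

import "fmt"

func main() {
	funcs := make([]func(), 0, 3)
	for _, v := range []string{"a", "b", "c"} {
		v := v // copy; before Go 1.22, omitting this makes every closure print "c"
		funcs = append(funcs, func() { fmt.Println(v) })
	}
	for _, f := range funcs {
		f() // prints a, b, c
	}
}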
@@ -59,20 +91,6 @@ func TestImageVerify(t *testing.T) {
 		})
 		Expect(err).NotTo(HaveOccurred())
 
-		By(fmt.Sprintf("Deleting CRD: %s...", policyNamespace))
-		e2eClient.DeleteClusteredResource(crdGVR, crdName)
-
-		By("Wait Till Deletion of CRD...")
-		err = e2e.GetWithRetry(1*time.Second, 15, func() error {
-			_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
-			if err != nil {
-				return nil
-			}
-
-			return fmt.Errorf("failed to crd: %v", err)
-		})
-		Expect(err).NotTo(HaveOccurred())
-
 		By(fmt.Sprintf("Creating Namespace: %s...", test.ResourceNamespace))
 		_, err = e2eClient.CreateClusteredResourceYaml(namespaceGVR, newNamespaceYaml(test.ResourceNamespace))
 		Expect(err).NotTo(HaveOccurred())
@@ -88,33 +106,15 @@ func TestImageVerify(t *testing.T) {
 		})
 		Expect(err).NotTo(HaveOccurred())
 
-		// Create Tekton CRD
-		By("Creating Tekton CRD")
-		_, err = e2eClient.CreateClusteredResourceYaml(crdGVR, tektonTaskCRD)
-		Expect(err).NotTo(HaveOccurred())
-
-		// Wait till CRD is created
-		e2e.GetWithRetry(time.Duration(1*time.Second), 15, func() error {
-			_, err := e2eClient.GetClusteredResource(crdGVR, crdName)
-			if err == nil {
-				return nil
-			}
-			return errors.New("Waiting for CRD to be created...")
-		})
-
-		// Created CRD is not a garantee that we already can create new resources
-		time.Sleep(3 * time.Second)
-
 		// Create policy
 		By(fmt.Sprintf("Creating policy in \"%s\"", policyNamespace))
-		_, err = e2eClient.CreateNamespacedResourceYaml(policyGVR, policyNamespace, test.PolicyName, test.PolicyRaw)
-		Expect(err).NotTo(HaveOccurred())
-		err = commonE2E.PolicyCreated(test.PolicyName)
+		_, err = e2eClient.CreateClusteredResourceYaml(policyGVR, test.PolicyRaw)
 		Expect(err).NotTo(HaveOccurred())
+
+		Expect(e2eClient.ClusterPolicyReady(test.PolicyName)).To(BeTrue())
 
 		By("Creating Resource...")
-		_, err = e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, test.PolicyName, test.ResourceRaw)
+		_, err := e2eClient.CreateNamespacedResourceYaml(test.ResourceGVR, test.ResourceNamespace, test.ResourceName, test.ResourceRaw)
+		if test.MustSucceed {
+			Expect(err).NotTo(HaveOccurred())
+		} else {
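The hunk cuts off at the opening of the else branch. The table drives both positive and negative cases via MustSucceed, so the elided branch presumably asserts the opposite outcome; a hypothetical completion, not shown in this diff:

		if test.MustSucceed {
			Expect(err).NotTo(HaveOccurred())
		} else {
			Expect(err).To(HaveOccurred()) // assumed: the enforce policy blocks the resource
		}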
@@ -126,9 +126,6 @@ func TestImageVerify(t *testing.T) {
 		err = e2eClient.CleanClusterPolicies(policyGVR)
 		Expect(err).NotTo(HaveOccurred())
 
-		//CleanUp CRDs
-		e2eClient.DeleteClusteredResource(crdGVR, crdName)
-
 		// Clear Namespace
 		e2eClient.DeleteClusteredResource(namespaceGVR, nspace)
 		// Wait Till Deletion of Namespace
@@ -143,5 +140,7 @@ func TestImageVerify(t *testing.T) {
 		By(fmt.Sprintf("Test %s Completed \n\n\n", test.TestName))
 
 	}
+	//CleanUp CRDs
+	e2eClient.DeleteClusteredResource(crdGVR, crdName)
 
 }