feat: add CleanupPolicy validation code to CleanupPolicyHandler (#5338)
* add validation code for cleanupPolicyHandler
* update
* fix
* register kuttl tests for cleanup policy

Signed-off-by: Nikhil Sharma <nikhilsharma230303@gmail.com>
Signed-off-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Co-authored-by: Charles-Edouard Brétéché <charles.edouard@nirmata.com>
Co-authored-by: Charles-Edouard Brétéché <charled.breteche@gmail.com>
Parent: c37e9d4625
Commit: 0fb45ed53a
18 changed files with 307 additions and 83 deletions
@@ -214,7 +214,7 @@ The command removes all the Kubernetes components associated with the chart and
 | grafana.enabled | bool | `false` | Enable grafana dashboard creation. |
 | grafana.namespace | string | `nil` | Namespace to create the grafana dashboard configmap. If not set, it will be created in the same namespace where the chart is deployed. |
 | grafana.annotations | object | `{}` | Grafana dashboard configmap annotations. |
-| cleanupController.enabled | bool | `false` | Enable cleanup controller. |
+| cleanupController.enabled | bool | `true` | Enable cleanup controller. |
 | cleanupController.image.registry | string | `nil` | Image registry |
 | cleanupController.image.repository | string | `"ghcr.io/kyverno/cleanup-controller"` | Image repository |
 | cleanupController.image.tag | string | `nil` | Image tag Defaults to appVersion in Chart.yaml if omitted |
@@ -40,7 +40,7 @@ webhooks:
     service:
       name: {{ template "kyverno.cleanup-controller.deploymentName" . }}
       namespace: {{ template "kyverno.namespace" . }}
-      path: /todo
+      path: /validate
       port: 443
   failurePolicy: Fail
   matchPolicy: Equivalent
@@ -478,7 +478,7 @@ grafana:
 
 cleanupController:
   # -- Enable cleanup controller.
-  enabled: false
+  enabled: true
 
   image:
     # -- Image registry
cmd/cleanup-controller/handlers.go (new file, 39 lines)
@@ -0,0 +1,39 @@
+package main
+
+import (
+	"time"
+
+	"github.com/go-logr/logr"
+	"github.com/kyverno/kyverno/cmd/cleanup-controller/validate"
+	"github.com/kyverno/kyverno/pkg/clients/dclient"
+	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
+	admissionv1 "k8s.io/api/admission/v1"
+)
+
+type cleanupPolicyHandlers struct {
+	client dclient.Interface
+}
+
+func NewHandlers(client dclient.Interface) CleanupPolicyHandlers {
+	return &cleanupPolicyHandlers{
+		client: client,
+	}
+}
+
+func (h *cleanupPolicyHandlers) Validate(logger logr.Logger, request *admissionv1.AdmissionRequest, _ time.Time) *admissionv1.AdmissionResponse {
+	if request.SubResource != "" {
+		logger.V(4).Info("skip policy validation on status update")
+		return admissionutils.ResponseSuccess()
+	}
+	policy, _, err := admissionutils.GetCleanupPolicies(request)
+	if err != nil {
+		logger.Error(err, "failed to unmarshal policies from admission request")
+		return admissionutils.Response(err)
+	}
+	err = validate.ValidateCleanupPolicy(policy, h.client, false)
+	if err != nil {
+		logger.Error(err, "policy validation errors")
+		return admissionutils.Response(err)
+	}
+	return admissionutils.Response(err)
+}
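For context, a minimal sketch (not part of the commit) of how the new Validate handler could be exercised in a unit test. The nil client and the request literal are illustrative assumptions; a real dclient.Interface is only needed once the CanIDelete check is re-enabled inside ValidateCleanupPolicy, and ResponseSuccess is assumed to produce an allowed response:

package main

import (
	"testing"
	"time"

	"github.com/kyverno/kyverno/pkg/logging"
	admissionv1 "k8s.io/api/admission/v1"
)

func TestValidateSkipsStatusSubresource(t *testing.T) {
	h := NewHandlers(nil) // Validate returns before touching the client on subresource requests
	request := &admissionv1.AdmissionRequest{
		SubResource: "status",
	}
	response := h.Validate(logging.GlobalLogger(), request, time.Now())
	if response == nil || !response.Allowed {
		t.Errorf("expected status updates to be admitted, got %+v", response)
	}
}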
@@ -2,4 +2,4 @@ package logger
 
 import "github.com/kyverno/kyverno/pkg/logging"
 
-var Logger = logging.WithName("cleanupwebhooks")
+var Logger = logging.WithName("cleanuppolicywebhooks")
@@ -4,6 +4,7 @@ import (
 	"context"
 	"flag"
 	"fmt"
+	"net/http"
 	"os"
 	"os/signal"
 	"strconv"
@@ -11,8 +12,12 @@ import (
 	"time"
 
 	"github.com/go-logr/logr"
+	"github.com/kyverno/kyverno/pkg/client/clientset/versioned"
+	"github.com/kyverno/kyverno/pkg/clients/dclient"
+	kyvernoclient "github.com/kyverno/kyverno/pkg/clients/wrappers"
 	"github.com/kyverno/kyverno/pkg/config"
 	"github.com/kyverno/kyverno/pkg/logging"
+	"github.com/kyverno/kyverno/pkg/metrics"
 	corev1 "k8s.io/api/core/v1"
 	kubeinformers "k8s.io/client-go/informers"
 	"k8s.io/client-go/kubernetes"
@@ -24,10 +29,16 @@ var (
 	clientRateLimitQPS   float64
 	clientRateLimitBurst int
 	logFormat            string
+	otel                 string
+	otelCollector        string
+	metricsPort          string
+	transportCreds       string
+	disableMetricsExport bool
 )
 
 const (
-	resyncPeriod = 15 * time.Minute
+	resyncPeriod         = 15 * time.Minute
+	metadataResyncPeriod = 15 * time.Minute
 )
 
 func parseFlags() error {
@@ -36,6 +47,11 @@ func parseFlags() error {
 	flag.StringVar(&kubeconfig, "kubeconfig", "", "Path to a kubeconfig. Only required if out-of-cluster.")
 	flag.Float64Var(&clientRateLimitQPS, "clientRateLimitQPS", 20, "Configure the maximum QPS to the Kubernetes API server from Kyverno. Uses the client default if zero.")
 	flag.IntVar(&clientRateLimitBurst, "clientRateLimitBurst", 50, "Configure the maximum burst for throttle. Uses the client default if zero.")
+	flag.StringVar(&otel, "otelConfig", "prometheus", "Set this flag to 'grpc', to enable exporting metrics to an Opentelemetry Collector. The default collector is set to \"prometheus\"")
+	flag.StringVar(&otelCollector, "otelCollector", "opentelemetrycollector.kyverno.svc.cluster.local", "Set this flag to the OpenTelemetry Collector Service Address. Kyverno will try to connect to this on the metrics port.")
+	flag.StringVar(&transportCreds, "transportCreds", "", "Set this flag to the CA secret containing the certificate which is used by our Opentelemetry Metrics Client. If empty string is set, means an insecure connection will be used")
+	flag.StringVar(&metricsPort, "metricsPort", "8000", "Expose prometheus metrics at the given port, default to 8000.")
+	flag.BoolVar(&disableMetricsExport, "disableMetrics", false, "Set this flag to 'true' to disable metrics.")
 	if err := flag.Set("v", "2"); err != nil {
 		return err
 	}
@@ -57,6 +73,59 @@ func createKubeClients(logger logr.Logger) (*rest.Config, *kubernetes.Clientset, error) {
 	return clientConfig, kubeClient, nil
 }
 
+func createInstrumentedClients(ctx context.Context, logger logr.Logger, clientConfig *rest.Config, kubeClient *kubernetes.Clientset, metricsConfig *metrics.MetricsConfig) (versioned.Interface, dclient.Interface, error) {
+	logger = logger.WithName("instrumented-clients")
+	logger.Info("create instrumented clients...", "kubeconfig", kubeconfig, "qps", clientRateLimitQPS, "burst", clientRateLimitBurst)
+	kyvernoClient, err := kyvernoclient.NewForConfig(clientConfig, metricsConfig)
+	if err != nil {
+		return nil, nil, err
+	}
+	dynamicClient, err := dclient.NewClient(ctx, clientConfig, kubeClient, metricsConfig, metadataResyncPeriod)
+	if err != nil {
+		return nil, nil, err
+	}
+	return kyvernoClient, dynamicClient, nil
+}
+
+func setupMetrics(logger logr.Logger, kubeClient kubernetes.Interface) (*metrics.MetricsConfig, context.CancelFunc, error) {
+	logger = logger.WithName("metrics")
+	logger.Info("setup metrics...", "otel", otel, "port", metricsPort, "collector", otelCollector, "creds", transportCreds)
+	metricsConfigData, err := config.NewMetricsConfigData(kubeClient)
+	if err != nil {
+		return nil, nil, err
+	}
+	metricsAddr := ":" + metricsPort
+	metricsConfig, metricsServerMux, metricsPusher, err := metrics.InitMetrics(
+		disableMetricsExport,
+		otel,
+		metricsAddr,
+		otelCollector,
+		metricsConfigData,
+		transportCreds,
+		kubeClient,
+		logging.WithName("metrics"),
+	)
+	if err != nil {
+		return nil, nil, err
+	}
+	var cancel context.CancelFunc
+	if otel == "grpc" {
+		cancel = func() {
+			ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+			defer cancel()
+			metrics.ShutDownController(ctx, metricsPusher)
+		}
+	}
+	if otel == "prometheus" {
+		go func() {
+			if err := http.ListenAndServe(metricsAddr, metricsServerMux); err != nil {
+				logger.Error(err, "failed to enable metrics", "address", metricsAddr)
+			}
+		}()
+	}
+	return metricsConfig, cancel, nil
+}
+
+func setupSignals() (context.Context, context.CancelFunc) {
+	return signal.NotifyContext(context.Background(), os.Interrupt, syscall.SIGTERM)
+}
@@ -79,17 +148,38 @@ func main() {
 	}
 	logger := logging.WithName("setup")
 	// create client config and kube clients
-	_, kubeClient, err := createKubeClients(logger)
+	clientConfig, kubeClient, err := createKubeClients(logger)
 	if err != nil {
 		os.Exit(1)
 	}
 	kubeKyvernoInformer := kubeinformers.NewSharedInformerFactoryWithOptions(kubeClient, resyncPeriod, kubeinformers.WithNamespace(config.KyvernoNamespace()))
 
 	// setup signals
 	signalCtx, signalCancel := setupSignals()
 	defer signalCancel()
+	metricsConfig, metricsShutdown, err := setupMetrics(logger, kubeClient)
+	if err != nil {
+		logger.Error(err, "failed to setup metrics")
+		os.Exit(1)
+	}
+	if metricsShutdown != nil {
+		defer metricsShutdown()
+	}
+	_, dynamicClient, err := createInstrumentedClients(signalCtx, logger, clientConfig, kubeClient, metricsConfig)
+	if err != nil {
+		logger.Error(err, "failed to create instrument clients")
+		os.Exit(1)
+	}
+	policyHandlers := NewHandlers(
+		dynamicClient,
+	)
 	secretLister := kubeKyvernoInformer.Core().V1().Secrets().Lister()
+	// start informers and wait for cache sync
+	// we need to call start again because we potentially registered new informers
+	if !startInformersAndWaitForCacheSync(signalCtx, kubeKyvernoInformer) {
+		os.Exit(1)
+	}
 	server := NewServer(
+		policyHandlers,
 		func() ([]byte, []byte, error) {
 			secret, err := secretLister.Secrets(config.KyvernoNamespace()).Get("cleanup-controller-tls")
 			if err != nil {
@@ -98,11 +188,6 @@ func main() {
 			return secret.Data[corev1.TLSCertKey], secret.Data[corev1.TLSPrivateKeyKey], nil
 		},
 	)
-	// start informers and wait for cache sync
-	// we need to call start again because we potentially registered new informers
-	if !startInformersAndWaitForCacheSync(signalCtx, kubeKyvernoInformer) {
-		os.Exit(1)
-	}
 	// start webhooks server
 	server.Run(signalCtx.Done())
 	// wait for termination signal
@@ -8,12 +8,15 @@ import (
 	"github.com/go-logr/logr"
 	"github.com/julienschmidt/httprouter"
+	"github.com/kyverno/kyverno/cmd/cleanup-controller/logger"
 	"github.com/kyverno/kyverno/pkg/logging"
 	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
 	"github.com/kyverno/kyverno/pkg/webhooks/handlers"
 	admissionv1 "k8s.io/api/admission/v1"
 )
 
+// ValidatingWebhookServicePath is the path for validation webhook
+const ValidatingWebhookServicePath = "/validate"
+
 type Server interface {
 	// Run TLS server in separate thread and returns control immediately
 	Run(<-chan struct{})
@@ -21,26 +24,28 @@ type Server interface {
 	Stop(context.Context)
 }
 
+type CleanupPolicyHandlers interface {
+	// Validate performs the validation check on policy resources
+	Validate(logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse
+}
+
 type server struct {
 	server *http.Server
 }
 
 type TlsProvider func() ([]byte, []byte, error)
 
-func TODO(logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse {
-	return admissionutils.ResponseSuccess()
-}
-
 // NewServer creates new instance of server accordingly to given configuration
 func NewServer(
+	policyHandlers CleanupPolicyHandlers,
 	tlsProvider TlsProvider,
 ) Server {
 	mux := httprouter.New()
 	mux.HandlerFunc(
 		"POST",
-		"/todo",
-		handlers.AdmissionHandler(TODO).
-			WithAdmission(logging.WithName("todo")),
+		ValidatingWebhookServicePath,
+		handlers.AdmissionHandler(policyHandlers.Validate).
+			WithAdmission(logger.Logger.WithName("validate")),
 	)
 	return &server{
 		server: &http.Server{
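Any type with a matching Validate method satisfies the new CleanupPolicyHandlers interface. A minimal sketch (assumed, not part of the commit) of composing the server with a stub handler; the runStubServer helper and its stop-channel wiring are purely illustrative:

package main

import (
	"time"

	"github.com/go-logr/logr"
	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
	admissionv1 "k8s.io/api/admission/v1"
)

type allowAll struct{}

// Validate admits everything; the real handler runs ValidateCleanupPolicy.
func (allowAll) Validate(logr.Logger, *admissionv1.AdmissionRequest, time.Time) *admissionv1.AdmissionResponse {
	return admissionutils.ResponseSuccess()
}

func runStubServer(tls TlsProvider) {
	server := NewServer(allowAll{}, tls)
	stop := make(chan struct{})
	server.Run(stop) // serves POST /validate (ValidatingWebhookServicePath) until stop closes
}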
@@ -8,8 +8,6 @@ import (
 	"github.com/kyverno/kyverno/cmd/cleanup-controller/logger"
 	"github.com/kyverno/kyverno/pkg/clients/dclient"
 	"github.com/kyverno/kyverno/pkg/engine/variables"
-	"github.com/kyverno/kyverno/pkg/logging"
-	"github.com/kyverno/kyverno/pkg/openapi"
 	"github.com/kyverno/kyverno/pkg/policy/generate"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/sets"
@@ -59,8 +57,8 @@ func (c *Cleanup) CanIDelete(kind, namespace string) error {
 }
 
 // Validate checks the policy and rules declarations for required configurations
-func ValidateCleanupPolicy(cleanuppolicy kyvernov1alpha1.CleanupPolicyInterface, client dclient.Interface, mock bool, openApiManager openapi.Manager) error {
-	namespace := cleanuppolicy.GetNamespace()
+func ValidateCleanupPolicy(cleanuppolicy kyvernov1alpha1.CleanupPolicyInterface, client dclient.Interface, mock bool) error {
+	// namespace := cleanuppolicy.GetNamespace()
 	var res []*metav1.APIResourceList
 	clusterResources := sets.NewString()
 
@@ -88,11 +86,11 @@ func ValidateCleanupPolicy(cleanuppolicy kyvernov1alpha1.CleanupPolicyInterface,
 		return errs.ToAggregate()
 	}
 
-	for kind := range clusterResources {
-		checker := NewCleanup(client, *cleanuppolicy.GetSpec(), logging.GlobalLogger())
-		if err := checker.CanIDelete(kind, namespace); err != nil {
-			return fmt.Errorf("cannot delete kind %s in namespace %s", kind, namespace)
-		}
-	}
+	// for kind := range clusterResources {
+	// 	checker := NewCleanup(client, *cleanuppolicy.GetSpec(), logging.GlobalLogger())
+	// 	if err := checker.CanIDelete(kind, namespace); err != nil {
+	// 		return fmt.Errorf("cannot delete kind %s in namespace %s", kind, namespace)
+	// 	}
+	// }
 	return nil
 }
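A minimal sketch (assumed, not in the commit) of the slimmed-down entry point: the openapi.Manager parameter is gone, so callers pass only the policy, a dclient.Interface, and the mock flag. The Check wrapper and the import paths follow kyverno's repository layout and are assumptions here:

package cleanupcheck

import (
	kyvernov1alpha1 "github.com/kyverno/kyverno/api/kyverno/v1alpha1"
	"github.com/kyverno/kyverno/cmd/cleanup-controller/validate"
	"github.com/kyverno/kyverno/pkg/clients/dclient"
)

// Check mirrors the webhook call in handlers.go: mock=false would keep the
// (currently commented out) CanIDelete authorization check reachable if it
// is re-enabled later.
func Check(policy kyvernov1alpha1.CleanupPolicyInterface, client dclient.Interface) error {
	return validate.ValidateCleanupPolicy(policy, client, false)
}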
@@ -1,4 +1,4 @@
-package utils
+package admission
 
 import (
 	"encoding/json"
@@ -9,18 +9,20 @@ import (
 )
 
 func UnmarshalCleanupPolicy(kind string, raw []byte) (kyvernov1alpha1.CleanupPolicyInterface, error) {
-	var policy kyvernov1alpha1.CleanupPolicyInterface
 	if kind == "CleanupPolicy" {
+		var policy *kyvernov1alpha1.CleanupPolicy
 		if err := json.Unmarshal(raw, &policy); err != nil {
-			return policy, err
+			return nil, err
 		}
+		return policy, nil
 	} else if kind == "ClusterCleanupPolicy" {
+		var policy *kyvernov1alpha1.ClusterCleanupPolicy
 		if err := json.Unmarshal(raw, &policy); err != nil {
-			return policy, err
+			return nil, err
 		}
+		return policy, nil
 	}
-	return policy, fmt.Errorf("admission request does not contain a cleanuppolicy")
+	return nil, fmt.Errorf("admission request does not contain a cleanuppolicy")
 }
 
 func GetCleanupPolicy(request *admissionv1.AdmissionRequest) (kyvernov1alpha1.CleanupPolicyInterface, error) {
 	return UnmarshalCleanupPolicy(request.Kind.Kind, request.Object.Raw)
 }
@@ -35,11 +37,3 @@ func GetCleanupPolicies(request *admissionv1.AdmissionRequest) (kyvernov1alpha1.CleanupPolicyInterface, kyvernov1alpha1.CleanupPolicyInterface, error) {
 	}
 	return policy, emptypolicy, nil
 }
-
-func GetResourceName(request *admissionv1.AdmissionRequest) string {
-	resourceName := request.Kind.Kind + "/" + request.Name
-	if request.Namespace != "" {
-		resourceName = request.Namespace + "/" + resourceName
-	}
-	return resourceName
-}
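An illustrative sketch (not in the commit) of the relocated helper: after the package rename, callers import pkg/utils/admission. The JSON payload here is an assumption for demonstration:

package main

import (
	"fmt"

	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
)

func main() {
	raw := []byte(`{"metadata":{"name":"example","namespace":"default"}}`)
	policy, err := admissionutils.UnmarshalCleanupPolicy("CleanupPolicy", raw)
	if err != nil {
		// Any kind other than CleanupPolicy/ClusterCleanupPolicy errors out.
		fmt.Println("unexpected kind:", err)
		return
	}
	fmt.Println(policy.GetNamespace()) // "default", as used by ValidateCleanupPolicy
}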
pkg/utils/admission/policy.go (new file, 42 lines)
@@ -0,0 +1,42 @@
+package admission
+
+import (
+	"encoding/json"
+	"fmt"
+
+	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
+	admissionv1 "k8s.io/api/admission/v1"
+)
+
+func UnmarshalPolicy(kind string, raw []byte) (kyvernov1.PolicyInterface, error) {
+	if kind == "ClusterPolicy" {
+		var policy *kyvernov1.ClusterPolicy
+		if err := json.Unmarshal(raw, &policy); err != nil {
+			return nil, err
+		}
+		return policy, nil
+	} else if kind == "Policy" {
+		var policy *kyvernov1.Policy
+		if err := json.Unmarshal(raw, &policy); err != nil {
+			return nil, err
+		}
+		return policy, nil
+	}
+	return nil, fmt.Errorf("admission request does not contain a policy")
+}
+
+func GetPolicy(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, error) {
+	return UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
+}
+
+func GetPolicies(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, kyvernov1.PolicyInterface, error) {
+	policy, err := UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
+	if err != nil {
+		return policy, nil, err
+	}
+	if request.Operation == admissionv1.Update {
+		oldPolicy, err := UnmarshalPolicy(request.Kind.Kind, request.OldObject.Raw)
+		return policy, oldPolicy, err
+	}
+	return policy, nil, nil
+}
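A small sketch (assumed, not in the commit) of how GetPolicies distinguishes CREATE from UPDATE: oldPolicy is non-nil only when request.Operation is Update and both revisions decoded cleanly. The describe helper is illustrative:

package main

import (
	"fmt"

	admissionutils "github.com/kyverno/kyverno/pkg/utils/admission"
	admissionv1 "k8s.io/api/admission/v1"
)

func describe(request *admissionv1.AdmissionRequest) {
	policy, oldPolicy, err := admissionutils.GetPolicies(request)
	if err != nil {
		fmt.Println("not a Policy/ClusterPolicy request:", err)
		return
	}
	// GetResourceName (kept in this package) renders namespace/kind/name.
	if oldPolicy != nil {
		fmt.Printf("update of %s (%T)\n", admissionutils.GetResourceName(request), policy)
		return
	}
	fmt.Printf("create of %s (%T)\n", admissionutils.GetResourceName(request), policy)
}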
@@ -1,46 +1,9 @@
 package admission
 
 import (
-	"encoding/json"
-	"fmt"
-
-	kyvernov1 "github.com/kyverno/kyverno/api/kyverno/v1"
 	admissionv1 "k8s.io/api/admission/v1"
 )
 
-func UnmarshalPolicy(kind string, raw []byte) (kyvernov1.PolicyInterface, error) {
-	if kind == "ClusterPolicy" {
-		var policy *kyvernov1.ClusterPolicy
-		if err := json.Unmarshal(raw, &policy); err != nil {
-			return nil, err
-		}
-		return policy, nil
-	} else if kind == "Policy" {
-		var policy *kyvernov1.Policy
-		if err := json.Unmarshal(raw, &policy); err != nil {
-			return nil, err
-		}
-		return policy, nil
-	}
-	return nil, fmt.Errorf("admission request does not contain a policy")
-}
-
-func GetPolicy(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, error) {
-	return UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
-}
-
-func GetPolicies(request *admissionv1.AdmissionRequest) (kyvernov1.PolicyInterface, kyvernov1.PolicyInterface, error) {
-	policy, err := UnmarshalPolicy(request.Kind.Kind, request.Object.Raw)
-	if err != nil {
-		return policy, nil, err
-	}
-	if request.Operation == admissionv1.Update {
-		oldPolicy, err := UnmarshalPolicy(request.Kind.Kind, request.OldObject.Raw)
-		return policy, oldPolicy, err
-	}
-	return policy, nil, nil
-}
-
 func GetResourceName(request *admissionv1.AdmissionRequest) string {
 	resourceName := request.Kind.Kind + "/" + request.Name
 	if request.Namespace != "" {
@@ -17,6 +17,7 @@ testDirs:
   - ./test/conformance/kuttl/validate/clusterpolicy/standard/audit
   - ./test/conformance/kuttl/validate/clusterpolicy/standard/enforce
   - ./test/conformance/kuttl/validate/clusterpolicy/cornercases
+  - ./test/conformance/kuttl/validate/cleanuppolicy
   # Verify image tests
   - ./test/conformance/kuttl/verifyImages/clusterpolicy/standard
   # Report tests
test/conformance/kuttl/validate/cleanuppolicy/00-assert.yaml (new file, 17 lines)
@@ -0,0 +1,17 @@
+apiVersion: kyverno.io/v1alpha1
+kind: CleanupPolicy
+metadata:
+  name: cleanuppolicy
+  namespace: default
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
@@ -0,0 +1,17 @@
+apiVersion: kyverno.io/v1alpha1
+kind: CleanupPolicy
+metadata:
+  name: cleanuppolicy
+  namespace: default
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
test/conformance/kuttl/validate/cleanuppolicy/01-assert.yaml (new file, 16 lines)
@@ -0,0 +1,16 @@
+apiVersion: kyverno.io/v1alpha1
+kind: ClusterCleanupPolicy
+metadata:
+  name: cleanuppolicy
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
@@ -0,0 +1,16 @@
+apiVersion: kyverno.io/v1alpha1
+kind: ClusterCleanupPolicy
+metadata:
+  name: cleanuppolicy
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "* * * * *"
|
@ -0,0 +1,14 @@
|
|||
## Checks that the manifests.yaml file CANNOT be successfully created. If it can, fail the test as this is incorrect.
|
||||
|
||||
apiVersion: kuttl.dev/v1beta1
|
||||
kind: TestStep
|
||||
commands:
|
||||
- script: |
|
||||
if kubectl apply -f badcleanuppolicy.yaml
|
||||
then
|
||||
echo "Tested failed. Task was created when it shouldn't have been."
|
||||
exit 1
|
||||
else
|
||||
echo "Test succeeded. Task was not created as intended."
|
||||
exit 0
|
||||
fi
|
|
@@ -0,0 +1,17 @@
+apiVersion: kyverno.io/v1alpha1
+kind: CleanupPolicy
+metadata:
+  name: cleanuppolicy
+  namespace: default
+spec:
+  match:
+    any:
+    - resources:
+        kinds:
+          - Pod
+  conditions:
+    any:
+    - key: "{{ request.name }}"
+      operator: Equals
+      value: example
+  schedule: "invalid-schedule"
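The kuttl step above expects the webhook to reject this policy because "invalid-schedule" is not a valid cron expression. A plausible way to validate the schedule field — an assumption about, not a quote of, Kyverno's implementation — is the standard robfig/cron parser:

package main

import (
	"fmt"

	"github.com/robfig/cron/v3"
)

// validateSchedule rejects anything that is not a 5-field cron expression.
func validateSchedule(schedule string) error {
	if _, err := cron.ParseStandard(schedule); err != nil {
		return fmt.Errorf("invalid cron schedule %q: %w", schedule, err)
	}
	return nil
}

func main() {
	fmt.Println(validateSchedule("* * * * *"))        // <nil>: the asserts above apply
	fmt.Println(validateSchedule("invalid-schedule")) // non-nil: the apply is rejected
}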