Check if operator has access to storageclass

- Add logic to verify that the storage class exists
- Add e2e test for an invalid storage class
- Add RBAC rule to access storageclasses

Signed-off-by: Chirayu Kapoor <chirayu.kapoor@suse.com>

Parent: 42ffa7cdd2
Commit: 7f9bdca5d3

17 changed files with 411 additions and 20 deletions
Changed paths:
- Documentation
- bundle.yaml
- cmd/operator
- example/rbac/prometheus-operator
- jsonnet/prometheus-operator
- pkg (alertmanager, operator, prometheus, thanos)
- test
Documentation:

@@ -106,6 +106,12 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
 ```
 
 > Note: A cluster admin is required to create this `ClusterRole` and create a `ClusterRoleBinding` or `RoleBinding` to the `ServiceAccount` used by the Prometheus Operator `Pod`. The `ServiceAccount` used by the Prometheus Operator `Pod` can be specified in the `Deployment` object used to deploy it.
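The note above says a cluster admin must bind the `ClusterRole` to the operator's `ServiceAccount`. As a hedged illustration (not part of this commit), a minimal client-go sketch of creating such a `ClusterRoleBinding` could look like this; the "prometheus-operator" names and the "default" namespace are assumptions:

```go
package main

import (
	"context"

	rbacv1 "k8s.io/api/rbac/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Build a client from the local kubeconfig; a cluster-admin context is required.
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(config)

	// Bind the ClusterRole to the ServiceAccount used by the operator Pod.
	// Names and namespace are illustrative assumptions.
	crb := &rbacv1.ClusterRoleBinding{
		ObjectMeta: metav1.ObjectMeta{Name: "prometheus-operator"},
		RoleRef: rbacv1.RoleRef{
			APIGroup: "rbac.authorization.k8s.io",
			Kind:     "ClusterRole",
			Name:     "prometheus-operator",
		},
		Subjects: []rbacv1.Subject{{
			Kind:      "ServiceAccount",
			Name:      "prometheus-operator",
			Namespace: "default",
		}},
	}
	if _, err := client.RbacV1().ClusterRoleBindings().Create(context.Background(), crb, metav1.CreateOptions{}); err != nil {
		panic(err)
	}
}
```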
@@ -106,6 +106,12 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
 ```
 
 Similarly to Prometheus, Prometheus Agent will also require permission to scrape targets. Because of this, we will create a new service account for the Agent with the necessary permissions to scrape targets.
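As a hedged sketch (not part of this commit) of the dedicated service account mentioned above, creating one with client-go could look like the following; the name "prometheus-agent" is an assumption, and the scrape permissions themselves would come from binding a (Cluster)Role to it as in the manifests above:

```go
package main

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createAgentServiceAccount creates a dedicated ServiceAccount for the
// Prometheus Agent in the given namespace.
func createAgentServiceAccount(ctx context.Context, client kubernetes.Interface, ns string) error {
	sa := &v1.ServiceAccount{
		ObjectMeta: metav1.ObjectMeta{Name: "prometheus-agent", Namespace: ns},
	}
	_, err := client.CoreV1().ServiceAccounts(ns).Create(ctx, sa, metav1.CreateOptions{})
	return err
}
```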
bundle.yaml (generated):
@@ -41360,6 +41360,12 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
 ---
 apiVersion: apps/v1
 kind: Deployment
cmd/operator:

@@ -39,6 +39,7 @@ import (
 	"github.com/prometheus/common/version"
 	"golang.org/x/sync/errgroup"
 	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/client-go/kubernetes"
 	klog "k8s.io/klog/v2"
@@ -311,6 +312,27 @@ func run() int {
 	}
 	cfg.KubernetesVersion = *kubernetesVersion
 	level.Info(logger).Log("msg", "connection established", "cluster-version", cfg.KubernetesVersion)
+
+	// Check if we can read the storage class.
+	canReadStorageClass, err := checkPrerequisites(
+		ctx,
+		logger,
+		kclient,
+		nil,
+		storagev1.SchemeGroupVersion,
+		storagev1.SchemeGroupVersion.WithResource("storageclasses").Resource,
+		k8sutil.ResourceAttribute{
+			Group:    storagev1.GroupName,
+			Version:  storagev1.SchemeGroupVersion.Version,
+			Resource: storagev1.SchemeGroupVersion.WithResource("storageclasses").Resource,
+			Verbs:    []string{"get"},
+		},
+	)
+	if err != nil {
+		level.Error(logger).Log("msg", "failed to check StorageClass support", "err", err)
+		cancel()
+		return 1
+	}
+
 	scrapeConfigSupported, err := checkPrerequisites(
 		ctx,
@@ -332,7 +354,7 @@ func run() int {
 		return 1
 	}
 
-	po, err := prometheuscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "prometheusoperator"), r, scrapeConfigSupported)
+	po, err := prometheuscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "prometheusoperator"), r, scrapeConfigSupported, canReadStorageClass)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "instantiating prometheus controller failed: ", err)
 		cancel()
@@ -366,7 +388,7 @@ func run() int {
 
 	var pao *prometheusagentcontroller.Operator
 	if prometheusAgentSupported {
-		pao, err = prometheusagentcontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "prometheusagentoperator"), r, scrapeConfigSupported)
+		pao, err = prometheusagentcontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "prometheusagentoperator"), r, scrapeConfigSupported, canReadStorageClass)
 		if err != nil {
 			level.Error(logger).Log("msg", "instantiating prometheus-agent controller failed", "err", err)
 			cancel()
@@ -374,14 +396,14 @@ func run() int {
 		}
 	}
 
-	ao, err := alertmanagercontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "alertmanageroperator"), r)
+	ao, err := alertmanagercontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "alertmanageroperator"), r, canReadStorageClass)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "instantiating alertmanager controller failed: ", err)
 		cancel()
 		return 1
 	}
 
-	to, err := thanoscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "thanosoperator"), r)
+	to, err := thanoscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "thanosoperator"), r, canReadStorageClass)
 	if err != nil {
 		fmt.Fprintln(os.Stderr, "instantiating thanos controller failed: ", err)
 		cancel()
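The body of checkPrerequisites is not shown in this diff. As a hedged approximation of what such a permission probe does, a SelfSubjectAccessReview asks the API server whether the operator's own identity may perform a verb on a resource; the helper below is an illustrative sketch, not the operator's actual implementation:

```go
package main

import (
	"context"

	authv1 "k8s.io/api/authorization/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// canGetStorageClasses asks the API server whether the current identity
// (the operator's ServiceAccount) is allowed to "get" storageclasses.
func canGetStorageClasses(ctx context.Context, client kubernetes.Interface) (bool, error) {
	ssar := &authv1.SelfSubjectAccessReview{
		Spec: authv1.SelfSubjectAccessReviewSpec{
			ResourceAttributes: &authv1.ResourceAttributes{
				Group:    "storage.k8s.io",
				Version:  "v1",
				Resource: "storageclasses",
				Verb:     "get",
			},
		},
	}
	res, err := client.AuthorizationV1().SelfSubjectAccessReviews().Create(ctx, ssar, metav1.CreateOptions{})
	if err != nil {
		return false, err
	}
	return res.Status.Allowed, nil
}
```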
example/rbac/prometheus-operator:

@@ -84,3 +84,9 @@ rules:
   - get
   - list
   - watch
+- apiGroups:
+  - storage.k8s.io
+  resources:
+  - storageclasses
+  verbs:
+  - get
jsonnet/prometheus-operator:

@@ -136,6 +136,11 @@ function(params) {
         resources: ['ingresses'],
         verbs: ['get', 'list', 'watch'],
       },
+      {
+        apiGroups: ['storage.k8s.io'],
+        resources: ['storageclasses'],
+        verbs: ['get'],
+      },
     ],
   },
pkg/alertmanager:

@@ -92,6 +92,8 @@ type Operator struct {
 	metrics         *operator.Metrics
 	reconciliations *operator.ReconciliationTracker
 
+	canReadStorageClass bool
+
 	config Config
 }
@@ -109,7 +111,7 @@ type Config struct {
 }
 
 // New creates a new controller.
-func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer) (*Operator, error) {
+func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, canReadStorageClass bool) (*Operator, error) {
 	client, err := kubernetes.NewForConfig(restConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "instantiating kubernetes client failed")
@@ -137,8 +139,9 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
 		logger:   logger,
 		accessor: operator.NewAccessor(logger),
 
-		metrics:         operator.NewMetrics(r),
-		reconciliations: &operator.ReconciliationTracker{},
+		metrics:             operator.NewMetrics(r),
+		reconciliations:     &operator.ReconciliationTracker{},
+		canReadStorageClass: canReadStorageClass,
 
 		config: Config{
 			KubernetesVersion: c.KubernetesVersion,
 			LocalHost:         c.LocalHost,
@@ -646,6 +649,10 @@ func (c *Operator) sync(ctx context.Context, key string) error {
 
 	level.Info(logger).Log("msg", "sync alertmanager")
 
+	if err := operator.CheckStorageClass(ctx, c.canReadStorageClass, c.kclient, am.Spec.Storage); err != nil {
+		return err
+	}
+
 	assetStore := assets.NewStore(c.kclient.CoreV1(), c.kclient.CoreV1())
 
 	if err := c.provisionAlertmanagerConfiguration(ctx, am, assetStore); err != nil {
pkg/operator/storageclass.go (new file, 48 lines):
@@ -0,0 +1,48 @@
+// Copyright 2023 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package operator
+
+import (
+	"context"
+	"fmt"
+
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes"
+
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+)
+
+func CheckStorageClass(ctx context.Context, canReadStorageClass bool, kclient kubernetes.Interface, storage *monitoringv1.StorageSpec) error {
+	// Check the existence of the storage class if not empty and if the operator has enough permissions.
+	if !canReadStorageClass || storage == nil {
+		return nil
+	}
+
+	storageClassName := StringPtrValOrDefault(storage.VolumeClaimTemplate.Spec.StorageClassName, "")
+	if storageClassName == "" {
+		return nil
+	}
+
+	_, err := kclient.StorageV1().StorageClasses().Get(ctx, storageClassName, metav1.GetOptions{})
+	if err != nil {
+		if apierrors.IsNotFound(err) {
+			return fmt.Errorf("storage class %q does not exist", storageClassName)
+		}
+		return fmt.Errorf("cannot get %q storageclass: %w", storageClassName, err)
+	}
+
+	return nil
+}
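A minimal sketch of a unit test for CheckStorageClass (not part of this commit), using client-go's fake clientset; the storage class names "fast" and "missing" are assumptions:

```go
package operator

import (
	"context"
	"testing"

	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
	"k8s.io/utils/ptr"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

func TestCheckStorageClass(t *testing.T) {
	// Fake clientset pre-loaded with a single StorageClass named "fast".
	client := fake.NewSimpleClientset(&storagev1.StorageClass{
		ObjectMeta:  metav1.ObjectMeta{Name: "fast"},
		Provisioner: "kubernetes.io/no-provisioner",
	})

	storage := &monitoringv1.StorageSpec{
		VolumeClaimTemplate: monitoringv1.EmbeddedPersistentVolumeClaim{
			Spec: v1.PersistentVolumeClaimSpec{
				StorageClassName: ptr.To("fast"),
			},
		},
	}

	// An existing storage class passes the check.
	if err := CheckStorageClass(context.Background(), true, client, storage); err != nil {
		t.Fatalf("expected no error, got %v", err)
	}

	// A missing storage class is reported as an error.
	storage.VolumeClaimTemplate.Spec.StorageClassName = ptr.To("missing")
	if err := CheckStorageClass(context.Background(), true, client, storage); err == nil {
		t.Fatal("expected an error for a missing storage class")
	}

	// Without the RBAC permission the check is skipped entirely.
	if err := CheckStorageClass(context.Background(), false, client, storage); err != nil {
		t.Fatalf("expected the check to be skipped, got %v", err)
	}
}
```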
pkg/prometheus:

@@ -81,12 +81,13 @@ type Operator struct {
 	config                 operator.Config
 	endpointSliceSupported bool
 	scrapeConfigSupported  bool
+	canReadStorageClass    bool
 
 	statusReporter prompkg.StatusReporter
 }
 
 // New creates a new controller.
-func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported bool) (*Operator, error) {
+func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported bool, canReadStorageClass bool) (*Operator, error) {
 	client, err := kubernetes.NewForConfig(restConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "instantiating kubernetes client failed")
@@ -117,6 +118,7 @@ func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, log
 		metrics:               operator.NewMetrics(r),
 		reconciliations:       &operator.ReconciliationTracker{},
 		scrapeConfigSupported: scrapeConfigSupported,
+		canReadStorageClass:   canReadStorageClass,
 	}
 	c.metrics.MustRegister(
 		c.reconciliations,
@@ -519,6 +521,10 @@ func (c *Operator) sync(ctx context.Context, key string) error {
 
 	level.Info(logger).Log("msg", "sync prometheus")
 
+	if err := operator.CheckStorageClass(ctx, c.canReadStorageClass, c.kclient, p.Spec.Storage); err != nil {
+		return err
+	}
+
 	cg, err := prompkg.NewConfigGenerator(c.logger, p, c.endpointSliceSupported)
 	if err != nil {
 		return err
@@ -91,12 +91,13 @@ type Operator struct {
 	config                 operator.Config
 	endpointSliceSupported bool
 	scrapeConfigSupported  bool
+	canReadStorageClass    bool
 
 	statusReporter prompkg.StatusReporter
 }
 
 // New creates a new controller.
-func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported bool) (*Operator, error) {
+func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported bool, canReadStorageClass bool) (*Operator, error) {
 	client, err := kubernetes.NewForConfig(restConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "instantiating kubernetes client failed")
@@ -163,6 +164,7 @@ func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, log
 			Help: "Number of node endpoints synchronisation failures",
 		}),
 		scrapeConfigSupported: scrapeConfigSupported,
+		canReadStorageClass:   canReadStorageClass,
 	}
 	c.metrics.MustRegister(
 		c.nodeAddressLookupErrors,
@@ -1164,6 +1166,10 @@ func (c *Operator) sync(ctx context.Context, key string) error {
 	logger := log.With(c.logger, "key", key)
 	logDeprecatedFields(logger, p)
 
+	if err := operator.CheckStorageClass(ctx, c.canReadStorageClass, c.kclient, p.Spec.Storage); err != nil {
+		return err
+	}
+
 	if p.Spec.Paused {
 		level.Info(logger).Log("msg", "the resource is paused, not reconciling")
 		return nil
pkg/thanos:

@@ -70,8 +70,9 @@ type Operator struct {
 	nsThanosRulerInf cache.SharedIndexInformer
 	nsRuleInf        cache.SharedIndexInformer
 
-	metrics         *operator.Metrics
-	reconciliations *operator.ReconciliationTracker
+	metrics             *operator.Metrics
+	reconciliations     *operator.ReconciliationTracker
+	canReadStorageClass bool
 
 	config Config
 }
@@ -91,7 +92,7 @@ type Config struct {
 }
 
 // New creates a new controller.
-func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, logger log.Logger, r prometheus.Registerer) (*Operator, error) {
+func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, logger log.Logger, r prometheus.Registerer, canReadStorageClass bool) (*Operator, error) {
 	client, err := kubernetes.NewForConfig(restConfig)
 	if err != nil {
 		return nil, errors.Wrap(err, "instantiating kubernetes client failed")
@@ -115,13 +116,14 @@ func New(ctx context.Context, restConfig *rest.Config, conf operator.Config, log
 	r = prometheus.WrapRegistererWith(prometheus.Labels{"controller": "thanos"}, r)
 
 	o := &Operator{
-		kclient:         client,
-		mdClient:        mdClient,
-		mclient:         mclient,
-		logger:          logger,
-		accessor:        operator.NewAccessor(logger),
-		metrics:         operator.NewMetrics(r),
-		reconciliations: &operator.ReconciliationTracker{},
+		kclient:             client,
+		mdClient:            mdClient,
+		mclient:             mclient,
+		logger:              logger,
+		accessor:            operator.NewAccessor(logger),
+		metrics:             operator.NewMetrics(r),
+		reconciliations:     &operator.ReconciliationTracker{},
+		canReadStorageClass: canReadStorageClass,
 		config: Config{
 			KubernetesVersion: conf.KubernetesVersion,
 			ReloaderConfig:    conf.ReloaderConfig,
@@ -538,6 +540,10 @@ func (o *Operator) sync(ctx context.Context, key string) error {
 	logger := log.With(o.logger, "key", key)
 	level.Info(logger).Log("msg", "sync thanos-ruler")
 
+	if err := operator.CheckStorageClass(ctx, o.canReadStorageClass, o.kclient, tr.Spec.Storage); err != nil {
+		return err
+	}
+
 	ruleConfigMapNames, err := o.createOrUpdateRuleConfigMaps(ctx, tr)
 	if err != nil {
 		return err
test:

@@ -247,6 +247,50 @@ func testAMStorageUpdate(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	// Invalid storageclass e2e test
+	_, err = framework.PatchAlertmanager(
+		context.Background(),
+		am.Name,
+		am.Namespace,
+		monitoringv1.AlertmanagerSpec{
+			Storage: &monitoringv1.StorageSpec{
+				VolumeClaimTemplate: monitoringv1.EmbeddedPersistentVolumeClaim{
+					Spec: v1.PersistentVolumeClaimSpec{
+						StorageClassName: ptr.To("unknown-storage-class"),
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								v1.ResourceStorage: resource.MustParse("200Mi"),
+							},
+						},
+					},
+				},
+			},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var loopError error
+	err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, framework.DefaultTimeout, true, func(ctx context.Context) (bool, error) {
+		current, err := framework.MonClientV1.Alertmanagers(ns).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			loopError = fmt.Errorf("failed to get object: %w", err)
+			return false, nil
+		}
+
+		if err := framework.AssertCondition(current.Status.Conditions, monitoringv1.Reconciled, monitoringv1.ConditionFalse); err == nil {
+			return true, nil
+		}
+
+		return false, nil
+	})
+	if err != nil {
+		t.Fatalf("%v: %v", err, loopError)
+	}
 }
 
 func testAMExposingWithKubernetesAPI(t *testing.T) {
@@ -313,6 +313,7 @@ func testAllNSPrometheus(t *testing.T) {
 		"ScrapeConfigKubeNode":                     testScrapeConfigKubernetesNodeRole,
 		"ScrapeConfigDNSSD":                        testScrapeConfigDNSSDConfig,
 		"PrometheusWithStatefulsetCreationFailure": testPrometheusWithStatefulsetCreationFailure,
+		"PrometheusAgentCheckStorageClass":         testAgentCheckStorageClass,
 	}
 
 	for name, f := range testFuncs {
@@ -330,6 +331,7 @@ func testAllNSThanosRuler(t *testing.T) {
 		"ThanosRulerMinReadySeconds":    testTRMinReadySeconds,
 		"ThanosRulerAlertmanagerConfig": testTRAlertmanagerConfig,
 		"ThanosRulerQueryConfig":        testTRQueryConfig,
+		"ThanosRulerCheckStorageClass":  testTRCheckStorageClass,
 	}
 	for name, f := range testFuncs {
 		t.Run(name, f)
@@ -1114,6 +1114,52 @@ func testPromStorageUpdate(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
 
+	// Invalid storageclass e2e test
+	_, err = framework.PatchPrometheus(
+		context.Background(),
+		p.Name,
+		ns,
+		monitoringv1.PrometheusSpec{
+			CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+				Storage: &monitoringv1.StorageSpec{
+					VolumeClaimTemplate: monitoringv1.EmbeddedPersistentVolumeClaim{
+						Spec: v1.PersistentVolumeClaimSpec{
+							StorageClassName: ptr.To("unknown-storage-class"),
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceStorage: resource.MustParse("200Mi"),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var loopError error
+	err = wait.PollUntilContextTimeout(context.Background(), 5*time.Second, framework.DefaultTimeout, true, func(ctx context.Context) (bool, error) {
+		current, err := framework.MonClientV1.Prometheuses(ns).Get(ctx, p.Name, metav1.GetOptions{})
+		if err != nil {
+			loopError = fmt.Errorf("failed to get object: %w", err)
+			return false, nil
+		}
+
+		if err := framework.AssertCondition(current.Status.Conditions, monitoringv1.Reconciled, monitoringv1.ConditionFalse); err == nil {
+			return true, nil
+		}
+
+		return false, nil
+	})
+	if err != nil {
+		t.Fatalf("%v: %v", err, loopError)
+	}
 }
 
 func testPromReloadConfig(t *testing.T) {
@@ -16,7 +16,18 @@ package e2e
 
 import (
 	"context"
+	"fmt"
 	"testing"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/utils/ptr"
+
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
 )
 
 func testCreatePrometheusAgent(t *testing.T) {
@@ -69,3 +80,67 @@ func testAgentAndServerNameColision(t *testing.T) {
 	}
 
 }
+
+func testAgentCheckStorageClass(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	testCtx := framework.NewTestCtx(t)
+	defer testCtx.Cleanup(t)
+
+	ns := framework.CreateNamespace(ctx, t, testCtx)
+	framework.SetupPrometheusRBAC(ctx, t, testCtx, ns)
+	name := "test"
+
+	prometheusAgentCRD := framework.MakeBasicPrometheusAgent(ns, name, name, 1)
+
+	prometheusAgentCRD, err := framework.CreatePrometheusAgentAndWaitUntilReady(ctx, ns, prometheusAgentCRD)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Invalid storageclass e2e test
+	_, err = framework.PatchPrometheusAgent(
+		context.Background(),
+		prometheusAgentCRD.Name,
+		ns,
+		monitoringv1alpha1.PrometheusAgentSpec{
+			CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+				Storage: &monitoringv1.StorageSpec{
+					VolumeClaimTemplate: monitoringv1.EmbeddedPersistentVolumeClaim{
+						Spec: v1.PersistentVolumeClaimSpec{
+							StorageClassName: ptr.To("unknown-storage-class"),
+							Resources: v1.ResourceRequirements{
+								Requests: v1.ResourceList{
+									v1.ResourceStorage: resource.MustParse("200Mi"),
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var loopError error
+	err = wait.PollUntilContextTimeout(ctx, 5*time.Second, framework.DefaultTimeout, true, func(ctx context.Context) (bool, error) {
+		current, err := framework.MonClientV1alpha1.PrometheusAgents(ns).Get(ctx, name, metav1.GetOptions{})
+		if err != nil {
+			loopError = fmt.Errorf("failed to get object: %w", err)
+			return false, nil
+		}
+
+		if err := framework.AssertCondition(current.Status.Conditions, monitoringv1.Reconciled, monitoringv1.ConditionFalse); err == nil {
+			return true, nil
+		}
+
+		return false, nil
+	})
+	if err != nil {
+		t.Fatalf("%v: %v", err, loopError)
+	}
+}
@@ -23,8 +23,10 @@ import (
 	"github.com/stretchr/testify/require"
 	"google.golang.org/protobuf/proto"
 	v1 "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/wait"
+	"k8s.io/utils/ptr"
 
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 )
@@ -448,3 +450,64 @@ func testTRQueryConfig(t *testing.T) {
 		t.Fatal(err)
 	}
 }
+
+func testTRCheckStorageClass(t *testing.T) {
+	t.Parallel()
+	ctx := context.Background()
+	testCtx := framework.NewTestCtx(t)
+	defer testCtx.Cleanup(t)
+
+	ns := framework.CreateNamespace(ctx, t, testCtx)
+	framework.SetupPrometheusRBAC(ctx, t, testCtx, ns)
+
+	tr := framework.MakeBasicThanosRuler("test", 1, "http://test.example.com")
+
+	tr, err := framework.CreateThanosRulerAndWaitUntilReady(ctx, ns, tr)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	// Invalid storageclass e2e test
+	_, err = framework.PatchThanosRuler(
+		context.Background(),
+		tr.Name,
+		ns,
+		monitoringv1.ThanosRulerSpec{
+			Storage: &monitoringv1.StorageSpec{
+				VolumeClaimTemplate: monitoringv1.EmbeddedPersistentVolumeClaim{
+					Spec: v1.PersistentVolumeClaimSpec{
+						StorageClassName: ptr.To("unknown-storage-class"),
+						Resources: v1.ResourceRequirements{
+							Requests: v1.ResourceList{
+								v1.ResourceStorage: resource.MustParse("200Mi"),
+							},
+						},
+					},
+				},
+			},
+		},
+	)
+	if err != nil {
+		t.Fatal(err)
+	}
+
+	var loopError error
+	err = wait.PollUntilContextTimeout(ctx, 5*time.Second, framework.DefaultTimeout, true, func(ctx context.Context) (bool, error) {
+		current, err := framework.MonClientV1.ThanosRulers(ns).Get(ctx, tr.Name, metav1.GetOptions{})
+		if err != nil {
+			loopError = fmt.Errorf("failed to get object: %w", err)
+			return false, nil
+		}
+
+		if err := framework.AssertCondition(current.Status.Conditions, monitoringv1.Reconciled, monitoringv1.ConditionFalse); err == nil {
+			return true, nil
+		}
+
+		return false, nil
+	})
+	if err != nil {
+		t.Fatalf("%v: %v", err, loopError)
+	}
+}
@@ -16,6 +16,7 @@ package framework
 
 import (
 	"context"
+	"encoding/json"
 	"fmt"
 	"time"
 
@@ -23,11 +24,15 @@ import (
 	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/utils/ptr"
 
+	"github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring"
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
 	"github.com/prometheus-operator/prometheus-operator/pkg/operator"
-	"github.com/prometheus-operator/prometheus-operator/pkg/prometheus/agent"
+	prometheusagent "github.com/prometheus-operator/prometheus-operator/pkg/prometheus/agent"
 )
 
 func (f *Framework) MakeBasicPrometheusAgent(ns, name, group string, replicas int32) *monitoringv1alpha1.PrometheusAgent {
@@ -128,3 +133,35 @@ func (f *Framework) DeletePrometheusAgentAndWaitUntilGone(ctx context.Context, n
 
 	return nil
 }
+
+func (f *Framework) PatchPrometheusAgent(ctx context.Context, name, ns string, spec monitoringv1alpha1.PrometheusAgentSpec) (*monitoringv1alpha1.PrometheusAgent, error) {
+	b, err := json.Marshal(
+		&monitoringv1alpha1.PrometheusAgent{
+			TypeMeta: metav1.TypeMeta{
+				Kind:       monitoringv1alpha1.PrometheusAgentsKind,
+				APIVersion: schema.GroupVersion{Group: monitoring.GroupName, Version: monitoringv1alpha1.Version}.String(),
+			},
+			Spec: spec,
+		},
+	)
+	if err != nil {
+		return nil, fmt.Errorf("failed to marshal PrometheusAgent spec: %w", err)
+	}
+
+	p, err := f.MonClientV1alpha1.PrometheusAgents(ns).Patch(
+		ctx,
+		name,
+		types.ApplyPatchType,
+		b,
+		metav1.PatchOptions{
+			Force:        ptr.To(true),
+			FieldManager: "e2e-test",
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+
+	return p, nil
+}
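The helper patches with server-side apply (types.ApplyPatchType plus Force), so the supplied spec replaces whatever the "e2e-test" field manager owned before. A hedged usage sketch, assuming the e2e package's global framework and the imports shown earlier; the patchAgentLogLevel helper and the LogLevel value are illustrative, not part of this commit:

```go
// Hypothetical caller inside the e2e test package; "test" mirrors the agent
// name used in the tests above.
func patchAgentLogLevel(t *testing.T, ns string) {
	_, err := framework.PatchPrometheusAgent(
		context.Background(),
		"test",
		ns,
		monitoringv1alpha1.PrometheusAgentSpec{
			CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
				LogLevel: "debug",
			},
		},
	)
	if err != nil {
		t.Fatal(err)
	}
}
```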