mirror of https://github.com/prometheus-operator/prometheus-operator.git (synced 2025-04-21 03:38:43 +00:00)

chore: fix testScrapeConfigKubernetesNodeRole()

Signed-off-by: Simon Pasquier <spasquie@redhat.com>

commit 677c2d565f (parent fb35b483d3)

4 changed files with 85 additions and 9 deletions
.github/workflows/e2e.yaml (vendored, 7 changed lines)
@@ -78,9 +78,14 @@ jobs:
           cluster_name: e2e
       - name: Wait for cluster to finish bootstraping
         run: |
+          echo "Waiting for all nodes to be ready..."
+          kubectl wait --for=condition=Ready nodes --all --timeout=120s
+          kubectl get nodes
+          echo "Waiting for all pods to be ready..."
           kubectl wait --for=condition=Ready pods --all --all-namespaces --timeout=300s
-          kubectl cluster-info
           kubectl get pods -A
+          echo "Cluster information"
+          kubectl cluster-info
       - name: Load images
         run: |
           kind load docker-image -n e2e quay.io/prometheus-operator/prometheus-operator:$(git rev-parse --short HEAD)
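The new workflow step gates the e2e run on node and pod readiness before images are loaded. For readers who prefer the client-go view of what "kubectl wait --for=condition=Ready nodes --all" checks, here is a minimal sketch; it is an illustration only, and the allNodesReady helper, its package name, and its timeouts are assumptions rather than part of this commit.

package e2eutil

import (
	"context"
	"fmt"
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// allNodesReady polls until every node reports the Ready condition, roughly
// what `kubectl wait --for=condition=Ready nodes --all` does.
func allNodesReady(ctx context.Context, client kubernetes.Interface) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 2*time.Minute, true, func(ctx context.Context) (bool, error) {
		nodes, err := client.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
		if err != nil {
			return false, nil // transient API errors: keep polling
		}
		for _, n := range nodes.Items {
			ready := false
			for _, c := range n.Status.Conditions {
				if c.Type == v1.NodeReady && c.Status == v1.ConditionTrue {
					ready = true
					break
				}
			}
			if !ready {
				fmt.Printf("node %s is not ready yet\n", n.Name)
				return false, nil
			}
		}
		return true, nil
	})
}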
@@ -433,9 +433,11 @@ func testScrapeConfigKubernetesNodeRole(t *testing.T) {
 	require.NoError(t, err)
 
 	// Check that the targets appear in Prometheus and does proper scrapping
-	if err := framework.WaitForHealthyTargets(context.Background(), ns, "prometheus-operated", 1); err != nil {
-		t.Fatal(err)
-	}
+	nodes, err := framework.Nodes(context.Background())
+	require.NoError(t, err)
+
+	err = framework.WaitForHealthyTargets(context.Background(), ns, "prometheus-operated", len(nodes))
+	require.NoError(t, err)
 
 	// Remove the ScrapeConfig
 	err = framework.DeleteScrapeConfig(context.Background(), ns, "scrape-config")
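The test now derives its expectation from the cluster instead of hard-coding 1: with the Kubernetes "node" service-discovery role, Prometheus creates one target per cluster node, so the healthy-target count should equal len(nodes). WaitForHealthyTargets itself is not shown in this commit; as a rough sketch of how such a helper can be built (assumed, not the framework's actual implementation), one can poll the Prometheus HTTP API endpoint /api/v1/targets and count active targets whose health is "up". The promURL parameter and the timeouts below are assumptions for illustration.

package e2eutil

import (
	"context"
	"encoding/json"
	"fmt"
	"net/http"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

type targetsResponse struct {
	Data struct {
		ActiveTargets []struct {
			Health string `json:"health"`
		} `json:"activeTargets"`
	} `json:"data"`
}

// waitForHealthyTargets polls the Prometheus targets API until at least
// `expected` active targets report health "up".
func waitForHealthyTargets(ctx context.Context, promURL string, expected int) error {
	return wait.PollUntilContextTimeout(ctx, 5*time.Second, 5*time.Minute, true, func(ctx context.Context) (bool, error) {
		req, err := http.NewRequestWithContext(ctx, http.MethodGet, promURL+"/api/v1/targets", nil)
		if err != nil {
			return false, err
		}
		resp, err := http.DefaultClient.Do(req)
		if err != nil {
			return false, nil // Prometheus may not be reachable yet, keep polling.
		}
		defer resp.Body.Close()

		var tr targetsResponse
		if err := json.NewDecoder(resp.Body).Decode(&tr); err != nil {
			return false, nil
		}

		healthy := 0
		for _, t := range tr.Data.ActiveTargets {
			if t.Health == "up" {
				healthy++
			}
		}
		fmt.Printf("%d/%d healthy targets\n", healthy, expected)
		return healthy >= expected, nil
	})
}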
@@ -17,6 +17,7 @@ package framework
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"net/http"
 	"strings"

@@ -113,6 +114,15 @@ func New(kubeconfig, opImage, exampleDir, resourcesDir string, operatorVersion s
 		return nil, fmt.Errorf("creating v1beta1 monitoring client failed: %w", err)
 	}
 
+	nodes, err := cli.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list nodes: %w", err)
+	}
+
+	if len(nodes.Items) < 1 {
+		return nil, errors.New("no nodes returned")
+	}
+
 	f := &Framework{
 		RestConfig: config,
 		MasterHost: config.Host,

@@ -757,11 +767,17 @@ func (f *Framework) CreateOrUpdateAdmissionWebhookServer(
 		return nil, nil, err
 	}
 
-	// Deploy only 1 replica because the end-to-end environment (single node
-	// cluster) can't satisfy the anti-affinity rules.
-	deploy.Spec.Replicas = ptr.To(int32(1))
-	deploy.Spec.Template.Spec.Affinity = nil
-	deploy.Spec.Strategy = appsv1.DeploymentStrategy{}
+	// Adjust replica count in case of single-node clusters because the
+	// deployment manifest has anti-affinity rules.
+	nodes, err := f.Nodes(ctx)
+	if err != nil {
+		return nil, nil, err
+	}
+	if len(nodes) == 1 {
+		deploy.Spec.Replicas = ptr.To(int32(1))
+		deploy.Spec.Template.Spec.Affinity = nil
+		deploy.Spec.Strategy = appsv1.DeploymentStrategy{}
+	}
 
 	deploy.Spec.Template.Spec.Containers[0].Args = append(deploy.Spec.Template.Spec.Containers[0].Args, "--log-level=debug")
 
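The webhook-server setup now relaxes replicas, affinity, and rollout strategy only when the cluster has a single node. For context, the sketch below shows the general shape of a required pod anti-affinity rule that forces replicas onto distinct nodes, which is why more than one replica can never schedule on a one-node e2e cluster. The function, package name, and label-selector value are assumptions for illustration, not copied from the repository's admission-webhook manifest.

package e2eutil

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// webhookAntiAffinity builds a required pod anti-affinity of the kind a
// multi-replica Deployment typically carries: no two matching pods may share
// a node, so replicas beyond the node count stay Pending.
func webhookAntiAffinity() *corev1.Affinity {
	return &corev1.Affinity{
		PodAntiAffinity: &corev1.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []corev1.PodAffinityTerm{{
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{
						// Hypothetical label value, for illustration only.
						"app.kubernetes.io/name": "prometheus-operator-admission-webhook",
					},
				},
				// One matching pod per hostname, i.e. per node.
				TopologyKey: "kubernetes.io/hostname",
			}},
		},
	}
}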
test/framework/node.go (new file, 53 lines)

@@ -0,0 +1,53 @@
+// Copyright 2024 The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package framework
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/util/wait"
+)
+
+// Nodes returns the list of nodes in the cluster.
+func (f *Framework) Nodes(ctx context.Context) ([]v1.Node, error) {
+	var (
+		loopErr error
+		nodes   *v1.NodeList
+	)
+
+	err := wait.PollUntilContextTimeout(ctx, time.Second, time.Minute*1, true, func(_ context.Context) (bool, error) {
+		nodes, loopErr = f.KubeClient.CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
+		if loopErr != nil {
+			return false, nil
+		}
+
+		if len(nodes.Items) < 1 {
+			loopErr = errors.New("no nodes returned")
+			return false, nil
+		}
+
+		return true, nil
+	})
+	if err != nil {
+		return nil, fmt.Errorf("failed to list nodes: %v: %v", err, loopErr)
+	}
+
+	return nodes.Items, nil
+}
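The new helper leans on wait.PollUntilContextTimeout from k8s.io/apimachinery. As a tiny self-contained demo of its semantics, independent of this commit: with the fourth argument set to true the condition function runs immediately, it is then retried every interval until the timeout or the context ends, and returning (false, nil) means "not yet, keep polling".

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	attempts := 0
	// Poll every 100ms for up to 2s; succeed on the third attempt.
	err := wait.PollUntilContextTimeout(context.Background(), 100*time.Millisecond, 2*time.Second, true, func(_ context.Context) (bool, error) {
		attempts++
		return attempts >= 3, nil
	})
	fmt.Println("attempts:", attempts, "err:", err)
}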