Mirror of https://github.com/prometheus-operator/prometheus-operator.git (synced 2025-04-21 19:49:46 +00:00)
test: extend prometheus/alertmanager e2e test suites
Prometheus test suite now consists of:
- create/delete
- scale up/down
- version migration
- config generation, target discovery and basic query
- alertmanager discovery

Alertmanager test suite now consists of:
- create/delete
- scale up/down
- version migration
This commit is contained in:
parent 4e11a19ae8, commit fde52882a1
8 changed files with 623 additions and 15 deletions
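All of the new tests follow the same lifecycle: build a spec with a framework helper, create it and wait for readiness, mutate it (replicas or version) and wait again, and tear down in a deferred cleanup. A condensed sketch of that flow, for orientation only; it is not part of this commit and merely combines steps from the Alertmanager tests added below:

package e2e

import "testing"

// Illustrative condensation of the flow the new e2e tests share; the real
// tests in this commit split these steps across separate test functions.
func testAlertmanagerLifecycleSketch(t *testing.T) {
    name := "alertmanager-test"

    // Tear the cluster down even if an earlier step fails.
    defer func() {
        if err := framework.DeleteAlertmanagerAndWaitUntilGone(name); err != nil {
            t.Fatal(err)
        }
    }()

    // Create a 3-replica cluster and wait until every pod is running and ready.
    if err := framework.CreateAlertmanagerAndWaitUntilReady(framework.MakeBasicAlertmanager(name, 3)); err != nil {
        t.Fatal(err)
    }

    // Scale up to 5 replicas, then back down to 3; each update only returns
    // once the expected number of pods report ready.
    for _, replicas := range []int32{5, 3} {
        if err := framework.UpdateAlertmanagerAndWaitUntilReady(framework.MakeBasicAlertmanager(name, replicas)); err != nil {
            t.Fatal(err)
        }
    }
}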
Makefile (10 lines changed)
@@ -4,6 +4,8 @@ REPO?=quay.io/coreos/prometheus-operator
TAG?=$(shell git rev-parse --short HEAD)
NAMESPACE?=prometheus-operator-e2e-tests-$(shell head /dev/urandom | tr -dc a-z0-9 | head -c 13 ; echo '')
CLUSTER_IP?=$(shell minikube ip)

build:
    ./scripts/check_license.sh
    go build github.com/coreos/prometheus-operator/cmd/operator

@@ -13,13 +15,17 @@ container:
    docker build -t $(REPO):$(TAG) .

e2e-test:
-   go test -v ./test/e2e/ --kubeconfig "$(HOME)/.kube/config" --operator-image=quay.io/coreos/prometheus-operator:$(TAG) --namespace=$(NAMESPACE)
+   go test -timeout 20m -v ./test/e2e/ $(TEST_RUN_ARGS) --kubeconfig "$(HOME)/.kube/config" --operator-image=quay.io/coreos/prometheus-operator:$(TAG) --namespace=$(NAMESPACE) --cluster-ip=$(CLUSTER_IP)

e2e-status:
    kubectl get prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods --all-namespaces

e2e:
    $(MAKE) container
    $(MAKE) e2e-test

clean-e2e:
-   kubectl delete namespace prometheus-operator-e2e-tests
+   kubectl -n $(NAMESPACE) delete prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods --all
+   kubectl delete namespace $(NAMESPACE)

.PHONY: all build container e2e clean-e2e

@@ -180,6 +180,18 @@ func makeStatefulSetSpec(a *v1alpha1.Alertmanager) v1beta1.StatefulSetSpec {
                    SubPath:   subPathForStorage(a.Spec.Storage),
                },
            },
            ReadinessProbe: &v1.Probe{
                Handler: v1.Handler{
                    HTTPGet: &v1.HTTPGetAction{
                        Path: path.Clean(webRoutePrefix + "/api/v1/status"),
                        Port: intstr.FromString("web"),
                    },
                },
                InitialDelaySeconds: 3,
                TimeoutSeconds:      3,
                PeriodSeconds:       5,
                FailureThreshold:    10,
            },
        }, {
            Name:  "config-reloader",
            Image: "jimmidyson/configmap-reload",

@@ -31,3 +31,51 @@ func TestAlertmanagerCreateDeleteCluster(t *testing.T) {
        t.Fatal(err)
    }
}

func TestAlertmanagerScaling(t *testing.T) {
    name := "alertmanager-test"

    defer func() {
        if err := framework.DeleteAlertmanagerAndWaitUntilGone(name); err != nil {
            t.Fatal(err)
        }
    }()

    if err := framework.CreateAlertmanagerAndWaitUntilReady(framework.MakeBasicAlertmanager(name, 3)); err != nil {
        t.Fatal(err)
    }

    if err := framework.UpdateAlertmanagerAndWaitUntilReady(framework.MakeBasicAlertmanager(name, 5)); err != nil {
        t.Fatal(err)
    }

    if err := framework.UpdateAlertmanagerAndWaitUntilReady(framework.MakeBasicAlertmanager(name, 3)); err != nil {
        t.Fatal(err)
    }
}

//func TestAlertmanagerVersionMigration(t *testing.T) {
//    name := "alertmanager-test"
//
//    defer func() {
//        if err := framework.DeleteAlertmanagerAndWaitUntilGone(name); err != nil {
//            t.Fatal(err)
//        }
//    }()
//
//    am := framework.MakeBasicAlertmanager(name, 3)
//    am.Spec.Version = "v0.5.0"
//    if err := framework.CreateAlertmanagerAndWaitUntilReady(am); err != nil {
//        t.Fatal(err)
//    }
//
//    am.Spec.Version = "v0.5.1"
//    if err := framework.UpdateAlertmanagerAndWaitUntilReady(am); err != nil {
//        t.Fatal(err)
//    }
//
//    am.Spec.Version = "v0.5.0"
//    if err := framework.UpdateAlertmanagerAndWaitUntilReady(am); err != nil {
//        t.Fatal(err)
//    }
//}

@@ -16,9 +16,11 @@ package framework

import (
    "fmt"
    "log"
    "time"

    "k8s.io/client-go/pkg/api/v1"
    "k8s.io/client-go/pkg/util/intstr"

    "github.com/coreos/prometheus-operator/pkg/alertmanager"
    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"

@@ -45,11 +47,38 @@ func (f *Framework) MakeBasicAlertmanager(name string, replicas int32) *v1alpha1
        },
        Spec: v1alpha1.AlertmanagerSpec{
            Replicas: replicas,
            Version:  "v0.5.0",
        },
    }
}

func (f *Framework) MakeAlertmanagerService(name, group string) *v1.Service {
    return &v1.Service{
        ObjectMeta: v1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                "group": group,
            },
        },
        Spec: v1.ServiceSpec{
            Type: "NodePort",
            Ports: []v1.ServicePort{
                v1.ServicePort{
                    Name:       "web",
                    Port:       9093,
                    TargetPort: intstr.FromString("web"),
                    NodePort:   30903,
                },
            },
            Selector: map[string]string{
                "alertmanager": name,
            },
        },
    }
}

func (f *Framework) CreateAlertmanagerAndWaitUntilReady(a *v1alpha1.Alertmanager) error {
    log.Printf("Creating Alertmanager (%s/%s)", f.Namespace.Name, a.Name)
    _, err := f.KubeClient.CoreV1().ConfigMaps(f.Namespace.Name).Create(
        &v1.ConfigMap{
            ObjectMeta: v1.ObjectMeta{

@@ -69,21 +98,46 @@ func (f *Framework) CreateAlertmanagerAndWaitUntilReady(a *v1alpha1.Alertmanager
        return err
    }

-   _, err = f.WaitForPodsReady(time.Minute*2, int(a.Spec.Replicas), alertmanager.ListOptions(a.Name))
+   _, err = f.WaitForPodsReady(time.Minute*2, int(a.Spec.Replicas), amImage(a.Spec.Version), alertmanager.ListOptions(a.Name))
    if err != nil {
        return fmt.Errorf("failed to create an Alertmanager cluster (%s) with %d instances: %v", a.Name, a.Spec.Replicas, err)
    }
    return nil
}

func (f *Framework) UpdateAlertmanagerAndWaitUntilReady(a *v1alpha1.Alertmanager) error {
    log.Printf("Updating Alertmanager (%s/%s)", f.Namespace.Name, a.Name)
    _, err := f.MonClient.Alertmanagers(f.Namespace.Name).Update(a)
    if err != nil {
        return err
    }

    _, err = f.WaitForPodsReady(time.Minute*2, int(a.Spec.Replicas), amImage(a.Spec.Version), alertmanager.ListOptions(a.Name))
    if err != nil {
        return fmt.Errorf("failed to update %d Alertmanager instances (%s): %v", a.Spec.Replicas, a.Name, err)
    }

    return nil
}

func (f *Framework) DeleteAlertmanagerAndWaitUntilGone(name string) error {
    log.Printf("Deleting Alertmanager (%s/%s)", f.Namespace.Name, name)
    a, err := f.MonClient.Alertmanagers(f.Namespace.Name).Get(name)
    if err != nil {
        return err
    }

    if err := f.MonClient.Alertmanagers(f.Namespace.Name).Delete(name, nil); err != nil {
        return err
    }

-   if _, err := f.WaitForPodsReady(time.Minute*2, 0, alertmanager.ListOptions(name)); err != nil {
+   if _, err := f.WaitForPodsReady(time.Minute*2, 0, amImage(a.Spec.Version), alertmanager.ListOptions(name)); err != nil {
        return fmt.Errorf("failed to teardown Alertmanager (%s) instances: %v", name, err)
    }

    return f.KubeClient.CoreV1().ConfigMaps(f.Namespace.Name).Delete(name, nil)
}

func amImage(version string) string {
    return fmt.Sprintf("quay.io/prometheus/alertmanager:%s", version)
}

@@ -41,10 +41,11 @@ type Framework struct {
    MasterHost  string
    Namespace   *v1.Namespace
    OperatorPod *v1.Pod
    ClusterIP   string
}

// Setup setups a test framework and returns it.
-func New(ns, kubeconfig, opImage string) (*Framework, error) {
+func New(ns, kubeconfig, opImage, ip string) (*Framework, error) {
    config, err := clientcmd.BuildConfigFromFlags("", kubeconfig)
    if err != nil {
        return nil, err

@@ -80,6 +81,7 @@ func New(ns, kubeconfig, opImage string) (*Framework, error) {
        MonClient:  mclient,
        HTTPClient: httpc,
        Namespace:  namespace,
        ClusterIP:  ip,
    }

    err = f.setup(opImage)

@@ -124,7 +126,7 @@ func (f *Framework) setupPrometheusOperator(opImage string) error {
    }

    opts := v1.ListOptions{LabelSelector: fields.SelectorFromSet(fields.Set(deploy.Spec.Template.ObjectMeta.Labels)).String()}
-   pl, err := f.WaitForPodsReady(60*time.Second, 1, opts)
+   pl, err := f.WaitForPodsReady(60*time.Second, 1, opImage, opts)
    if err != nil {
        return err
    }

@@ -154,11 +156,11 @@ func (f *Framework) Teardown() error {

// WaitForPodsReady waits for a selection of Pods to be running and each
// container to pass its readiness check.
-func (f *Framework) WaitForPodsReady(timeout time.Duration, expectedReplicas int, opts v1.ListOptions) (*v1.PodList, error) {
-   return waitForPodsReady(f.KubeClient.Core(), timeout, expectedReplicas, f.Namespace.Name, opts)
+func (f *Framework) WaitForPodsReady(timeout time.Duration, expectedReplicas int, image string, opts v1.ListOptions) (*v1.PodList, error) {
+   return waitForPodsReady(f.KubeClient.Core(), timeout, expectedReplicas, image, f.Namespace.Name, opts)
}

-func waitForPodsReady(client v1client.CoreV1Interface, timeout time.Duration, expectedRunning int, namespace string, opts v1.ListOptions) (*v1.PodList, error) {
+func waitForPodsReady(client v1client.CoreV1Interface, timeout time.Duration, expectedRunning int, image, namespace string, opts v1.ListOptions) (*v1.PodList, error) {
    t := time.After(timeout)
    ticker := time.NewTicker(500 * time.Millisecond)
    defer ticker.Stop()

@@ -180,7 +182,8 @@ func waitForPodsReady(client v1client.CoreV1Interface, timeout time.Duration, ex
        if err != nil {
            return nil, err
        }
-       if isRunningAndReady {
+
+       if isRunningAndReady && podRunsImage(p, image) {
            runningAndReady++
        }
    }

@@ -192,6 +195,16 @@ func waitForPodsReady(client v1client.CoreV1Interface, timeout time.Duration, ex
    }
}

func podRunsImage(p v1.Pod, image string) bool {
    for _, c := range p.Spec.Containers {
        if image == c.Image {
            return true
        }
    }

    return false
}

func (f *Framework) CreateDeployment(kclient kubernetes.Interface, ns string, deploy *v1beta1.Deployment) error {
    if _, err := f.KubeClient.Extensions().Deployments(ns).Create(deploy); err != nil {
        return err

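The key framework change above is that WaitForPodsReady now also takes the container image the pods are expected to run, so a scale or version-migration step only completes once every replica has been rolled onto the expected image. A minimal usage sketch, assuming the package-level framework variable from the e2e test setup; waitForMigratedPrometheus itself is a hypothetical helper, not part of this commit:

package e2e

import (
    "testing"
    "time"

    "github.com/coreos/prometheus-operator/pkg/prometheus"
)

// waitForMigratedPrometheus is a hypothetical helper (not in this commit).
// It returns only when all replicas are running, ready, and using the
// expected Prometheus image, which is what makes version-migration
// assertions meaningful.
func waitForMigratedPrometheus(t *testing.T, name, version string, replicas int) {
    image := "quay.io/prometheus/prometheus:" + version
    if _, err := framework.WaitForPodsReady(2*time.Minute, replicas, image, prometheus.ListOptions(name)); err != nil {
        t.Fatalf("pods did not converge on image %s: %v", image, err)
    }
}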
@@ -16,32 +16,109 @@ package framework

import (
    "fmt"
    "log"
    "time"

    "k8s.io/client-go/pkg/api/resource"
    "k8s.io/client-go/pkg/api/v1"
    metav1 "k8s.io/client-go/pkg/apis/meta/v1"
    "k8s.io/client-go/pkg/util/intstr"

    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
    "github.com/coreos/prometheus-operator/pkg/prometheus"
)

-func (f *Framework) MakeBasicPrometheus(name string, replicas int32) *v1alpha1.Prometheus {
+func (f *Framework) MakeBasicPrometheus(name, group string, replicas int32) *v1alpha1.Prometheus {
    return &v1alpha1.Prometheus{
        ObjectMeta: v1.ObjectMeta{
            Name: name,
        },
        Spec: v1alpha1.PrometheusSpec{
            Replicas: replicas,
            Version:  "v1.4.0",
            ServiceMonitorSelector: &metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "group": group,
                },
            },
            Resources: v1.ResourceRequirements{
                Requests: v1.ResourceList{
                    v1.ResourceMemory: resource.MustParse("400Mi"),
                },
            },
        },
    }
}

func (f *Framework) AddAlertingToPrometheus(p *v1alpha1.Prometheus, name string) {
    p.Spec.Alerting = v1alpha1.AlertingSpec{
        Alertmanagers: []v1alpha1.AlertmanagerEndpoints{
            v1alpha1.AlertmanagerEndpoints{
                Namespace: f.Namespace.Name,
                Name:      name,
                Port:      intstr.FromString("web"),
            },
        },
    }
}

func (f *Framework) MakeBasicServiceMonitor(name string) *v1alpha1.ServiceMonitor {
    return &v1alpha1.ServiceMonitor{
        ObjectMeta: v1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                "group": name,
            },
        },
        Spec: v1alpha1.ServiceMonitorSpec{
            Selector: metav1.LabelSelector{
                MatchLabels: map[string]string{
                    "group": name,
                },
            },
            Endpoints: []v1alpha1.Endpoint{
                v1alpha1.Endpoint{
                    Port:     "web",
                    Interval: "30s",
                },
            },
        },
    }
}

func (f *Framework) MakePrometheusService(name, group string) *v1.Service {
    return &v1.Service{
        ObjectMeta: v1.ObjectMeta{
            Name: name,
            Labels: map[string]string{
                "group": group,
            },
        },
        Spec: v1.ServiceSpec{
            Type: "NodePort",
            Ports: []v1.ServicePort{
                v1.ServicePort{
                    Name:       "web",
                    Port:       9090,
                    TargetPort: intstr.FromString("web"),
                    NodePort:   30900,
                },
            },
            Selector: map[string]string{
                "prometheus": name,
            },
        },
    }
}

func (f *Framework) CreatePrometheusAndWaitUntilReady(p *v1alpha1.Prometheus) error {
    log.Printf("Creating Prometheus (%s/%s)", f.Namespace.Name, p.Name)
    _, err := f.MonClient.Prometheuses(f.Namespace.Name).Create(p)
    if err != nil {
        return err
    }

-   _, err = f.WaitForPodsReady(time.Minute*2, int(p.Spec.Replicas), prometheus.ListOptions(p.Name))
+   _, err = f.WaitForPodsReady(time.Minute*2, int(p.Spec.Replicas), promImage(p.Spec.Version), prometheus.ListOptions(p.Name))
    if err != nil {
        return fmt.Errorf("failed to create %d Prometheus instances (%s): %v", p.Spec.Replicas, p.Name, err)
    }

@@ -49,14 +126,39 @@ func (f *Framework) CreatePrometheusAndWaitUntilReady(p *v1alpha1.Prometheus) er
    return nil
}

func (f *Framework) UpdatePrometheusAndWaitUntilReady(p *v1alpha1.Prometheus) error {
    log.Printf("Updating Prometheus (%s/%s)", f.Namespace.Name, p.Name)
    _, err := f.MonClient.Prometheuses(f.Namespace.Name).Update(p)
    if err != nil {
        return err
    }

    _, err = f.WaitForPodsReady(time.Minute*2, int(p.Spec.Replicas), promImage(p.Spec.Version), prometheus.ListOptions(p.Name))
    if err != nil {
        return fmt.Errorf("failed to update %d Prometheus instances (%s): %v", p.Spec.Replicas, p.Name, err)
    }

    return nil
}

func (f *Framework) DeletePrometheusAndWaitUntilGone(name string) error {
    log.Printf("Deleting Prometheus (%s/%s)", f.Namespace.Name, name)
    p, err := f.MonClient.Prometheuses(f.Namespace.Name).Get(name)
    if err != nil {
        return err
    }

    if err := f.MonClient.Prometheuses(f.Namespace.Name).Delete(name, nil); err != nil {
        return err
    }

-   if _, err := f.WaitForPodsReady(time.Minute*2, 0, prometheus.ListOptions(name)); err != nil {
+   if _, err := f.WaitForPodsReady(time.Minute*2, 0, promImage(p.Spec.Version), prometheus.ListOptions(name)); err != nil {
        return fmt.Errorf("failed to teardown Prometheus instances (%s): %v", name, err)
    }

    return nil
}

func promImage(version string) string {
    return fmt.Sprintf("quay.io/prometheus/prometheus:%s", version)
}

@@ -25,15 +25,19 @@ import (

var framework *operatorFramework.Framework

// Basic set of e2e tests for the operator:
// - config reload (with and without external url)

func TestMain(m *testing.M) {
    kubeconfig := flag.String("kubeconfig", "", "kube config path, e.g. $HOME/.kube/config")
    opImage := flag.String("operator-image", "", "operator image, e.g. quay.io/coreos/prometheus-operator")
    ns := flag.String("namespace", "prometheus-operator-e2e-tests", "e2e test namespace")
    ip := flag.String("cluster-ip", "", "ip of the kubernetes cluster to use for external requests")
    flag.Parse()

    log.Println("setting up e2e tests ...")
    var err error
-   if framework, err = operatorFramework.New(*ns, *kubeconfig, *opImage); err != nil {
+   if framework, err = operatorFramework.New(*ns, *kubeconfig, *opImage, *ip); err != nil {
        log.Printf("failed to setup framework: %v\n", err)
        os.Exit(1)
    }

@@ -15,7 +15,19 @@
package e2e

import (
    "encoding/json"
    "fmt"
    "log"
    "net/http"
    "reflect"
    "sort"
    "testing"
    "time"

    metav1 "k8s.io/client-go/pkg/apis/meta/v1"

    "github.com/coreos/prometheus-operator/pkg/alertmanager"
    "github.com/coreos/prometheus-operator/pkg/prometheus"
)

func TestPrometheusCreateDeleteCluster(t *testing.T) {

@@ -27,7 +39,364 @@ func TestPrometheusCreateDeleteCluster(t *testing.T) {
        }
    }()

-   if err := framework.CreatePrometheusAndWaitUntilReady(framework.MakeBasicPrometheus(name, 1)); err != nil {
+   if err := framework.CreatePrometheusAndWaitUntilReady(framework.MakeBasicPrometheus(name, name, 1)); err != nil {
        t.Fatal(err)
    }
}

func TestPrometheusScaleUpDownCluster(t *testing.T) {
    name := "prometheus-test"

    defer func() {
        if err := framework.DeletePrometheusAndWaitUntilGone(name); err != nil {
            t.Fatal(err)
        }
    }()

    if err := framework.CreatePrometheusAndWaitUntilReady(framework.MakeBasicPrometheus(name, name, 1)); err != nil {
        t.Fatal(err)
    }

    if err := framework.UpdatePrometheusAndWaitUntilReady(framework.MakeBasicPrometheus(name, name, 3)); err != nil {
        t.Fatal(err)
    }

    if err := framework.UpdatePrometheusAndWaitUntilReady(framework.MakeBasicPrometheus(name, name, 2)); err != nil {
        t.Fatal(err)
    }
}

func TestPrometheusVersionMigration(t *testing.T) {
    name := "prometheus-test"

    defer func() {
        if err := framework.DeletePrometheusAndWaitUntilGone(name); err != nil {
            t.Fatal(err)
        }
    }()

    p := framework.MakeBasicPrometheus(name, name, 1)

    p.Spec.Version = "v1.4.0"
    if err := framework.CreatePrometheusAndWaitUntilReady(p); err != nil {
        t.Fatal(err)
    }

    p.Spec.Version = "v1.4.1"
    if err := framework.UpdatePrometheusAndWaitUntilReady(p); err != nil {
        t.Fatal(err)
    }

    p.Spec.Version = "v1.4.0"
    if err := framework.UpdatePrometheusAndWaitUntilReady(p); err != nil {
        t.Fatal(err)
    }
}

func TestPrometheusDiscovery(t *testing.T) {
    prometheusName := "prometheus-test"
    group := "servicediscovery-test"

    defer func() {
        if err := framework.DeletePrometheusAndWaitUntilGone(prometheusName); err != nil {
            t.Fatal(err)
        }
        if err := framework.MonClient.ServiceMonitors(framework.Namespace.Name).Delete(group, nil); err != nil {
            t.Fatal(err)
        }
        if err := framework.KubeClient.CoreV1().Services(framework.Namespace.Name).Delete(prometheusName, nil); err != nil {
            t.Fatal(err)
        }
    }()

    log.Print("Creating Prometheus Service")
    svc := framework.MakePrometheusService(prometheusName, group)
    if _, err := framework.KubeClient.CoreV1().Services(framework.Namespace.Name).Create(svc); err != nil {
        t.Fatalf("Creating ServiceMonitor failed: %s", err)
    }

    log.Print("Creating Prometheus ServiceMonitor")
    s := framework.MakeBasicServiceMonitor(group)
    if _, err := framework.MonClient.ServiceMonitors(framework.Namespace.Name).Create(s); err != nil {
        t.Fatalf("Creating ServiceMonitor failed: %s", err)
    }

    p := framework.MakeBasicPrometheus(prometheusName, group, 1)
    p.Spec.Version = "v1.5.0"
    if err := framework.CreatePrometheusAndWaitUntilReady(p); err != nil {
        t.Fatal(err)
    }

    log.Print("Validating Prometheus ConfigMap was created")
    _, err := framework.KubeClient.CoreV1().ConfigMaps(framework.Namespace.Name).Get(prometheusName, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Generated ConfigMap could not be retrieved: %s", err)
    }

    log.Print("Validating Prometheus Targets were properly discovered")
    err = poll(18*time.Minute, 30*time.Second, isDiscoveryWorking(prometheusName))
    if err != nil {
        t.Fatal(err)
    }
}

func TestPrometheusAlertmanagerDiscovery(t *testing.T) {
    prometheusName := "prometheus-test"
    alertmanagerName := "alertmanager-test"
    group := "servicediscovery-test"

    defer func() {
        if err := framework.DeleteAlertmanagerAndWaitUntilGone(alertmanagerName); err != nil {
            t.Fatal(err)
        }
        if err := framework.KubeClient.CoreV1().Services(framework.Namespace.Name).Delete(alertmanagerName, nil); err != nil {
            t.Fatal(err)
        }
        if err := framework.DeletePrometheusAndWaitUntilGone(prometheusName); err != nil {
            t.Fatal(err)
        }
        if err := framework.MonClient.ServiceMonitors(framework.Namespace.Name).Delete(group, nil); err != nil {
            t.Fatal(err)
        }
        if err := framework.KubeClient.CoreV1().Services(framework.Namespace.Name).Delete(prometheusName, nil); err != nil {
            t.Fatal(err)
        }
    }()

    log.Print("Creating Prometheus Service")
    svc := framework.MakePrometheusService(prometheusName, group)
    if _, err := framework.KubeClient.CoreV1().Services(framework.Namespace.Name).Create(svc); err != nil {
        t.Fatalf("Creating ServiceMonitor failed: %s", err)
    }

    log.Print("Creating Prometheus ServiceMonitor")
    s := framework.MakeBasicServiceMonitor(group)
    if _, err := framework.MonClient.ServiceMonitors(framework.Namespace.Name).Create(s); err != nil {
        t.Fatalf("Creating ServiceMonitor failed: %s", err)
    }

    p := framework.MakeBasicPrometheus(prometheusName, group, 1)
    framework.AddAlertingToPrometheus(p, alertmanagerName)
    p.Spec.Version = "v1.5.0"
    if err := framework.CreatePrometheusAndWaitUntilReady(p); err != nil {
        t.Fatal(err)
    }

    log.Print("Validating Prometheus ConfigMap was created")
    _, err := framework.KubeClient.CoreV1().ConfigMaps(framework.Namespace.Name).Get(prometheusName, metav1.GetOptions{})
    if err != nil {
        t.Fatalf("Generated ConfigMap could not be retrieved: %s", err)
    }

    log.Print("Creating Alertmanager Service")
    amsvc := framework.MakeAlertmanagerService(alertmanagerName, group)
    if _, err := framework.KubeClient.CoreV1().Services(framework.Namespace.Name).Create(amsvc); err != nil {
        t.Fatalf("Creating ServiceMonitor failed: %s", err)
    }

    time.Sleep(time.Minute)

    if err := framework.CreateAlertmanagerAndWaitUntilReady(framework.MakeBasicAlertmanager(alertmanagerName, 3)); err != nil {
        t.Fatal(err)
    }

    log.Print("Validating Prometheus properly discovered alertmanagers")
    err = poll(18*time.Minute, 30*time.Second, isAlertmanagerDiscoveryWorking(alertmanagerName))
    if err != nil {
        t.Fatal(err)
    }
}

func isDiscoveryWorking(prometheusName string) func() (bool, error) {
    return func() (bool, error) {
        pods, err := framework.KubeClient.CoreV1().Pods(framework.Namespace.Name).List(prometheus.ListOptions(prometheusName))
        if err != nil {
            return false, err
        }
        if 1 != len(pods.Items) {
            return false, nil
        }
        podIP := pods.Items[0].Status.PodIP
        expectedTargets := []string{fmt.Sprintf("http://%s:9090/metrics", podIP)}

        resp, err := http.Get(fmt.Sprintf("http://%s:30900/api/v1/targets", framework.ClusterIP))
        if err != nil {
            return false, err
        }
        defer resp.Body.Close()

        rt := prometheusTargetAPIResponse{}
        if err := json.NewDecoder(resp.Body).Decode(&rt); err != nil {
            return false, err
        }

        if !assertExpectedTargets(rt.Data.ActiveTargets, expectedTargets) {
            return false, nil
        }

        working, err := basicQueryWorking()
        if err != nil {
            return false, err
        }
        if !working {
            return false, nil
        }

        return true, nil
    }
}

type resultVector struct {
    Metric map[string]string `json:"metric"`
    Value  []interface{}     `json:"value"`
}

type queryResult struct {
    ResultType string          `json:"resultType"`
    Result     []*resultVector `json:"result"`
}

type prometheusQueryAPIResponse struct {
    Status string       `json:"status"`
    Data   *queryResult `json:"data"`
}

func basicQueryWorking() (bool, error) {
    resp, err := http.Get(fmt.Sprintf("http://%s:30900/api/v1/query?query=up", framework.ClusterIP))
    if err != nil {
        return false, err
    }
    defer resp.Body.Close()

    rq := prometheusQueryAPIResponse{}
    if err := json.NewDecoder(resp.Body).Decode(&rq); err != nil {
        return false, err
    }

    if rq.Status != "success" && rq.Data.Result[0].Value[1] == "1" {
        log.Printf("Query Response not successful.")
        return false, nil
    }

    return true, nil
}

func isAlertmanagerDiscoveryWorking(alertmanagerName string) func() (bool, error) {
    return func() (bool, error) {
        pods, err := framework.KubeClient.CoreV1().Pods(framework.Namespace.Name).List(alertmanager.ListOptions(alertmanagerName))
        if err != nil {
            return false, err
        }
        if 3 != len(pods.Items) {
            return false, nil
        }
        expectedAlertmanagerTargets := []string{}
        for _, p := range pods.Items {
            expectedAlertmanagerTargets = append(expectedAlertmanagerTargets, fmt.Sprintf("http://%s:9093/api/v1/alerts", p.Status.PodIP))
        }

        resp, err := http.Get(fmt.Sprintf("http://%s:30900/api/v1/alertmanagers", framework.ClusterIP))
        if err != nil {
            return false, err
        }
        defer resp.Body.Close()

        ra := prometheusAlertmanagerAPIResponse{}
        if err := json.NewDecoder(resp.Body).Decode(&ra); err != nil {
            return false, err
        }

        if assertExpectedAlertmanagerTargets(ra.Data.ActiveAlertmanagers, expectedAlertmanagerTargets) {
            return true, nil
        }

        return false, nil
    }
}

func assertExpectedTargets(targets []*target, expectedTargets []string) bool {
    log.Printf("Expected Targets: %#+v\n", expectedTargets)

    existingTargets := []string{}

    for _, t := range targets {
        existingTargets = append(existingTargets, t.ScrapeURL)
    }

    sort.Strings(expectedTargets)
    sort.Strings(existingTargets)

    if !reflect.DeepEqual(expectedTargets, existingTargets) {
        log.Printf("Existing Targets: %#+v\n", existingTargets)
        return false
    }

    return true
}

func assertExpectedAlertmanagerTargets(ams []*alertmanagerTarget, expectedTargets []string) bool {
    log.Printf("Expected Alertmanager Targets: %#+v\n", expectedTargets)

    existingTargets := []string{}

    for _, am := range ams {
        existingTargets = append(existingTargets, am.URL)
    }

    sort.Strings(expectedTargets)
    sort.Strings(existingTargets)

    if !reflect.DeepEqual(expectedTargets, existingTargets) {
        log.Printf("Existing Alertmanager Targets: %#+v\n", existingTargets)
        return false
    }

    return true
}

type target struct {
    ScrapeURL string `json:"scrapeUrl"`
}

type targetDiscovery struct {
    ActiveTargets []*target `json:"activeTargets"`
}

type prometheusTargetAPIResponse struct {
    Status string           `json:"status"`
    Data   *targetDiscovery `json:"data"`
}

type alertmanagerTarget struct {
    URL string `json:"url"`
}

type alertmanagerDiscovery struct {
    ActiveAlertmanagers []*alertmanagerTarget `json:"activeAlertmanagers"`
}

type prometheusAlertmanagerAPIResponse struct {
    Status string                 `json:"status"`
    Data   *alertmanagerDiscovery `json:"data"`
}

func poll(timeout, pollInterval time.Duration, pollFunc func() (bool, error)) error {
    t := time.After(timeout)
    ticker := time.NewTicker(pollInterval)
    defer ticker.Stop()

    for {
        select {
        case <-t:
            return fmt.Errorf("timed out")
        case <-ticker.C:
            b, err := pollFunc()
            if err != nil {
                return err
            }
            if b {
                return nil
            }
        }
    }
}