*: adapt implementation and tests to use v1 client
Commit 6fdd2afa7f (parent a7e8b412ea)
16 changed files with 361 additions and 227 deletions
Changed paths: pkg/alertmanager, pkg/api, pkg/k8sutil, pkg/prometheus, test
pkg/alertmanager (collector):

@@ -15,7 +15,7 @@
 package alertmanager
 
 import (
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
 
     "github.com/prometheus/client_golang/prometheus"
     "k8s.io/client-go/tools/cache"
@@ -48,11 +48,11 @@ func (c *alertmanagerCollector) Describe(ch chan<- *prometheus.Desc) {
 // Collect implements the prometheus.Collector interface.
 func (c *alertmanagerCollector) Collect(ch chan<- prometheus.Metric) {
     for _, p := range c.store.List() {
-        c.collectAlertmanager(ch, p.(*v1alpha1.Alertmanager))
+        c.collectAlertmanager(ch, p.(*v1.Alertmanager))
     }
 }
 
-func (c *alertmanagerCollector) collectAlertmanager(ch chan<- prometheus.Metric, a *v1alpha1.Alertmanager) {
+func (c *alertmanagerCollector) collectAlertmanager(ch chan<- prometheus.Metric, a *v1.Alertmanager) {
     replicas := float64(minReplicas)
     if a.Spec.Replicas != nil {
         replicas = float64(*a.Spec.Replicas)
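Note: the collector side of the switch is mechanical, but the pattern is worth spelling out: the operator registers a custom prometheus.Collector whose Collect walks the informer store and type-asserts every entry, so the stored type and the asserted type must move to v1 together or Collect panics at scrape time. A minimal, self-contained sketch of that pattern; the stand-in type and metric name below are illustrative, not the operator's real ones:

package collectorsketch

import (
	"github.com/prometheus/client_golang/prometheus"
	"k8s.io/client-go/tools/cache"
)

// alertmanager stands in for the real v1.Alertmanager; only the fields
// the collector reads are modeled here.
type alertmanager struct {
	name     string
	replicas float64
}

type alertmanagerCollector struct {
	store cache.Store // kept up to date by the shared informer
	desc  *prometheus.Desc
}

func newAlertmanagerCollector(store cache.Store) *alertmanagerCollector {
	return &alertmanagerCollector{
		store: store,
		desc: prometheus.NewDesc(
			"operator_alertmanager_spec_replicas", // illustrative name
			"Number of expected replicas for the object.",
			[]string{"name"}, nil,
		),
	}
}

// Describe implements the prometheus.Collector interface.
func (c *alertmanagerCollector) Describe(ch chan<- *prometheus.Desc) {
	ch <- c.desc
}

// Collect walks the informer store and emits one sample per object.
// The type assertion is the line the diff changes: objects in the
// store now carry the v1 type instead of v1alpha1.
func (c *alertmanagerCollector) Collect(ch chan<- prometheus.Metric) {
	for _, obj := range c.store.List() {
		a := obj.(*alertmanager)
		ch <- prometheus.MustNewConstMetric(c.desc, prometheus.GaugeValue, a.replicas, a.name)
	}
}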
pkg/alertmanager (operator):

@@ -21,7 +21,8 @@ import (
     "time"
 
     "github.com/coreos/prometheus-operator/pkg/analytics"
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    "github.com/coreos/prometheus-operator/pkg/client/monitoring"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/coreos/prometheus-operator/pkg/k8sutil"
     prometheusoperator "github.com/coreos/prometheus-operator/pkg/prometheus"
@@ -46,16 +47,14 @@ import (
 )
 
 const (
-    crdAlertmanager = v1alpha1.AlertmanagerName + "." + v1alpha1.Group
-
     resyncPeriod = 5 * time.Minute
 )
 
 // Operator manages the life cycle of Alertmanager deployments and
 // monitoring configurations.
 type Operator struct {
-    kclient *kubernetes.Clientset
-    mclient *v1alpha1.MonitoringV1alpha1Client
+    kclient   kubernetes.Interface
+    mclient   monitoring.Interface
     crdclient apiextensionsclient.Interface
     logger    log.Logger
@@ -86,7 +85,7 @@ func New(c prometheusoperator.Config, logger log.Logger) (*Operator, error) {
         return nil, errors.Wrap(err, "instantiating kubernetes client failed")
     }
 
-    mclient, err := v1alpha1.NewForConfig(cfg)
+    mclient, err := monitoring.NewForConfig(cfg)
     if err != nil {
         return nil, errors.Wrap(err, "instantiating monitoring client failed")
     }
@@ -107,10 +106,10 @@ func New(c prometheusoperator.Config, logger log.Logger) (*Operator, error) {
     o.alrtInf = cache.NewSharedIndexInformer(
         &cache.ListWatch{
-            ListFunc:  o.mclient.Alertmanagers(api.NamespaceAll).List,
-            WatchFunc: o.mclient.Alertmanagers(api.NamespaceAll).Watch,
+            ListFunc:  o.mclient.MonitoringV1().Alertmanagers(api.NamespaceAll).List,
+            WatchFunc: o.mclient.MonitoringV1().Alertmanagers(api.NamespaceAll).Watch,
         },
-        &v1alpha1.Alertmanager{}, resyncPeriod, cache.Indexers{},
+        &monitoringv1.Alertmanager{}, resyncPeriod, cache.Indexers{},
     )
     o.ssetInf = cache.NewSharedIndexInformer(
         cache.NewListWatchFromClient(o.kclient.Apps().RESTClient(), "statefulsets", api.NamespaceAll, nil),
@@ -231,7 +230,7 @@ func (c *Operator) enqueue(obj interface{}) {
 // enqueueForNamespace enqueues all Alertmanager object keys that belong to the given namespace.
 func (c *Operator) enqueueForNamespace(ns string) {
     cache.ListAll(c.alrtInf.GetStore(), labels.Everything(), func(obj interface{}) {
-        am := obj.(*v1alpha1.Alertmanager)
+        am := obj.(*monitoringv1.Alertmanager)
         if am.Namespace == ns {
             c.enqueue(am)
         }
@@ -264,7 +263,7 @@ func (c *Operator) processNextWorkItem() bool {
     return true
 }
 
-func (c *Operator) alertmanagerForStatefulSet(sset interface{}) *v1alpha1.Alertmanager {
+func (c *Operator) alertmanagerForStatefulSet(sset interface{}) *monitoringv1.Alertmanager {
     key, ok := c.keyFunc(sset)
     if !ok {
         return nil
@@ -279,7 +278,7 @@ func (c *Operator) alertmanagerForStatefulSet(sset interface{}) *v1alpha1.Alertmanager {
     if !exists {
         return nil
     }
-    return a.(*v1alpha1.Alertmanager)
+    return a.(*monitoringv1.Alertmanager)
 }
 
 func alertmanagerNameFromStatefulSetName(name string) string {
@@ -379,7 +378,7 @@ func (c *Operator) sync(key string) error {
         return c.destroyAlertmanager(key)
     }
 
-    am := obj.(*v1alpha1.Alertmanager)
+    am := obj.(*monitoringv1.Alertmanager)
     if am.Spec.Paused {
         return nil
     }
@@ -435,7 +434,7 @@ func ListOptions(name string) metav1.ListOptions {
 // create new pods.
 //
 // TODO(brancz): remove this once the 1.6 support is removed.
-func (c *Operator) syncVersion(a *v1alpha1.Alertmanager) error {
+func (c *Operator) syncVersion(a *monitoringv1.Alertmanager) error {
     if c.config.StatefulSetUpdatesAvailable {
         return nil
     }
@@ -475,8 +474,8 @@ func (c *Operator) syncVersion(a *v1alpha1.Alertmanager) error {
     return nil
 }
 
-func AlertmanagerStatus(kclient *kubernetes.Clientset, a *v1alpha1.Alertmanager) (*v1alpha1.AlertmanagerStatus, []v1.Pod, error) {
-    res := &v1alpha1.AlertmanagerStatus{Paused: a.Spec.Paused}
+func AlertmanagerStatus(kclient kubernetes.Interface, a *monitoringv1.Alertmanager) (*monitoringv1.AlertmanagerStatus, []v1.Pod, error) {
+    res := &monitoringv1.AlertmanagerStatus{Paused: a.Spec.Paused}
 
     pods, err := kclient.Core().Pods(a.Namespace).List(ListOptions(a.Name))
     if err != nil {
@@ -570,20 +569,7 @@ func (c *Operator) destroyAlertmanager(key string) error {
 
 func (c *Operator) createCRDs() error {
     crds := []*extensionsobj.CustomResourceDefinition{
-        {
-            ObjectMeta: metav1.ObjectMeta{
-                Name: crdAlertmanager,
-            },
-            Spec: extensionsobj.CustomResourceDefinitionSpec{
-                Group:   v1alpha1.Group,
-                Version: v1alpha1.Version,
-                Scope:   extensionsobj.NamespaceScoped,
-                Names: extensionsobj.CustomResourceDefinitionNames{
-                    Plural: v1alpha1.AlertmanagerName,
-                    Kind:   v1alpha1.AlertmanagersKind,
-                },
-            },
-        },
+        k8sutil.NewAlertmanagerCustomResourceDefinition(),
     }
 
     crdClient := c.crdclient.ApiextensionsV1beta1().CustomResourceDefinitions()
@@ -596,20 +582,12 @@ func (c *Operator) createCRDs() error {
     }
 
     // We have to wait for the CRDs to be ready. Otherwise the initial watch may fail.
-    return k8sutil.WaitForCRDReady(c.kclient.CoreV1().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.AlertmanagerName)
+    return k8sutil.WaitForCRDReady(c.mclient.MonitoringV1().Alertmanagers(api.NamespaceAll).List)
 }
 
 func (c *Operator) createTPRs() error {
     tprs := []*extensionsobjold.ThirdPartyResource{
-        {
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "alertmanager." + v1alpha1.Group,
-            },
-            Versions: []extensionsobjold.APIVersion{
-                {Name: v1alpha1.Version},
-            },
-            Description: "Managed Alertmanager cluster",
-        },
+        k8sutil.NewAlertmanagerTPRDefinition(),
     }
     tprClient := c.kclient.Extensions().ThirdPartyResources()
 
@@ -621,5 +599,5 @@ func (c *Operator) createTPRs() error {
     }
 
     // We have to wait for the TPRs to be ready. Otherwise the initial watch may fail.
-    return k8sutil.WaitForCRDReady(c.kclient.CoreV1().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.AlertmanagerName)
+    return k8sutil.WaitForCRDReady(c.mclient.MonitoringV1alpha1().Alertmanagers(api.NamespaceAll).List)
 }
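Note: the informer wiring is the heart of the client switch. Instead of a version-specific client whose resource methods hang directly off the client value, the aggregated monitoring.Interface requires selecting the API group version explicitly, which makes the object type flowing through the informer visible at the call site. A condensed sketch of the wiring, using only calls that appear in the diff itself (error handling kept minimal):

package alertmanager

import (
	"time"

	"github.com/coreos/prometheus-operator/pkg/client/monitoring"
	monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
	"k8s.io/client-go/pkg/api"
	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/cache"
)

// newAlertmanagerInformer mirrors the informer construction in the diff.
func newAlertmanagerInformer(cfg *rest.Config) (cache.SharedIndexInformer, error) {
	// The aggregated client exposes all served versions; the informer
	// pins itself to the v1 group explicitly.
	mclient, err := monitoring.NewForConfig(cfg)
	if err != nil {
		return nil, err
	}
	return cache.NewSharedIndexInformer(
		&cache.ListWatch{
			ListFunc:  mclient.MonitoringV1().Alertmanagers(api.NamespaceAll).List,
			WatchFunc: mclient.MonitoringV1().Alertmanagers(api.NamespaceAll).Watch,
		},
		// Objects handed to event handlers and read back from the store
		// now carry the v1 type, which is why every type assertion in
		// this file changes in lockstep.
		&monitoringv1.Alertmanager{},
		5*time.Minute, // resyncPeriod, as in the diff
		cache.Indexers{},
	), nil
}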
pkg/alertmanager (statefulset):

@@ -27,7 +27,7 @@ import (
     "k8s.io/client-go/pkg/apis/apps/v1beta1"
 
     "github.com/blang/semver"
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/pkg/errors"
 )
 
@@ -41,7 +41,7 @@ var (
     probeTimeoutSeconds int32 = 3
 )
 
-func makeStatefulSet(am *v1alpha1.Alertmanager, old *v1beta1.StatefulSet, config Config) (*v1beta1.StatefulSet, error) {
+func makeStatefulSet(am *monitoringv1.Alertmanager, old *v1beta1.StatefulSet, config Config) (*v1beta1.StatefulSet, error) {
     // TODO(fabxc): is this the right point to inject defaults?
     // Ideally we would do it before storing but that's currently not possible.
     // Potentially an update handler on first insertion.
@@ -107,7 +107,7 @@ func makeStatefulSet(am *v1alpha1.Alertmanager, old *v1beta1.StatefulSet, config
     return statefulset, nil
 }
 
-func makeStatefulSetService(p *v1alpha1.Alertmanager) *v1.Service {
+func makeStatefulSetService(p *monitoringv1.Alertmanager) *v1.Service {
     svc := &v1.Service{
         ObjectMeta: metav1.ObjectMeta{
             Name: governingServiceName,
@@ -139,7 +139,7 @@ func makeStatefulSetService(p *v1alpha1.Alertmanager) *v1.Service {
     return svc
 }
 
-func makeStatefulSetSpec(a *v1alpha1.Alertmanager, config Config) (*v1beta1.StatefulSetSpec, error) {
+func makeStatefulSetSpec(a *monitoringv1.Alertmanager, config Config) (*v1beta1.StatefulSetSpec, error) {
     image := fmt.Sprintf("%s:%s", a.Spec.BaseImage, a.Spec.Version)
     versionStr := strings.TrimLeft(a.Spec.Version, "v")
 
@@ -297,7 +297,7 @@ func prefixedName(name string) string {
     return fmt.Sprintf("alertmanager-%s", name)
 }
 
-func subPathForStorage(s *v1alpha1.StorageSpec) string {
+func subPathForStorage(s *monitoringv1.StorageSpec) string {
     if s == nil {
         return ""
     }
pkg/api:

@@ -23,14 +23,14 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes"
 
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
    "github.com/coreos/prometheus-operator/pkg/k8sutil"
     "github.com/coreos/prometheus-operator/pkg/prometheus"
 )
 
 type API struct {
     kclient *kubernetes.Clientset
-    mclient *v1alpha1.MonitoringV1alpha1Client
+    mclient *v1.MonitoringV1Client
     logger  log.Logger
 }
 
@@ -45,7 +45,7 @@ func New(conf prometheus.Config, l log.Logger) (*API, error) {
         return nil, err
     }
 
-    mclient, err := v1alpha1.NewForConfig(cfg)
+    mclient, err := v1.NewForConfig(cfg)
     if err != nil {
         return nil, err
     }
@@ -58,7 +58,7 @@ func New(conf prometheus.Config, l log.Logger) (*API, error) {
 }
 
 var (
-    prometheusRoute = regexp.MustCompile("/apis/monitoring.coreos.com/v1alpha1/namespaces/(.*)/prometheuses/(.*)/status")
+    prometheusRoute = regexp.MustCompile("/apis/monitoring.coreos.com/" + v1.Version + "/namespaces/(.*)/prometheuses/(.*)/status")
 )
 
 func (api *API) Register(mux *http.ServeMux) {
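Note: deriving the route from the generated v1.Version constant instead of hard-coding "v1alpha1" keeps the status endpoint in lockstep with whatever version the client package serves. A small, runnable illustration of how such a route regexp captures namespace and resource name; the concrete path below is a made-up example:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	const version = "v1" // stands in for the generated v1.Version constant
	prometheusRoute := regexp.MustCompile(
		"/apis/monitoring.coreos.com/" + version + "/namespaces/(.*)/prometheuses/(.*)/status")

	// Submatch 1 is the namespace, submatch 2 the Prometheus name.
	m := prometheusRoute.FindStringSubmatch(
		"/apis/monitoring.coreos.com/v1/namespaces/default/prometheuses/example/status")
	fmt.Println(m[1], m[2]) // prints: default example
}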
pkg/k8sutil:

@@ -20,27 +20,27 @@ import (
     "net/url"
     "time"
 
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
     version "github.com/hashicorp/go-version"
     "github.com/pkg/errors"
+    extensionsobj "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/discovery"
     clientv1 "k8s.io/client-go/kubernetes/typed/core/v1"
     "k8s.io/client-go/pkg/api/v1"
+    extensionsobjold "k8s.io/client-go/pkg/apis/extensions/v1beta1"
     "k8s.io/client-go/rest"
 )
 
-// WaitForCRDReady waits for a third party resource to be available
-// for use.
-// TODO(gouthamve): Move to clientset.Get()
-func WaitForCRDReady(restClient rest.Interface, crdGroup, crdVersion, crdName string) error {
-    err := wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
-        res := restClient.Get().AbsPath("apis", crdGroup, crdVersion, crdName).Do()
-        err := res.Error()
+// WaitForCRDReady waits for a third party resource to be available for use.
+func WaitForCRDReady(listFunc func(opts metav1.ListOptions) (runtime.Object, error)) error {
+    err := wait.Poll(3*time.Second, 10*time.Minute, func() (bool, error) {
+        _, err := listFunc(metav1.ListOptions{})
+        if err != nil {
             // RESTClient returns *apierrors.StatusError for any status codes < 200 or > 206
             // and http.Client.Do errors are returned directly.
             if se, ok := err.(*apierrors.StatusError); ok {
                 if se.Status().Code == http.StatusNotFound {
                     return false, nil
@@ -48,17 +48,10 @@ func WaitForCRDReady(restClient rest.Interface, crdGroup, crdVersion, crdName string) error {
             }
             return false, err
         }
-
-        var statusCode int
-        res.StatusCode(&statusCode)
-        if statusCode != http.StatusOK {
-            return false, fmt.Errorf("invalid status code: %d", statusCode)
-        }
-
         return true, nil
     })
 
-    return errors.Wrap(err, fmt.Sprintf("timed out waiting for TPR %s", crdName))
+    return errors.Wrap(err, "timed out waiting for Custom Resource")
 }
 
 // PodRunningAndReady returns whether a pod is running and each container has
@@ -175,3 +168,90 @@ func GetMinorVersion(dclient discovery.DiscoveryInterface) (int, error) {
 
     return ver.Segments()[1], nil
 }
+
+func NewPrometheusTPRDefinition() *extensionsobjold.ThirdPartyResource {
+    return &extensionsobjold.ThirdPartyResource{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "prometheus." + v1alpha1.Group,
+        },
+        Versions: []extensionsobjold.APIVersion{
+            {Name: v1alpha1.Version},
+        },
+        Description: "Managed Prometheus server",
+    }
+}
+
+func NewServiceMonitorTPRDefinition() *extensionsobjold.ThirdPartyResource {
+    return &extensionsobjold.ThirdPartyResource{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "service-monitor." + v1alpha1.Group,
+        },
+        Versions: []extensionsobjold.APIVersion{
+            {Name: v1alpha1.Version},
+        },
+        Description: "Prometheus monitoring for a service",
+    }
+}
+
+func NewAlertmanagerTPRDefinition() *extensionsobjold.ThirdPartyResource {
+    return &extensionsobjold.ThirdPartyResource{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: "alertmanager." + v1alpha1.Group,
+        },
+        Versions: []extensionsobjold.APIVersion{
+            {Name: v1alpha1.Version},
+        },
+        Description: "Managed Alertmanager cluster",
+    }
+}
+
+func NewPrometheusCustomResourceDefinition() *extensionsobj.CustomResourceDefinition {
+    return &extensionsobj.CustomResourceDefinition{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: monitoringv1.PrometheusName + "." + monitoringv1.Group,
+        },
+        Spec: extensionsobj.CustomResourceDefinitionSpec{
+            Group:   monitoringv1.Group,
+            Version: monitoringv1.Version,
+            Scope:   extensionsobj.NamespaceScoped,
+            Names: extensionsobj.CustomResourceDefinitionNames{
+                Plural: monitoringv1.PrometheusName,
+                Kind:   monitoringv1.PrometheusesKind,
+            },
+        },
+    }
+}
+
+func NewServiceMonitorCustomResourceDefinition() *extensionsobj.CustomResourceDefinition {
+    return &extensionsobj.CustomResourceDefinition{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: monitoringv1.ServiceMonitorName + "." + monitoringv1.Group,
+        },
+        Spec: extensionsobj.CustomResourceDefinitionSpec{
+            Group:   monitoringv1.Group,
+            Version: monitoringv1.Version,
+            Scope:   extensionsobj.NamespaceScoped,
+            Names: extensionsobj.CustomResourceDefinitionNames{
+                Plural: monitoringv1.ServiceMonitorName,
+                Kind:   monitoringv1.ServiceMonitorsKind,
+            },
+        },
+    }
+}
+
+func NewAlertmanagerCustomResourceDefinition() *extensionsobj.CustomResourceDefinition {
+    return &extensionsobj.CustomResourceDefinition{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: monitoringv1.AlertmanagerName + "." + monitoringv1.Group,
+        },
+        Spec: extensionsobj.CustomResourceDefinitionSpec{
+            Group:   monitoringv1.Group,
+            Version: monitoringv1.Version,
+            Scope:   extensionsobj.NamespaceScoped,
+            Names: extensionsobj.CustomResourceDefinitionNames{
+                Plural: monitoringv1.AlertmanagerName,
+                Kind:   monitoringv1.AlertmanagersKind,
+            },
+        },
+    }
+}
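Note: this is the key refactor of the commit. Readiness no longer means "a REST GET on the group/version/name path returns 200" but "a typed List call stops returning 404". Since any typed List function already encodes group, version, and resource, the helper shrinks to a single polling loop and all call sites simply pass a bound method value. A condensed, self-contained sketch of the new shape, assuming the same client-go packages the diff imports:

package k8sutilsketch

import (
	"net/http"
	"time"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForCRDReady polls the given typed List function until the API
// server stops answering 404, i.e. until the custom resource is served.
func waitForCRDReady(listFunc func(opts metav1.ListOptions) (runtime.Object, error)) error {
	return wait.Poll(3*time.Second, 10*time.Minute, func() (bool, error) {
		_, err := listFunc(metav1.ListOptions{})
		if err != nil {
			if se, ok := err.(*apierrors.StatusError); ok && se.Status().Code == http.StatusNotFound {
				return false, nil // resource not registered yet, keep polling
			}
			return false, err // any other error is fatal
		}
		return true, nil
	})
}

// Call sites pass a bound method value, e.g.:
//
//	waitForCRDReady(mclient.MonitoringV1().Prometheuses(api.NamespaceAll).List)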
pkg/prometheus (collector):

@@ -15,7 +15,7 @@
 package prometheus
 
 import (
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
 
     "github.com/prometheus/client_golang/prometheus"
     "k8s.io/client-go/tools/cache"
@@ -48,11 +48,11 @@ func (c *prometheusCollector) Describe(ch chan<- *prometheus.Desc) {
 // Collect implements the prometheus.Collector interface.
 func (c *prometheusCollector) Collect(ch chan<- prometheus.Metric) {
     for _, p := range c.store.List() {
-        c.collectPrometheus(ch, p.(*v1alpha1.Prometheus))
+        c.collectPrometheus(ch, p.(*v1.Prometheus))
     }
 }
 
-func (c *prometheusCollector) collectPrometheus(ch chan<- prometheus.Metric, p *v1alpha1.Prometheus) {
+func (c *prometheusCollector) collectPrometheus(ch chan<- prometheus.Metric, p *v1.Prometheus) {
     replicas := float64(minReplicas)
     if p.Spec.Replicas != nil {
         replicas = float64(*p.Spec.Replicas)
pkg/prometheus (operator):

@@ -22,7 +22,8 @@ import (
     "time"
 
     "github.com/coreos/prometheus-operator/pkg/analytics"
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    "github.com/coreos/prometheus-operator/pkg/client/monitoring"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/coreos/prometheus-operator/pkg/k8sutil"
 
     "github.com/go-kit/kit/log"
@@ -49,17 +50,14 @@ import (
 const (
     configFilename = "prometheus.yaml"
 
-    crdServiceMonitor = v1alpha1.ServiceMonitorName + "." + v1alpha1.Group
-    crdPrometheus     = v1alpha1.PrometheusName + "." + v1alpha1.Group
-
     resyncPeriod = 5 * time.Minute
 )
 
 // Operator manages the life cycle of Prometheus deployments and
 // monitoring configurations.
 type Operator struct {
-    kclient *kubernetes.Clientset
-    mclient *v1alpha1.MonitoringV1alpha1Client
+    kclient   kubernetes.Interface
+    mclient   monitoring.Interface
     crdclient apiextensionsclient.Interface
     logger    log.Logger
@@ -108,7 +106,7 @@ func New(conf Config, logger log.Logger) (*Operator, error) {
         return nil, err
     }
 
-    mclient, err := v1alpha1.NewForConfig(cfg)
+    mclient, err := monitoring.NewForConfig(cfg)
     if err != nil {
         return nil, err
     }
@@ -147,10 +145,10 @@ func New(conf Config, logger log.Logger) (*Operator, error) {
     c.promInf = cache.NewSharedIndexInformer(
         &cache.ListWatch{
-            ListFunc:  mclient.Prometheuses(api.NamespaceAll).List,
-            WatchFunc: mclient.Prometheuses(api.NamespaceAll).Watch,
+            ListFunc:  mclient.MonitoringV1().Prometheuses(api.NamespaceAll).List,
+            WatchFunc: mclient.MonitoringV1().Prometheuses(api.NamespaceAll).Watch,
         },
-        &v1alpha1.Prometheus{}, resyncPeriod, cache.Indexers{},
+        &monitoringv1.Prometheus{}, resyncPeriod, cache.Indexers{},
     )
     c.promInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
         AddFunc: c.handleAddPrometheus,
@@ -160,10 +158,10 @@ func New(conf Config, logger log.Logger) (*Operator, error) {
     c.smonInf = cache.NewSharedIndexInformer(
         &cache.ListWatch{
-            ListFunc:  mclient.ServiceMonitors(api.NamespaceAll).List,
-            WatchFunc: mclient.ServiceMonitors(api.NamespaceAll).Watch,
+            ListFunc:  mclient.MonitoringV1().ServiceMonitors(api.NamespaceAll).List,
+            WatchFunc: mclient.MonitoringV1().ServiceMonitors(api.NamespaceAll).Watch,
         },
-        &v1alpha1.ServiceMonitor{}, resyncPeriod, cache.Indexers{},
+        &monitoringv1.ServiceMonitor{}, resyncPeriod, cache.Indexers{},
     )
     c.smonInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
         AddFunc: c.handleSmonAdd,
@@ -235,13 +233,6 @@ func (c *Operator) Run(stopc <-chan struct{}) error {
(the +/- attribution of this hunk was lost in rendering; the lines involved are:)
     mv, err := k8sutil.GetMinorVersion(c.kclient.Discovery())
     if mv < 7 {
         c.config.StatefulSetUpdatesAvailable = false
         if err := c.createTPRs(); err != nil {
             errChan <- errors.Wrap(err, "creating TPRs failed")
             return
         }
 
         errChan <- nil
         return
     }
 
     c.config.StatefulSetUpdatesAvailable = true
@@ -527,7 +518,7 @@ func (c *Operator) enqueue(obj interface{}) {
 // enqueueForNamespace enqueues all Prometheus object keys that belong to the given namespace.
 func (c *Operator) enqueueForNamespace(ns string) {
     cache.ListAll(c.promInf.GetStore(), labels.Everything(), func(obj interface{}) {
-        p := obj.(*v1alpha1.Prometheus)
+        p := obj.(*monitoringv1.Prometheus)
         if p.Namespace == ns {
             c.enqueue(p)
         }
@@ -560,7 +551,7 @@ func (c *Operator) processNextWorkItem() bool {
     return true
 }
 
-func (c *Operator) prometheusForStatefulSet(sset interface{}) *v1alpha1.Prometheus {
+func (c *Operator) prometheusForStatefulSet(sset interface{}) *monitoringv1.Prometheus {
     key, ok := c.keyFunc(sset)
     if !ok {
         return nil
@@ -575,7 +566,7 @@ func (c *Operator) prometheusForStatefulSet(sset interface{}) *v1alpha1.Prometheus {
     if !exists {
         return nil
     }
-    return p.(*v1alpha1.Prometheus)
+    return p.(*monitoringv1.Prometheus)
 }
 
 func prometheusNameFromStatefulSetName(name string) string {
@@ -642,7 +633,7 @@ func (c *Operator) sync(key string) error {
         return c.destroyPrometheus(key)
     }
 
-    p := obj.(*v1alpha1.Prometheus)
+    p := obj.(*monitoringv1.Prometheus)
     if p.Spec.Paused {
         return nil
     }
@@ -711,7 +702,7 @@ func (c *Operator) sync(key string) error {
     return nil
 }
 
-func (c *Operator) ruleFileConfigMaps(p *v1alpha1.Prometheus) ([]*v1.ConfigMap, error) {
+func (c *Operator) ruleFileConfigMaps(p *monitoringv1.Prometheus) ([]*v1.ConfigMap, error) {
     res := []*v1.ConfigMap{}
 
     ruleSelector, err := metav1.LabelSelectorAsSelector(p.Spec.RuleSelector)
@@ -743,7 +734,7 @@ func ListOptions(name string) metav1.ListOptions {
 // create new pods.
 //
 // TODO(brancz): remove this once the 1.6 support is removed.
-func (c *Operator) syncVersion(key string, p *v1alpha1.Prometheus) error {
+func (c *Operator) syncVersion(key string, p *monitoringv1.Prometheus) error {
     if c.config.StatefulSetUpdatesAvailable {
         return nil
     }
@@ -787,8 +778,8 @@ func (c *Operator) syncVersion(key string, p *v1alpha1.Prometheus) error {
 // PrometheusStatus evaluates the current status of a Prometheus deployment with respect
 // to its specified resource object. It return the status and a list of pods that
 // are not updated.
-func PrometheusStatus(kclient kubernetes.Interface, p *v1alpha1.Prometheus) (*v1alpha1.PrometheusStatus, []v1.Pod, error) {
-    res := &v1alpha1.PrometheusStatus{Paused: p.Spec.Paused}
+func PrometheusStatus(kclient kubernetes.Interface, p *monitoringv1.Prometheus) (*monitoringv1.PrometheusStatus, []v1.Pod, error) {
+    res := &monitoringv1.PrometheusStatus{Paused: p.Spec.Paused}
 
     pods, err := kclient.Core().Pods(p.Namespace).List(ListOptions(p.Name))
     if err != nil {
@@ -901,7 +892,7 @@ func (c *Operator) destroyPrometheus(key string) error {
     return nil
 }
 
-func (c *Operator) loadBasicAuthSecrets(mons map[string]*v1alpha1.ServiceMonitor, s *v1.SecretList) (map[string]BasicAuthCredentials, error) {
+func (c *Operator) loadBasicAuthSecrets(mons map[string]*monitoringv1.ServiceMonitor, s *v1.SecretList) (map[string]BasicAuthCredentials, error) {
 
     secrets := map[string]BasicAuthCredentials{}
 
@@ -957,7 +948,7 @@ func (c *Operator) loadBasicAuthSecrets(mons map[string]*v1alpha1.ServiceMonitor
 
 }
 
-func (c *Operator) createConfig(p *v1alpha1.Prometheus, ruleFileConfigMaps []*v1.ConfigMap) error {
+func (c *Operator) createConfig(p *monitoringv1.Prometheus, ruleFileConfigMaps []*v1.ConfigMap) error {
     smons, err := c.selectServiceMonitors(p)
     if err != nil {
         return errors.Wrap(err, "selecting ServiceMonitors failed")
@@ -1019,9 +1010,9 @@ func (c *Operator) createConfig(p *v1alpha1.Prometheus, ruleFileConfigMaps []*v1
     return err
 }
 
-func (c *Operator) selectServiceMonitors(p *v1alpha1.Prometheus) (map[string]*v1alpha1.ServiceMonitor, error) {
+func (c *Operator) selectServiceMonitors(p *monitoringv1.Prometheus) (map[string]*monitoringv1.ServiceMonitor, error) {
     // Selectors might overlap. Deduplicate them along the keyFunc.
-    res := make(map[string]*v1alpha1.ServiceMonitor)
+    res := make(map[string]*monitoringv1.ServiceMonitor)
 
     selector, err := metav1.LabelSelectorAsSelector(p.Spec.ServiceMonitorSelector)
     if err != nil {
@@ -1033,7 +1024,7 @@ func (c *Operator) selectServiceMonitors(p *v1alpha1.Prometheus) (map[string]*v1alpha1.ServiceMonitor, error) {
     cache.ListAllByNamespace(c.smonInf.GetIndexer(), p.Namespace, selector, func(obj interface{}) {
         k, ok := c.keyFunc(obj)
         if ok {
-            res[k] = obj.(*v1alpha1.ServiceMonitor)
+            res[k] = obj.(*monitoringv1.ServiceMonitor)
         }
     })
 
@@ -1042,24 +1033,8 @@ func (c *Operator) selectServiceMonitors(p *v1alpha1.Prometheus) (map[string]*v1alpha1.ServiceMonitor, error) {
 
 func (c *Operator) createTPRs() error {
     tprs := []*extensionsobjold.ThirdPartyResource{
-        {
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "service-monitor." + v1alpha1.Group,
-            },
-            Versions: []extensionsobjold.APIVersion{
-                {Name: v1alpha1.Version},
-            },
-            Description: "Prometheus monitoring for a service",
-        },
-        {
-            ObjectMeta: metav1.ObjectMeta{
-                Name: "prometheus." + v1alpha1.Group,
-            },
-            Versions: []extensionsobjold.APIVersion{
-                {Name: v1alpha1.Version},
-            },
-            Description: "Managed Prometheus server",
-        },
+        k8sutil.NewPrometheusTPRDefinition(),
+        k8sutil.NewServiceMonitorTPRDefinition(),
     }
     tprClient := c.kclient.Extensions().ThirdPartyResources()
 
@@ -1071,44 +1046,17 @@ func (c *Operator) createTPRs() error {
     }
 
     // We have to wait for the TPRs to be ready. Otherwise the initial watch may fail.
-    err := k8sutil.WaitForCRDReady(c.kclient.CoreV1().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.PrometheusName)
+    err := k8sutil.WaitForCRDReady(c.mclient.MonitoringV1alpha1().Prometheuses(api.NamespaceAll).List)
     if err != nil {
         return err
     }
-    return k8sutil.WaitForCRDReady(c.kclient.CoreV1().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.ServiceMonitorName)
+    return k8sutil.WaitForCRDReady(c.mclient.MonitoringV1alpha1().ServiceMonitors(api.NamespaceAll).List)
 }
 
 func (c *Operator) createCRDs() error {
     crds := []*extensionsobj.CustomResourceDefinition{
-        {
-            ObjectMeta: metav1.ObjectMeta{
-                Name: crdServiceMonitor,
-            },
-            Spec: extensionsobj.CustomResourceDefinitionSpec{
-                Group:   v1alpha1.Group,
-                Version: v1alpha1.Version,
-                Scope:   extensionsobj.NamespaceScoped,
-                Names: extensionsobj.CustomResourceDefinitionNames{
-                    Plural: v1alpha1.ServiceMonitorName,
-                    Kind:   v1alpha1.ServiceMonitorsKind,
-                },
-            },
-        },
-        {
-            ObjectMeta: metav1.ObjectMeta{
-                Name: crdPrometheus,
-            },
-            Spec: extensionsobj.CustomResourceDefinitionSpec{
-                Group:   v1alpha1.Group,
-                Version: v1alpha1.Version,
-                Scope:   extensionsobj.NamespaceScoped,
-                Names: extensionsobj.CustomResourceDefinitionNames{
-                    Plural:     v1alpha1.PrometheusName,
-                    Kind:       v1alpha1.PrometheusesKind,
-                    ShortNames: []string{v1alpha1.PrometheusShort},
-                },
-            },
-        },
+        k8sutil.NewPrometheusCustomResourceDefinition(),
+        k8sutil.NewServiceMonitorCustomResourceDefinition(),
     }
 
     crdClient := c.crdclient.ApiextensionsV1beta1().CustomResourceDefinitions()
@@ -1121,9 +1069,9 @@ func (c *Operator) createCRDs() error {
     }
 
     // We have to wait for the CRDs to be ready. Otherwise the initial watch may fail.
-    err := k8sutil.WaitForCRDReady(c.kclient.CoreV1().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.PrometheusName)
+    err := k8sutil.WaitForCRDReady(c.mclient.MonitoringV1().Prometheuses(api.NamespaceAll).List)
     if err != nil {
         return err
     }
-    return k8sutil.WaitForCRDReady(c.kclient.CoreV1().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.ServiceMonitorName)
+    return k8sutil.WaitForCRDReady(c.mclient.MonitoringV1().ServiceMonitors(api.NamespaceAll).List)
}
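Note: both operators keep the same bootstrap split during the TPR-to-CRD transition: clusters before Kubernetes 1.7 get ThirdPartyResources and the operator waits on the v1alpha1 lists, clusters on 1.7+ get CustomResourceDefinitions and it waits on the v1 lists. A schematic sketch of that gate; the function parameters stand in for the operator methods shown in the diff and are not the operator's actual API:

// bootstrapResources is a schematic of the version gate; minorVersion
// stands in for the result of k8sutil.GetMinorVersion(kclient.Discovery()).
func bootstrapResources(minorVersion int, createTPRs, createCRDs func() error) error {
	if minorVersion < 7 {
		// Kubernetes <1.7 only knows ThirdPartyResources. It also lacks
		// in-place StatefulSet updates, which is what the syncVersion
		// workaround in the diff (delete pods, let the set recreate
		// them) exists for.
		return createTPRs()
	}
	// 1.7+ serves CustomResourceDefinitions. Both paths end by blocking
	// on the list-based WaitForCRDReady so the first watch cannot race
	// the resource registration.
	return createCRDs()
}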
pkg/prometheus (promcfg):

@@ -25,7 +25,7 @@ import (
     yaml "gopkg.in/yaml.v2"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
 )
 
 var (
@@ -50,7 +50,7 @@ func stringMapToMapSlice(m map[string]string) yaml.MapSlice {
     return res
 }
 
-func generateConfig(p *v1alpha1.Prometheus, mons map[string]*v1alpha1.ServiceMonitor, ruleConfigMaps int, basicAuthSecrets map[string]BasicAuthCredentials) ([]byte, error) {
+func generateConfig(p *v1.Prometheus, mons map[string]*v1.ServiceMonitor, ruleConfigMaps int, basicAuthSecrets map[string]BasicAuthCredentials) ([]byte, error) {
     versionStr := p.Spec.Version
     if versionStr == "" {
         versionStr = DefaultVersion
@@ -127,7 +127,7 @@ func generateConfig(p *v1alpha1.Prometheus, mons map[string]*v1alpha1.ServiceMonitor, ruleConfigMaps int, basicAuthSecrets map[string]BasicAuthCredentials) ([]byte, error) {
     return yaml.Marshal(cfg)
 }
 
-func generateServiceMonitorConfig(version semver.Version, m *v1alpha1.ServiceMonitor, ep v1alpha1.Endpoint, i int, basicAuthSecrets map[string]BasicAuthCredentials) yaml.MapSlice {
+func generateServiceMonitorConfig(version semver.Version, m *v1.ServiceMonitor, ep v1.Endpoint, i int, basicAuthSecrets map[string]BasicAuthCredentials) yaml.MapSlice {
     cfg := yaml.MapSlice{
         {
             Key: "job_name",
@@ -351,7 +351,7 @@ func generateServiceMonitorConfig(version semver.Version, m *v1alpha1.ServiceMonitor, ep v1alpha1.Endpoint, i int, basicAuthSecrets map[string]BasicAuthCredentials) yaml.MapSlice {
     return cfg
 }
 
-func k8sSDFromServiceMonitor(m *v1alpha1.ServiceMonitor) yaml.MapItem {
+func k8sSDFromServiceMonitor(m *v1.ServiceMonitor) yaml.MapItem {
     nsel := m.Spec.NamespaceSelector
     namespaces := []string{}
     if !nsel.Any && len(nsel.MatchNames) == 0 {
@@ -403,7 +403,7 @@ func k8sSDAllNamespaces() yaml.MapItem {
     }
 }
 
-func generateAlertmanagerConfig(version semver.Version, am v1alpha1.AlertmanagerEndpoints) yaml.MapSlice {
+func generateAlertmanagerConfig(version semver.Version, am v1.AlertmanagerEndpoints) yaml.MapSlice {
     if am.Scheme == "" {
         am.Scheme = "http"
     }
pkg/prometheus (promcfg test):

@@ -23,7 +23,7 @@ import (
     "k8s.io/apimachinery/pkg/util/intstr"
     "k8s.io/client-go/pkg/api/v1"
 
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
 )
 
 func TestConfigGeneration(t *testing.T) {
@@ -49,14 +49,14 @@ func TestConfigGeneration(t *testing.T) {
 func generateTestConfig(version string) ([]byte, error) {
     replicas := int32(1)
     return generateConfig(
-        &v1alpha1.Prometheus{
+        &monitoringv1.Prometheus{
             ObjectMeta: metav1.ObjectMeta{
                 Name:      "test",
                 Namespace: "default",
             },
-            Spec: v1alpha1.PrometheusSpec{
-                Alerting: v1alpha1.AlertingSpec{
-                    Alertmanagers: []v1alpha1.AlertmanagerEndpoints{
+            Spec: monitoringv1.PrometheusSpec{
+                Alerting: monitoringv1.AlertingSpec{
+                    Alertmanagers: []monitoringv1.AlertmanagerEndpoints{
                         {
                             Name:      "alertmanager-main",
                             Namespace: "default",
@@ -89,10 +89,10 @@ func generateTestConfig(version string) ([]byte, error) {
     )
 }
 
-func makeServiceMonitors() map[string]*v1alpha1.ServiceMonitor {
-    res := map[string]*v1alpha1.ServiceMonitor{}
+func makeServiceMonitors() map[string]*monitoringv1.ServiceMonitor {
+    res := map[string]*monitoringv1.ServiceMonitor{}
 
-    res["servicemonitor1"] = &v1alpha1.ServiceMonitor{
+    res["servicemonitor1"] = &monitoringv1.ServiceMonitor{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "testservicemonitor1",
             Namespace: "default",
@@ -100,14 +100,14 @@ func makeServiceMonitors() map[string]*v1alpha1.ServiceMonitor {
                 "group": "group1",
             },
         },
-        Spec: v1alpha1.ServiceMonitorSpec{
+        Spec: monitoringv1.ServiceMonitorSpec{
             Selector: metav1.LabelSelector{
                 MatchLabels: map[string]string{
                     "group": "group1",
                 },
             },
-            Endpoints: []v1alpha1.Endpoint{
-                v1alpha1.Endpoint{
+            Endpoints: []monitoringv1.Endpoint{
+                monitoringv1.Endpoint{
                     Port:     "web",
                     Interval: "30s",
                 },
@@ -115,7 +115,7 @@ func makeServiceMonitors() map[string]*v1alpha1.ServiceMonitor {
         },
     }
 
-    res["servicemonitor2"] = &v1alpha1.ServiceMonitor{
+    res["servicemonitor2"] = &monitoringv1.ServiceMonitor{
         ObjectMeta: metav1.ObjectMeta{
             Name:      "testservicemonitor2",
             Namespace: "default",
@@ -123,15 +123,15 @@ func makeServiceMonitors() map[string]*v1alpha1.ServiceMonitor {
                 "group": "group2",
             },
         },
-        Spec: v1alpha1.ServiceMonitorSpec{
+        Spec: monitoringv1.ServiceMonitorSpec{
             Selector: metav1.LabelSelector{
                 MatchLabels: map[string]string{
                     "group":  "group2",
                     "group3": "group3",
                 },
             },
-            Endpoints: []v1alpha1.Endpoint{
-                v1alpha1.Endpoint{
+            Endpoints: []monitoringv1.Endpoint{
+                monitoringv1.Endpoint{
                     Port:     "web",
                     Interval: "30s",
                 },
pkg/prometheus (statefulset):

@@ -30,7 +30,7 @@ import (
     "k8s.io/client-go/pkg/apis/apps/v1beta1"
 
     "github.com/blang/semver"
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/pkg/errors"
     yaml "gopkg.in/yaml.v2"
 )
@@ -70,7 +70,7 @@ var (
     }
 )
 
-func makeStatefulSet(p v1alpha1.Prometheus, old *v1beta1.StatefulSet, config *Config, ruleConfigMaps []*v1.ConfigMap) (*v1beta1.StatefulSet, error) {
+func makeStatefulSet(p monitoringv1.Prometheus, old *v1beta1.StatefulSet, config *Config, ruleConfigMaps []*v1.ConfigMap) (*v1beta1.StatefulSet, error) {
     // TODO(fabxc): is this the right point to inject defaults?
     // Ideally we would do it before storing but that's currently not possible.
     // Potentially an update handler on first insertion.
@@ -239,7 +239,7 @@ func makeConfigSecret(name string, configMaps []*v1.ConfigMap) (*v1.Secret, error) {
     }, nil
 }
 
-func makeStatefulSetService(p *v1alpha1.Prometheus) *v1.Service {
+func makeStatefulSetService(p *monitoringv1.Prometheus) *v1.Service {
     svc := &v1.Service{
         ObjectMeta: metav1.ObjectMeta{
             Name: governingServiceName,
@@ -264,7 +264,7 @@ func makeStatefulSetService(p *v1alpha1.Prometheus) *v1.Service {
     return svc
 }
 
-func makeStatefulSetSpec(p v1alpha1.Prometheus, c *Config, ruleConfigMaps []*v1.ConfigMap) (*v1beta1.StatefulSetSpec, error) {
+func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []*v1.ConfigMap) (*v1beta1.StatefulSetSpec, error) {
     // Prometheus may take quite long to shut down to checkpoint existing data.
     // Allow up to 10 minutes for clean termination.
     terminationGracePeriod := int64(600)
@@ -506,7 +506,7 @@ func prefixedName(name string) string {
     return fmt.Sprintf("prometheus-%s", name)
 }
 
-func subPathForStorage(s *v1alpha1.StorageSpec) string {
+func subPathForStorage(s *monitoringv1.StorageSpec) string {
     if s == nil {
         return ""
     }
pkg/prometheus (statefulset test):

@@ -15,13 +15,14 @@
 package prometheus
 
 import (
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    "reflect"
+    "testing"
+
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/stretchr/testify/require"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/pkg/api/v1"
     "k8s.io/client-go/pkg/apis/apps/v1beta1"
-    "reflect"
-    "testing"
 )
 
 var (
@@ -38,7 +39,7 @@ func TestStatefulSetLabelingAndAnnotations(t *testing.T) {
         "testannotation": "testannotationvalue",
     }
 
-    sset, err := makeStatefulSet(v1alpha1.Prometheus{
+    sset, err := makeStatefulSet(monitoringv1.Prometheus{
         ObjectMeta: metav1.ObjectMeta{
             Labels:      labels,
             Annotations: annotations,
@@ -72,13 +73,13 @@ func TestStatefulSetPVC(t *testing.T) {
         },
     }
 
-    sset, err := makeStatefulSet(v1alpha1.Prometheus{
+    sset, err := makeStatefulSet(monitoringv1.Prometheus{
         ObjectMeta: metav1.ObjectMeta{
             Labels:      labels,
             Annotations: annotations,
         },
-        Spec: v1alpha1.PrometheusSpec{
-            Storage: &v1alpha1.StorageSpec{
+        Spec: monitoringv1.PrometheusSpec{
+            Storage: &monitoringv1.StorageSpec{
                 VolumeClaimTemplate: pvc,
             },
         },
@@ -159,8 +160,8 @@ func TestStatefulSetVolumeInitial(t *testing.T) {
         },
     }
 
-    sset, err := makeStatefulSet(v1alpha1.Prometheus{
-        Spec: v1alpha1.PrometheusSpec{
+    sset, err := makeStatefulSet(monitoringv1.Prometheus{
+        Spec: monitoringv1.PrometheusSpec{
             Secrets: []string{
                 "test-secret1",
             },
@@ -241,8 +242,8 @@ func TestStatefulSetVolumeSkip(t *testing.T) {
         },
     }
 
-    sset, err := makeStatefulSet(v1alpha1.Prometheus{
-        Spec: v1alpha1.PrometheusSpec{
+    sset, err := makeStatefulSet(monitoringv1.Prometheus{
+        Spec: monitoringv1.PrometheusSpec{
             Secrets: []string{
                 "test-secret1",
                 "test-secret2",
test (e2e main):

@@ -20,6 +20,9 @@ import (
     "os"
     "testing"
 
+    "k8s.io/client-go/pkg/api"
+
+    "github.com/coreos/prometheus-operator/pkg/k8sutil"
     operatorFramework "github.com/coreos/prometheus-operator/test/framework"
 )
 
@@ -44,6 +47,24 @@ func TestMain(m *testing.M) {
         os.Exit(1)
     }
 
+    err = k8sutil.WaitForCRDReady(framework.MonClient.Prometheuses(api.NamespaceAll).List)
+    if err != nil {
+        log.Printf("Prometheus CRD not ready: %v\n", err)
+        os.Exit(1)
+    }
+
+    err = k8sutil.WaitForCRDReady(framework.MonClient.ServiceMonitors(api.NamespaceAll).List)
+    if err != nil {
+        log.Printf("ServiceMonitor CRD not ready: %v\n", err)
+        os.Exit(1)
+    }
+
+    err = k8sutil.WaitForCRDReady(framework.MonClient.Alertmanagers(api.NamespaceAll).List)
+    if err != nil {
+        log.Printf("Alertmanagers CRD not ready: %v\n", err)
+        os.Exit(1)
+    }
+
     defer func() {
         if err := framework.Teardown(); err != nil {
             log.Printf("failed to teardown framework: %v\n", err)
test (e2e prometheus):

@@ -31,7 +31,7 @@ import (
     "k8s.io/client-go/pkg/api/v1"
 
     "github.com/coreos/prometheus-operator/pkg/alertmanager"
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/coreos/prometheus-operator/pkg/prometheus"
     testFramework "github.com/coreos/prometheus-operator/test/framework"
     "github.com/pkg/errors"
@@ -170,12 +170,12 @@ func TestPrometheusReloadConfig(t *testing.T) {
 
     name := "test"
     replicas := int32(1)
-    p := &v1alpha1.Prometheus{
+    p := &monitoringv1.Prometheus{
         ObjectMeta: metav1.ObjectMeta{
             Name:      name,
             Namespace: ns,
         },
-        Spec: v1alpha1.PrometheusSpec{
+        Spec: monitoringv1.PrometheusSpec{
             Replicas: &replicas,
             Version:  "v1.5.0",
             Resources: v1.ResourceRequirements{
@@ -430,21 +430,21 @@ func TestPrometheusDiscoverTargetPort(t *testing.T) {
     group := "servicediscovery-test"
     svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)
 
-    if _, err := framework.MonClient.ServiceMonitors(ns).Create(&v1alpha1.ServiceMonitor{
+    if _, err := framework.MonClient.ServiceMonitors(ns).Create(&monitoringv1.ServiceMonitor{
         ObjectMeta: metav1.ObjectMeta{
             Name: prometheusName,
             Labels: map[string]string{
                 "group": group,
             },
         },
-        Spec: v1alpha1.ServiceMonitorSpec{
+        Spec: monitoringv1.ServiceMonitorSpec{
             Selector: metav1.LabelSelector{
                 MatchLabels: map[string]string{
                     "group": group,
                 },
             },
-            Endpoints: []v1alpha1.Endpoint{
-                v1alpha1.Endpoint{
+            Endpoints: []monitoringv1.Endpoint{
+                monitoringv1.Endpoint{
                     TargetPort: intstr.FromInt(9090),
                     Interval:   "30s",
                 },
test/framework (alertmanager helpers):

@@ -27,6 +27,7 @@ import (
     "k8s.io/client-go/pkg/api/v1"
 
     "github.com/coreos/prometheus-operator/pkg/alertmanager"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
     "github.com/pkg/errors"
 )
@@ -45,7 +46,18 @@ receivers:
     - url: 'http://alertmanagerwh:30500/'
 `
 
-func (f *Framework) MakeBasicAlertmanager(name string, replicas int32) *v1alpha1.Alertmanager {
+func (f *Framework) MakeBasicAlertmanager(name string, replicas int32) *monitoringv1.Alertmanager {
+    return &monitoringv1.Alertmanager{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: name,
+        },
+        Spec: monitoringv1.AlertmanagerSpec{
+            Replicas: &replicas,
+        },
+    }
+}
+
+func (f *Framework) MakeBasicAlertmanagerV1alpha1(name string, replicas int32) *v1alpha1.Alertmanager {
     return &v1alpha1.Alertmanager{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
@@ -107,7 +119,7 @@ func (f *Framework) AlertmanagerConfigSecret(name string) (*v1.Secret, error) {
     return s, nil
 }
 
-func (f *Framework) CreateAlertmanagerAndWaitUntilReady(ns string, a *v1alpha1.Alertmanager) error {
+func (f *Framework) CreateAlertmanagerAndWaitUntilReady(ns string, a *monitoringv1.Alertmanager) error {
     amConfigSecretName := fmt.Sprintf("alertmanager-%s", a.Name)
     s, err := f.AlertmanagerConfigSecret(amConfigSecretName)
     if err != nil {
@@ -138,7 +150,7 @@ func (f *Framework) WaitForAlertmanagerReady(ns, name string, replicas int) error {
     return errors.Wrap(err, fmt.Sprintf("failed to create an Alertmanager cluster (%s) with %d instances", name, replicas))
 }
 
-func (f *Framework) UpdateAlertmanagerAndWaitUntilReady(ns string, a *v1alpha1.Alertmanager) error {
+func (f *Framework) UpdateAlertmanagerAndWaitUntilReady(ns string, a *monitoringv1.Alertmanager) error {
     _, err := f.MonClient.Alertmanagers(ns).Update(a)
     if err != nil {
         return err
test/framework (framework):

@@ -15,6 +15,9 @@
 package framework
 
 import (
+    "bytes"
+    "fmt"
+    "io"
     "net/http"
     "testing"
     "time"
@@ -27,19 +30,20 @@ import (
     "k8s.io/client-go/rest"
     "k8s.io/client-go/tools/clientcmd"
 
-    "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/coreos/prometheus-operator/pkg/k8sutil"
     "github.com/pkg/errors"
 )
 
 type Framework struct {
     KubeClient     kubernetes.Interface
-    MonClient      *v1alpha1.MonitoringV1alpha1Client
+    MonClient      monitoringv1.MonitoringV1Interface
     HTTPClient     *http.Client
     MasterHost     string
     Namespace      *v1.Namespace
     OperatorPod    *v1.Pod
     DefaultTimeout time.Duration
+    operatorLogs   *bytes.Buffer
 }
 
 // Setup setups a test framework and returns it.
@@ -59,7 +63,7 @@ func New(ns, kubeconfig, opImage string) (*Framework, error) {
         return nil, errors.Wrap(err, "creating http-client failed")
     }
 
-    mclient, err := v1alpha1.NewForConfig(config)
+    mclient, err := monitoringv1.NewForConfig(config)
     if err != nil {
         return nil, errors.Wrap(err, "creating monitoring client failed")
     }
@@ -76,6 +80,7 @@ func New(ns, kubeconfig, opImage string) (*Framework, error) {
         HTTPClient:     httpc,
         Namespace:      namespace,
         DefaultTimeout: time.Minute,
+        operatorLogs:   bytes.NewBuffer(nil),
     }
 
     err = f.Setup(opImage)
@@ -90,6 +95,49 @@ func (f *Framework) Setup(opImage string) error {
     if err := f.setupPrometheusOperator(opImage); err != nil {
         return errors.Wrap(err, "setup prometheus operator failed")
     }
+
+    go f.retryOperatorLogs()
+
     return nil
 }
 
+func (f *Framework) retryOperatorLogs() {
+    for {
+        err := f.recordOperatorLogs()
+        if err != nil {
+            errtxt := fmt.Sprintf("\n--- There was an error capturing logs (%s), retrying... ---\n", err)
+            f.operatorLogs.Write([]byte(errtxt))
+            time.Sleep(time.Second)
+        }
+    }
+}
+
+func (f *Framework) recordOperatorLogs() error {
+    deploy, err := f.KubeClient.AppsV1beta1().Deployments(f.Namespace.Name).Get("prometheus-operator", metav1.GetOptions{})
+    if err != nil {
+        return err
+    }
+    opts := metav1.ListOptions{LabelSelector: fields.SelectorFromSet(fields.Set(deploy.Spec.Template.ObjectMeta.Labels)).String()}
+    list, err := f.KubeClient.CoreV1().Pods(f.Namespace.Name).List(opts)
+    if err != nil {
+        return err
+    }
+
+    if len(list.Items) != 1 {
+        return fmt.Errorf("1 Prometheus Operator Pod expected, but found %d", len(list.Items))
+    }
+
+    r, err := f.KubeClient.CoreV1().Pods(f.Namespace.Name).GetLogs(list.Items[0].Name, &v1.PodLogOptions{}).Stream()
+    if err != nil {
+        return err
+    }
+    defer r.Close()
+
+    _, err = io.Copy(f.operatorLogs, r)
+    if err != nil {
+        return err
+    }
+
+    return nil
+}
 
@@ -137,17 +185,7 @@ func (f *Framework) setupPrometheusOperator(opImage string) error {
     }
     f.OperatorPod = &pl.Items[0]
 
-    err = k8sutil.WaitForCRDReady(f.KubeClient.Core().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.PrometheusName)
-    if err != nil {
-        return err
-    }
-
-    err = k8sutil.WaitForCRDReady(f.KubeClient.Core().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.ServiceMonitorName)
-    if err != nil {
-        return err
-    }
-
-    return k8sutil.WaitForCRDReady(f.KubeClient.Core().RESTClient(), v1alpha1.Group, v1alpha1.Version, v1alpha1.AlertmanagerName)
+    return nil
 }
 
 func (ctx *TestCtx) SetupPrometheusRBAC(t *testing.T, ns string, kubeClient kubernetes.Interface) {
@@ -166,6 +204,8 @@ func (ctx *TestCtx) SetupPrometheusRBAC(t *testing.T, ns string, kubeClient kubernetes.Interface) {
 
 // Teardown tears down a previously initialized test environment.
 func (f *Framework) Teardown() error {
+    fmt.Println("Prometheus Operator Logs Captured: \n\n", f.operatorLogs.String())
+
     if err := f.KubeClient.Core().Services(f.Namespace.Name).Delete("prometheus-operated", nil); err != nil && !k8sutil.IsResourceNotFoundError(err) {
         return err
     }
test/framework (prometheus helpers):

@@ -27,12 +27,42 @@ import (
     "k8s.io/apimachinery/pkg/util/wait"
     "k8s.io/client-go/pkg/api/v1"
 
+    monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
     "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1alpha1"
     "github.com/coreos/prometheus-operator/pkg/prometheus"
     "github.com/pkg/errors"
 )
 
-func (f *Framework) MakeBasicPrometheus(ns, name, group string, replicas int32) *v1alpha1.Prometheus {
+func (f *Framework) MakeBasicPrometheus(ns, name, group string, replicas int32) *monitoringv1.Prometheus {
+    return &monitoringv1.Prometheus{
+        ObjectMeta: metav1.ObjectMeta{
+            Name:      name,
+            Namespace: ns,
+        },
+        Spec: monitoringv1.PrometheusSpec{
+            Replicas: &replicas,
+            Version:  prometheus.DefaultVersion,
+            ServiceMonitorSelector: &metav1.LabelSelector{
+                MatchLabels: map[string]string{
+                    "group": group,
+                },
+            },
+            ServiceAccountName: "prometheus",
+            RuleSelector: &metav1.LabelSelector{
+                MatchLabels: map[string]string{
+                    "role": "rulefile",
+                },
+            },
+            Resources: v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    v1.ResourceMemory: resource.MustParse("400Mi"),
+                },
+            },
+        },
+    }
+}
+
+func (f *Framework) MakeBasicPrometheusV1alpha1(ns, name, group string, replicas int32) *v1alpha1.Prometheus {
     return &v1alpha1.Prometheus{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
@@ -61,10 +91,10 @@ func (f *Framework) MakeBasicPrometheus(ns, name, group string, replicas int32) {
     }
 }
 
-func (f *Framework) AddAlertingToPrometheus(p *v1alpha1.Prometheus, ns, name string) {
-    p.Spec.Alerting = v1alpha1.AlertingSpec{
-        Alertmanagers: []v1alpha1.AlertmanagerEndpoints{
-            v1alpha1.AlertmanagerEndpoints{
+func (f *Framework) AddAlertingToPrometheus(p *monitoringv1.Prometheus, ns, name string) {
+    p.Spec.Alerting = monitoringv1.AlertingSpec{
+        Alertmanagers: []monitoringv1.AlertmanagerEndpoints{
+            monitoringv1.AlertmanagerEndpoints{
                 Namespace: ns,
                 Name:      fmt.Sprintf("alertmanager-%s", name),
                 Port:      intstr.FromString("web"),
@@ -73,7 +103,31 @@ func (f *Framework) AddAlertingToPrometheus(p *v1alpha1.Prometheus, ns, name string) {
     }
 }
 
-func (f *Framework) MakeBasicServiceMonitor(name string) *v1alpha1.ServiceMonitor {
+func (f *Framework) MakeBasicServiceMonitor(name string) *monitoringv1.ServiceMonitor {
+    return &monitoringv1.ServiceMonitor{
+        ObjectMeta: metav1.ObjectMeta{
+            Name: name,
+            Labels: map[string]string{
+                "group": name,
+            },
+        },
+        Spec: monitoringv1.ServiceMonitorSpec{
+            Selector: metav1.LabelSelector{
+                MatchLabels: map[string]string{
+                    "group": name,
+                },
+            },
+            Endpoints: []monitoringv1.Endpoint{
+                monitoringv1.Endpoint{
+                    Port:     "web",
+                    Interval: "30s",
+                },
+            },
+        },
+    }
+}
+
+func (f *Framework) MakeBasicServiceMonitorV1alpha1(name string) *v1alpha1.ServiceMonitor {
     return &v1alpha1.ServiceMonitor{
         ObjectMeta: metav1.ObjectMeta{
             Name: name,
@@ -122,7 +176,7 @@ func (f *Framework) MakePrometheusService(name, group string, serviceType v1.ServiceType) *v1.Service {
     return service
 }
 
-func (f *Framework) CreatePrometheusAndWaitUntilReady(ns string, p *v1alpha1.Prometheus) error {
+func (f *Framework) CreatePrometheusAndWaitUntilReady(ns string, p *monitoringv1.Prometheus) error {
     _, err := f.MonClient.Prometheuses(ns).Create(p)
     if err != nil {
         return err
@@ -135,7 +189,7 @@ func (f *Framework) CreatePrometheusAndWaitUntilReady(ns string, p *v1alpha1.Prometheus) error {
     return nil
 }
 
-func (f *Framework) UpdatePrometheusAndWaitUntilReady(ns string, p *v1alpha1.Prometheus) error {
+func (f *Framework) UpdatePrometheusAndWaitUntilReady(ns string, p *monitoringv1.Prometheus) error {
     _, err := f.MonClient.Prometheuses(ns).Update(p)
     if err != nil {
         return err
@@ -147,7 +201,7 @@ func (f *Framework) UpdatePrometheusAndWaitUntilReady(ns string, p *v1alpha1.Prometheus) error {
     return nil
 }
 
-func (f *Framework) WaitForPrometheusReady(p *v1alpha1.Prometheus, timeout time.Duration) error {
+func (f *Framework) WaitForPrometheusReady(p *monitoringv1.Prometheus, timeout time.Duration) error {
     return wait.Poll(2*time.Second, timeout, func() (bool, error) {
         st, _, err := prometheus.PrometheusStatus(f.KubeClient, p)
         if err != nil {
@@ -181,7 +235,7 @@ func (f *Framework) DeletePrometheusAndWaitUntilGone(ns, name string) error {
     return nil
 }
 
-func (f *Framework) WaitForPrometheusRunImageAndReady(ns string, p *v1alpha1.Prometheus) error {
+func (f *Framework) WaitForPrometheusRunImageAndReady(ns string, p *monitoringv1.Prometheus) error {
     if err := WaitForPodsRunImage(f.KubeClient, ns, int(*p.Spec.Replicas), promImage(p.Spec.Version), prometheus.ListOptions(p.Name)); err != nil {
         return err
     }
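Note: taken together, the framework now hands out v1-typed objects by default and keeps the ...V1alpha1 constructors around, presumably for exercising the TPR-to-CRD migration path. A hypothetical e2e test showing how the helpers compose; it uses only functions and fields that appear in the diff, and the namespace handling is simplified relative to the real suite:

package e2e

import (
	"testing"

	operatorFramework "github.com/coreos/prometheus-operator/test/framework"
)

// framework is the shared *Framework that TestMain initializes, as in
// the diff's test main.
var framework *operatorFramework.Framework

// TestBasicPrometheusSketch is a hypothetical composition of the
// helpers this commit migrated to v1 types.
func TestBasicPrometheusSketch(t *testing.T) {
	ns := "default" // the real suite provisions a namespace per test

	p := framework.MakeBasicPrometheus(ns, "test", "test-group", 1)
	if err := framework.CreatePrometheusAndWaitUntilReady(ns, p); err != nil {
		t.Fatal(err)
	}

	sm := framework.MakeBasicServiceMonitor("test-group")
	if _, err := framework.MonClient.ServiceMonitors(ns).Create(sm); err != nil {
		t.Fatal(err)
	}
}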