1
0
Fork 0
mirror of https://github.com/prometheus-operator/prometheus-operator.git synced 2025-04-20 19:29:10 +00:00

feat: emit events for invalid configurations

* feat: emit events for invalid configurations

Emit events when the controller rejects a resource, owing to an invalid
configuration.

Fixes: 

Signed-off-by: Pranshu Srivastava <rexagod@gmail.com>

* Decouple event recorder from operator metrics

Signed-off-by: Arthur Silva Sens <arthur.sens@coralogix.com>

* Only emit events if permissions were given

Signed-off-by: Arthur Silva Sens <arthur.sens@coralogix.com>

* Keep operator name consistent across telemetry

Signed-off-by: Arthur Silva Sens <arthur.sens@coralogix.com>

* Address comments

Signed-off-by: Arthur Silva Sens <arthur.sens@coralogix.com>

---------

Signed-off-by: Pranshu Srivastava <rexagod@gmail.com>
Signed-off-by: Arthur Silva Sens <arthur.sens@coralogix.com>
Co-authored-by: Pranshu Srivastava <rexagod@gmail.com>
This commit is contained in:
Arthur Silva Sens 2024-01-10 08:59:47 -03:00 committed by GitHub
parent 4cf0f5eae7
commit 895cb3b005
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
16 changed files with 162 additions and 25 deletions

View file

@ -98,6 +98,13 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- patch
- create
- apiGroups:
- networking.k8s.io
resources:

View file

@ -98,6 +98,13 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- patch
- create
- apiGroups:
- networking.k8s.io
resources:

7
bundle.yaml generated
View file

@ -42741,6 +42741,13 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- patch
- create
- apiGroups:
- networking.k8s.io
resources:

View file

@ -32,6 +32,7 @@ import (
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/prometheus/common/version"
"golang.org/x/sync/errgroup"
corev1 "k8s.io/api/core/v1"
storagev1 "k8s.io/api/storage/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
@ -241,6 +242,24 @@ func run(fs *flag.FlagSet) int {
return 1
}
canEmitEvents, reasons, err := k8sutil.IsAllowed(ctx, kclient.AuthorizationV1().SelfSubjectAccessReviews(), nil,
k8sutil.ResourceAttribute{
Group: corev1.GroupName,
Version: corev1.SchemeGroupVersion.Version,
Resource: corev1.SchemeGroupVersion.WithResource("events").Resource,
Verbs: []string{"create", "patch"},
})
if err != nil {
level.Error(logger).Log("msg", "failed to check Events support", "err", err)
cancel()
return 1
}
if !canEmitEvents {
for _, reason := range reasons {
level.Warn(logger).Log("msg", "missing permission to emit events", "reason", reason)
}
}
scrapeConfigSupported, err := checkPrerequisites(
ctx,
logger,
@ -261,7 +280,7 @@ func run(fs *flag.FlagSet) int {
return 1
}
po, err := prometheuscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "prometheusoperator"), r, scrapeConfigSupported, canReadStorageClass)
po, err := prometheuscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", prometheuscontroller.ControllerName), r, scrapeConfigSupported, canReadStorageClass, canEmitEvents)
if err != nil {
level.Error(logger).Log("msg", "instantiating prometheus controller failed", "err", err)
cancel()
@ -296,7 +315,7 @@ func run(fs *flag.FlagSet) int {
var pao *prometheusagentcontroller.Operator
if prometheusAgentSupported {
pao, err = prometheusagentcontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "prometheusagentoperator"), r, scrapeConfigSupported, canReadStorageClass)
pao, err = prometheusagentcontroller.New(ctx, restConfig, cfg, log.With(logger, "component", prometheusagentcontroller.ControllerName), r, scrapeConfigSupported, canReadStorageClass, canEmitEvents)
if err != nil {
level.Error(logger).Log("msg", "instantiating prometheus-agent controller failed", "err", err)
cancel()
@ -304,14 +323,14 @@ func run(fs *flag.FlagSet) int {
}
}
ao, err := alertmanagercontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "alertmanageroperator"), r, canReadStorageClass)
ao, err := alertmanagercontroller.New(ctx, restConfig, cfg, log.With(logger, "component", alertmanagercontroller.ControllerName), r, canReadStorageClass, canEmitEvents)
if err != nil {
level.Error(logger).Log("msg", "instantiating alertmanager controller failed", "err", err)
cancel()
return 1
}
to, err := thanoscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", "thanosoperator"), r, canReadStorageClass)
to, err := thanoscontroller.New(ctx, restConfig, cfg, log.With(logger, "component", thanoscontroller.ControllerName), r, canReadStorageClass, canEmitEvents)
if err != nil {
level.Error(logger).Log("msg", "instantiating thanos controller failed", "err", err)
cancel()

View file

@ -76,6 +76,13 @@ rules:
- get
- list
- watch
- apiGroups:
- ""
resources:
- events
verbs:
- patch
- create
- apiGroups:
- networking.k8s.io
resources:

View file

@ -133,6 +133,11 @@ function(params) {
resources: ['namespaces'],
verbs: ['get', 'list', 'watch'],
},
{
apiGroups: [''],
resources: ['events'],
verbs: ['patch', 'create'],
},
{
apiGroups: ['networking.k8s.io'],
resources: ['ingresses'],

View file

@ -40,6 +40,7 @@ import (
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"github.com/prometheus-operator/prometheus-operator/pkg/alertmanager/validation"
validationv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/alertmanager/validation/v1alpha1"
@ -56,7 +57,8 @@ import (
)
const (
resyncPeriod = 5 * time.Minute
resyncPeriod = 5 * time.Minute
ControllerName = "alertmanager-controller"
)
// Config defines the operator's parameters for the Alertmanager controller.
@ -95,13 +97,15 @@ type Operator struct {
metrics *operator.Metrics
reconciliations *operator.ReconciliationTracker
eventRecorder record.EventRecorder
canReadStorageClass bool
config Config
}
// New creates a new controller.
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, canReadStorageClass bool) (*Operator, error) {
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, canReadStorageClass, canEmitEvents bool) (*Operator, error) {
client, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, fmt.Errorf("instantiating kubernetes client failed: %w", err)
@ -120,6 +124,11 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
// All the metrics exposed by the controller get the controller="alertmanager" label.
r = prometheus.WrapRegistererWith(prometheus.Labels{"controller": "alertmanager"}, r)
var eventsClient kubernetes.Interface
if canEmitEvents {
eventsClient = client
}
o := &Operator{
kclient: client,
mdClient: mdClient,
@ -131,6 +140,7 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
metrics: operator.NewMetrics(r),
reconciliations: &operator.ReconciliationTracker{},
eventRecorder: operator.NewEventRecorder(eventsClient, ControllerName),
canReadStorageClass: canReadStorageClass,
config: Config{
@ -1069,6 +1079,7 @@ func (c *Operator) selectAlertmanagerConfigs(ctx context.Context, am *monitoring
"namespace", am.Namespace,
"alertmanager", am.Name,
)
c.eventRecorder.Eventf(amc, v1.EventTypeWarning, operator.InvalidConfigurationEvent, "AlertmanagerConfig %s was rejected due to invalid configuration: %v", amc.GetName(), err)
continue
}

View file

@ -24,15 +24,24 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus/client_golang/prometheus"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
"github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/scheme"
)
const PrometheusOperatorFieldManager = "PrometheusOperator"
const (
PrometheusOperatorFieldManager = "PrometheusOperator"
InvalidConfigurationEvent = "InvalidConfiguration"
)
var (
syncsDesc = prometheus.NewDesc(
@ -259,6 +268,19 @@ func NewMetrics(r prometheus.Registerer) *Metrics {
return &m
}
// NewEventRecorder returns a record.EventRecorder whose events carry the given
// component name in their EventSource. All events are emitted to structured
// logs; they are additionally persisted to the cluster (core/v1 Events API)
// only when a non-nil client is supplied.
func NewEventRecorder(client kubernetes.Interface, component string) record.EventRecorder {
	eventBroadcaster := record.NewBroadcaster()
	// Verbosity 0: always log emitted events.
	eventBroadcaster.StartStructuredLogging(0)
	// Client can be nil in tests or when the operator doesn't have permissions to create events.
	if client != nil {
		// Namespace "" lets the sink write events into each object's own namespace.
		eventBroadcaster.StartRecordingToSink(&typedv1.EventSinkImpl{Interface: client.CoreV1().Events("")})
	}
	return eventBroadcaster.NewRecorder(scheme.Scheme, v1.EventSource{Component: component})
}
// StsDeleteCreateCounter returns a counter to track statefulset's recreations.
func (m *Metrics) StsDeleteCreateCounter() prometheus.Counter {
return m.stsDeleteCreateCounter

View file

@ -23,8 +23,10 @@ import (
"github.com/go-kit/log"
"github.com/go-kit/log/level"
"github.com/prometheus/prometheus/model/rulefmt"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/record"
"sigs.k8s.io/yaml"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@ -46,10 +48,13 @@ type PrometheusRuleSelector struct {
ruleSelector labels.Selector
nsLabeler *namespacelabeler.Labeler
ruleInformer *informers.ForResource
logger log.Logger
eventRecorder record.EventRecorder
logger log.Logger
}
func NewPrometheusRuleSelector(ruleFormat RuleConfigurationFormat, version string, labelSelector *metav1.LabelSelector, nsLabeler *namespacelabeler.Labeler, ruleInformer *informers.ForResource, logger log.Logger) (*PrometheusRuleSelector, error) {
func NewPrometheusRuleSelector(ruleFormat RuleConfigurationFormat, version string, labelSelector *metav1.LabelSelector, nsLabeler *namespacelabeler.Labeler, ruleInformer *informers.ForResource, eventRecorder record.EventRecorder, logger log.Logger) (*PrometheusRuleSelector, error) {
componentVersion, err := semver.ParseTolerant(version)
if err != nil {
return nil, fmt.Errorf("failed to parse version: %w", err)
@ -61,12 +66,13 @@ func NewPrometheusRuleSelector(ruleFormat RuleConfigurationFormat, version strin
}
return &PrometheusRuleSelector{
ruleFormat: ruleFormat,
version: componentVersion,
ruleSelector: ruleSelector,
nsLabeler: nsLabeler,
ruleInformer: ruleInformer,
logger: logger,
ruleFormat: ruleFormat,
version: componentVersion,
ruleSelector: ruleSelector,
nsLabeler: nsLabeler,
ruleInformer: ruleInformer,
eventRecorder: eventRecorder,
logger: logger,
}, nil
}
@ -203,6 +209,7 @@ func (prs *PrometheusRuleSelector) Select(namespaces []string) (map[string]strin
"prometheusrule", promRule.Name,
"namespace", promRule.Namespace,
)
prs.eventRecorder.Eventf(promRule, v1.EventTypeWarning, "InvalidConfiguration", "PrometheusRule %s was rejected due to invalid configuration: %v", promRule.Name, err)
continue
}

View file

@ -35,6 +35,7 @@ import (
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
@ -49,7 +50,8 @@ import (
)
const (
resyncPeriod = 5 * time.Minute
resyncPeriod = 5 * time.Minute
ControllerName = "prometheusagent-controller"
)
// Operator manages life cycle of Prometheus agent deployments and
@ -84,11 +86,13 @@ type Operator struct {
scrapeConfigSupported bool
canReadStorageClass bool
eventRecorder record.EventRecorder
statusReporter prompkg.StatusReporter
}
// New creates a new controller.
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported bool, canReadStorageClass bool) (*Operator, error) {
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported, canReadStorageClass, canEmitEvents bool) (*Operator, error) {
client, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, fmt.Errorf("instantiating kubernetes client failed: %w", err)
@ -107,6 +111,11 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
// All the metrics exposed by the controller get the controller="prometheus-agent" label.
r = prometheus.WrapRegistererWith(prometheus.Labels{"controller": "prometheus-agent"}, r)
var eventsClient kubernetes.Interface
if canEmitEvents {
eventsClient = client
}
o := &Operator{
kclient: client,
mdClient: mdClient,
@ -124,6 +133,7 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
reconciliations: &operator.ReconciliationTracker{},
scrapeConfigSupported: scrapeConfigSupported,
canReadStorageClass: canReadStorageClass,
eventRecorder: operator.NewEventRecorder(eventsClient, ControllerName),
}
o.metrics.MustRegister(
o.reconciliations,
@ -687,7 +697,7 @@ func (c *Operator) sync(ctx context.Context, key string) error {
}
func (c *Operator) createOrUpdateConfigurationSecret(ctx context.Context, p *monitoringv1alpha1.PrometheusAgent, cg *prompkg.ConfigGenerator, store *assets.Store) error {
resourceSelector := prompkg.NewResourceSelector(c.logger, p, store, c.nsMonInf, c.metrics)
resourceSelector := prompkg.NewResourceSelector(c.logger, p, store, c.nsMonInf, c.metrics, c.eventRecorder)
smons, err := resourceSelector.SelectServiceMonitors(ctx, c.smonInfs.ListAllByNamespace)
if err != nil {

View file

@ -27,10 +27,12 @@ import (
"github.com/go-kit/log/level"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/relabel"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/fields"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@ -47,17 +49,20 @@ type ResourceSelector struct {
namespaceInformers cache.SharedIndexInformer
metrics *operator.Metrics
accessor *operator.Accessor
eventRecorder record.EventRecorder
}
type ListAllByNamespaceFn func(namespace string, selector labels.Selector, appendFn cache.AppendFunc) error
func NewResourceSelector(l log.Logger, p monitoringv1.PrometheusInterface, store *assets.Store, namespaceInformers cache.SharedIndexInformer, metrics *operator.Metrics) *ResourceSelector {
func NewResourceSelector(l log.Logger, p monitoringv1.PrometheusInterface, store *assets.Store, namespaceInformers cache.SharedIndexInformer, metrics *operator.Metrics, eventRecorder record.EventRecorder) *ResourceSelector {
return &ResourceSelector{
l: l,
p: p,
store: store,
namespaceInformers: namespaceInformers,
metrics: metrics,
eventRecorder: eventRecorder,
accessor: operator.NewAccessor(l),
}
}
@ -173,6 +178,7 @@ func (rs *ResourceSelector) SelectServiceMonitors(ctx context.Context, listFn Li
"namespace", objMeta.GetNamespace(),
"prometheus", objMeta.GetName(),
)
rs.eventRecorder.Eventf(sm, v1.EventTypeWarning, operator.InvalidConfigurationEvent, "ServiceMonitor %s was rejected due to invalid configuration: %v", sm.GetName(), err)
continue
}
@ -422,6 +428,7 @@ func (rs *ResourceSelector) SelectPodMonitors(ctx context.Context, listFn ListAl
"namespace", objMeta.GetNamespace(),
"prometheus", objMeta.GetName(),
)
rs.eventRecorder.Eventf(pm, v1.EventTypeWarning, operator.InvalidConfigurationEvent, "PodMonitor %s was rejected due to invalid configuration: %v", pm.GetName(), err)
continue
}
@ -503,6 +510,7 @@ func (rs *ResourceSelector) SelectProbes(ctx context.Context, listFn ListAllByNa
"namespace", objMeta.GetNamespace(),
"prometheus", objMeta.GetName(),
)
rs.eventRecorder.Eventf(probe, v1.EventTypeWarning, operator.InvalidConfigurationEvent, "Probe %s was rejected due to invalid configuration: %v", probe.GetName(), err)
}
if err = probe.Spec.Targets.Validate(); err != nil {
@ -665,6 +673,7 @@ func (rs *ResourceSelector) SelectScrapeConfigs(ctx context.Context, listFn List
"namespace", objMeta.GetNamespace(),
"prometheus", objMeta.GetName(),
)
rs.eventRecorder.Eventf(sc, v1.EventTypeWarning, operator.InvalidConfigurationEvent, "ScrapeConfig %s was rejected due to invalid configuration: %v", sc.GetName(), err)
}
if err = validateRelabelConfigs(rs.p, sc.Spec.RelabelConfigs); err != nil {

View file

@ -29,6 +29,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
"k8s.io/utils/ptr"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@ -577,6 +578,7 @@ func TestSelectProbes(t *testing.T) {
nil,
nil,
operator.NewMetrics(prometheus.NewPedanticRegistry()),
record.NewFakeRecorder(1),
)
probe := &monitoringv1.Probe{
@ -970,6 +972,7 @@ func TestSelectServiceMonitors(t *testing.T) {
assets.NewStore(cs.CoreV1(), cs.CoreV1()),
nil,
operator.NewMetrics(prometheus.NewPedanticRegistry()),
record.NewFakeRecorder(1),
)
sm := &monitoringv1.ServiceMonitor{
@ -1070,6 +1073,7 @@ func TestSelectPodMonitors(t *testing.T) {
nil,
nil,
operator.NewMetrics(prometheus.NewPedanticRegistry()),
record.NewFakeRecorder(1),
)
pm := &monitoringv1.PodMonitor{
@ -1758,6 +1762,7 @@ func TestSelectScrapeConfigs(t *testing.T) {
assets.NewStore(cs.CoreV1(), cs.CoreV1()),
nil,
operator.NewMetrics(prometheus.NewPedanticRegistry()),
record.NewFakeRecorder(1),
)
sc := &monitoringv1alpha1.ScrapeConfig{

View file

@ -35,6 +35,7 @@ import (
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
@ -49,7 +50,8 @@ import (
)
const (
resyncPeriod = 5 * time.Minute
resyncPeriod = 5 * time.Minute
ControllerName = "prometheus-controller"
)
// Operator manages life cycle of Prometheus deployments and
@ -85,10 +87,12 @@ type Operator struct {
endpointSliceSupported bool
scrapeConfigSupported bool
canReadStorageClass bool
eventRecorder record.EventRecorder
}
// New creates a new controller.
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported bool, canReadStorageClass bool) (*Operator, error) {
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, scrapeConfigSupported, canReadStorageClass, canEmitEvents bool) (*Operator, error) {
client, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, fmt.Errorf("instantiating kubernetes client failed: %w", err)
@ -107,6 +111,11 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
// All the metrics exposed by the controller get the controller="prometheus" label.
r = prometheus.WrapRegistererWith(prometheus.Labels{"controller": "prometheus"}, r)
var eventsClient kubernetes.Interface
if canEmitEvents {
eventsClient = client
}
o := &Operator{
kclient: client,
mdClient: mdClient,
@ -127,6 +136,8 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
scrapeConfigSupported: scrapeConfigSupported,
canReadStorageClass: canReadStorageClass,
eventRecorder: operator.NewEventRecorder(eventsClient, ControllerName),
}
o.metrics.MustRegister(o.reconciliations)
@ -1285,7 +1296,7 @@ func (c *Operator) createOrUpdateConfigurationSecret(ctx context.Context, p *mon
return nil
}
resourceSelector := prompkg.NewResourceSelector(c.logger, p, store, c.nsMonInf, c.metrics)
resourceSelector := prompkg.NewResourceSelector(c.logger, p, store, c.nsMonInf, c.metrics, c.eventRecorder)
smons, err := resourceSelector.SelectServiceMonitors(ctx, c.smonInfs.ListAllByNamespace)
if err != nil {

View file

@ -67,7 +67,7 @@ func (c *Operator) createOrUpdateRuleConfigMaps(ctx context.Context, p *monitori
logger := log.With(c.logger, "prometheus", p.Name, "namespace", p.Namespace)
promVersion := operator.StringValOrDefault(p.GetCommonPrometheusFields().Version, operator.DefaultPrometheusVersion)
promRuleSelector, err := operator.NewPrometheusRuleSelector(operator.PrometheusFormat, promVersion, p.Spec.RuleSelector, nsLabeler, c.ruleInfs, logger)
promRuleSelector, err := operator.NewPrometheusRuleSelector(operator.PrometheusFormat, promVersion, p.Spec.RuleSelector, nsLabeler, c.ruleInfs, c.eventRecorder, logger)
if err != nil {
return nil, fmt.Errorf("initializing PrometheusRules failed: %w", err)
}

View file

@ -34,6 +34,7 @@ import (
"k8s.io/client-go/metadata"
"k8s.io/client-go/rest"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/record"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
monitoringv1ac "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1"
@ -47,6 +48,7 @@ import (
const (
resyncPeriod = 5 * time.Minute
thanosRulerLabel = "thanos-ruler"
ControllerName = "thanos-controller"
)
// Operator manages life cycle of Thanos deployments and
@ -72,6 +74,8 @@ type Operator struct {
reconciliations *operator.ReconciliationTracker
canReadStorageClass bool
eventRecorder record.EventRecorder
config Config
}
@ -87,7 +91,7 @@ type Config struct {
}
// New creates a new controller.
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, canReadStorageClass bool) (*Operator, error) {
func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger log.Logger, r prometheus.Registerer, canReadStorageClass, canEmitEvents bool) (*Operator, error) {
client, err := kubernetes.NewForConfig(restConfig)
if err != nil {
return nil, fmt.Errorf("instantiating kubernetes client failed: %w", err)
@ -106,6 +110,11 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
// All the metrics exposed by the controller get the controller="thanos" label.
r = prometheus.WrapRegistererWith(prometheus.Labels{"controller": "thanos"}, r)
var eventsClient kubernetes.Interface
if canEmitEvents {
eventsClient = client
}
o := &Operator{
kclient: client,
mdClient: mdClient,
@ -113,6 +122,7 @@ func New(ctx context.Context, restConfig *rest.Config, c operator.Config, logger
logger: logger,
accessor: operator.NewAccessor(logger),
metrics: operator.NewMetrics(r),
eventRecorder: operator.NewEventRecorder(eventsClient, ControllerName),
reconciliations: &operator.ReconciliationTracker{},
canReadStorageClass: canReadStorageClass,
config: Config{

View file

@ -68,7 +68,7 @@ func (o *Operator) createOrUpdateRuleConfigMaps(ctx context.Context, t *monitori
logger := log.With(o.logger, "thanos", t.Name, "namespace", t.Namespace)
thanosVersion := operator.StringValOrDefault(t.Spec.Version, operator.DefaultThanosVersion)
promRuleSelector, err := operator.NewPrometheusRuleSelector(operator.ThanosFormat, thanosVersion, t.Spec.RuleSelector, nsLabeler, o.ruleInfs, logger)
promRuleSelector, err := operator.NewPrometheusRuleSelector(operator.ThanosFormat, thanosVersion, t.Spec.RuleSelector, nsLabeler, o.ruleInfs, o.eventRecorder, logger)
if err != nil {
return nil, fmt.Errorf("initializing PrometheusRules failed: %w", err)
}