mirror of
https://github.com/prometheus-operator/prometheus-operator.git
synced 2025-04-09 02:24:16 +00:00
[BUGFIX] Fixing pod and service monitor selector validation (#7217)
* [BUGFIX] Fixing pod and service monitor selector validation

Signed-off-by: Nicolas Takashi <nicolas.tcs@hotmail.com>
Co-authored-by: Simon Pasquier <spasquie@redhat.com>
This commit is contained in:
parent 54669ad94e
commit fa1aea3401

5 changed files with 18 additions and 26 deletions
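Why the validation broke (a reading of the diff below, not text from the commit): the removed helper lowercased each `metav1` selector operator and cast it straight to `selection.Operator`. `In`, `NotIn` and `Exists` survive that round-trip, but `DoesNotExist` lowercases to `doesnotexist`, which the `selection` package does not recognize (it spells that operator `!`), so `labels.NewRequirement` returned an error and otherwise valid ServiceMonitors/PodMonitors were rejected. A minimal, self-contained sketch of the failing conversion, using only the calls visible in the removed code:

```go
package main

import (
	"fmt"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/apimachinery/pkg/selection"
)

func main() {
	exp := metav1.LabelSelectorRequirement{
		Key:      "groupb",
		Operator: metav1.LabelSelectorOpDoesNotExist,
	}

	// Pre-fix conversion: lowercase the metav1 operator and cast it to
	// selection.Operator. "DoesNotExist" becomes "doesnotexist", but the
	// selection package expects "!" for that operator, so NewRequirement fails.
	op := selection.Operator(strings.ToLower(string(exp.Operator)))
	if _, err := labels.NewRequirement(exp.Key, op, exp.Values); err != nil {
		fmt.Println(err) // non-nil: "doesnotexist" is not a recognized operator
	}
}
```

The hunks below replace this hand-rolled conversion with `metav1.LabelSelectorAsSelector`, which handles every `LabelSelectorOp*` value.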
@@ -1,3 +1,7 @@
+# Unreleased
+
+* [BUGFIX] Fix pod and service monitor selector validation. #7214
+
 # 0.79.1 / 2024-12-17
 
 * [CHANGE] Rename the field `scrapeFallbackProtocol` to `fallbackScrapeProtocol` to match with naming as in Prometheus #7199
@@ -33,8 +33,6 @@ import (
 	"gopkg.in/yaml.v2"
 
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
-	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/apimachinery/pkg/util/intstr"
 	"k8s.io/utils/ptr"
@@ -2229,14 +2227,11 @@ func (cg *ConfigGenerator) generateK8SSDConfig(
 
 func (cg *ConfigGenerator) generateRoleSelectorConfig(k8sSDConfig yaml.MapSlice, roles []string, selector metav1.LabelSelector) yaml.MapSlice {
 	selectors := make([]yaml.MapSlice, 0, len(roles))
-	labelSelector := labels.SelectorFromValidatedSet(labels.Set(selector.MatchLabels))
-
-	for _, exp := range selector.MatchExpressions {
-		requirement, err := labels.NewRequirement(exp.Key, selection.Operator(strings.ToLower(string(exp.Operator))), exp.Values)
-		if err != nil {
-			panic(fmt.Errorf("failed to create label requirement: %w", err))
-		}
-		labelSelector = labelSelector.Add(*requirement)
-	}
+	labelSelector, err := metav1.LabelSelectorAsSelector(&selector)
+	if err != nil {
+		// The field must have been validated by the controller beforehand.
+		// If we fail here, it's a functional bug.
+		panic(fmt.Errorf("failed to convert label selector to selector: %w", err))
+	}
 
 	for _, role := range roles {
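For illustration, a small sketch (not repository code) of what the new conversion produces for a selector shaped like the updated test case below; the exact requirement ordering is up to the library, but the golden file at the end of this diff shows the operator emits `group=group1,group in (group2),!groupb`:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Selector shaped like the TestPodMonitorSelectors case below:
	// one match label plus In and DoesNotExist expressions.
	selector := metav1.LabelSelector{
		MatchLabels: map[string]string{"group": "group1"},
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "group", Operator: metav1.LabelSelectorOpIn, Values: []string{"group2"}},
			{Key: "groupb", Operator: metav1.LabelSelectorOpDoesNotExist},
		},
	}

	// LabelSelectorAsSelector handles every LabelSelectorOp* value,
	// including DoesNotExist, which the previous hand-rolled conversion broke on.
	labelSelector, err := metav1.LabelSelectorAsSelector(&selector)
	if err != nil {
		panic(err)
	}
	// Prints something like: group=group1,group in (group2),!groupb
	fmt.Println(labelSelector.String())
}
```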
@@ -12613,6 +12613,10 @@ func TestPodMonitorSelectors(t *testing.T) {
 						Operator: metav1.LabelSelectorOpIn,
 						Values:   []string{"group2"},
 					},
+					{
+						Key:      "groupb",
+						Operator: metav1.LabelSelectorOpDoesNotExist,
+					},
 				},
 			},
 			PodMetricsEndpoints: []monitoringv1.PodMetricsEndpoint{
@@ -31,7 +31,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/apimachinery/pkg/labels"
-	"k8s.io/apimachinery/pkg/selection"
 	"k8s.io/client-go/tools/cache"
 	"k8s.io/client-go/tools/record"
 	"k8s.io/utils/ptr"
@@ -140,9 +139,9 @@ func (rs *ResourceSelector) SelectServiceMonitors(ctx context.Context, listFn Li
 			rs.eventRecorder.Eventf(sm, v1.EventTypeWarning, operator.InvalidConfigurationEvent, "ServiceMonitor %s was rejected due to invalid configuration: %v", sm.GetName(), err)
 		}
 
-		err = validaMatchExpressions(sm.Spec.Selector.MatchExpressions)
+		_, err = metav1.LabelSelectorAsSelector(&sm.Spec.Selector)
 		if err != nil {
-			rejectFn(sm, fmt.Errorf("failed to create label requirement: %w", err))
+			rejectFn(sm, fmt.Errorf("failed to parse label selector: %w", err))
 			continue
 		}
 
@@ -385,16 +384,6 @@ func validateScrapeClass(p monitoringv1.PrometheusInterface, sc *string) error {
 	return fmt.Errorf("scrapeClass %q not found in Prometheus scrapeClasses", *sc)
 }
 
-func validaMatchExpressions(matchExpressions []metav1.LabelSelectorRequirement) error {
-	for _, exp := range matchExpressions {
-		_, err := labels.NewRequirement(exp.Key, selection.Operator(strings.ToLower(string(exp.Operator))), exp.Values)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
 func validateMonitorSelectorMechanism(selectorMechanism *monitoringv1.SelectorMechanism, version semver.Version) error {
 	if ptr.Deref(selectorMechanism, monitoringv1.SelectorMechanismRelabel) == monitoringv1.SelectorMechanismRole && !version.GTE(semver.MustParse("2.17.0")) {
 		return fmt.Errorf("RoleSelector selectorMechanism is only supported in Prometheus 2.17.0 and newer")
@@ -466,9 +455,9 @@ func (rs *ResourceSelector) SelectPodMonitors(ctx context.Context, listFn ListAl
 			rs.eventRecorder.Eventf(pm, v1.EventTypeWarning, operator.InvalidConfigurationEvent, "PodMonitor %s was rejected due to invalid configuration: %v", pm.GetName(), err)
 		}
 
-		err = validaMatchExpressions(pm.Spec.Selector.MatchExpressions)
+		_, err = metav1.LabelSelectorAsSelector(&pm.Spec.Selector)
 		if err != nil {
-			rejectFn(pm, fmt.Errorf("failed to create label requirement: %w", err))
+			rejectFn(pm, fmt.Errorf("failed to parse label selector: %w", err))
 			continue
 		}
 
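With the helper gone, selector validation in the two selection paths above defers entirely to `metav1.LabelSelectorAsSelector`: genuinely malformed selectors still hit the `rejectFn` path, while `DoesNotExist` expressions now pass. A short sketch (hypothetical example, not repository code) of both outcomes:

```go
package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// An In expression with no values is invalid; LabelSelectorAsSelector
	// surfaces the error, so such a monitor is still rejected.
	bad := metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "group", Operator: metav1.LabelSelectorOpIn}, // missing Values
		},
	}
	if _, err := metav1.LabelSelectorAsSelector(&bad); err != nil {
		fmt.Println("rejected:", err) // e.g. values can't be empty for 'in'/'notin'
	}

	// A DoesNotExist expression, which the old helper wrongly rejected, now parses.
	good := metav1.LabelSelector{
		MatchExpressions: []metav1.LabelSelectorRequirement{
			{Key: "groupb", Operator: metav1.LabelSelectorOpDoesNotExist},
		},
	}
	if s, err := metav1.LabelSelectorAsSelector(&good); err == nil {
		fmt.Println("accepted:", s.String()) // !groupb
	}
}
```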
@@ -14,7 +14,7 @@ scrape_configs:
       - default
     selectors:
     - role: pod
-      label: group=group1,group in (group2)
+      label: group=group1,group in (group2),!groupb
   scrape_interval: 30s
   relabel_configs:
   - source_labels: