1
0
Fork 0
mirror of https://github.com/prometheus-operator/prometheus-operator.git synced 2025-04-16 09:16:38 +00:00

chore: add RBAC for endpointslices to Prometheus SA

This change also adds e2e tests for the new EndpointSlice discovery
role.

Signed-off-by: Simon Pasquier <spasquie@redhat.com>
This commit is contained in:
Simon Pasquier 2024-07-25 10:16:30 +02:00
parent 813bc2b6b9
commit 309b7d06ad
No known key found for this signature in database
GPG key ID: 0190A66C0A10FC4F
5 changed files with 66 additions and 49 deletions

View file

@@ -169,6 +169,11 @@ rules:
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs: ["get", "list", "watch"]
- apiGroups:
- networking.k8s.io
resources:

View file

@@ -166,6 +166,11 @@ rules:
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs: ["get", "list", "watch"]
- apiGroups:
- networking.k8s.io
resources:

View file

@@ -15,6 +15,11 @@ rules:
resources:
- configmaps
verbs: ["get"]
- apiGroups:
- discovery.k8s.io
resources:
- endpointslices
verbs: ["get", "list", "watch"]
- apiGroups:
- networking.k8s.io
resources:

View file

@@ -70,9 +70,11 @@ type ConfigGenerator struct {
}
// NewConfigGenerator creates a ConfigGenerator for the provided Prometheus resource.
func NewConfigGenerator(logger log.Logger,
func NewConfigGenerator(
logger log.Logger,
p monitoringv1.PrometheusInterface,
endpointSliceSupported bool) (*ConfigGenerator, error) {
endpointSliceSupported bool,
) (*ConfigGenerator, error) {
if logger == nil {
logger = log.NewNopLogger()
}
@@ -96,28 +98,11 @@ func NewConfigGenerator(logger log.Logger,
return nil, fmt.Errorf("failed to parse scrape classes: %w", err)
}
endpointSliceConfigured := false // Always assume false to preserve original prometheus-operator behaviour.
// Check if the user has explicitly set the service discovery role to use.
switch serviceDiscoveryRole := ptr.Deref(cpf.ServiceDiscoveryRole, monitoringv1.EndpointsRole); serviceDiscoveryRole {
case monitoringv1.EndpointSliceRole:
level.Info(logger).Log("msg", "using endpointslice as service discovery role")
endpointSliceConfigured = true
case monitoringv1.EndpointsRole:
level.Info(logger).Log("msg", "using endpoints as service discovery role")
endpointSliceConfigured = false
default:
level.Warn(logger).Log("msg",
"unknown service discovery role %q, defaulting to endpoints. Configure serviceDiscoveryRole to 'EndpointSlice' to use endpointslice as service discovery role.",
serviceDiscoveryRole)
endpointSliceConfigured = false
}
return &ConfigGenerator{
logger: logger,
version: version,
prom: p,
useEndpointSlice: endpointSliceConfigured && endpointSliceSupported,
useEndpointSlice: endpointSliceSupported && ptr.Deref(cpf.ServiceDiscoveryRole, monitoringv1.EndpointsRole) == monitoringv1.EndpointSliceRole,
scrapeClasses: scrapeClasses,
defaultScrapeClassName: defaultScrapeClassName,
}, nil

View file

@@ -2110,6 +2110,20 @@ func testPromWhenDeleteCRDCleanUpViaOwnerRef(t *testing.T) {
}
func testPromDiscovery(t *testing.T) {
for _, tc := range []struct {
role *monitoringv1.ServiceDiscoveryRole
}{
{
role: nil,
},
{
role: ptr.To(monitoringv1.EndpointsRole),
},
{
role: ptr.To(monitoringv1.EndpointSliceRole),
},
} {
t.Run(fmt.Sprintf("role=%s", ptr.Deref(tc.role, "<nil>")), func(t *testing.T) {
t.Parallel()
testCtx := framework.NewTestCtx(t)
defer testCtx.Cleanup(t)
@@ -2126,6 +2140,7 @@ func testPromDiscovery(t *testing.T) {
}
p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
p.Spec.ServiceDiscoveryRole = tc.role
_, err := framework.CreatePrometheusAndWaitUntilReady(context.Background(), ns, p)
if err != nil {
t.Fatal(err)
@@ -2146,6 +2161,8 @@ func testPromDiscovery(t *testing.T) {
if err != nil {
t.Fatal(fmt.Errorf("validating Prometheus target discovery failed: %w", err))
}
})
}
}
func testPromSharedResourcesReconciliation(t *testing.T) {