
chore: refactor shared functions in pkg/prometheus

Signed-off-by: Simon Pasquier <spasquie@redhat.com>
Simon Pasquier 2024-10-23 10:59:20 +02:00
parent 2fc85bb66d
commit 7ac474a3b8
No known key found for this signature in database
GPG key ID: 0190A66C0A10FC4F
10 changed files with 225 additions and 236 deletions
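In short, helpers that were free functions in pkg/prometheus taking a CommonPrometheusFields value plus a *ConfigGenerator become methods on ConfigGenerator, which already holds the Prometheus object. A minimal before/after sketch of the call sites as they appear in the diffs below (the variable names follow the existing code; nothing here is new API beyond what the commit itself introduces):

// Before: package-level helpers, each call threading cpf through.
promArgs := prompkg.BuildCommonPrometheusArgs(cpf, cg)
startupProbe, readinessProbe, livenessProbe := prompkg.MakeProbes(cpf, cg)
podAnnotations, podLabels := prompkg.BuildPodMetadata(cpf, cg)

// After: methods on *ConfigGenerator, no cpf argument needed.
promArgs := cg.BuildCommonPrometheusArgs()
startupProbe, readinessProbe, livenessProbe := cg.BuildProbes()
podAnnotations, podLabels := cg.BuildPodMetadata()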


@@ -1,28 +0,0 @@
// Copyright 2023 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prometheusagent
import (
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
prompkg "github.com/prometheus-operator/prometheus-operator/pkg/prometheus"
)
func buildAgentArgs(
cpf monitoringv1.CommonPrometheusFields,
cg *prompkg.ConfigGenerator,
) []monitoringv1.Argument {
promArgs := prompkg.BuildCommonPrometheusArgs(cpf, cg)
return appendAgentArgs(promArgs, cg, cpf.WALCompression)
}


@@ -17,7 +17,6 @@ package prometheusagent
import (
"fmt"
"golang.org/x/exp/slices"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -91,11 +90,7 @@ func makeDaemonSetSpec(
return nil, err
}
if cg.Version().Major == 2 && !slices.Contains(cpf.EnableFeatures, "agent") {
cpf.EnableFeatures = append(cpf.EnableFeatures, "agent")
}
promArgs := buildAgentArgs(cpf, cg)
promArgs := buildAgentArgs(cg, cpf.WALCompression)
volumes, promVolumeMounts, err := prompkg.BuildCommonVolumes(p, tlsSecrets, false)
if err != nil {
@@ -118,9 +113,9 @@ func makeDaemonSetSpec(
configReloaderWebConfigFile = confArg.Value
configReloaderVolumeMounts = append(configReloaderVolumeMounts, configMount...)
startupProbe, readinessProbe, livenessProbe := prompkg.MakeProbes(cpf, cg)
startupProbe, readinessProbe, livenessProbe := cg.BuildProbes()
podAnnotations, podLabels := prompkg.BuildPodMetadata(cpf, cg)
podAnnotations, podLabels := cg.BuildPodMetadata()
// In cases where an existing selector label is modified, or a new one is added, new daemonset cannot match existing pods.
// We should try to avoid removing such immutable fields whenever possible since doing
// so forces us to enter the 'recreate cycle' and can potentially lead to downtime.


@@ -1107,8 +1107,6 @@ func (c *Operator) enqueueForNamespace(store cache.Store, nsName string) {
return
}
c.logger.Info("we are gonna check if it Matches")
if ScrapeConfigNSSelector.Matches(labels.Set(ns.Labels)) {
c.rr.EnqueueForReconciliation(p)
return


@@ -17,7 +17,6 @@ package prometheusagent
import (
"fmt"
"golang.org/x/exp/slices"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -153,11 +152,7 @@ func makeStatefulSetSpec(
return nil, err
}
if cg.Version().Major == 2 && !slices.Contains(cpf.EnableFeatures, "agent") {
cpf.EnableFeatures = append(cpf.EnableFeatures, "agent")
}
promArgs := buildAgentArgs(cpf, cg)
promArgs := buildAgentArgs(cg, cpf.WALCompression)
volumes, promVolumeMounts, err := prompkg.BuildCommonVolumes(p, tlsSecrets, true)
if err != nil {
@@ -193,9 +188,9 @@ func makeStatefulSetSpec(
webConfigGenerator.Warn("web.config.file")
}
startupProbe, readinessProbe, livenessProbe := prompkg.MakeProbes(cpf, webConfigGenerator)
startupProbe, readinessProbe, livenessProbe := cg.BuildProbes()
podAnnotations, podLabels := prompkg.BuildPodMetadata(cpf, cg)
podAnnotations, podLabels := cg.BuildPodMetadata()
// In cases where an existing selector label is modified, or a new one is added, new sts cannot match existing pods.
// We should try to avoid removing such immutable fields whenever possible since doing
// so forces us to enter the 'recreate cycle' and can potentially lead to downtime.
@@ -359,12 +354,14 @@ func makeStatefulSetService(p *monitoringv1alpha1.PrometheusAgent, config prompk
return svc
}
// appendAgentArgs appends arguments that are only valid for the Prometheus agent.
func appendAgentArgs(
promArgs []monitoringv1.Argument,
cg *prompkg.ConfigGenerator,
walCompression *bool) []monitoringv1.Argument {
if cg.Version().Major == 3 {
// buildAgentArgs returns the CLI arguments that are only valid for the Prometheus agent.
func buildAgentArgs(cg *prompkg.ConfigGenerator, walCompression *bool) []monitoringv1.Argument {
promArgs := cg.BuildCommonPrometheusArgs()
switch cg.Version().Major {
case 2:
promArgs = append(promArgs, monitoringv1.Argument{Name: "enable-feature", Value: "agent"})
case 3:
promArgs = append(promArgs, monitoringv1.Argument{Name: "agent"})
}
@@ -379,5 +376,6 @@ func appendAgentArgs(
}
promArgs = cg.AppendCommandlineArgument(promArgs, arg)
}
return promArgs
}
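The net effect at the two agent call sites (DaemonSet and StatefulSet specs): the v2-only mutation of cpf.EnableFeatures disappears, and the agent mode flag is derived from the version inside buildAgentArgs instead. A short sketch, assuming cg and cpf as in the surrounding functions:

// Previously the call sites had to force the feature flag for v2 first:
//   if cg.Version().Major == 2 && !slices.Contains(cpf.EnableFeatures, "agent") {
//       cpf.EnableFeatures = append(cpf.EnableFeatures, "agent")
//   }
//   promArgs := buildAgentArgs(cpf, cg)
//
// Now the helper builds the common args itself and appends the mode flag
// (v2: --enable-feature=agent, v3: --agent) based on cg.Version().Major.
promArgs := buildAgentArgs(cg, cpf.WALCompression)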


@@ -17,15 +17,12 @@ package prometheus
import (
"bytes"
"fmt"
"math"
"net/url"
"path"
"path/filepath"
"strings"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
@@ -195,80 +192,6 @@ func queryLogFilePath(queryLogFile string) string {
return filepath.Join(DefaultQueryLogDirectory, queryLogFile)
}
// BuildCommonPrometheusArgs builds a slice of arguments that are common between Prometheus Server and Agent.
func BuildCommonPrometheusArgs(cpf monitoringv1.CommonPrometheusFields, cg *ConfigGenerator) []monitoringv1.Argument {
promArgs := []monitoringv1.Argument{
{Name: "web.console.templates", Value: "/etc/prometheus/consoles"},
{Name: "web.console.libraries", Value: "/etc/prometheus/console_libraries"},
{Name: "config.file", Value: path.Join(ConfOutDir, ConfigEnvsubstFilename)},
}
if ptr.Deref(cpf.ReloadStrategy, monitoringv1.HTTPReloadStrategyType) == monitoringv1.HTTPReloadStrategyType {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.enable-lifecycle"})
}
if cpf.Web != nil {
if cpf.Web.PageTitle != nil {
promArgs = cg.WithMinimumVersion("2.6.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "web.page-title", Value: *cpf.Web.PageTitle})
}
if cpf.Web.MaxConnections != nil {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.max-connections", Value: fmt.Sprintf("%d", *cpf.Web.MaxConnections)})
}
}
if cpf.EnableRemoteWriteReceiver {
promArgs = cg.WithMinimumVersion("2.33.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "web.enable-remote-write-receiver"})
if len(cpf.RemoteWriteReceiverMessageVersions) > 0 {
versions := make([]string, 0, len(cpf.RemoteWriteReceiverMessageVersions))
for _, v := range cpf.RemoteWriteReceiverMessageVersions {
versions = append(versions, toProtobufMessageVersion(v))
}
promArgs = cg.WithMinimumVersion("2.54.0").AppendCommandlineArgument(
promArgs,
monitoringv1.Argument{
Name: "web.remote-write-receiver.accepted-protobuf-messages",
Value: strings.Join(versions, ","),
},
)
}
}
for _, rw := range cpf.RemoteWrite {
if ptr.Deref(rw.MessageVersion, monitoringv1.RemoteWriteMessageVersion1_0) == monitoringv1.RemoteWriteMessageVersion2_0 {
promArgs = cg.WithMinimumVersion("2.54.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "enable-feature", Value: "metadata-wal-records"})
}
}
if len(cpf.EnableFeatures) > 0 {
efs := make([]string, len(cpf.EnableFeatures))
for i := range cpf.EnableFeatures {
efs[i] = string(cpf.EnableFeatures[i])
}
promArgs = cg.WithMinimumVersion("2.25.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "enable-feature", Value: strings.Join(efs, ",")})
}
if cpf.ExternalURL != "" {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.external-url", Value: cpf.ExternalURL})
}
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.route-prefix", Value: cpf.WebRoutePrefix()})
if cpf.LogLevel != "" && cpf.LogLevel != "info" {
promArgs = append(promArgs, monitoringv1.Argument{Name: "log.level", Value: cpf.LogLevel})
}
if cpf.LogFormat != "" && cpf.LogFormat != "logfmt" {
promArgs = cg.WithMinimumVersion("2.6.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "log.format", Value: cpf.LogFormat})
}
if cpf.ListenLocal {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.listen-address", Value: "127.0.0.1:9090"})
}
return promArgs
}
// BuildCommonVolumes returns a set of volumes to be mounted on the spec that are common between Prometheus Server and Agent.
func BuildCommonVolumes(p monitoringv1.PrometheusInterface, tlsSecrets *operator.ShardedSecret, statefulSet bool) ([]v1.Volume, []v1.VolumeMount, error) {
cpf := p.GetCommonPrometheusFields()
@@ -378,51 +301,6 @@ func VolumeClaimName(p monitoringv1.PrometheusInterface, cpf monitoringv1.Common
return volName
}
func ProbeHandler(probePath string, cpf monitoringv1.CommonPrometheusFields, webConfigGenerator *ConfigGenerator) v1.ProbeHandler {
probePath = path.Clean(cpf.WebRoutePrefix() + probePath)
handler := v1.ProbeHandler{}
if cpf.ListenLocal {
probeURL := url.URL{
Scheme: "http",
Host: "localhost:9090",
Path: probePath,
}
handler.Exec = operator.ExecAction(probeURL.String())
return handler
}
handler.HTTPGet = &v1.HTTPGetAction{
Path: probePath,
Port: intstr.FromString(cpf.PortName),
}
if cpf.Web != nil && cpf.Web.TLSConfig != nil && webConfigGenerator.IsCompatible() {
handler.HTTPGet.Scheme = v1.URISchemeHTTPS
}
return handler
}
func BuildPodMetadata(cpf monitoringv1.CommonPrometheusFields, cg *ConfigGenerator) (map[string]string, map[string]string) {
podAnnotations := map[string]string{
"kubectl.kubernetes.io/default-container": "prometheus",
}
podLabels := map[string]string{
"app.kubernetes.io/version": cg.version.String(),
}
if cpf.PodMetadata != nil {
for k, v := range cpf.PodMetadata.Labels {
podLabels[k] = v
}
for k, v := range cpf.PodMetadata.Annotations {
podAnnotations[k] = v
}
}
return podAnnotations, podLabels
}
func BuildConfigReloader(
p monitoringv1.PrometheusInterface,
c Config,
@@ -518,20 +396,6 @@ func MakeK8sTopologySpreadConstraint(selectorLabels map[string]string, tscs []mo
return coreTscs
}
func GetStatupProbePeriodSecondsAndFailureThreshold(cfp monitoringv1.CommonPrometheusFields) (int32, int32) {
var startupPeriodSeconds float64 = 15
var startupFailureThreshold float64 = 60
maximumStartupDurationSeconds := float64(ptr.Deref(cfp.MaximumStartupDurationSeconds, 0))
if maximumStartupDurationSeconds >= 60 {
startupFailureThreshold = math.Ceil(maximumStartupDurationSeconds / 60)
startupPeriodSeconds = math.Ceil(maximumStartupDurationSeconds / startupFailureThreshold)
}
return int32(startupPeriodSeconds), int32(startupFailureThreshold)
}
func MakeContainerPorts(cpf monitoringv1.CommonPrometheusFields) []v1.ContainerPort {
if cpf.ListenLocal {
return nil
@@ -575,41 +439,3 @@ func BuildWebconfig(
return webConfig.GetMountParameters()
}
// The /-/ready handler returns OK only after the TSDB initialization has
// completed. The WAL replay can take a significant time for large setups
// hence we enable the startup probe with a generous failure threshold (15
// minutes) to ensure that the readiness probe only comes into effect once
// Prometheus is effectively ready.
// We don't want to use the /-/healthy handler here because it returns OK as
// soon as the web server is started (irrespective of the WAL replay).
func MakeProbes(
cpf monitoringv1.CommonPrometheusFields,
webConfigGenerator *ConfigGenerator,
) (*v1.Probe, *v1.Probe, *v1.Probe) {
readyProbeHandler := ProbeHandler("/-/ready", cpf, webConfigGenerator)
startupPeriodSeconds, startupFailureThreshold := GetStatupProbePeriodSecondsAndFailureThreshold(cpf)
startupProbe := &v1.Probe{
ProbeHandler: readyProbeHandler,
TimeoutSeconds: ProbeTimeoutSeconds,
PeriodSeconds: startupPeriodSeconds,
FailureThreshold: startupFailureThreshold,
}
readinessProbe := &v1.Probe{
ProbeHandler: readyProbeHandler,
TimeoutSeconds: ProbeTimeoutSeconds,
PeriodSeconds: 5,
FailureThreshold: 3,
}
livenessProbe := &v1.Probe{
ProbeHandler: ProbeHandler("/-/healthy", cpf, webConfigGenerator),
TimeoutSeconds: ProbeTimeoutSeconds,
PeriodSeconds: 5,
FailureThreshold: 6,
}
return startupProbe, readinessProbe, livenessProbe
}


@@ -88,9 +88,7 @@ func TestStartupProbeTimeoutSeconds(t *testing.T) {
}
for _, test := range tests {
startupPeriodSeconds, startupFailureThreshold := GetStatupProbePeriodSecondsAndFailureThreshold(monitoringv1.CommonPrometheusFields{
MaximumStartupDurationSeconds: test.maximumStartupDurationSeconds,
})
startupPeriodSeconds, startupFailureThreshold := getStatupProbePeriodSecondsAndFailureThreshold(test.maximumStartupDurationSeconds)
require.Equal(t, test.expectedStartupPeriodSeconds, startupPeriodSeconds)
require.Equal(t, test.expectedStartupFailureThreshold, startupFailureThreshold)
@@ -147,7 +145,7 @@ func TestBuildCommonPrometheusArgsWithRemoteWriteMessageV2(t *testing.T) {
cg, err := NewConfigGenerator(NewLogger(), p)
require.NoError(t, err)
args := BuildCommonPrometheusArgs(p.GetCommonPrometheusFields(), cg)
args := cg.BuildCommonPrometheusArgs()
var found bool
for _, arg := range args {


@@ -20,6 +20,7 @@ import (
"fmt"
"log/slog"
"math"
"net/url"
"os"
"path"
"reflect"
@@ -31,7 +32,9 @@ import (
"github.com/blang/semver/v4"
"github.com/prometheus/common/model"
"gopkg.in/yaml.v2"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/utils/ptr"
"github.com/prometheus-operator/prometheus-operator/internal/util"
@@ -954,6 +957,187 @@ func initRelabelings() []yaml.MapSlice {
}
}
// BuildCommonPrometheusArgs builds a slice of arguments that are common between Prometheus Server and Agent.
func (cg *ConfigGenerator) BuildCommonPrometheusArgs() []monitoringv1.Argument {
cpf := cg.prom.GetCommonPrometheusFields()
promArgs := []monitoringv1.Argument{
{Name: "web.console.templates", Value: "/etc/prometheus/consoles"},
{Name: "web.console.libraries", Value: "/etc/prometheus/console_libraries"},
{Name: "config.file", Value: path.Join(ConfOutDir, ConfigEnvsubstFilename)},
}
if ptr.Deref(cpf.ReloadStrategy, monitoringv1.HTTPReloadStrategyType) == monitoringv1.HTTPReloadStrategyType {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.enable-lifecycle"})
}
if cpf.Web != nil {
if cpf.Web.PageTitle != nil {
promArgs = cg.WithMinimumVersion("2.6.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "web.page-title", Value: *cpf.Web.PageTitle})
}
if cpf.Web.MaxConnections != nil {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.max-connections", Value: fmt.Sprintf("%d", *cpf.Web.MaxConnections)})
}
}
if cpf.EnableRemoteWriteReceiver {
promArgs = cg.WithMinimumVersion("2.33.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "web.enable-remote-write-receiver"})
if len(cpf.RemoteWriteReceiverMessageVersions) > 0 {
versions := make([]string, 0, len(cpf.RemoteWriteReceiverMessageVersions))
for _, v := range cpf.RemoteWriteReceiverMessageVersions {
versions = append(versions, toProtobufMessageVersion(v))
}
promArgs = cg.WithMinimumVersion("2.54.0").AppendCommandlineArgument(
promArgs,
monitoringv1.Argument{
Name: "web.remote-write-receiver.accepted-protobuf-messages",
Value: strings.Join(versions, ","),
},
)
}
}
for _, rw := range cpf.RemoteWrite {
if ptr.Deref(rw.MessageVersion, monitoringv1.RemoteWriteMessageVersion1_0) == monitoringv1.RemoteWriteMessageVersion2_0 {
promArgs = cg.WithMinimumVersion("2.54.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "enable-feature", Value: "metadata-wal-records"})
}
}
if len(cpf.EnableFeatures) > 0 {
efs := make([]string, len(cpf.EnableFeatures))
for i := range cpf.EnableFeatures {
efs[i] = string(cpf.EnableFeatures[i])
}
promArgs = cg.WithMinimumVersion("2.25.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "enable-feature", Value: strings.Join(efs, ",")})
}
if cpf.ExternalURL != "" {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.external-url", Value: cpf.ExternalURL})
}
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.route-prefix", Value: cpf.WebRoutePrefix()})
if cpf.LogLevel != "" && cpf.LogLevel != "info" {
promArgs = append(promArgs, monitoringv1.Argument{Name: "log.level", Value: cpf.LogLevel})
}
if cpf.LogFormat != "" && cpf.LogFormat != "logfmt" {
promArgs = cg.WithMinimumVersion("2.6.0").AppendCommandlineArgument(promArgs, monitoringv1.Argument{Name: "log.format", Value: cpf.LogFormat})
}
if cpf.ListenLocal {
promArgs = append(promArgs, monitoringv1.Argument{Name: "web.listen-address", Value: "127.0.0.1:9090"})
}
return promArgs
}
func (cg *ConfigGenerator) BuildPodMetadata() (map[string]string, map[string]string) {
podAnnotations := map[string]string{
"kubectl.kubernetes.io/default-container": "prometheus",
}
podLabels := map[string]string{
"app.kubernetes.io/version": cg.version.String(),
}
podMetadata := cg.prom.GetCommonPrometheusFields().PodMetadata
if podMetadata != nil {
for k, v := range podMetadata.Labels {
podLabels[k] = v
}
for k, v := range podMetadata.Annotations {
podAnnotations[k] = v
}
}
return podAnnotations, podLabels
}
// BuildProbes returns a tuple of 3 probe definitions:
// 1. startup probe
// 2. readiness probe
// 3. liveness probe
//
// The /-/ready handler returns OK only after the TSDB initialization has
// completed. The WAL replay can take a significant time for large setups
// hence we enable the startup probe with a generous failure threshold (15
// minutes) to ensure that the readiness probe only comes into effect once
// Prometheus is effectively ready.
// We don't want to use the /-/healthy handler here because it returns OK as
// soon as the web server is started (irrespective of the WAL replay).
func (cg *ConfigGenerator) BuildProbes() (*v1.Probe, *v1.Probe, *v1.Probe) {
readyProbeHandler := cg.buildProbeHandler("/-/ready")
startupPeriodSeconds, startupFailureThreshold := getStatupProbePeriodSecondsAndFailureThreshold(cg.prom.GetCommonPrometheusFields().MaximumStartupDurationSeconds)
startupProbe := &v1.Probe{
ProbeHandler: readyProbeHandler,
TimeoutSeconds: ProbeTimeoutSeconds,
PeriodSeconds: startupPeriodSeconds,
FailureThreshold: startupFailureThreshold,
}
readinessProbe := &v1.Probe{
ProbeHandler: readyProbeHandler,
TimeoutSeconds: ProbeTimeoutSeconds,
PeriodSeconds: 5,
FailureThreshold: 3,
}
livenessProbe := &v1.Probe{
ProbeHandler: cg.buildProbeHandler("/-/healthy"),
TimeoutSeconds: ProbeTimeoutSeconds,
PeriodSeconds: 5,
FailureThreshold: 6,
}
return startupProbe, readinessProbe, livenessProbe
}
func (cg *ConfigGenerator) buildProbeHandler(probePath string) v1.ProbeHandler {
cpf := cg.prom.GetCommonPrometheusFields()
probePath = path.Clean(cpf.WebRoutePrefix() + probePath)
handler := v1.ProbeHandler{}
if cpf.ListenLocal {
probeURL := url.URL{
Scheme: "http",
Host: "localhost:9090",
Path: probePath,
}
handler.Exec = operator.ExecAction(probeURL.String())
return handler
}
handler.HTTPGet = &v1.HTTPGetAction{
Path: probePath,
Port: intstr.FromString(cpf.PortName),
}
if cpf.Web != nil && cpf.Web.TLSConfig != nil && cg.IsCompatible() {
handler.HTTPGet.Scheme = v1.URISchemeHTTPS
}
return handler
}
func getStatupProbePeriodSecondsAndFailureThreshold(maxStartupDurationSeconds *int32) (int32, int32) {
var (
startupPeriodSeconds float64 = 15
startupFailureThreshold float64 = 60
)
maximumStartupDurationSeconds := float64(ptr.Deref(maxStartupDurationSeconds, 0))
if maximumStartupDurationSeconds >= 60 {
startupFailureThreshold = math.Ceil(maximumStartupDurationSeconds / 60)
startupPeriodSeconds = math.Ceil(maximumStartupDurationSeconds / startupFailureThreshold)
}
return int32(startupPeriodSeconds), int32(startupFailureThreshold)
}
func (cg *ConfigGenerator) generatePodMonitorConfig(
m *monitoringv1.PodMonitor,
ep monitoringv1.PodMetricsEndpoint,
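A worked example of the startup-probe arithmetic added above, written as a test-style sketch (assumed to sit next to the existing tests in pkg/prometheus; require and ptr.To are assumptions carried over from the surrounding test file, not part of this commit):

func TestStartupProbeArithmeticSketch(t *testing.T) {
	// Defaults when MaximumStartupDurationSeconds is unset:
	// 15s period * 60 failures = 900s, the "15 minutes" from the BuildProbes comment.
	period, threshold := getStatupProbePeriodSecondsAndFailureThreshold(nil)
	require.Equal(t, int32(15), period)
	require.Equal(t, int32(60), threshold)

	// With MaximumStartupDurationSeconds = 600:
	// threshold = ceil(600/60) = 10, period = ceil(600/10) = 60, i.e. up to 600s.
	period, threshold = getStatupProbePeriodSecondsAndFailureThreshold(ptr.To(int32(600)))
	require.Equal(t, int32(60), period)
	require.Equal(t, int32(10), threshold)
}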


@@ -207,8 +207,7 @@ func makeStatefulSetSpec(
return nil, err
}
promArgs := prompkg.BuildCommonPrometheusArgs(cpf, cg)
promArgs = appendServerArgs(promArgs, cg, p)
promArgs := buildServerArgs(cg, p)
volumes, promVolumeMounts, err := prompkg.BuildCommonVolumes(p, tlsSecrets, true)
if err != nil {
@@ -246,9 +245,9 @@ func makeStatefulSetSpec(
webConfigGenerator.Warn("web.config.file")
}
startupProbe, readinessProbe, livenessProbe := prompkg.MakeProbes(cpf, webConfigGenerator)
startupProbe, readinessProbe, livenessProbe := cg.BuildProbes()
podAnnotations, podLabels := prompkg.BuildPodMetadata(cpf, cg)
podAnnotations, podLabels := cg.BuildPodMetadata()
// In cases where an existing selector label is modified, or a new one is added, new sts cannot match existing pods.
// We should try to avoid removing such immutable fields whenever possible since doing
// so forces us to enter the 'recreate cycle' and can potentially lead to downtime.
@@ -426,12 +425,14 @@ func makeStatefulSetSpec(
return &spec, nil
}
// appendServerArgs appends arguments that are only valid for the Prometheus server.
func appendServerArgs(promArgs []monitoringv1.Argument, cg *prompkg.ConfigGenerator, p *monitoringv1.Prometheus) []monitoringv1.Argument {
// buildServerArgs returns the CLI arguments that are only valid for the Prometheus server.
func buildServerArgs(cg *prompkg.ConfigGenerator, p *monitoringv1.Prometheus) []monitoringv1.Argument {
var (
promArgs = cg.BuildCommonPrometheusArgs()
retentionTimeFlagName = "storage.tsdb.retention.time"
retentionTimeFlagValue = string(p.Spec.Retention)
)
if cg.WithMaximumVersion("2.7.0").IsCompatible() {
retentionTimeFlagName = "storage.tsdb.retention"
if p.Spec.Retention == "" {


@@ -66,6 +66,7 @@ func (f *Framework) NewTestCtx(t *testing.T) *TestCtx {
tc.collectAlertmanagers(b, f)
tc.collectPrometheuses(b, f)
tc.collectThanosRulers(b, f)
tc.collectPrometheusAgents(b, f)
tc.collectLogs(b, f)
tc.collectEvents(b, f)
@@ -154,6 +155,21 @@ func (ctx *TestCtx) collectPrometheuses(w io.Writer, f *Framework) {
}
}
func (ctx *TestCtx) collectPrometheusAgents(w io.Writer, f *Framework) {
fmt.Fprintln(w, "=== PrometheusAgents")
for _, ns := range ctx.namespaces {
ps, err := f.MonClientV1alpha1.PrometheusAgents(ns).List(context.Background(), metav1.ListOptions{})
if err != nil {
fmt.Fprintf(w, "%s: failed to get prometheusagents: %v\n", ns, err)
continue
}
for _, p := range ps.Items {
collectConditions(w, fmt.Sprintf("PrometheusAgent=%s/%s", p.Namespace, p.Name), p.Status.Conditions)
}
}
}
func (ctx *TestCtx) collectThanosRulers(w io.Writer, f *Framework) {
fmt.Fprintln(w, "=== ThanosRulers")
for _, ns := range ctx.namespaces {


@@ -71,6 +71,7 @@ func (f *Framework) WritePodLogs(ctx context.Context, w io.Writer, ns, pod strin
fmt.Fprintf(w, "=== Logs (pod=%s/%s container=%s)\n", ns, pod, c)
_, err = w.Write(resp)
fmt.Fprint(w, "\n")
if err != nil {
return fmt.Errorf("failed to write logs: %w", err)
}