1
0
Fork 0
mirror of https://github.com/prometheus-operator/prometheus-operator.git synced 2025-04-15 16:56:24 +00:00

prometheus: enforce external labels

Add external labels `prometheus` and `prometheus_replica` via Pod env
variables and Prometheus config expansion.
This commit is contained in:
Fabian Reinartz 2018-03-22 15:59:22 +01:00 committed by Max Leonard Inden
parent df92241c26
commit 50d7b9f56e
No known key found for this signature in database
GPG key ID: 5403C5464810BC26
8 changed files with 86 additions and 70 deletions

View file

@ -50,7 +50,7 @@ func init() {
flagset.StringVar(&cfg.TLSConfig.CAFile, "ca-file", "", "- NOT RECOMMENDED FOR PRODUCTION - Path to TLS CA file.")
flagset.StringVar(&cfg.KubeletObject, "kubelet-service", "", "Service/Endpoints object to write kubelets into in format \"namespace/name\"")
flagset.BoolVar(&cfg.TLSInsecure, "tls-insecure", false, "- NOT RECOMMENDED FOR PRODUCTION - Don't verify API server's CA certificate.")
flagset.StringVar(&cfg.PrometheusConfigReloader, "prometheus-config-reloader", "quay.io/coreos/prometheus-config-reloader:v0.0.3", "Config and rule reload image")
flagset.StringVar(&cfg.PrometheusConfigReloader, "prometheus-config-reloader", "quay.io/coreos/prometheus-config-reloader:v0.0.4", "Config and rule reload image")
flagset.StringVar(&cfg.ConfigReloaderImage, "config-reloader-image", "quay.io/coreos/configmap-reload:v0.0.1", "Reload Image")
flagset.StringVar(&cfg.AlertmanagerDefaultBaseImage, "alertmanager-default-base-image", "quay.io/prometheus/alertmanager", "Alertmanager default base image")
flagset.StringVar(&cfg.PrometheusDefaultBaseImage, "prometheus-default-base-image", "quay.io/prometheus/prometheus", "Prometheus default base image")

View file

@ -30,7 +30,7 @@ spec:
mountPath: /etc/prometheus/rules
readOnly: true
- name: prometheus-config-reloader
image: quay.io/coreos/prometheus-config-reloader:v0.0.3
image: quay.io/coreos/prometheus-config-reloader:v0.0.4
args:
- '-config-volume-dir=/etc/prometheus/config'
- '-rule-volume-dir=/etc/prometheus/rules'

View file

@ -186,7 +186,6 @@ func (rf *ruleFetcher) refresh(ctx context.Context, cms []*configMapRef) error {
return err
}
}
return nil
}
if err := os.RemoveAll(rf.outDir); err != nil {

View file

@ -45,8 +45,6 @@ import (
)
const (
// configFilename is the key under which the generated Prometheus
// configuration is stored (see its use in createConfig below).
configFilename = "prometheus.yaml"
// resyncPeriod is presumably the informer re-sync interval —
// its usage is not visible in this view; confirm against the caller.
resyncPeriod = 5 * time.Minute
)
@ -1015,10 +1013,12 @@ func (c *Operator) createConfig(p *monitoringv1.Prometheus, ruleFileConfigMaps [
return err
}
generatedConf := s.Data[configFilename]
generatedConfigMaps := s.Data[configMapsFilename]
curConfig, curConfigFound := curSecret.Data[configFilename]
curConfigMaps, curConfigMapsFound := curSecret.Data[configMapsFilename]
var (
generatedConf = s.Data[configFilename]
generatedConfigMaps = s.Data[ruleConfigmapsFilename]
curConfig, curConfigFound = curSecret.Data[configFilename]
curConfigMaps, curConfigMapsFound = curSecret.Data[ruleConfigmapsFilename]
)
if curConfigFound && curConfigMapsFound {
if bytes.Equal(curConfig, generatedConf) && bytes.Equal(curConfigMaps, generatedConfigMaps) {
c.logger.Log("msg", "updating config skipped, no configuration change")

View file

@ -37,7 +37,7 @@ func sanitizeLabelName(name string) string {
}
// configMapRuleFileFolder returns the directory under which the rule files
// from the ConfigMap with the given index are mounted inside the Prometheus
// container. The path lives below the config_out volume so the rules sit
// next to the env-substituted configuration.
//
// Fix: the block contained two consecutive return statements (an artifact of
// the rendered diff), leaving the second unreachable; only the updated
// config_out path is kept, matching the rulesDir constant used elsewhere.
func configMapRuleFileFolder(configMapNumber int) string {
	return fmt.Sprintf("/etc/prometheus/config_out/rules/rules-%d/", configMapNumber)
}
func stringMapToMapSlice(m map[string]string) yaml.MapSlice {
@ -78,6 +78,18 @@ func addTLStoYaml(cfg yaml.MapSlice, tls *v1.TLSConfig) yaml.MapSlice {
return cfg
}
// buildExternalLabels assembles the external_labels section for the
// generated Prometheus configuration. It always sets the operator-enforced
// labels "prometheus" (namespace/name of the resource) and
// "prometheus_replica" (expanded from the POD_NAME env var at runtime);
// any labels from the resource's spec are applied afterwards and therefore
// take precedence over the enforced defaults.
func buildExternalLabels(p *v1.Prometheus) yaml.MapSlice {
	labels := map[string]string{
		"prometheus":         fmt.Sprintf("%s/%s", p.Namespace, p.Name),
		"prometheus_replica": "$(POD_NAME)",
	}
	for name, value := range p.Spec.ExternalLabels {
		labels[name] = value
	}
	return stringMapToMapSlice(labels)
}
func generateConfig(p *v1.Prometheus, mons map[string]*v1.ServiceMonitor, ruleConfigMaps int, basicAuthSecrets map[string]BasicAuthCredentials) ([]byte, error) {
versionStr := p.Spec.Version
if versionStr == "" {
@ -106,7 +118,7 @@ func generateConfig(p *v1.Prometheus, mons map[string]*v1.ServiceMonitor, ruleCo
Value: yaml.MapSlice{
{Key: "evaluation_interval", Value: evaluationInterval},
{Key: "scrape_interval", Value: scrapeInterval},
{Key: "external_labels", Value: stringMapToMapSlice(p.Spec.ExternalLabels)},
{Key: "external_labels", Value: buildExternalLabels(p)},
},
})
@ -231,20 +243,17 @@ func generateServiceMonitorConfig(version semver.Version, m *v1.ServiceMonitor,
// Filter targets by services selected by the monitor.
// Exact label matches.
labelKeys := make([]string, len(m.Spec.Selector.MatchLabels))
i = 0
for k, _ := range m.Spec.Selector.MatchLabels {
labelKeys[i] = k
i++
var labelKeys []string
for k := range m.Spec.Selector.MatchLabels {
labelKeys = append(labelKeys, k)
}
sort.Strings(labelKeys)
for i := range labelKeys {
k := labelKeys[i]
v := m.Spec.Selector.MatchLabels[k]
for _, k := range labelKeys {
relabelings = append(relabelings, yaml.MapSlice{
{Key: "action", Value: "keep"},
{Key: "source_labels", Value: []string{"__meta_kubernetes_service_label_" + sanitizeLabelName(k)}},
{Key: "regex", Value: v},
{Key: "regex", Value: m.Spec.Selector.MatchLabels[k]},
})
}
// Set based label matching. We have to map the valid relations

View file

@ -117,7 +117,9 @@ func TestAlertmanagerBearerToken(t *testing.T) {
expected := `global:
evaluation_interval: 30s
scrape_interval: 30s
external_labels: {}
external_labels:
prometheus: default/test
prometheus_replica: $(POD_NAME)
scrape_configs: []
alerting:
alertmanagers:

View file

@ -36,15 +36,17 @@ import (
)
// Defaults and well-known file-system locations used when assembling the
// Prometheus StatefulSet and its configuration secret.
//
// Fix: the block declared governingServiceName, DefaultVersion and
// defaultRetention twice (old and new lines of the rendered diff left in
// place), which is a compile error; only the updated constant set is kept.
const (
	governingServiceName = "prometheus-operated"
	// DefaultVersion is the Prometheus version deployed when the resource
	// does not specify one.
	DefaultVersion   = "v2.2.1"
	defaultRetention = "24h"

	// Mount points inside the Prometheus pod.
	storageDir = "/prometheus"
	confDir    = "/etc/prometheus/config"
	confOutDir = "/etc/prometheus/config_out"
	rulesDir   = "/etc/prometheus/config_out/rules"
	secretsDir = "/etc/prometheus/secrets/"

	// File names within the config secret and the config_out volume.
	configFilename         = "prometheus.yaml"
	configEnvsubstFilename = "prometheus.env.yaml"
	ruleConfigmapsFilename = "configmaps.json"
)
var (
@ -274,8 +276,8 @@ func makeConfigSecret(p *monitoringv1.Prometheus, configMaps []*v1.ConfigMap, co
},
},
Data: map[string][]byte{
configFilename: []byte{},
configMapsFilename: b,
configFilename: []byte{},
ruleConfigmapsFilename: b,
},
}, nil
}
@ -325,9 +327,10 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
promArgs = append(promArgs,
"-storage.local.retention="+p.Spec.Retention,
"-storage.local.num-fingerprint-mutexes=4096",
fmt.Sprintf("-storage.local.path=%s", prometheusStorageDir),
fmt.Sprintf("-storage.local.path=%s", storageDir),
"-storage.local.chunk-encoding-version=2",
fmt.Sprintf("-config.file=%s", prometheusConfFile))
fmt.Sprintf("-config.file=%s", path.Join(confOutDir, configEnvsubstFilename)),
)
// We attempt to specify decent storage tuning flags based on how much the
// requested memory can fit. The user has to specify an appropriate buffering
// in memory limits to catch increased memory usage during query bursts.
@ -356,8 +359,8 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
securityContext = &v1.PodSecurityContext{}
case 2:
promArgs = append(promArgs,
fmt.Sprintf("-config.file=%s", prometheusConfFile),
fmt.Sprintf("-storage.tsdb.path=%s", prometheusStorageDir),
fmt.Sprintf("-config.file=%s", path.Join(confOutDir, configEnvsubstFilename)),
fmt.Sprintf("-storage.tsdb.path=%s", storageDir),
"-storage.tsdb.retention="+p.Spec.Retention,
"-web.enable-lifecycle",
"-storage.tsdb.no-lockfile",
@ -430,7 +433,7 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
},
},
{
Name: "rules",
Name: "config-out",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
@ -439,18 +442,13 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
promVolumeMounts := []v1.VolumeMount{
{
Name: "config",
Name: "config-out",
ReadOnly: true,
MountPath: prometheusConfDir,
},
{
Name: "rules",
ReadOnly: true,
MountPath: prometheusRulesDir,
MountPath: confOutDir,
},
{
Name: volumeName(p.Name),
MountPath: prometheusStorageDir,
MountPath: storageDir,
SubPath: subPathForStorage(p.Spec.Storage),
},
}
@ -467,26 +465,27 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
promVolumeMounts = append(promVolumeMounts, v1.VolumeMount{
Name: "secret-" + s,
ReadOnly: true,
MountPath: prometheusSecretsDir + s,
MountPath: secretsDir + s,
})
}
configReloadVolumeMounts := []v1.VolumeMount{
{
Name: "config",
ReadOnly: true,
MountPath: prometheusConfDir,
MountPath: confDir,
},
{
Name: "rules",
MountPath: prometheusRulesDir,
Name: "config-out",
MountPath: confOutDir,
},
}
configReloadArgs := []string{
fmt.Sprintf("-reload-url=%s", localReloadURL),
fmt.Sprintf("-config-volume-dir=%s", prometheusConfDir),
fmt.Sprintf("-rule-volume-dir=%s", prometheusRulesDir),
fmt.Sprintf("--reload-url=%s", localReloadURL),
fmt.Sprintf("--config-file=%s", path.Join(confDir, configFilename)),
fmt.Sprintf("--rule-list-file=%s", path.Join(confDir, ruleConfigmapsFilename)),
fmt.Sprintf("--config-envsubst-file=%s", path.Join(confOutDir, configEnvsubstFilename)),
fmt.Sprintf("--rule-dir=%s", rulesDir),
}
var livenessProbeHandler v1.Handler
@ -551,10 +550,10 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
}
}
}
podLabels["app"] = "prometheus"
podLabels["prometheus"] = p.Name
finalLabels := c.Labels.Merge(podLabels)
return &appsv1.StatefulSetSpec{
ServiceName: governingServiceName,
Replicas: p.Spec.Replicas,
@ -562,12 +561,9 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
Type: appsv1.RollingUpdateStatefulSetStrategyType,
},
Selector: &metav1.LabelSelector{
MatchLabels: finalLabels,
},
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: finalLabels,
Labels: c.Labels.Merge(podLabels),
Annotations: podAnnotations,
},
Spec: v1.PodSpec{
@ -582,8 +578,16 @@ func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMaps []
ReadinessProbe: readinessProbe,
Resources: p.Spec.Resources,
}, {
Name: "prometheus-config-reloader",
Image: c.PrometheusConfigReloader,
Name: "prometheus-config-reloader",
Image: c.PrometheusConfigReloader,
Env: []v1.EnvVar{
{
Name: "POD_NAME",
ValueFrom: &v1.EnvVarSource{
FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
},
},
},
Args: configReloadArgs,
VolumeMounts: configReloadVolumeMounts,
Resources: v1.ResourceRequirements{

View file

@ -159,14 +159,9 @@ func TestStatefulSetVolumeInitial(t *testing.T) {
{
VolumeMounts: []v1.VolumeMount{
{
Name: "config",
Name: "config-out",
ReadOnly: true,
MountPath: "/etc/prometheus/config",
SubPath: "",
}, {
Name: "rules",
ReadOnly: true,
MountPath: "/etc/prometheus/rules",
MountPath: "/etc/prometheus/config_out",
SubPath: "",
}, {
Name: "prometheus--db",
@ -192,7 +187,7 @@ func TestStatefulSetVolumeInitial(t *testing.T) {
},
},
{
Name: "rules",
Name: "config-out",
VolumeSource: v1.VolumeSource{
EmptyDir: &v1.EmptyDirVolumeSource{},
},
@ -227,8 +222,15 @@ func TestStatefulSetVolumeInitial(t *testing.T) {
require.NoError(t, err)
if !reflect.DeepEqual(expected.Spec.Template.Spec.Volumes, sset.Spec.Template.Spec.Volumes) || !reflect.DeepEqual(expected.Spec.Template.Spec.Containers[0].VolumeMounts, sset.Spec.Template.Spec.Containers[0].VolumeMounts) {
t.Fatal("Volumes mounted in a Pod are not created correctly initially.")
if !reflect.DeepEqual(expected.Spec.Template.Spec.Volumes, sset.Spec.Template.Spec.Volumes) {
t.Fatalf("Unexpected volumes: want %v, got %v",
expected.Spec.Template.Spec.Volumes,
sset.Spec.Template.Spec.Volumes)
}
if !reflect.DeepEqual(expected.Spec.Template.Spec.Containers[0].VolumeMounts, sset.Spec.Template.Spec.Containers[0].VolumeMounts) {
t.Fatalf("Unexpected volume mounts: want %v, got %v",
expected.Spec.Template.Spec.Containers[0].VolumeMounts,
sset.Spec.Template.Spec.Containers[0].VolumeMounts)
}
}