Mirror of https://github.com/prometheus-operator/prometheus-operator.git, synced 2025-04-21 11:48:53 +00:00
Merge pull request #1246 from mxinden/additional-config
prometheus: Allow specifying additional scrape configs via secret
This commit is contained in commit e45f7653a7.
17 changed files with 206 additions and 5 deletions
Documentation
example
  additional-scrape-configs
    additional-scrape-configs.yaml
    prometheus-additional.yaml
    prometheus-cluster-role-binding.yaml
    prometheus-cluster-role.yaml
    prometheus-service-account.yaml
    prometheus.yaml
  prometheus-operator-crd
pkg
  client/monitoring/v1
  prometheus
test
@@ -225,6 +225,7 @@ Specification of the desired behavior of the Prometheus cluster. More info: http
| securityContext | SecurityContext holds pod-level security attributes and common container settings. This defaults to a non-root user with uid 1000 and gid 2000 for Prometheus >v2.0 and the default PodSecurityContext for other versions. | *v1.PodSecurityContext | false |
| listenLocal | ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP. | bool | false |
| containers | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | []v1.Container | false |
| additionalScrapeConfigs | AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Prometheus scrape configurations. The scrape configurations specified here are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>. As the scrape configs are appended as-is, the user is responsible for ensuring they are valid. Note that using this feature may make it possible to break upgrades of Prometheus; it is advised to review the Prometheus release notes to ensure that no incompatible scrape configs will break Prometheus after the upgrade. | *[v1.SecretKeySelector](https://v1-6.docs.kubernetes.io/docs/api-reference/v1.6/#secretkeyselector-v1-core) | false |

[Back to TOC](#table-of-contents)
@@ -3,6 +3,10 @@
<i class="fa fa-exclamation-triangle"></i><b> Note:</b> Starting with v0.12.0, Prometheus Operator requires use of Kubernetes v1.7.x and up.
</div>

**Deprecation Warning:** The _custom configuration_ option of the Prometheus Operator will be deprecated in favor of the [_additional scrape config_](./api.md#prometheusspec) option.

# Custom Configuration

There are a few reasons why one may want to provide a custom configuration to Prometheus instances instead of having the Prometheus Operator generate the configuration based on `ServiceMonitor` objects.
@@ -0,0 +1,9 @@
# this file was generated with the following command:
# $ kubectl create secret generic additional-scrape-configs --from-file=prometheus-additional.yaml --dry-run -oyaml > additional-scrape-configs.yaml
apiVersion: v1
data:
  prometheus-additional.yaml: LSBqb2JfbmFtZTogInByb21ldGhldXMiCiAgc3RhdGljX2NvbmZpZ3M6CiAgLSB0YXJnZXRzOiBbImxvY2FsaG9zdDo5MDkwIl0K
kind: Secret
metadata:
  creationTimestamp: null
  name: additional-scrape-configs
@@ -0,0 +1,3 @@
- job_name: "prometheus"
  static_configs:
  - targets: ["localhost:9090"]
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
  name: prometheus
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus
subjects:
- kind: ServiceAccount
  name: prometheus
  namespace: default
@@ -0,0 +1,18 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: prometheus
rules:
- apiGroups: [""]
  resources:
  - nodes
  - services
  - endpoints
  - pods
  verbs: ["get", "list", "watch"]
- apiGroups: [""]
  resources:
  - configmaps
  verbs: ["get"]
- nonResourceURLs: ["/metrics"]
  verbs: ["get"]
@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: prometheus
example/additional-scrape-configs/prometheus.yaml (normal file, 23 lines)
@@ -0,0 +1,23 @@
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: prometheus
  labels:
    prometheus: prometheus
spec:
  replicas: 2
  serviceAccountName: prometheus
  serviceMonitorSelector:
    matchLabels:
      team: frontend
  alerting:
    alertmanagers:
    - namespace: default
      name: alertmanager
      port: web
  resources:
    requests:
      memory: 400Mi
  additionalScrapeConfigs:
    name: additional-scrape-configs
    key: prometheus-additional.yaml
@@ -28,6 +28,21 @@ spec:
          description: 'Specification of the desired behavior of the Prometheus cluster.
            More info: https://github.com/kubernetes/community/blob/master/contributors/devel/api-conventions.md#spec-and-status'
          properties:
            additionalScrapeConfigs:
              description: SecretKeySelector selects a key of a Secret.
              properties:
                key:
                  description: The key of the secret to select from. Must be a valid
                    secret key.
                  type: string
                name:
                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
                  type: string
                optional:
                  description: Specify whether the Secret or it's key must be defined
                  type: boolean
              required:
              - key
            affinity:
              description: Affinity is a group of affinity scheduling rules.
              properties:
@@ -923,11 +923,17 @@ func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenA
						},
					},
				},
				"additionalScrapeConfigs": {
					SchemaProps: spec.SchemaProps{
						Description: "AdditionalScrapeConfigs allows specifying a key of a Secret containing additional Prometheus scrape configurations. The scrape configurations specified here are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form specified in the official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>. As the scrape configs are appended as-is, the user is responsible for ensuring they are valid. Note that using this feature may make it possible to break upgrades of Prometheus; it is advised to review the Prometheus release notes to ensure that no incompatible scrape configs will break Prometheus after the upgrade.",
						Ref:         ref("k8s.io/api/core/v1.SecretKeySelector"),
					},
				},
			},
		},
	},
	Dependencies: []string{
-		"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.AlertingSpec", "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.RemoteReadSpec", "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.RemoteWriteSpec", "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.StorageSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
+		"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.AlertingSpec", "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.RemoteReadSpec", "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.RemoteWriteSpec", "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.StorageSpec", "k8s.io/api/core/v1.Affinity", "k8s.io/api/core/v1.Container", "k8s.io/api/core/v1.LocalObjectReference", "k8s.io/api/core/v1.PodSecurityContext", "k8s.io/api/core/v1.ResourceRequirements", "k8s.io/api/core/v1.SecretKeySelector", "k8s.io/api/core/v1.Toleration", "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector", "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"},
	},
	"github.com/coreos/prometheus-operator/pkg/client/monitoring/v1.PrometheusStatus": {
		Schema: spec.Schema{
@@ -135,6 +135,18 @@ type PrometheusSpec struct {
	// Containers allows injecting additional containers. This is meant to
	// allow adding an authentication proxy to a Prometheus pod.
	Containers []v1.Container `json:"containers,omitempty"`
	// AdditionalScrapeConfigs allows specifying a key of a Secret containing
	// additional Prometheus scrape configurations. The scrape configurations
	// specified here are appended to the configurations generated by the
	// Prometheus Operator. Job configurations must have the form specified in
	// the official Prometheus documentation:
	// https://prometheus.io/docs/prometheus/latest/configuration/configuration/#<scrape_config>.
	// As the scrape configs are appended as-is, the user is responsible for
	// ensuring they are valid. Note that using this feature may make it
	// possible to break upgrades of Prometheus; it is advised to review the
	// Prometheus release notes to ensure that no incompatible scrape configs
	// will break Prometheus after the upgrade.
	AdditionalScrapeConfigs *v1.SecretKeySelector `json:"additionalScrapeConfigs,omitempty"`
}

// Most recent observed status of the Prometheus cluster. Read-only. Not
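For orientation, here is a minimal, hypothetical sketch of populating this field from Go client code; it mirrors what the e2e test further below does. The import path for the monitoring types is taken from the Dependencies list above, and the Secret name and key are illustrative.

```go
package main

import (
	"fmt"

	monitoringv1 "github.com/coreos/prometheus-operator/pkg/client/monitoring/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Reference the key "prometheus-additional.yaml" in the Secret
	// "additional-scrape-configs"; the operator appends its contents to the
	// generated scrape configuration.
	p := &monitoringv1.Prometheus{
		ObjectMeta: metav1.ObjectMeta{Name: "prometheus"},
		Spec: monitoringv1.PrometheusSpec{
			AdditionalScrapeConfigs: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{Name: "additional-scrape-configs"},
				Key:                  "prometheus-additional.yaml",
			},
		},
	}
	fmt.Printf("additional scrape configs: %s/%s\n", p.Spec.AdditionalScrapeConfigs.Name, p.Spec.AdditionalScrapeConfigs.Key)
}
```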
@@ -567,6 +567,15 @@ func (in *PrometheusSpec) DeepCopyInto(out *PrometheusSpec) {
			(*in)[i].DeepCopyInto(&(*out)[i])
		}
	}
	if in.AdditionalScrapeConfigs != nil {
		in, out := &in.AdditionalScrapeConfigs, &out.AdditionalScrapeConfigs
		if *in == nil {
			*out = nil
		} else {
			*out = new(core_v1.SecretKeySelector)
			(*in).DeepCopyInto(*out)
		}
	}
	return
}

@@ -897,6 +897,22 @@ func (c *Operator) destroyPrometheus(key string) error {
	return nil
}

func loadAdditionalScrapeConfigsSecret(additionalScrapeConfigs *v1.SecretKeySelector, s *v1.SecretList) ([]byte, error) {
	if additionalScrapeConfigs != nil {
		for _, secret := range s.Items {
			if secret.Name == additionalScrapeConfigs.Name {
				if c, ok := secret.Data[additionalScrapeConfigs.Key]; ok {
					return c, nil
				}

				return nil, fmt.Errorf("key %v could not be found in Secret %v", additionalScrapeConfigs.Key, additionalScrapeConfigs.Name)
			}
		}
		return nil, fmt.Errorf("secret %v could not be found", additionalScrapeConfigs.Name)
	}
	return nil, nil
}

func loadBasicAuthSecret(basicAuth *monitoringv1.BasicAuth, s *v1.SecretList) (BasicAuthCredentials, error) {
	var username string
	var password string
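To illustrate the lookup and error behavior of loadAdditionalScrapeConfigsSecret above, here is a hypothetical unit-test-style sketch. It assumes the function lives in a package named prometheus under pkg/prometheus; the Secret name, key, and test name are illustrative.

```go
package prometheus

import (
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestLoadAdditionalScrapeConfigsSecretSketch(t *testing.T) {
	selector := &v1.SecretKeySelector{
		LocalObjectReference: v1.LocalObjectReference{Name: "additional-scrape-configs"},
		Key:                  "prometheus-additional.yaml",
	}
	secrets := &v1.SecretList{
		Items: []v1.Secret{{
			ObjectMeta: metav1.ObjectMeta{Name: "additional-scrape-configs"},
			Data: map[string][]byte{
				"prometheus-additional.yaml": []byte("- job_name: \"prometheus\"\n"),
			},
		}},
	}

	// A matching Secret and key yields the raw scrape config bytes.
	if got, err := loadAdditionalScrapeConfigsSecret(selector, secrets); err != nil || len(got) == 0 {
		t.Fatalf("expected scrape config bytes, got %q, err %v", got, err)
	}

	// A nil selector means no additional scrape configs were requested.
	if got, err := loadAdditionalScrapeConfigsSecret(nil, secrets); err != nil || got != nil {
		t.Fatalf("expected nil, nil for a nil selector, got %q, err %v", got, err)
	}
}
```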
@@ -999,8 +1015,13 @@ func (c *Operator) createConfig(p *monitoringv1.Prometheus, ruleFileConfigMaps [
		return err
	}

	additionalScrapeConfigs, err := loadAdditionalScrapeConfigsSecret(p.Spec.AdditionalScrapeConfigs, listSecrets)
	if err != nil {
		return errors.Wrap(err, "loading additional scrape configs from Secret failed")
	}

	// Update secret based on the most recent configuration.
-	conf, err := generateConfig(p, smons, len(ruleFileConfigMaps), basicAuthSecrets)
+	conf, err := generateConfig(p, smons, len(ruleFileConfigMaps), basicAuthSecrets, additionalScrapeConfigs)
	if err != nil {
		return errors.Wrap(err, "generating config failed")
	}
@@ -90,7 +90,7 @@ func buildExternalLabels(p *v1.Prometheus) yaml.MapSlice {
	return stringMapToMapSlice(m)
}

-func generateConfig(p *v1.Prometheus, mons map[string]*v1.ServiceMonitor, ruleConfigMaps int, basicAuthSecrets map[string]BasicAuthCredentials) ([]byte, error) {
+func generateConfig(p *v1.Prometheus, mons map[string]*v1.ServiceMonitor, ruleConfigMaps int, basicAuthSecrets map[string]BasicAuthCredentials, additionalScrapeConfigs []byte) ([]byte, error) {
	versionStr := p.Spec.Version
	if versionStr == "" {
		versionStr = DefaultVersion
@@ -156,9 +156,15 @@ func generateConfig(p *v1.Prometheus, mons map[string]*v1.ServiceMonitor, ruleCo
		}
	}

	var additionalScrapeConfigsYaml []yaml.MapSlice
	err = yaml.Unmarshal([]byte(additionalScrapeConfigs), &additionalScrapeConfigsYaml)
	if err != nil {
		return nil, errors.Wrap(err, "unmarshalling additional scrape configs failed")
	}

	cfg = append(cfg, yaml.MapItem{
		Key: "scrape_configs",
-		Value: scrapeConfigs,
+		Value: append(scrapeConfigs, additionalScrapeConfigsYaml...),
	})

	var alertRelabelConfigs []yaml.MapSlice
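To make the appending behavior above concrete, here is a small self-contained sketch. It assumes gopkg.in/yaml.v2, which provides the MapSlice and MapItem types used in this file; the generated job name is illustrative. The additional scrape configs are unmarshalled into a list of mappings and appended verbatim after the generated ones.

```go
package main

import (
	"fmt"

	yaml "gopkg.in/yaml.v2"
)

func main() {
	// Scrape configs as the operator would generate them from ServiceMonitors
	// (the job name is illustrative).
	generated := []yaml.MapSlice{
		{{Key: "job_name", Value: "default/example-app/0"}},
	}

	// Raw bytes as read from the Secret key, e.g. prometheus-additional.yaml.
	additional := []byte("- job_name: \"prometheus\"\n  static_configs:\n  - targets: [\"localhost:9090\"]\n")

	var additionalYaml []yaml.MapSlice
	if err := yaml.Unmarshal(additional, &additionalYaml); err != nil {
		panic(err)
	}

	cfg := yaml.MapSlice{
		{Key: "scrape_configs", Value: append(generated, additionalYaml...)},
	}

	out, err := yaml.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// Prints a scrape_configs list with the generated job followed by the
	// additional "prometheus" job.
	fmt.Print(string(out))
}
```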
@@ -106,6 +106,7 @@ func TestAlertmanagerBearerToken(t *testing.T) {
		nil,
		0,
		map[string]BasicAuthCredentials{},
		nil,
	)
	if err != nil {
		t.Fatal(err)
@@ -276,6 +277,7 @@ func generateTestConfig(version string) ([]byte, error) {
		makeServiceMonitors(),
		1,
		map[string]BasicAuthCredentials{},
		nil,
	)
}

@@ -268,6 +268,62 @@ scrape_configs:
	}
}

func TestPrometheusAdditionalScrapeConfig(t *testing.T) {
	ctx := framework.NewTestCtx(t)
	defer ctx.Cleanup(t)
	ns := ctx.CreateNamespace(t, framework.KubeClient)
	ctx.SetupPrometheusRBAC(t, ns, framework.KubeClient)

	prometheusName := "test"
	group := "additional-config-test"
	svc := framework.MakePrometheusService(prometheusName, group, v1.ServiceTypeClusterIP)

	s := framework.MakeBasicServiceMonitor(group)
	if _, err := framework.MonClient.ServiceMonitors(ns).Create(s); err != nil {
		t.Fatal("Creating ServiceMonitor failed: ", err)
	}

	additionalConfig := `
- job_name: "prometheus"
  static_configs:
  - targets: ["localhost:9090"]
`
	secret := v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "additional-scrape-configs",
		},
		Data: map[string][]byte{
			"prometheus-additional.yaml": []byte(additionalConfig),
		},
	}
	_, err := framework.KubeClient.CoreV1().Secrets(ns).Create(&secret)
	if err != nil {
		t.Fatal(err)
	}

	p := framework.MakeBasicPrometheus(ns, prometheusName, group, 1)
	p.Spec.AdditionalScrapeConfigs = &v1.SecretKeySelector{
		LocalObjectReference: v1.LocalObjectReference{
			Name: "additional-scrape-configs",
		},
		Key: "prometheus-additional.yaml",
	}
	if err := framework.CreatePrometheusAndWaitUntilReady(ns, p); err != nil {
		t.Fatal(err)
	}

	if finalizerFn, err := testFramework.CreateServiceAndWaitUntilReady(framework.KubeClient, ns, svc); err != nil {
		t.Fatal(errors.Wrap(err, "creating prometheus service failed"))
	} else {
		ctx.AddFinalizerFn(finalizerFn)
	}

	// Wait for ServiceMonitor target, as well as additional-config target
	if err := framework.WaitForTargets(ns, svc.Name, 2); err != nil {
		t.Fatal(err)
	}
}

func TestPrometheusReloadRules(t *testing.T) {
	t.Parallel()

@@ -260,7 +260,7 @@ func promImage(version string) string {
func (f *Framework) WaitForTargets(ns, svcName string, amount int) error {
	var targets []*Target

-	if err := wait.Poll(time.Second, time.Minute*10, func() (bool, error) {
+	if err := wait.Poll(time.Second, time.Minute*5, func() (bool, error) {
		var err error
		targets, err = f.GetActiveTargets(ns, svcName)
		if err != nil {