Mirror of https://github.com/prometheus-operator/prometheus-operator.git

prometheus: use a secret for the config

Frederic Branczyk 2017-03-09 10:04:28 +01:00
parent 81543a37bf
commit cadaee5421
6 changed files with 80 additions and 66 deletions


@@ -12,11 +12,11 @@ The third party resources that the Prometheus Operator introduces are:
The `Prometheus` third party resource (TPR) declaratively defines a desired Prometheus setup to run in a Kubernetes cluster. It provides options to configure replication, persistent storage, and the Alertmanagers to which the deployed Prometheus instances send alerts.
For each `Prometheus` TPR, the Operator deploys a properly configured `StatefulSet` in the same namespace. The Prometheus `Pod`s are configured to include two ConfigMaps, `<prometheus-name>` and `<prometheus-name>-rules`, which respectively hold the used configuration file and multiple Prometheus rule files that may contain alerting and recording rules.
For each `Prometheus` TPR, the Operator deploys a properly configured `StatefulSet` in the same namespace. The Prometheus `Pod`s are configured to mount a `Secret` called `<prometheus-name>` containing the configuration for Prometheus and a `ConfigMap` called `<prometheus-name>-rules`, which holds Prometheus rule files that may contain alerting and recording rules.
The TPR allows specifying which `ServiceMonitor`s should be covered by the deployed Prometheus instances based on label selection. The Operator then generates a configuration based on the included `ServiceMonitor`s and updates it in the ConfigMap. It continuously does so for all changes made to `ServiceMonitor`s or to the `Prometheus` TPR itself.
The TPR allows specifying which `ServiceMonitor`s should be covered by the deployed Prometheus instances based on label selection. The Operator then generates a configuration based on the included `ServiceMonitor`s and updates it in the `Secret` containing the configuration. It continuously does so for all changes made to `ServiceMonitor`s or to the `Prometheus` TPR itself.
If no selection of `ServiceMonitor`s is provided, the Operator leaves management of the ConfigMap to the user, which allows providing custom configurations while still benefiting from the Operator's capabilities for managing Prometheus setups.
If no selection of `ServiceMonitor`s is provided, the Operator leaves management of the `Secret` to the user, which allows providing custom configurations while still benefiting from the Operator's capabilities for managing Prometheus setups.
## ServiceMonitor

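To make the described layout concrete, here is a minimal client-go sketch (not part of this commit) that reads the generated configuration Secret. The namespace `monitoring`, the Prometheus name `example`, and the use of a recent client-go with context-aware calls are assumptions for illustration; the Secret name follows the prefixed naming convention visible in the e2e tests further down.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load the kubeconfig from its default location (~/.kube/config).
	cfg, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	client := kubernetes.NewForConfigOrDie(cfg)

	// The generated configuration is stored under the "prometheus.yaml" key of the
	// Secret named after the Prometheus TPR; "prometheus-example" and "monitoring"
	// are illustrative placeholders.
	s, err := client.CoreV1().Secrets("monitoring").Get(context.TODO(), "prometheus-example", metav1.GetOptions{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(s.Data["prometheus.yaml"]))
}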

@@ -33,14 +33,14 @@ e2e-test:
go test -timeout 20m -v ./test/e2e/ $(TEST_RUN_ARGS) --kubeconfig "$(HOME)/.kube/config" --operator-image=$(REPO):$(TAG) --namespace=$(NAMESPACE) --cluster-ip=$(CLUSTER_IP)
e2e-status:
kubectl get prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods,cm,replicationcontrollers --all-namespaces
kubectl get prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods,cm,secrets,replicationcontrollers --all-namespaces
e2e:
$(MAKE) container
$(MAKE) e2e-test
clean-e2e:
kubectl -n $(NAMESPACE) delete prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods,cm,replicationcontrollers --all
kubectl -n $(NAMESPACE) delete prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods,cm,secrets,replicationcontrollers --all
kubectl delete namespace $(NAMESPACE)
promu:


@@ -1,18 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: alertmanager-example
data:
alertmanager.yaml: |-
global:
resolve_timeout: 5m
route:
group_by: ['job']
group_wait: 30s
group_interval: 5m
repeat_interval: 12h
receiver: 'webhook'
receivers:
- name: 'webhook'
webhook_configs:
- url: 'http://alertmanagerwh:30500/'
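This example ConfigMap is removed by the commit. Assuming the same configuration is meant to be provided as a Secret going forward (an assumption; the commit itself only converts the Prometheus configuration), a hedged client-go sketch of creating it as a Secret could look as follows. The helper name, the namespace parameter, and the use of `StringData` (which accepts plain strings so the YAML does not need to be base64-encoded by hand) are illustrative.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createAlertmanagerConfigSecret sketches the removed example above expressed as
// a Secret rather than a ConfigMap; the YAML content is passed in unchanged.
func createAlertmanagerConfigSecret(client kubernetes.Interface, namespace, alertmanagerYAML string) error {
	s := &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name: "alertmanager-example",
		},
		StringData: map[string]string{
			"alertmanager.yaml": alertmanagerYAML,
		},
	}
	_, err := client.CoreV1().Secrets(namespace).Create(context.TODO(), s, metav1.CreateOptions{})
	return err
}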


@@ -59,6 +59,7 @@ type Operator struct {
promInf cache.SharedIndexInformer
smonInf cache.SharedIndexInformer
cmapInf cache.SharedIndexInformer
secrInf cache.SharedIndexInformer
ssetInf cache.SharedIndexInformer
nodeInf cache.SharedIndexInformer
@@ -155,6 +156,13 @@ func New(conf Config, logger log.Logger) (*Operator, error) {
c.cmapInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.handleConfigmapDelete,
})
c.secrInf = cache.NewSharedIndexInformer(
cache.NewListWatchFromClient(c.kclient.Core().RESTClient(), "secrets", api.NamespaceAll, nil),
&v1.Secret{}, resyncPeriod, cache.Indexers{},
)
c.secrInf.AddEventHandler(cache.ResourceEventHandlerFuncs{
DeleteFunc: c.handleSecretDelete,
})
c.ssetInf = cache.NewSharedIndexInformer(
cache.NewListWatchFromClient(c.kclient.Apps().RESTClient(), "statefulsets", api.NamespaceAll, nil),
@@ -216,6 +224,7 @@ func (c *Operator) Run(stopc <-chan struct{}) error {
go c.promInf.Run(stopc)
go c.smonInf.Run(stopc)
go c.cmapInf.Run(stopc)
go c.secrInf.Run(stopc)
go c.ssetInf.Run(stopc)
if c.kubeletSyncEnabled {
@@ -363,6 +372,26 @@ func (c *Operator) handleSmonDelete(obj interface{}) {
}
}
func (c *Operator) handleSecretDelete(obj interface{}) {
o, ok := c.getObject(obj)
if !ok {
return
}
key, ok := c.keyFunc(o)
if !ok {
return
}
_, exists, err := c.promInf.GetIndexer().GetByKey(key)
if err != nil {
c.logger.Log("msg", "index lookup failed", "err", err)
}
if exists {
c.enqueue(key)
}
}
func (c *Operator) handleConfigmapDelete(obj interface{}) {
o, ok := c.getObject(obj)
if !ok {
@@ -546,11 +575,12 @@ func (c *Operator) sync(key string) error {
}
}
// Create ConfigMaps if they don't exist.
cmClient := c.kclient.Core().ConfigMaps(p.Namespace)
if _, err := cmClient.Create(makeEmptyConfig(p.Name)); err != nil && !apierrors.IsAlreadyExists(err) {
// Create Secret and ConfigMap if they don't exist.
sClient := c.kclient.Core().Secrets(p.Namespace)
if _, err := sClient.Create(makeEmptyConfig(p.Name)); err != nil && !apierrors.IsAlreadyExists(err) {
return errors.Wrap(err, "creating empty config file failed")
}
cmClient := c.kclient.Core().ConfigMaps(p.Namespace)
if _, err := cmClient.Create(makeEmptyRules(p.Name)); err != nil && !apierrors.IsAlreadyExists(err) {
return errors.Wrap(err, "creating empty rules file failed")
}
@@ -710,11 +740,11 @@ func (c *Operator) destroyPrometheus(key string) error {
// Delete the auto-generate configuration.
// TODO(fabxc): add an ownerRef at creation so we don't delete config maps
// manually created for Prometheus servers with no ServiceMonitor selectors.
cm := c.kclient.Core().ConfigMaps(sset.Namespace)
if err := cm.Delete(sset.Name, nil); err != nil {
s := c.kclient.Core().Secrets(sset.Namespace)
if err := s.Delete(sset.Name, nil); err != nil {
return errors.Wrap(err, "deleting config file failed")
}
cm := c.kclient.Core().ConfigMaps(sset.Namespace)
if err := cm.Delete(fmt.Sprintf("%s-rules", sset.Name), nil); err != nil {
return errors.Wrap(err, "deleting rules file failed")
}
@@ -726,28 +756,28 @@ func (c *Operator) createConfig(p *v1alpha1.Prometheus) error {
if err != nil {
return errors.Wrap(err, "selecting ServiceMonitors failed")
}
// Update config map based on the most recent configuration.
b, err := generateConfig(p, smons)
// Update secret based on the most recent configuration.
conf, err := generateConfig(p, smons)
if err != nil {
return errors.Wrap(err, "generating config failed")
}
cm := &v1.ConfigMap{
s := &v1.Secret{
ObjectMeta: apimetav1.ObjectMeta{
Name: configConfigMapName(p.Name),
Name: configSecretName(p.Name),
},
Data: map[string]string{
"prometheus.yaml": string(b),
Data: map[string][]byte{
"prometheus.yaml": []byte(conf),
},
}
cmClient := c.kclient.CoreV1().ConfigMaps(p.Namespace)
sClient := c.kclient.CoreV1().Secrets(p.Namespace)
_, err = cmClient.Get(cm.Name, metav1.GetOptions{})
_, err = sClient.Get(s.Name, metav1.GetOptions{})
if apierrors.IsNotFound(err) {
_, err = cmClient.Create(cm)
_, err = sClient.Create(s)
} else if err == nil {
_, err = cmClient.Update(cm)
_, err = sClient.Update(s)
}
return err
}
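The createConfig change above follows a get-then-create-or-update pattern against the config Secret. As a rough standalone summary of that flow, a minimal sketch might look like the following; the helper name and the context/options arguments of a recent client-go are assumptions, not the operator's actual code.

package example

import (
	"context"

	v1 "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// upsertConfigSecret creates the config Secret if it is missing and otherwise
// replaces its contents with the freshly generated configuration.
func upsertConfigSecret(client kubernetes.Interface, namespace string, s *v1.Secret) error {
	sClient := client.CoreV1().Secrets(namespace)
	_, err := sClient.Get(context.TODO(), s.Name, metav1.GetOptions{})
	if apierrors.IsNotFound(err) {
		// Secret does not exist yet, e.g. right after the Prometheus TPR was
		// created or after someone deleted the generated Secret.
		_, err = sClient.Create(context.TODO(), s, metav1.CreateOptions{})
		return err
	}
	if err != nil {
		return err
	}
	// Secret exists; overwrite it with the most recent configuration.
	_, err = sClient.Update(context.TODO(), s, metav1.UpdateOptions{})
	return err
}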


@@ -104,13 +104,13 @@ func makeStatefulSet(p v1alpha1.Prometheus, old *v1beta1.StatefulSet, config *Co
return statefulset
}
func makeEmptyConfig(name string) *v1.ConfigMap {
return &v1.ConfigMap{
func makeEmptyConfig(name string) *v1.Secret {
return &v1.Secret{
ObjectMeta: apimetav1.ObjectMeta{
Name: configConfigMapName(name),
Name: configSecretName(name),
},
Data: map[string]string{
"prometheus.yaml": "",
Data: map[string][]byte{
"prometheus.yaml": []byte{},
},
}
}
@@ -285,10 +285,8 @@ func makeStatefulSetSpec(p v1alpha1.Prometheus, c *Config) v1beta1.StatefulSetSp
{
Name: "config",
VolumeSource: v1.VolumeSource{
ConfigMap: &v1.ConfigMapVolumeSource{
LocalObjectReference: v1.LocalObjectReference{
Name: configConfigMapName(p.Name),
},
Secret: &v1.SecretVolumeSource{
SecretName: configSecretName(p.Name),
},
},
},
@@ -308,7 +306,7 @@ func makeStatefulSetSpec(p v1alpha1.Prometheus, c *Config) v1beta1.StatefulSetSp
}
}
func configConfigMapName(name string) string {
func configSecretName(name string) string {
return prefixedName(name)
}
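For context, the `config` volume defined above is consumed by the Prometheus container through a corresponding volume mount. A sketch of how the pair could be built follows; only the switch to `SecretVolumeSource` comes from this diff, while the function name and the mount path are illustrative assumptions.

package example

import v1 "k8s.io/api/core/v1"

// configVolumeAndMount pairs the Secret-backed "config" volume with a container
// mount. secretName corresponds to configSecretName(p.Name) in the operator.
func configVolumeAndMount(secretName string) (v1.Volume, v1.VolumeMount) {
	vol := v1.Volume{
		Name: "config",
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: secretName,
			},
		},
	}
	mount := v1.VolumeMount{
		Name:      "config",
		ReadOnly:  true,
		MountPath: "/etc/prometheus/config", // hypothetical path, not taken from the diff
	}
	return vol, mount
}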


@@ -115,12 +115,7 @@ func TestPrometheusReloadConfig(t *testing.T) {
},
}
cfg := &v1.ConfigMap{
ObjectMeta: apimetav1.ObjectMeta{
Name: fmt.Sprintf("prometheus-%s", name),
},
Data: map[string]string{
"prometheus.yaml": `
firstConfig := `
global:
scrape_interval: 1m
scrape_configs:
@@ -129,7 +124,14 @@ scrape_configs:
static_configs:
- targets:
- 111.111.111.111:9090
`,
`
cfg := &v1.Secret{
ObjectMeta: apimetav1.ObjectMeta{
Name: fmt.Sprintf("prometheus-%s", name),
},
Data: map[string][]byte{
"prometheus.yaml": []byte(firstConfig),
},
}
@@ -144,7 +146,7 @@ scrape_configs:
}
}()
if _, err := framework.KubeClient.CoreV1().ConfigMaps(framework.Namespace.Name).Create(cfg); err != nil {
if _, err := framework.KubeClient.CoreV1().Secrets(framework.Namespace.Name).Create(cfg); err != nil {
t.Fatal(err)
}
@@ -160,7 +162,7 @@ scrape_configs:
t.Fatal(err)
}
cfg.Data["prometheus.yaml"] = `
secondConfig := `
global:
scrape_interval: 1m
scrape_configs:
@@ -169,9 +171,11 @@ scrape_configs:
static_configs:
- targets:
- 111.111.111.111:9090
- 111.111.111.112:9090
- 111.111.111.112:9090
`
if _, err := framework.KubeClient.CoreV1().ConfigMaps(framework.Namespace.Name).Update(cfg); err != nil {
cfg.Data["prometheus.yaml"] = []byte(secondConfig)
if _, err := framework.KubeClient.CoreV1().Secrets(framework.Namespace.Name).Update(cfg); err != nil {
t.Fatal(err)
}
@@ -257,10 +261,10 @@ func TestPrometheusDiscovery(t *testing.T) {
t.Fatal(err)
}
log.Print("Validating Prometheus ConfigMap was created")
_, err := framework.KubeClient.CoreV1().ConfigMaps(framework.Namespace.Name).Get(fmt.Sprintf("prometheus-%s", prometheusName), apimetav1.GetOptions{})
log.Print("Validating Prometheus config Secret was created")
_, err := framework.KubeClient.CoreV1().Secrets(framework.Namespace.Name).Get(fmt.Sprintf("prometheus-%s", prometheusName), apimetav1.GetOptions{})
if err != nil {
t.Fatalf("Generated ConfigMap could not be retrieved: ", err)
t.Fatalf("Generated Secret could not be retrieved: ", err)
}
log.Print("Validating Prometheus Targets were properly discovered")
@@ -313,10 +317,10 @@ func TestPrometheusAlertmanagerDiscovery(t *testing.T) {
t.Fatalf("Creating ServiceMonitor failed: ", err)
}
log.Print("Validating Prometheus ConfigMap was created")
_, err := framework.KubeClient.CoreV1().ConfigMaps(framework.Namespace.Name).Get(fmt.Sprintf("prometheus-%s", prometheusName), apimetav1.GetOptions{})
log.Print("Validating Prometheus config Secret was created")
_, err := framework.KubeClient.CoreV1().Secrets(framework.Namespace.Name).Get(fmt.Sprintf("prometheus-%s", prometheusName), apimetav1.GetOptions{})
if err != nil {
t.Fatalf("Generated ConfigMap could not be retrieved: ", err)
t.Fatalf("Generated Secret could not be retrieved: ", err)
}
if err := framework.CreateAlertmanagerAndWaitUntilReady(framework.MakeBasicAlertmanager(alertmanagerName, 3)); err != nil {
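Tests that exercise this flow typically have to wait for the operator to (re)generate the config Secret before asserting on its contents. A hypothetical polling helper in the spirit of these e2e checks (not part of the test framework; names, interval, and timeout are assumptions) might look like this:

package example

import (
	"bytes"
	"context"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

// waitForConfigSecret polls until the generated config Secret contains the
// expected prometheus.yaml, or the timeout expires.
func waitForConfigSecret(client kubernetes.Interface, namespace, name string, expected []byte) error {
	return wait.PollImmediate(2*time.Second, time.Minute, func() (bool, error) {
		s, err := client.CoreV1().Secrets(namespace).Get(context.TODO(), name, metav1.GetOptions{})
		if err != nil {
			// The Secret may not have been created yet; keep polling.
			return false, nil
		}
		return bytes.Equal(s.Data["prometheus.yaml"], expected), nil
	})
}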