mirror of https://github.com/dragonflydb/dragonfly.git (synced 2024-12-14 11:58:02 +00:00)
c7db025a48
Should allow tracking caches where Dragonfly is not responsive to I/O due to big CPU tasks. Also, update the local Grafana dashboard.
Signed-off-by: Roman Gershman <roman@dragonflydb.io>
52 lines
1.4 KiB
YAML
# my global config
global:
  scrape_interval: 15s # By default, scrape targets every 15 seconds.
  evaluation_interval: 15s # By default, evaluate rules every 15 seconds.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'my-project'
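
# A per-job scrape_interval in the jobs below overrides this 15s global
# default for that job only.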

# Load and evaluate rules in this file every 'evaluation_interval' seconds.
# rule_files:
# - 'alert.rules'
# - "first.rules"
# - "second.rules"

# alert
# alerting:
#   alertmanagers:
#   - scheme: http
#     static_configs:
#     - targets:
#       - "alertmanager:9093"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.

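  # Per the commit message, scraping every 1s (vs. the 15s default) helps track
  # caches where Dragonfly is not responsive to I/O due to big CPU tasks: a
  # busy instance shows up as missed scrapes. host.docker.internal reaches a
  # Dragonfly running on the Docker host; it is assumed here that Dragonfly
  # serves /metrics over HTTP on its main port (6379).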
  - job_name: dragonfly
    scrape_interval: 1s
    static_configs:
      - targets: ['host.docker.internal:6379']

  - job_name: 'prometheus'

    # Override the global default and scrape targets from this job every second.
    scrape_interval: 1s

    static_configs:
      - targets: ['localhost:9090']

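  # node-exporter feeds host-level CPU, memory, and disk metrics into the local
  # Grafana dashboard mentioned in the commit; the static 'instance: node'
  # label below gives those series a stable instance name (an inference, the
  # file itself does not explain the label).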
  - job_name: 'node-exporter'

    # Override the global default and scrape targets from this job every second.
    scrape_interval: 1s
    static_configs:
      - targets: ['node-exporter:9100']
        labels:
          instance: node
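
For local use, this file is typically mounted into a Prometheus container. A
minimal docker-compose sketch, assuming the config above is saved as
prometheus.yml next to the compose file (service names, image tags, and paths
are illustrative, not taken from this commit):

version: '3'
services:
  prometheus:
    image: prom/prometheus
    volumes:
      - ./prometheus.yml:/etc/prometheus/prometheus.yml
    ports:
      - '9090:9090'
    extra_hosts:
      # Lets the 'dragonfly' job resolve host.docker.internal on Linux.
      - 'host.docker.internal:host-gateway'
  node-exporter:
    image: prom/node-exporter
    ports:
      - '9100:9100'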