
assets,site/content: daily assets regeneration

github-actions[bot] 2024-11-27 03:39:38 +00:00
parent 763258c7bb
commit 16f1c7dd05
4 changed files with 43 additions and 14 deletions

View file

@@ -1051,8 +1051,20 @@ groups:
}}, the configuration may not be supported
summary: The number of subsystems defined to the gateway exceeds supported values
on cluster {{ $labels.cluster }}
expr: count by(gateway_host, cluster) (label_replace(ceph_nvmeof_subsystem_metadata,"gateway_host","$1","instance","(.*):.*"))
> 16.00
expr: count by(gateway_host, cluster) (label_replace(ceph_nvmeof_subsystem_metadata,"gateway_host","$1","instance","(.*?)(?::.*)?"))
> 128.00
for: 1m
labels:
severity: warning
type: ceph_default
- alert: NVMeoFTooManyNamespaces
annotations:
description: Although you may continue to create namespaces in {{ $labels.gateway_host
}}, the configuration may not be supported
summary: The number of namespaces defined to the gateway exceeds supported values
on cluster {{ $labels.cluster }}
expr: sum by(gateway_host, cluster) (label_replace(ceph_nvmeof_subsystem_namespace_count,"gateway_host","$1","instance","(.*?)(?::.*)?"))
> 1024.00
for: 1m
labels:
severity: warning
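
Not part of the commit, just an aside on the relabel change above: with the old pattern "(.*):.*", label_replace only matches instance values that carry a ":port" suffix (the regex is fully anchored), and a non-matching series is passed through unchanged, so gateways scraped without a port end up counted under an empty gateway_host. The new pattern "(.*?)(?::.*)?" makes the port optional. A minimal sketch with made-up instance values:

# instance="ceph-gw1:9283" -> gateway_host="ceph-gw1" under either pattern
# instance="ceph-gw1"      -> old pattern: no match, series passed through with gateway_host unset
#                             new pattern: gateway_host="ceph-gw1"
count by (gateway_host, cluster) (
  label_replace(ceph_nvmeof_subsystem_metadata, "gateway_host", "$1", "instance", "(.*?)(?::.*)?")
) > 128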

View file

@@ -52,10 +52,10 @@ groups:
severity: warning
- alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
annotations:
description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus
{{$labels.instance}} to Alertmanager {{$labels.alertmanager}}.'
summary: Prometheus has encountered more than 1% errors sending alerts to a
specific Alertmanager.
description: '{{ printf "%.1f" $value }}% of alerts sent by Prometheus {{$labels.instance}}
to Alertmanager {{$labels.alertmanager}} were affected by errors.'
summary: More than 1% of alerts sent by Prometheus to a specific Alertmanager
were affected by errors.
expr: |
(
rate(prometheus_notifications_errors_total{job="prometheus"}[5m])

View file

@@ -1407,8 +1407,25 @@ annotations:
}}, the configuration may not be supported
summary: The number of subsystems defined to the gateway exceeds supported values
on cluster {{ $labels.cluster }}
expr: count by(gateway_host, cluster) (label_replace(ceph_nvmeof_subsystem_metadata,"gateway_host","$1","instance","(.*):.*"))
> 16.00
expr: count by(gateway_host, cluster) (label_replace(ceph_nvmeof_subsystem_metadata,"gateway_host","$1","instance","(.*?)(?::.*)?"))
> 128.00
for: 1m
labels:
severity: warning
type: ceph_default
{{< /code >}}
##### NVMeoFTooManyNamespaces
{{< code lang="yaml" >}}
alert: NVMeoFTooManyNamespaces
annotations:
description: Although you may continue to create namespaces in {{ $labels.gateway_host
}}, the configuration may not be supported
summary: The number of namespaces defined to the gateway exceeds supported values
on cluster {{ $labels.cluster }}
expr: sum by(gateway_host, cluster) (label_replace(ceph_nvmeof_subsystem_namespace_count,"gateway_host","$1","instance","(.*?)(?::.*)?"))
> 1024.00
for: 1m
labels:
severity: warning
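
The new NVMeoFTooManyNamespaces rule sums ceph_nvmeof_subsystem_namespace_count per gateway and cluster and warns once the total stays above 1024 for a minute. Below is a sketch of a promtool unit test for it; the file names, the instance/nqn label values, and the sample values are made up, and the type: ceph_default label is assumed by analogy with the subsystem alert, since the hunk above is cut off before the rule's label list ends.

# nvmeof_namespaces_test.yaml (hypothetical); run with:
#   promtool test rules nvmeof_namespaces_test.yaml
rule_files:
  - ceph-alerts.yaml              # assumed path to the generated rule file
evaluation_interval: 1m
tests:
  - interval: 1m
    input_series:
      # two subsystems on the same gateway: 600 + 600 = 1200 namespaces > 1024
      - series: 'ceph_nvmeof_subsystem_namespace_count{instance="ceph-gw1:9283",nqn="nqn.2016-06.io.spdk:sub1",cluster="ceph"}'
        values: '600+0x10'
      - series: 'ceph_nvmeof_subsystem_namespace_count{instance="ceph-gw1:9283",nqn="nqn.2016-06.io.spdk:sub2",cluster="ceph"}'
        values: '600+0x10'
    alert_rule_test:
      - eval_time: 5m
        alertname: NVMeoFTooManyNamespaces
        exp_alerts:
          - exp_labels:
              gateway_host: ceph-gw1
              cluster: ceph
              severity: warning
              type: ceph_default   # assumed, not visible in the truncated hunk
            exp_annotations:
              summary: The number of namespaces defined to the gateway exceeds supported values on cluster ceph
              description: Although you may continue to create namespaces in ceph-gw1, the configuration may not be supported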

View file

@@ -89,17 +89,17 @@ labels:
{{< /code >}}
##### PrometheusErrorSendingAlertsToSomeAlertmanagers
'{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus
'{{ printf "%.1f" $value }}% of alerts sent by Prometheus {{$labels.instance}}
Prometheus has encountered more than 1% errors sending alerts to a specific
More than 1% of alerts sent by Prometheus to a specific Alertmanager were
{{< code lang="yaml" >}}
alert: PrometheusErrorSendingAlertsToSomeAlertmanagers
annotations:
description: '{{ printf "%.1f" $value }}% errors while sending alerts from Prometheus
{{$labels.instance}} to Alertmanager {{$labels.alertmanager}}.'
summary: Prometheus has encountered more than 1% errors sending alerts to a specific
Alertmanager.
description: '{{ printf "%.1f" $value }}% of alerts sent by Prometheus {{$labels.instance}}
to Alertmanager {{$labels.alertmanager}} were affected by errors.'
summary: More than 1% of alerts sent by Prometheus to a specific Alertmanager were
affected by errors.
expr: |
(
rate(prometheus_notifications_errors_total{job="prometheus"}[5m])
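
Both Prometheus hunks above stop at the hunk boundary, so only the numerator of the alert expression is visible. For orientation, the rule in the upstream prometheus mixin has roughly the following shape (from memory of the mixin, not taken from this diff); the "> 1" threshold is what the "more than 1%" wording in the summary refers to.

# Approximate full expression; the hunks above end before the denominator.
(
  rate(prometheus_notifications_errors_total{job="prometheus"}[5m])
/
  rate(prometheus_notifications_sent_total{job="prometheus"}[5m])
)
* 100
> 1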