package watchdog

import (
	"encoding/json"
	"fmt"
	"log"
	"sync"
	"time"

	"github.com/TwinProduction/gatus/config"
	"github.com/TwinProduction/gatus/core"
	"github.com/TwinProduction/gatus/metric"
)

var (
	serviceResults = make(map[string][]*core.Result)

	// serviceResultsMutex is used to prevent concurrent map access
	serviceResultsMutex sync.RWMutex

	// monitoringMutex is used to prevent multiple services from being evaluated at the same time.
	// Without this, conditions using response time may become inaccurate.
	monitoringMutex sync.Mutex
)

// GetJsonEncodedServiceResults returns a list of the last 20 results for each service, encoded using json.Marshal.
// The encoding is done here because we use a mutex to prevent concurrent map access.
func GetJsonEncodedServiceResults() ([]byte, error) {
	serviceResultsMutex.RLock()
	data, err := json.Marshal(serviceResults)
	serviceResultsMutex.RUnlock()
	return data, err
}

// Monitor loops over each service and starts a goroutine to monitor each service separately
func Monitor(cfg *config.Config) {
	for _, service := range cfg.Services {
		go monitor(service)
		// To prevent multiple requests from running at the same time
		time.Sleep(1111 * time.Millisecond)
	}
}

// monitor monitors a single service in a loop
func monitor(service *core.Service) {
	cfg := config.Get()
	for {
		// By placing the lock here, we prevent multiple services from being monitored at the exact same time, which
		// could cause performance issues and return inaccurate results
		monitoringMutex.Lock()
		if cfg.Debug {
			log.Printf("[watchdog][monitor] Monitoring serviceName=%s", service.Name)
		}
		result := service.EvaluateConditions()
		metric.PublishMetricsForService(service, result)
		serviceResultsMutex.Lock()
		serviceResults[service.Name] = append(serviceResults[service.Name], result)
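		// Only keep the 20 most recent results per service so the in-memory history doesn't grow indefinitely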
		if len(serviceResults[service.Name]) > 20 {
			serviceResults[service.Name] = serviceResults[service.Name][1:]
		}
		serviceResultsMutex.Unlock()
		var extra string
		if !result.Success {
			extra = fmt.Sprintf("responseBody=%s", result.Body)
		}
		log.Printf(
			"[watchdog][monitor] Monitored serviceName=%s; success=%v; errors=%d; requestDuration=%s; %s",
			service.Name,
			result.Success,
			len(result.Errors),
			result.Duration.Round(time.Millisecond),
			extra,
		)
		handleAlerting(service, result)
		if cfg.Debug {
			log.Printf("[watchdog][monitor] Waiting for interval=%s before monitoring serviceName=%s again", service.Interval, service.Name)
		}
		monitoringMutex.Unlock()
		time.Sleep(service.Interval)
	}
}
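
// handleAlerting takes the appropriate action based on the result of a service evaluation: when the service is
// healthy again, it sends a "resolved" notification for every alert that was previously triggered and has
// SendOnResolved enabled; when the service is failing, it triggers every alert whose threshold of consecutive
// failures has just been reached.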
func handleAlerting(service *core.Service, result *core.Result) {
	cfg := config.Get()
	if cfg.Alerting == nil {
		return
	}
	if result.Success {
		if service.NumberOfFailuresInARow > 0 {
			for _, alert := range service.Alerts {
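				// Skip alerts that are disabled, that shouldn't be sent on resolve, or whose threshold
				// was never reached while the service was failing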
				if !alert.Enabled || !alert.SendOnResolved || alert.Threshold > service.NumberOfFailuresInARow {
					continue
				}
				var alertProvider *core.CustomAlertProvider
				if alert.Type == core.SlackAlert {
					if len(cfg.Alerting.Slack) > 0 {
						log.Printf("[watchdog][handleAlerting] Sending Slack alert because alert with description=%s has been resolved", alert.Description)
						alertProvider = core.CreateSlackCustomAlertProvider(cfg.Alerting.Slack, service, alert, result, true)
					} else {
						log.Printf("[watchdog][handleAlerting] Not sending Slack alert despite being resolved, because there is no Slack webhook configured")
					}
				} else if alert.Type == core.TwilioAlert {
					if cfg.Alerting.Twilio != nil && cfg.Alerting.Twilio.IsValid() {
						log.Printf("[watchdog][handleAlerting] Sending Twilio alert because alert with description=%s has been resolved", alert.Description)
						alertProvider = core.CreateTwilioCustomAlertProvider(cfg.Alerting.Twilio, fmt.Sprintf("RESOLVED: %s - %s", service.Name, alert.Description))
					} else {
						log.Printf("[watchdog][handleAlerting] Not sending Twilio alert despite being resolved, because Twilio isn't configured properly")
					}
				} else if alert.Type == core.CustomAlert {
					if cfg.Alerting.Custom != nil && cfg.Alerting.Custom.IsValid() {
						log.Printf("[watchdog][handleAlerting] Sending custom alert because alert with description=%s has been resolved", alert.Description)
						alertProvider = &core.CustomAlertProvider{
							Url:     cfg.Alerting.Custom.Url,
							Method:  cfg.Alerting.Custom.Method,
							Body:    cfg.Alerting.Custom.Body,
							Headers: cfg.Alerting.Custom.Headers,
						}
					} else {
						log.Printf("[watchdog][handleAlerting] Not sending custom alert despite being resolved, because the custom provider isn't configured properly")
					}
				}
				if alertProvider != nil {
					err := alertProvider.Send(service.Name, alert.Description, true)
					if err != nil {
						log.Printf("[watchdog][handleAlerting] Ran into error sending an alert: %s", err.Error())
					}
				}
			}
		}
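		// The service is healthy again, so reset the counter of consecutive failures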
		service.NumberOfFailuresInARow = 0
	} else {
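		// The service is failing, so keep track of the number of consecutive failures to evaluate alert thresholds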
		service.NumberOfFailuresInARow++
		for _, alert := range service.Alerts {
			// If the alert hasn't been triggered, move to the next one
			if !alert.Enabled || alert.Threshold != service.NumberOfFailuresInARow {
				continue
			}
			var alertProvider *core.CustomAlertProvider
			if alert.Type == core.SlackAlert {
				if len(cfg.Alerting.Slack) > 0 {
					log.Printf("[watchdog][handleAlerting] Sending Slack alert because alert with description=%s has been triggered", alert.Description)
					alertProvider = core.CreateSlackCustomAlertProvider(cfg.Alerting.Slack, service, alert, result, false)
				} else {
					log.Printf("[watchdog][handleAlerting] Not sending Slack alert despite being triggered, because there is no Slack webhook configured")
				}
			} else if alert.Type == core.TwilioAlert {
				if cfg.Alerting.Twilio != nil && cfg.Alerting.Twilio.IsValid() {
					log.Printf("[watchdog][handleAlerting] Sending Twilio alert because alert with description=%s has been triggered", alert.Description)
					alertProvider = core.CreateTwilioCustomAlertProvider(cfg.Alerting.Twilio, fmt.Sprintf("TRIGGERED: %s - %s", service.Name, alert.Description))
				} else {
					log.Printf("[watchdog][handleAlerting] Not sending Twilio alert despite being triggered, because Twilio isn't configured properly")
				}
			} else if alert.Type == core.CustomAlert {
				if cfg.Alerting.Custom != nil && cfg.Alerting.Custom.IsValid() {
					log.Printf("[watchdog][handleAlerting] Sending custom alert because alert with description=%s has been triggered", alert.Description)
					alertProvider = &core.CustomAlertProvider{
						Url:     cfg.Alerting.Custom.Url,
						Method:  cfg.Alerting.Custom.Method,
						Body:    cfg.Alerting.Custom.Body,
						Headers: cfg.Alerting.Custom.Headers,
					}
				} else {
					log.Printf("[watchdog][handleAlerting] Not sending custom alert despite being triggered, because the custom provider isn't configured properly")
				}
			}
			if alertProvider != nil {
				err := alertProvider.Send(service.Name, alert.Description, false)
				if err != nil {
					log.Printf("[watchdog][handleAlerting] Ran into error sending an alert: %s", err.Error())
				}
			}
		}
	}
}