Mirror of https://github.com/TwiN/gatus.git, synced 2024-12-14 11:58:04 +00:00

refactor: Clean up code and change log format (#719)

This commit is contained in:
parent 979d467e36
commit 922638e071

19 changed files with 95 additions and 96 deletions
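The log-format half of this refactor is mechanical: every two-segment log prefix of the form "[package][Function]" becomes a single dot-separated segment "[package.Function]". As a minimal sketch of the rewrite rule only (an illustration of the pattern, not how the commit was produced), the transformation can be expressed with a regular expression in Go:

package main

import (
	"fmt"
	"regexp"
)

// oldPrefix matches the previous "[package][Function]" log prefix style.
var oldPrefix = regexp.MustCompile(`\[(\w+)\]\[(\w+)\]`)

func main() {
	before := `log.Println("[watchdog][execute] No connectivity; skipping execution")`
	// Rewrite to the new "[package.Function]" style.
	after := oldPrefix.ReplaceAllString(before, "[$1.$2]")
	fmt.Println(after) // log.Println("[watchdog.execute] No connectivity; skipping execution")
}

The remaining changes are small cleanups: an unexported helper rename in the AWS SES alerting provider, errors.Is instead of direct error comparison, and pointer receivers on a few Endpoint methods.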
@@ -106,7 +106,7 @@ func (config *Config) GetAlertingProviderByAlertType(alertType alert.Type) provi
 return fieldValue.Interface().(provider.AlertProvider)
 }
 }
-log.Printf("[alerting][GetAlertingProviderByAlertType] No alerting provider found for alert type %s", alertType)
+log.Printf("[alerting.GetAlertingProviderByAlertType] No alerting provider found for alert type %s", alertType)
 return nil
 }
@@ -59,7 +59,7 @@ func (provider *AlertProvider) IsValid() bool {

 // Send an alert using the provider
 func (provider *AlertProvider) Send(endpoint *core.Endpoint, alert *alert.Alert, result *core.Result, resolved bool) error {
-sess, err := provider.CreateSesSession()
+sess, err := provider.createSession()
 if err != nil {
 return err
 }

@@ -153,14 +153,12 @@ func (provider *AlertProvider) GetDefaultAlert() *alert.Alert {
 return provider.DefaultAlert
 }

-func (provider AlertProvider) CreateSesSession() (*session.Session, error) {
+func (provider *AlertProvider) createSession() (*session.Session, error) {
 config := &aws.Config{
 Region: aws.String(provider.Region),
 }

 if len(provider.AccessKeyID) > 0 && len(provider.SecretAccessKey) > 0 {
 config.Credentials = credentials.NewStaticCredentials(provider.AccessKeyID, provider.SecretAccessKey, "")
 }

 return session.NewSession(config)
 }
@@ -78,7 +78,7 @@ func (provider *AlertProvider) Send(endpoint *core.Endpoint, alert *alert.Alert,
 var payload pagerDutyResponsePayload
 if err = json.Unmarshal(body, &payload); err != nil {
 // Silently fail. We don't want to create tons of alerts just because we failed to parse the body.
-log.Printf("[pagerduty][Send] Ran into error unmarshaling pagerduty response: %s", err.Error())
+log.Printf("[pagerduty.Send] Ran into error unmarshaling pagerduty response: %s", err.Error())
 } else {
 alert.ResolveKey = payload.DedupKey
 }
@@ -111,7 +111,7 @@ func ResponseTimeChart(c *fiber.Ctx) error {
 c.Set("Expires", "0")
 c.Status(http.StatusOK)
 if err := graph.Render(chart.SVG, c); err != nil {
-log.Println("[api][ResponseTimeChart] Failed to render response time chart:", err.Error())
+log.Println("[api.ResponseTimeChart] Failed to render response time chart:", err.Error())
 return c.Status(500).SendString(err.Error())
 }
 return nil
@@ -2,6 +2,7 @@ package api

 import (
 "encoding/json"
+"errors"
 "fmt"
 "io"
 "log"

@@ -26,19 +27,19 @@ func EndpointStatuses(cfg *config.Config) fiber.Handler {
 if !exists {
 endpointStatuses, err := store.Get().GetAllEndpointStatuses(paging.NewEndpointStatusParams().WithResults(page, pageSize))
 if err != nil {
-log.Printf("[api][EndpointStatuses] Failed to retrieve endpoint statuses: %s", err.Error())
+log.Printf("[api.EndpointStatuses] Failed to retrieve endpoint statuses: %s", err.Error())
 return c.Status(500).SendString(err.Error())
 }
 // ALPHA: Retrieve endpoint statuses from remote instances
 if endpointStatusesFromRemote, err := getEndpointStatusesFromRemoteInstances(cfg.Remote); err != nil {
-log.Printf("[handler][EndpointStatuses] Silently failed to retrieve endpoint statuses from remote: %s", err.Error())
+log.Printf("[handler.EndpointStatuses] Silently failed to retrieve endpoint statuses from remote: %s", err.Error())
 } else if endpointStatusesFromRemote != nil {
 endpointStatuses = append(endpointStatuses, endpointStatusesFromRemote...)
 }
 // Marshal endpoint statuses to JSON
 data, err = json.Marshal(endpointStatuses)
 if err != nil {
-log.Printf("[api][EndpointStatuses] Unable to marshal object to JSON: %s", err.Error())
+log.Printf("[api.EndpointStatuses] Unable to marshal object to JSON: %s", err.Error())
 return c.Status(500).SendString("unable to marshal object to JSON")
 }
 cache.SetWithTTL(fmt.Sprintf("endpoint-status-%d-%d", page, pageSize), data, cacheTTL)

@@ -64,13 +65,13 @@ func getEndpointStatusesFromRemoteInstances(remoteConfig *remote.Config) ([]*cor
 body, err := io.ReadAll(response.Body)
 if err != nil {
 _ = response.Body.Close()
-log.Printf("[handler][getEndpointStatusesFromRemoteInstances] Silently failed to retrieve endpoint statuses from %s: %s", instance.URL, err.Error())
+log.Printf("[handler.getEndpointStatusesFromRemoteInstances] Silently failed to retrieve endpoint statuses from %s: %s", instance.URL, err.Error())
 continue
 }
 var endpointStatuses []*core.EndpointStatus
 if err = json.Unmarshal(body, &endpointStatuses); err != nil {
 _ = response.Body.Close()
-log.Printf("[handler][getEndpointStatusesFromRemoteInstances] Silently failed to retrieve endpoint statuses from %s: %s", instance.URL, err.Error())
+log.Printf("[handler.getEndpointStatusesFromRemoteInstances] Silently failed to retrieve endpoint statuses from %s: %s", instance.URL, err.Error())
 continue
 }
 _ = response.Body.Close()

@@ -87,19 +88,19 @@ func EndpointStatus(c *fiber.Ctx) error {
 page, pageSize := extractPageAndPageSizeFromRequest(c)
 endpointStatus, err := store.Get().GetEndpointStatusByKey(c.Params("key"), paging.NewEndpointStatusParams().WithResults(page, pageSize).WithEvents(1, common.MaximumNumberOfEvents))
 if err != nil {
-if err == common.ErrEndpointNotFound {
+if errors.Is(err, common.ErrEndpointNotFound) {
 return c.Status(404).SendString(err.Error())
 }
-log.Printf("[api][EndpointStatus] Failed to retrieve endpoint status: %s", err.Error())
+log.Printf("[api.EndpointStatus] Failed to retrieve endpoint status: %s", err.Error())
 return c.Status(500).SendString(err.Error())
 }
 if endpointStatus == nil { // XXX: is this check necessary?
-log.Printf("[api][EndpointStatus] Endpoint with key=%s not found", c.Params("key"))
+log.Printf("[api.EndpointStatus] Endpoint with key=%s not found", c.Params("key"))
 return c.Status(404).SendString("not found")
 }
 output, err := json.Marshal(endpointStatus)
 if err != nil {
-log.Printf("[api][EndpointStatus] Unable to marshal object to JSON: %s", err.Error())
+log.Printf("[api.EndpointStatus] Unable to marshal object to JSON: %s", err.Error())
 return c.Status(500).SendString("unable to marshal object to JSON")
 }
 c.Set("Content-Type", "application/json")
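The switch from a direct comparison to errors.Is matters when the store wraps the sentinel error before returning it: equality only matches the exact value, while errors.Is walks the wrap chain. A self-contained sketch of the difference (the sentinel here stands in for common.ErrEndpointNotFound and is hypothetical):

package main

import (
	"errors"
	"fmt"
)

// errEndpointNotFound stands in for common.ErrEndpointNotFound (assumed to be a plain sentinel error).
var errEndpointNotFound = errors.New("endpoint not found")

// lookup simulates a store layer that wraps the sentinel with extra context.
func lookup(key string) error {
	return fmt.Errorf("querying key %q: %w", key, errEndpointNotFound)
}

func main() {
	err := lookup("core_frontend")
	fmt.Println(err == errEndpointNotFound)          // false: equality misses wrapped errors
	fmt.Println(errors.Is(err, errEndpointNotFound)) // true: errors.Is unwraps the chain
}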
@@ -15,14 +15,14 @@ func SinglePageApplication(ui *ui.Config) fiber.Handler {
 t, err := template.ParseFS(static.FileSystem, static.IndexPath)
 if err != nil {
 // This should never happen, because ui.ValidateAndSetDefaults validates that the template works.
-log.Println("[api][SinglePageApplication] Failed to parse template. This should never happen, because the template is validated on start. Error:", err.Error())
+log.Println("[api.SinglePageApplication] Failed to parse template. This should never happen, because the template is validated on start. Error:", err.Error())
 return c.Status(500).SendString("Failed to parse template. This should never happen, because the template is validated on start.")
 }
 c.Set("Content-Type", "text/html")
 err = t.Execute(c, ui)
 if err != nil {
 // This should never happen, because ui.ValidateAndSetDefaults validates that the template works.
-log.Println("[api][SinglePageApplication] Failed to execute template. This should never happen, because the template is validated on start. Error:", err.Error())
+log.Println("[api.SinglePageApplication] Failed to execute template. This should never happen, because the template is validated on start. Error:", err.Error())
 return c.Status(500).SendString("Failed to parse template. This should never happen, because the template is validated on start.")
 }
 return c.SendStatus(200)
@@ -191,7 +191,7 @@ func (c *Config) getHTTPClient() *http.Client {
 if c.ProxyURL != "" {
 proxyURL, err := url.Parse(c.ProxyURL)
 if err != nil {
-log.Println("[client][getHTTPClient] THIS SHOULD NOT HAPPEN. Silently ignoring custom proxy due to error:", err.Error())
+log.Println("[client.getHTTPClient] THIS SHOULD NOT HAPPEN. Silently ignoring custom proxy due to error:", err.Error())
 } else {
 c.httpClient.Transport.(*http.Transport).Proxy = http.ProxyURL(proxyURL)
 }

@@ -201,7 +201,7 @@ func (c *Config) getHTTPClient() *http.Client {
 if err != nil {
 // We're ignoring the error, because it should have been validated on startup ValidateAndSetDefaults.
 // It shouldn't happen, but if it does, we'll log it... Better safe than sorry ;)
-log.Println("[client][getHTTPClient] THIS SHOULD NOT HAPPEN. Silently ignoring invalid DNS resolver due to error:", err.Error())
+log.Println("[client.getHTTPClient] THIS SHOULD NOT HAPPEN. Silently ignoring invalid DNS resolver due to error:", err.Error())
 } else {
 dialer := &net.Dialer{
 Resolver: &net.Resolver{

@@ -218,7 +218,7 @@ func (c *Config) getHTTPClient() *http.Client {
 }
 }
 if c.HasOAuth2Config() && c.HasIAPConfig() {
-log.Println("[client][getHTTPClient] Error: Both Identity-Aware-Proxy and Oauth2 configuration are present.")
+log.Println("[client.getHTTPClient] Error: Both Identity-Aware-Proxy and Oauth2 configuration are present.")
 } else if c.HasOAuth2Config() {
 c.httpClient = configureOAuth2(c.httpClient, *c.OAuth2Config)
 } else if c.HasIAPConfig() {

@@ -233,18 +233,18 @@ func (c *Config) getHTTPClient() *http.Client {
 func validateIAPToken(ctx context.Context, c IAPConfig) bool {
 ts, err := idtoken.NewTokenSource(ctx, c.Audience)
 if err != nil {
-log.Println("[client][ValidateIAPToken] Claiming Identity token failed. error:", err.Error())
+log.Println("[client.ValidateIAPToken] Claiming Identity token failed. error:", err.Error())
 return false
 }
 tok, err := ts.Token()
 if err != nil {
-log.Println("[client][ValidateIAPToken] Get Identity-Aware-Proxy token failed. error:", err.Error())
+log.Println("[client.ValidateIAPToken] Get Identity-Aware-Proxy token failed. error:", err.Error())
 return false
 }
 payload, err := idtoken.Validate(ctx, tok.AccessToken, c.Audience)
 _ = payload
 if err != nil {
-log.Println("[client][ValidateIAPToken] Token Validation failed. error:", err.Error())
+log.Println("[client.ValidateIAPToken] Token Validation failed. error:", err.Error())
 return false
 }
 return true

@@ -257,7 +257,7 @@ func configureIAP(httpClient *http.Client, c IAPConfig) *http.Client {
 if validateIAPToken(ctx, c) {
 ts, err := idtoken.NewTokenSource(ctx, c.Audience)
 if err != nil {
-log.Println("[client][ConfigureIAP] Claiming Token Source failed. error:", err.Error())
+log.Println("[client.ConfigureIAP] Claiming Token Source failed. error:", err.Error())
 return httpClient
 }
 client := oauth2.NewClient(ctx, ts)
@@ -161,13 +161,13 @@ func LoadConfiguration(configPath string) (*Config, error) {
 if fileInfo.IsDir() {
 err := walkConfigDir(configPath, func(path string, d fs.DirEntry, err error) error {
 if err != nil {
-log.Printf("[config][LoadConfiguration] Error walking path=%s: %s", path, err)
+log.Printf("[config.LoadConfiguration] Error walking path=%s: %s", path, err)
 return err
 }
-log.Printf("[config][LoadConfiguration] Reading configuration from %s", path)
+log.Printf("[config.LoadConfiguration] Reading configuration from %s", path)
 data, err := os.ReadFile(path)
 if err != nil {
-log.Printf("[config][LoadConfiguration] Error reading configuration from %s: %s", path, err)
+log.Printf("[config.LoadConfiguration] Error reading configuration from %s: %s", path, err)
 return fmt.Errorf("error reading configuration from file %s: %w", path, err)
 }
 configBytes, err = deepmerge.YAML(configBytes, data)

@@ -177,7 +177,7 @@ func LoadConfiguration(configPath string) (*Config, error) {
 return nil, fmt.Errorf("error reading configuration from directory %s: %w", usedConfigPath, err)
 }
 } else {
-log.Printf("[config][LoadConfiguration] Reading configuration from configFile=%s", configPath)
+log.Printf("[config.LoadConfiguration] Reading configuration from configFile=%s", configPath)
 if data, err := os.ReadFile(usedConfigPath); err != nil {
 return nil, err
 } else {

@@ -326,13 +326,13 @@ func validateWebConfig(config *Config) error {
 func validateEndpointsConfig(config *Config) error {
 for _, endpoint := range config.Endpoints {
 if config.Debug {
-log.Printf("[config][validateEndpointsConfig] Validating endpoint '%s'", endpoint.Name)
+log.Printf("[config.validateEndpointsConfig] Validating endpoint '%s'", endpoint.Name)
 }
 if err := endpoint.ValidateAndSetDefaults(); err != nil {
 return fmt.Errorf("invalid endpoint %s: %w", endpoint.DisplayName(), err)
 }
 }
-log.Printf("[config][validateEndpointsConfig] Validated %d endpoints", len(config.Endpoints))
+log.Printf("[config.validateEndpointsConfig] Validated %d endpoints", len(config.Endpoints))
 return nil
 }

@@ -340,7 +340,7 @@ func validateSecurityConfig(config *Config) error {
 if config.Security != nil {
 if config.Security.IsValid() {
 if config.Debug {
-log.Printf("[config][validateSecurityConfig] Basic security configuration has been validated")
+log.Printf("[config.validateSecurityConfig] Basic security configuration has been validated")
 }
 } else {
 // If there was an attempt to configure security, then it must mean that some confidential or private

@@ -357,7 +357,7 @@ func validateSecurityConfig(config *Config) error {
 // sets the default alert values when none are set.
 func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*core.Endpoint, debug bool) {
 if alertingConfig == nil {
-log.Printf("[config][validateAlertingConfig] Alerting is not configured")
+log.Printf("[config.validateAlertingConfig] Alerting is not configured")
 return
 }
 alertTypes := []alert.Type{

@@ -393,7 +393,7 @@ func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*core.E
 for alertIndex, endpointAlert := range endpoint.Alerts {
 if alertType == endpointAlert.Type {
 if debug {
-log.Printf("[config][validateAlertingConfig] Parsing alert %d with provider's default alert for provider=%s in endpoint=%s", alertIndex, alertType, endpoint.Name)
+log.Printf("[config.validateAlertingConfig] Parsing alert %d with provider's default alert for provider=%s in endpoint=%s", alertIndex, alertType, endpoint.Name)
 }
 provider.ParseWithDefaultAlert(alertProvider.GetDefaultAlert(), endpointAlert)
 }

@@ -402,7 +402,7 @@ func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*core.E
 }
 validProviders = append(validProviders, alertType)
 } else {
-log.Printf("[config][validateAlertingConfig] Ignoring provider=%s because configuration is invalid", alertType)
+log.Printf("[config.validateAlertingConfig] Ignoring provider=%s because configuration is invalid", alertType)
 invalidProviders = append(invalidProviders, alertType)
 alertingConfig.SetAlertingProviderToNil(alertProvider)
 }

@@ -410,5 +410,5 @@ func validateAlertingConfig(alertingConfig *alerting.Config, endpoints []*core.E
 invalidProviders = append(invalidProviders, alertType)
 }
 }
-log.Printf("[config][validateAlertingConfig] configuredProviders=%s; ignoredProviders=%s", validProviders, invalidProviders)
+log.Printf("[config.validateAlertingConfig] configuredProviders=%s; ignoredProviders=%s", validProviders, invalidProviders)
 }
@@ -25,19 +25,19 @@ func Handle(cfg *config.Config) {
 if os.Getenv("ROUTER_TEST") == "true" {
 return
 }
-log.Println("[controller][Handle] Listening on " + cfg.Web.SocketAddress())
+log.Println("[controller.Handle] Listening on " + cfg.Web.SocketAddress())
 if cfg.Web.HasTLS() {
 err := app.ListenTLS(cfg.Web.SocketAddress(), cfg.Web.TLS.CertificateFile, cfg.Web.TLS.PrivateKeyFile)
 if err != nil {
-log.Fatal("[controller][Handle]", err)
+log.Fatal("[controller.Handle]", err)
 }
 } else {
 err := app.Listen(cfg.Web.SocketAddress())
 if err != nil {
-log.Fatal("[controller][Handle]", err)
+log.Fatal("[controller.Handle]", err)
 }
 }
-log.Println("[controller][Handle] Server has shut down successfully")
+log.Println("[controller.Handle] Server has shut down successfully")
 }

 // Shutdown stops the server
@@ -150,7 +150,7 @@ func (c Condition) evaluate(result *Result, dontResolveFailedConditions bool) bo
 return false
 }
 if !success {
-//log.Printf("[Condition][evaluate] Condition '%s' did not succeed because '%s' is false", condition, condition)
+//log.Printf("[Condition.evaluate] Condition '%s' did not succeed because '%s' is false", condition, condition)
 }
 result.ConditionResults = append(result.ConditionResults, &ConditionResult{Condition: conditionToDisplay, Success: success})
 return success
@@ -151,7 +151,7 @@ func (s *SSH) ValidateAndSetDefaults() error {
 }

 // IsEnabled returns whether the endpoint is enabled or not
-func (endpoint Endpoint) IsEnabled() bool {
+func (endpoint *Endpoint) IsEnabled() bool {
 if endpoint.Enabled == nil {
 return true
 }

@@ -159,7 +159,7 @@ func (endpoint Endpoint) IsEnabled() bool {
 }

 // Type returns the endpoint type
-func (endpoint Endpoint) Type() EndpointType {
+func (endpoint *Endpoint) Type() EndpointType {
 switch {
 case endpoint.DNS != nil:
 return EndpointTypeDNS

@@ -264,7 +264,7 @@ func (endpoint *Endpoint) ValidateAndSetDefaults() error {
 }

 // DisplayName returns an identifier made up of the Name and, if not empty, the Group.
-func (endpoint Endpoint) DisplayName() string {
+func (endpoint *Endpoint) DisplayName() string {
 if len(endpoint.Group) > 0 {
 return endpoint.Group + "/" + endpoint.Name
 }

@@ -272,7 +272,7 @@ func (endpoint Endpoint) DisplayName() string {
 }

 // Key returns the unique key for the Endpoint
-func (endpoint Endpoint) Key() string {
+func (endpoint *Endpoint) Key() string {
 return util.ConvertGroupAndEndpointNameToKey(endpoint.Group, endpoint.Name)
 }
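Moving IsEnabled, Type, DisplayName, and Key from value receivers to pointer receivers means each call copies a pointer rather than the whole Endpoint struct, and keeps the receiver kind consistent with methods such as ValidateAndSetDefaults that already use a pointer. It also explains the test changes below: with a pointer receiver, the method is called on the address of the composite literal. A minimal sketch of the pattern, using a hypothetical struct rather than core.Endpoint:

package main

import "fmt"

// Service stands in for a larger struct such as core.Endpoint (hypothetical example).
type Service struct {
	Name    string
	Enabled *bool
}

// IsEnabled uses a pointer receiver, so invoking it copies a pointer
// instead of the entire struct.
func (s *Service) IsEnabled() bool {
	if s.Enabled == nil {
		return true // unset means enabled by default
	}
	return *s.Enabled
}

func main() {
	disabled := false
	fmt.Println((&Service{Name: "a"}).IsEnabled())                     // true
	fmt.Println((&Service{Name: "b", Enabled: &disabled}).IsEnabled()) // false
}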
@@ -241,13 +241,13 @@ func TestEndpoint(t *testing.T) {
 }

 func TestEndpoint_IsEnabled(t *testing.T) {
-if !(Endpoint{Enabled: nil}).IsEnabled() {
+if !(&Endpoint{Enabled: nil}).IsEnabled() {
 t.Error("endpoint.IsEnabled() should've returned true, because Enabled was set to nil")
 }
-if value := false; (Endpoint{Enabled: &value}).IsEnabled() {
+if value := false; (&Endpoint{Enabled: &value}).IsEnabled() {
 t.Error("endpoint.IsEnabled() should've returned false, because Enabled was set to false")
 }
-if value := true; !(Endpoint{Enabled: &value}).IsEnabled() {
+if value := true; !(&Endpoint{Enabled: &value}).IsEnabled() {
 t.Error("Endpoint.IsEnabled() should've returned true, because Enabled was set to true")
 }
 }
main.go
@@ -85,7 +85,7 @@ func initializeStorage(cfg *config.Config) {
 }
 numberOfEndpointStatusesDeleted := store.Get().DeleteAllEndpointStatusesNotInKeys(keys)
 if numberOfEndpointStatusesDeleted > 0 {
-log.Printf("[main][initializeStorage] Deleted %d endpoint statuses because their matching endpoints no longer existed", numberOfEndpointStatusesDeleted)
+log.Printf("[main.initializeStorage] Deleted %d endpoint statuses because their matching endpoints no longer existed", numberOfEndpointStatusesDeleted)
 }
 }

@@ -93,15 +93,15 @@ func listenToConfigurationFileChanges(cfg *config.Config) {
 for {
 time.Sleep(30 * time.Second)
 if cfg.HasLoadedConfigurationBeenModified() {
-log.Println("[main][listenToConfigurationFileChanges] Configuration file has been modified")
+log.Println("[main.listenToConfigurationFileChanges] Configuration file has been modified")
 stop(cfg)
 time.Sleep(time.Second) // Wait a bit to make sure everything is done.
 save()
 updatedConfig, err := loadConfiguration()
 if err != nil {
 if cfg.SkipInvalidConfigUpdate {
-log.Println("[main][listenToConfigurationFileChanges] Failed to load new configuration:", err.Error())
-log.Println("[main][listenToConfigurationFileChanges] The configuration file was updated, but it is not valid. The old configuration will continue being used.")
+log.Println("[main.listenToConfigurationFileChanges] Failed to load new configuration:", err.Error())
+log.Println("[main.listenToConfigurationFileChanges] The configuration file was updated, but it is not valid. The old configuration will continue being used.")
 // Update the last file modification time to avoid trying to process the same invalid configuration again
 cfg.UpdateLastFileModTime()
 continue
@@ -99,7 +99,7 @@ func (c *Config) IsAuthenticated(ctx *fiber.Ctx) bool {
 // TODO: Update g8 to support fasthttp natively? (see g8's fasthttp branch)
 request, err := adaptor.ConvertRequest(ctx, false)
 if err != nil {
-log.Printf("[IsAuthenticated] Unexpected error converting request: %v", err)
+log.Printf("[security.IsAuthenticated] Unexpected error converting request: %v", err)
 return false
 }
 token := c.gate.ExtractTokenFromRequest(request)
@@ -124,7 +124,7 @@ func (c *OIDCConfig) callbackHandler(w http.ResponseWriter, r *http.Request) { /
 return
 }
 }
-log.Printf("[security][callbackHandler] Subject %s is not in the list of allowed subjects", idToken.Subject)
+log.Printf("[security.callbackHandler] Subject %s is not in the list of allowed subjects", idToken.Subject)
 http.Redirect(w, r, "/?error=access_denied", http.StatusFound)
 }
@@ -231,16 +231,16 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
 }
 endpointID, err := s.getEndpointID(tx, endpoint)
 if err != nil {
-if err == common.ErrEndpointNotFound {
+if errors.Is(err, common.ErrEndpointNotFound) {
 // Endpoint doesn't exist in the database, insert it
 if endpointID, err = s.insertEndpoint(tx, endpoint); err != nil {
 _ = tx.Rollback()
-log.Printf("[sql][Insert] Failed to create endpoint with group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to create endpoint with group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 return err
 }
 } else {
 _ = tx.Rollback()
-log.Printf("[sql][Insert] Failed to retrieve id of endpoint with group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to retrieve id of endpoint with group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 return err
 }
 }

@@ -256,7 +256,7 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
 numberOfEvents, err := s.getNumberOfEventsByEndpointID(tx, endpointID)
 if err != nil {
 // Silently fail
-log.Printf("[sql][Insert] Failed to retrieve total number of events for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to retrieve total number of events for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 }
 if numberOfEvents == 0 {
 // There's no events yet, which means we need to add the EventStart and the first healthy/unhealthy event

@@ -266,18 +266,18 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
 })
 if err != nil {
 // Silently fail
-log.Printf("[sql][Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", core.EventStart, endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", core.EventStart, endpoint.Group, endpoint.Name, err.Error())
 }
 event := core.NewEventFromResult(result)
 if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
 // Silently fail
-log.Printf("[sql][Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, endpoint.Group, endpoint.Name, err.Error())
 }
 } else {
 // Get the success value of the previous result
 var lastResultSuccess bool
 if lastResultSuccess, err = s.getLastEndpointResultSuccessValue(tx, endpointID); err != nil {
-log.Printf("[sql][Insert] Failed to retrieve outcome of previous result for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to retrieve outcome of previous result for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 } else {
 // If we managed to retrieve the outcome of the previous result, we'll compare it with the new result.
 // If the final outcome (success or failure) of the previous and the new result aren't the same, it means

@@ -287,7 +287,7 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
 event := core.NewEventFromResult(result)
 if err = s.insertEndpointEvent(tx, endpointID, event); err != nil {
 // Silently fail
-log.Printf("[sql][Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to insert event=%s for group=%s; endpoint=%s: %s", event.Type, endpoint.Group, endpoint.Name, err.Error())
 }
 }
 }

@@ -296,40 +296,40 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
 // (since we're only deleting MaximumNumberOfEvents at a time instead of 1)
 if numberOfEvents > eventsCleanUpThreshold {
 if err = s.deleteOldEndpointEvents(tx, endpointID); err != nil {
-log.Printf("[sql][Insert] Failed to delete old events for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to delete old events for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 }
 }
 }
 // Second, we need to insert the result.
 if err = s.insertEndpointResult(tx, endpointID, result); err != nil {
-log.Printf("[sql][Insert] Failed to insert result for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to insert result for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 _ = tx.Rollback() // If we can't insert the result, we'll rollback now since there's no point continuing
 return err
 }
 // Clean up old results
 numberOfResults, err := s.getNumberOfResultsByEndpointID(tx, endpointID)
 if err != nil {
-log.Printf("[sql][Insert] Failed to retrieve total number of results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to retrieve total number of results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 } else {
 if numberOfResults > resultsCleanUpThreshold {
 if err = s.deleteOldEndpointResults(tx, endpointID); err != nil {
-log.Printf("[sql][Insert] Failed to delete old results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to delete old results for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 }
 }
 }
 // Finally, we need to insert the uptime data.
 // Because the uptime data significantly outlives the results, we can't rely on the results for determining the uptime
 if err = s.updateEndpointUptime(tx, endpointID, result); err != nil {
-log.Printf("[sql][Insert] Failed to update uptime for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to update uptime for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 }
 // Clean up old uptime entries
 ageOfOldestUptimeEntry, err := s.getAgeOfOldestEndpointUptimeEntry(tx, endpointID)
 if err != nil {
-log.Printf("[sql][Insert] Failed to retrieve oldest endpoint uptime entry for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to retrieve oldest endpoint uptime entry for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 } else {
 if ageOfOldestUptimeEntry > uptimeCleanUpThreshold {
 if err = s.deleteOldUptimeEntries(tx, endpointID, time.Now().Add(-(uptimeRetention + time.Hour))); err != nil {
-log.Printf("[sql][Insert] Failed to delete old uptime entries for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
+log.Printf("[sql.Insert] Failed to delete old uptime entries for group=%s; endpoint=%s: %s", endpoint.Group, endpoint.Name, err.Error())
 }
 }
 }

@@ -339,7 +339,7 @@ func (s *Store) Insert(endpoint *core.Endpoint, result *core.Result) error {
 s.writeThroughCache.Delete(cacheKey)
 endpointKey, params, err := extractKeyAndParamsFromCacheKey(cacheKey)
 if err != nil {
-log.Printf("[sql][Insert] Silently deleting cache key %s instead of refreshing due to error: %s", cacheKey, err.Error())
+log.Printf("[sql.Insert] Silently deleting cache key %s instead of refreshing due to error: %s", cacheKey, err.Error())
 continue
 }
 // Retrieve the endpoint status by key, which will in turn refresh the cache

@@ -370,7 +370,7 @@ func (s *Store) DeleteAllEndpointStatusesNotInKeys(keys []string) int {
 result, err = s.db.Exec(query, args...)
 }
 if err != nil {
-log.Printf("[sql][DeleteAllEndpointStatusesNotInKeys] Failed to delete rows that do not belong to any of keys=%v: %s", keys, err.Error())
+log.Printf("[sql.DeleteAllEndpointStatusesNotInKeys] Failed to delete rows that do not belong to any of keys=%v: %s", keys, err.Error())
 return 0
 }
 if s.writeThroughCache != nil {

@@ -406,7 +406,7 @@ func (s *Store) Close() {

 // insertEndpoint inserts an endpoint in the store and returns the generated id of said endpoint
 func (s *Store) insertEndpoint(tx *sql.Tx, endpoint *core.Endpoint) (int64, error) {
-//log.Printf("[sql][insertEndpoint] Inserting endpoint with group=%s and name=%s", endpoint.Group, endpoint.Name)
+//log.Printf("[sql.insertEndpoint] Inserting endpoint with group=%s and name=%s", endpoint.Group, endpoint.Name)
 var id int64
 err := tx.QueryRow(
 "INSERT INTO endpoints (endpoint_key, endpoint_name, endpoint_group) VALUES ($1, $2, $3) RETURNING endpoint_id",

@@ -531,12 +531,12 @@ func (s *Store) getEndpointStatusByKey(tx *sql.Tx, key string, parameters *pagin
 endpointStatus := core.NewEndpointStatus(group, endpointName)
 if parameters.EventsPageSize > 0 {
 if endpointStatus.Events, err = s.getEndpointEventsByEndpointID(tx, endpointID, parameters.EventsPage, parameters.EventsPageSize); err != nil {
-log.Printf("[sql][getEndpointStatusByKey] Failed to retrieve events for key=%s: %s", key, err.Error())
+log.Printf("[sql.getEndpointStatusByKey] Failed to retrieve events for key=%s: %s", key, err.Error())
 }
 }
 if parameters.ResultsPageSize > 0 {
 if endpointStatus.Results, err = s.getEndpointResultsByEndpointID(tx, endpointID, parameters.ResultsPage, parameters.ResultsPageSize); err != nil {
-log.Printf("[sql][getEndpointStatusByKey] Failed to retrieve results for key=%s: %s", key, err.Error())
+log.Printf("[sql.getEndpointStatusByKey] Failed to retrieve results for key=%s: %s", key, err.Error())
 }
 }
 if s.writeThroughCache != nil {

@@ -611,7 +611,7 @@ func (s *Store) getEndpointResultsByEndpointID(tx *sql.Tx, endpointID int64, pag
 var joinedErrors string
 err = rows.Scan(&id, &result.Success, &joinedErrors, &result.Connected, &result.HTTPStatus, &result.DNSRCode, &result.CertificateExpiration, &result.DomainExpiration, &result.Hostname, &result.IP, &result.Duration, &result.Timestamp)
 if err != nil {
-log.Printf("[sql][getEndpointResultsByEndpointID] Silently failed to retrieve endpoint result for endpointID=%d: %s", endpointID, err.Error())
+log.Printf("[sql.getEndpointResultsByEndpointID] Silently failed to retrieve endpoint result for endpointID=%d: %s", endpointID, err.Error())
 err = nil
 }
 if len(joinedErrors) != 0 {
@@ -75,7 +75,7 @@ var (
 func Get() Store {
 if !initialized {
 // This only happens in tests
-log.Println("[store][Get] Provider requested before it was initialized, automatically initializing")
+log.Println("[store.Get] Provider requested before it was initialized, automatically initializing")
 err := Initialize(nil)
 if err != nil {
 panic("failed to automatically initialize store: " + err.Error())

@@ -94,11 +94,11 @@ func Initialize(cfg *storage.Config) error {
 }
 if cfg == nil {
 // This only happens in tests
-log.Println("[store][Initialize] nil storage config passed as parameter. This should only happen in tests. Defaulting to an empty config.")
+log.Println("[store.Initialize] nil storage config passed as parameter. This should only happen in tests. Defaulting to an empty config.")
 cfg = &storage.Config{}
 }
 if len(cfg.Path) == 0 && cfg.Type != storage.TypePostgres {
-log.Printf("[store][Initialize] Creating storage provider of type=%s", cfg.Type)
+log.Printf("[store.Initialize] Creating storage provider of type=%s", cfg.Type)
 }
 ctx, cancelFunc = context.WithCancel(context.Background())
 switch cfg.Type {

@@ -120,13 +120,13 @@ func autoSave(ctx context.Context, store Store, interval time.Duration) {
 for {
 select {
 case <-ctx.Done():
-log.Printf("[store][autoSave] Stopping active job")
+log.Printf("[store.autoSave] Stopping active job")
 return
 case <-time.After(interval):
-log.Printf("[store][autoSave] Saving")
+log.Printf("[store.autoSave] Saving")
 err := store.Save()
 if err != nil {
-log.Println("[store][autoSave] Save failed:", err.Error())
+log.Println("[store.autoSave] Save failed:", err.Error())
 }
 }
 }
@@ -31,13 +31,13 @@ func handleAlertsToTrigger(endpoint *core.Endpoint, result *core.Result, alertin
 }
 if endpointAlert.Triggered {
 if debug {
-log.Printf("[watchdog][handleAlertsToTrigger] Alert for endpoint=%s with description='%s' has already been TRIGGERED, skipping", endpoint.Name, endpointAlert.GetDescription())
+log.Printf("[watchdog.handleAlertsToTrigger] Alert for endpoint=%s with description='%s' has already been TRIGGERED, skipping", endpoint.Name, endpointAlert.GetDescription())
 }
 continue
 }
 alertProvider := alertingConfig.GetAlertingProviderByAlertType(endpointAlert.Type)
 if alertProvider != nil {
-log.Printf("[watchdog][handleAlertsToTrigger] Sending %s alert because alert for endpoint=%s with description='%s' has been TRIGGERED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
+log.Printf("[watchdog.handleAlertsToTrigger] Sending %s alert because alert for endpoint=%s with description='%s' has been TRIGGERED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
 var err error
 if os.Getenv("MOCK_ALERT_PROVIDER") == "true" {
 if os.Getenv("MOCK_ALERT_PROVIDER_ERROR") == "true" {

@@ -47,12 +47,12 @@ func handleAlertsToTrigger(endpoint *core.Endpoint, result *core.Result, alertin
 err = alertProvider.Send(endpoint, endpointAlert, result, false)
 }
 if err != nil {
-log.Printf("[watchdog][handleAlertsToTrigger] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
+log.Printf("[watchdog.handleAlertsToTrigger] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
 } else {
 endpointAlert.Triggered = true
 }
 } else {
-log.Printf("[watchdog][handleAlertsToResolve] Not sending alert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", endpointAlert.Type)
+log.Printf("[watchdog.handleAlertsToResolve] Not sending alert of type=%s despite being TRIGGERED, because the provider wasn't configured properly", endpointAlert.Type)
 }
 }
 }

@@ -71,13 +71,13 @@ func handleAlertsToResolve(endpoint *core.Endpoint, result *core.Result, alertin
 }
 alertProvider := alertingConfig.GetAlertingProviderByAlertType(endpointAlert.Type)
 if alertProvider != nil {
-log.Printf("[watchdog][handleAlertsToResolve] Sending %s alert because alert for endpoint=%s with description='%s' has been RESOLVED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
+log.Printf("[watchdog.handleAlertsToResolve] Sending %s alert because alert for endpoint=%s with description='%s' has been RESOLVED", endpointAlert.Type, endpoint.Name, endpointAlert.GetDescription())
 err := alertProvider.Send(endpoint, endpointAlert, result, true)
 if err != nil {
-log.Printf("[watchdog][handleAlertsToResolve] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
+log.Printf("[watchdog.handleAlertsToResolve] Failed to send an alert for endpoint=%s: %s", endpoint.Name, err.Error())
 }
 } else {
-log.Printf("[watchdog][handleAlertsToResolve] Not sending alert of type=%s despite being RESOLVED, because the provider wasn't configured properly", endpointAlert.Type)
+log.Printf("[watchdog.handleAlertsToResolve] Not sending alert of type=%s despite being RESOLVED, because the provider wasn't configured properly", endpointAlert.Type)
 }
 }
 endpoint.NumberOfFailuresInARow = 0
@@ -44,7 +44,7 @@ func monitor(endpoint *core.Endpoint, alertingConfig *alerting.Config, maintenan
 for {
 select {
 case <-ctx.Done():
-log.Printf("[watchdog][monitor] Canceling current execution of group=%s; endpoint=%s", endpoint.Group, endpoint.Name)
+log.Printf("[watchdog.monitor] Canceling current execution of group=%s; endpoint=%s", endpoint.Group, endpoint.Name)
 return
 case <-time.After(endpoint.Interval):
 execute(endpoint, alertingConfig, maintenanceConfig, connectivityConfig, disableMonitoringLock, enabledMetrics, debug)

@@ -61,11 +61,11 @@ func execute(endpoint *core.Endpoint, alertingConfig *alerting.Config, maintenan
 }
 // If there's a connectivity checker configured, check if Gatus has internet connectivity
 if connectivityConfig != nil && connectivityConfig.Checker != nil && !connectivityConfig.Checker.IsConnected() {
-log.Println("[watchdog][execute] No connectivity; skipping execution")
+log.Println("[watchdog.execute] No connectivity; skipping execution")
 return
 }
 if debug {
-log.Printf("[watchdog][execute] Monitoring group=%s; endpoint=%s", endpoint.Group, endpoint.Name)
+log.Printf("[watchdog.execute] Monitoring group=%s; endpoint=%s", endpoint.Group, endpoint.Name)
 }
 result := endpoint.EvaluateHealth()
 if enabledMetrics {

@@ -73,25 +73,25 @@ func execute(endpoint *core.Endpoint, alertingConfig *alerting.Config, maintenan
 }
 UpdateEndpointStatuses(endpoint, result)
 if debug && !result.Success {
-log.Printf("[watchdog][execute] Monitored group=%s; endpoint=%s; success=%v; errors=%d; duration=%s; body=%s", endpoint.Group, endpoint.Name, result.Success, len(result.Errors), result.Duration.Round(time.Millisecond), result.Body)
+log.Printf("[watchdog.execute] Monitored group=%s; endpoint=%s; success=%v; errors=%d; duration=%s; body=%s", endpoint.Group, endpoint.Name, result.Success, len(result.Errors), result.Duration.Round(time.Millisecond), result.Body)
 } else {
-log.Printf("[watchdog][execute] Monitored group=%s; endpoint=%s; success=%v; errors=%d; duration=%s", endpoint.Group, endpoint.Name, result.Success, len(result.Errors), result.Duration.Round(time.Millisecond))
+log.Printf("[watchdog.execute] Monitored group=%s; endpoint=%s; success=%v; errors=%d; duration=%s", endpoint.Group, endpoint.Name, result.Success, len(result.Errors), result.Duration.Round(time.Millisecond))
 }
 if !maintenanceConfig.IsUnderMaintenance() {
 // TODO: Consider moving this after the monitoring lock is unlocked? I mean, how much noise can a single alerting provider cause...
 HandleAlerting(endpoint, result, alertingConfig, debug)
 } else if debug {
-log.Println("[watchdog][execute] Not handling alerting because currently in the maintenance window")
+log.Println("[watchdog.execute] Not handling alerting because currently in the maintenance window")
 }
 if debug {
-log.Printf("[watchdog][execute] Waiting for interval=%s before monitoring group=%s endpoint=%s again", endpoint.Interval, endpoint.Group, endpoint.Name)
+log.Printf("[watchdog.execute] Waiting for interval=%s before monitoring group=%s endpoint=%s again", endpoint.Interval, endpoint.Group, endpoint.Name)
 }
 }

 // UpdateEndpointStatuses updates the slice of endpoint statuses
 func UpdateEndpointStatuses(endpoint *core.Endpoint, result *core.Result) {
 if err := store.Get().Insert(endpoint, result); err != nil {
-log.Println("[watchdog][UpdateEndpointStatuses] Failed to insert data in storage:", err.Error())
+log.Println("[watchdog.UpdateEndpointStatuses] Failed to insert data in storage:", err.Error())
 }
 }