mirror of
https://github.com/TwiN/gatus.git
synced 2024-12-14 11:58:04 +00:00
Implement paging and refactor stores to match new store interface with paging
This commit is contained in:
parent
677c7faffe
commit
7126d36d85
19 changed files with 421 additions and 404 deletions
|
@ -8,6 +8,7 @@ import (
|
||||||
|
|
||||||
"github.com/TwinProduction/gatus/core"
|
"github.com/TwinProduction/gatus/core"
|
||||||
"github.com/TwinProduction/gatus/storage"
|
"github.com/TwinProduction/gatus/storage"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -25,7 +26,7 @@ func badgeHandler(writer http.ResponseWriter, request *http.Request) {
|
||||||
}
|
}
|
||||||
identifier := variables["identifier"]
|
identifier := variables["identifier"]
|
||||||
key := strings.TrimSuffix(identifier, ".svg")
|
key := strings.TrimSuffix(identifier, ".svg")
|
||||||
serviceStatus := storage.Get().GetServiceStatusByKey(key)
|
serviceStatus := storage.Get().GetServiceStatusByKey(key, paging.NewServiceStatusParams().WithUptime())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
writer.WriteHeader(http.StatusNotFound)
|
writer.WriteHeader(http.StatusNotFound)
|
||||||
_, _ = writer.Write([]byte("Requested service not found"))
|
_, _ = writer.Write([]byte("Requested service not found"))
|
||||||
|
|
|
@ -13,8 +13,10 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/TwinProduction/gatus/config"
|
"github.com/TwinProduction/gatus/config"
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
"github.com/TwinProduction/gatus/security"
|
"github.com/TwinProduction/gatus/security"
|
||||||
"github.com/TwinProduction/gatus/storage"
|
"github.com/TwinProduction/gatus/storage"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
"github.com/TwinProduction/gocache"
|
"github.com/TwinProduction/gocache"
|
||||||
"github.com/TwinProduction/health"
|
"github.com/TwinProduction/health"
|
||||||
"github.com/gorilla/mux"
|
"github.com/gorilla/mux"
|
||||||
|
@ -115,7 +117,7 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
|
||||||
var err error
|
var err error
|
||||||
buffer := &bytes.Buffer{}
|
buffer := &bytes.Buffer{}
|
||||||
gzipWriter := gzip.NewWriter(buffer)
|
gzipWriter := gzip.NewWriter(buffer)
|
||||||
data, err = json.Marshal(storage.Get().GetAllServiceStatusesWithResultPagination(page, pageSize))
|
data, err = json.Marshal(storage.Get().GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(page, pageSize)))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Printf("[controller][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error())
|
log.Printf("[controller][serviceStatusesHandler] Unable to marshal object to JSON: %s", err.Error())
|
||||||
writer.WriteHeader(http.StatusInternalServerError)
|
writer.WriteHeader(http.StatusInternalServerError)
|
||||||
|
@ -142,7 +144,7 @@ func serviceStatusesHandler(writer http.ResponseWriter, r *http.Request) {
|
||||||
func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
|
func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
|
||||||
page, pageSize := extractPageAndPageSizeFromRequest(r)
|
page, pageSize := extractPageAndPageSizeFromRequest(r)
|
||||||
vars := mux.Vars(r)
|
vars := mux.Vars(r)
|
||||||
serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"])
|
serviceStatus := storage.Get().GetServiceStatusByKey(vars["key"], paging.NewServiceStatusParams().WithResults(page, pageSize).WithEvents(1, core.MaximumNumberOfEvents).WithUptime())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
log.Printf("[controller][serviceStatusHandler] Service with key=%s not found", vars["key"])
|
log.Printf("[controller][serviceStatusHandler] Service with key=%s not found", vars["key"])
|
||||||
writer.WriteHeader(http.StatusNotFound)
|
writer.WriteHeader(http.StatusNotFound)
|
||||||
|
@ -150,7 +152,7 @@ func serviceStatusHandler(writer http.ResponseWriter, r *http.Request) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
data := map[string]interface{}{
|
data := map[string]interface{}{
|
||||||
"serviceStatus": serviceStatus.WithResultPagination(page, pageSize),
|
"serviceStatus": serviceStatus,
|
||||||
// The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can
|
// The following fields, while present on core.ServiceStatus, are annotated to remain hidden so that we can
|
||||||
// expose only the necessary data on /api/v1/statuses.
|
// expose only the necessary data on /api/v1/statuses.
|
||||||
// Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here
|
// Since the /api/v1/statuses/{key} endpoint does need this data, however, we explicitly expose it here
|
||||||
|
|
|
@ -3,6 +3,8 @@ package controller
|
||||||
import (
|
import (
|
||||||
"net/http"
|
"net/http"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -13,7 +15,7 @@ const (
|
||||||
DefaultPageSize = 20
|
DefaultPageSize = 20
|
||||||
|
|
||||||
// MaximumPageSize is the maximum page size allowed
|
// MaximumPageSize is the maximum page size allowed
|
||||||
MaximumPageSize = 100
|
MaximumPageSize = core.MaximumNumberOfResults
|
||||||
)
|
)
|
||||||
|
|
||||||
func extractPageAndPageSizeFromRequest(r *http.Request) (page int, pageSize int) {
|
func extractPageAndPageSizeFromRequest(r *http.Request) (page int, pageSize int) {
|
||||||
|
|
|
@ -48,58 +48,3 @@ func NewServiceStatus(serviceKey, serviceGroup, serviceName string) *ServiceStat
|
||||||
Uptime: NewUptime(),
|
Uptime: NewUptime(),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// WithResultPagination returns a shallow copy of the ServiceStatus with only the results
|
|
||||||
// within the range defined by the page and pageSize parameters
|
|
||||||
func (ss ServiceStatus) WithResultPagination(page, pageSize int) *ServiceStatus {
|
|
||||||
shallowCopy := ss
|
|
||||||
numberOfResults := len(shallowCopy.Results)
|
|
||||||
start := numberOfResults - (page * pageSize)
|
|
||||||
end := numberOfResults - ((page - 1) * pageSize)
|
|
||||||
if start > numberOfResults {
|
|
||||||
start = -1
|
|
||||||
} else if start < 0 {
|
|
||||||
start = 0
|
|
||||||
}
|
|
||||||
if end > numberOfResults {
|
|
||||||
end = numberOfResults
|
|
||||||
}
|
|
||||||
if start < 0 || end < 0 {
|
|
||||||
shallowCopy.Results = []*Result{}
|
|
||||||
} else {
|
|
||||||
shallowCopy.Results = shallowCopy.Results[start:end]
|
|
||||||
}
|
|
||||||
return &shallowCopy
|
|
||||||
}
|
|
||||||
|
|
||||||
// AddResult adds a Result to ServiceStatus.Results and makes sure that there are
|
|
||||||
// no more than MaximumNumberOfResults results in the Results slice
|
|
||||||
func (ss *ServiceStatus) AddResult(result *Result) {
|
|
||||||
if len(ss.Results) > 0 {
|
|
||||||
// Check if there's any change since the last result
|
|
||||||
// OR there's only 1 event, which only happens when there's a start event
|
|
||||||
if ss.Results[len(ss.Results)-1].Success != result.Success || len(ss.Events) == 1 {
|
|
||||||
event := &Event{Timestamp: result.Timestamp}
|
|
||||||
if result.Success {
|
|
||||||
event.Type = EventHealthy
|
|
||||||
} else {
|
|
||||||
event.Type = EventUnhealthy
|
|
||||||
}
|
|
||||||
ss.Events = append(ss.Events, event)
|
|
||||||
if len(ss.Events) > MaximumNumberOfEvents {
|
|
||||||
// Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
|
|
||||||
// more than one extra element, we can get rid of all of them at once and thus returning the slice to a
|
|
||||||
// length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
|
|
||||||
ss.Events = ss.Events[len(ss.Events)-MaximumNumberOfEvents:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
ss.Results = append(ss.Results, result)
|
|
||||||
if len(ss.Results) > MaximumNumberOfResults {
|
|
||||||
// Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
|
|
||||||
// than one extra element, we can get rid of all of them at once and thus returning the slice to a length of
|
|
||||||
// MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
|
|
||||||
ss.Results = ss.Results[len(ss.Results)-MaximumNumberOfResults:]
|
|
||||||
}
|
|
||||||
ss.Uptime.ProcessResult(result)
|
|
||||||
}
|
|
||||||
|
|
|
@ -1,92 +0,0 @@
|
||||||
package core
|
|
||||||
|
|
||||||
import (
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
)
|
|
||||||
|
|
||||||
var (
|
|
||||||
firstCondition = Condition("[STATUS] == 200")
|
|
||||||
secondCondition = Condition("[RESPONSE_TIME] < 500")
|
|
||||||
thirdCondition = Condition("[CERTIFICATE_EXPIRATION] < 72h")
|
|
||||||
|
|
||||||
timestamp = time.Now()
|
|
||||||
|
|
||||||
testService = Service{
|
|
||||||
Name: "name",
|
|
||||||
Group: "group",
|
|
||||||
URL: "https://example.org/what/ever",
|
|
||||||
Method: "GET",
|
|
||||||
Body: "body",
|
|
||||||
Interval: 30 * time.Second,
|
|
||||||
Conditions: []*Condition{&firstCondition, &secondCondition, &thirdCondition},
|
|
||||||
Alerts: nil,
|
|
||||||
Insecure: false,
|
|
||||||
NumberOfFailuresInARow: 0,
|
|
||||||
NumberOfSuccessesInARow: 0,
|
|
||||||
}
|
|
||||||
testSuccessfulResult = Result{
|
|
||||||
Hostname: "example.org",
|
|
||||||
IP: "127.0.0.1",
|
|
||||||
HTTPStatus: 200,
|
|
||||||
body: []byte("body"),
|
|
||||||
Errors: nil,
|
|
||||||
Connected: true,
|
|
||||||
Success: true,
|
|
||||||
Timestamp: timestamp,
|
|
||||||
Duration: 150 * time.Millisecond,
|
|
||||||
CertificateExpiration: 10 * time.Hour,
|
|
||||||
ConditionResults: []*ConditionResult{
|
|
||||||
{
|
|
||||||
Condition: "[STATUS] == 200",
|
|
||||||
Success: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Condition: "[RESPONSE_TIME] < 500",
|
|
||||||
Success: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
|
||||||
Success: true,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
testUnsuccessfulResult = Result{
|
|
||||||
Hostname: "example.org",
|
|
||||||
IP: "127.0.0.1",
|
|
||||||
HTTPStatus: 200,
|
|
||||||
body: []byte("body"),
|
|
||||||
Errors: []string{"error-1", "error-2"},
|
|
||||||
Connected: true,
|
|
||||||
Success: false,
|
|
||||||
Timestamp: timestamp,
|
|
||||||
Duration: 750 * time.Millisecond,
|
|
||||||
CertificateExpiration: 10 * time.Hour,
|
|
||||||
ConditionResults: []*ConditionResult{
|
|
||||||
{
|
|
||||||
Condition: "[STATUS] == 200",
|
|
||||||
Success: true,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Condition: "[RESPONSE_TIME] < 500",
|
|
||||||
Success: false,
|
|
||||||
},
|
|
||||||
{
|
|
||||||
Condition: "[CERTIFICATE_EXPIRATION] < 72h",
|
|
||||||
Success: false,
|
|
||||||
},
|
|
||||||
},
|
|
||||||
}
|
|
||||||
)
|
|
||||||
|
|
||||||
func BenchmarkServiceStatus_WithResultPagination(b *testing.B) {
|
|
||||||
service := &testService
|
|
||||||
serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
|
|
||||||
for i := 0; i < MaximumNumberOfResults; i++ {
|
|
||||||
serviceStatus.AddResult(&testSuccessfulResult)
|
|
||||||
}
|
|
||||||
for n := 0; n < b.N; n++ {
|
|
||||||
serviceStatus.WithResultPagination(1, 20)
|
|
||||||
}
|
|
||||||
b.ReportAllocs()
|
|
||||||
}
|
|
|
@ -2,7 +2,6 @@ package core
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestNewServiceStatus(t *testing.T) {
|
func TestNewServiceStatus(t *testing.T) {
|
||||||
|
@ -18,49 +17,3 @@ func TestNewServiceStatus(t *testing.T) {
|
||||||
t.Errorf("expected %s, got %s", "group_name", serviceStatus.Key)
|
t.Errorf("expected %s, got %s", "group_name", serviceStatus.Key)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServiceStatus_AddResult(t *testing.T) {
|
|
||||||
service := &Service{Name: "name", Group: "group"}
|
|
||||||
serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
|
|
||||||
for i := 0; i < MaximumNumberOfResults+10; i++ {
|
|
||||||
serviceStatus.AddResult(&Result{Timestamp: time.Now()})
|
|
||||||
}
|
|
||||||
if len(serviceStatus.Results) != MaximumNumberOfResults {
|
|
||||||
t.Errorf("expected serviceStatus.Results to not exceed a length of %d", MaximumNumberOfResults)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestServiceStatus_WithResultPagination(t *testing.T) {
|
|
||||||
service := &Service{Name: "name", Group: "group"}
|
|
||||||
serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
|
|
||||||
for i := 0; i < 25; i++ {
|
|
||||||
serviceStatus.AddResult(&Result{Timestamp: time.Now()})
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(1, 1).Results) != 1 {
|
|
||||||
t.Errorf("expected to have 1 result")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(5, 0).Results) != 0 {
|
|
||||||
t.Errorf("expected to have 0 results")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(-1, 20).Results) != 0 {
|
|
||||||
t.Errorf("expected to have 0 result, because the page was invalid")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(1, -1).Results) != 0 {
|
|
||||||
t.Errorf("expected to have 0 result, because the page size was invalid")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(1, 10).Results) != 10 {
|
|
||||||
t.Errorf("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(2, 10).Results) != 10 {
|
|
||||||
t.Errorf("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(3, 10).Results) != 5 {
|
|
||||||
t.Errorf("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(4, 10).Results) != 0 {
|
|
||||||
t.Errorf("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
|
|
||||||
}
|
|
||||||
if len(serviceStatus.WithResultPagination(1, 50).Results) != 25 {
|
|
||||||
t.Errorf("expected to have 25 results, because there's only 25 results")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
107
core/uptime.go
107
core/uptime.go
|
@ -1,7 +1,6 @@
|
||||||
package core
|
package core
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"log"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -44,109 +43,3 @@ func NewUptime() *Uptime {
|
||||||
HourlyStatistics: make(map[int64]*HourlyUptimeStatistics),
|
HourlyStatistics: make(map[int64]*HourlyUptimeStatistics),
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
// ProcessResult processes the result by extracting the relevant from the result and recalculating the uptime
|
|
||||||
// if necessary
|
|
||||||
func (uptime *Uptime) ProcessResult(result *Result) {
|
|
||||||
// XXX: Remove this on v3.0.0
|
|
||||||
if len(uptime.SuccessfulExecutionsPerHour) != 0 || len(uptime.TotalExecutionsPerHour) != 0 {
|
|
||||||
uptime.migrateToHourlyStatistics()
|
|
||||||
}
|
|
||||||
if uptime.HourlyStatistics == nil {
|
|
||||||
uptime.HourlyStatistics = make(map[int64]*HourlyUptimeStatistics)
|
|
||||||
}
|
|
||||||
unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
|
|
||||||
hourlyStats, _ := uptime.HourlyStatistics[unixTimestampFlooredAtHour]
|
|
||||||
if hourlyStats == nil {
|
|
||||||
hourlyStats = &HourlyUptimeStatistics{}
|
|
||||||
uptime.HourlyStatistics[unixTimestampFlooredAtHour] = hourlyStats
|
|
||||||
}
|
|
||||||
if result.Success {
|
|
||||||
hourlyStats.SuccessfulExecutions++
|
|
||||||
}
|
|
||||||
hourlyStats.TotalExecutions++
|
|
||||||
hourlyStats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
|
|
||||||
// Clean up only when we're starting to have too many useless keys
|
|
||||||
// Note that this is only triggered when there are more entries than there should be after
|
|
||||||
// 10 days, despite the fact that we are deleting everything that's older than 7 days.
|
|
||||||
// This is to prevent re-iterating on every `ProcessResult` as soon as the uptime has been logged for 7 days.
|
|
||||||
if len(uptime.HourlyStatistics) > numberOfHoursInTenDays {
|
|
||||||
sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour)).Unix()
|
|
||||||
for hourlyUnixTimestamp := range uptime.HourlyStatistics {
|
|
||||||
if sevenDaysAgo > hourlyUnixTimestamp {
|
|
||||||
delete(uptime.HourlyStatistics, hourlyUnixTimestamp)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if result.Success {
|
|
||||||
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 100%
|
|
||||||
// If they're all 100%, then recalculating the uptime would be useless unless
|
|
||||||
// the result added was a failure (!result.Success)
|
|
||||||
if uptime.LastSevenDays != 1 || uptime.LastTwentyFourHours != 1 || uptime.LastHour != 1 {
|
|
||||||
uptime.recalculate()
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 0%
|
|
||||||
// If they're all 0%, then recalculating the uptime would be useless unless
|
|
||||||
// the result added was a success (result.Success)
|
|
||||||
if uptime.LastSevenDays != 0 || uptime.LastTwentyFourHours != 0 || uptime.LastHour != 0 {
|
|
||||||
uptime.recalculate()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (uptime *Uptime) recalculate() {
|
|
||||||
uptimeBrackets := make(map[string]uint64)
|
|
||||||
now := time.Now()
|
|
||||||
// The oldest uptime bracket starts 7 days ago, so we'll start from there
|
|
||||||
timestamp := now.Add(-sevenDays)
|
|
||||||
for now.Sub(timestamp) >= 0 {
|
|
||||||
hourlyUnixTimestamp := timestamp.Truncate(time.Hour).Unix()
|
|
||||||
hourlyStats := uptime.HourlyStatistics[hourlyUnixTimestamp]
|
|
||||||
if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
|
|
||||||
timestamp = timestamp.Add(time.Hour)
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
uptimeBrackets["7d_success"] += hourlyStats.SuccessfulExecutions
|
|
||||||
uptimeBrackets["7d_total"] += hourlyStats.TotalExecutions
|
|
||||||
if now.Sub(timestamp) <= 24*time.Hour {
|
|
||||||
uptimeBrackets["24h_success"] += hourlyStats.SuccessfulExecutions
|
|
||||||
uptimeBrackets["24h_total"] += hourlyStats.TotalExecutions
|
|
||||||
}
|
|
||||||
if now.Sub(timestamp) <= time.Hour {
|
|
||||||
uptimeBrackets["1h_success"] += hourlyStats.SuccessfulExecutions
|
|
||||||
uptimeBrackets["1h_total"] += hourlyStats.TotalExecutions
|
|
||||||
}
|
|
||||||
timestamp = timestamp.Add(time.Hour)
|
|
||||||
}
|
|
||||||
if uptimeBrackets["7d_total"] > 0 {
|
|
||||||
uptime.LastSevenDays = float64(uptimeBrackets["7d_success"]) / float64(uptimeBrackets["7d_total"])
|
|
||||||
}
|
|
||||||
if uptimeBrackets["24h_total"] > 0 {
|
|
||||||
uptime.LastTwentyFourHours = float64(uptimeBrackets["24h_success"]) / float64(uptimeBrackets["24h_total"])
|
|
||||||
}
|
|
||||||
if uptimeBrackets["1h_total"] > 0 {
|
|
||||||
uptime.LastHour = float64(uptimeBrackets["1h_success"]) / float64(uptimeBrackets["1h_total"])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// XXX: Remove this on v3.0.0
|
|
||||||
// Deprecated
|
|
||||||
func (uptime *Uptime) migrateToHourlyStatistics() {
|
|
||||||
log.Println("[migrateToHourlyStatistics] Got", len(uptime.SuccessfulExecutionsPerHour), "entries for successful executions and", len(uptime.TotalExecutionsPerHour), "entries for total executions")
|
|
||||||
uptime.HourlyStatistics = make(map[int64]*HourlyUptimeStatistics)
|
|
||||||
for hourlyUnixTimestamp, totalExecutions := range uptime.TotalExecutionsPerHour {
|
|
||||||
if totalExecutions == 0 {
|
|
||||||
log.Println("[migrateToHourlyStatistics] Skipping entry at", hourlyUnixTimestamp, "because total number of executions is 0")
|
|
||||||
continue
|
|
||||||
}
|
|
||||||
uptime.HourlyStatistics[hourlyUnixTimestamp] = &HourlyUptimeStatistics{
|
|
||||||
TotalExecutions: totalExecutions,
|
|
||||||
SuccessfulExecutions: uptime.SuccessfulExecutionsPerHour[hourlyUnixTimestamp],
|
|
||||||
TotalExecutionsResponseTime: 0,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
log.Println("[migrateToHourlyStatistics] Migrated", len(uptime.HourlyStatistics), "entries")
|
|
||||||
uptime.SuccessfulExecutionsPerHour = nil
|
|
||||||
uptime.TotalExecutionsPerHour = nil
|
|
||||||
}
|
|
||||||
|
|
|
@ -8,6 +8,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/TwinProduction/gatus/core"
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
"github.com/TwinProduction/gatus/util"
|
"github.com/TwinProduction/gatus/util"
|
||||||
_ "modernc.org/sqlite"
|
_ "modernc.org/sqlite"
|
||||||
)
|
)
|
||||||
|
@ -140,11 +141,9 @@ func (s *Store) createSchema() error {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
// TODO: add parameter event and uptime & only fetch them if necessary
|
// GetAllServiceStatuses returns all monitored core.ServiceStatus
|
||||||
|
|
||||||
// GetAllServiceStatusesWithResultPagination returns all monitored core.ServiceStatus
|
|
||||||
// with a subset of core.Result defined by the page and pageSize parameters
|
// with a subset of core.Result defined by the page and pageSize parameters
|
||||||
func (s *Store) GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus {
|
func (s *Store) GetAllServiceStatuses(params *paging.ServiceStatusParams) map[string]*core.ServiceStatus {
|
||||||
tx, err := s.db.Begin()
|
tx, err := s.db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -156,7 +155,7 @@ func (s *Store) GetAllServiceStatusesWithResultPagination(page, pageSize int) ma
|
||||||
}
|
}
|
||||||
serviceStatuses := make(map[string]*core.ServiceStatus, len(keys))
|
serviceStatuses := make(map[string]*core.ServiceStatus, len(keys))
|
||||||
for _, key := range keys {
|
for _, key := range keys {
|
||||||
serviceStatus, err := s.getServiceStatusByKey(tx, key, 0, 0, page, pageSize, false)
|
serviceStatus, err := s.getServiceStatusByKey(tx, key, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
@ -169,17 +168,17 @@ func (s *Store) GetAllServiceStatusesWithResultPagination(page, pageSize int) ma
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetServiceStatus returns the service status for a given service name in the given group
|
// GetServiceStatus returns the service status for a given service name in the given group
|
||||||
func (s *Store) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
|
func (s *Store) GetServiceStatus(groupName, serviceName string, parameters *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||||
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName))
|
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName), parameters)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetServiceStatusByKey returns the service status for a given key
|
// GetServiceStatusByKey returns the service status for a given key
|
||||||
func (s *Store) GetServiceStatusByKey(key string) *core.ServiceStatus {
|
func (s *Store) GetServiceStatusByKey(key string, params *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||||
tx, err := s.db.Begin()
|
tx, err := s.db.Begin()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
serviceStatus, err := s.getServiceStatusByKey(tx, key, 1, core.MaximumNumberOfEvents, 1, core.MaximumNumberOfResults, true)
|
serviceStatus, err := s.getServiceStatusByKey(tx, key, params)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
_ = tx.Rollback()
|
_ = tx.Rollback()
|
||||||
return nil
|
return nil
|
||||||
|
@ -439,23 +438,23 @@ func (s *Store) getAllServiceKeys(tx *sql.Tx) (keys []string, err error) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Store) getServiceStatusByKey(tx *sql.Tx, key string, eventsPage, eventsPageSize, resultsPage, resultsPageSize int, includeUptime bool) (*core.ServiceStatus, error) {
|
func (s *Store) getServiceStatusByKey(tx *sql.Tx, key string, parameters *paging.ServiceStatusParams) (*core.ServiceStatus, error) {
|
||||||
serviceID, serviceName, serviceGroup, err := s.getServiceIDGroupAndNameByKey(tx, key)
|
serviceID, serviceName, serviceGroup, err := s.getServiceIDGroupAndNameByKey(tx, key)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
serviceStatus := core.NewServiceStatus(key, serviceGroup, serviceName)
|
serviceStatus := core.NewServiceStatus(key, serviceGroup, serviceName)
|
||||||
if eventsPageSize > 0 {
|
if parameters.EventsPageSize > 0 {
|
||||||
if serviceStatus.Events, err = s.getEventsByServiceID(tx, serviceID, eventsPage, eventsPageSize); err != nil {
|
if serviceStatus.Events, err = s.getEventsByServiceID(tx, serviceID, parameters.EventsPage, parameters.EventsPageSize); err != nil {
|
||||||
log.Printf("[database][getServiceStatusByKey] Failed to retrieve events for key=%s: %s", key, err.Error())
|
log.Printf("[database][getServiceStatusByKey] Failed to retrieve events for key=%s: %s", key, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if resultsPageSize > 0 {
|
if parameters.ResultsPageSize > 0 {
|
||||||
if serviceStatus.Results, err = s.getResultsByServiceID(tx, serviceID, resultsPage, resultsPageSize); err != nil {
|
if serviceStatus.Results, err = s.getResultsByServiceID(tx, serviceID, parameters.ResultsPage, parameters.ResultsPageSize); err != nil {
|
||||||
log.Printf("[database][getServiceStatusByKey] Failed to retrieve results for key=%s: %s", key, err.Error())
|
log.Printf("[database][getServiceStatusByKey] Failed to retrieve results for key=%s: %s", key, err.Error())
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if includeUptime {
|
if parameters.IncludeUptime {
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
serviceStatus.Uptime.LastHour, _, _ = s.getServiceUptime(tx, serviceID, now.Add(-time.Hour), now)
|
serviceStatus.Uptime.LastHour, _, _ = s.getServiceUptime(tx, serviceID, now.Add(-time.Hour), now)
|
||||||
serviceStatus.Uptime.LastTwentyFourHours, _, _ = s.getServiceUptime(tx, serviceID, now.Add(-24*time.Hour), now)
|
serviceStatus.Uptime.LastTwentyFourHours, _, _ = s.getServiceUptime(tx, serviceID, now.Add(-24*time.Hour), now)
|
||||||
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/TwinProduction/gatus/core"
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -101,7 +102,7 @@ func TestStore_Insert(t *testing.T) {
|
||||||
store.Insert(&testService, &testUnsuccessfulResult)
|
store.Insert(&testService, &testUnsuccessfulResult)
|
||||||
|
|
||||||
key := fmt.Sprintf("%s_%s", testService.Group, testService.Name)
|
key := fmt.Sprintf("%s_%s", testService.Group, testService.Name)
|
||||||
serviceStatus := store.GetServiceStatusByKey(key)
|
serviceStatus := store.GetServiceStatusByKey(key, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
t.Fatalf("Store should've had key '%s', but didn't", key)
|
t.Fatalf("Store should've had key '%s', but didn't", key)
|
||||||
}
|
}
|
||||||
|
@ -109,7 +110,7 @@ func TestStore_Insert(t *testing.T) {
|
||||||
t.Fatalf("Service '%s' should've had 2 results, but actually returned %d", serviceStatus.Name, len(serviceStatus.Results))
|
t.Fatalf("Service '%s' should've had 2 results, but actually returned %d", serviceStatus.Name, len(serviceStatus.Results))
|
||||||
}
|
}
|
||||||
for i, r := range serviceStatus.Results {
|
for i, r := range serviceStatus.Results {
|
||||||
expectedResult := store.GetServiceStatus(testService.Group, testService.Name).Results[i]
|
expectedResult := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime()).Results[i]
|
||||||
if r.HTTPStatus != expectedResult.HTTPStatus {
|
if r.HTTPStatus != expectedResult.HTTPStatus {
|
||||||
t.Errorf("Result at index %d should've had a HTTPStatus of %d, but was actually %d", i, expectedResult.HTTPStatus, r.HTTPStatus)
|
t.Errorf("Result at index %d should've had a HTTPStatus of %d, but was actually %d", i, expectedResult.HTTPStatus, r.HTTPStatus)
|
||||||
}
|
}
|
||||||
|
@ -152,7 +153,7 @@ func TestStore_GetServiceStatus(t *testing.T) {
|
||||||
store.Insert(&testService, &testSuccessfulResult)
|
store.Insert(&testService, &testSuccessfulResult)
|
||||||
store.Insert(&testService, &testUnsuccessfulResult)
|
store.Insert(&testService, &testUnsuccessfulResult)
|
||||||
|
|
||||||
serviceStatus := store.GetServiceStatus(testService.Group, testService.Name)
|
serviceStatus := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
t.Fatalf("serviceStatus shouldn't have been nil")
|
t.Fatalf("serviceStatus shouldn't have been nil")
|
||||||
}
|
}
|
||||||
|
@ -175,15 +176,15 @@ func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
|
||||||
defer store.db.Close()
|
defer store.db.Close()
|
||||||
store.Insert(&testService, &testSuccessfulResult)
|
store.Insert(&testService, &testSuccessfulResult)
|
||||||
|
|
||||||
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname")
|
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname", paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
|
||||||
if serviceStatus != nil {
|
if serviceStatus != nil {
|
||||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, testService.Name)
|
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, testService.Name)
|
||||||
}
|
}
|
||||||
serviceStatus = store.GetServiceStatus(testService.Group, "nonexistantname")
|
serviceStatus = store.GetServiceStatus(testService.Group, "nonexistantname", paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
|
||||||
if serviceStatus != nil {
|
if serviceStatus != nil {
|
||||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, "nonexistantname")
|
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, "nonexistantname")
|
||||||
}
|
}
|
||||||
serviceStatus = store.GetServiceStatus("nonexistantgroup", testService.Name)
|
serviceStatus = store.GetServiceStatus("nonexistantgroup", testService.Name, paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
|
||||||
if serviceStatus != nil {
|
if serviceStatus != nil {
|
||||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", testService.Name)
|
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", testService.Name)
|
||||||
}
|
}
|
||||||
|
@ -195,7 +196,7 @@ func TestStore_GetServiceStatusByKey(t *testing.T) {
|
||||||
store.Insert(&testService, &testSuccessfulResult)
|
store.Insert(&testService, &testSuccessfulResult)
|
||||||
store.Insert(&testService, &testUnsuccessfulResult)
|
store.Insert(&testService, &testUnsuccessfulResult)
|
||||||
|
|
||||||
serviceStatus := store.GetServiceStatusByKey(testService.Key())
|
serviceStatus := store.GetServiceStatusByKey(testService.Key(), paging.NewServiceStatusParams().WithEvents(1, core.MaximumNumberOfEvents).WithResults(1, core.MaximumNumberOfResults).WithUptime())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
t.Fatalf("serviceStatus shouldn't have been nil")
|
t.Fatalf("serviceStatus shouldn't have been nil")
|
||||||
}
|
}
|
||||||
|
@ -213,8 +214,8 @@ func TestStore_GetServiceStatusByKey(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStore_GetAllServiceStatusesWithResultPagination(t *testing.T) {
|
func TestStore_GetAllServiceStatuses(t *testing.T) {
|
||||||
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetAllServiceStatusesWithResultPagination.db")
|
store, _ := NewStore("sqlite", t.TempDir()+"/TestStore_GetAllServiceStatuses.db")
|
||||||
defer store.db.Close()
|
defer store.db.Close()
|
||||||
firstResult := &testSuccessfulResult
|
firstResult := &testSuccessfulResult
|
||||||
secondResult := &testUnsuccessfulResult
|
secondResult := &testUnsuccessfulResult
|
||||||
|
@ -223,7 +224,7 @@ func TestStore_GetAllServiceStatusesWithResultPagination(t *testing.T) {
|
||||||
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
||||||
firstResult.Timestamp = time.Time{}
|
firstResult.Timestamp = time.Time{}
|
||||||
secondResult.Timestamp = time.Time{}
|
secondResult.Timestamp = time.Time{}
|
||||||
serviceStatuses := store.GetAllServiceStatusesWithResultPagination(1, 20)
|
serviceStatuses := store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20))
|
||||||
if len(serviceStatuses) != 1 {
|
if len(serviceStatuses) != 1 {
|
||||||
t.Fatal("expected 1 service status")
|
t.Fatal("expected 1 service status")
|
||||||
}
|
}
|
||||||
|
|
|
@ -6,6 +6,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/TwinProduction/gatus/core"
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
"github.com/TwinProduction/gatus/util"
|
"github.com/TwinProduction/gatus/util"
|
||||||
"github.com/TwinProduction/gocache"
|
"github.com/TwinProduction/gocache"
|
||||||
)
|
)
|
||||||
|
@ -39,24 +40,24 @@ func NewStore(file string) (*Store, error) {
|
||||||
return store, nil
|
return store, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetAllServiceStatusesWithResultPagination returns all monitored core.ServiceStatus
|
// GetAllServiceStatuses returns all monitored core.ServiceStatus
|
||||||
// with a subset of core.Result defined by the page and pageSize parameters
|
// with a subset of core.Result defined by the page and pageSize parameters
|
||||||
func (s *Store) GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus {
|
func (s *Store) GetAllServiceStatuses(params *paging.ServiceStatusParams) map[string]*core.ServiceStatus {
|
||||||
serviceStatuses := s.cache.GetAll()
|
serviceStatuses := s.cache.GetAll()
|
||||||
pagedServiceStatuses := make(map[string]*core.ServiceStatus, len(serviceStatuses))
|
pagedServiceStatuses := make(map[string]*core.ServiceStatus, len(serviceStatuses))
|
||||||
for k, v := range serviceStatuses {
|
for k, v := range serviceStatuses {
|
||||||
pagedServiceStatuses[k] = v.(*core.ServiceStatus).WithResultPagination(page, pageSize)
|
pagedServiceStatuses[k] = ShallowCopyServiceStatus(v.(*core.ServiceStatus), params)
|
||||||
}
|
}
|
||||||
return pagedServiceStatuses
|
return pagedServiceStatuses
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetServiceStatus returns the service status for a given service name in the given group
|
// GetServiceStatus returns the service status for a given service name in the given group
|
||||||
func (s *Store) GetServiceStatus(groupName, serviceName string) *core.ServiceStatus {
|
func (s *Store) GetServiceStatus(groupName, serviceName string, params *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||||
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName))
|
return s.GetServiceStatusByKey(util.ConvertGroupAndServiceToKey(groupName, serviceName), params)
|
||||||
}
|
}
|
||||||
|
|
||||||
// GetServiceStatusByKey returns the service status for a given key
|
// GetServiceStatusByKey returns the service status for a given key
|
||||||
func (s *Store) GetServiceStatusByKey(key string) *core.ServiceStatus {
|
func (s *Store) GetServiceStatusByKey(key string, params *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||||
serviceStatus := s.cache.GetValue(key)
|
serviceStatus := s.cache.GetValue(key)
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
return nil
|
return nil
|
||||||
|
@ -76,7 +77,7 @@ func (s *Store) Insert(service *core.Service, result *core.Result) {
|
||||||
Timestamp: time.Now(),
|
Timestamp: time.Now(),
|
||||||
})
|
})
|
||||||
}
|
}
|
||||||
serviceStatus.(*core.ServiceStatus).AddResult(result)
|
AddResult(serviceStatus.(*core.ServiceStatus), result)
|
||||||
s.cache.Set(key, serviceStatus)
|
s.cache.Set(key, serviceStatus)
|
||||||
s.Unlock()
|
s.Unlock()
|
||||||
}
|
}
|
||||||
|
|
|
@ -5,6 +5,7 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/TwinProduction/gatus/core"
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -88,7 +89,7 @@ func TestStore_Insert(t *testing.T) {
|
||||||
t.Fatalf("expected 1 ServiceStatus, got %d", store.cache.Count())
|
t.Fatalf("expected 1 ServiceStatus, got %d", store.cache.Count())
|
||||||
}
|
}
|
||||||
key := testService.Key()
|
key := testService.Key()
|
||||||
serviceStatus := store.GetServiceStatusByKey(key)
|
serviceStatus := store.GetServiceStatusByKey(key, paging.NewServiceStatusParams())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
t.Fatalf("Store should've had key '%s', but didn't", key)
|
t.Fatalf("Store should've had key '%s', but didn't", key)
|
||||||
}
|
}
|
||||||
|
@ -96,7 +97,7 @@ func TestStore_Insert(t *testing.T) {
|
||||||
t.Fatalf("Service '%s' should've had 2 results, but actually returned %d", serviceStatus.Name, len(serviceStatus.Results))
|
t.Fatalf("Service '%s' should've had 2 results, but actually returned %d", serviceStatus.Name, len(serviceStatus.Results))
|
||||||
}
|
}
|
||||||
for i, r := range serviceStatus.Results {
|
for i, r := range serviceStatus.Results {
|
||||||
expectedResult := store.GetServiceStatus(testService.Group, testService.Name).Results[i]
|
expectedResult := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams().WithResults(1, 20)).Results[i]
|
||||||
if r.HTTPStatus != expectedResult.HTTPStatus {
|
if r.HTTPStatus != expectedResult.HTTPStatus {
|
||||||
t.Errorf("Result at index %d should've had a HTTPStatus of %d, but was actually %d", i, expectedResult.HTTPStatus, r.HTTPStatus)
|
t.Errorf("Result at index %d should've had a HTTPStatus of %d, but was actually %d", i, expectedResult.HTTPStatus, r.HTTPStatus)
|
||||||
}
|
}
|
||||||
|
@ -138,7 +139,7 @@ func TestStore_GetServiceStatus(t *testing.T) {
|
||||||
store.Insert(&testService, &testSuccessfulResult)
|
store.Insert(&testService, &testSuccessfulResult)
|
||||||
store.Insert(&testService, &testUnsuccessfulResult)
|
store.Insert(&testService, &testUnsuccessfulResult)
|
||||||
|
|
||||||
serviceStatus := store.GetServiceStatus(testService.Group, testService.Name)
|
serviceStatus := store.GetServiceStatus(testService.Group, testService.Name, paging.NewServiceStatusParams())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
t.Fatalf("serviceStatus shouldn't have been nil")
|
t.Fatalf("serviceStatus shouldn't have been nil")
|
||||||
}
|
}
|
||||||
|
@ -160,15 +161,15 @@ func TestStore_GetServiceStatusForMissingStatusReturnsNil(t *testing.T) {
|
||||||
store, _ := NewStore("")
|
store, _ := NewStore("")
|
||||||
store.Insert(&testService, &testSuccessfulResult)
|
store.Insert(&testService, &testSuccessfulResult)
|
||||||
|
|
||||||
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname")
|
serviceStatus := store.GetServiceStatus("nonexistantgroup", "nonexistantname", paging.NewServiceStatusParams())
|
||||||
if serviceStatus != nil {
|
if serviceStatus != nil {
|
||||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, testService.Name)
|
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, testService.Name)
|
||||||
}
|
}
|
||||||
serviceStatus = store.GetServiceStatus(testService.Group, "nonexistantname")
|
serviceStatus = store.GetServiceStatus(testService.Group, "nonexistantname", paging.NewServiceStatusParams())
|
||||||
if serviceStatus != nil {
|
if serviceStatus != nil {
|
||||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, "nonexistantname")
|
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", testService.Group, "nonexistantname")
|
||||||
}
|
}
|
||||||
serviceStatus = store.GetServiceStatus("nonexistantgroup", testService.Name)
|
serviceStatus = store.GetServiceStatus("nonexistantgroup", testService.Name, paging.NewServiceStatusParams())
|
||||||
if serviceStatus != nil {
|
if serviceStatus != nil {
|
||||||
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", testService.Name)
|
t.Errorf("Returned service status for group '%s' and name '%s' not nil after inserting the service into the store", "nonexistantgroup", testService.Name)
|
||||||
}
|
}
|
||||||
|
@ -179,7 +180,7 @@ func TestStore_GetServiceStatusByKey(t *testing.T) {
|
||||||
store.Insert(&testService, &testSuccessfulResult)
|
store.Insert(&testService, &testSuccessfulResult)
|
||||||
store.Insert(&testService, &testUnsuccessfulResult)
|
store.Insert(&testService, &testUnsuccessfulResult)
|
||||||
|
|
||||||
serviceStatus := store.GetServiceStatusByKey(testService.Key())
|
serviceStatus := store.GetServiceStatusByKey(testService.Key(), paging.NewServiceStatusParams())
|
||||||
if serviceStatus == nil {
|
if serviceStatus == nil {
|
||||||
t.Fatalf("serviceStatus shouldn't have been nil")
|
t.Fatalf("serviceStatus shouldn't have been nil")
|
||||||
}
|
}
|
||||||
|
@ -197,7 +198,7 @@ func TestStore_GetServiceStatusByKey(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestStore_GetAllServiceStatusesWithResultPagination(t *testing.T) {
|
func TestStore_GetAllServiceStatusesWithResults(t *testing.T) {
|
||||||
store, _ := NewStore("")
|
store, _ := NewStore("")
|
||||||
firstResult := &testSuccessfulResult
|
firstResult := &testSuccessfulResult
|
||||||
secondResult := &testUnsuccessfulResult
|
secondResult := &testUnsuccessfulResult
|
||||||
|
@ -206,7 +207,32 @@ func TestStore_GetAllServiceStatusesWithResultPagination(t *testing.T) {
|
||||||
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
||||||
firstResult.Timestamp = time.Time{}
|
firstResult.Timestamp = time.Time{}
|
||||||
secondResult.Timestamp = time.Time{}
|
secondResult.Timestamp = time.Time{}
|
||||||
serviceStatuses := store.GetAllServiceStatusesWithResultPagination(1, 20)
|
serviceStatuses := store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20))
|
||||||
|
if len(serviceStatuses) != 1 {
|
||||||
|
t.Fatal("expected 1 service status")
|
||||||
|
}
|
||||||
|
actual, exists := serviceStatuses[testService.Key()]
|
||||||
|
if !exists {
|
||||||
|
t.Fatal("expected service status to exist")
|
||||||
|
}
|
||||||
|
if len(actual.Results) != 2 {
|
||||||
|
t.Error("expected 2 results, got", len(actual.Results))
|
||||||
|
}
|
||||||
|
if len(actual.Events) != 0 {
|
||||||
|
t.Error("expected 0 events, got", len(actual.Events))
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestStore_GetAllServiceStatusesWithResultsAndEvents(t *testing.T) {
|
||||||
|
store, _ := NewStore("")
|
||||||
|
firstResult := &testSuccessfulResult
|
||||||
|
secondResult := &testUnsuccessfulResult
|
||||||
|
store.Insert(&testService, firstResult)
|
||||||
|
store.Insert(&testService, secondResult)
|
||||||
|
// Can't be bothered dealing with timezone issues on the worker that runs the automated tests
|
||||||
|
firstResult.Timestamp = time.Time{}
|
||||||
|
secondResult.Timestamp = time.Time{}
|
||||||
|
serviceStatuses := store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20).WithEvents(1, 50))
|
||||||
if len(serviceStatuses) != 1 {
|
if len(serviceStatuses) != 1 {
|
||||||
t.Fatal("expected 1 service status")
|
t.Fatal("expected 1 service status")
|
||||||
}
|
}
|
||||||
|
@ -232,20 +258,20 @@ func TestStore_DeleteAllServiceStatusesNotInKeys(t *testing.T) {
|
||||||
if store.cache.Count() != 2 {
|
if store.cache.Count() != 2 {
|
||||||
t.Errorf("expected cache to have 2 keys, got %d", store.cache.Count())
|
t.Errorf("expected cache to have 2 keys, got %d", store.cache.Count())
|
||||||
}
|
}
|
||||||
if store.GetServiceStatusByKey(firstService.Key()) == nil {
|
if store.GetServiceStatusByKey(firstService.Key(), paging.NewServiceStatusParams()) == nil {
|
||||||
t.Fatal("firstService should exist")
|
t.Fatal("firstService should exist")
|
||||||
}
|
}
|
||||||
if store.GetServiceStatusByKey(secondService.Key()) == nil {
|
if store.GetServiceStatusByKey(secondService.Key(), paging.NewServiceStatusParams()) == nil {
|
||||||
t.Fatal("secondService should exist")
|
t.Fatal("secondService should exist")
|
||||||
}
|
}
|
||||||
store.DeleteAllServiceStatusesNotInKeys([]string{firstService.Key()})
|
store.DeleteAllServiceStatusesNotInKeys([]string{firstService.Key()})
|
||||||
if store.cache.Count() != 1 {
|
if store.cache.Count() != 1 {
|
||||||
t.Fatalf("expected cache to have 1 keys, got %d", store.cache.Count())
|
t.Fatalf("expected cache to have 1 keys, got %d", store.cache.Count())
|
||||||
}
|
}
|
||||||
if store.GetServiceStatusByKey(firstService.Key()) == nil {
|
if store.GetServiceStatusByKey(firstService.Key(), paging.NewServiceStatusParams()) == nil {
|
||||||
t.Error("secondService should've been deleted")
|
t.Error("secondService should've been deleted")
|
||||||
}
|
}
|
||||||
if store.GetServiceStatusByKey(secondService.Key()) != nil {
|
if store.GetServiceStatusByKey(secondService.Key(), paging.NewServiceStatusParams()) != nil {
|
||||||
t.Error("firstService should still exist")
|
t.Error("firstService should still exist")
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
119
storage/store/memory/uptime.go
Normal file
119
storage/store/memory/uptime.go
Normal file
|
@ -0,0 +1,119 @@
|
||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"log"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
numberOfHoursInTenDays = 10 * 24
|
||||||
|
sevenDays = 7 * 24 * time.Hour
|
||||||
|
)
|
||||||
|
|
||||||
|
// processUptimeAfterResult processes the result by extracting the relevant from the result and recalculating the uptime
|
||||||
|
// if necessary
|
||||||
|
func processUptimeAfterResult(uptime *core.Uptime, result *core.Result) {
|
||||||
|
// XXX: Remove this on v3.0.0
|
||||||
|
if len(uptime.SuccessfulExecutionsPerHour) != 0 || len(uptime.TotalExecutionsPerHour) != 0 {
|
||||||
|
migrateUptimeToHourlyStatistics(uptime)
|
||||||
|
}
|
||||||
|
if uptime.HourlyStatistics == nil {
|
||||||
|
uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
|
||||||
|
}
|
||||||
|
unixTimestampFlooredAtHour := result.Timestamp.Truncate(time.Hour).Unix()
|
||||||
|
hourlyStats, _ := uptime.HourlyStatistics[unixTimestampFlooredAtHour]
|
||||||
|
if hourlyStats == nil {
|
||||||
|
hourlyStats = &core.HourlyUptimeStatistics{}
|
||||||
|
uptime.HourlyStatistics[unixTimestampFlooredAtHour] = hourlyStats
|
||||||
|
}
|
||||||
|
if result.Success {
|
||||||
|
hourlyStats.SuccessfulExecutions++
|
||||||
|
}
|
||||||
|
hourlyStats.TotalExecutions++
|
||||||
|
hourlyStats.TotalExecutionsResponseTime += uint64(result.Duration.Milliseconds())
|
||||||
|
// Clean up only when we're starting to have too many useless keys
|
||||||
|
// Note that this is only triggered when there are more entries than there should be after
|
||||||
|
// 10 days, despite the fact that we are deleting everything that's older than 7 days.
|
||||||
|
// This is to prevent re-iterating on every `processUptimeAfterResult` as soon as the uptime has been logged for 7 days.
|
||||||
|
if len(uptime.HourlyStatistics) > numberOfHoursInTenDays {
|
||||||
|
sevenDaysAgo := time.Now().Add(-(sevenDays + time.Hour)).Unix()
|
||||||
|
for hourlyUnixTimestamp := range uptime.HourlyStatistics {
|
||||||
|
if sevenDaysAgo > hourlyUnixTimestamp {
|
||||||
|
delete(uptime.HourlyStatistics, hourlyUnixTimestamp)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if result.Success {
|
||||||
|
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 100%
|
||||||
|
// If they're all 100%, then recalculating the uptime would be useless unless
|
||||||
|
// the result added was a failure (!result.Success)
|
||||||
|
if uptime.LastSevenDays != 1 || uptime.LastTwentyFourHours != 1 || uptime.LastHour != 1 {
|
||||||
|
recalculateUptime(uptime)
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
// Recalculate uptime if at least one of the 1h, 24h or 7d uptime are not 0%
|
||||||
|
// If they're all 0%, then recalculating the uptime would be useless unless
|
||||||
|
// the result added was a success (result.Success)
|
||||||
|
if uptime.LastSevenDays != 0 || uptime.LastTwentyFourHours != 0 || uptime.LastHour != 0 {
|
||||||
|
recalculateUptime(uptime)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func recalculateUptime(uptime *core.Uptime) {
|
||||||
|
uptimeBrackets := make(map[string]uint64)
|
||||||
|
now := time.Now()
|
||||||
|
// The oldest uptime bracket starts 7 days ago, so we'll start from there
|
||||||
|
timestamp := now.Add(-sevenDays)
|
||||||
|
for now.Sub(timestamp) >= 0 {
|
||||||
|
hourlyUnixTimestamp := timestamp.Truncate(time.Hour).Unix()
|
||||||
|
hourlyStats := uptime.HourlyStatistics[hourlyUnixTimestamp]
|
||||||
|
if hourlyStats == nil || hourlyStats.TotalExecutions == 0 {
|
||||||
|
timestamp = timestamp.Add(time.Hour)
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
uptimeBrackets["7d_success"] += hourlyStats.SuccessfulExecutions
|
||||||
|
uptimeBrackets["7d_total"] += hourlyStats.TotalExecutions
|
||||||
|
if now.Sub(timestamp) <= 24*time.Hour {
|
||||||
|
uptimeBrackets["24h_success"] += hourlyStats.SuccessfulExecutions
|
||||||
|
uptimeBrackets["24h_total"] += hourlyStats.TotalExecutions
|
||||||
|
}
|
||||||
|
if now.Sub(timestamp) <= time.Hour {
|
||||||
|
uptimeBrackets["1h_success"] += hourlyStats.SuccessfulExecutions
|
||||||
|
uptimeBrackets["1h_total"] += hourlyStats.TotalExecutions
|
||||||
|
}
|
||||||
|
timestamp = timestamp.Add(time.Hour)
|
||||||
|
}
|
||||||
|
if uptimeBrackets["7d_total"] > 0 {
|
||||||
|
uptime.LastSevenDays = float64(uptimeBrackets["7d_success"]) / float64(uptimeBrackets["7d_total"])
|
||||||
|
}
|
||||||
|
if uptimeBrackets["24h_total"] > 0 {
|
||||||
|
uptime.LastTwentyFourHours = float64(uptimeBrackets["24h_success"]) / float64(uptimeBrackets["24h_total"])
|
||||||
|
}
|
||||||
|
if uptimeBrackets["1h_total"] > 0 {
|
||||||
|
uptime.LastHour = float64(uptimeBrackets["1h_success"]) / float64(uptimeBrackets["1h_total"])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// XXX: Remove this on v3.0.0
|
||||||
|
// Deprecated
|
||||||
|
func migrateUptimeToHourlyStatistics(uptime *core.Uptime) {
|
||||||
|
log.Println("[migrateUptimeToHourlyStatistics] Got", len(uptime.SuccessfulExecutionsPerHour), "entries for successful executions and", len(uptime.TotalExecutionsPerHour), "entries for total executions")
|
||||||
|
uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
|
||||||
|
for hourlyUnixTimestamp, totalExecutions := range uptime.TotalExecutionsPerHour {
|
||||||
|
if totalExecutions == 0 {
|
||||||
|
log.Println("[migrateUptimeToHourlyStatistics] Skipping entry at", hourlyUnixTimestamp, "because total number of executions is 0")
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
uptime.HourlyStatistics[hourlyUnixTimestamp] = &core.HourlyUptimeStatistics{
|
||||||
|
TotalExecutions: totalExecutions,
|
||||||
|
SuccessfulExecutions: uptime.SuccessfulExecutionsPerHour[hourlyUnixTimestamp],
|
||||||
|
TotalExecutionsResponseTime: 0,
|
||||||
|
}
|
||||||
|
}
|
||||||
|
log.Println("[migrateUptimeToHourlyStatistics] Migrated", len(uptime.HourlyStatistics), "entries")
|
||||||
|
uptime.SuccessfulExecutionsPerHour = nil
|
||||||
|
uptime.TotalExecutionsPerHour = nil
|
||||||
|
}
|
|
@ -1,18 +1,20 @@
|
||||||
package core
|
package memory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
func BenchmarkUptime_ProcessResult(b *testing.B) {
|
func BenchmarkProcessUptimeAfterResult(b *testing.B) {
|
||||||
uptime := NewUptime()
|
uptime := core.NewUptime()
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||||
// Start 12000 days ago
|
// Start 12000 days ago
|
||||||
timestamp := now.Add(-12000 * 24 * time.Hour)
|
timestamp := now.Add(-12000 * 24 * time.Hour)
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
uptime.ProcessResult(&Result{
|
processUptimeAfterResult(uptime, &core.Result{
|
||||||
Duration: 18 * time.Millisecond,
|
Duration: 18 * time.Millisecond,
|
||||||
Success: n%15 == 0,
|
Success: n%15 == 0,
|
||||||
Timestamp: timestamp,
|
Timestamp: timestamp,
|
|
@ -1,62 +1,64 @@
|
||||||
package core
|
package memory
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"testing"
|
"testing"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
)
|
)
|
||||||
|
|
||||||
func TestUptime_ProcessResult(t *testing.T) {
|
func TestProcessUptimeAfterResult(t *testing.T) {
|
||||||
service := &Service{Name: "name", Group: "group"}
|
service := &core.Service{Name: "name", Group: "group"}
|
||||||
serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
|
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||||
uptime := serviceStatus.Uptime
|
uptime := serviceStatus.Uptime
|
||||||
|
|
||||||
checkUptimes(t, serviceStatus, 0.00, 0.00, 0.00)
|
checkUptimes(t, serviceStatus, 0.00, 0.00, 0.00)
|
||||||
|
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-7 * 24 * time.Hour), Success: true})
|
||||||
checkUptimes(t, serviceStatus, 1.00, 0.00, 0.00)
|
checkUptimes(t, serviceStatus, 1.00, 0.00, 0.00)
|
||||||
|
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-6 * 24 * time.Hour), Success: false})
|
||||||
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
||||||
|
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-8 * 24 * time.Hour), Success: true})
|
||||||
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
checkUptimes(t, serviceStatus, 0.50, 0.00, 0.00)
|
||||||
|
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-24 * time.Hour), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-12 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-12 * time.Hour), Success: true})
|
||||||
checkUptimes(t, serviceStatus, 0.75, 1.00, 0.00)
|
checkUptimes(t, serviceStatus, 0.75, 1.00, 0.00)
|
||||||
|
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-1 * time.Hour), Success: true, Duration: 10 * time.Millisecond})
|
||||||
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 10, 1, 1)
|
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 10, 1, 1)
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-30 * time.Minute), Success: false, Duration: 500 * time.Millisecond})
|
||||||
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 510, 2, 1)
|
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 510, 2, 1)
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-15 * time.Minute), Success: false, Duration: 25 * time.Millisecond})
|
||||||
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 535, 3, 1)
|
checkHourlyStatistics(t, uptime.HourlyStatistics[now.Unix()-now.Unix()%3600-3600], 535, 3, 1)
|
||||||
|
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-10 * time.Minute), Success: false})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-10 * time.Minute), Success: false})
|
||||||
checkUptimes(t, serviceStatus, 0.50, 0.50, 0.25)
|
checkUptimes(t, serviceStatus, 0.50, 0.50, 0.25)
|
||||||
|
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-120 * time.Hour), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-119 * time.Hour), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-118 * time.Hour), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-117 * time.Hour), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-10 * time.Hour), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-8 * time.Hour), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-30 * time.Minute), Success: true})
|
||||||
uptime.ProcessResult(&Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
|
processUptimeAfterResult(uptime, &core.Result{Timestamp: now.Add(-25 * time.Minute), Success: true})
|
||||||
checkUptimes(t, serviceStatus, 0.75, 0.70, 0.50)
|
checkUptimes(t, serviceStatus, 0.75, 0.70, 0.50)
|
||||||
}
|
}
|
||||||
|
|
||||||
func TestServiceStatus_AddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
|
func TestAddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
|
||||||
service := &Service{Name: "name", Group: "group"}
|
service := &core.Service{Name: "name", Group: "group"}
|
||||||
serviceStatus := NewServiceStatus(service.Key(), service.Group, service.Name)
|
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||||
now := time.Now()
|
now := time.Now()
|
||||||
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
now = time.Date(now.Year(), now.Month(), now.Day(), now.Hour(), 0, 0, 0, now.Location())
|
||||||
// Start 12 days ago
|
// Start 12 days ago
|
||||||
timestamp := now.Add(-12 * 24 * time.Hour)
|
timestamp := now.Add(-12 * 24 * time.Hour)
|
||||||
for timestamp.Unix() <= now.Unix() {
|
for timestamp.Unix() <= now.Unix() {
|
||||||
serviceStatus.AddResult(&Result{Timestamp: timestamp, Success: true})
|
AddResult(serviceStatus, &core.Result{Timestamp: timestamp, Success: true})
|
||||||
if len(serviceStatus.Uptime.HourlyStatistics) > numberOfHoursInTenDays {
|
if len(serviceStatus.Uptime.HourlyStatistics) > numberOfHoursInTenDays {
|
||||||
t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessfulExecutionsPerHour, but there are %d", numberOfHoursInTenDays, len(serviceStatus.Uptime.HourlyStatistics))
|
t.Errorf("At no point in time should there be more than %d entries in serviceStatus.SuccessfulExecutionsPerHour, but there are %d", numberOfHoursInTenDays, len(serviceStatus.Uptime.HourlyStatistics))
|
||||||
}
|
}
|
||||||
|
@ -71,7 +73,7 @@ func TestServiceStatus_AddResultUptimeIsCleaningUpAfterItself(t *testing.T) {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkUptimes(t *testing.T, status *ServiceStatus, expectedUptimeDuringLastSevenDays, expectedUptimeDuringLastTwentyFourHours, expectedUptimeDuringLastHour float64) {
|
func checkUptimes(t *testing.T, status *core.ServiceStatus, expectedUptimeDuringLastSevenDays, expectedUptimeDuringLastTwentyFourHours, expectedUptimeDuringLastHour float64) {
|
||||||
if status.Uptime.LastSevenDays != expectedUptimeDuringLastSevenDays {
|
if status.Uptime.LastSevenDays != expectedUptimeDuringLastSevenDays {
|
||||||
t.Errorf("expected status.Uptime.LastSevenDays to be %f, got %f", expectedUptimeDuringLastHour, status.Uptime.LastSevenDays)
|
t.Errorf("expected status.Uptime.LastSevenDays to be %f, got %f", expectedUptimeDuringLastHour, status.Uptime.LastSevenDays)
|
||||||
}
|
}
|
||||||
|
@ -83,7 +85,7 @@ func checkUptimes(t *testing.T, status *ServiceStatus, expectedUptimeDuringLastS
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
|
func checkHourlyStatistics(t *testing.T, hourlyUptimeStatistics *core.HourlyUptimeStatistics, expectedTotalExecutionsResponseTime uint64, expectedTotalExecutions uint64, expectedSuccessfulExecutions uint64) {
|
||||||
if hourlyUptimeStatistics.TotalExecutionsResponseTime != expectedTotalExecutionsResponseTime {
|
if hourlyUptimeStatistics.TotalExecutionsResponseTime != expectedTotalExecutionsResponseTime {
|
||||||
t.Error("TotalExecutionsResponseTime should've been", expectedTotalExecutionsResponseTime, "got", hourlyUptimeStatistics.TotalExecutionsResponseTime)
|
t.Error("TotalExecutionsResponseTime should've been", expectedTotalExecutionsResponseTime, "got", hourlyUptimeStatistics.TotalExecutionsResponseTime)
|
||||||
}
|
}
|
86
storage/store/memory/util.go
Normal file
86
storage/store/memory/util.go
Normal file
|
@ -0,0 +1,86 @@
|
||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
|
)
|
||||||
|
|
||||||
|
// ShallowCopyServiceStatus returns a shallow copy of a ServiceStatus with only the results
|
||||||
|
// within the range defined by the page and pageSize parameters
|
||||||
|
func ShallowCopyServiceStatus(ss *core.ServiceStatus, params *paging.ServiceStatusParams) *core.ServiceStatus {
|
||||||
|
shallowCopy := &core.ServiceStatus{
|
||||||
|
Name: ss.Name,
|
||||||
|
Group: ss.Group,
|
||||||
|
Key: ss.Key,
|
||||||
|
Uptime: core.NewUptime(),
|
||||||
|
}
|
||||||
|
numberOfResults := len(ss.Results)
|
||||||
|
resultsStart, resultsEnd := getStartAndEndIndex(numberOfResults, params.ResultsPage, params.ResultsPageSize)
|
||||||
|
if resultsStart < 0 || resultsEnd < 0 {
|
||||||
|
shallowCopy.Results = []*core.Result{}
|
||||||
|
} else {
|
||||||
|
shallowCopy.Results = ss.Results[resultsStart:resultsEnd]
|
||||||
|
}
|
||||||
|
numberOfEvents := len(ss.Events)
|
||||||
|
eventsStart, eventsEnd := getStartAndEndIndex(numberOfEvents, params.EventsPage, params.EventsPageSize)
|
||||||
|
if eventsStart < 0 || eventsEnd < 0 {
|
||||||
|
shallowCopy.Events = []*core.Event{}
|
||||||
|
} else {
|
||||||
|
shallowCopy.Events = ss.Events[eventsStart:eventsEnd]
|
||||||
|
}
|
||||||
|
if params.IncludeUptime {
|
||||||
|
shallowCopy.Uptime.LastHour = ss.Uptime.LastHour
|
||||||
|
shallowCopy.Uptime.LastTwentyFourHours = ss.Uptime.LastTwentyFourHours
|
||||||
|
shallowCopy.Uptime.LastSevenDays = ss.Uptime.LastSevenDays
|
||||||
|
}
|
||||||
|
return shallowCopy
|
||||||
|
}
|
||||||
|
|
||||||
|
// getStartAndEndIndex converts a 1-based page and a pageSize into slice bounds
// over a list of numberOfResults elements, where page 1 maps to the most
// recent (last) elements of the list.
//
// A negative start or end signals an invalid or out-of-range page; callers
// must treat either negative value as "no elements".
func getStartAndEndIndex(numberOfResults int, page, pageSize int) (start, end int) {
	start = numberOfResults - page*pageSize
	end = numberOfResults - (page-1)*pageSize
	switch {
	case start > numberOfResults:
		// Only possible when page*pageSize is negative (invalid parameters)
		start = -1
	case start < 0:
		// The page reaches past the oldest element; clamp to the beginning
		start = 0
	}
	if end > numberOfResults {
		end = numberOfResults
	}
	return start, end
}
|
||||||
|
|
||||||
|
// AddResult adds a Result to ServiceStatus.Results and makes sure that there are
|
||||||
|
// no more than MaximumNumberOfResults results in the Results slice
|
||||||
|
func AddResult(ss *core.ServiceStatus, result *core.Result) {
|
||||||
|
if ss == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
if len(ss.Results) > 0 {
|
||||||
|
// Check if there's any change since the last result
|
||||||
|
// OR there's only 1 event, which only happens when there's a start event
|
||||||
|
if ss.Results[len(ss.Results)-1].Success != result.Success || len(ss.Events) == 1 {
|
||||||
|
event := &core.Event{Timestamp: result.Timestamp}
|
||||||
|
if result.Success {
|
||||||
|
event.Type = core.EventHealthy
|
||||||
|
} else {
|
||||||
|
event.Type = core.EventUnhealthy
|
||||||
|
}
|
||||||
|
ss.Events = append(ss.Events, event)
|
||||||
|
if len(ss.Events) > core.MaximumNumberOfEvents {
|
||||||
|
// Doing ss.Events[1:] would usually be sufficient, but in the case where for some reason, the slice has
|
||||||
|
// more than one extra element, we can get rid of all of them at once and thus returning the slice to a
|
||||||
|
// length of MaximumNumberOfEvents by using ss.Events[len(ss.Events)-MaximumNumberOfEvents:] instead
|
||||||
|
ss.Events = ss.Events[len(ss.Events)-core.MaximumNumberOfEvents:]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
ss.Results = append(ss.Results, result)
|
||||||
|
if len(ss.Results) > core.MaximumNumberOfResults {
|
||||||
|
// Doing ss.Results[1:] would usually be sufficient, but in the case where for some reason, the slice has more
|
||||||
|
// than one extra element, we can get rid of all of them at once and thus returning the slice to a length of
|
||||||
|
// MaximumNumberOfResults by using ss.Results[len(ss.Results)-MaximumNumberOfResults:] instead
|
||||||
|
ss.Results = ss.Results[len(ss.Results)-core.MaximumNumberOfResults:]
|
||||||
|
}
|
||||||
|
processUptimeAfterResult(ss.Uptime, result)
|
||||||
|
}
|
20
storage/store/memory/util_bench_test.go
Normal file
20
storage/store/memory/util_bench_test.go
Normal file
|
@ -0,0 +1,20 @@
|
||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
|
)
|
||||||
|
|
||||||
|
func BenchmarkShallowCopyServiceStatus(b *testing.B) {
|
||||||
|
service := &testService
|
||||||
|
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||||
|
for i := 0; i < core.MaximumNumberOfResults; i++ {
|
||||||
|
AddResult(serviceStatus, &testSuccessfulResult)
|
||||||
|
}
|
||||||
|
for n := 0; n < b.N; n++ {
|
||||||
|
ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 20))
|
||||||
|
}
|
||||||
|
b.ReportAllocs()
|
||||||
|
}
|
55
storage/store/memory/util_test.go
Normal file
55
storage/store/memory/util_test.go
Normal file
|
@ -0,0 +1,55 @@
|
||||||
|
package memory
|
||||||
|
|
||||||
|
import (
|
||||||
|
"testing"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/TwinProduction/gatus/core"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
|
)
|
||||||
|
|
||||||
|
func TestAddResult(t *testing.T) {
|
||||||
|
service := &core.Service{Name: "name", Group: "group"}
|
||||||
|
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||||
|
for i := 0; i < core.MaximumNumberOfResults+10; i++ {
|
||||||
|
AddResult(serviceStatus, &core.Result{Timestamp: time.Now()})
|
||||||
|
}
|
||||||
|
if len(serviceStatus.Results) != core.MaximumNumberOfResults {
|
||||||
|
t.Errorf("expected serviceStatus.Results to not exceed a length of %d", core.MaximumNumberOfResults)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func TestShallowCopyServiceStatus(t *testing.T) {
|
||||||
|
service := &core.Service{Name: "name", Group: "group"}
|
||||||
|
serviceStatus := core.NewServiceStatus(service.Key(), service.Group, service.Name)
|
||||||
|
for i := 0; i < 25; i++ {
|
||||||
|
AddResult(serviceStatus, &core.Result{Timestamp: time.Now()})
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 1)).Results) != 1 {
|
||||||
|
t.Errorf("expected to have 1 result")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(5, 0)).Results) != 0 {
|
||||||
|
t.Errorf("expected to have 0 results")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(-1, 20)).Results) != 0 {
|
||||||
|
t.Errorf("expected to have 0 result, because the page was invalid")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, -1)).Results) != 0 {
|
||||||
|
t.Errorf("expected to have 0 result, because the page size was invalid")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 10)).Results) != 10 {
|
||||||
|
t.Errorf("expected to have 10 results, because given a page size of 10, page 1 should have 10 elements")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(2, 10)).Results) != 10 {
|
||||||
|
t.Errorf("expected to have 10 results, because given a page size of 10, page 2 should have 10 elements")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(3, 10)).Results) != 5 {
|
||||||
|
t.Errorf("expected to have 5 results, because given a page size of 10, page 3 should have 5 elements")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(4, 10)).Results) != 0 {
|
||||||
|
t.Errorf("expected to have 0 results, because given a page size of 10, page 4 should have 0 elements")
|
||||||
|
}
|
||||||
|
if len(ShallowCopyServiceStatus(serviceStatus, paging.NewServiceStatusParams().WithResults(1, 50)).Results) != 25 {
|
||||||
|
t.Errorf("expected to have 25 results, because there's only 25 results")
|
||||||
|
}
|
||||||
|
}
|
|
@ -4,19 +4,20 @@ import (
|
||||||
"github.com/TwinProduction/gatus/core"
|
"github.com/TwinProduction/gatus/core"
|
||||||
"github.com/TwinProduction/gatus/storage/store/database"
|
"github.com/TwinProduction/gatus/storage/store/database"
|
||||||
"github.com/TwinProduction/gatus/storage/store/memory"
|
"github.com/TwinProduction/gatus/storage/store/memory"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
)
|
)
|
||||||
|
|
||||||
// Store is the interface that each stores should implement
|
// Store is the interface that each stores should implement
|
||||||
type Store interface {
|
type Store interface {
|
||||||
// GetAllServiceStatusesWithResultPagination returns the JSON encoding of all monitored core.ServiceStatus
|
// GetAllServiceStatuses returns the JSON encoding of all monitored core.ServiceStatus
|
||||||
// with a subset of core.Result defined by the page and pageSize parameters
|
// with a subset of core.Result defined by the page and pageSize parameters
|
||||||
GetAllServiceStatusesWithResultPagination(page, pageSize int) map[string]*core.ServiceStatus
|
GetAllServiceStatuses(params *paging.ServiceStatusParams) map[string]*core.ServiceStatus
|
||||||
|
|
||||||
// GetServiceStatus returns the service status for a given service name in the given group
|
// GetServiceStatus returns the service status for a given service name in the given group
|
||||||
GetServiceStatus(groupName, serviceName string) *core.ServiceStatus
|
GetServiceStatus(groupName, serviceName string, params *paging.ServiceStatusParams) *core.ServiceStatus
|
||||||
|
|
||||||
// GetServiceStatusByKey returns the service status for a given key
|
// GetServiceStatusByKey returns the service status for a given key
|
||||||
GetServiceStatusByKey(key string) *core.ServiceStatus
|
GetServiceStatusByKey(key string, params *paging.ServiceStatusParams) *core.ServiceStatus
|
||||||
|
|
||||||
// Insert adds the observed result for the specified service into the store
|
// Insert adds the observed result for the specified service into the store
|
||||||
Insert(service *core.Service, result *core.Result)
|
Insert(service *core.Service, result *core.Result)
|
||||||
|
|
|
@ -7,6 +7,7 @@ import (
|
||||||
"github.com/TwinProduction/gatus/core"
|
"github.com/TwinProduction/gatus/core"
|
||||||
"github.com/TwinProduction/gatus/storage/store/database"
|
"github.com/TwinProduction/gatus/storage/store/database"
|
||||||
"github.com/TwinProduction/gatus/storage/store/memory"
|
"github.com/TwinProduction/gatus/storage/store/memory"
|
||||||
|
"github.com/TwinProduction/gatus/storage/store/paging"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -77,12 +78,12 @@ var (
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
func BenchmarkStore_GetAllServiceStatusesWithResultPagination(b *testing.B) {
|
func BenchmarkStore_GetAllServiceStatuses(b *testing.B) {
|
||||||
memoryStore, err := memory.NewStore("")
|
memoryStore, err := memory.NewStore("")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal("failed to create store:", err.Error())
|
b.Fatal("failed to create store:", err.Error())
|
||||||
}
|
}
|
||||||
databaseStore, err := database.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetAllServiceStatusesWithResultPagination.db")
|
databaseStore, err := database.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_GetAllServiceStatuses.db")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal("failed to create store:", err.Error())
|
b.Fatal("failed to create store:", err.Error())
|
||||||
}
|
}
|
||||||
|
@ -121,12 +122,12 @@ func BenchmarkStore_GetAllServiceStatusesWithResultPagination(b *testing.B) {
|
||||||
if scenario.Parallel {
|
if scenario.Parallel {
|
||||||
b.RunParallel(func(pb *testing.PB) {
|
b.RunParallel(func(pb *testing.PB) {
|
||||||
for pb.Next() {
|
for pb.Next() {
|
||||||
scenario.Store.GetAllServiceStatusesWithResultPagination(1, 20)
|
scenario.Store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20))
|
||||||
}
|
}
|
||||||
})
|
})
|
||||||
} else {
|
} else {
|
||||||
for n := 0; n < b.N; n++ {
|
for n := 0; n < b.N; n++ {
|
||||||
scenario.Store.GetAllServiceStatusesWithResultPagination(1, 20)
|
scenario.Store.GetAllServiceStatuses(paging.NewServiceStatusParams().WithResults(1, 20))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
b.ReportAllocs()
|
b.ReportAllocs()
|
||||||
|
@ -136,10 +137,10 @@ func BenchmarkStore_GetAllServiceStatusesWithResultPagination(b *testing.B) {
|
||||||
}
|
}
|
||||||
|
|
||||||
func BenchmarkStore_Insert(b *testing.B) {
|
func BenchmarkStore_Insert(b *testing.B) {
|
||||||
memoryStore, err := memory.NewStore("")
|
//memoryStore, err := memory.NewStore("")
|
||||||
if err != nil {
|
//if err != nil {
|
||||||
b.Fatal("failed to create store:", err.Error())
|
// b.Fatal("failed to create store:", err.Error())
|
||||||
}
|
//}
|
||||||
databaseStore, err := database.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_Insert.db")
|
databaseStore, err := database.NewStore("sqlite", b.TempDir()+"/BenchmarkStore_Insert.db")
|
||||||
if err != nil {
|
if err != nil {
|
||||||
b.Fatal("failed to create store:", err.Error())
|
b.Fatal("failed to create store:", err.Error())
|
||||||
|
@ -151,16 +152,16 @@ func BenchmarkStore_Insert(b *testing.B) {
|
||||||
Parallel bool
|
Parallel bool
|
||||||
}
|
}
|
||||||
scenarios := []Scenario{
|
scenarios := []Scenario{
|
||||||
{
|
//{
|
||||||
Name: "memory",
|
// Name: "memory",
|
||||||
Store: memoryStore,
|
// Store: memoryStore,
|
||||||
Parallel: false,
|
// Parallel: false,
|
||||||
},
|
//},
|
||||||
{
|
//{
|
||||||
Name: "memory-parallel",
|
// Name: "memory-parallel",
|
||||||
Store: memoryStore,
|
// Store: memoryStore,
|
||||||
Parallel: true,
|
// Parallel: true,
|
||||||
},
|
//},
|
||||||
{
|
{
|
||||||
Name: "database",
|
Name: "database",
|
||||||
Store: databaseStore,
|
Store: databaseStore,
|
||||||
|
|
Loading…
Reference in a new issue