Mirror of https://github.com/TwiN/gatus.git (synced 2024-12-14 11:58:04 +00:00)
Remove old memory uptime implementation and auto migration

parent 6e38114e27
commit 66e312b72f

2 changed files with 0 additions and 36 deletions
@@ -3,16 +3,6 @@ package core
 // Uptime is the struct that contains the relevant data for calculating the uptime as well as the uptime itself
 // and some other statistics
 type Uptime struct {
-	// SuccessfulExecutionsPerHour is a map containing the number of successes (value)
-	// for every hourly unix timestamps (key)
-	// Deprecated
-	SuccessfulExecutionsPerHour map[int64]uint64 `json:"-"`
-
-	// TotalExecutionsPerHour is a map containing the total number of checks (value)
-	// for every hourly unix timestamps (key)
-	// Deprecated
-	TotalExecutionsPerHour map[int64]uint64 `json:"-"`
-
 	// HourlyStatistics is a map containing metrics collected (value) for every hourly unix timestamps (key)
 	//
 	// Used only if the storage type is memory
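For reference, here is a minimal sketch of what the Uptime type is left with after this removal, reconstructed from the context lines above and from the fields populated by the migration code further down. The HourlyStatistics field declaration, its json tag, and the HourlyUptimeStatistics field types are assumptions, since they are not part of this hunk:

// Sketch only: reconstructed from diff context, not the actual file contents.
type Uptime struct {
	// HourlyStatistics is a map containing metrics collected (value) for every hourly unix timestamps (key)
	//
	// Used only if the storage type is memory
	HourlyStatistics map[int64]*HourlyUptimeStatistics `json:"-"` // json tag assumed
}

// HourlyUptimeStatistics fields inferred from the removed migration function below;
// the uint64 types are assumed based on the legacy per-hour maps.
type HourlyUptimeStatistics struct {
	TotalExecutions             uint64
	SuccessfulExecutions        uint64
	TotalExecutionsResponseTime uint64
}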
@@ -1,7 +1,6 @@
 package memory
 
 import (
-	"log"
 	"time"
 
 	"github.com/TwinProduction/gatus/core"
@@ -15,10 +14,6 @@ const (
 // processUptimeAfterResult processes the result by extracting the relevant from the result and recalculating the uptime
 // if necessary
 func processUptimeAfterResult(uptime *core.Uptime, result *core.Result) {
-	// XXX: Remove this on v3.0.0
-	if len(uptime.SuccessfulExecutionsPerHour) != 0 || len(uptime.TotalExecutionsPerHour) != 0 {
-		migrateUptimeToHourlyStatistics(uptime)
-	}
 	if uptime.HourlyStatistics == nil {
 		uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
 	}
@@ -46,24 +41,3 @@ func processUptimeAfterResult(uptime *core.Uptime, result *core.Result) {
 		}
 	}
 }
-
-// XXX: Remove this on v3.0.0
-// Deprecated
-func migrateUptimeToHourlyStatistics(uptime *core.Uptime) {
-	log.Println("[migrateUptimeToHourlyStatistics] Got", len(uptime.SuccessfulExecutionsPerHour), "entries for successful executions and", len(uptime.TotalExecutionsPerHour), "entries for total executions")
-	uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
-	for hourlyUnixTimestamp, totalExecutions := range uptime.TotalExecutionsPerHour {
-		if totalExecutions == 0 {
-			log.Println("[migrateUptimeToHourlyStatistics] Skipping entry at", hourlyUnixTimestamp, "because total number of executions is 0")
-			continue
-		}
-		uptime.HourlyStatistics[hourlyUnixTimestamp] = &core.HourlyUptimeStatistics{
-			TotalExecutions:             totalExecutions,
-			SuccessfulExecutions:        uptime.SuccessfulExecutionsPerHour[hourlyUnixTimestamp],
-			TotalExecutionsResponseTime: 0,
-		}
-	}
-	log.Println("[migrateUptimeToHourlyStatistics] Migrated", len(uptime.HourlyStatistics), "entries")
-	uptime.SuccessfulExecutionsPerHour = nil
-	uptime.TotalExecutionsPerHour = nil
-}
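With the auto migration gone, the top of processUptimeAfterResult reduces to the lazy initialization kept as context in the hunks above. A minimal sketch, assuming the rest of the function body (not shown in this diff) is unchanged:

// Sketch assembled from the unchanged context lines of this diff.
func processUptimeAfterResult(uptime *core.Uptime, result *core.Result) {
	if uptime.HourlyStatistics == nil {
		uptime.HourlyStatistics = make(map[int64]*core.HourlyUptimeStatistics)
	}
	// ... remainder of the uptime recalculation, untouched by this commit
}

Dropping the "log" import alongside the function implies that migrateUptimeToHourlyStatistics was the last remaining user of the log package in this file.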