Code cleanup.
parent 674885291c
commit a64d92a97b

3 changed files with 11 additions and 7 deletions
@@ -34,7 +34,7 @@ const (
 	MemberPhaseFailed MemberPhase = "Failed"
 	// MemberPhaseCleanOut indicates that a dbserver is in the process of being cleaned out
 	MemberPhaseCleanOut MemberPhase = "CleanOut"
-	// MemberPhaseDrain indicates that a dbserver is n the process of being cleaned out as result of draining a node
+	// MemberPhaseDrain indicates that a dbserver is in the process of being cleaned out as result of draining a node
 	MemberPhaseDrain MemberPhase = "Drain"
 	// MemberPhaseShuttingDown indicates that a member is shutting down
 	MemberPhaseShuttingDown MemberPhase = "ShuttingDown"
@@ -48,3 +48,8 @@ const (
 func (p MemberPhase) IsFailed() bool {
 	return p == MemberPhaseFailed
 }
+
+// IsCreatedOrDrain returns true when given phase is MemberPhaseCreated or MemberPhaseDrain
+func (p MemberPhase) IsCreatedOrDrain() bool {
+	return p == MemberPhaseCreated || p == MemberPhaseDrain
+}
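For context, a minimal, self-contained sketch of the MemberPhase helpers touched by this hunk. The package layout, the main function, and the value of MemberPhaseCreated are assumptions for illustration; the helper bodies mirror the diff above.

package main

import "fmt"

// MemberPhase is a strongly typed phase of a deployment member.
type MemberPhase string

const (
	MemberPhaseCreated MemberPhase = "Created" // assumed value; defined earlier in the real file
	MemberPhaseFailed  MemberPhase = "Failed"
	MemberPhaseDrain   MemberPhase = "Drain"
)

// IsFailed returns true when the given phase is MemberPhaseFailed.
func (p MemberPhase) IsFailed() bool {
	return p == MemberPhaseFailed
}

// IsCreatedOrDrain returns true when given phase is MemberPhaseCreated or MemberPhaseDrain.
func (p MemberPhase) IsCreatedOrDrain() bool {
	return p == MemberPhaseCreated || p == MemberPhaseDrain
}

func main() {
	for _, p := range []MemberPhase{MemberPhaseCreated, MemberPhaseDrain, MemberPhaseFailed} {
		fmt.Printf("%-8s created-or-drain=%v\n", p, p.IsCreatedOrDrain())
	}
}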
@@ -120,7 +120,7 @@ func createPlan(log zerolog.Logger, apiObject k8sutil.APIObject,
 
 	// Check for cleaned out dbserver in created state
 	for _, m := range status.Members.DBServers {
-		if len(plan) == 0 && (m.Phase == api.MemberPhaseCreated || m.Phase == api.MemberPhaseDrain) && m.Conditions.IsTrue(api.ConditionTypeCleanedOut) {
+		if len(plan) == 0 && m.Phase.IsCreatedOrDrain() && m.Conditions.IsTrue(api.ConditionTypeCleanedOut) {
 			log.Debug().
 				Str("id", m.ID).
 				Str("role", api.ServerGroupDBServers.AsRole()).
@@ -187,14 +187,16 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol
 		return nil
 	}
 	// Not cleaned out yet, check member status
-	if memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) || (memberStatus.Phase == api.MemberPhaseCreated && !k8sutil.IsPodReady(p)) {
+	if memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) {
 		log.Warn().Msg("Member is already terminated before it could be cleaned out. Not good, but removing dbserver pod because we cannot do anything further")
 		// At this point we have to set CleanedOut to true,
 		// because we can no longer reason about the state in the agency and
 		// bringing back the dbserver again may result in an cleaned out server without us knowing
 		memberStatus.Conditions.Update(api.ConditionTypeCleanedOut, true, "Draining server failed", "")
-		memberStatus.Phase = api.MemberPhaseCreated
 		memberStatus.CleanoutJobID = ""
+		if memberStatus.Phase == api.MemberPhaseDrain {
+			memberStatus.Phase = api.MemberPhaseCreated
+		}
 		if err := updateMember(memberStatus); err != nil {
 			return maskAny(err)
 		}
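The behavioral change in this hunk: when a member is found terminated before cleanout completed, the phase is no longer reset to Created unconditionally, but only when the member was in the Drain phase. A minimal sketch of that guard, using pared-down hypothetical types (the real MemberStatus and condition handling live in the kube-arangodb API package):

package main

import "fmt"

type MemberPhase string

const (
	MemberPhaseCreated MemberPhase = "Created"
	MemberPhaseDrain   MemberPhase = "Drain"
)

// MemberStatus is a simplified stand-in for the operator's member status struct.
type MemberStatus struct {
	Phase         MemberPhase
	CleanedOut    bool
	CleanoutJobID string
}

// handleTerminatedBeforeCleanout mirrors the changed branch: mark the member as
// cleaned out, clear the cleanout job ID, and reset the phase to Created only
// when the member was being drained.
func handleTerminatedBeforeCleanout(m *MemberStatus) {
	m.CleanedOut = true
	m.CleanoutJobID = ""
	if m.Phase == MemberPhaseDrain {
		m.Phase = MemberPhaseCreated
	}
}

func main() {
	m := MemberStatus{Phase: MemberPhaseDrain, CleanoutJobID: "job-123"}
	handleTerminatedBeforeCleanout(&m)
	fmt.Printf("%+v\n", m) // {Phase:Created CleanedOut:true CleanoutJobID:}
}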
@@ -234,9 +236,6 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol
 			memberStatus.CleanoutJobID = ""
 			return maskAny(fmt.Errorf("Clean out server job failed"))
 		}
-	} else {
-		log.Warn().Msgf("Unexpected MemberPhase %s, allow removal", memberStatus.Phase)
-		return nil
 	}
 	return maskAny(fmt.Errorf("Server is not yet cleaned out"))