
Merge pull request #288 from arangodb/bug-fix/scaling-removing-wrong-nodes

Revisited scale up and scale down.
commit a3dbb9670b by Max Neunhöffer, 2018-11-02 13:35:52 +01:00 (committed by GitHub)
5 changed files with 29 additions and 23 deletions


@@ -142,7 +142,12 @@ func (l MemberStatusList) SelectMemberToRemove() (MemberStatus, error) {
 	if len(l) > 0 {
 		// Try to find a not ready member
 		for _, m := range l {
-			if m.Phase == MemberPhaseNone || !m.Conditions.IsTrue(ConditionTypeReady) {
+			if m.Phase == MemberPhaseNone {
+				return m, nil
+			}
+		}
+		for _, m := range l {
+			if !m.Conditions.IsTrue(ConditionTypeReady) {
 				return m, nil
 			}
 		}
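This hunk splits the old combined test into two ordered passes: a member that never reached any phase is now always preferred, so a not-ready member earlier in the list can no longer win over a never-started member later in it. A minimal standalone sketch of that ordering, with a simplified stand-in for the operator's MemberStatus type (the final fallback is illustrative only, not upstream behavior):

package main

import "fmt"

// member is a simplified stand-in for the operator's MemberStatus.
type member struct {
	id    string
	phase string // "" plays the role of MemberPhaseNone
	ready bool   // plays the role of Conditions.IsTrue(ConditionTypeReady)
}

// selectMemberToRemove mirrors the two-pass order introduced above: first any
// member that never reached a phase, then any member that is merely not ready.
func selectMemberToRemove(members []member) (member, error) {
	if len(members) > 0 {
		for _, m := range members {
			if m.phase == "" {
				return m, nil
			}
		}
		for _, m := range members {
			if !m.ready {
				return m, nil
			}
		}
		// every member is healthy; picking the last one is illustrative only
		return members[len(members)-1], nil
	}
	return member{}, fmt.Errorf("no members to remove")
}

func main() {
	ms := []member{
		{id: "a", phase: "created", ready: true},
		{id: "b", phase: "created", ready: false},
	}
	m, _ := selectMemberToRemove(ms)
	fmt.Println(m.id) // prints "b": not-ready members are picked before ready ones
}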


@@ -95,6 +95,10 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, error) {
 		// We wanted to remove and it is already gone. All ok
 		return true, false, nil
 	}
+	// do not try to clean out a pod that was not initialized
+	if !m.IsInitialized {
+		return false, true, nil
+	}
 	c, err := a.actionCtx.GetDatabaseClient(ctx)
 	if err != nil {
 		log.Debug().Err(err).Msg("Failed to create database client")
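Judging from the existing early return (`true, false, nil` once the member is already gone), the first result of CheckProgress reads as "action complete" and the second as an abort signal; that reading is inferred from this hunk, not from a documented contract. Under it, the new guard makes clean-out bail out early for a member whose pod was never initialized, rather than opening a database connection to it. A hedged paraphrase with the result names as assumptions:

package main

import "fmt"

// checkCleanoutProgress paraphrases the guard above. The result names
// (done, abort) are inferred from this diff, not from upstream documentation.
func checkCleanoutProgress(memberExists, memberInitialized bool) (done, abort bool, err error) {
	if !memberExists {
		return true, false, nil // already gone: nothing left to clean out
	}
	if !memberInitialized {
		return false, true, nil // never initialized: back off instead of querying the database
	}
	return false, false, nil // otherwise keep polling the clean-out status
}

func main() {
	done, abort, _ := checkCleanoutProgress(true, false)
	fmt.Println(done, abort) // false true: an uninitialized member stops the action early
}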


@@ -63,6 +63,10 @@ func (a *actionShutdownMember) Start(ctx context.Context) (bool, error) {
 		return true, nil
 	}
 	if group.IsArangod() {
+		// do not try to shut down a pod that is not ready
+		if !m.Conditions.IsTrue(api.ConditionTypeReady) {
+			return true, nil
+		}
 		// Invoke shutdown endpoint
 		c, err := a.actionCtx.GetServerClient(ctx, group, a.action.MemberID)
 		if err != nil {


@@ -371,17 +371,13 @@ func createScalePlan(log zerolog.Logger, members api.MemberStatusList, group api
 			Str("member-id", m.ID).
 			Str("phase", string(m.Phase)).
 			Msg("Found member to remove")
-		if m.Conditions.IsTrue(api.ConditionTypeReady) {
-			if group == api.ServerGroupDBServers {
-				plan = append(plan,
-					api.NewAction(api.ActionTypeCleanOutMember, group, m.ID),
-				)
-			}
+		if group == api.ServerGroupDBServers {
 			plan = append(plan,
-				api.NewAction(api.ActionTypeShutdownMember, group, m.ID),
+				api.NewAction(api.ActionTypeCleanOutMember, group, m.ID),
 			)
 		}
 		plan = append(plan,
+			api.NewAction(api.ActionTypeShutdownMember, group, m.ID),
 			api.NewAction(api.ActionTypeRemoveMember, group, m.ID),
 		)
 		log.Debug().
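The net effect of this hunk: the ready-condition gate is gone, every member selected for removal is shut down before it is removed, and DB servers additionally get a clean-out first. A minimal sketch of the resulting ordering, using plain strings instead of the operator's api.Plan and api.NewAction types:

package main

import "fmt"

// buildScaleDownActions mirrors the ordering introduced above: DB servers are
// cleaned out first, and every group is shut down before the member is removed.
func buildScaleDownActions(group string) []string {
	var plan []string
	if group == "dbservers" {
		plan = append(plan, "CleanOutMember")
	}
	plan = append(plan, "ShutdownMember", "RemoveMember")
	return plan
}

func main() {
	fmt.Println(buildScaleDownActions("dbservers"))    // [CleanOutMember ShutdownMember RemoveMember]
	fmt.Println(buildScaleDownActions("coordinators")) // [ShutdownMember RemoveMember]
}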


@@ -176,9 +176,11 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) {
 	}
 	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
-	require.Len(t, newPlan, 1) // Note: Downscaling is only down 1 at a time
-	assert.Equal(t, api.ActionTypeRemoveMember, newPlan[0].Type)
+	require.Len(t, newPlan, 2) // Note: Downscaling is only down 1 at a time
+	assert.Equal(t, api.ActionTypeShutdownMember, newPlan[0].Type)
+	assert.Equal(t, api.ActionTypeRemoveMember, newPlan[1].Type)
 	assert.Equal(t, api.ServerGroupSingle, newPlan[0].Group)
+	assert.Equal(t, api.ServerGroupSingle, newPlan[1].Group)
 }
 
 // TestCreatePlanClusterScale creates a `cluster` deployment to test the creating of scaling plan.
@@ -261,12 +263,6 @@ func TestCreatePlanClusterScale(t *testing.T) {
 		api.MemberStatus{
 			ID:      "cr1",
 			PodName: "coordinator1",
-			Conditions: api.ConditionList{
-				api.Condition{
-					Type:   api.ConditionTypeReady,
-					Status: v1.ConditionTrue,
-				},
-			},
 		},
 		api.MemberStatus{
 			ID: "cr2",
@@ -277,14 +273,15 @@
 	spec.Coordinators.Count = util.NewInt(1)
 	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
-	fmt.Printf("%v", newPlan)
-	require.Len(t, newPlan, 3) // Note: Downscaling is done 1 at a time
-	assert.Equal(t, api.ActionTypeRemoveMember, newPlan[0].Type)
+	require.Len(t, newPlan, 5) // Note: Downscaling is done 1 at a time
+	assert.Equal(t, api.ActionTypeCleanOutMember, newPlan[0].Type)
+	assert.Equal(t, api.ActionTypeShutdownMember, newPlan[1].Type)
+	assert.Equal(t, api.ActionTypeRemoveMember, newPlan[2].Type)
+	assert.Equal(t, api.ActionTypeShutdownMember, newPlan[3].Type)
+	assert.Equal(t, api.ActionTypeRemoveMember, newPlan[4].Type)
 	assert.Equal(t, api.ServerGroupDBServers, newPlan[0].Group)
-	assert.Equal(t, api.ServerGroupCoordinators, newPlan[1].Group)
-	assert.Equal(t, api.ServerGroupCoordinators, newPlan[2].Group)
+	assert.Equal(t, api.ServerGroupDBServers, newPlan[1].Group)
+	assert.Equal(t, api.ServerGroupDBServers, newPlan[2].Group)
+	assert.Equal(t, api.ServerGroupCoordinators, newPlan[3].Group)
+	assert.Equal(t, api.ServerGroupCoordinators, newPlan[4].Group)
 }
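Taken together, the updated tests pin down the new per-member scale-down sequences: CleanOutMember -> ShutdownMember -> RemoveMember for DB servers, and ShutdownMember -> RemoveMember for coordinators and active-failover singles.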