Repository: https://github.com/arangodb/kube-arangodb.git
[Bugfix] Fix restart procedure in case of failing members (#884)
Parent: 3cf0fa875e
Commit: 6d3050cdef
14 changed files with 262 additions and 121 deletions
@@ -15,6 +15,7 @@
- Add metrics for the plan actions
- Add ArangoClusterSynchronization Operator
- Update licenses
- Fix restart procedure in case of failing members

## [1.2.6](https://github.com/arangodb/kube-arangodb/tree/1.2.6) (2021-12-15)
- Add ArangoBackup backoff functionality
@@ -23,6 +23,7 @@ package v1
import (
    "math"
    "strings"
    "time"

    "github.com/arangodb/kube-arangodb/pkg/util/errors"
@@ -148,6 +149,8 @@ type ServerGroupSpec struct {
    InternalPort *int `json:"internalPort,omitempty"`
    // AllowMemberRecreation allows to recreate member. Value is used only for Coordinator and DBServer with default to True, for all other groups set to false.
    AllowMemberRecreation *bool `json:"allowMemberRecreation,omitempty"`
    // TerminationGracePeriodSeconds override default TerminationGracePeriodSeconds for pods - via silent rotation
    TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
}

// ServerGroupSpecSecurityContext contains specification for pod security context
@@ -700,3 +703,12 @@ func (s ServerGroupSpec) GetShutdownDelay(group ServerGroup) int {
    }
    return *s.ShutdownDelay
}

// GetTerminationGracePeriod returns termination grace period as Duration
func (s ServerGroupSpec) GetTerminationGracePeriod(group ServerGroup) time.Duration {
    if v := s.TerminationGracePeriodSeconds; v == nil {
        return group.DefaultTerminationGracePeriod()
    } else {
        return time.Second * time.Duration(*v)
    }
}
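A minimal usage sketch of the new getter, assuming the v1 API shown above; the 300-second override is illustrative only, and the unset case falls back to whatever the group's DefaultTerminationGracePeriod returns:

```go
// Sketch only: shows how GetTerminationGracePeriod falls back to the
// per-group default when terminationGracePeriodSeconds is unset.
package main

import (
	"fmt"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	"github.com/arangodb/kube-arangodb/pkg/util"
)

func main() {
	var spec api.ServerGroupSpec

	// No override set: the group default (DefaultTerminationGracePeriod) is used.
	fmt.Println(spec.GetTerminationGracePeriod(api.ServerGroupDBServers))

	// Override set to 300 seconds: converted into a time.Duration of 5m0s.
	spec.TerminationGracePeriodSeconds = util.NewInt64(300)
	fmt.Println(spec.GetTerminationGracePeriod(api.ServerGroupDBServers))
}
```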
pkg/apis/deployment/v1/zz_generated.deepcopy.go (generated, 5 changes)
@@ -2043,6 +2043,11 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) {
        *out = new(bool)
        **out = **in
    }
    if in.TerminationGracePeriodSeconds != nil {
        in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
        *out = new(int64)
        **out = **in
    }
    return
}
@@ -23,6 +23,7 @@ package v2alpha1
import (
    "math"
    "strings"
    "time"

    "github.com/arangodb/kube-arangodb/pkg/util/errors"
@@ -148,6 +149,8 @@ type ServerGroupSpec struct {
    InternalPort *int `json:"internalPort,omitempty"`
    // AllowMemberRecreation allows to recreate member. Value is used only for Coordinator and DBServer with default to True, for all other groups set to false.
    AllowMemberRecreation *bool `json:"allowMemberRecreation,omitempty"`
    // TerminationGracePeriodSeconds override default TerminationGracePeriodSeconds for pods - via silent rotation
    TerminationGracePeriodSeconds *int64 `json:"terminationGracePeriodSeconds,omitempty"`
}

// ServerGroupSpecSecurityContext contains specification for pod security context
@@ -700,3 +703,12 @@ func (s ServerGroupSpec) GetShutdownDelay(group ServerGroup) int {
    }
    return *s.ShutdownDelay
}

// GetTerminationGracePeriod returns termination grace period as Duration
func (s ServerGroupSpec) GetTerminationGracePeriod(group ServerGroup) time.Duration {
    if v := s.TerminationGracePeriodSeconds; v == nil {
        return group.DefaultTerminationGracePeriod()
    } else {
        return time.Second * time.Duration(*v)
    }
}
@@ -2043,6 +2043,11 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) {
        *out = new(bool)
        **out = **in
    }
    if in.TerminationGracePeriodSeconds != nil {
        in, out := &in.TerminationGracePeriodSeconds, &out.TerminationGracePeriodSeconds
        *out = new(int64)
        **out = **in
    }
    return
}
@@ -23,8 +23,6 @@ package reconcile
import (
    "context"

    core "k8s.io/api/core/v1"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
    inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
@@ -35,12 +33,11 @@ func createBootstrapPlan(ctx context.Context,
    log zerolog.Logger, apiObject k8sutil.APIObject,
    spec api.DeploymentSpec, status api.DeploymentStatus,
    cachedStatus inspectorInterface.Inspector, context PlanBuilderContext) api.Plan {

    if !status.Conditions.IsTrue(api.ConditionTypeReady) {
        return nil
    }

    if condition, hasBootstrap := status.Conditions.Get(api.ConditionTypeBootstrapCompleted); !hasBootstrap || condition.Status == core.ConditionTrue {
    if status.Conditions.IsTrue(api.ConditionTypeBootstrapCompleted) {
        return nil
    }
@@ -62,6 +62,7 @@ func createNormalPlan(ctx context.Context, log zerolog.Logger, apiObject k8sutil
        // Check for members to be removed
        ApplyIfEmpty(createReplaceMemberPlan).
        // Check for the need to rotate one or more members
        ApplyIfEmpty(createMarkToRemovePlan).
        ApplyIfEmpty(createRotateOrUpgradePlan).
        // Disable maintenance if upgrade process was done. Upgrade task throw IDLE Action if upgrade is pending
        ApplyIfEmpty(createMaintenanceManagementPlan).
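The chain above relies on ApplyIfEmpty to run each plan builder only while no earlier builder produced actions, so the newly extracted createMarkToRemovePlan gets a chance before rotation/upgrade planning. A minimal sketch of that chaining pattern, using simplified stand-in types rather than the operator's real signatures:

```go
// Minimal sketch of the ApplyIfEmpty chaining pattern used above.
// plan and planBuilder are simplified stand-ins, not the operator's types.
package main

import "fmt"

type plan []string

type planBuilder func() plan

// applyIfEmpty runs the next builder only if no earlier builder produced a
// plan, so at most one concern generates actions per reconciliation cycle.
func applyIfEmpty(current plan, next planBuilder) plan {
	if len(current) > 0 {
		return current
	}
	return next()
}

func main() {
	var p plan
	p = applyIfEmpty(p, func() plan { return nil })                        // replace-member: nothing to do
	p = applyIfEmpty(p, func() plan { return plan{"MarkToRemoveMember"} }) // mark-to-remove fires
	p = applyIfEmpty(p, func() plan { return plan{"RotateMember"} })       // skipped: plan already non-empty
	fmt.Println(p) // [MarkToRemoveMember]
}
```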
@@ -23,13 +23,13 @@ package reconcile
import (
    "context"

    "github.com/arangodb/go-driver"
    "github.com/arangodb/kube-arangodb/pkg/deployment/rotation"

    "github.com/arangodb/kube-arangodb/pkg/deployment/features"

    "github.com/arangodb/kube-arangodb/pkg/deployment/resources"

    "github.com/arangodb/go-driver"
    upgraderules "github.com/arangodb/go-upgrade-rules"
    "github.com/arangodb/kube-arangodb/pkg/apis/deployment"
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -82,91 +82,11 @@ func createRotateOrUpgradePlan(ctx context.Context,
    return plan
}

func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, cachedStatus inspectorInterface.Inspector, context PlanBuilderContext) (api.Plan, bool) {

    var newPlan api.Plan
    var upgradeNotAllowed bool
    var fromVersion, toVersion driver.Version
    var fromLicense, toLicense upgraderules.License

    status.Members.ForeachServerGroup(func(group api.ServerGroup, members api.MemberStatusList) error {
        for _, m := range members {
            if m.Phase != api.MemberPhaseCreated || m.PodName == "" {
                // Only rotate when phase is created
                continue
            }

            // Got pod, compare it with what it should be
            decision := podNeedsUpgrading(log, m, spec, status.Images)
            if decision.Hold {
                return nil
            }

            if decision.UpgradeNeeded && !decision.UpgradeAllowed {
                // Oops, upgrade is not allowed
                upgradeNotAllowed = true
                fromVersion = decision.FromVersion
                fromLicense = decision.FromLicense
                toVersion = decision.ToVersion
                toLicense = decision.ToLicense
                return nil
            }

            if !newPlan.IsEmpty() {
                // Only rotate/upgrade 1 pod at a time
                continue
            }

            if decision.UpgradeNeeded {
                // Yes, upgrade is needed (and allowed)
                newPlan = createUpgradeMemberPlan(log, m, group, "Version upgrade", spec, status,
                    !decision.AutoUpgradeNeeded)
            } else {
                if rotation.CheckPossible(m) {
                    if m.Conditions.IsTrue(api.ConditionTypeRestart) {
                        newPlan = createRotateMemberPlan(log, m, group, "Restart flag present")
                    } else if m.Conditions.IsTrue(api.ConditionTypeUpdating) || m.Conditions.IsTrue(api.ConditionTypeUpdateFailed) {
                        continue
                    } else if m.Conditions.IsTrue(api.ConditionTypePendingUpdate) {
                        arangoMember, ok := cachedStatus.ArangoMember(m.ArangoMemberName(apiObject.GetName(), group))
                        if !ok {
                            continue
                        }
                        p, ok := cachedStatus.Pod(m.PodName)
                        if !ok {
                            p = nil
                        }

                        if mode, p, reason, err := rotation.IsRotationRequired(log, cachedStatus, spec, m, group, p, arangoMember.Spec.Template, arangoMember.Status.Template); err != nil {
                            log.Err(err).Msgf("Error while generating update plan")
                            continue
                        } else if mode != rotation.InPlaceRotation {
                            newPlan = api.Plan{api.NewAction(api.ActionTypeSetMemberCondition, group, m.ID, "Cleaning update").
                                AddParam(api.ConditionTypePendingUpdate.String(), "").AddParam(api.ConditionTypeUpdating.String(), "T")}
                            continue
                        } else {
                            p = p.After(
                                api.NewAction(api.ActionTypeWaitForMemberUp, group, m.ID),
                                api.NewAction(api.ActionTypeWaitForMemberInSync, group, m.ID))

                            p = p.Wrap(api.NewAction(api.ActionTypeSetMemberCondition, group, m.ID, reason).
                                AddParam(api.ConditionTypePendingUpdate.String(), "").AddParam(api.ConditionTypeUpdating.String(), "T"),
                                api.NewAction(api.ActionTypeSetMemberCondition, group, m.ID, reason).
                                AddParam(api.ConditionTypeUpdating.String(), ""))

                            newPlan = p
                        }
                    }
                }
            }

            if !newPlan.IsEmpty() {
                // Only rotate/upgrade 1 pod at a time
                continue
            }
        }
        return nil
    })
func createMarkToRemovePlan(ctx context.Context,
    log zerolog.Logger, apiObject k8sutil.APIObject,
    spec api.DeploymentSpec, status api.DeploymentStatus,
    cachedStatus inspectorInterface.Inspector, context PlanBuilderContext) api.Plan {
    var plan api.Plan

    status.Members.ForeachServerInGroups(func(group api.ServerGroup, members api.MemberStatusList) error {
        for _, m := range members {
@@ -175,11 +95,6 @@ func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.API
                continue
            }

            if !newPlan.IsEmpty() {
                // Only rotate/upgrade 1 pod at a time
                continue
            }

            pod, found := cachedStatus.Pod(m.PodName)
            if !found {
                continue
@@ -188,7 +103,7 @@ func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.API
            if pod.Annotations != nil {
                if _, ok := pod.Annotations[deployment.ArangoDeploymentPodReplaceAnnotation]; ok && (group == api.ServerGroupDBServers || group == api.ServerGroupAgents || group == api.ServerGroupCoordinators) {
                    if !m.Conditions.IsTrue(api.ConditionTypeMarkedToRemove) {
                        newPlan = api.Plan{api.NewAction(api.ActionTypeMarkToRemoveMember, group, m.ID, "Replace flag present")}
                        plan = append(plan, api.NewAction(api.ActionTypeMarkToRemoveMember, group, m.ID, "Replace flag present"))
                        continue
                    }
                }
@@ -198,25 +113,120 @@ func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.API
        return nil
    }, rotationByAnnotationOrder...)

    if upgradeNotAllowed {
        context.CreateEvent(k8sutil.NewUpgradeNotAllowedEvent(apiObject, fromVersion, toVersion, fromLicense, toLicense))
    } else if !newPlan.IsEmpty() {
        if clusterReadyForUpgrade(context) {
            // Use the new plan
            return newPlan, false
    return plan
}

func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, cachedStatus inspectorInterface.Inspector, context PlanBuilderContext) (api.Plan, bool) {
    member, group, decision, update := createRotateOrUpgradeDecision(log, spec, status, context)
    if !update {
        // Nothing to do
        return nil, false
    }

    if decision != nil {
        // Upgrade phase
        if decision.Hold {
            // Holding upgrade
            return nil, false
        }

        if !decision.UpgradeNeeded {
            // In upgrade scenario but upgrade is not needed
            return nil, false
        }

        if !decision.UpgradeAllowed {
            context.CreateEvent(k8sutil.NewUpgradeNotAllowedEvent(apiObject, decision.FromVersion, decision.ToVersion, decision.FromLicense, decision.ToLicense))
            return nil, false
        }

        if groupReadyForRestart(context, spec, status, member, group) {
            return createUpgradeMemberPlan(log, member, group, "Version upgrade", spec, status, !decision.AutoUpgradeNeeded), false
        } else if util.BoolOrDefault(spec.AllowUnsafeUpgrade, false) {
            log.Info().Msg("Pod needs upgrade but cluster is not ready. Either some shards are not in sync or some member is not ready, but unsafe upgrade is allowed")
            return createUpgradeMemberPlan(log, member, group, "Version upgrade", spec, status, !decision.AutoUpgradeNeeded), false
        } else {
            if util.BoolOrDefault(spec.AllowUnsafeUpgrade, false) {
                log.Info().Msg("Pod needs upgrade but cluster is not ready. Either some shards are not in sync or some member is not ready, but unsafe upgrade is allowed")
                // Use the new plan
                return newPlan, false
            } else {
                log.Info().Msg("Pod needs upgrade but cluster is not ready. Either some shards are not in sync or some member is not ready.")
                return nil, true
            log.Info().Msg("Pod needs upgrade but cluster is not ready. Either some shards are not in sync or some member is not ready.")
            return nil, true
        }
    }

    // Rotate phase
    if !rotation.CheckPossible(member) {
        return nil, false
    }

    if member.Conditions.IsTrue(api.ConditionTypeRestart) {
        return createRotateMemberPlan(log, member, group, "Restart flag present"), false
    }

    if member.Conditions.IsTrue(api.ConditionTypePendingUpdate) {
        arangoMember, ok := cachedStatus.ArangoMember(member.ArangoMemberName(apiObject.GetName(), group))
        if !ok {
            return nil, false
        }
        p, ok := cachedStatus.Pod(member.PodName)
        if !ok {
            p = nil
        }

        if mode, p, reason, err := rotation.IsRotationRequired(log, cachedStatus, spec, member, group, p, arangoMember.Spec.Template, arangoMember.Status.Template); err != nil {
            log.Err(err).Msgf("Error while generating update plan")
            return nil, false
        } else if mode != rotation.InPlaceRotation {
            return api.Plan{api.NewAction(api.ActionTypeSetMemberCondition, group, member.ID, "Cleaning update").
                AddParam(api.ConditionTypePendingUpdate.String(), "").
                AddParam(api.ConditionTypeUpdating.String(), "T")}, false
        } else {
            p = p.After(
                api.NewAction(api.ActionTypeWaitForMemberUp, group, member.ID),
                api.NewAction(api.ActionTypeWaitForMemberInSync, group, member.ID))

            p = p.Wrap(api.NewAction(api.ActionTypeSetMemberCondition, group, member.ID, reason).
                AddParam(api.ConditionTypePendingUpdate.String(), "").AddParam(api.ConditionTypeUpdating.String(), "T"),
                api.NewAction(api.ActionTypeSetMemberCondition, group, member.ID, reason).
                AddParam(api.ConditionTypeUpdating.String(), ""))

            return p, false
        }
    }
    return nil, false
}

func createRotateOrUpgradeDecision(log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) (api.MemberStatus, api.ServerGroup, *upgradeDecision, bool) {
    // Upgrade phase
    for _, m := range status.Members.AsList() {
        if m.Member.Phase != api.MemberPhaseCreated || m.Member.PodName == "" {
            // Only rotate when phase is created
            continue
        }

        // Got pod, compare it with what it should be
        decision := podNeedsUpgrading(log, m.Member, spec, status.Images)

        if decision.UpgradeNeeded || decision.Hold {
            return m.Member, m.Group, &decision, true
        }
    }

    // Update phase
    for _, m := range status.Members.AsList() {
        if !groupReadyForRestart(context, spec, status, m.Member, m.Group) {
            continue
        }

        if rotation.CheckPossible(m.Member) {
            if m.Member.Conditions.IsTrue(api.ConditionTypeRestart) {
                return m.Member, m.Group, nil, true
            } else if m.Member.Conditions.IsTrue(api.ConditionTypePendingUpdate) {
                if !m.Member.Conditions.IsTrue(api.ConditionTypeUpdating) && !m.Member.Conditions.IsTrue(api.ConditionTypeUpdateFailed) {
                    return m.Member, m.Group, nil, true
                }
            }
        }
    }

    return nil, false
    return api.MemberStatus{}, api.ServerGroupUnknown, nil, false
}

// podNeedsUpgrading decides if an upgrade of the pod is needed (to comply with
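The rewritten logic first picks a single member plus an optional upgrade decision (createRotateOrUpgradeDecision), and only then builds the upgrade or rotate plan for that one member. A condensed sketch of that control flow, with simplified types standing in for the operator's real API:

```go
// Condensed sketch of the two-phase flow above; types and helpers are
// simplified stand-ins, not the operator's real structures.
package main

import "fmt"

type decision struct {
	Hold, UpgradeNeeded, UpgradeAllowed bool
}

type member struct {
	Name                   string
	RestartRequested       bool
	PendingUpdate          bool
	Updating, UpdateFailed bool
}

// pickMember mirrors createRotateOrUpgradeDecision: members needing an
// upgrade are considered first, then members flagged for restart or update.
func pickMember(members []member, needsUpgrade func(member) *decision) (member, *decision, bool) {
	for _, m := range members {
		if d := needsUpgrade(m); d != nil {
			return m, d, true
		}
	}
	for _, m := range members {
		if m.RestartRequested || (m.PendingUpdate && !m.Updating && !m.UpdateFailed) {
			return m, nil, true
		}
	}
	return member{}, nil, false
}

func main() {
	members := []member{{Name: "dbserver-1"}, {Name: "dbserver-2", RestartRequested: true}}
	m, d, ok := pickMember(members, func(member) *decision { return nil })
	fmt.Println(m.Name, d, ok) // dbserver-2 <nil> true: the rotate phase handles the restart
}
```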
@@ -364,10 +374,29 @@ func arangoMemberPodTemplateNeedsUpdate(ctx context.Context, log zerolog.Logger,
// clusterReadyForUpgrade returns true if the cluster is ready for the next update, that is:
// - all shards are in sync
// - all members are ready and fine
func clusterReadyForUpgrade(context PlanBuilderContext) bool {
    status, _ := context.GetStatus()
    allInSync := context.GetShardSyncStatus()
    return allInSync && status.Conditions.IsTrue(api.ConditionTypeReady)
func groupReadyForRestart(context PlanBuilderContext, spec api.DeploymentSpec, status api.DeploymentStatus, member api.MemberStatus, group api.ServerGroup) bool {
    if util.BoolOrDefault(spec.AllowUnsafeUpgrade, false) {
        return true
    }

    if !status.Conditions.IsTrue(api.ConditionTypeBootstrapCompleted) {
        // Restart is allowed always when bootstrap is not yet completed
        return true
    }

    // If current member is not ready, kill anyway
    if !member.Conditions.IsTrue(api.ConditionTypeReady) {
        return true
    }

    switch group {
    case api.ServerGroupDBServers:
        // TODO: Improve shard placement discovery and keep WriteConcern
        return context.GetShardSyncStatus() && status.Members.MembersOfGroup(group).AllMembersReady()
    default:
        // In case of agents we can kill only one agent at same time
        return status.Members.MembersOfGroup(group).AllMembersReady()
    }
}

// createUpgradeMemberPlan creates a plan to upgrade (stop-recreateWithAutoUpgrade-stop-start) an existing
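Compared with the removed clusterReadyForUpgrade, the gate is now evaluated per group and per member rather than once for the whole cluster. A rough sketch of the new decision, derived from the code above with plain boolean inputs standing in for the operator's context and status types:

```go
// Sketch of the per-group restart gate above, with simplified inputs.
package main

import "fmt"

func groupReadyForRestart(allowUnsafe, bootstrapped, memberReady, shardsInSync, groupMembersReady, isDBServerGroup bool) bool {
	if allowUnsafe {
		return true // spec.allowUnsafeUpgrade bypasses every other check
	}
	if !bootstrapped {
		return true // before bootstrap completes, restarts are always allowed
	}
	if !memberReady {
		return true // a failing member may be restarted even if its group is degraded
	}
	if isDBServerGroup {
		return shardsInSync && groupMembersReady
	}
	return groupMembersReady // e.g. agents: only one restart at a time, rest must be ready
}

func main() {
	// A DBServer that is itself not ready can now be restarted even though
	// its group is not fully healthy, which is what the failing-member fix targets.
	fmt.Println(groupReadyForRestart(false, true, false, false, false, true)) // true
}
```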
@@ -261,7 +261,7 @@ func (m *MemberArangoDPod) AsInput() pod.Input {
}

func (m *MemberArangoDPod) Init(_ context.Context, _ interfaces.Inspector, pod *core.Pod) error {
    terminationGracePeriodSeconds := int64(math.Ceil(m.group.DefaultTerminationGracePeriod().Seconds()))
    terminationGracePeriodSeconds := int64(math.Ceil(m.groupSpec.GetTerminationGracePeriod(m.group).Seconds()))
    pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
    pod.Spec.PriorityClassName = m.groupSpec.PriorityClassName
@@ -313,7 +313,7 @@ func (m *MemberSyncPod) GetContainerCreator() interfaces.ContainerCreator {

// Init initializes the arangosync pod.
func (m *MemberSyncPod) Init(ctx context.Context, cachedStatus interfaces.Inspector, pod *core.Pod) error {
    terminationGracePeriodSeconds := int64(math.Ceil(m.group.DefaultTerminationGracePeriod().Seconds()))
    terminationGracePeriodSeconds := int64(math.Ceil(m.groupSpec.GetTerminationGracePeriod(m.group).Seconds()))
    pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
    pod.Spec.PriorityClassName = m.groupSpec.PriorityClassName
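Both pod builders convert the configured grace period back to whole seconds for the pod spec; math.Ceil rounds sub-second values up so a non-zero duration never truncates to zero. A tiny illustration of that conversion, using only the standard library:

```go
// Illustration of the seconds conversion used in both Init methods above.
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	d := 2500 * time.Millisecond
	seconds := int64(math.Ceil(d.Seconds()))
	fmt.Println(seconds) // 3, not 2: partial seconds round up
}
```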
@@ -342,11 +342,6 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
        return nil
    })

    // Update overall conditions
    if _, hasReady := status.Conditions.Get(api.ConditionTypeReady); !hasReady {
        // Ready was never set, set BootstrapComplete to false
        status.Conditions.Update(api.ConditionTypeBootstrapCompleted, false, "Bootstrap waiting", "Waiting for deployment")
    }
    spec := r.context.GetSpec()
    allMembersReady := status.Members.AllMembersReady(spec.GetMode(), spec.Sync.IsEnabled())
    status.Conditions.Update(api.ConditionTypeReady, allMembersReady, "", "")
@@ -33,6 +33,11 @@ func podCompare(_ api.DeploymentSpec, _ api.ServerGroup, spec, status *core.PodS
            mode = mode.And(SilentRotation)
        }

        if !util.CompareInt64p(spec.TerminationGracePeriodSeconds, status.TerminationGracePeriodSeconds) {
            status.TerminationGracePeriodSeconds = spec.TerminationGracePeriodSeconds
            mode = mode.And(SilentRotation)
        }

        return
    }
}
@@ -23,6 +23,8 @@ package rotation
import (
    "testing"

    "github.com/arangodb/kube-arangodb/pkg/util"

    "github.com/arangodb/kube-arangodb/pkg/deployment/topology"

    core "k8s.io/api/core/v1"
@@ -68,6 +70,46 @@ func Test_ArangoD_SchedulerName(t *testing.T) {
    runTestCases(t)(testCases...)
}

func Test_ArangoD_TerminationGracePeriodSeconds(t *testing.T) {
    testCases := []TestCase{
        {
            name: "Add",
            spec: buildPodSpec(func(pod *core.PodTemplateSpec) {
                pod.Spec.TerminationGracePeriodSeconds = nil
            }),
            status: buildPodSpec(func(pod *core.PodTemplateSpec) {
                pod.Spec.TerminationGracePeriodSeconds = util.NewInt64(30)
            }),

            expectedMode: SilentRotation,
        },
        {
            name: "Remove",
            spec: buildPodSpec(func(pod *core.PodTemplateSpec) {
                pod.Spec.TerminationGracePeriodSeconds = util.NewInt64(30)
            }),
            status: buildPodSpec(func(pod *core.PodTemplateSpec) {
                pod.Spec.TerminationGracePeriodSeconds = nil
            }),

            expectedMode: SilentRotation,
        },
        {
            name: "Update",
            spec: buildPodSpec(func(pod *core.PodTemplateSpec) {
                pod.Spec.TerminationGracePeriodSeconds = util.NewInt64(33)
            }),
            status: buildPodSpec(func(pod *core.PodTemplateSpec) {
                pod.Spec.TerminationGracePeriodSeconds = util.NewInt64(30)
            }),

            expectedMode: SilentRotation,
        },
    }

    runTestCases(t)(testCases...)
}

func Test_ArangoD_Affinity(t *testing.T) {
    testCases := []TestCase{
        {
pkg/util/int.go (new file, 37 lines)
@@ -0,0 +1,37 @@
//
// DISCLAIMER
//
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//

package util

func CompareInt64(a, b int64) bool {
    return a == b
}

func CompareInt64p(a, b *int64) bool {
    if a == nil && b == nil {
        return true
    }

    if a == nil || b == nil {
        return false
    }

    return CompareInt64(*a, *b)
}
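The nil-aware comparison is what lets podCompare treat an unset grace period and an explicitly set one as different values. A small usage sketch of the new helpers:

```go
// Usage sketch for the new helpers in pkg/util/int.go.
package main

import (
	"fmt"

	"github.com/arangodb/kube-arangodb/pkg/util"
)

func main() {
	a := util.NewInt64(30)
	b := util.NewInt64(30)

	fmt.Println(util.CompareInt64p(nil, nil)) // true: both unset
	fmt.Println(util.CompareInt64p(a, nil))   // false: only one side is set
	fmt.Println(util.CompareInt64p(a, b))     // true: equal values behind the pointers
}
```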