Mirror of https://github.com/arangodb/kube-arangodb.git (synced 2024-12-15 17:51:03 +00:00)

[Feature] Core containers (#871)

parent 9a00f404a8
commit 69f08e18d3

19 changed files with 685 additions and 143 deletions
@@ -9,6 +9,7 @@
 - Add debug mode (Golang DLV)
 - License V2 for ArangoDB 3.9.0+
 - Add ArangoClusterSynchronization v1 API
+- Add core containers names to follow their terminations

 ## [1.2.6](https://github.com/arangodb/kube-arangodb/tree/1.2.6) (2021-12-15)
 - Add ArangoBackup backoff functionality
@@ -17,8 +17,6 @@
 //
 // Copyright holder is ArangoDB GmbH, Cologne, Germany
 //
-// Author Ewout Prangsma
-//

 package v1

@@ -28,11 +26,12 @@ import (
 	"fmt"
 	"reflect"

-	"github.com/arangodb/kube-arangodb/pkg/util/errors"
-
-	"github.com/arangodb/kube-arangodb/pkg/util"
 	core "k8s.io/api/core/v1"

+	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
+	"github.com/arangodb/kube-arangodb/pkg/util"
+	"github.com/arangodb/kube-arangodb/pkg/util/errors"
+	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 )

 var (
@@ -537,3 +536,19 @@ func (s DeploymentSpec) Checksum() (string, error) {

 	return fmt.Sprintf("%0x", sha256.Sum256(data)), nil
 }
+
+// GetCoreContainers returns the names of all containers which must be running in the pod for the given group of servers.
+func (s DeploymentSpec) GetCoreContainers(group ServerGroup) utils.StringList {
+	groupSpec := s.GetServerGroupSpec(group)
+	if len(groupSpec.SidecarCoreNames) == 0 {
+		return utils.StringList{k8sutil.ServerContainerName}
+	}
+
+	result := make(utils.StringList, 0, len(groupSpec.SidecarCoreNames)+1)
+	if !utils.StringList(groupSpec.SidecarCoreNames).Has(k8sutil.ServerContainerName) {
+		result = append(result, k8sutil.ServerContainerName)
+	}
+	result = append(result, groupSpec.SidecarCoreNames...)
+
+	return result
+}
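A minimal usage sketch (not part of this commit; the sidecar name is made up): with no SidecarCoreNames configured a group's core containers default to the server container, otherwise the server name is prepended unless it is already listed.

package main

import (
	"fmt"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
)

func main() {
	var spec api.DeploymentSpec

	// Default: only the ArangoDB server container is a core container.
	fmt.Println(spec.GetCoreContainers(api.ServerGroupDBServers)) // [server]

	// With a sidecar core name configured, "server" is prepended unless already listed.
	spec.DBServers.SidecarCoreNames = []string{"metrics-exporter"} // made-up name
	fmt.Println(spec.GetCoreContainers(api.ServerGroupDBServers)) // [server metrics-exporter]
}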
@@ -17,17 +17,17 @@
 //
 // Copyright holder is ArangoDB GmbH, Cologne, Germany
 //
-// Author Ewout Prangsma
-//

 package v1

 import (
 	"testing"

-	"github.com/arangodb/kube-arangodb/pkg/util"
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"

+	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
+	"github.com/arangodb/kube-arangodb/pkg/util"
 )

 func TestDeploymentSpecValidate(t *testing.T) {
@@ -122,3 +122,80 @@ func TestDeploymentSpecResetImmutableFields(t *testing.T) {
 		assert.Equal(t, test.Expected, test.Target)
 	}
 }
+
+func TestDeploymentSpec_GetCoreContainers(t *testing.T) {
+	type fields struct {
+		Single, Agents, DBServers, Coordinators, SyncMasters, SyncWorkers ServerGroupSpec
+	}
+	type args struct {
+		group ServerGroup
+	}
+
+	tests := map[string]struct {
+		fields fields
+		args   args
+		want   utils.StringList
+	}{
+		"one sidecar container": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: []string{"other"}}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server", "other"},
+		},
+		"one predefined container and one sidecar container": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: []string{"server", "other"}}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server", "other"},
+		},
+		"zero core containers": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: nil}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server"},
+		},
+		"two non-core containers": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: []string{"other1", "other2"}}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server", "other1", "other2"},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			s := DeploymentSpec{DBServers: test.fields.DBServers}
+
+			got := s.GetCoreContainers(test.args.group)
+			assert.Equal(t, test.want, got)
+		})
+	}
+}
@@ -128,7 +128,8 @@ func (ds DeploymentStatusMembers) ForServerGroup(cb func(group ServerGroup, list
 }

 // MemberStatusByPodName returns a reference to the element in the given set of lists that has the given pod name.
-// If no such element exists, nil is returned.
+// It returns the member status and the group which the pod belongs to.
+// If no such element exists, false is returned.
 func (ds DeploymentStatusMembers) MemberStatusByPodName(podName string) (MemberStatus, ServerGroup, bool) {
 	if result, found := ds.Single.ElementByPodName(podName); found {
 		return result, ServerGroupSingle, true
@@ -126,6 +126,9 @@ type ServerGroupSpec struct {
 	Affinity *core.PodAffinity `json:"affinity,omitempty"`
 	// NodeAffinity specified additional nodeAffinity settings in ArangoDB Pod definitions
 	NodeAffinity *core.NodeAffinity `json:"nodeAffinity,omitempty"`
+	// SidecarCoreNames is a list of sidecar containers which must run in the pod.
+	// Some names (e.g. "server", "worker") are reserved, and they don't have any impact.
+	SidecarCoreNames []string `json:"sidecarCoreNames,omitempty"`
 	// Sidecars specifies a list of additional containers to be started
 	Sidecars []core.Container `json:"sidecars,omitempty"`
 	// SecurityContext specifies security context for group
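A hedged configuration sketch (exporter name and image are made up): each non-reserved name in SidecarCoreNames is expected to have a matching container in Sidecars, which marks that sidecar as a core container of the pod.

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
)

func main() {
	spec := api.ServerGroupSpec{
		// Every non-reserved name listed here should also appear in Sidecars.
		SidecarCoreNames: []string{"metrics-exporter"}, // made-up name
		Sidecars: []core.Container{
			{Name: "metrics-exporter", Image: "example/exporter:latest"}, // made-up image
		},
	}
	fmt.Println(spec.SidecarCoreNames, len(spec.Sidecars))
}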
pkg/apis/deployment/v1/zz_generated.deepcopy.go (generated, 5 changed lines)

@@ -1976,6 +1976,11 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) {
 		*out = new(corev1.NodeAffinity)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.SidecarCoreNames != nil {
+		in, out := &in.SidecarCoreNames, &out.SidecarCoreNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	if in.Sidecars != nil {
 		in, out := &in.Sidecars, &out.Sidecars
 		*out = make([]corev1.Container, len(*in))
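A small sketch of the generated deep-copy semantics (values are made up): mutating the copy's SidecarCoreNames does not affect the original spec, because the slice is cloned.

package main

import (
	"fmt"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
)

func main() {
	in := api.ServerGroupSpec{SidecarCoreNames: []string{"server", "metrics-exporter"}} // made-up names
	out := in.DeepCopy()

	out.SidecarCoreNames[0] = "changed"
	fmt.Println(in.SidecarCoreNames[0]) // still "server"
}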
@@ -17,8 +17,6 @@
 //
 // Copyright holder is ArangoDB GmbH, Cologne, Germany
 //
-// Author Ewout Prangsma
-//

 package v2alpha1

@@ -28,11 +26,12 @@ import (
 	"fmt"
 	"reflect"

-	"github.com/arangodb/kube-arangodb/pkg/util/errors"
-
-	"github.com/arangodb/kube-arangodb/pkg/util"
 	core "k8s.io/api/core/v1"

+	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
+	"github.com/arangodb/kube-arangodb/pkg/util"
+	"github.com/arangodb/kube-arangodb/pkg/util/errors"
+	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 )

 var (
@@ -537,3 +536,19 @@ func (s DeploymentSpec) Checksum() (string, error) {

 	return fmt.Sprintf("%0x", sha256.Sum256(data)), nil
 }
+
+// GetCoreContainers returns the names of all containers which must be running in the pod for the given group of servers.
+func (s DeploymentSpec) GetCoreContainers(group ServerGroup) utils.StringList {
+	groupSpec := s.GetServerGroupSpec(group)
+	if len(groupSpec.SidecarCoreNames) == 0 {
+		return utils.StringList{k8sutil.ServerContainerName}
+	}
+
+	result := make(utils.StringList, 0, len(groupSpec.SidecarCoreNames)+1)
+	if !utils.StringList(groupSpec.SidecarCoreNames).Has(k8sutil.ServerContainerName) {
+		result = append(result, k8sutil.ServerContainerName)
+	}
+	result = append(result, groupSpec.SidecarCoreNames...)
+
+	return result
+}
@@ -17,17 +17,17 @@
 //
 // Copyright holder is ArangoDB GmbH, Cologne, Germany
 //
-// Author Ewout Prangsma
-//

 package v2alpha1

 import (
 	"testing"

-	"github.com/arangodb/kube-arangodb/pkg/util"
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"

+	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
+	"github.com/arangodb/kube-arangodb/pkg/util"
 )

 func TestDeploymentSpecValidate(t *testing.T) {
@@ -122,3 +122,80 @@ func TestDeploymentSpecResetImmutableFields(t *testing.T) {
 		assert.Equal(t, test.Expected, test.Target)
 	}
 }
+
+func TestDeploymentSpec_GetCoreContainers(t *testing.T) {
+	type fields struct {
+		Single, Agents, DBServers, Coordinators, SyncMasters, SyncWorkers ServerGroupSpec
+	}
+	type args struct {
+		group ServerGroup
+	}
+
+	tests := map[string]struct {
+		fields fields
+		args   args
+		want   utils.StringList
+	}{
+		"one sidecar container": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: []string{"other"}}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server", "other"},
+		},
+		"one predefined container and one sidecar container": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: []string{"server", "other"}}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server", "other"},
+		},
+		"zero core containers": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: nil}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server"},
+		},
+		"two non-core containers": {
+			fields: fields{DBServers: ServerGroupSpec{SidecarCoreNames: []string{"other1", "other2"}}},
+			args:   args{group: ServerGroupDBServers},
+			want:   utils.StringList{"server", "other1", "other2"},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			s := DeploymentSpec{DBServers: test.fields.DBServers}
+
+			got := s.GetCoreContainers(test.args.group)
+			assert.Equal(t, test.want, got)
+		})
+	}
+}
@@ -128,7 +128,8 @@ func (ds DeploymentStatusMembers) ForServerGroup(cb func(group ServerGroup, list
 }

 // MemberStatusByPodName returns a reference to the element in the given set of lists that has the given pod name.
-// If no such element exists, nil is returned.
+// It returns the member status and the group which the pod belongs to.
+// If no such element exists, false is returned.
 func (ds DeploymentStatusMembers) MemberStatusByPodName(podName string) (MemberStatus, ServerGroup, bool) {
 	if result, found := ds.Single.ElementByPodName(podName); found {
 		return result, ServerGroupSingle, true
@@ -126,6 +126,9 @@ type ServerGroupSpec struct {
 	Affinity *core.PodAffinity `json:"affinity,omitempty"`
 	// NodeAffinity specified additional nodeAffinity settings in ArangoDB Pod definitions
 	NodeAffinity *core.NodeAffinity `json:"nodeAffinity,omitempty"`
+	// SidecarCoreNames is a list of sidecar containers which must run in the pod.
+	// Some names (e.g. "server", "worker") are reserved, and they don't have any impact.
+	SidecarCoreNames []string `json:"sidecarCoreNames,omitempty"`
 	// Sidecars specifies a list of additional containers to be started
 	Sidecars []core.Container `json:"sidecars,omitempty"`
 	// SecurityContext specifies security context for group
@@ -1976,6 +1976,11 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) {
 		*out = new(v1.NodeAffinity)
 		(*in).DeepCopyInto(*out)
 	}
+	if in.SidecarCoreNames != nil {
+		in, out := &in.SidecarCoreNames, &out.SidecarCoreNames
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
 	if in.Sidecars != nil {
 		in, out := &in.Sidecars, &out.Sidecars
 		*out = make([]v1.Container, len(*in))
@@ -27,18 +27,18 @@ import (
 	"strings"
 	"time"

-	"github.com/arangodb/kube-arangodb/pkg/util/globals"
-
 	"github.com/rs/zerolog"
 	core "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
+	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
 	"github.com/arangodb/kube-arangodb/pkg/deployment/pod"
 	"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
 	"github.com/arangodb/kube-arangodb/pkg/util/arangod"
 	"github.com/arangodb/kube-arangodb/pkg/util/constants"
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
+	"github.com/arangodb/kube-arangodb/pkg/util/globals"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 	inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/interfaces"
@@ -141,7 +141,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
 	pod, err := ib.Context.GetCachedStatus().PodReadInterface().Get(ctxChild, podName, metav1.GetOptions{})
 	if err == nil {
 		// Pod found
-		if k8sutil.IsPodFailed(pod) {
+		if k8sutil.IsPodFailed(pod, utils.StringList{k8sutil.ServerContainerName}) {
 			// Wait some time before deleting the pod
 			if time.Now().After(pod.GetCreationTimestamp().Add(30 * time.Second)) {
 				err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
@@ -17,8 +17,6 @@
 //
 // Copyright holder is ArangoDB GmbH, Cologne, Germany
 //
-// Author Ewout Prangsma
-//

 package resources

@@ -26,14 +24,13 @@ import (
 	"context"
 	"time"

-	"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector"
 	v1 "k8s.io/api/core/v1"

+	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
+	"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector"
 	"github.com/arangodb/kube-arangodb/pkg/util"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 	inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
-
-	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 )

 const (
@@ -55,15 +52,19 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context, cachedStatus insp
 			return nil
 		}

-		if !(k8sutil.IsPodSucceeded(pod) || k8sutil.IsPodFailed(pod) || k8sutil.IsPodTerminating(pod)) {
-			return nil
-		}
-
 		// Find member status
 		memberStatus, group, found := status.Members.MemberStatusByPodName(pod.GetName())
 		if !found {
 			log.Debug().Str("pod", pod.GetName()).Msg("no memberstatus found for pod. Performing cleanup")
 		} else {
+			spec := r.context.GetSpec()
+			coreContainers := spec.GetCoreContainers(group)
+			if !(k8sutil.IsPodSucceeded(pod, coreContainers) || k8sutil.IsPodFailed(pod, coreContainers) ||
+				k8sutil.IsPodTerminating(pod)) {
+				// The pod is neither terminating nor failed nor succeeded.
+				return nil
+			}
+
 			// Check member termination condition
 			if !memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) {
 				if !group.IsStateless() {
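A hedged sketch of the new cleanup gate (pod contents are made up): a pod only qualifies for cleanup when its core containers have finished, or the pod itself is terminating.

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

func main() {
	var spec api.DeploymentSpec
	coreContainers := spec.GetCoreContainers(api.ServerGroupDBServers) // ["server"] by default

	pod := &core.Pod{Status: core.PodStatus{ContainerStatuses: []core.ContainerStatus{
		{Name: "server", State: core.ContainerState{Terminated: &core.ContainerStateTerminated{ExitCode: 0}}},
	}}}

	// Cleanup may proceed only when the core containers have finished (or the pod is terminating).
	done := k8sutil.IsPodSucceeded(pod, coreContainers) || k8sutil.IsPodFailed(pod, coreContainers) || k8sutil.IsPodTerminating(pod)
	fmt.Println(done) // true: the only core container exited with code 0
}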
@@ -289,6 +289,10 @@ func (m *MemberArangoDPod) Validate(cachedStatus interfaces.Inspector) error {
 		return err
 	}

+	if err := validateSidecars(m.groupSpec.SidecarCoreNames, m.groupSpec.GetSidecars()); err != nil {
+		return err
+	}
+
 	return nil
 }

@@ -364,6 +368,7 @@ func (m *MemberArangoDPod) GetSidecars(pod *core.Pod) error {
 	// A sidecar provided by the user
 	sidecars := m.groupSpec.GetSidecars()
 	if len(sidecars) > 0 {
+		addLifecycleSidecar(m.groupSpec.SidecarCoreNames, sidecars)
 		pod.Spec.Containers = append(pod.Spec.Containers, sidecars...)
 	}

@@ -634,3 +639,60 @@ func (a *ArangoVersionCheckContainer) GetName() string {
 func (a *ArangoVersionCheckContainer) GetProbes() (*core.Probe, *core.Probe, *core.Probe, error) {
 	return nil, nil, nil, nil
 }
+
+// validateSidecars checks that every core name is present in the sidecar list.
+// It returns an error when at least one core name is missing.
+func validateSidecars(coreNames []string, sidecars []core.Container) error {
+	for _, coreName := range coreNames {
+		if api.IsReservedServerGroupContainerName(coreName) {
+			return fmt.Errorf("sidecar core name \"%s\" can not be used because it is reserved", coreName)
+		}
+
+		found := false
+		for _, sidecar := range sidecars {
+			if sidecar.Name == coreName {
+				found = true
+				break
+			}
+		}
+
+		if !found {
+			return fmt.Errorf("sidecar core name \"%s\" does not exist on the sidecars' list", coreName)
+		}
+	}
+
+	return nil
+}
+
+// addLifecycleSidecar adds a lifecycle handler to every core sidecar unless the sidecar already defines its own custom lifecycle.
+func addLifecycleSidecar(coreNames []string, sidecars []core.Container) error {
+	for _, coreName := range coreNames {
+		for i, sidecar := range sidecars {
+			if coreName != sidecar.Name {
+				continue
+			}
+
+			if sidecar.Lifecycle != nil && sidecar.Lifecycle.PreStop != nil {
+				// The user provided a custom lifecycle preStop, so break and check the next core container.
+				break
+			}
+
+			lifecycle, err := k8sutil.NewLifecycleFinalizers()
+			if err != nil {
+				return err
+			}
+
+			if sidecar.Lifecycle == nil {
+				sidecars[i].Lifecycle = lifecycle
+			} else {
+				// Set only preStop, because the user can provide a postStart lifecycle.
+				sidecars[i].Lifecycle.PreStop = lifecycle.PreStop
+			}
+
+			break
+		}
+	}
+
+	return nil
+}
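A hedged sketch of what the validation above enforces, using only the exported reserved-name check (container names are made up): every non-reserved core name must have a matching sidecar, otherwise the pod definition is rejected.

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
)

func main() {
	coreNames := []string{"metrics-exporter"}              // made-up core name
	sidecars := []core.Container{{Name: "logger"}}         // "metrics-exporter" is missing on purpose

	for _, name := range coreNames {
		if api.IsReservedServerGroupContainerName(name) {
			fmt.Printf("%q is reserved and would be rejected\n", name)
			continue
		}
		found := false
		for _, s := range sidecars {
			if s.Name == name {
				found = true
				break
			}
		}
		if !found {
			fmt.Printf("%q is declared as a core name but has no matching sidecar\n", name)
		}
	}
}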
@@ -251,6 +251,7 @@ func (m *MemberSyncPod) GetSidecars(pod *core.Pod) error {
 	// A sidecar provided by the user
 	sidecars := m.groupSpec.GetSidecars()
 	if len(sidecars) > 0 {
+		addLifecycleSidecar(m.groupSpec.SidecarCoreNames, sidecars)
 		pod.Spec.Containers = append(pod.Spec.Containers, sidecars...)
 	}

@@ -361,6 +362,10 @@ func (m *MemberSyncPod) Init(ctx context.Context, cachedStatus interfaces.Inspec
 }

 func (m *MemberSyncPod) Validate(_ interfaces.Inspector) error {
+	if err := validateSidecars(m.groupSpec.SidecarCoreNames, m.groupSpec.GetSidecars()); err != nil {
+		return err
+	}
+
 	return nil
 }

@@ -52,6 +52,8 @@ func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatu
 	log := r.log.With().Str("pod-name", p.GetName()).Logger()
 	var removalList []string

+	// When the main container is terminated, then the whole pod should be terminated,
+	// so sidecar core containers' names should not be checked here.
 	isServerContainerDead := !k8sutil.IsPodServerContainerRunning(p)

 	for _, f := range p.ObjectMeta.GetFinalizers() {
@@ -96,9 +96,12 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
 			return nil
 		}

+		spec := r.context.GetSpec()
+		coreContainers := spec.GetCoreContainers(group)
+
 		// Update state
 		updateMemberStatusNeeded := false
-		if k8sutil.IsPodSucceeded(pod) {
+		if k8sutil.IsPodSucceeded(pod, coreContainers) {
 			// Pod has terminated with exit code 0.
 			wasTerminated := memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated)
 			if memberStatus.Conditions.Update(api.ConditionTypeTerminated, true, "Pod Succeeded", "") {
@@ -112,7 +115,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
 				r.InvalidateSyncStatus()
 			}
 		}
-	} else if k8sutil.IsPodFailed(pod) {
+	} else if k8sutil.IsPodFailed(pod, coreContainers) {
 		// Pod has terminated with at least 1 container with a non-zero exit code.
 		wasTerminated := memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated)
 		if memberStatus.Conditions.Update(api.ConditionTypeTerminated, true, "Pod Failed", "") {
@@ -121,11 +124,9 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
 			switch container {
 			case api.ServerGroupReservedInitContainerNameVersionCheck:
 				if c, ok := k8sutil.GetAnyContainerStatusByName(pod.Status.InitContainerStatuses, container); ok {
-					if t := c.State.Terminated; t != nil {
-						if t := c.State.Terminated; t != nil && t.ExitCode == 11 {
-							memberStatus.Upgrade = true
-							updateMemberStatusNeeded = true
-						}
+					if t := c.State.Terminated; t != nil && t.ExitCode == 11 {
+						memberStatus.Upgrade = true
+						updateMemberStatusNeeded = true
 					}
 				}
 			case api.ServerGroupReservedInitContainerNameUpgrade:
@@ -133,20 +134,18 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
 				}

 				if c, ok := k8sutil.GetAnyContainerStatusByName(pod.Status.InitContainerStatuses, container); ok {
-					if t := c.State.Terminated; t != nil {
-						if t := c.State.Terminated; t != nil && t.ExitCode != 0 {
+					if t := c.State.Terminated; t != nil && t.ExitCode != 0 {
 						log.Warn().Str("member", memberStatus.ID).
 							Str("pod", pod.GetName()).
 							Str("container", container).
 							Str("uid", string(pod.GetUID())).
 							Int32("exit-code", t.ExitCode).
 							Str("reason", t.Reason).
 							Str("message", t.Message).
 							Int32("signal", t.Signal).
 							Time("started", t.StartedAt.Time).
 							Time("finished", t.FinishedAt.Time).
 							Msgf("Pod failed in unexpected way: Init Container failed")
-						}
 					}
 				}
@@ -155,20 +154,18 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
 			if containers := k8sutil.GetFailedContainerNames(pod.Status.ContainerStatuses); len(containers) > 0 {
 				for _, container := range containers {
 					if c, ok := k8sutil.GetAnyContainerStatusByName(pod.Status.ContainerStatuses, container); ok {
-						if t := c.State.Terminated; t != nil {
-							if t := c.State.Terminated; t != nil && t.ExitCode != 0 {
+						if t := c.State.Terminated; t != nil && t.ExitCode != 0 {
 							log.Warn().Str("member", memberStatus.ID).
 								Str("pod", pod.GetName()).
 								Str("container", container).
 								Str("uid", string(pod.GetUID())).
 								Int32("exit-code", t.ExitCode).
 								Str("reason", t.Reason).
 								Str("message", t.Message).
 								Int32("signal", t.Signal).
 								Time("started", t.StartedAt.Time).
 								Time("finished", t.FinishedAt.Time).
 								Msgf("Pod failed in unexpected way: Core Container failed")
-							}
 						}
 					}
 				}
@@ -222,7 +219,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
 		}
 		// End of Topology labels

-		if k8sutil.IsContainerReady(pod, k8sutil.ServerContainerName) {
+		if k8sutil.AreContainersReady(pod, coreContainers) {
 			// Pod is now ready
 			if memberStatus.Conditions.Update(api.ConditionTypeReady, true, "Pod Ready", "") {
 				log.Debug().Str("pod-name", pod.GetName()).Msg("Updating member condition Ready & Initialised to true")
@@ -17,9 +17,6 @@
 //
 // Copyright holder is ArangoDB GmbH, Cologne, Germany
 //
-// Author Ewout Prangsma
-// Author Tomasz Mielech
-//

 package k8sutil

@@ -30,21 +27,17 @@ import (
 	"strings"
 	"time"

-	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
-
-	"github.com/arangodb/kube-arangodb/pkg/util"
-
-	"github.com/arangodb/kube-arangodb/pkg/util/errors"
-
-	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/interfaces"
-
+	core "k8s.io/api/core/v1"
+	"k8s.io/apimachinery/pkg/api/resource"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"

-	"k8s.io/apimachinery/pkg/api/resource"
-
-	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
+	"github.com/arangodb/kube-arangodb/pkg/util"
+	"github.com/arangodb/kube-arangodb/pkg/util/errors"
+	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
+	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/interfaces"
 )

 const (
@@ -102,9 +95,10 @@ func IsPodReady(pod *core.Pod) bool {
 	return condition != nil && condition.Status == core.ConditionTrue
 }

-// IsContainerReady returns true if the PodReady condition on
-// the given pod is set to true.
-func IsContainerReady(pod *core.Pod, container string) bool {
+// AreContainersReady checks whether the pod is considered ready.
+// Returns true if the PodReady condition on the given pod is set to true,
+// or when all provided core containers are running and none of them is listed as unready.
+func AreContainersReady(pod *core.Pod, coreContainers utils.StringList) bool {
 	condition := getPodCondition(&pod.Status, core.PodReady)
 	if condition == nil {
 		return false

@@ -114,21 +108,32 @@ func IsContainerReady(pod *core.Pod, container string) bool {
 		return true
 	}

-	if !IsContainerRunning(pod, container) {
-		return false
+	// Check if all required containers are running.
+	for _, c := range coreContainers {
+		if !IsContainerRunning(pod, c) {
+			return false
+		}
 	}

+	// From here on all required containers are running, but the unready condition must be checked additionally.
 	switch condition.Reason {
 	case ServerContainerConditionContainersNotReady:
-		if strings.HasPrefix(condition.Message, ServerContainerConditionPrefix) {
-			n := strings.TrimPrefix(condition.Message, ServerContainerConditionPrefix)
-			return !strings.Contains(n, container)
+		if !strings.HasPrefix(condition.Message, ServerContainerConditionPrefix) {
+			return false
 		}
-		return false
-	default:
-		return false
+
+		unreadyContainers := strings.TrimPrefix(condition.Message, ServerContainerConditionPrefix)
+		for _, c := range coreContainers {
+			if strings.Contains(unreadyContainers, c) {
+				// The container is on the list of unready containers.
+				return false
+			}
+		}
+
+		return true
 	}
+
+	return false
 }

 // GetPodByName returns pod if it exists among the pods' list
@@ -163,45 +168,77 @@ func IsContainerRunning(pod *core.Pod, name string) bool {
 	return false
 }

-// IsPodSucceeded returns true if the arangodb container of the pod
-// has terminated with exit code 0.
-func IsPodSucceeded(pod *core.Pod) bool {
+// IsPodSucceeded returns true when all core containers have terminated with a zero exit code,
+// or the whole pod has succeeded.
+func IsPodSucceeded(pod *core.Pod, coreContainers utils.StringList) bool {
 	if pod.Status.Phase == core.PodSucceeded {
 		return true
-	} else {
-		for _, c := range pod.Status.ContainerStatuses {
-			if c.Name != ServerContainerName {
-				continue
-			}
-
-			t := c.State.Terminated
-			if t != nil {
-				return t.ExitCode == 0
-			}
-		}
-		return false
 	}
+
+	core, succeeded := 0, 0
+	for _, c := range pod.Status.ContainerStatuses {
+		if !coreContainers.Has(c.Name) {
+			// It is not a core container, so check the next one.
+			continue
+		}
+
+		core++
+		if t := c.State.Terminated; t != nil && t.ExitCode == 0 {
+			succeeded++
+		}
+	}
+
+	if core > 0 && core == succeeded {
+		// There are core containers and all of them succeeded, so the whole pod succeeded.
+		return true
+	}
+
+	return false
 }

-// IsPodFailed returns true if the arangodb container of the pod
-// has terminated with a non-zero exit code.
-func IsPodFailed(pod *core.Pod) bool {
+// IsPodFailed returns true when at least one of the core containers has terminated with a non-zero exit code,
+// or the whole pod has failed.
+func IsPodFailed(pod *core.Pod, coreContainers utils.StringList) bool {
 	if pod.Status.Phase == core.PodFailed {
 		return true
-	} else {
-		for _, c := range pod.Status.ContainerStatuses {
-			if c.Name != ServerContainerName {
-				continue
-			}
-
-			t := c.State.Terminated
-			if t != nil {
-				return t.ExitCode != 0
-			}
-		}
-		return false
 	}
+
+	allCore, succeeded, failed := 0, 0, 0
+	for _, c := range pod.Status.ContainerStatuses {
+		if !coreContainers.Has(c.Name) {
+			// It is not a core container, so check the next one.
+			continue
+		}
+
+		allCore++
+		if t := c.State.Terminated; t != nil {
+			// A core container is terminated.
+			if t.ExitCode != 0 {
+				failed++
+			} else {
+				succeeded++
+			}
+		}
+	}
+
+	if failed == 0 && succeeded == 0 {
+		// None of the core containers has terminated.
+		return false
+	}
+
+	if failed > 0 {
+		// Some (or all) core containers have terminated with a failure.
+		// Other core containers can still be running or succeeded, but the whole pod is considered failed.
+		return true
+	} else if allCore == succeeded {
+		// All core containers succeeded, so the pod is not failed.
+		// The function `IsPodSucceeded` should recognize it in the next iteration.
+		return false
+	}
+
+	// Some core containers succeeded, but not all of them.
+	return true
 }

 // IsContainerFailed returns true if the arangodb container
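A hedged sketch (container names are made up) of how the reworked helpers judge a pod by its core containers rather than only the "server" container:

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"

	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

func main() {
	coreContainers := utils.StringList{"server", "metrics-exporter"} // "metrics-exporter" is made up

	pod := &core.Pod{Status: core.PodStatus{ContainerStatuses: []core.ContainerStatus{
		{Name: "server", State: core.ContainerState{Terminated: &core.ContainerStateTerminated{ExitCode: 0}}},
		{Name: "metrics-exporter", State: core.ContainerState{Terminated: &core.ContainerStateTerminated{ExitCode: 1}}},
		{Name: "logger", State: core.ContainerState{Terminated: &core.ContainerStateTerminated{ExitCode: 1}}},
	}}}

	fmt.Println(k8sutil.IsPodFailed(pod, coreContainers))    // true: a core container exited non-zero ("logger" is ignored)
	fmt.Println(k8sutil.IsPodSucceeded(pod, coreContainers)) // false: not all core containers exited with code 0
}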
@@ -17,8 +17,6 @@
 //
 // Copyright holder is ArangoDB GmbH, Cologne, Germany
 //
-// Author Ewout Prangsma
-//

 package k8sutil

@@ -27,6 +25,8 @@ import (

 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/core/v1"
+
+	"github.com/arangodb/kube-arangodb/pkg/backup/utils"
 )

 // TestIsPodReady tests IsPodReady.
@@ -56,30 +56,265 @@ func TestIsPodReady(t *testing.T) {

 // TestIsPodFailed tests IsPodFailed.
 func TestIsPodFailed(t *testing.T) {
-	assert.False(t, IsPodFailed(&v1.Pod{}))
-	assert.False(t, IsPodFailed(&v1.Pod{
-		Status: v1.PodStatus{
-			Phase: v1.PodRunning,
-		},
-	}))
-	assert.True(t, IsPodFailed(&v1.Pod{
-		Status: v1.PodStatus{
-			Phase: v1.PodFailed,
-		},
-	}))
+	type args struct {
+		pod            *v1.Pod
+		coreContainers utils.StringList
+	}
+	tests := map[string]struct {
+		args args
+		want bool
+	}{
+		"empty pod": {
+			args: args{pod: &v1.Pod{}},
+		},
+		"pod is running": {
+			args: args{pod: &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}},
+		},
+		"pod is failed": {
+			args: args{pod: &v1.Pod{Status: v1.PodStatus{Phase: v1.PodFailed}}},
+			want: true,
+		},
+		"one core container failed": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "core_container", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 1}}},
+				}}},
+				coreContainers: utils.StringList{"something", "core_container"},
+			},
+			want: true,
+		},
+		"one non-core container failed": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "non_core_container", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 1}}},
+				}}},
+				coreContainers: utils.StringList{"something", "core_container"},
+			},
+		},
+		"one core container succeeded": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "core_container", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+				}}},
+				coreContainers: utils.StringList{"something", "core_container"},
+			},
+		},
+		"first core container succeeded and second is still running": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "core_container1", State: v1.ContainerState{Running: &v1.ContainerStateRunning{}}},
+					{Name: "core_container2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+				}}},
+				coreContainers: utils.StringList{"core_container1", "core_container2"},
+			},
+			want: true,
+		},
+		"all containers succeeded": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "core_container1", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+					{Name: "core_container2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+				}}},
+				coreContainers: utils.StringList{"core_container1", "core_container2"},
+			},
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			got := IsPodFailed(test.args.pod, test.args.coreContainers)
+			assert.Equal(t, test.want, got)
+		})
+	}
 }

-// TestIsPodSucceeded tests IsPodSucceeded.
 func TestIsPodSucceeded(t *testing.T) {
-	assert.False(t, IsPodSucceeded(&v1.Pod{}))
-	assert.False(t, IsPodSucceeded(&v1.Pod{
-		Status: v1.PodStatus{
-			Phase: v1.PodRunning,
-		},
-	}))
-	assert.True(t, IsPodSucceeded(&v1.Pod{
-		Status: v1.PodStatus{
-			Phase: v1.PodSucceeded,
-		},
-	}))
+	type args struct {
+		pod            *v1.Pod
+		coreContainers utils.StringList
+	}
+	tests := map[string]struct {
+		args args
+		want bool
+	}{
+		"empty pod": {
+			args: args{pod: &v1.Pod{}},
+		},
+		"pod is succeeded": {
+			args: args{pod: &v1.Pod{Status: v1.PodStatus{Phase: v1.PodSucceeded}}},
+			want: true,
+		},
+		"all core containers succeeded": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "core_container1", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+					{Name: "core_container2", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+					{Name: "non-core_container"},
+				}}},
+				coreContainers: utils.StringList{"core_container1", "core_container2"},
+			},
+			want: true,
+		},
+		"non-core container succeeded": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "core_container1"},
+					{Name: "non-core_container", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+				}}},
+				coreContainers: utils.StringList{"core_container1"},
+			},
+		},
+		"the only one core container succeeded": {
+			args: args{
+				pod: &v1.Pod{Status: v1.PodStatus{ContainerStatuses: []v1.ContainerStatus{
+					{Name: "core_container1", State: v1.ContainerState{Terminated: &v1.ContainerStateTerminated{ExitCode: 0}}},
+					{Name: "non-core_container"},
+				}}},
+				coreContainers: utils.StringList{"core_container1"},
+			},
+			want: true,
+		},
+	}
+
+	for testName, test := range tests {
+		t.Run(testName, func(t *testing.T) {
+			got := IsPodSucceeded(test.args.pod, test.args.coreContainers)
+			assert.Equal(t, test.want, got)
+		})
+	}
 }