Mirror of https://github.com/arangodb/kube-arangodb.git
[Feature] Add Affinity to GroupSpec (#534)

Parent: 2b86360df4
Commit: bf7c9f9e9a

12 changed files with 1168 additions and 150 deletions
@@ -1,6 +1,7 @@
# Change Log

## [master](https://github.com/arangodb/kube-arangodb/tree/master) (N/A)
- Added Customizable Affinity settings for ArangoDB Member Pods
- Added possibility to override default images used by ArangoDeployment
- Added possibility to set probes on all groups
- Added Image Discovery type in ArangoDeployment spec
@@ -70,6 +70,12 @@ type ServerGroupSpec struct {
VolumeClaimTemplate *v1.PersistentVolumeClaim `json:"volumeClaimTemplate,omitempty"`
// VolumeResizeMode specified resize mode for pvc
VolumeResizeMode *PVCResizeMode `json:"pvcResizeMode,omitempty"`
// AntiAffinity specified additional antiAffinity settings in ArangoDB Pod definitions
AntiAffinity *v1.PodAntiAffinity `json:"antiAffinity,omitempty"`
// Affinity specified additional affinity settings in ArangoDB Pod definitions
Affinity *v1.PodAffinity `json:"affinity,omitempty"`
// NodeAffinity specified additional nodeAffinity settings in ArangoDB Pod definitions
NodeAffinity *v1.NodeAffinity `json:"nodeAffinity,omitempty"`
// Sidecars specifies a list of additional containers to be started
Sidecars []v1.Container `json:"sidecars,omitempty"`
// SecurityContext specifies security context for group
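Editorial note: the snippet below is not part of this commit. It is a minimal sketch of how the three new group-level fields might be populated from Go, using the same api and core package aliases that appear in this diff; the label selector, topology key, and zone values are purely illustrative.

package main

import (
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleGroupSpec sketches a DBServers group that must not share a node with
// pods labeled app=arangodb and that prefers nodes in an illustrative zone.
func exampleGroupSpec() api.ServerGroupSpec {
	return api.ServerGroupSpec{
		AntiAffinity: &core.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
				{
					LabelSelector: &metav1.LabelSelector{MatchLabels: map[string]string{"app": "arangodb"}},
					TopologyKey:   "kubernetes.io/hostname",
				},
			},
		},
		NodeAffinity: &core.NodeAffinity{
			PreferredDuringSchedulingIgnoredDuringExecution: []core.PreferredSchedulingTerm{
				{
					Weight: 10,
					Preference: core.NodeSelectorTerm{
						MatchExpressions: []core.NodeSelectorRequirement{
							{Key: "topology.kubernetes.io/zone", Operator: core.NodeSelectorOpIn, Values: []string{"zone-a"}},
						},
					},
				},
			},
		},
	}
}

func main() { _ = exampleGroupSpec() }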
15	pkg/apis/deployment/v1/zz_generated.deepcopy.go (generated)

@@ -965,6 +965,21 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) {
*out = new(PVCResizeMode)
**out = **in
}
if in.AntiAffinity != nil {
in, out := &in.AntiAffinity, &out.AntiAffinity
*out = new(corev1.PodAntiAffinity)
(*in).DeepCopyInto(*out)
}
if in.Affinity != nil {
in, out := &in.Affinity, &out.Affinity
*out = new(corev1.PodAffinity)
(*in).DeepCopyInto(*out)
}
if in.NodeAffinity != nil {
in, out := &in.NodeAffinity, &out.NodeAffinity
*out = new(corev1.NodeAffinity)
(*in).DeepCopyInto(*out)
}
if in.Sidecars != nil {
in, out := &in.Sidecars, &out.Sidecars
*out = make([]corev1.Container, len(*in))
691	pkg/deployment/deployment_affinity_test.go (new file)

@@ -0,0 +1,691 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package deployment

import (
"testing"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
core "k8s.io/api/core/v1"
)

func modifyAffinity(name, group string, required bool, role string, mods ...func(a *core.Affinity)) *core.Affinity {
affinity := k8sutil.CreateAffinity(name, group,
required, role)

for _, mod := range mods {
mod(affinity)
}

return affinity
}

func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
testAffinity := core.PodAffinityTerm{
TopologyKey: "myTopologyKey",
}

weight := core.WeightedPodAffinityTerm{
Weight: 6,
PodAffinityTerm: testAffinity,
}

testCases := []testCaseStruct{
{
Name: "DBserver POD with antiAffinity required",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
AntiAffinity: &core.PodAntiAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
testAffinity,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity)
}),
},
},
},
{
Name: "DBserver POD with antiAffinity prefered",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
AntiAffinity: &core.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{
weight,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight)
}),
},
},
},
{
Name: "DBserver POD with antiAffinity both",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
AntiAffinity: &core.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{
weight,
},
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
testAffinity,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight)
a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity)
}),
},
},
},
{
Name: "DBserver POD with antiAffinity mixed",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
AntiAffinity: &core.PodAntiAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{
weight,
weight,
weight,
weight,
},
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
testAffinity,
testAffinity,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight, weight, weight, weight)
a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity, testAffinity)
}),
},
},
},
}

runTestCases(t, testCases...)
}

func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
testAffinity := core.PodAffinityTerm{
TopologyKey: "myTopologyKey",
}

weight := core.WeightedPodAffinityTerm{
Weight: 6,
PodAffinityTerm: testAffinity,
}

testCases := []testCaseStruct{
{
Name: "DBserver POD with affinity required",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
Affinity: &core.PodAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
testAffinity,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
}
a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity)
}),
},
},
},
{
Name: "DBserver POD with affinity prefered",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
Affinity: &core.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{
weight,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
}
a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight)
}),
},
},
},
{
Name: "DBserver POD with affinity both",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
Affinity: &core.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{
weight,
},
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
testAffinity,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
}
a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight)
a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity)
}),
},
},
},
{
Name: "DBserver POD with affinity mixed",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
Affinity: &core.PodAffinity{
PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{
weight,
weight,
weight,
weight,
},
RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
testAffinity,
testAffinity,
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
}
a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight, weight, weight, weight)
a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity, testAffinity)
}),
},
},
},
}

runTestCases(t, testCases...)
}

func TestEnsurePod_ArangoDB_NodeAffinity(t *testing.T) {
testCases := []testCaseStruct{
{
Name: "DBserver POD with nodeAffinity required",
ArangoDeployment: &api.ArangoDeployment{
Spec: api.DeploymentSpec{
Image: util.NewString(testImage),
Authentication: noAuthentication,
TLS: noTLS,
DBServers: api.ServerGroupSpec{
NodeAffinity: &core.NodeAffinity{
RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
NodeSelectorTerms: []core.NodeSelectorTerm{
{
MatchFields: []core.NodeSelectorRequirement{
{
Key: "key",
},
},
},
},
},
},
},
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
},
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true

testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
ExpectedEvent: "member dbserver is created",
ExpectedPod: core.Pod{
Spec: core.PodSpec{
Volumes: []core.Volume{
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
},
Containers: []core.Container{
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
},
RestartPolicy: core.RestartPolicyNever,
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
n := core.NodeSelectorTerm{
MatchFields: []core.NodeSelectorRequirement{
{
Key: "key",
},
},
}
a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, n)
}),
},
},
},
}

runTestCases(t, testCases...)
}
@@ -23,9 +23,14 @@
package deployment

import (
"encoding/json"
"fmt"
"testing"

"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
core "k8s.io/api/core/v1"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

@@ -70,6 +75,9 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
pods, err := d.deps.KubeCli.CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, pods.Items, 1)
if util.BoolOrDefault(testCase.CompareChecksum, true) {
compareSpec(t, testCase.ExpectedPod.Spec, pods.Items[0].Spec)
}
require.Equal(t, testCase.ExpectedPod.Spec, pods.Items[0].Spec)
require.Equal(t, testCase.ExpectedPod.ObjectMeta, pods.Items[0].ObjectMeta)

@@ -106,3 +114,20 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
}
})
}

func compareSpec(t *testing.T, a, b core.PodSpec) {
ac, err := k8sutil.GetPodSpecChecksum(a)
require.NoError(t, err)

bc, err := k8sutil.GetPodSpecChecksum(b)
require.NoError(t, err)

aj, err := json.Marshal(a)
require.NoError(t, err)

bj, err := json.Marshal(b)
require.NoError(t, err)

require.Equal(t, string(aj), string(bj))
require.Equal(t, ac, bc)
}
@@ -65,6 +65,7 @@ type testCaseStruct struct {
ArangoDeployment *api.ArangoDeployment
Helper func(*testing.T, *Deployment, *testCaseStruct)
config Config
CompareChecksum *bool
ExpectedError error
ExpectedEvent string
ExpectedPod core.Pod
@@ -29,8 +29,10 @@ import (
"strings"
"time"

"github.com/arangodb/kube-arangodb/pkg/deployment/pod"

"github.com/rs/zerolog"
v1 "k8s.io/api/core/v1"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"

@@ -41,9 +43,12 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

var _ k8sutil.PodCreator = &ImageUpdatePod{}

type ImageUpdatePod struct {
spec api.DeploymentSpec
image string
spec api.DeploymentSpec
apiObject k8sutil.APIObject
image string
}

type ArangoDImageUpdateContainer struct {

@@ -191,8 +196,9 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, ima
}

imagePod := ImageUpdatePod{
spec: ib.Spec,
image: image,
spec: ib.Spec,
image: image,
apiObject: ib.APIObject,
}

pod, err := resources.RenderArangoPod(ib.APIObject, role, id, podName, args, &imagePod)

@@ -213,14 +219,14 @@ func (a *ArangoDImageUpdateContainer) GetExecutor() string {
return resources.ArangoDExecutor
}

func (a *ArangoDImageUpdateContainer) GetProbes() (*v1.Probe, *v1.Probe, error) {
func (a *ArangoDImageUpdateContainer) GetProbes() (*core.Probe, *core.Probe, error) {
return nil, nil, nil
}

func (a *ArangoDImageUpdateContainer) GetResourceRequirements() v1.ResourceRequirements {
return v1.ResourceRequirements{
Limits: make(v1.ResourceList),
Requests: make(v1.ResourceList),
func (a *ArangoDImageUpdateContainer) GetResourceRequirements() core.ResourceRequirements {
return core.ResourceRequirements{
Limits: make(core.ResourceList),
Requests: make(core.ResourceList),
}
}

@@ -228,8 +234,8 @@ func (a *ArangoDImageUpdateContainer) GetImage() string {
return a.image
}

func (a *ArangoDImageUpdateContainer) GetEnvs() []v1.EnvVar {
env := make([]v1.EnvVar, 0)
func (a *ArangoDImageUpdateContainer) GetEnvs() []core.EnvVar {
env := make([]core.EnvVar, 0)

if a.spec.License.HasSecretName() {
env = append(env, k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey,

@@ -243,15 +249,23 @@ func (a *ArangoDImageUpdateContainer) GetEnvs() []v1.EnvVar {
return nil
}

func (a *ArangoDImageUpdateContainer) GetLifecycle() (*v1.Lifecycle, error) {
func (a *ArangoDImageUpdateContainer) GetLifecycle() (*core.Lifecycle, error) {
return nil, nil
}

func (a *ArangoDImageUpdateContainer) GetImagePullPolicy() v1.PullPolicy {
func (a *ArangoDImageUpdateContainer) GetImagePullPolicy() core.PullPolicy {
return a.spec.GetImagePullPolicy()
}

func (i *ImageUpdatePod) Init(pod *v1.Pod) {
func (i *ImageUpdatePod) GetName() string {
return i.apiObject.GetName()
}

func (i *ImageUpdatePod) GetRole() string {
return "id"
}

func (i *ImageUpdatePod) Init(pod *core.Pod) {
terminationGracePeriodSeconds := int64((time.Second * 30).Seconds())
pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
}

@@ -271,9 +285,9 @@ func (i *ImageUpdatePod) GetAffinityRole() string {
return ""
}

func (i *ImageUpdatePod) GetVolumes() ([]v1.Volume, []v1.VolumeMount) {
var volumes []v1.Volume
var volumeMounts []v1.VolumeMount
func (i *ImageUpdatePod) GetVolumes() ([]core.Volume, []core.VolumeMount) {
var volumes []core.Volume
var volumeMounts []core.VolumeMount

volumes = append(volumes, k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName))
volumeMounts = append(volumeMounts, k8sutil.ArangodVolumeMount())

@@ -281,10 +295,10 @@ func (i *ImageUpdatePod) GetVolumes() ([]v1.Volume, []v1.VolumeMount) {
return volumes, volumeMounts
}

func (i *ImageUpdatePod) GetSidecars(*v1.Pod) {
func (i *ImageUpdatePod) GetSidecars(*core.Pod) {
}

func (i *ImageUpdatePod) GetInitContainers() ([]v1.Container, error) {
func (i *ImageUpdatePod) GetInitContainers() ([]core.Container, error) {
return nil, nil
}

@@ -292,14 +306,14 @@ func (i *ImageUpdatePod) GetFinalizers() []string {
return nil
}

func (i *ImageUpdatePod) GetTolerations() []v1.Toleration {
func (i *ImageUpdatePod) GetTolerations() []core.Toleration {

shortDur := k8sutil.TolerationDuration{
Forever: false,
TimeSpan: time.Second * 5,
}

tolerations := make([]v1.Toleration, 0, 2)
tolerations := make([]core.Toleration, 0, 2)
tolerations = k8sutil.AddTolerationIfNotFound(tolerations,
k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeNotReady, shortDur))
tolerations = k8sutil.AddTolerationIfNotFound(tolerations,

@@ -322,8 +336,28 @@ func (i *ImageUpdatePod) GetServiceAccountName() string {
return ""
}

func (a *ArangoDImageUpdateContainer) GetSecurityContext() *v1.SecurityContext {
func (a *ArangoDImageUpdateContainer) GetSecurityContext() *core.SecurityContext {
// Default security context
var v api.ServerGroupSpecSecurityContext
return v.NewSecurityContext()
}

func (i *ImageUpdatePod) GetPodAntiAffinity() *core.PodAntiAffinity {
a := core.PodAntiAffinity{}

pod.AppendPodAntiAffinityDefault(i, &a)

return pod.ReturnPodAntiAffinityOrNil(a)
}

func (i *ImageUpdatePod) GetPodAffinity() *core.PodAffinity {
return nil
}

func (i *ImageUpdatePod) GetNodeAffinity() *core.NodeAffinity {
a := core.NodeAffinity{}

pod.AppendNodeSelector(&a)

return pod.ReturnNodeAffinityOrNil(a)
}
165	pkg/deployment/pod/affinity.go (new file)

@@ -0,0 +1,165 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package pod

import (
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func AppendPodAntiAffinityDefault(p k8sutil.PodCreator, a *core.PodAntiAffinity) {
labels := k8sutil.LabelsForDeployment(p.GetName(), p.GetRole())
labelSelector := &meta.LabelSelector{
MatchLabels: labels,
}

if !p.IsDeploymentMode() {
a.RequiredDuringSchedulingIgnoredDuringExecution = append(a.RequiredDuringSchedulingIgnoredDuringExecution, core.PodAffinityTerm{
LabelSelector: labelSelector,
TopologyKey: k8sutil.TopologyKeyHostname,
})
} else {
a.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PreferredDuringSchedulingIgnoredDuringExecution, core.WeightedPodAffinityTerm{
Weight: 1,
PodAffinityTerm: core.PodAffinityTerm{
LabelSelector: labelSelector,
TopologyKey: k8sutil.TopologyKeyHostname,
},
})
}
}

func AppendNodeSelector(a *core.NodeAffinity) {
if a.RequiredDuringSchedulingIgnoredDuringExecution == nil {
a.RequiredDuringSchedulingIgnoredDuringExecution = &core.NodeSelector{}
}

a.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(a.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, core.NodeSelectorTerm{
MatchExpressions: []core.NodeSelectorRequirement{
{
Key: "beta.kubernetes.io/arch",
Operator: "In",
Values: []string{"amd64"},
},
},
})
}

func AppendAffinityWithRole(p k8sutil.PodCreator, a *core.PodAffinity, role string) {
labelSelector := &meta.LabelSelector{
MatchLabels: k8sutil.LabelsForDeployment(p.GetName(), role),
}
if !p.IsDeploymentMode() {
a.RequiredDuringSchedulingIgnoredDuringExecution = append(a.RequiredDuringSchedulingIgnoredDuringExecution, core.PodAffinityTerm{
LabelSelector: labelSelector,
TopologyKey: k8sutil.TopologyKeyHostname,
})
} else {
a.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PreferredDuringSchedulingIgnoredDuringExecution, core.WeightedPodAffinityTerm{
Weight: 1,
PodAffinityTerm: core.PodAffinityTerm{
LabelSelector: labelSelector,
TopologyKey: k8sutil.TopologyKeyHostname,
},
})
}
}

func MergePodAntiAffinity(a, b *core.PodAntiAffinity) {
if a == nil || b == nil {
return
}

for _, rule := range b.PreferredDuringSchedulingIgnoredDuringExecution {
a.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PreferredDuringSchedulingIgnoredDuringExecution, rule)
}

for _, rule := range b.RequiredDuringSchedulingIgnoredDuringExecution {
a.RequiredDuringSchedulingIgnoredDuringExecution = append(a.RequiredDuringSchedulingIgnoredDuringExecution, rule)
}
}

func MergePodAffinity(a, b *core.PodAffinity) {
if a == nil || b == nil {
return
}

for _, rule := range b.PreferredDuringSchedulingIgnoredDuringExecution {
a.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PreferredDuringSchedulingIgnoredDuringExecution, rule)
}

for _, rule := range b.RequiredDuringSchedulingIgnoredDuringExecution {
a.RequiredDuringSchedulingIgnoredDuringExecution = append(a.RequiredDuringSchedulingIgnoredDuringExecution, rule)
}
}

func MergeNodeAffinity(a, b *core.NodeAffinity) {
if a == nil || b == nil {
return
}

for _, rule := range b.PreferredDuringSchedulingIgnoredDuringExecution {
a.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PreferredDuringSchedulingIgnoredDuringExecution, rule)
}

if b.RequiredDuringSchedulingIgnoredDuringExecution != nil {
if a.RequiredDuringSchedulingIgnoredDuringExecution == nil {
a.RequiredDuringSchedulingIgnoredDuringExecution = b.RequiredDuringSchedulingIgnoredDuringExecution.DeepCopy()
} else {
for _, rule := range b.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
a.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms = append(a.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms, rule)
}
}
}
}

func ReturnPodAffinityOrNil(a core.PodAffinity) *core.PodAffinity {
if len(a.RequiredDuringSchedulingIgnoredDuringExecution) > 0 || len(a.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
return &a
}

return nil
}

func ReturnPodAntiAffinityOrNil(a core.PodAntiAffinity) *core.PodAntiAffinity {
if len(a.RequiredDuringSchedulingIgnoredDuringExecution) > 0 || len(a.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
return &a
}

return nil
}

func ReturnNodeAffinityOrNil(a core.NodeAffinity) *core.NodeAffinity {
if len(a.PreferredDuringSchedulingIgnoredDuringExecution) > 0 {
return &a
}

if s := a.RequiredDuringSchedulingIgnoredDuringExecution; s != nil {
if len(s.NodeSelectorTerms) > 0 {
return &a
}
}

return nil
}
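Editorial note: the helpers above combine the operator-generated defaults with the user-supplied rules from the group spec. The snippet below is not part of this commit; it is a minimal sketch, assuming the pod package from this diff, of how MergePodAntiAffinity and ReturnPodAntiAffinityOrNil compose; the labels and topology keys used here are illustrative only.

package main

import (
	"fmt"

	"github.com/arangodb/kube-arangodb/pkg/deployment/pod"
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// Operator default: prefer to keep members of the same deployment on different hosts.
	defaults := core.PodAntiAffinity{
		PreferredDuringSchedulingIgnoredDuringExecution: []core.WeightedPodAffinityTerm{
			{
				Weight: 1,
				PodAffinityTerm: core.PodAffinityTerm{
					LabelSelector: &meta.LabelSelector{MatchLabels: map[string]string{"app": "arangodb"}},
					TopologyKey:   "kubernetes.io/hostname",
				},
			},
		},
	}

	// User-supplied rules as they would arrive via ServerGroupSpec.AntiAffinity (illustrative).
	user := &core.PodAntiAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{
			{TopologyKey: "topology.kubernetes.io/zone"},
		},
	}

	// MergePodAntiAffinity appends the user's preferred and required terms onto the defaults;
	// ReturnPodAntiAffinityOrNil collapses an empty result to nil so the Pod spec stays clean.
	pod.MergePodAntiAffinity(&defaults, user)
	fmt.Printf("%d preferred, %d required terms\n",
		len(defaults.PreferredDuringSchedulingIgnoredDuringExecution),
		len(defaults.RequiredDuringSchedulingIgnoredDuringExecution))
	_ = pod.ReturnPodAntiAffinityOrNil(defaults) // non-nil here
}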
@@ -661,9 +661,12 @@ func RenderArangoPod(deployment k8sutil.APIObject, role, id, podName string,
p.Spec.Containers = append(p.Spec.Containers, c)
podCreator.GetSidecars(&p)

// Add (anti-)affinity
p.Spec.Affinity = k8sutil.CreateAffinity(deployment.GetName(), role, !podCreator.IsDeploymentMode(),
podCreator.GetAffinityRole())
// Add affinity
p.Spec.Affinity = &core.Affinity{
NodeAffinity: podCreator.GetNodeAffinity(),
PodAntiAffinity: podCreator.GetPodAntiAffinity(),
PodAffinity: podCreator.GetPodAffinity(),
}

return &p, nil
}
@@ -26,6 +26,8 @@ import (
"fmt"
"math"

"github.com/arangodb/kube-arangodb/pkg/deployment/pod"

"github.com/arangodb/kube-arangodb/pkg/util"

"github.com/arangodb/kube-arangodb/pkg/util/constants"

@@ -40,6 +42,8 @@ const (
ArangoDBOverrideDetectedTotalMemoryEnv = "ARANGODB_OVERRIDE_DETECTED_TOTAL_MEMORY"
)

var _ k8sutil.PodCreator = &MemberArangoDPod{}

type MemberArangoDPod struct {
status api.MemberStatus
tlsKeyfileSecretName string

@@ -161,12 +165,44 @@ func (m *MemberArangoDPod) Init(pod *core.Pod) {
pod.Spec.PriorityClassName = m.groupSpec.PriorityClassName
}

func (m *MemberArangoDPod) GetName() string {
return m.resources.context.GetAPIObject().GetName()
}

func (m *MemberArangoDPod) GetRole() string {
return m.group.AsRole()
}

func (m *MemberArangoDPod) GetImagePullSecrets() []string {
return m.spec.ImagePullSecrets
}

func (m *MemberArangoDPod) GetAffinityRole() string {
return ""
func (m *MemberArangoDPod) GetPodAntiAffinity() *core.PodAntiAffinity {
a := core.PodAntiAffinity{}

pod.AppendPodAntiAffinityDefault(m, &a)

pod.MergePodAntiAffinity(&a, m.groupSpec.AntiAffinity)

return pod.ReturnPodAntiAffinityOrNil(a)
}

func (m *MemberArangoDPod) GetPodAffinity() *core.PodAffinity {
a := core.PodAffinity{}

pod.MergePodAffinity(&a, m.groupSpec.Affinity)

return pod.ReturnPodAffinityOrNil(a)
}

func (m *MemberArangoDPod) GetNodeAffinity() *core.NodeAffinity {
a := core.NodeAffinity{}

pod.AppendNodeSelector(&a)

pod.MergeNodeAffinity(&a, m.groupSpec.NodeAffinity)

return pod.ReturnNodeAffinityOrNil(a)
}

func (m *MemberArangoDPod) GetNodeSelector() map[string]string {
@@ -25,11 +25,13 @@ package resources
import (
"math"

"github.com/arangodb/kube-arangodb/pkg/deployment/pod"

"github.com/arangodb/kube-arangodb/pkg/util/constants"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
v1 "k8s.io/api/core/v1"
core "k8s.io/api/core/v1"
)

const (

@@ -44,6 +46,8 @@ type ArangoSyncContainer struct {
imageInfo api.ImageInfo
}

var _ k8sutil.PodCreator = &MemberSyncPod{}

type MemberSyncPod struct {
tlsKeyfileSecretName string
clientAuthCASecretName string

@@ -60,12 +64,12 @@ func (a *ArangoSyncContainer) GetExecutor() string {
return ArangoSyncExecutor
}

func (a *ArangoSyncContainer) GetSecurityContext() *v1.SecurityContext {
func (a *ArangoSyncContainer) GetSecurityContext() *core.SecurityContext {
return a.groupSpec.SecurityContext.NewSecurityContext()
}

func (a *ArangoSyncContainer) GetProbes() (*v1.Probe, *v1.Probe, error) {
var liveness, readiness *v1.Probe
func (a *ArangoSyncContainer) GetProbes() (*core.Probe, *core.Probe, error) {
var liveness, readiness *core.Probe

probeLivenessConfig, err := a.resources.getLivenessProbe(a.spec, a.group, a.imageInfo.ArangoDBVersion)
if err != nil {

@@ -88,18 +92,18 @@ func (a *ArangoSyncContainer) GetProbes() (*v1.Probe, *v1.Probe, error) {
return liveness, readiness, nil
}

func (a *ArangoSyncContainer) GetResourceRequirements() v1.ResourceRequirements {
func (a *ArangoSyncContainer) GetResourceRequirements() core.ResourceRequirements {
return k8sutil.ExtractPodResourceRequirement(a.groupSpec.Resources)
}

func (a *ArangoSyncContainer) GetLifecycle() (*v1.Lifecycle, error) {
func (a *ArangoSyncContainer) GetLifecycle() (*core.Lifecycle, error) {
if a.resources.context.GetLifecycleImage() != "" {
return k8sutil.NewLifecycle()
}
return nil, nil
}

func (a *ArangoSyncContainer) GetImagePullPolicy() v1.PullPolicy {
func (a *ArangoSyncContainer) GetImagePullPolicy() core.PullPolicy {
return a.spec.GetImagePullPolicy()
}

@@ -107,8 +111,8 @@ func (a *ArangoSyncContainer) GetImage() string {
return a.imageInfo.Image
}

func (a *ArangoSyncContainer) GetEnvs() []v1.EnvVar {
envs := make([]v1.EnvVar, 0)
func (a *ArangoSyncContainer) GetEnvs() []core.EnvVar {
envs := make([]core.EnvVar, 0)

if a.spec.Sync.Monitoring.GetTokenSecretName() != "" {
env := k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,

@@ -135,17 +139,50 @@ func (a *ArangoSyncContainer) GetEnvs() []v1.EnvVar {
return nil
}

func (m *MemberSyncPod) GetAffinityRole() string {
if m.group == api.ServerGroupSyncWorkers {
return api.ServerGroupDBServers.AsRole()
}
return ""
func (m *MemberSyncPod) GetName() string {
return m.resources.context.GetAPIObject().GetName()
}

func (m *MemberSyncPod) GetRole() string {
return m.group.AsRole()
}

func (m *MemberSyncPod) GetImagePullSecrets() []string {
return m.spec.ImagePullSecrets
}

func (m *MemberSyncPod) GetPodAntiAffinity() *core.PodAntiAffinity {
a := core.PodAntiAffinity{}

pod.AppendPodAntiAffinityDefault(m, &a)

pod.MergePodAntiAffinity(&a, m.groupSpec.AntiAffinity)

return pod.ReturnPodAntiAffinityOrNil(a)
}

func (m *MemberSyncPod) GetPodAffinity() *core.PodAffinity {
a := core.PodAffinity{}

if m.group == api.ServerGroupSyncWorkers {
pod.AppendAffinityWithRole(m, &a, api.ServerGroupDBServers.AsRole())
}

pod.MergePodAffinity(&a, m.groupSpec.Affinity)

return pod.ReturnPodAffinityOrNil(a)
}

func (m *MemberSyncPod) GetNodeAffinity() *core.NodeAffinity {
a := core.NodeAffinity{}

pod.AppendNodeSelector(&a)

pod.MergeNodeAffinity(&a, m.groupSpec.NodeAffinity)

return pod.ReturnNodeAffinityOrNil(a)
}

func (m *MemberSyncPod) GetNodeSelector() map[string]string {
return m.groupSpec.GetNodeSelector()
}
|
|||
return m.groupSpec.GetServiceAccountName()
|
||||
}
|
||||
|
||||
func (m *MemberSyncPod) GetSidecars(pod *v1.Pod) {
|
||||
func (m *MemberSyncPod) GetSidecars(pod *core.Pod) {
|
||||
// A sidecar provided by the user
|
||||
sidecars := m.groupSpec.GetSidecars()
|
||||
if len(sidecars) > 0 {
|
||||
|
@ -162,9 +199,9 @@ func (m *MemberSyncPod) GetSidecars(pod *v1.Pod) {
|
|||
}
|
||||
}
|
||||
|
||||
func (m *MemberSyncPod) GetVolumes() ([]v1.Volume, []v1.VolumeMount) {
|
||||
var volumes []v1.Volume
|
||||
var volumeMounts []v1.VolumeMount
|
||||
func (m *MemberSyncPod) GetVolumes() ([]core.Volume, []core.VolumeMount) {
|
||||
var volumes []core.Volume
|
||||
var volumeMounts []core.VolumeMount
|
||||
|
||||
if m.resources.context.GetLifecycleImage() != "" {
|
||||
volumes = append(volumes, k8sutil.LifecycleVolume())
|
||||
|
@ -205,8 +242,8 @@ func (m *MemberSyncPod) IsDeploymentMode() bool {
|
|||
return m.spec.IsDevelopment()
|
||||
}
|
||||
|
||||
func (m *MemberSyncPod) GetInitContainers() ([]v1.Container, error) {
|
||||
var initContainers []v1.Container
|
||||
func (m *MemberSyncPod) GetInitContainers() ([]core.Container, error) {
|
||||
var initContainers []core.Container
|
||||
|
||||
lifecycleImage := m.resources.context.GetLifecycleImage()
|
||||
if lifecycleImage != "" {
|
||||
|
@ -225,7 +262,7 @@ func (m *MemberSyncPod) GetFinalizers() []string {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (m *MemberSyncPod) GetTolerations() []v1.Toleration {
|
||||
func (m *MemberSyncPod) GetTolerations() []core.Toleration {
|
||||
return m.resources.CreatePodTolerations(m.group, m.groupSpec)
|
||||
}
|
||||
|
||||
|
@ -239,7 +276,7 @@ func (m *MemberSyncPod) GetContainerCreator() k8sutil.ContainerCreator {
|
|||
}
|
||||
}
|
||||
|
||||
func (m *MemberSyncPod) Init(pod *v1.Pod) {
|
||||
func (m *MemberSyncPod) Init(pod *core.Pod) {
|
||||
terminationGracePeriodSeconds := int64(math.Ceil(m.group.DefaultTerminationGracePeriod().Seconds()))
|
||||
pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
|
||||
pod.Spec.PriorityClassName = m.groupSpec.PriorityClassName
|
||||
|
|
|
@ -34,7 +34,7 @@ import (
|
|||
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
core "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
)
|
||||
|
@ -59,15 +59,19 @@ const (
|
|||
)
|
||||
|
||||
type PodCreator interface {
|
||||
Init(*v1.Pod)
|
||||
GetVolumes() ([]v1.Volume, []v1.VolumeMount)
|
||||
GetSidecars(*v1.Pod)
|
||||
GetInitContainers() ([]v1.Container, error)
|
||||
Init(*core.Pod)
|
||||
GetName() string
|
||||
GetRole() string
|
||||
GetVolumes() ([]core.Volume, []core.VolumeMount)
|
||||
GetSidecars(*core.Pod)
|
||||
GetInitContainers() ([]core.Container, error)
|
||||
GetFinalizers() []string
|
||||
GetTolerations() []v1.Toleration
|
||||
GetTolerations() []core.Toleration
|
||||
GetNodeSelector() map[string]string
|
||||
GetServiceAccountName() string
|
||||
GetAffinityRole() string
|
||||
GetPodAntiAffinity() *core.PodAntiAffinity
|
||||
GetPodAffinity() *core.PodAffinity
|
||||
GetNodeAffinity() *core.NodeAffinity
|
||||
GetContainerCreator() ContainerCreator
|
||||
GetImagePullSecrets() []string
|
||||
IsDeploymentMode() bool
|
||||
|
@ -75,37 +79,37 @@ type PodCreator interface {
|
|||
|
||||
type ContainerCreator interface {
|
||||
GetExecutor() string
|
||||
GetProbes() (*v1.Probe, *v1.Probe, error)
|
||||
GetResourceRequirements() v1.ResourceRequirements
|
||||
GetLifecycle() (*v1.Lifecycle, error)
|
||||
GetImagePullPolicy() v1.PullPolicy
|
||||
GetProbes() (*core.Probe, *core.Probe, error)
|
||||
GetResourceRequirements() core.ResourceRequirements
|
||||
GetLifecycle() (*core.Lifecycle, error)
|
||||
GetImagePullPolicy() core.PullPolicy
|
||||
GetImage() string
|
||||
GetEnvs() []v1.EnvVar
|
||||
GetSecurityContext() *v1.SecurityContext
|
||||
GetEnvs() []core.EnvVar
|
||||
GetSecurityContext() *core.SecurityContext
|
||||
}
|
||||
|
||||
// IsPodReady returns true if the PodReady condition on
|
||||
// the given pod is set to true.
|
||||
func IsPodReady(pod *v1.Pod) bool {
|
||||
condition := getPodCondition(&pod.Status, v1.PodReady)
|
||||
return condition != nil && condition.Status == v1.ConditionTrue
|
||||
func IsPodReady(pod *core.Pod) bool {
|
||||
condition := getPodCondition(&pod.Status, core.PodReady)
|
||||
return condition != nil && condition.Status == core.ConditionTrue
|
||||
}
|
||||
|
||||
// GetPodByName returns pod if it exists among the pods' list
|
||||
// Returns false if not found.
|
||||
func GetPodByName(pods []v1.Pod, podName string) (v1.Pod, bool) {
|
||||
func GetPodByName(pods []core.Pod, podName string) (core.Pod, bool) {
|
||||
for _, pod := range pods {
|
||||
if pod.GetName() == podName {
|
||||
return pod, true
|
||||
}
|
||||
}
|
||||
return v1.Pod{}, false
|
||||
return core.Pod{}, false
|
||||
}
|
||||
|
||||
// IsPodSucceeded returns true if the arangodb container of the pod
|
||||
// has terminated with exit code 0.
|
||||
func IsPodSucceeded(pod *v1.Pod) bool {
|
||||
if pod.Status.Phase == v1.PodSucceeded {
|
||||
func IsPodSucceeded(pod *core.Pod) bool {
|
||||
if pod.Status.Phase == core.PodSucceeded {
|
||||
return true
|
||||
} else {
|
||||
for _, c := range pod.Status.ContainerStatuses {
|
||||
|
@ -124,8 +128,8 @@ func IsPodSucceeded(pod *v1.Pod) bool {
|
|||
|
||||
// IsPodFailed returns true if the arangodb container of the pod
|
||||
// has terminated wih a non-zero exit code.
|
||||
func IsPodFailed(pod *v1.Pod) bool {
|
||||
if pod.Status.Phase == v1.PodFailed {
|
||||
func IsPodFailed(pod *core.Pod) bool {
|
||||
if pod.Status.Phase == core.PodFailed {
|
||||
return true
|
||||
} else {
|
||||
for _, c := range pod.Status.ContainerStatuses {
|
||||
|
@@ -144,40 +148,40 @@ func IsPodFailed(pod *v1.Pod) bool {
 }
 
 // IsPodScheduled returns true if the pod has been scheduled.
-func IsPodScheduled(pod *v1.Pod) bool {
-    condition := getPodCondition(&pod.Status, v1.PodScheduled)
-    return condition != nil && condition.Status == v1.ConditionTrue
+func IsPodScheduled(pod *core.Pod) bool {
+    condition := getPodCondition(&pod.Status, core.PodScheduled)
+    return condition != nil && condition.Status == core.ConditionTrue
 }
 
 // IsPodNotScheduledFor returns true if the pod has not been scheduled
 // for longer than the given duration.
-func IsPodNotScheduledFor(pod *v1.Pod, timeout time.Duration) bool {
-    condition := getPodCondition(&pod.Status, v1.PodScheduled)
+func IsPodNotScheduledFor(pod *core.Pod, timeout time.Duration) bool {
+    condition := getPodCondition(&pod.Status, core.PodScheduled)
     return condition != nil &&
-        condition.Status == v1.ConditionFalse &&
+        condition.Status == core.ConditionFalse &&
         condition.LastTransitionTime.Time.Add(timeout).Before(time.Now())
 }
 
 // IsPodMarkedForDeletion returns true if the pod has been marked for deletion.
-func IsPodMarkedForDeletion(pod *v1.Pod) bool {
+func IsPodMarkedForDeletion(pod *core.Pod) bool {
     return pod.DeletionTimestamp != nil
 }
 
 // IsPodTerminating returns true if the pod has been marked for deletion
 // but is still running.
-func IsPodTerminating(pod *v1.Pod) bool {
-    return IsPodMarkedForDeletion(pod) && pod.Status.Phase == v1.PodRunning
+func IsPodTerminating(pod *core.Pod) bool {
+    return IsPodMarkedForDeletion(pod) && pod.Status.Phase == core.PodRunning
 }
 
 // IsArangoDBImageIDAndVersionPod returns true if the given pod is used for fetching image ID and ArangoDB version of an image
-func IsArangoDBImageIDAndVersionPod(p v1.Pod) bool {
+func IsArangoDBImageIDAndVersionPod(p core.Pod) bool {
     role, found := p.GetLabels()[LabelKeyRole]
     return found && role == ImageIDAndVersionRole
 }
 
 // getPodCondition returns the condition of given type in the given status.
 // If not found, nil is returned.
-func getPodCondition(status *v1.PodStatus, condType v1.PodConditionType) *v1.PodCondition {
+func getPodCondition(status *core.PodStatus, condType core.PodConditionType) *core.PodCondition {
     for i := range status.Conditions {
         if status.Conditions[i].Type == condType {
             return &status.Conditions[i]
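The pod-state helpers above only read pod status, so their behaviour is easy to see on a hand-built object. A small sketch, assuming the helpers are importable from this repository's k8sutil package:

package main

import (
    "fmt"

    core "k8s.io/api/core/v1"

    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" // assumed import path for this file's package
)

func main() {
    pod := core.Pod{
        Status: core.PodStatus{
            Phase: core.PodRunning,
            Conditions: []core.PodCondition{
                {Type: core.PodScheduled, Status: core.ConditionTrue},
                {Type: core.PodReady, Status: core.ConditionFalse},
            },
        },
    }
    fmt.Println(k8sutil.IsPodScheduled(&pod))   // true: the PodScheduled condition is True
    fmt.Println(k8sutil.IsPodReady(&pod))       // false: the PodReady condition is False
    fmt.Println(k8sutil.IsPodTerminating(&pod)) // false: no deletion timestamp is set
}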
@@ -208,62 +212,62 @@ func CreateTLSKeyfileSecretName(deploymentName, role, id string) string {
 }
 
 // ArangodVolumeMount creates a volume mount structure for arangod.
-func ArangodVolumeMount() v1.VolumeMount {
-    return v1.VolumeMount{
+func ArangodVolumeMount() core.VolumeMount {
+    return core.VolumeMount{
         Name: ArangodVolumeName,
         MountPath: ArangodVolumeMountDir,
     }
 }
 
 // TlsKeyfileVolumeMount creates a volume mount structure for a TLS keyfile.
-func TlsKeyfileVolumeMount() v1.VolumeMount {
-    return v1.VolumeMount{
+func TlsKeyfileVolumeMount() core.VolumeMount {
+    return core.VolumeMount{
         Name: TlsKeyfileVolumeName,
         MountPath: TLSKeyfileVolumeMountDir,
     }
 }
 
 // ClientAuthCACertificateVolumeMount creates a volume mount structure for a client-auth CA certificate (ca.crt).
-func ClientAuthCACertificateVolumeMount() v1.VolumeMount {
-    return v1.VolumeMount{
+func ClientAuthCACertificateVolumeMount() core.VolumeMount {
+    return core.VolumeMount{
         Name: ClientAuthCAVolumeName,
         MountPath: ClientAuthCAVolumeMountDir,
     }
 }
 
 // MasterJWTVolumeMount creates a volume mount structure for a master JWT secret (token).
-func MasterJWTVolumeMount() v1.VolumeMount {
-    return v1.VolumeMount{
+func MasterJWTVolumeMount() core.VolumeMount {
+    return core.VolumeMount{
         Name: MasterJWTSecretVolumeName,
         MountPath: MasterJWTSecretVolumeMountDir,
     }
 }
 
 // ClusterJWTVolumeMount creates a volume mount structure for a cluster JWT secret (token).
-func ClusterJWTVolumeMount() v1.VolumeMount {
-    return v1.VolumeMount{
+func ClusterJWTVolumeMount() core.VolumeMount {
+    return core.VolumeMount{
         Name: ClusterJWTSecretVolumeName,
         MountPath: ClusterJWTSecretVolumeMountDir,
     }
 }
 
-func ExporterJWTVolumeMount() v1.VolumeMount {
-    return v1.VolumeMount{
+func ExporterJWTVolumeMount() core.VolumeMount {
+    return core.VolumeMount{
         Name: ExporterJWTVolumeName,
         MountPath: ExporterJWTVolumeMountDir,
     }
 }
 
 // RocksdbEncryptionVolumeMount creates a volume mount structure for a RocksDB encryption key.
-func RocksdbEncryptionVolumeMount() v1.VolumeMount {
-    return v1.VolumeMount{
+func RocksdbEncryptionVolumeMount() core.VolumeMount {
+    return core.VolumeMount{
         Name: RocksdbEncryptionVolumeName,
         MountPath: RocksDBEncryptionVolumeMountDir,
     }
 }
 
 // ArangodInitContainer creates a container configured to initalize a UUID file.
-func ArangodInitContainer(name, id, engine, alpineImage string, requireUUID bool, securityContext *v1.SecurityContext) v1.Container {
+func ArangodInitContainer(name, id, engine, alpineImage string, requireUUID bool, securityContext *core.SecurityContext) core.Container {
     uuidFile := filepath.Join(ArangodVolumeMountDir, "UUID")
     engineFile := filepath.Join(ArangodVolumeMountDir, "ENGINE")
     var command string
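Each of the *VolumeMount helpers above covers only the container side of a mount; the matching entry for the pod's Volumes list comes from the CreateVolume* helpers near the end of this file. A hedged sketch of the pairing, not taken from this commit, with the claim name invented for illustration:

// Illustration only: pair the arangod data mount with a PVC-backed volume.
// "example-member-pvc" is a made-up claim name.
func exampleDataVolume() (core.VolumeMount, core.Volume) {
    mount := ArangodVolumeMount() // ArangodVolumeName mounted at ArangodVolumeMountDir
    volume := CreateVolumeWithPersitantVolumeClaim(ArangodVolumeName, "example-member-pvc")
    return mount, volume
}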
@@ -280,7 +284,7 @@ func ArangodInitContainer(name, id, engine, alpineImage string, requireUUID bool
     } else {
         command = fmt.Sprintf("test -f %s || echo '%s' > %s", uuidFile, id, uuidFile)
     }
-    c := v1.Container{
+    c := core.Container{
         Name: name,
         Image: alpineImage,
         Command: []string{
@@ -288,17 +292,17 @@
             "-c",
             command,
         },
-        Resources: v1.ResourceRequirements{
-            Requests: v1.ResourceList{
-                v1.ResourceCPU: resource.MustParse("100m"),
-                v1.ResourceMemory: resource.MustParse("10Mi"),
+        Resources: core.ResourceRequirements{
+            Requests: core.ResourceList{
+                core.ResourceCPU: resource.MustParse("100m"),
+                core.ResourceMemory: resource.MustParse("10Mi"),
             },
-            Limits: v1.ResourceList{
-                v1.ResourceCPU: resource.MustParse("100m"),
-                v1.ResourceMemory: resource.MustParse("50Mi"),
+            Limits: core.ResourceList{
+                core.ResourceCPU: resource.MustParse("100m"),
+                core.ResourceMemory: resource.MustParse("50Mi"),
             },
         },
-        VolumeMounts: []v1.VolumeMount{
+        VolumeMounts: []core.VolumeMount{
             ArangodVolumeMount(),
         },
         SecurityContext: securityContext,
@@ -307,47 +311,47 @@ func ArangodInitContainer(name, id, engine, alpineImage string, requireUUID bool
 }
 
 // ExtractPodResourceRequirement filters resource requirements for Pods.
-func ExtractPodResourceRequirement(resources v1.ResourceRequirements) v1.ResourceRequirements {
+func ExtractPodResourceRequirement(resources core.ResourceRequirements) core.ResourceRequirements {
 
-    filterStorage := func(list v1.ResourceList) v1.ResourceList {
-        newlist := make(v1.ResourceList)
-        if q, ok := list[v1.ResourceCPU]; ok {
-            newlist[v1.ResourceCPU] = q
+    filterStorage := func(list core.ResourceList) core.ResourceList {
+        newlist := make(core.ResourceList)
+        if q, ok := list[core.ResourceCPU]; ok {
+            newlist[core.ResourceCPU] = q
         }
-        if q, ok := list[v1.ResourceMemory]; ok {
-            newlist[v1.ResourceMemory] = q
+        if q, ok := list[core.ResourceMemory]; ok {
+            newlist[core.ResourceMemory] = q
         }
         return newlist
     }
 
-    return v1.ResourceRequirements{
+    return core.ResourceRequirements{
         Limits: filterStorage(resources.Limits),
         Requests: filterStorage(resources.Requests),
     }
 }
 
 // NewContainer creates a container for specified creator
-func NewContainer(args []string, containerCreator ContainerCreator) (v1.Container, error) {
+func NewContainer(args []string, containerCreator ContainerCreator) (core.Container, error) {
 
     liveness, readiness, err := containerCreator.GetProbes()
     if err != nil {
-        return v1.Container{}, err
+        return core.Container{}, err
     }
 
     lifecycle, err := containerCreator.GetLifecycle()
     if err != nil {
-        return v1.Container{}, err
+        return core.Container{}, err
     }
 
-    return v1.Container{
+    return core.Container{
         Name: ServerContainerName,
         Image: containerCreator.GetImage(),
         Command: append([]string{containerCreator.GetExecutor()}, args...),
-        Ports: []v1.ContainerPort{
+        Ports: []core.ContainerPort{
             {
                 Name: "server",
                 ContainerPort: int32(ArangoPort),
-                Protocol: v1.ProtocolTCP,
+                Protocol: core.ProtocolTCP,
             },
         },
         Env: containerCreator.GetEnvs(),
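ExtractPodResourceRequirement, shown at the start of this hunk, copies only the CPU and memory quantities and drops everything else, for example a storage request that belongs on the PVC rather than on the pod. A quick illustration with arbitrary quantities, written as if inside this package (resource is k8s.io/apimachinery/pkg/api/resource, as used elsewhere in this file):

func exampleFilter() core.ResourceRequirements {
    in := core.ResourceRequirements{
        Requests: core.ResourceList{
            core.ResourceCPU:     resource.MustParse("250m"),
            core.ResourceMemory:  resource.MustParse("256Mi"),
            core.ResourceStorage: resource.MustParse("8Gi"), // dropped: only CPU and memory are copied
        },
    }
    // The result's Requests holds just the CPU and memory entries; Limits stays empty.
    return ExtractPodResourceRequirement(in)
}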
@@ -361,19 +365,19 @@ func NewContainer(args []string, containerCreator ContainerCreator) (v1.Containe
 }
 
 // NewPod creates a basic Pod for given settings.
-func NewPod(deploymentName, role, id, podName string, podCreator PodCreator) v1.Pod {
+func NewPod(deploymentName, role, id, podName string, podCreator PodCreator) core.Pod {
 
     hostname := CreatePodHostName(deploymentName, role, id)
-    p := v1.Pod{
+    p := core.Pod{
         ObjectMeta: metav1.ObjectMeta{
             Name: podName,
             Labels: LabelsForDeployment(deploymentName, role),
             Finalizers: podCreator.GetFinalizers(),
         },
-        Spec: v1.PodSpec{
+        Spec: core.PodSpec{
             Hostname: hostname,
             Subdomain: CreateHeadlessServiceName(deploymentName),
-            RestartPolicy: v1.RestartPolicyNever,
+            RestartPolicy: core.RestartPolicyNever,
             Tolerations: podCreator.GetTolerations(),
             ServiceAccountName: podCreator.GetServiceAccountName(),
             NodeSelector: podCreator.GetNodeSelector(),
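NewContainer and NewPod turn a ContainerCreator/PodCreator pair into concrete API objects. Reusing the throw-away stub sketched earlier, a call to the container side would look roughly like this; the arangod flag is just an example argument, not something this commit prescribes:

func exampleServerContainer() (core.Container, error) {
    args := []string{"--server.authentication=true"} // example flag only
    return NewContainer(args, dummyContainerCreator{})
}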
@@ -383,9 +387,9 @@ func NewPod(deploymentName, role, id, podName string, podCreator PodCreator) v1.
     // Add ImagePullSecrets
     imagePullSecrets := podCreator.GetImagePullSecrets()
     if imagePullSecrets != nil {
-        imagePullSecretsReference := make([]v1.LocalObjectReference, len(imagePullSecrets))
+        imagePullSecretsReference := make([]core.LocalObjectReference, len(imagePullSecrets))
         for id := range imagePullSecrets {
-            imagePullSecretsReference[id] = v1.LocalObjectReference{
+            imagePullSecretsReference[id] = core.LocalObjectReference{
                 Name: imagePullSecrets[id],
             }
         }
@@ -396,7 +400,7 @@ func NewPod(deploymentName, role, id, podName string, podCreator PodCreator) v1.
 }
 
 // GetPodSpecChecksum return checksum of requested pod spec
-func GetPodSpecChecksum(podSpec v1.PodSpec) (string, error) {
+func GetPodSpecChecksum(podSpec core.PodSpec) (string, error) {
     // Do not calculate init containers
     podSpec.InitContainers = nil
 
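Because GetPodSpecChecksum clears InitContainers before hashing, two specs that differ only in their init containers produce the same checksum. A small sketch of that property, with invented container values:

func exampleChecksumIgnoresInitContainers() (bool, error) {
    base := core.PodSpec{
        Containers: []core.Container{{Name: "server", Image: "arangodb/arangodb:latest"}}, // example values
    }
    withInit := base
    withInit.InitContainers = []core.Container{{Name: "uuid", Image: "alpine:3"}} // example values

    a, err := GetPodSpecChecksum(base)
    if err != nil {
        return false, err
    }
    b, err := GetPodSpecChecksum(withInit)
    if err != nil {
        return false, err
    }
    return a == b, nil // expected true: init containers are excluded from the checksum
}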
@@ -411,7 +415,7 @@ func GetPodSpecChecksum(podSpec v1.PodSpec) (string, error) {
 // CreatePod adds an owner to the given pod and calls the k8s api-server to created it.
 // If the pod already exists, nil is returned.
 // If another error occurs, that error is returned.
-func CreatePod(kubecli kubernetes.Interface, pod *v1.Pod, ns string, owner metav1.OwnerReference) (types.UID, string, error) {
+func CreatePod(kubecli kubernetes.Interface, pod *core.Pod, ns string, owner metav1.OwnerReference) (types.UID, string, error) {
     addOwnerRefToObject(pod.GetObjectMeta(), &owner)
 
     checksum, err := GetPodSpecChecksum(pod.Spec)
@@ -426,55 +430,55 @@ func CreatePod(kubecli kubernetes.Interface, pod *v1.Pod, ns string, owner metav
     }
 }
 
-func CreateVolumeEmptyDir(name string) v1.Volume {
-    return v1.Volume{
+func CreateVolumeEmptyDir(name string) core.Volume {
+    return core.Volume{
         Name: name,
-        VolumeSource: v1.VolumeSource{
-            EmptyDir: &v1.EmptyDirVolumeSource{},
+        VolumeSource: core.VolumeSource{
+            EmptyDir: &core.EmptyDirVolumeSource{},
         },
     }
 }
 
-func CreateVolumeWithSecret(name, secretName string) v1.Volume {
-    return v1.Volume{
+func CreateVolumeWithSecret(name, secretName string) core.Volume {
+    return core.Volume{
         Name: name,
-        VolumeSource: v1.VolumeSource{
-            Secret: &v1.SecretVolumeSource{
+        VolumeSource: core.VolumeSource{
+            Secret: &core.SecretVolumeSource{
                 SecretName: secretName,
             },
         },
     }
 }
 
-func CreateVolumeWithPersitantVolumeClaim(name, claimName string) v1.Volume {
-    return v1.Volume{
+func CreateVolumeWithPersitantVolumeClaim(name, claimName string) core.Volume {
+    return core.Volume{
         Name: name,
-        VolumeSource: v1.VolumeSource{
-            PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+        VolumeSource: core.VolumeSource{
+            PersistentVolumeClaim: &core.PersistentVolumeClaimVolumeSource{
                 ClaimName: claimName,
             },
         },
     }
 }
 
-func CreateEnvFieldPath(name, fieldPath string) v1.EnvVar {
-    return v1.EnvVar{
+func CreateEnvFieldPath(name, fieldPath string) core.EnvVar {
+    return core.EnvVar{
         Name: name,
-        ValueFrom: &v1.EnvVarSource{
-            FieldRef: &v1.ObjectFieldSelector{
+        ValueFrom: &core.EnvVarSource{
+            FieldRef: &core.ObjectFieldSelector{
                 FieldPath: fieldPath,
             },
         },
     }
 }
 
-func CreateEnvSecretKeySelector(name, SecretKeyName, secretKey string) v1.EnvVar {
-    return v1.EnvVar{
+func CreateEnvSecretKeySelector(name, SecretKeyName, secretKey string) core.EnvVar {
+    return core.EnvVar{
         Name: name,
         Value: "",
-        ValueFrom: &v1.EnvVarSource{
-            SecretKeyRef: &v1.SecretKeySelector{
-                LocalObjectReference: v1.LocalObjectReference{
+        ValueFrom: &core.EnvVarSource{
+            SecretKeyRef: &core.SecretKeySelector{
+                LocalObjectReference: core.LocalObjectReference{
                     Name: SecretKeyName,
                 },
                 Key: secretKey,
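The env helpers at the end of the file wrap the two common EnvVar sources: the downward API and a secret key reference. A short usage sketch; the variable names, secret name, and key below are placeholders rather than values used by the operator:

func exampleEnvVars() []core.EnvVar {
    return []core.EnvVar{
        // Downward API: expose the pod's own name ("metadata.name" is a standard fieldPath).
        CreateEnvFieldPath("MY_POD_NAME", "metadata.name"),
        // Secret reference: read key "token" from a secret named "example-jwt-secret".
        CreateEnvSecretKeySelector("EXAMPLE_JWT", "example-jwt-secret", "token"),
    }
}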