Cleaning the code (#501)

Repository: https://github.com/arangodb/kube-arangodb.git
Parent: d79065bfbd
Commit: be9ca18925
13 changed files with 3735 additions and 606 deletions
pkg/deployment/deployment_test.go (new file, 2349 lines)
Diff suppressed because it is too large.
@@ -35,14 +35,21 @@ import (
    "k8s.io/client-go/kubernetes"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/deployment/resources"
    "github.com/arangodb/kube-arangodb/pkg/util/arangod"
    "github.com/arangodb/kube-arangodb/pkg/util/constants"
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

const (
    dockerPullableImageIDPrefix_ = "docker-pullable://"
)

type ImageUpdatePod struct {
    spec  api.DeploymentSpec
    image string
}

type ArangoDImageUpdateContainer struct {
    spec  api.DeploymentSpec
    image string
}

type imagesBuilder struct {
    APIObject k8sutil.APIObject
@@ -182,26 +189,130 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, ima
        "--database.directory=" + k8sutil.ArangodVolumeMountDir,
        "--log.output=+",
    }
    terminationGracePeriod := time.Second * 30
    tolerations := make([]v1.Toleration, 0, 2)
    shortDur := k8sutil.TolerationDuration{Forever: false, TimeSpan: time.Second * 5}
    tolerations = k8sutil.AddTolerationIfNotFound(tolerations, k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeNotReady, shortDur))
    tolerations = k8sutil.AddTolerationIfNotFound(tolerations, k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeUnreachable, shortDur))
    tolerations = k8sutil.AddTolerationIfNotFound(tolerations, k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeAlphaUnreachable, shortDur))
    serviceAccountName := ""

    env := make(map[string]k8sutil.EnvValue)
    if ib.Spec.License.HasSecretName() {
        env[constants.EnvArangoLicenseKey] = k8sutil.EnvValue{
            SecretName: ib.Spec.License.GetSecretName(),
            SecretKey:  constants.SecretKeyToken,
    imagePod := ImageUpdatePod{
        spec:  ib.Spec,
        image: image,
        }
    }
    if err := k8sutil.CreateArangodPod(ib.KubeCli, true, ib.APIObject, role, id, podName, "", image, "", "", ib.Spec.GetImagePullPolicy(), ib.Spec.ImagePullSecrets, "", false, terminationGracePeriod, args, env, nil, nil, nil,
        tolerations, serviceAccountName, "", "", "", nil, "", v1.ResourceRequirements{}, nil, nil, nil); err != nil {

    if err := resources.CreateArangoPod(ib.KubeCli, ib.APIObject, role, id, podName, args, &imagePod); err != nil {
        log.Debug().Err(err).Msg("Failed to create image ID pod")
        return true, maskAny(err)
    }
    // Come back soon to inspect the pod
    return true, nil
}

func (a *ArangoDImageUpdateContainer) GetExecutor() string {
    return resources.ArangoDExecutor
}

func (a *ArangoDImageUpdateContainer) GetProbes() (*v1.Probe, *v1.Probe, error) {
    return nil, nil, nil
}

func (a *ArangoDImageUpdateContainer) GetResourceRequirements() v1.ResourceRequirements {
    return v1.ResourceRequirements{
        Limits:   make(v1.ResourceList),
        Requests: make(v1.ResourceList),
    }
}

func (a *ArangoDImageUpdateContainer) GetImage() string {
    return a.image
}

func (a *ArangoDImageUpdateContainer) GetEnvs() []v1.EnvVar {
    env := make([]v1.EnvVar, 0)

    if a.spec.License.HasSecretName() {
        env = append(env, k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey,
            a.spec.License.GetSecretName(), constants.SecretKeyToken))
    }

    if len(env) > 0 {
        return env
    }

    return nil
}

func (a *ArangoDImageUpdateContainer) GetLifecycle() (*v1.Lifecycle, error) {
    return nil, nil
}

func (a *ArangoDImageUpdateContainer) GetImagePullPolicy() v1.PullPolicy {
    return a.spec.GetImagePullPolicy()
}

func (i *ImageUpdatePod) Init(pod *v1.Pod) {
    terminationGracePeriodSeconds := int64((time.Second * 30).Seconds())
    pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
}

func (i *ImageUpdatePod) GetImagePullSecrets() []string {
    return i.spec.ImagePullSecrets
}

func (i *ImageUpdatePod) GetContainerCreator() k8sutil.ContainerCreator {
    return &ArangoDImageUpdateContainer{
        spec:  i.spec,
        image: i.image,
    }
}

func (i *ImageUpdatePod) GetAffinityRole() string {
    return ""
}

func (i *ImageUpdatePod) GetVolumes() ([]v1.Volume, []v1.VolumeMount) {
    var volumes []v1.Volume
    var volumeMounts []v1.VolumeMount

    volumes = append(volumes, k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName))
    volumeMounts = append(volumeMounts, k8sutil.ArangodVolumeMount())

    return volumes, volumeMounts
}

func (i *ImageUpdatePod) GetSidecars(*v1.Pod) {
    return
}

func (i *ImageUpdatePod) GetInitContainers() ([]v1.Container, error) {
    return nil, nil
}

func (i *ImageUpdatePod) GetFinalizers() []string {
    return nil
}

func (i *ImageUpdatePod) GetTolerations() []v1.Toleration {

    shortDur := k8sutil.TolerationDuration{
        Forever:  false,
        TimeSpan: time.Second * 5,
    }

    tolerations := make([]v1.Toleration, 0, 2)
    tolerations = k8sutil.AddTolerationIfNotFound(tolerations,
        k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeNotReady, shortDur))
    tolerations = k8sutil.AddTolerationIfNotFound(tolerations,
        k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeUnreachable, shortDur))
    tolerations = k8sutil.AddTolerationIfNotFound(tolerations,
        k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeAlphaUnreachable, shortDur))

    return tolerations
}

func (i *ImageUpdatePod) IsDeploymentMode() bool {
    return true
}

func (i *ImageUpdatePod) GetNodeSelector() map[string]string {
    return nil
}

func (i *ImageUpdatePod) GetServiceAccountName() string {
    return ""
}
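The id and podName passed to the new resources.CreateArangoPod call are computed outside this hunk. Judging from TestEnsureImages in the new images_test.go below, the image-ID pod name appears to be derived from the deployment name, k8sutil.ImageIDAndVersionRole, and the first six hex characters of the SHA-1 of the image string. A minimal sketch of that assumed derivation (requires the crypto/sha1 and fmt imports; the helper name imageIDPodName is illustrative only, not part of this commit):

func imageIDPodName(deploymentName, image string) string {
    // Assumption inferred from images_test.go: id is the first 6 hex characters of sha1(image).
    id := fmt.Sprintf("%0x", sha1.Sum([]byte(image)))[:6]
    // CreatePodName and ImageIDAndVersionRole are existing k8sutil identifiers used by the tests below.
    return k8sutil.CreatePodName(deploymentName, k8sutil.ImageIDAndVersionRole, id, "")
}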
pkg/deployment/images_test.go (new file, 385 lines)

@@ -0,0 +1,385 @@
//
// DISCLAIMER
//
// Copyright 2019 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Tomasz Mielech <tomasz@arangodb.com>
//

package deployment

import (
    "crypto/sha1"
    "fmt"
    "testing"
    "time"

    "github.com/arangodb/kube-arangodb/pkg/util/constants"

    "github.com/arangodb/kube-arangodb/pkg/deployment/resources"

    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    "github.com/stretchr/testify/assert"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util"
    "github.com/stretchr/testify/require"
)

const (
    testNewImage = testImage + "2"
)

type testCaseImageUpdate struct {
    Name             string
    ArangoDeployment *api.ArangoDeployment
    Before           func(*testing.T, *Deployment)
    After            func(*testing.T, *Deployment)
    ExpectedError    error
    RetrySoon        bool
    ExpectedPod      v1.Pod
}

func TestEnsureImages(t *testing.T) {
    // Arange
    terminationGracePeriodSeconds := int64((time.Second * 30).Seconds())
    id := fmt.Sprintf("%0x", sha1.Sum([]byte(testNewImage)))[:6]
    hostname := testDeploymentName + "-" + k8sutil.ImageIDAndVersionRole + "-" + id

    testCases := []testCaseImageUpdate{
        {
            Name: "Image has not been changed",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testImage),
                },
            },
        },
        {
            Name: "Image has been changed",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testNewImage),
                },
            },
            RetrySoon: true,
            ExpectedPod: v1.Pod{
                Spec: v1.PodSpec{
                    Volumes: []v1.Volume{
                        k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
                    },
                    Containers: []v1.Container{
                        {
                            Name:    k8sutil.ServerContainerName,
                            Image:   testNewImage,
                            Command: createTestCommandForImageUpdatePod(),
                            Ports:   createTestPorts(),
                            Resources: v1.ResourceRequirements{
                                Limits:   make(v1.ResourceList),
                                Requests: make(v1.ResourceList),
                            },
                            VolumeMounts: []v1.VolumeMount{
                                k8sutil.ArangodVolumeMount(),
                            },
                            ImagePullPolicy: v1.PullIfNotPresent,
                            SecurityContext: &v1.SecurityContext{
                                Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
                            },
                        },
                    },
                    RestartPolicy:                 v1.RestartPolicyNever,
                    Tolerations:                   getTestTolerations(),
                    TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
                    Hostname:                      hostname,
                    Subdomain:                     testDeploymentName + "-int",
                    Affinity: k8sutil.CreateAffinity(testDeploymentName,
                        k8sutil.ImageIDAndVersionRole, false, ""),
                },
            },
        },
        {
            Name: "Image not been changed with license",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testNewImage),
                    License: api.LicenseSpec{
                        SecretName: util.NewString(testLicense),
                    },
                },
            },
            RetrySoon: true,
            ExpectedPod: v1.Pod{
                Spec: v1.PodSpec{
                    Volumes: []v1.Volume{
                        k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
                    },
                    Containers: []v1.Container{
                        {
                            Name:    k8sutil.ServerContainerName,
                            Image:   testNewImage,
                            Command: createTestCommandForImageUpdatePod(),
                            Ports:   createTestPorts(),
                            Env: []v1.EnvVar{
                                k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey,
                                    testLicense, constants.SecretKeyToken),
                            },
                            Resources: v1.ResourceRequirements{
                                Limits:   make(v1.ResourceList),
                                Requests: make(v1.ResourceList),
                            },
                            VolumeMounts: []v1.VolumeMount{
                                k8sutil.ArangodVolumeMount(),
                            },
                            ImagePullPolicy: v1.PullIfNotPresent,
                            SecurityContext: &v1.SecurityContext{
                                Capabilities: &v1.Capabilities{Drop: []v1.Capability{"ALL"}},
                            },
                        },
                    },
                    RestartPolicy:                 v1.RestartPolicyNever,
                    Tolerations:                   getTestTolerations(),
                    TerminationGracePeriodSeconds: &terminationGracePeriodSeconds,
                    Hostname:                      hostname,
                    Subdomain:                     testDeploymentName + "-int",
                    Affinity: k8sutil.CreateAffinity(testDeploymentName,
                        k8sutil.ImageIDAndVersionRole, false, ""),
                },
            },
        },
        {
            Name: "Image is being updated in failed phase",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testNewImage),
                },
            },
            Before: func(t *testing.T, deployment *Deployment) {
                pod := v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name:              k8sutil.CreatePodName(testDeploymentName, k8sutil.ImageIDAndVersionRole, id, ""),
                        CreationTimestamp: metav1.Now(),
                    },
                    Spec: v1.PodSpec{},
                    Status: v1.PodStatus{
                        Phase: v1.PodFailed,
                    },
                }

                _, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(&pod)
                require.NoError(t, err)
            },
            After: func(t *testing.T, deployment *Deployment) {
                pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
                require.NoError(t, err)
                require.Len(t, pods.Items, 1)
            },
        },
        {
            Name: "Image is being updated too long in failed phase",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testNewImage),
                },
            },
            Before: func(t *testing.T, deployment *Deployment) {
                pod := v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name: k8sutil.CreatePodName(testDeploymentName, k8sutil.ImageIDAndVersionRole, id, ""),
                    },
                    Status: v1.PodStatus{
                        Phase: v1.PodFailed,
                    },
                }
                _, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(&pod)
                require.NoError(t, err)
            },
            After: func(t *testing.T, deployment *Deployment) {
                pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
                require.NoError(t, err)
                require.Len(t, pods.Items, 0)
            },
        },
        {
            Name: "Image is being updated in not ready phase",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testNewImage),
                },
            },
            RetrySoon: true,
            Before: func(t *testing.T, deployment *Deployment) {
                pod := v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name: k8sutil.CreatePodName(testDeploymentName, k8sutil.ImageIDAndVersionRole, id, ""),
                    },
                    Status: v1.PodStatus{
                        Conditions: []v1.PodCondition{
                            {
                                Type: v1.PodScheduled,
                            },
                        },
                    },
                }
                _, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(&pod)
                require.NoError(t, err)
            },
            After: func(t *testing.T, deployment *Deployment) {
                pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
                require.NoError(t, err)
                require.Len(t, pods.Items, 1)
            },
        },
        {
            Name: "Image is being updated in ready phase with empty statuses list",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testNewImage),
                },
            },
            RetrySoon: true,
            Before: func(t *testing.T, deployment *Deployment) {
                pod := v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name: k8sutil.CreatePodName(testDeploymentName, k8sutil.ImageIDAndVersionRole, id, ""),
                    },
                    Status: v1.PodStatus{
                        Conditions: []v1.PodCondition{
                            {
                                Type:   v1.PodReady,
                                Status: v1.ConditionTrue,
                            },
                        },
                    },
                }
                _, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(&pod)
                require.NoError(t, err)
            },
            After: func(t *testing.T, deployment *Deployment) {
                pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
                require.NoError(t, err)
                require.Len(t, pods.Items, 1)
            },
        },
        {
            Name: "Can not get API version of arnagod",
            ArangoDeployment: &api.ArangoDeployment{
                Spec: api.DeploymentSpec{
                    Image: util.NewString(testNewImage),
                },
            },
            RetrySoon: true,
            Before: func(t *testing.T, deployment *Deployment) {
                pod := v1.Pod{
                    ObjectMeta: metav1.ObjectMeta{
                        Name: k8sutil.CreatePodName(testDeploymentName, k8sutil.ImageIDAndVersionRole, id, ""),
                    },
                    Status: v1.PodStatus{
                        Conditions: []v1.PodCondition{
                            {
                                Type:   v1.PodReady,
                                Status: v1.ConditionTrue,
                            },
                        },
                        ContainerStatuses: []v1.ContainerStatus{
                            {},
                        },
                    },
                }
                _, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(&pod)
                require.NoError(t, err)
            },
            After: func(t *testing.T, deployment *Deployment) {
                pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
                require.NoError(t, err)
                require.Len(t, pods.Items, 1)
            },
        },
    }

    for _, testCase := range testCases {
        //nolint:scopelint
        t.Run(testCase.Name, func(t *testing.T) {
            // Arrange
            d, _ := createTestDeployment(Config{}, testCase.ArangoDeployment)

            d.status.last = api.DeploymentStatus{
                Images: createTestImages(false),
            }

            if testCase.Before != nil {
                testCase.Before(t, d)
            }

            // Create custom resource in the fake kubernetes API
            _, err := d.deps.DatabaseCRCli.DatabaseV1().ArangoDeployments(testNamespace).Create(d.apiObject)
            require.NoError(t, err)

            // Act
            retrySoon, err := d.ensureImages(d.apiObject)

            // Assert
            assert.EqualValues(t, testCase.RetrySoon, retrySoon)
            if testCase.ExpectedError != nil {
                assert.EqualError(t, err, testCase.ExpectedError.Error())
                return
            }

            require.NoError(t, err)

            if len(testCase.ExpectedPod.Spec.Containers) > 0 {
                pods, err := d.deps.KubeCli.CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
                require.NoError(t, err)
                require.Len(t, pods.Items, 1)
                require.Equal(t, testCase.ExpectedPod.Spec, pods.Items[0].Spec)

                ownerRef := pods.Items[0].GetOwnerReferences()
                require.Len(t, ownerRef, 1)
                require.Equal(t, ownerRef[0], testCase.ArangoDeployment.AsOwner())
            }

            if testCase.After != nil {
                testCase.After(t, d)
            }
        })
    }
}

func createTestCommandForImageUpdatePod() []string {
    return []string{resources.ArangoDExecutor,
        "--server.authentication=false",
        fmt.Sprintf("--server.endpoint=tcp://[::]:%d", k8sutil.ArangoPort),
        "--database.directory=" + k8sutil.ArangodVolumeMountDir,
        "--log.output=+",
    }
}

func getTestTolerations() []v1.Toleration {

    shortDur := k8sutil.TolerationDuration{
        Forever:  false,
        TimeSpan: time.Second * 5,
    }

    return []v1.Toleration{
        k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeNotReady, shortDur),
        k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeUnreachable, shortDur),
        k8sutil.NewNoExecuteToleration(k8sutil.TolerationKeyNodeAlphaUnreachable, shortDur),
    }
}
@@ -72,7 +72,9 @@ func createTLSCACertificate(log zerolog.Logger, secrets k8sutil.SecretInterface,

// createTLSServerCertificate creates a TLS certificate for a specific server and stores
// it in a secret with the given name.
func createTLSServerCertificate(log zerolog.Logger, secrets v1.SecretInterface, serverNames []string, spec api.TLSSpec, secretName string, ownerRef *metav1.OwnerReference) error {
func createTLSServerCertificate(log zerolog.Logger, secrets v1.SecretInterface, serverNames []string, spec api.TLSSpec,
    secretName string, ownerRef *metav1.OwnerReference) error {

    log = log.With().Str("secret", secretName).Logger()
    // Load alt names
    dnsNames, ipAddresses, emailAddress, err := spec.GetParsedAltNames()
@@ -43,6 +43,7 @@ import (
    "github.com/pkg/errors"
    v1 "k8s.io/api/core/v1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
)

type optionPair struct {

@@ -256,7 +257,8 @@ func createArangodArgs(apiObject metav1.Object, deplSpec api.DeploymentSpec, gro
}

// createArangoSyncArgs creates command line arguments for an arangosync server in the given group.
func createArangoSyncArgs(apiObject metav1.Object, spec api.DeploymentSpec, group api.ServerGroup, groupSpec api.ServerGroupSpec, agents api.MemberStatusList, id string) []string {
func createArangoSyncArgs(apiObject metav1.Object, spec api.DeploymentSpec, group api.ServerGroup,
    groupSpec api.ServerGroupSpec, id string) []string {
    options := make([]optionPair, 0, 64)
    var runCmd string
    var port int

@@ -401,9 +403,6 @@ func (r *Resources) createLivenessProbe(spec api.DeploymentSpec, group api.Serve
            return nil, maskAny(err)
        }
        authorization = "bearer " + token
        if err != nil {
            return nil, maskAny(err)
        }
    } else if group == api.ServerGroupSyncMasters {
        // Fall back to JWT secret
        secretData, err := r.getSyncJWTSecret(spec)

@@ -477,8 +476,8 @@ func (r *Resources) createReadinessProbe(spec api.DeploymentSpec, group api.Serv
    return probeCfg, nil
}

// createPodFinalizers creates a list of finalizers for a pod created for the given group.
func (r *Resources) createPodFinalizers(group api.ServerGroup) []string {
// CreatePodFinalizers creates a list of finalizers for a pod created for the given group.
func (r *Resources) CreatePodFinalizers(group api.ServerGroup) []string {
    switch group {
    case api.ServerGroupAgents:
        return []string{constants.FinalizerPodAgencyServing}

@@ -489,8 +488,8 @@ func (r *Resources) createPodFinalizers(group api.ServerGroup) []string {
    }
}

// createPodTolerations creates a list of tolerations for a pod created for the given group.
func (r *Resources) createPodTolerations(group api.ServerGroup, groupSpec api.ServerGroupSpec) []v1.Toleration {
// CreatePodTolerations creates a list of tolerations for a pod created for the given group.
func (r *Resources) CreatePodTolerations(group api.ServerGroup, groupSpec api.ServerGroupSpec) []v1.Toleration {
    notReadyDur := k8sutil.TolerationDuration{Forever: false, TimeSpan: time.Minute}
    unreachableDur := k8sutil.TolerationDuration{Forever: false, TimeSpan: time.Minute}
    switch group {

@@ -548,17 +547,12 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
        return maskAny(fmt.Errorf("Member '%s' not found", memberID))
    }
    groupSpec := spec.GetServerGroupSpec(group)
    lifecycleImage := r.context.GetLifecycleImage()
    alpineImage := r.context.GetAlpineImage()
    terminationGracePeriod := group.DefaultTerminationGracePeriod()
    tolerations := r.createPodTolerations(group, groupSpec)
    serviceAccountName := groupSpec.GetServiceAccountName()

    // Update pod name
    role := group.AsRole()
    roleAbbr := group.AsRoleAbbreviated()
    podSuffix := createPodSuffix(spec)
    m.PodName = k8sutil.CreatePodName(apiObject.GetName(), roleAbbr, m.ID, podSuffix)

    m.PodName = k8sutil.CreatePodName(apiObject.GetName(), roleAbbr, m.ID, CreatePodSuffix(spec))
    newPhase := api.MemberPhaseCreated
    // Select image
    var imageInfo api.ImageInfo

@@ -587,15 +581,7 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
        newPhase = api.MemberPhaseUpgrading
    }
    args := createArangodArgs(apiObject, spec, group, status.Members.Agents, m.ID, version, autoUpgrade)
    env := make(map[string]k8sutil.EnvValue)
    livenessProbe, err := r.createLivenessProbe(spec, group)
    if err != nil {
        return maskAny(err)
    }
    readinessProbe, err := r.createReadinessProbe(spec, group, version)
    if err != nil {
        return maskAny(err)
    }

    tlsKeyfileSecretName := ""
    if spec.IsSecure() {
        tlsKeyfileSecretName = k8sutil.CreateTLSKeyfileSecretName(apiObject.GetName(), role, m.ID)

@@ -626,19 +612,6 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
        if err := k8sutil.ValidateTokenSecret(secrets, clusterJWTSecretName); err != nil {
            return maskAny(errors.Wrapf(err, "Cluster JWT secret validation failed"))
        }
    } else {
        env[constants.EnvArangodJWTSecret] = k8sutil.EnvValue{
            SecretName: spec.Authentication.GetJWTSecretName(),
            SecretKey:  constants.SecretKeyToken,
        }
    }

    }

    if spec.License.HasSecretName() {
        env[constants.EnvArangoLicenseKey] = k8sutil.EnvValue{
            SecretName: spec.License.GetSecretName(),
            SecretKey:  constants.SecretKeyToken,
        }
    }

@@ -659,12 +632,20 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
        }
    }

    engine := spec.GetStorageEngine().AsArangoArgument()
    requireUUID := group == api.ServerGroupDBServers && m.IsInitialized
    finalizers := r.createPodFinalizers(group)
    if err := k8sutil.CreateArangodPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, m.PersistentVolumeClaimName, imageInfo.ImageID, lifecycleImage, alpineImage, spec.GetImagePullPolicy(), spec.ImagePullSecrets,
        engine, requireUUID, terminationGracePeriod, args, env, finalizers, livenessProbe, readinessProbe, tolerations, serviceAccountName, tlsKeyfileSecretName, rocksdbEncryptionSecretName,
        clusterJWTSecretName, groupSpec.GetNodeSelector(), groupSpec.PriorityClassName, groupSpec.Resources, exporter, groupSpec.GetSidecars(), groupSpec.VolumeClaimTemplate); err != nil {
    memberPod := MemberArangoDPod{
        status:                      m,
        tlsKeyfileSecretName:        tlsKeyfileSecretName,
        rocksdbEncryptionSecretName: rocksdbEncryptionSecretName,
        clusterJWTSecretName:        clusterJWTSecretName,
        exporter:                    exporter,
        groupSpec:                   groupSpec,
        spec:                        spec,
        group:                       group,
        resources:                   r,
        imageInfo:                   imageInfo,
    }

    if err := CreateArangoPod(kubecli, apiObject, role, m.ID, m.PodName, args, &memberPod); err != nil {
        return maskAny(err)
    }

@@ -733,31 +714,21 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
    }

    // Prepare arguments
    args := createArangoSyncArgs(apiObject, spec, group, groupSpec, status.Members.Agents, m.ID)
    env := make(map[string]k8sutil.EnvValue)
    if spec.Sync.Monitoring.GetTokenSecretName() != "" {
        env[constants.EnvArangoSyncMonitoringToken] = k8sutil.EnvValue{
            SecretName: spec.Sync.Monitoring.GetTokenSecretName(),
            SecretKey:  constants.SecretKeyToken,
    args := createArangoSyncArgs(apiObject, spec, group, groupSpec, m.ID)

    memberSyncPod := MemberSyncPod{
        tlsKeyfileSecretName:   tlsKeyfileSecretName,
        clientAuthCASecretName: clientAuthCASecretName,
        masterJWTSecretName:    masterJWTSecretName,
        clusterJWTSecretName:   clusterJWTSecretName,
        groupSpec:              groupSpec,
        spec:                   spec,
        group:                  group,
        resources:              r,
        image:                  imageID,
    }
    }
    if spec.License.HasSecretName() {
        env[constants.EnvArangoLicenseKey] = k8sutil.EnvValue{
            SecretName: spec.License.GetSecretName(),
            SecretKey:  constants.SecretKeyToken,
        }
    }
    livenessProbe, err := r.createLivenessProbe(spec, group)
    if err != nil {
        return maskAny(err)
    }
    affinityWithRole := ""
    if group == api.ServerGroupSyncWorkers {
        affinityWithRole = api.ServerGroupDBServers.AsRole()
    }
    if err := k8sutil.CreateArangoSyncPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, imageID, lifecycleImage, spec.GetImagePullPolicy(), spec.ImagePullSecrets, terminationGracePeriod, args, env,
        livenessProbe, tolerations, serviceAccountName, tlsKeyfileSecretName, clientAuthCASecretName, masterJWTSecretName, clusterJWTSecretName, affinityWithRole, groupSpec.GetNodeSelector(),
        groupSpec.PriorityClassName, groupSpec.Resources, groupSpec.GetSidecars()); err != nil {

    if err := CreateArangoPod(kubecli, apiObject, role, m.ID, m.PodName, args, &memberSyncPod); err != nil {
        return maskAny(err)
    }
    log.Debug().Str("pod-name", m.PodName).Msg("Created pod")

@@ -781,12 +752,49 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
    return nil
}

// CreateArangoPod creates a new Pod with container provided by parameter 'containerCreator'
// If the pod already exists, nil is returned.
// If another error occurs, that error is returned.
func CreateArangoPod(kubecli kubernetes.Interface, deployment k8sutil.APIObject, role, id, podName string,
    args []string, podCreator k8sutil.PodCreator) error {

    // Prepare basic pod
    p := k8sutil.NewPod(deployment.GetName(), role, id, podName, podCreator)

    podCreator.Init(&p)

    if initContainers, err := podCreator.GetInitContainers(); err != nil {
        return maskAny(err)
    } else if initContainers != nil {
        p.Spec.InitContainers = append(p.Spec.InitContainers, initContainers...)
    }

    c, err := k8sutil.NewContainer(args, podCreator.GetContainerCreator())
    if err != nil {
        return maskAny(err)
    }

    p.Spec.Volumes, c.VolumeMounts = podCreator.GetVolumes()
    p.Spec.Containers = append(p.Spec.Containers, c)
    podCreator.GetSidecars(&p)

    // Add (anti-)affinity
    p.Spec.Affinity = k8sutil.CreateAffinity(deployment.GetName(), role, !podCreator.IsDeploymentMode(),
        podCreator.GetAffinityRole())

    if err := k8sutil.CreatePod(kubecli, &p, deployment.GetNamespace(), deployment.AsOwner()); err != nil {
        return maskAny(err)
    }
    return nil
}

// EnsurePods creates all Pods listed in member status
func (r *Resources) EnsurePods() error {
    iterator := r.context.GetServerGroupIterator()
    status, _ := r.context.GetStatus()
    deploymentStatus, _ := r.context.GetStatus()
    imageNotFoundOnce := &sync.Once{}
    if err := iterator.ForeachServerGroup(func(group api.ServerGroup, groupSpec api.ServerGroupSpec, status *api.MemberStatusList) error {

    createPodMember := func(group api.ServerGroup, groupSpec api.ServerGroupSpec, status *api.MemberStatusList) error {
        for _, m := range *status {
            if m.Phase != api.MemberPhaseNone {
                continue

@@ -800,13 +808,16 @@ func (r *Resources) EnsurePods() error {
            }
        }
        return nil
    }, &status); err != nil {
    }

    if err := iterator.ForeachServerGroup(createPodMember, &deploymentStatus); err != nil {
        return maskAny(err)
    }

    return nil
}

func createPodSuffix(spec api.DeploymentSpec) string {
func CreatePodSuffix(spec api.DeploymentSpec) string {
    raw, _ := json.Marshal(spec)
    hash := sha1.Sum(raw)
    return fmt.Sprintf("%0x", hash)[:6]
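CreateArangoPod above delegates the actual object construction to k8sutil.NewPod and k8sutil.NewContainer, neither of which is shown in this diff. As a rough orientation only, and based on the server container that the new images_test.go expects, NewContainer presumably assembles the v1.Container from a ContainerCreator roughly as sketched below; the ports and security context that the test also expects are omitted here, and newContainerSketch is an illustrative name, not the real helper:

func newContainerSketch(args []string, cc k8sutil.ContainerCreator) (v1.Container, error) {
    // Probes and lifecycle may legitimately be nil; errors abort pod creation.
    liveness, readiness, err := cc.GetProbes()
    if err != nil {
        return v1.Container{}, err
    }
    lifecycle, err := cc.GetLifecycle()
    if err != nil {
        return v1.Container{}, err
    }
    return v1.Container{
        Name:            k8sutil.ServerContainerName,
        Image:           cc.GetImage(),
        Command:         append([]string{cc.GetExecutor()}, args...),
        Env:             cc.GetEnvs(),
        Resources:       cc.GetResourceRequirements(),
        ImagePullPolicy: cc.GetImagePullPolicy(),
        Lifecycle:       lifecycle,
        LivenessProbe:   liveness,
        ReadinessProbe:  readiness,
    }, nil
}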
pkg/deployment/resources/pod_creator_arangod.go (new file, 283 lines)

@@ -0,0 +1,283 @@
//
// DISCLAIMER
//
// Copyright 2019 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Tomasz Mielech <tomasz@arangodb.com>
//

package resources

import (
    "math"

    "github.com/arangodb/kube-arangodb/pkg/util/constants"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
    v1 "k8s.io/api/core/v1"
)

const (
    ArangoDExecutor string = "/usr/sbin/arangod"
)

type MemberArangoDPod struct {
    status                      api.MemberStatus
    tlsKeyfileSecretName        string
    rocksdbEncryptionSecretName string
    clusterJWTSecretName        string
    exporter                    *k8sutil.ArangodbExporterContainerConf
    groupSpec                   api.ServerGroupSpec
    spec                        api.DeploymentSpec
    group                       api.ServerGroup
    context                     Context
    resources                   *Resources
    imageInfo                   api.ImageInfo
}

type ArangoDContainer struct {
    resources *Resources
    groupSpec api.ServerGroupSpec
    spec      api.DeploymentSpec
    group     api.ServerGroup
    imageInfo api.ImageInfo
}

func (a *ArangoDContainer) GetExecutor() string {
    return ArangoDExecutor
}

func (a *ArangoDContainer) GetProbes() (*v1.Probe, *v1.Probe, error) {
    var liveness, readiness *v1.Probe

    probeLivenessConfig, err := a.resources.createLivenessProbe(a.spec, a.group)
    if err != nil {
        return nil, nil, err
    }

    probeReadinessConfig, err := a.resources.createReadinessProbe(a.spec, a.group, a.imageInfo.ArangoDBVersion)
    if err != nil {
        return nil, nil, err
    }

    if probeLivenessConfig != nil {
        liveness = probeLivenessConfig.Create()
    }

    if probeReadinessConfig != nil {
        readiness = probeReadinessConfig.Create()
    }

    return liveness, readiness, nil
}

func (a *ArangoDContainer) GetImage() string {
    return a.imageInfo.ImageID
}

func (a *ArangoDContainer) GetEnvs() []v1.EnvVar {
    envs := make([]v1.EnvVar, 0)

    if a.spec.IsAuthenticated() {
        if !versionHasJWTSecretKeyfile(a.imageInfo.ArangoDBVersion) {
            env := k8sutil.CreateEnvSecretKeySelector(constants.EnvArangodJWTSecret,
                a.spec.Authentication.GetJWTSecretName(), constants.SecretKeyToken)

            envs = append(envs, env)
        }
    }

    if a.spec.License.HasSecretName() {
        env := k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey, a.spec.License.GetSecretName(),
            constants.SecretKeyToken)

        envs = append(envs, env)
    }

    if a.resources.context.GetLifecycleImage() != "" {
        envs = append(envs, k8sutil.GetLifecycleEnv()...)
    }

    if len(envs) > 0 {
        return envs
    }

    return nil
}

func (a *ArangoDContainer) GetResourceRequirements() v1.ResourceRequirements {
    if a.groupSpec.GetVolumeClaimTemplate() != nil {
        return a.groupSpec.Resources
    }

    return k8sutil.ExtractPodResourceRequirement(a.groupSpec.Resources)
}

func (a *ArangoDContainer) GetLifecycle() (*v1.Lifecycle, error) {
    if a.resources.context.GetLifecycleImage() != "" {
        return k8sutil.NewLifecycle()
    }
    return nil, nil
}

func (a *ArangoDContainer) GetImagePullPolicy() v1.PullPolicy {
    return a.spec.GetImagePullPolicy()
}

func (m *MemberArangoDPod) Init(pod *v1.Pod) {
    terminationGracePeriodSeconds := int64(math.Ceil(m.group.DefaultTerminationGracePeriod().Seconds()))
    pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
    pod.Spec.PriorityClassName = m.groupSpec.PriorityClassName
}

func (m *MemberArangoDPod) GetImagePullSecrets() []string {
    return m.spec.ImagePullSecrets
}

func (m *MemberArangoDPod) GetAffinityRole() string {
    return ""
}

func (m *MemberArangoDPod) GetNodeSelector() map[string]string {
    return m.groupSpec.GetNodeSelector()
}

func (m *MemberArangoDPod) GetServiceAccountName() string {
    return m.groupSpec.GetServiceAccountName()
}

func (m *MemberArangoDPod) GetSidecars(pod *v1.Pod) {
    if m.exporter != nil {
        // Metrics sidecar
        c := k8sutil.ArangodbexporterContainer(m.exporter.Image, m.exporter.Args, m.exporter.Env, m.exporter.LivenessProbe)

        if m.exporter.JWTTokenSecretName != "" {
            c.VolumeMounts = append(c.VolumeMounts, k8sutil.ExporterJWTVolumeMount())
        }

        if m.tlsKeyfileSecretName != "" {
            c.VolumeMounts = append(c.VolumeMounts, k8sutil.TlsKeyfileVolumeMount())
        }

        pod.Spec.Containers = append(pod.Spec.Containers, c)
        pod.Labels[k8sutil.LabelKeyArangoExporter] = "yes"
    }

    // A sidecar provided by the user
    sidecars := m.groupSpec.GetSidecars()
    if len(sidecars) > 0 {
        pod.Spec.Containers = append(pod.Spec.Containers, sidecars...)
    }

    return
}

func (m *MemberArangoDPod) GetVolumes() ([]v1.Volume, []v1.VolumeMount) {
    var volumes []v1.Volume
    var volumeMounts []v1.VolumeMount

    volumeMounts = append(volumeMounts, k8sutil.ArangodVolumeMount())

    if m.resources.context.GetLifecycleImage() != "" {
        volumeMounts = append(volumeMounts, k8sutil.LifecycleVolumeMount())
    }

    if m.status.PersistentVolumeClaimName != "" {
        vol := k8sutil.CreateVolumeWithPersitantVolumeClaim(k8sutil.ArangodVolumeName,
            m.status.PersistentVolumeClaimName)

        volumes = append(volumes, vol)
    } else {
        volumes = append(volumes, k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName))
    }

    if m.tlsKeyfileSecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.TlsKeyfileVolumeName, m.tlsKeyfileSecretName)
        volumes = append(volumes, vol)
        volumeMounts = append(volumeMounts, k8sutil.TlsKeyfileVolumeMount())
    }

    if m.rocksdbEncryptionSecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.RocksdbEncryptionVolumeName, m.rocksdbEncryptionSecretName)
        volumes = append(volumes, vol)
        volumeMounts = append(volumeMounts, k8sutil.RocksdbEncryptionVolumeMount())
    }

    if m.exporter != nil && m.exporter.JWTTokenSecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.ExporterJWTVolumeName, m.exporter.JWTTokenSecretName)
        volumes = append(volumes, vol)
    }

    if m.clusterJWTSecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.ClusterJWTSecretVolumeName, m.clusterJWTSecretName)
        volumes = append(volumes, vol)
        volumeMounts = append(volumeMounts, k8sutil.ClusterJWTVolumeMount())
    }

    if m.resources.context.GetLifecycleImage() != "" {
        volumes = append(volumes, k8sutil.LifecycleVolume())
    }

    return volumes, volumeMounts
}

func (m *MemberArangoDPod) IsDeploymentMode() bool {
    return m.spec.IsDevelopment()
}

func (m *MemberArangoDPod) GetInitContainers() ([]v1.Container, error) {
    var initContainers []v1.Container

    lifecycleImage := m.resources.context.GetLifecycleImage()
    if lifecycleImage != "" {
        c, err := k8sutil.InitLifecycleContainer(lifecycleImage)
        if err != nil {
            return nil, err
        }
        initContainers = append(initContainers, c)
    }

    alpineImage := m.resources.context.GetAlpineImage()
    if alpineImage != "" {
        engine := m.spec.GetStorageEngine().AsArangoArgument()
        requireUUID := m.group == api.ServerGroupDBServers && m.status.IsInitialized

        c := k8sutil.ArangodInitContainer("uuid", m.status.ID, engine, alpineImage, requireUUID)
        initContainers = append(initContainers, c)
    }

    return initContainers, nil
}

func (m *MemberArangoDPod) GetFinalizers() []string {
    return m.resources.CreatePodFinalizers(m.group)
}

func (m *MemberArangoDPod) GetTolerations() []v1.Toleration {
    return m.resources.CreatePodTolerations(m.group, m.groupSpec)
}

func (m *MemberArangoDPod) GetContainerCreator() k8sutil.ContainerCreator {
    return &ArangoDContainer{
        spec:      m.spec,
        group:     m.group,
        resources: m.resources,
        imageInfo: m.imageInfo,
        groupSpec: m.groupSpec,
    }
}
pkg/deployment/resources/pod_creator_sync.go (new file, 230 lines)

@@ -0,0 +1,230 @@
//
// DISCLAIMER
//
// Copyright 2019 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Tomasz Mielech <tomasz@arangodb.com>
//

package resources

import (
    "math"

    "github.com/arangodb/kube-arangodb/pkg/util/constants"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
    v1 "k8s.io/api/core/v1"
)

const (
    ArangoSyncExecutor string = "/usr/sbin/arangosync"
)

type ArangoSyncContainer struct {
    groupSpec api.ServerGroupSpec
    spec      api.DeploymentSpec
    group     api.ServerGroup
    resources *Resources
    image     string
}

type MemberSyncPod struct {
    tlsKeyfileSecretName   string
    clientAuthCASecretName string
    masterJWTSecretName    string
    clusterJWTSecretName   string
    groupSpec              api.ServerGroupSpec
    spec                   api.DeploymentSpec
    group                  api.ServerGroup
    resources              *Resources
    image                  string
}

func (a *ArangoSyncContainer) GetExecutor() string {
    return ArangoSyncExecutor
}

func (a *ArangoSyncContainer) GetProbes() (*v1.Probe, *v1.Probe, error) {
    livenessProbe, err := a.resources.createLivenessProbe(a.spec, a.group)
    if err != nil {
        return nil, nil, err
    }

    if livenessProbe != nil {
        return livenessProbe.Create(), nil, nil
    }

    return nil, nil, nil
}

func (a *ArangoSyncContainer) GetResourceRequirements() v1.ResourceRequirements {
    return a.groupSpec.Resources
}

func (a *ArangoSyncContainer) GetLifecycle() (*v1.Lifecycle, error) {
    if a.resources.context.GetLifecycleImage() != "" {
        return k8sutil.NewLifecycle()
    }
    return nil, nil
}

func (a *ArangoSyncContainer) GetImagePullPolicy() v1.PullPolicy {
    return a.spec.GetImagePullPolicy()
}

func (a *ArangoSyncContainer) GetImage() string {
    return a.image
}

func (a *ArangoSyncContainer) GetEnvs() []v1.EnvVar {
    envs := make([]v1.EnvVar, 0)

    if a.spec.Sync.Monitoring.GetTokenSecretName() != "" {
        env := k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
            a.spec.Sync.Monitoring.GetTokenSecretName(), constants.SecretKeyToken)

        envs = append(envs, env)
    }

    if a.spec.License.HasSecretName() {
        env := k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey, a.spec.License.GetSecretName(),
            constants.SecretKeyToken)

        envs = append(envs, env)
    }

    if a.resources.context.GetLifecycleImage() != "" {
        envs = append(envs, k8sutil.GetLifecycleEnv()...)
    }

    if len(envs) > 0 {
        return envs
    }

    return nil
}

func (m *MemberSyncPod) GetAffinityRole() string {
    if m.group == api.ServerGroupSyncWorkers {
        return api.ServerGroupDBServers.AsRole()
    }
    return ""
}

func (m *MemberSyncPod) GetImagePullSecrets() []string {
    return m.spec.ImagePullSecrets
}

func (m *MemberSyncPod) GetNodeSelector() map[string]string {
    return m.groupSpec.GetNodeSelector()
}

func (m *MemberSyncPod) GetServiceAccountName() string {
    return m.groupSpec.GetServiceAccountName()
}

func (m *MemberSyncPod) GetSidecars(pod *v1.Pod) {
    // A sidecar provided by the user
    sidecars := m.groupSpec.GetSidecars()
    if len(sidecars) > 0 {
        pod.Spec.Containers = append(pod.Spec.Containers, sidecars...)
    }
}

func (m *MemberSyncPod) GetVolumes() ([]v1.Volume, []v1.VolumeMount) {
    var volumes []v1.Volume
    var volumeMounts []v1.VolumeMount

    if m.resources.context.GetLifecycleImage() != "" {
        volumes = append(volumes, k8sutil.LifecycleVolume())
        volumeMounts = append(volumeMounts, k8sutil.LifecycleVolumeMount())
    }

    if m.tlsKeyfileSecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.TlsKeyfileVolumeName, m.tlsKeyfileSecretName)
        volumes = append(volumes, vol)
        volumeMounts = append(volumeMounts, k8sutil.TlsKeyfileVolumeMount())
    }

    // Client Authentication certificate secret mount (if any)
    if m.clientAuthCASecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.ClientAuthCAVolumeName, m.clientAuthCASecretName)
        volumes = append(volumes, vol)
        volumeMounts = append(volumeMounts, k8sutil.ClientAuthCACertificateVolumeMount())
    }

    // Master JWT secret mount (if any)
    if m.masterJWTSecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.MasterJWTSecretVolumeName, m.masterJWTSecretName)
        volumes = append(volumes, vol)
        volumeMounts = append(volumeMounts, k8sutil.MasterJWTVolumeMount())
    }

    // Cluster JWT secret mount (if any)
    if m.clusterJWTSecretName != "" {
        vol := k8sutil.CreateVolumeWithSecret(k8sutil.ClusterJWTSecretVolumeName, m.clusterJWTSecretName)
        volumes = append(volumes, vol)
        volumeMounts = append(volumeMounts, k8sutil.ClusterJWTVolumeMount())
    }

    return volumes, volumeMounts
}

func (m *MemberSyncPod) IsDeploymentMode() bool {
    return m.spec.IsDevelopment()
}

func (m *MemberSyncPod) GetInitContainers() ([]v1.Container, error) {
    var initContainers []v1.Container

    lifecycleImage := m.resources.context.GetLifecycleImage()
    if lifecycleImage != "" {
        c, err := k8sutil.InitLifecycleContainer(lifecycleImage)
        if err != nil {
            return nil, err
        }
        initContainers = append(initContainers, c)
    }

    return initContainers, nil
}

func (m *MemberSyncPod) GetFinalizers() []string {
    return nil
}

func (m *MemberSyncPod) GetTolerations() []v1.Toleration {
    return m.resources.CreatePodTolerations(m.group, m.groupSpec)
}

func (m *MemberSyncPod) GetContainerCreator() k8sutil.ContainerCreator {
    return &ArangoSyncContainer{
        groupSpec: m.groupSpec,
        spec:      m.spec,
        group:     m.group,
        resources: m.resources,
        image:     m.image,
    }
}

func (m *MemberSyncPod) Init(pod *v1.Pod) {
    terminationGracePeriodSeconds := int64(math.Ceil(m.group.DefaultTerminationGracePeriod().Seconds()))
    pod.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds
    pod.Spec.PriorityClassName = m.groupSpec.PriorityClassName
}

@@ -285,7 +285,7 @@ func (r *Resources) GetExpectedPodArguments(apiObject metav1.Object, deplSpec ap
    }
    if group.IsArangosync() {
        groupSpec := deplSpec.GetServerGroupSpec(group)
        return createArangoSyncArgs(apiObject, deplSpec, group, groupSpec, agents, id)
        return createArangoSyncArgs(apiObject, deplSpec, group, groupSpec, id)
    }
    return nil
}

@@ -27,10 +27,10 @@ import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// createAffinity creates pod anti-affinity for the given role.
// CreateAffinity creates pod anti-affinity for the given role.
// role contains the name of the role to configure any-affinity with.
// affinityWithRole contains the role to configure affinity with.
func createAffinity(deploymentName, role string, required bool, affinityWithRole string) *v1.Affinity {
func CreateAffinity(deploymentName, role string, required bool, affinityWithRole string) *v1.Affinity {
    a := &v1.Affinity{
        NodeAffinity: &v1.NodeAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{

@@ -31,7 +31,6 @@ import (
    "github.com/stretchr/testify/require"
)

// TestCreateAffinity tests createAffinity
func TestCreateAffinity(t *testing.T) {
    expectedNodeAffinity := &v1.NodeAffinity{
        RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{

@@ -49,7 +48,7 @@ func TestCreateAffinity(t *testing.T) {
        },
    }
    // Required
    a := createAffinity("test", "role", true, "")
    a := CreateAffinity("test", "role", true, "")
    assert.Nil(t, a.PodAffinity)
    require.NotNil(t, a.PodAntiAffinity)
    require.Len(t, a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1)

@@ -62,7 +61,7 @@ func TestCreateAffinity(t *testing.T) {
    assert.Equal(t, expectedNodeAffinity, a.NodeAffinity)

    // Require & affinity with role dbserver
    a = createAffinity("test", "role", true, "dbserver")
    a = CreateAffinity("test", "role", true, "dbserver")
    require.NotNil(t, a.PodAffinity)
    require.Len(t, a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 1)
    assert.Len(t, a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, 0)

@@ -84,7 +83,7 @@ func TestCreateAffinity(t *testing.T) {
    assert.Equal(t, expectedNodeAffinity, a.NodeAffinity)

    // Not Required
    a = createAffinity("test", "role", false, "")
    a = CreateAffinity("test", "role", false, "")
    assert.Nil(t, a.PodAffinity)
    require.NotNil(t, a.PodAntiAffinity)
    assert.Len(t, a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 0)

@@ -97,7 +96,7 @@ func TestCreateAffinity(t *testing.T) {
    assert.Equal(t, expectedNodeAffinity, a.NodeAffinity)

    // Not Required & affinity with role dbserver
    a = createAffinity("test", "role", false, "dbserver")
    a = CreateAffinity("test", "role", false, "dbserver")
    require.NotNil(t, a.PodAffinity)
    require.Len(t, a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, 1)
    assert.Len(t, a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, 0)
102
pkg/util/k8sutil/lifecycle.go
Normal file
102
pkg/util/k8sutil/lifecycle.go
Normal file
|
@ -0,0 +1,102 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2019 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Author Tomasz Mielech <tomasz@arangodb.com>
|
||||
//
|
||||
|
||||
package k8sutil
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/constants"
|
||||
|
||||
v1 "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
const (
|
||||
initLifecycleContainerName = "init-lifecycle"
|
||||
lifecycleVolumeMountDir = "/lifecycle/tools"
|
||||
lifecycleVolumeName = "lifecycle"
|
||||
)
|
||||
|
||||
// InitLifecycleContainer creates an init-container to copy the lifecycle binary to a shared volume.
|
||||
func InitLifecycleContainer(image string) (v1.Container, error) {
|
||||
binaryPath, err := os.Executable()
|
||||
if err != nil {
|
||||
return v1.Container{}, maskAny(err)
|
||||
}
|
||||
c := v1.Container{
|
||||
Command: append([]string{binaryPath}, "lifecycle", "copy", "--target", lifecycleVolumeMountDir),
|
||||
Name: initLifecycleContainerName,
|
||||
Image: image,
|
||||
ImagePullPolicy: v1.PullIfNotPresent,
|
||||
VolumeMounts: []v1.VolumeMount{
|
||||
LifecycleVolumeMount(),
|
||||
},
|
||||
SecurityContext: SecurityContextWithoutCapabilities(),
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
// NewLifecycle creates a lifecycle structure with preStop handler.
|
||||
func NewLifecycle() (*v1.Lifecycle, error) {
|
||||
binaryPath, err := os.Executable()
|
||||
if err != nil {
|
||||
return nil, maskAny(err)
|
||||
}
|
||||
exePath := filepath.Join(lifecycleVolumeMountDir, filepath.Base(binaryPath))
|
||||
lifecycle := &v1.Lifecycle{
|
||||
PreStop: &v1.Handler{
|
||||
Exec: &v1.ExecAction{
|
||||
Command: append([]string{exePath}, "lifecycle", "preStop"),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return lifecycle, nil
|
||||
}
|
||||
|
||||
func GetLifecycleEnv() []v1.EnvVar {
|
||||
return []v1.EnvVar{
|
||||
CreateEnvFieldPath(constants.EnvOperatorPodName, "metadata.name"),
|
||||
CreateEnvFieldPath(constants.EnvOperatorPodNamespace, "metadata.namespace"),
|
||||
CreateEnvFieldPath(constants.EnvOperatorNodeName, "spec.nodeName"),
|
||||
CreateEnvFieldPath(constants.EnvOperatorNodeNameArango, "spec.nodeName"),
|
||||
}
|
||||
}
|
||||
|
||||
// LifecycleVolumeMount creates a volume mount structure for shared lifecycle emptyDir.
|
||||
func LifecycleVolumeMount() v1.VolumeMount {
|
||||
return v1.VolumeMount{
|
||||
Name: lifecycleVolumeName,
|
||||
MountPath: lifecycleVolumeMountDir,
|
||||
}
|
||||
}
|
||||
|
||||
// LifecycleVolume creates a volume mount structure for shared lifecycle emptyDir.
|
||||
func LifecycleVolume() v1.Volume {
|
||||
return v1.Volume{
|
||||
Name: lifecycleVolumeName,
|
||||
VolumeSource: v1.VolumeSource{
|
||||
EmptyDir: &v1.EmptyDirVolumeSource{},
|
||||
},
|
||||
}
|
||||
}
@@ -24,36 +24,28 @@ package k8sutil

import (
	"fmt"
	"math"
	"os"
	"path/filepath"
	"strings"
	"time"

	"github.com/arangodb/kube-arangodb/pkg/util/constants"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

const (
	InitDataContainerName = "init-data"
	InitLifecycleContainerName = "init-lifecycle"
	ServerContainerName = "server"
	ExporterContainerName = "exporter"
	arangodVolumeName = "arangod-data"
	tlsKeyfileVolumeName = "tls-keyfile"
	lifecycleVolumeName = "lifecycle"
	clientAuthCAVolumeName = "client-auth-ca"
	clusterJWTSecretVolumeName = "cluster-jwt"
	masterJWTSecretVolumeName = "master-jwt"
	rocksdbEncryptionVolumeName = "rocksdb-encryption"
	exporterJWTVolumeName = "exporter-jwt"
	ArangodVolumeName = "arangod-data"
	TlsKeyfileVolumeName = "tls-keyfile"
	ClientAuthCAVolumeName = "client-auth-ca"
	ClusterJWTSecretVolumeName = "cluster-jwt"
	MasterJWTSecretVolumeName = "master-jwt"
	RocksdbEncryptionVolumeName = "rocksdb-encryption"
	ExporterJWTVolumeName = "exporter-jwt"
	ArangodVolumeMountDir = "/data"
	RocksDBEncryptionVolumeMountDir = "/secrets/rocksdb/encryption"
	JWTSecretFileVolumeMountDir = "/secrets/jwt"
	TLSKeyfileVolumeMountDir = "/secrets/tls"
	LifecycleVolumeMountDir = "/lifecycle/tools"
	ClientAuthCAVolumeMountDir = "/secrets/client-auth/ca"
	ClusterJWTSecretVolumeMountDir = "/secrets/cluster/jwt"
	ExporterJWTVolumeMountDir = "/secrets/exporter/jwt"
@@ -67,6 +59,31 @@ type EnvValue struct {
	SecretKey string // Key inside secret to fill into the envvar. Only relevant if SecretName is set.
}

// PodCreator provides the pod-level settings used to build a pod.
type PodCreator interface {
	Init(*v1.Pod)
	GetVolumes() ([]v1.Volume, []v1.VolumeMount)
	GetSidecars(*v1.Pod)
	GetInitContainers() ([]v1.Container, error)
	GetFinalizers() []string
	GetTolerations() []v1.Toleration
	GetNodeSelector() map[string]string
	GetServiceAccountName() string
	GetAffinityRole() string
	GetContainerCreator() ContainerCreator
	GetImagePullSecrets() []string
	IsDeploymentMode() bool
}

// ContainerCreator provides the container-level settings used to build the server container.
type ContainerCreator interface {
	GetExecutor() string
	GetProbes() (*v1.Probe, *v1.Probe, error)
	GetResourceRequirements() v1.ResourceRequirements
	GetLifecycle() (*v1.Lifecycle, error)
	GetImagePullPolicy() v1.PullPolicy
	GetImage() string
	GetEnvs() []v1.EnvVar
}
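The split of responsibilities is deliberate: NewPod consumes a PodCreator for pod-level settings, while NewContainer (further down) consumes the ContainerCreator it exposes. A minimal, hypothetical implementation sketch of the container side (the type and its return values are invented for illustration only):

// exampleContainer is a hypothetical ContainerCreator used only to illustrate the contract.
type exampleContainer struct {
	image string
}

func (e exampleContainer) GetExecutor() string                              { return "/usr/sbin/arangod" }
func (e exampleContainer) GetProbes() (*v1.Probe, *v1.Probe, error)         { return nil, nil, nil }
func (e exampleContainer) GetResourceRequirements() v1.ResourceRequirements { return v1.ResourceRequirements{} }
func (e exampleContainer) GetLifecycle() (*v1.Lifecycle, error)             { return NewLifecycle() }
func (e exampleContainer) GetImagePullPolicy() v1.PullPolicy                { return v1.PullIfNotPresent }
func (e exampleContainer) GetImage() string                                 { return e.image }
func (e exampleContainer) GetEnvs() []v1.EnvVar                             { return GetLifecycleEnv() }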
// CreateEnvVar creates an EnvVar structure for given key from given EnvValue.
func (v EnvValue) CreateEnvVar(key string) v1.EnvVar {
	ev := v1.EnvVar{

@@ -75,6 +92,7 @@ func (v EnvValue) CreateEnvVar(key string) v1.EnvVar {
	if v.Value != "" {
		ev.Value = v.Value
	} else if v.SecretName != "" {
		//return CreateEnvSecretKeySelector(key, v.SecretName, v.SecretKey)
		ev.ValueFrom = &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{

@@ -199,82 +217,63 @@ func CreateTLSKeyfileSecretName(deploymentName, role, id string) string {
	return CreatePodName(deploymentName, role, id, "-tls-keyfile")
}

// lifecycleVolumeMounts creates a volume mount structure for shared lifecycle emptyDir.
func lifecycleVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{Name: lifecycleVolumeName, MountPath: LifecycleVolumeMountDir},
// ArangodVolumeMount creates a volume mount structure for arangod.
func ArangodVolumeMount() v1.VolumeMount {
	return v1.VolumeMount{
		Name:      ArangodVolumeName,
		MountPath: ArangodVolumeMountDir,
	}
}

// arangodVolumeMounts creates a volume mount structure for arangod.
func arangodVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{Name: arangodVolumeName, MountPath: ArangodVolumeMountDir},
	}
}

// tlsKeyfileVolumeMounts creates a volume mount structure for a TLS keyfile.
func tlsKeyfileVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			Name: tlsKeyfileVolumeName,
// TlsKeyfileVolumeMount creates a volume mount structure for a TLS keyfile.
func TlsKeyfileVolumeMount() v1.VolumeMount {
	return v1.VolumeMount{
		Name:      TlsKeyfileVolumeName,
		MountPath: TLSKeyfileVolumeMountDir,
		},
	}
}

// clientAuthCACertificateVolumeMounts creates a volume mount structure for a client-auth CA certificate (ca.crt).
func clientAuthCACertificateVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			Name: clientAuthCAVolumeName,
// ClientAuthCACertificateVolumeMount creates a volume mount structure for a client-auth CA certificate (ca.crt).
func ClientAuthCACertificateVolumeMount() v1.VolumeMount {
	return v1.VolumeMount{
		Name:      ClientAuthCAVolumeName,
		MountPath: ClientAuthCAVolumeMountDir,
		},
	}
}

// masterJWTVolumeMounts creates a volume mount structure for a master JWT secret (token).
func masterJWTVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			Name: masterJWTSecretVolumeName,
// MasterJWTVolumeMount creates a volume mount structure for a master JWT secret (token).
func MasterJWTVolumeMount() v1.VolumeMount {
	return v1.VolumeMount{
		Name:      MasterJWTSecretVolumeName,
		MountPath: MasterJWTSecretVolumeMountDir,
		},
	}
}

// clusterJWTVolumeMounts creates a volume mount structure for a cluster JWT secret (token).
func clusterJWTVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			Name: clusterJWTSecretVolumeName,
// ClusterJWTVolumeMount creates a volume mount structure for a cluster JWT secret (token).
func ClusterJWTVolumeMount() v1.VolumeMount {
	return v1.VolumeMount{
		Name:      ClusterJWTSecretVolumeName,
		MountPath: ClusterJWTSecretVolumeMountDir,
		},
	}
}

func exporterJWTVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			Name: exporterJWTVolumeName,
// ExporterJWTVolumeMount creates a volume mount structure for the exporter JWT secret (token).
func ExporterJWTVolumeMount() v1.VolumeMount {
	return v1.VolumeMount{
		Name:      ExporterJWTVolumeName,
		MountPath: ExporterJWTVolumeMountDir,
		},
	}
}

// rocksdbEncryptionVolumeMounts creates a volume mount structure for a RocksDB encryption key.
func rocksdbEncryptionVolumeMounts() []v1.VolumeMount {
	return []v1.VolumeMount{
		{
			Name: rocksdbEncryptionVolumeName,
// RocksdbEncryptionVolumeMount creates a volume mount structure for a RocksDB encryption key.
func RocksdbEncryptionVolumeMount() v1.VolumeMount {
	return v1.VolumeMount{
		Name:      RocksdbEncryptionVolumeName,
		MountPath: RocksDBEncryptionVolumeMountDir,
		},
	}
}
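Each of these exported *VolumeMount helpers is meant to be paired with a volume registered on the pod under the matching *VolumeName constant. A short hedged sketch of that pairing, using CreateVolumeWithSecret from further down in this change (the helper below and its arguments are hypothetical):

// mountClusterJWT is a hypothetical helper showing the mount/volume pairing.
func mountClusterJWT(pod *v1.Pod, c *v1.Container, secretName string) {
	c.VolumeMounts = append(c.VolumeMounts, ClusterJWTVolumeMount())
	pod.Spec.Volumes = append(pod.Spec.Volumes, CreateVolumeWithSecret(ClusterJWTSecretVolumeName, secretName))
}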
// arangodInitContainer creates a container configured to
// initialize a UUID file.
func arangodInitContainer(name, id, engine, alpineImage string, requireUUID bool) v1.Container {
// ArangodInitContainer creates a container configured to initialize a UUID file.
func ArangodInitContainer(name, id, engine, alpineImage string, requireUUID bool) v1.Container {
	uuidFile := filepath.Join(ArangodVolumeMountDir, "UUID")
	engineFile := filepath.Join(ArangodVolumeMountDir, "ENGINE")
	var command string

@@ -299,7 +298,9 @@ func arangodInitContainer(name, id, engine, alpineImage string, requireUUID bool
		},
		Name:  name,
		Image: alpineImage,
		VolumeMounts: arangodVolumeMounts(),
		VolumeMounts: []v1.VolumeMount{
			ArangodVolumeMount(),
		},
		SecurityContext: SecurityContextWithoutCapabilities(),
	}
	return c

@@ -325,15 +326,23 @@ func ExtractPodResourceRequirement(resources v1.ResourceRequirements) v1.Resourc
	}
}

// arangodContainer creates a container configured to run `arangod`.
func arangodContainer(image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig, readinessProbe *HTTPProbeConfig,
	lifecycle *v1.Lifecycle, lifecycleEnvVars []v1.EnvVar, resources v1.ResourceRequirements, noFilterResources bool) v1.Container {
	c := v1.Container{
		Command: append([]string{"/usr/sbin/arangod"}, args...),
// NewContainer creates a container for the specified creator.
func NewContainer(args []string, containerCreator ContainerCreator) (v1.Container, error) {

	liveness, readiness, err := containerCreator.GetProbes()
	if err != nil {
		return v1.Container{}, err
	}

	lifecycle, err := containerCreator.GetLifecycle()
	if err != nil {
		return v1.Container{}, err
	}

	return v1.Container{
		Name:            ServerContainerName,
		Image:           image,
		ImagePullPolicy: imagePullPolicy,
		Lifecycle:       lifecycle,
		Image:   containerCreator.GetImage(),
		Command: append([]string{containerCreator.GetExecutor()}, args...),
		Ports: []v1.ContainerPort{
			{
				Name: "server",

@@ -341,66 +350,17 @@ func arangodContainer(image string, imagePullPolicy v1.PullPolicy, args []string
				Protocol: v1.ProtocolTCP,
			},
		},
		VolumeMounts:    arangodVolumeMounts(),
		SecurityContext: SecurityContextWithoutCapabilities(),
	}
	if noFilterResources {
		c.Resources = resources // if volumeclaimtemplate is specified
	} else {
		c.Resources = ExtractPodResourceRequirement(resources) // Storage is handled via pvcs
	}

	for k, v := range env {
		c.Env = append(c.Env, v.CreateEnvVar(k))
	}
	if livenessProbe != nil {
		c.LivenessProbe = livenessProbe.Create()
	}
	if readinessProbe != nil {
		c.ReadinessProbe = readinessProbe.Create()
	}
	if lifecycle != nil {
		c.Env = append(c.Env, lifecycleEnvVars...)
		c.VolumeMounts = append(c.VolumeMounts, lifecycleVolumeMounts()...)
	}

	return c
}

// arangosyncContainer creates a container configured to run `arangosync`.
func arangosyncContainer(image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig,
	lifecycle *v1.Lifecycle, lifecycleEnvVars []v1.EnvVar, resources v1.ResourceRequirements) v1.Container {
	c := v1.Container{
		Command: append([]string{"/usr/sbin/arangosync"}, args...),
		Name:    ServerContainerName,
		Image:   image,
		ImagePullPolicy: imagePullPolicy,
		Env:            containerCreator.GetEnvs(),
		Resources:      containerCreator.GetResourceRequirements(),
		LivenessProbe:  liveness,
		ReadinessProbe: readiness,
		Lifecycle:      lifecycle,
		Ports: []v1.ContainerPort{
			{
				Name:          "server",
				ContainerPort: int32(ArangoPort),
				Protocol:      v1.ProtocolTCP,
			},
		},
		Resources:       resources,
		ImagePullPolicy: containerCreator.GetImagePullPolicy(),
		SecurityContext: SecurityContextWithoutCapabilities(),
	}
	for k, v := range env {
		c.Env = append(c.Env, v.CreateEnvVar(k))
	}
	if livenessProbe != nil {
		c.LivenessProbe = livenessProbe.Create()
	}
	if lifecycle != nil {
		c.Env = append(c.Env, lifecycleEnvVars...)
		c.VolumeMounts = append(c.VolumeMounts, lifecycleVolumeMounts()...)
	}

	return c
	}, nil
}
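NewContainer only needs the argument list plus a ContainerCreator; image, executor, probes, lifecycle, env and resources are all pulled through the interface. A hedged usage sketch (the addServerContainer helper and its arguments are hypothetical, not part of this commit):

// addServerContainer is a hypothetical helper showing how NewContainer is driven by a PodCreator.
func addServerContainer(pod *v1.Pod, args []string, podCreator PodCreator) error {
	c, err := NewContainer(args, podCreator.GetContainerCreator())
	if err != nil {
		return maskAny(err)
	}
	pod.Spec.Containers = append(pod.Spec.Containers, c)
	return nil
}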
func arangodbexporterContainer(image string, imagePullPolicy v1.PullPolicy, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig) v1.Container {
func ArangodbexporterContainer(image string, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig) v1.Container {
	c := v1.Container{
		Command: append([]string{"/app/arangodb-exporter"}, args...),
		Name:    ExporterContainerName,

@@ -424,103 +384,28 @@ func arangodbexporterContainer(image string, imagePullPolicy v1.PullPolicy, args
	return c
}

// newLifecycle creates a lifecycle structure with preStop handler.
func newLifecycle() (*v1.Lifecycle, []v1.EnvVar, []v1.Volume, error) {
	binaryPath, err := os.Executable()
	if err != nil {
		return nil, nil, nil, maskAny(err)
	}
	exePath := filepath.Join(LifecycleVolumeMountDir, filepath.Base(binaryPath))
	lifecycle := &v1.Lifecycle{
		PreStop: &v1.Handler{
			Exec: &v1.ExecAction{
				Command: append([]string{exePath}, "lifecycle", "preStop"),
			},
		},
	}
	envVars := []v1.EnvVar{
		v1.EnvVar{
			Name: constants.EnvOperatorPodName,
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{
					FieldPath: "metadata.name",
				},
			},
		},
		v1.EnvVar{
			Name: constants.EnvOperatorPodNamespace,
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{
					FieldPath: "metadata.namespace",
				},
			},
		},
		v1.EnvVar{
			Name: constants.EnvOperatorNodeName,
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{
					FieldPath: "spec.nodeName",
				},
			},
		},
		v1.EnvVar{
			Name: constants.EnvOperatorNodeNameArango,
			ValueFrom: &v1.EnvVarSource{
				FieldRef: &v1.ObjectFieldSelector{
					FieldPath: "spec.nodeName",
				},
			},
		},
	}
	vols := []v1.Volume{
		v1.Volume{
			Name: lifecycleVolumeName,
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{},
			},
		},
	}
	return lifecycle, envVars, vols, nil
}
// NewPod creates a basic Pod for given settings.
func NewPod(deploymentName, role, id, podName string, podCreator PodCreator) v1.Pod {

// initLifecycleContainer creates an init-container to copy the lifecycle binary
// to a shared volume.
func initLifecycleContainer(image string) (v1.Container, error) {
	binaryPath, err := os.Executable()
	if err != nil {
		return v1.Container{}, maskAny(err)
	}
	c := v1.Container{
		Command:         append([]string{binaryPath}, "lifecycle", "copy", "--target", LifecycleVolumeMountDir),
		Name:            InitLifecycleContainerName,
		Image:           image,
		ImagePullPolicy: v1.PullIfNotPresent,
		VolumeMounts:    lifecycleVolumeMounts(),
		SecurityContext: SecurityContextWithoutCapabilities(),
	}
	return c, nil
}

// newPod creates a basic Pod for given settings.
func newPod(deploymentName, ns, role, id, podName string, imagePullSecrets []string, finalizers []string, tolerations []v1.Toleration, serviceAccountName string, nodeSelector map[string]string) v1.Pod {
	hostname := CreatePodHostName(deploymentName, role, id)
	p := v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:       podName,
			Labels:     LabelsForDeployment(deploymentName, role),
			Finalizers: finalizers,
			Finalizers: podCreator.GetFinalizers(),
		},
		Spec: v1.PodSpec{
			Hostname:      hostname,
			Subdomain:     CreateHeadlessServiceName(deploymentName),
			RestartPolicy: v1.RestartPolicyNever,
			Tolerations:        tolerations,
			ServiceAccountName: serviceAccountName,
			NodeSelector:       nodeSelector,
			Tolerations:        podCreator.GetTolerations(),
			ServiceAccountName: podCreator.GetServiceAccountName(),
			NodeSelector:       podCreator.GetNodeSelector(),
		},
	}

	// Add ImagePullSecrets
	imagePullSecrets := podCreator.GetImagePullSecrets()
	if imagePullSecrets != nil {
		imagePullSecretsReference := make([]v1.LocalObjectReference, len(imagePullSecrets))
		for id := range imagePullSecrets {
@@ -543,288 +428,10 @@ type ArangodbExporterContainerConf struct {
	Image string
}

// CreateArangodPod creates a Pod that runs `arangod`.
// CreatePod adds an owner to the given pod and calls the k8s api-server to create it.
// If the pod already exists, nil is returned.
// If another error occurs, that error is returned.
func CreateArangodPod(kubecli kubernetes.Interface, developmentMode bool, deployment APIObject,
	role, id, podName, pvcName, image, lifecycleImage, alpineImage string,
	imagePullPolicy v1.PullPolicy, imagePullSecrets []string,
	engine string, requireUUID bool, terminationGracePeriod time.Duration,
	args []string, env map[string]EnvValue, finalizers []string,
	livenessProbe *HTTPProbeConfig, readinessProbe *HTTPProbeConfig, tolerations []v1.Toleration, serviceAccountName string,
	tlsKeyfileSecretName, rocksdbEncryptionSecretName string, clusterJWTSecretName string, nodeSelector map[string]string,
	podPriorityClassName string, resources v1.ResourceRequirements, exporter *ArangodbExporterContainerConf, sidecars []v1.Container, vct *v1.PersistentVolumeClaim) error {

	// Prepare basic pod
	p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id, podName, imagePullSecrets, finalizers, tolerations, serviceAccountName, nodeSelector)
	terminationGracePeriodSeconds := int64(math.Ceil(terminationGracePeriod.Seconds()))
	p.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds

	// Add lifecycle container
	var lifecycle *v1.Lifecycle
	var lifecycleEnvVars []v1.EnvVar
	var lifecycleVolumes []v1.Volume
	if lifecycleImage != "" {
		c, err := initLifecycleContainer(lifecycleImage)
		if err != nil {
			return maskAny(err)
		}
		p.Spec.InitContainers = append(p.Spec.InitContainers, c)
		lifecycle, lifecycleEnvVars, lifecycleVolumes, err = newLifecycle()
		if err != nil {
			return maskAny(err)
		}
	}

	// Add arangod container
	c := arangodContainer(image, imagePullPolicy, args, env, livenessProbe, readinessProbe, lifecycle, lifecycleEnvVars, resources, vct != nil)
	if tlsKeyfileSecretName != "" {
		c.VolumeMounts = append(c.VolumeMounts, tlsKeyfileVolumeMounts()...)
	}
	if rocksdbEncryptionSecretName != "" {
		c.VolumeMounts = append(c.VolumeMounts, rocksdbEncryptionVolumeMounts()...)
	}
	if clusterJWTSecretName != "" {
		c.VolumeMounts = append(c.VolumeMounts, clusterJWTVolumeMounts()...)
	}

	p.Spec.Containers = append(p.Spec.Containers, c)

	// Add arangodb exporter container
	if exporter != nil {
		c = arangodbexporterContainer(exporter.Image, imagePullPolicy, exporter.Args, exporter.Env, exporter.LivenessProbe)
		if exporter.JWTTokenSecretName != "" {
			c.VolumeMounts = append(c.VolumeMounts, exporterJWTVolumeMounts()...)
		}
		if tlsKeyfileSecretName != "" {
			c.VolumeMounts = append(c.VolumeMounts, tlsKeyfileVolumeMounts()...)
		}
		p.Spec.Containers = append(p.Spec.Containers, c)
		p.Labels[LabelKeyArangoExporter] = "yes"
	}

	// Add sidecars
	if len(sidecars) > 0 {
		p.Spec.Containers = append(p.Spec.Containers, sidecars...)
	}

	// Add priorityClassName
	p.Spec.PriorityClassName = podPriorityClassName

	// Add UUID init container
	if alpineImage != "" {
		p.Spec.InitContainers = append(p.Spec.InitContainers, arangodInitContainer("uuid", id, engine, alpineImage, requireUUID))
	}

	// Add volume
	if pvcName != "" {
		// Create PVC
		vol := v1.Volume{
			Name: arangodVolumeName,
			VolumeSource: v1.VolumeSource{
				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
					ClaimName: pvcName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	} else {
		// Create emptydir volume
		vol := v1.Volume{
			Name: arangodVolumeName,
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// TLS keyfile secret mount (if any)
	if tlsKeyfileSecretName != "" {
		vol := v1.Volume{
			Name: tlsKeyfileVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: tlsKeyfileSecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// RocksDB encryption secret mount (if any)
	if rocksdbEncryptionSecretName != "" {
		vol := v1.Volume{
			Name: rocksdbEncryptionVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: rocksdbEncryptionSecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// Exporter Token Mount
	if exporter != nil && exporter.JWTTokenSecretName != "" {
		vol := v1.Volume{
			Name: exporterJWTVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: exporter.JWTTokenSecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// Cluster JWT secret mount (if any)
	if clusterJWTSecretName != "" {
		vol := v1.Volume{
			Name: clusterJWTSecretVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: clusterJWTSecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// Lifecycle volumes (if any)
	p.Spec.Volumes = append(p.Spec.Volumes, lifecycleVolumes...)

	// Add (anti-)affinity
	p.Spec.Affinity = createAffinity(deployment.GetName(), role, !developmentMode, "")

	if err := createPod(kubecli, &p, deployment.GetNamespace(), deployment.AsOwner()); err != nil {
		return maskAny(err)
	}
	return nil
}
// CreateArangoSyncPod creates a Pod that runs `arangosync`.
// If the pod already exists, nil is returned.
// If another error occurs, that error is returned.
func CreateArangoSyncPod(kubecli kubernetes.Interface, developmentMode bool, deployment APIObject, role, id, podName, image, lifecycleImage string,
	imagePullPolicy v1.PullPolicy, imagePullSecrets []string,
	terminationGracePeriod time.Duration, args []string, env map[string]EnvValue, livenessProbe *HTTPProbeConfig, tolerations []v1.Toleration, serviceAccountName string,
	tlsKeyfileSecretName, clientAuthCASecretName, masterJWTSecretName, clusterJWTSecretName, affinityWithRole string, nodeSelector map[string]string,
	podPriorityClassName string, resources v1.ResourceRequirements, sidecars []v1.Container) error {
	// Prepare basic pod
	p := newPod(deployment.GetName(), deployment.GetNamespace(), role, id, podName, imagePullSecrets, nil, tolerations, serviceAccountName, nodeSelector)
	terminationGracePeriodSeconds := int64(math.Ceil(terminationGracePeriod.Seconds()))
	p.Spec.TerminationGracePeriodSeconds = &terminationGracePeriodSeconds

	// Add lifecycle container
	var lifecycle *v1.Lifecycle
	var lifecycleEnvVars []v1.EnvVar
	var lifecycleVolumes []v1.Volume
	if lifecycleImage != "" {
		c, err := initLifecycleContainer(lifecycleImage)
		if err != nil {
			return maskAny(err)
		}
		p.Spec.InitContainers = append(p.Spec.InitContainers, c)
		lifecycle, lifecycleEnvVars, lifecycleVolumes, err = newLifecycle()
		if err != nil {
			return maskAny(err)
		}
	}

	// Lifecycle volumes (if any)
	p.Spec.Volumes = append(p.Spec.Volumes, lifecycleVolumes...)

	// Add arangosync container
	c := arangosyncContainer(image, imagePullPolicy, args, env, livenessProbe, lifecycle, lifecycleEnvVars, resources)
	if tlsKeyfileSecretName != "" {
		c.VolumeMounts = append(c.VolumeMounts, tlsKeyfileVolumeMounts()...)
	}
	if clientAuthCASecretName != "" {
		c.VolumeMounts = append(c.VolumeMounts, clientAuthCACertificateVolumeMounts()...)
	}
	if masterJWTSecretName != "" {
		c.VolumeMounts = append(c.VolumeMounts, masterJWTVolumeMounts()...)
	}
	if clusterJWTSecretName != "" {
		c.VolumeMounts = append(c.VolumeMounts, clusterJWTVolumeMounts()...)
	}
	p.Spec.Containers = append(p.Spec.Containers, c)

	// Add sidecars
	if len(sidecars) > 0 {
		p.Spec.Containers = append(p.Spec.Containers, sidecars...)
	}

	// Add priorityClassName
	p.Spec.PriorityClassName = podPriorityClassName

	// TLS keyfile secret mount (if any)
	if tlsKeyfileSecretName != "" {
		vol := v1.Volume{
			Name: tlsKeyfileVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: tlsKeyfileSecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// Client Authentication certificate secret mount (if any)
	if clientAuthCASecretName != "" {
		vol := v1.Volume{
			Name: clientAuthCAVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: clientAuthCASecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// Master JWT secret mount (if any)
	if masterJWTSecretName != "" {
		vol := v1.Volume{
			Name: masterJWTSecretVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: masterJWTSecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// Cluster JWT secret mount (if any)
	if clusterJWTSecretName != "" {
		vol := v1.Volume{
			Name: clusterJWTSecretVolumeName,
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: clusterJWTSecretName,
				},
			},
		}
		p.Spec.Volumes = append(p.Spec.Volumes, vol)
	}

	// Add (anti-)affinity
	p.Spec.Affinity = createAffinity(deployment.GetName(), role, !developmentMode, affinityWithRole)

	if err := createPod(kubecli, &p, deployment.GetNamespace(), deployment.AsOwner()); err != nil {
		return maskAny(err)
	}
	return nil
}
// createPod adds an owner to the given pod and calls the k8s api-server to create it.
// If the pod already exists, nil is returned.
// If another error occurs, that error is returned.
func createPod(kubecli kubernetes.Interface, pod *v1.Pod, ns string, owner metav1.OwnerReference) error {
func CreatePod(kubecli kubernetes.Interface, pod *v1.Pod, ns string, owner metav1.OwnerReference) error {
	addOwnerRefToObject(pod.GetObjectMeta(), &owner)
	if _, err := kubecli.CoreV1().Pods(ns).Create(pod); err != nil && !IsAlreadyExists(err) {
		return maskAny(err)

@@ -839,3 +446,60 @@ func SecurityContextWithoutCapabilities() *v1.SecurityContext {
		},
	}
}

// CreateVolumeEmptyDir creates an emptyDir volume with the given name.
func CreateVolumeEmptyDir(name string) v1.Volume {
	return v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			EmptyDir: &v1.EmptyDirVolumeSource{},
		},
	}
}

// CreateVolumeWithSecret creates a volume with the given name, backed by the named secret.
func CreateVolumeWithSecret(name, secretName string) v1.Volume {
	return v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			Secret: &v1.SecretVolumeSource{
				SecretName: secretName,
			},
		},
	}
}

// CreateVolumeWithPersitantVolumeClaim creates a volume with the given name, backed by the named PVC.
func CreateVolumeWithPersitantVolumeClaim(name, claimName string) v1.Volume {
	return v1.Volume{
		Name: name,
		VolumeSource: v1.VolumeSource{
			PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
				ClaimName: claimName,
			},
		},
	}
}

// CreateEnvFieldPath creates an EnvVar that resolves its value from the given downward-API field path.
func CreateEnvFieldPath(name, fieldPath string) v1.EnvVar {
	return v1.EnvVar{
		Name: name,
		ValueFrom: &v1.EnvVarSource{
			FieldRef: &v1.ObjectFieldSelector{
				FieldPath: fieldPath,
			},
		},
	}
}

// CreateEnvSecretKeySelector creates an EnvVar that resolves its value from a key in the named secret.
func CreateEnvSecretKeySelector(name, SecretKeyName, secretKey string) v1.EnvVar {
	return v1.EnvVar{
		Name:  name,
		Value: "",
		ValueFrom: &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{
					Name: SecretKeyName,
				},
				Key: secretKey,
			},
		},
	}
}
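These helpers cover the two EnvVar shapes used throughout this change: a downward-API field reference and a secret key reference. A short hedged usage sketch (the secret name "arango-license" is only an example):

// Sketch: building env vars with the helpers (hypothetical values).
envs := []v1.EnvVar{
	CreateEnvFieldPath(constants.EnvOperatorPodName, "metadata.name"),
	CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey, "arango-license", constants.SecretKeyToken),
}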
@@ -177,14 +177,7 @@ func runVolumeInspector(ctx context.Context, kube kubernetes.Interface, ns, name
				},
			},
			Volumes: []corev1.Volume{
				corev1.Volume{
					Name: "data",
					VolumeSource: corev1.VolumeSource{
						PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{
							ClaimName: claimname,
						},
					},
				},
				k8sutil.CreateVolumeWithPersitantVolumeClaim("data", claimname),
			},
		},
	}