mirror of https://github.com/arangodb/kube-arangodb.git (synced 2024-12-14 11:57:37 +00:00)
Merge pull request #206 from arangodb/feature/rotate-on-args-change
Rotate server on changed arguments
commit 86f5e46ef6
7 changed files with 339 additions and 58 deletions
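In brief, the change works like this: the operator now derives the expected command line for each server (via the new GetExpectedPodArguments), compares it with the arguments found on the running server container, and rotates the pod when they differ. A minimal, self-contained Go sketch of that comparison follows; it is not code from this commit, and the container type and argument values are illustrative stand-ins for the Kubernetes core/v1 Container used by the operator.

// sketch: illustrates the argument-comparison check added by this PR,
// using a stand-in container type instead of core/v1.Container.
package main

import (
	"fmt"
	"strings"
)

// container is a stand-in for the core/v1 Container fields used here.
type container struct {
	Command []string // full command; element 0 is the binary itself
	Args    []string // arguments when the image's default entrypoint is used
}

// containerArgs mirrors the getContainerArgs helper in the diff below:
// when Command is set, the binary name is skipped and only its arguments count.
func containerArgs(c container) []string {
	if len(c.Command) >= 1 {
		return c.Command[1:]
	}
	return c.Args
}

// needsRotation mirrors the new check in podNeedsRotation: expected and actual
// arguments are joined into single strings and compared; any difference means
// the pod must be rotated so it restarts with the new command line.
func needsRotation(expectedArgs []string, c container) bool {
	return strings.Join(expectedArgs, " ") != strings.Join(containerArgs(c), " ")
}

func main() {
	running := container{Command: []string{
		"/usr/sbin/arangod",
		"--server.endpoint=tcp://[::]:8529",
		"--log.level=INFO",
	}}
	expected := []string{
		"--server.endpoint=tcp://[::]:8529",
		"--log.level=DEBUG", // the user changed the log level in the spec
	}
	fmt.Println(needsRotation(expected, running)) // prints: true
}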
@@ -366,3 +366,9 @@ func (d *Deployment) DeleteSecret(secretName string) error {
 	}
 	return nil
 }
+
+// GetExpectedPodArguments creates command line arguments for a server in the given group with given ID.
+func (d *Deployment) GetExpectedPodArguments(apiObject metav1.Object, deplSpec api.DeploymentSpec, group api.ServerGroup,
+	agents api.MemberStatusList, id string) []string {
+	return d.resources.GetExpectedPodArguments(apiObject, deplSpec, group, agents, id)
+}
@@ -29,6 +29,7 @@ import (
 	driver "github.com/arangodb/go-driver"
 	"github.com/arangodb/go-driver/agency"
 	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
@@ -89,4 +90,7 @@ type Context interface {
 	// DeleteSecret removes the Secret with given name.
 	// If the secret does not exist, the error is ignored.
 	DeleteSecret(secretName string) error
+	// GetExpectedPodArguments creates command line arguments for a server in the given group with given ID.
+	GetExpectedPodArguments(apiObject metav1.Object, deplSpec api.DeploymentSpec, group api.ServerGroup,
+		agents api.MemberStatusList, id string) []string
 }
@@ -23,6 +23,8 @@
 package reconcile
 
 import (
+	"strings"
+
 	"github.com/rs/zerolog"
 	"github.com/rs/zerolog/log"
 	"k8s.io/api/core/v1"
@@ -54,7 +56,8 @@ func (d *Reconciler) CreatePlan() error {
 	apiObject := d.context.GetAPIObject()
 	spec := d.context.GetSpec()
 	status, lastVersion := d.context.GetStatus()
-	newPlan, changed := createPlan(d.log, apiObject, status.Plan, spec, status, pods, d.context.GetTLSKeyfile, d.context.GetTLSCA, d.context.GetPvc, d.context.CreateEvent)
+	ctx := newPlanBuilderContext(d.context)
+	newPlan, changed := createPlan(d.log, apiObject, status.Plan, spec, status, pods, ctx)
 
 	// If not change, we're done
 	if !changed {
@@ -79,10 +82,7 @@ func (d *Reconciler) CreatePlan() error {
 func createPlan(log zerolog.Logger, apiObject k8sutil.APIObject,
 	currentPlan api.Plan, spec api.DeploymentSpec,
 	status api.DeploymentStatus, pods []v1.Pod,
-	getTLSKeyfile func(group api.ServerGroup, member api.MemberStatus) (string, error),
-	getTLSCA func(string) (string, string, bool, error),
-	getPVC func(pvcName string) (*v1.PersistentVolumeClaim, error),
-	createEvent func(evt *k8sutil.Event)) (api.Plan, bool) {
+	context PlanBuilderContext) (api.Plan, bool) {
 	if len(currentPlan) > 0 {
 		// Plan already exists, complete that first
 		return currentPlan, false
@@ -165,7 +165,7 @@ func createPlan(log zerolog.Logger, apiObject k8sutil.APIObject,
 		if decision.UpgradeNeeded && decision.UpgradeAllowed {
 			plan = append(plan, createUpgradeMemberPlan(log, m, group, "Version upgrade")...)
 		} else {
-			rotNeeded, reason := podNeedsRotation(*p, apiObject, spec, group, status.Members.Agents, m.ID)
+			rotNeeded, reason := podNeedsRotation(log, *p, apiObject, spec, group, status.Members.Agents, m.ID, context)
 			if rotNeeded {
 				plan = append(plan, createRotateMemberPlan(log, m, group, reason)...)
 			}
@@ -179,17 +179,17 @@ func createPlan(log zerolog.Logger, apiObject k8sutil.APIObject,
 
 	// Check for the need to rotate TLS certificate of a members
 	if len(plan) == 0 {
-		plan = createRotateTLSServerCertificatePlan(log, spec, status, getTLSKeyfile)
+		plan = createRotateTLSServerCertificatePlan(log, spec, status, context.GetTLSKeyfile)
 	}
 
 	// Check for changes storage classes or requirements
 	if len(plan) == 0 {
-		plan = createRotateServerStoragePlan(log, apiObject, spec, status, getPVC, createEvent)
+		plan = createRotateServerStoragePlan(log, apiObject, spec, status, context.GetPvc, context.CreateEvent)
 	}
 
 	// Check for the need to rotate TLS CA certificate and all members
 	if len(plan) == 0 {
-		plan = createRotateTLSCAPlan(log, apiObject, spec, status, getTLSCA, createEvent)
+		plan = createRotateTLSCAPlan(log, apiObject, spec, status, context.GetTLSCA, context.CreateEvent)
 	}
 
 	// Return plan
@@ -241,12 +241,14 @@ func podNeedsUpgrading(p v1.Pod, spec api.DeploymentSpec, images api.ImageInfoLi
 // given pod differs from what it should be according to the
 // given deployment spec.
 // When true is returned, a reason for the rotation is already returned.
-func podNeedsRotation(p v1.Pod, apiObject metav1.Object, spec api.DeploymentSpec,
-	group api.ServerGroup, agents api.MemberStatusList, id string) (bool, string) {
+func podNeedsRotation(log zerolog.Logger, p v1.Pod, apiObject metav1.Object, spec api.DeploymentSpec,
+	group api.ServerGroup, agents api.MemberStatusList, id string,
+	context PlanBuilderContext) (bool, string) {
 	groupSpec := spec.GetServerGroupSpec(group)
 
 	// Check image pull policy
-	if c, found := k8sutil.GetContainerByName(&p, k8sutil.ServerContainerName); found {
+	c, found := k8sutil.GetContainerByName(&p, k8sutil.ServerContainerName)
+	if found {
 		if c.ImagePullPolicy != spec.GetImagePullPolicy() {
 			return true, "Image pull policy changed"
 		}
@@ -255,15 +257,15 @@ func podNeedsRotation(p v1.Pod, apiObject metav1.Object, spec api.DeploymentSpec
 	}
 
 	// Check arguments
-	/*expectedArgs := createArangodArgs(apiObject, spec, group, agents, id)
-	if len(expectedArgs) != len(c.Args) {
+	expectedArgs := strings.Join(context.GetExpectedPodArguments(apiObject, spec, group, agents, id), " ")
+	actualArgs := strings.Join(getContainerArgs(c), " ")
+	if expectedArgs != actualArgs {
+		log.Debug().
+			Str("actual-args", actualArgs).
+			Str("expected-args", expectedArgs).
+			Msg("Arguments changed. Rotation needed.")
 		return true, "Arguments changed"
 	}
-	for i, a := range expectedArgs {
-		if c.Args[i] != a {
-			return true, "Arguments changed"
-		}
-	}*/
 
 	// Check service account
 	if normalizeServiceAccountName(p.Spec.ServiceAccountName) != normalizeServiceAccountName(groupSpec.GetServiceAccountName()) {
@@ -352,3 +354,10 @@ func createUpgradeMemberPlan(log zerolog.Logger, member api.MemberStatus,
 	}
 	return plan
 }
+
+func getContainerArgs(c v1.Container) []string {
+	if len(c.Command) >= 1 {
+		return c.Command[1:]
+	}
+	return c.Args
+}
pkg/deployment/reconcile/plan_builder_context.go (new file, 53 lines)
@@ -0,0 +1,53 @@
//
// DISCLAIMER
//
// Copyright 2018 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package reconcile

import (
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// PlanBuilderContext contains context methods provided to plan builders.
type PlanBuilderContext interface {
	// GetTLSKeyfile returns the keyfile encoded TLS certificate+key for
	// the given member.
	GetTLSKeyfile(group api.ServerGroup, member api.MemberStatus) (string, error)
	// GetTLSCA returns the TLS CA certificate in the secret with given name.
	// Returns: publicKey, privateKey, ownerByDeployment, error
	GetTLSCA(secretName string) (string, string, bool, error)
	// CreateEvent creates a given event.
	// On error, the error is logged.
	CreateEvent(evt *k8sutil.Event)
	// GetPvc gets a PVC by the given name, in the samespace of the deployment.
	GetPvc(pvcName string) (*v1.PersistentVolumeClaim, error)
	// GetExpectedPodArguments creates command line arguments for a server in the given group with given ID.
	GetExpectedPodArguments(apiObject metav1.Object, deplSpec api.DeploymentSpec, group api.ServerGroup,
		agents api.MemberStatusList, id string) []string
}

// newPlanBuilderContext creates a PlanBuilderContext from the given context
func newPlanBuilderContext(ctx Context) PlanBuilderContext {
	return ctx
}
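A note on the constructor at the end of this new file: newPlanBuilderContext can simply return its argument because PlanBuilderContext declares a subset of the methods of the deployment Context interface, so any Context value already satisfies it and the plan builders only see the methods they need. The following self-contained sketch (not repository code; the wide, narrow, and deployment names are made up) shows the same interface-narrowing pattern.

package main

import "fmt"

// wide stands in for the full deployment Context interface.
type wide interface {
	GetExpectedPodArguments() []string
	DeleteSecret(name string) error
}

// narrow stands in for PlanBuilderContext: a strict subset of wide.
type narrow interface {
	GetExpectedPodArguments() []string
}

// deployment is a toy implementation of wide.
type deployment struct{}

func (deployment) GetExpectedPodArguments() []string { return []string{"--log.level=INFO"} }
func (deployment) DeleteSecret(name string) error    { return nil }

// newNarrow mirrors newPlanBuilderContext: no wrapper is needed, the value is
// returned as-is and callers only see the smaller method set.
func newNarrow(ctx wide) narrow { return ctx }

func main() {
	fmt.Println(newNarrow(deployment{}).GetExpectedPodArguments())
}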
@@ -37,18 +37,40 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 )
 
+type testContext struct{}
+
+// GetTLSKeyfile returns the keyfile encoded TLS certificate+key for
+// the given member.
+func (c *testContext) GetTLSKeyfile(group api.ServerGroup, member api.MemberStatus) (string, error) {
+	return "", maskAny(fmt.Errorf("Not implemented"))
+}
+
+// GetTLSCA returns the TLS CA certificate in the secret with given name.
+// Returns: publicKey, privateKey, ownerByDeployment, error
+func (c *testContext) GetTLSCA(secretName string) (string, string, bool, error) {
+	return "", "", false, maskAny(fmt.Errorf("Not implemented"))
+}
+
+// CreateEvent creates a given event.
+// On error, the error is logged.
+func (c *testContext) CreateEvent(evt *k8sutil.Event) {
+	// not implemented
+}
+
+// GetPvc gets a PVC by the given name, in the samespace of the deployment.
+func (c *testContext) GetPvc(pvcName string) (*v1.PersistentVolumeClaim, error) {
+	return nil, maskAny(fmt.Errorf("Not implemented"))
+}
+
+// GetExpectedPodArguments creates command line arguments for a server in the given group with given ID.
+func (c *testContext) GetExpectedPodArguments(apiObject metav1.Object, deplSpec api.DeploymentSpec, group api.ServerGroup,
+	agents api.MemberStatusList, id string) []string {
+	return nil // not implemented
+}
+
 // TestCreatePlanSingleScale creates a `single` deployment to test the creating of scaling plan.
 func TestCreatePlanSingleScale(t *testing.T) {
-	getTLSKeyfile := func(group api.ServerGroup, member api.MemberStatus) (string, error) {
-		return "", maskAny(fmt.Errorf("Not implemented"))
-	}
-	getTLSCA := func(string) (string, string, bool, error) {
-		return "", "", false, maskAny(fmt.Errorf("Not implemented"))
-	}
-	getPVC := func(pvcName string) (*v1.PersistentVolumeClaim, error) {
-		return nil, maskAny(fmt.Errorf("Not implemented"))
-	}
-	createEvent := func(evt *k8sutil.Event) {}
+	c := &testContext{}
 	log := zerolog.Nop()
 	spec := api.DeploymentSpec{
 		Mode: api.NewMode(api.DeploymentModeSingle),
@@ -64,7 +86,7 @@ func TestCreatePlanSingleScale(t *testing.T) {
 
 	// Test with empty status
 	var status api.DeploymentStatus
-	newPlan, changed := createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed := createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	assert.Len(t, newPlan, 0) // Single mode does not scale
 
@@ -75,7 +97,7 @@ func TestCreatePlanSingleScale(t *testing.T) {
 			PodName: "something",
 		},
 	}
-	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	assert.Len(t, newPlan, 0) // Single mode does not scale
 
@@ -90,23 +112,14 @@ func TestCreatePlanSingleScale(t *testing.T) {
 			PodName: "something1",
 		},
 	}
-	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	assert.Len(t, newPlan, 0) // Single mode does not scale
 }
 
 // TestCreatePlanActiveFailoverScale creates a `ActiveFailover` deployment to test the creating of scaling plan.
 func TestCreatePlanActiveFailoverScale(t *testing.T) {
-	getTLSKeyfile := func(group api.ServerGroup, member api.MemberStatus) (string, error) {
-		return "", maskAny(fmt.Errorf("Not implemented"))
-	}
-	getTLSCA := func(string) (string, string, bool, error) {
-		return "", "", false, maskAny(fmt.Errorf("Not implemented"))
-	}
-	getPVC := func(pvcName string) (*v1.PersistentVolumeClaim, error) {
-		return nil, maskAny(fmt.Errorf("Not implemented"))
-	}
-	createEvent := func(evt *k8sutil.Event) {}
+	c := &testContext{}
 	log := zerolog.Nop()
 	spec := api.DeploymentSpec{
 		Mode: api.NewMode(api.DeploymentModeActiveFailover),
@@ -123,7 +136,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) {
 
 	// Test with empty status
 	var status api.DeploymentStatus
-	newPlan, changed := createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed := createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	require.Len(t, newPlan, 2)
 	assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type)
@@ -136,7 +149,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) {
 			PodName: "something",
 		},
 	}
-	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	require.Len(t, newPlan, 1)
 	assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type)
@@ -161,7 +174,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) {
 			PodName: "something4",
 		},
 	}
-	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	require.Len(t, newPlan, 2) // Note: Downscaling is only down 1 at a time
 	assert.Equal(t, api.ActionTypeShutdownMember, newPlan[0].Type)
@@ -172,16 +185,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) {
 
 // TestCreatePlanClusterScale creates a `cluster` deployment to test the creating of scaling plan.
 func TestCreatePlanClusterScale(t *testing.T) {
-	getTLSKeyfile := func(group api.ServerGroup, member api.MemberStatus) (string, error) {
-		return "", maskAny(fmt.Errorf("Not implemented"))
-	}
-	getTLSCA := func(string) (string, string, bool, error) {
-		return "", "", false, maskAny(fmt.Errorf("Not implemented"))
-	}
-	getPVC := func(pvcName string) (*v1.PersistentVolumeClaim, error) {
-		return nil, maskAny(fmt.Errorf("Not implemented"))
-	}
-	createEvent := func(evt *k8sutil.Event) {}
+	c := &testContext{}
 	log := zerolog.Nop()
 	spec := api.DeploymentSpec{
 		Mode: api.NewMode(api.DeploymentModeCluster),
@@ -197,7 +201,7 @@ func TestCreatePlanClusterScale(t *testing.T) {
 
 	// Test with empty status
 	var status api.DeploymentStatus
-	newPlan, changed := createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed := createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	require.Len(t, newPlan, 6) // Adding 3 dbservers & 3 coordinators (note: agents do not scale now)
 	assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type)
@@ -230,7 +234,7 @@ func TestCreatePlanClusterScale(t *testing.T) {
 			PodName: "coordinator1",
 		},
 	}
-	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	require.Len(t, newPlan, 3)
 	assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type)
@@ -267,7 +271,7 @@ func TestCreatePlanClusterScale(t *testing.T) {
 	}
 	spec.DBServers.Count = util.NewInt(1)
 	spec.Coordinators.Count = util.NewInt(1)
-	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, getTLSKeyfile, getTLSCA, getPVC, createEvent)
+	newPlan, changed = createPlan(log, depl, nil, spec, status, nil, c)
 	assert.True(t, changed)
 	require.Len(t, newPlan, 5) // Note: Downscaling is done 1 at a time
 	assert.Equal(t, api.ActionTypeCleanOutMember, newPlan[0].Type)
@@ -247,3 +247,16 @@ func (r *Resources) InspectPods(ctx context.Context) error {
 	}
 	return nil
 }
+
+// GetExpectedPodArguments creates command line arguments for a server in the given group with given ID.
+func (r *Resources) GetExpectedPodArguments(apiObject metav1.Object, deplSpec api.DeploymentSpec, group api.ServerGroup,
+	agents api.MemberStatusList, id string) []string {
+	if group.IsArangod() {
+		return createArangodArgs(apiObject, deplSpec, group, agents, id, false)
+	}
+	if group.IsArangosync() {
+		groupSpec := deplSpec.GetServerGroupSpec(group)
+		return createArangoSyncArgs(apiObject, deplSpec, group, groupSpec, agents, id)
+	}
+	return nil
+}
tests/change_args_test.go (new file, 192 lines)
@@ -0,0 +1,192 @@
//
// DISCLAIMER
//
// Copyright 2018 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

package tests

import (
	"context"
	"fmt"
	"testing"
	"time"

	"github.com/dchest/uniuri"
	"github.com/stretchr/testify/assert"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	driver "github.com/arangodb/go-driver"
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
	"github.com/arangodb/kube-arangodb/pkg/client"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

// TestChangeArgsAgents tests the creating of an active failover deployment
// with default settings and once ready changes the arguments of the agents.
func TestChangeArgsAgents(t *testing.T) {
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-chga-rs-" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeActiveFailover)

	// Create deployment
	_, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}
	// Prepare cleanup
	defer removeDeployment(c, depl.GetName(), ns)

	// Wait for deployment to be ready
	apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
	if err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for single server available
	if err := waitUntilVersionUp(client, nil); err != nil {
		t.Fatalf("ActiveFailover servers not running returning version in time: %v", err)
	}

	// Check server role
	assert.NoError(t, testServerRole(ctx, client, driver.ServerRoleSingleActive))

	// Now change agent arguments
	if _, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
		spec.Agents.Args = []string{"--log.level=DEBUG"}
	}); err != nil {
		t.Fatalf("Failed to update deployment: %v", err)
	}

	// Wait until all agents have the right arguments
	if _, err := waitUntilDeployment(c, depl.GetName(), ns, func(d *api.ArangoDeployment) error {
		members := d.Status.Members
		if len(members.Agents) != 3 {
			return fmt.Errorf("Expected 3 agents, got %d", len(members.Agents))
		}
		pods := kubecli.CoreV1().Pods(ns)
		for _, m := range members.Agents {
			pod, err := pods.Get(m.PodName, metav1.GetOptions{})
			if err != nil {
				return maskAny(err)
			}
			found := false
			for _, c := range pod.Spec.Containers {
				if c.Name != k8sutil.ServerContainerName {
					continue
				}
				// Check command
				for _, a := range append(c.Args, c.Command...) {
					if a == "--log.level=DEBUG" {
						found = true
					}
				}
			}
			if !found {
				return fmt.Errorf("Did not find new argument")
			}
		}
		return nil
	}, time.Minute*10); err != nil {
		t.Fatalf("Deployment not updated in time: %v", err)
	}
}

// TestChangeArgsDBServer tests the creating of a cluster deployment
// with default settings and once ready changes the arguments of the dbservers.
func TestChangeArgsDBServer(t *testing.T) {
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-chga-db-" + uniuri.NewLen(4))
	depl.Spec.Mode = api.NewMode(api.DeploymentModeCluster)

	// Create deployment
	_, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}
	// Prepare cleanup
	defer removeDeployment(c, depl.GetName(), ns)

	// Wait for deployment to be ready
	apiObject, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
	if err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster available
	if err := waitUntilVersionUp(client, nil); err != nil {
		t.Fatalf("Cluster servers not running returning version in time: %v", err)
	}

	// Now change agent arguments
	if _, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
		spec.DBServers.Args = []string{"--log.level=DEBUG"}
	}); err != nil {
		t.Fatalf("Failed to update deployment: %v", err)
	}

	// Wait until all dbservers have the right arguments
	if _, err := waitUntilDeployment(c, depl.GetName(), ns, func(d *api.ArangoDeployment) error {
		members := d.Status.Members
		if len(members.DBServers) != 3 {
			return fmt.Errorf("Expected 3 dbservers, got %d", len(members.DBServers))
		}
		pods := kubecli.CoreV1().Pods(ns)
		for _, m := range members.DBServers {
			pod, err := pods.Get(m.PodName, metav1.GetOptions{})
			if err != nil {
				return maskAny(err)
			}
			found := false
			for _, c := range pod.Spec.Containers {
				if c.Name != k8sutil.ServerContainerName {
					continue
				}
				// Check command
				for _, a := range append(c.Args, c.Command...) {
					if a == "--log.level=DEBUG" {
						found = true
					}
				}
			}
			if !found {
				return fmt.Errorf("Did not find new argument")
			}
		}
		return nil
	}, time.Minute*10); err != nil {
		t.Fatalf("Deployment not updated in time: %v", err)
	}
}