
Feature/restart pods on sidecar changes (#420)

* added sidecar changes to pod rotation
* change log
* go fmt
* sidecar test
Kaveh Vahedipour 2019-07-31 10:58:44 +02:00 committed by Max Neunhöffer
parent b192e7b0a4
commit 08ecc445cd
11 changed files with 569 additions and 7 deletions
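The gist of the change: the operator now records each member's sidecar containers in its status and compares them against the deployment spec during reconciliation, rotating the pod on any mismatch. A minimal sketch of that comparison idea, using only plain `k8s.io/api/core/v1` types rather than the operator's actual helpers (the names here are illustrative):

```go
package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1"
)

// needsRotation is an illustrative stand-in for the operator's check:
// rotate when the sidecars recorded in the member status no longer
// match the sidecars requested in the group spec.
func needsRotation(spec []v1.Container, status map[string]v1.Container) bool {
	if len(spec) != len(status) {
		return true
	}
	for _, c := range spec {
		got, ok := status[c.Name]
		if !ok || !reflect.DeepEqual(got, c) {
			return true
		}
	}
	return false
}

func main() {
	spec := []v1.Container{{Name: "nginx", Image: "nginx:1.7.9"}}
	status := map[string]v1.Container{"nginx": {Name: "nginx", Image: "nginx:1.7.10"}}
	fmt.Println(needsRotation(spec, status)) // true: the image changed
}
```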


@@ -1,6 +1,7 @@
# Change Log
## [0.3.13]() (XXXX-XX-XX)
- Added sidecar changes to pod rotation criteria
- Added ArangoDB version and image id to member status
## [0.3.12](https://github.com/arangodb/kube-arangodb/tree/0.3.12) (2019-07-04)


@@ -220,7 +220,7 @@ kubectl get all --all-namespaces
- Attach `tiller` to proper role
```
kubectl create clusterrolebinding tiller-cluster-rule \
  --clusterrole=cluster-admin --serviceaccount=kube-system:tiller
```


@@ -22,7 +22,9 @@
package v1alpha
import "github.com/arangodb/kube-arangodb/pkg/util"
import (
"github.com/arangodb/kube-arangodb/pkg/util"
)
// DeploymentStatus contains the status part of a Cluster resource.
type DeploymentStatus struct {


@@ -23,6 +23,7 @@
package v1alpha
import (
"reflect"
"time"
driver "github.com/arangodb/go-driver"
@@ -54,6 +55,8 @@ type MemberStatus struct {
IsInitialized bool `json:"initialized"`
// CleanoutJobID holds the ID of the agency job for cleaning out this server
CleanoutJobID string `json:"cleanout-job-id,omitempty"`
// SideCarSpecs holds the specifications of the sidecar containers for this member
SideCarSpecs map[string]v1.Container
// ArangoVersion holds the ArangoDB version of this member
ArangoVersion driver.Version `json:"arango-version,omitempty"`
// ImageID holds the member's ArangoDB image ID
@@ -70,6 +73,7 @@ func (s MemberStatus) Equal(other MemberStatus) bool {
s.Conditions.Equal(other.Conditions) &&
s.IsInitialized == other.IsInitialized &&
s.CleanoutJobID == other.CleanoutJobID &&
reflect.DeepEqual(s.SideCarSpecs, other.SideCarSpecs) &&
s.ArangoVersion == other.ArangoVersion &&
s.ImageID == other.ImageID
}
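An aside on the Equal comparison above: `reflect.DeepEqual` distinguishes a nil map from an empty one, so a member whose `SideCarSpecs` was never initialized compares unequal to one holding an allocated-but-empty map. A tiny demonstration:

```go
package main

import (
	"fmt"
	"reflect"

	v1 "k8s.io/api/core/v1"
)

func main() {
	var nilSpecs map[string]v1.Container    // zero value: nil
	emptySpecs := map[string]v1.Container{} // allocated, but empty

	fmt.Println(reflect.DeepEqual(nilSpecs, emptySpecs)) // false
	fmt.Println(len(nilSpecs) == len(emptySpecs))        // true (both 0)
}
```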


@@ -88,6 +88,9 @@ type ActionContext interface {
GetDeploymentHealth() (driver.ClusterHealth, error)
// InvalidateSyncStatus resets the sync state to false and triggers an inspection
InvalidateSyncStatus()
// GetSpec returns a copy of the spec
GetSpec() api.DeploymentSpec
}
// newActionContext creates a new ActionContext implementation.
@@ -109,6 +112,10 @@ func (ac *actionContext) GetMode() api.DeploymentMode {
return ac.context.GetSpec().GetMode()
}
func (ac *actionContext) GetSpec() api.DeploymentSpec {
return ac.context.GetSpec()
}
// GetDeploymentHealth returns a copy of the latest known state of cluster health
func (ac *actionContext) GetDeploymentHealth() (driver.ClusterHealth, error) {
return ac.context.GetDeploymentHealth()


@@ -26,6 +26,7 @@ import (
"context"
"time"
"k8s.io/api/core/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
"github.com/rs/zerolog"
)
@@ -89,6 +90,7 @@ func (a *actionRotateMember) Start(ctx context.Context) (bool, error) {
}
// Update status
m.Phase = api.MemberPhaseRotating
if err := a.actionCtx.UpdateMember(m); err != nil {
return false, maskAny(err)
}
@@ -117,6 +119,15 @@ func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, err
m.Phase = api.MemberPhaseNone
m.RecentTerminations = nil // Since we're rotating, we do not care about old terminations.
m.CleanoutJobID = ""
group := a.action.Group
groupSpec := a.actionCtx.GetSpec().GetServerGroupSpec(group)
// Record the sidecar specs that are now running, so later reconciliations can detect changes
m.SideCarSpecs = make(map[string]v1.Container)
for _, specSidecar := range groupSpec.GetSidecars() {
m.SideCarSpecs[specSidecar.Name] = *specSidecar.DeepCopy()
}
if err := a.actionCtx.UpdateMember(m); err != nil {
return false, false, maskAny(err)
}


@@ -23,17 +23,18 @@
package reconcile
import (
"reflect"
"strings"
driver "github.com/arangodb/go-driver"
upgraderules "github.com/arangodb/go-upgrade-rules"
"github.com/rs/zerolog"
"github.com/rs/zerolog/log"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
v1 "k8s.io/api/core/v1"
)
// upgradeDecision is the result of an upgrade check.
@@ -394,9 +395,106 @@ func podNeedsRotation(log zerolog.Logger, p v1.Pod, apiObject metav1.Object, spe
return true, "Resource Requirements changed"
}
memberStatus, _, _ := status.Members.MemberStatusByPodName(p.GetName())
if memberStatus.SideCarSpecs == nil {
memberStatus.SideCarSpecs = make(map[string]v1.Container)
}
// Check for sidecars in the spec that are missing from the running pod
for _, specSidecar := range groupSpec.GetSidecars() {
stateSidecar, found := memberStatus.SideCarSpecs[specSidecar.Name]
if !found {
return true, "Sidecar " + specSidecar.Name + " not found in running pod " + p.GetName()
}
if sideCarRequireRotation(specSidecar.DeepCopy(), &stateSidecar) {
return true, "Sidecar " + specSidecar.Name + " requires rotation"
}
}
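// Check for sidecars recorded in the member status that have been removed from the spec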
for name := range memberStatus.SideCarSpecs {
var found = false
for _, specSidecar := range groupSpec.GetSidecars() {
if name == specSidecar.Name {
found = true
break
}
}
if !found {
return true, "Sidecar " + name + " no longer in specification"
}
}
return false, ""
}
// sideCarRequireRotation checks whether a sidecar container requires rotation, taking Kubernetes-defaulted parameters into account
func sideCarRequireRotation(wanted, given *v1.Container) bool {
if !reflect.DeepEqual(wanted.Args, given.Args) {
return true
}
if !reflect.DeepEqual(wanted.Command, given.Command) {
return true
}
if !reflect.DeepEqual(wanted.Env, given.Env) {
return true
}
if !reflect.DeepEqual(wanted.EnvFrom, given.EnvFrom) {
return true
}
if wanted.Image != given.Image {
return true
}
if wanted.ImagePullPolicy != given.ImagePullPolicy {
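// Tolerated case (presumably matching Kubernetes' defaulting): images
// tagged ":latest" get a default pull policy of "Always", so a wanted
// "Always" against a ":latest" image is not a real change.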
if wanted.ImagePullPolicy != "Always" || !strings.HasSuffix(given.Image, ":latest") {
return true
}
}
// Pointer-typed fields must be compared by value; a direct != would compare addresses
if !reflect.DeepEqual(wanted.Lifecycle, given.Lifecycle) {
return true
}
if !reflect.DeepEqual(wanted.LivenessProbe, given.LivenessProbe) {
return true
}
if !reflect.DeepEqual(wanted.Ports, given.Ports) {
return true
}
if !reflect.DeepEqual(wanted.ReadinessProbe, given.ReadinessProbe) {
return true
}
if !reflect.DeepEqual(wanted.Resources, given.Resources) {
return true
}
if !reflect.DeepEqual(wanted.SecurityContext, given.SecurityContext) {
return true
}
if wanted.Stdin != given.Stdin {
return true
}
if wanted.StdinOnce != given.StdinOnce {
return true
}
if wanted.TerminationMessagePath != given.TerminationMessagePath {
return true
}
if wanted.TerminationMessagePolicy != given.TerminationMessagePolicy {
return true
}
if wanted.TTY != given.TTY {
return true
}
if !reflect.DeepEqual(wanted.VolumeDevices, given.VolumeDevices) {
return true
}
if !reflect.DeepEqual(wanted.VolumeMounts, given.VolumeMounts) {
return true
}
if wanted.WorkingDir != given.WorkingDir {
return true
}
return false
}
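For orientation, a hedged usage sketch of the helper above; it assumes being called from within the same `reconcile` package (as `podNeedsRotation` does), and the container values are hypothetical:

```go
// exampleSideCarRotation shows the helper in action: a changed image on
// an otherwise identical sidecar forces a pod rotation.
func exampleSideCarRotation() bool {
	wanted := v1.Container{Name: "nginx", Image: "nginx:1.7.10"}
	given := v1.Container{Name: "nginx", Image: "nginx:1.7.9"}
	return sideCarRequireRotation(wanted.DeepCopy(), &given) // true
}
```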
// resourcesRequireRotation returns true if the resource requirements have changed such that a rotation is required
func resourcesRequireRotation(wanted, given v1.ResourceRequirements) bool {
checkList := func(wanted, given v1.ResourceList) bool {


@@ -71,6 +71,7 @@ func (r *Resources) cleanupRemovedClusterMembers() error {
r.health.mutex.Unlock()
// Only accept recent cluster health values
healthAge := time.Since(ts)
if healthAge > maxClusterHealthAge {
log.Info().Dur("age", healthAge).Msg("Cluster health is older than the maximum age; exiting cleanup")


@@ -108,7 +108,7 @@ func TestLoadBalancingSourceRanges(t *testing.T) {
}
t.Logf("Service %s cannot be found, waiting for some time...", eaServiceName)
time.Sleep(time.Second)
counter += 1
counter++
if counter >= 60 {
t.Fatalf("Could not find service %s within 60 seconds, giving up.", eaServiceName)
}
@@ -149,7 +149,7 @@ func TestLoadBalancingSourceRanges(t *testing.T) {
}
}
t.Logf("Service %s cannot be found, waiting for some more time...", eaServiceName)
counter += 1
counter++
if counter >= 60 {
t.Fatalf("Could not find changed service %s within 60 seconds, giving up.", eaServiceName)
}

tests/sidecar_test.go (new file, 387 lines)

@@ -0,0 +1,387 @@
//
// DISCLAIMER
//
// Copyright 2018 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Kaveh Vahedipour <kaveh@arangodb.com>
//
package tests
import (
"context"
"fmt"
"testing"
driver "github.com/arangodb/go-driver"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/dchest/uniuri"
v1 "k8s.io/api/core/v1"
)
type sideCarTest struct {
shortTest bool
name string
mode api.DeploymentMode
version string
image string
imageTag string
sideCars map[string][]v1.Container
}
type SideCarTest interface {
IsShortTest() bool
Mode() api.DeploymentMode
Name() string
Image() string
Version() driver.Version
GroupSideCars(string) []v1.Container
AddSideCar(string, v1.Container)
ClearGroupSideCars(group string)
}
func (s *sideCarTest) IsShortTest() bool {
return s.shortTest
}
func (s *sideCarTest) Name() string {
return s.name
}
func (s *sideCarTest) Mode() api.DeploymentMode {
return s.mode
}
func (s *sideCarTest) Version() driver.Version {
return driver.Version(s.version)
}
func (s *sideCarTest) GroupSideCars(group string) []v1.Container {
if s.sideCars == nil {
s.sideCars = make(map[string][]v1.Container)
}
return s.sideCars[group]
}
func (s *sideCarTest) AddSideCar(group string, container v1.Container) {
if s.sideCars == nil {
s.sideCars = make(map[string][]v1.Container)
}
s.sideCars[group] = append(s.sideCars[group], container)
}
func (s *sideCarTest) Image() string {
imageName := "arangodb/arangodb"
if s.image != "" {
imageName = s.image
}
imageTag := "latest"
if s.imageTag != "" {
imageTag = s.imageTag
}
return fmt.Sprintf("%s:%s", imageName, imageTag)
}
func (s *sideCarTest) ClearGroupSideCars(group string) {
s.sideCars[group] = nil
}
// TestSideCars tests sidecar functionality
func TestSideCars(t *testing.T) {
runSideCarTest(t, &sideCarTest{
version: "3.4.7",
name: "sidecar-tests",
})
}
func runSideCarTest(t *testing.T, spec SideCarTest) {
if !spec.IsShortTest() {
longOrSkip(t)
}
ns := getNamespace(t)
kubecli := mustNewKubeClient(t)
c := kubeArangoClient.MustNewInCluster()
depl := newDeployment(fmt.Sprintf("tu-%s-%s", spec.Name(), uniuri.NewLen(4)))
depl.Spec.Mode = api.NewMode(spec.Mode())
depl.Spec.TLS = api.TLSSpec{} // should auto-generate cert
depl.Spec.Image = util.NewString(spec.Image())
depl.Spec.SetDefaults(depl.GetName()) // this must be last
// Create deployment
deployment, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
if err != nil {
t.Fatalf("Create deployment failed: %v", err)
}
defer deferedCleanupDeployment(c, depl.GetName(), ns)
// Wait for deployment to be ready
deployment, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
if err != nil {
t.Fatalf("Deployment not running in time: %v", err)
}
// Create a database client
ctx := context.Background()
DBClient := mustNewArangodDatabaseClient(ctx, kubecli, deployment, t, nil)
if err := waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, spec.Version()); err != nil {
t.Fatalf("Deployment not healthy in time: %v", err)
}
// Add sidecar to coordinators
var coordinators = api.ServerGroupCoordinators.AsRole()
var dbservers = api.ServerGroupDBServers.AsRole()
var agents = api.ServerGroupAgents.AsRole()
var name = "nginx"
var image = "nginx:1.7.9"
spec.AddSideCar(coordinators, v1.Container{Image: image, Name: name})
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to add %s to group %s", name, coordinators)
} else {
t.Logf("Add %s sidecar to group %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
cmd1 := []string{"sh", "-c", "sleep 3600"}
cmd2 := []string{"sh", "-c", "sleep 1800"}
cmd := []string{"sh"}
args := []string{"-c", "sleep 3600"}
// Add 2nd sidecar to coordinators
image = "busybox"
name = "sleeper"
spec.AddSideCar(coordinators, v1.Container{Image: image, Name: name, Command: cmd1})
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to add %s to group %s", name, coordinators)
} else {
t.Logf("Add sidecar %s to group %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Update command line of second sidecar
spec.GroupSideCars(coordinators)[1].Command = cmd2
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to update %s in group %s with new command line", name, coordinators)
} else {
t.Logf("Update %s in group %s with new command line ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Change command line args of second sidecar
spec.GroupSideCars(coordinators)[1].Command = cmd
spec.GroupSideCars(coordinators)[1].Args = args
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to update %s in group %s with new command line arguments", name, coordinators)
} else {
t.Logf("Update %s in group %s with new command line arguments ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Change environment variables of second container
spec.GroupSideCars(coordinators)[1].Env = []v1.EnvVar{
{Name: "Hello", Value: "World"}, {Name: "Pi", Value: "3.14159265359"}, {Name: "Two", Value: "2"}}
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to change environment variables of %s sidecars for %s", name, coordinators)
} else {
t.Logf("Change environment variables of %s sidecars for %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Upgrade side car image
name = spec.GroupSideCars(coordinators)[0].Name
spec.GroupSideCars(coordinators)[0].Image = "nginx:1.7.10"
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to update %s in group %s with new image", name, coordinators)
} else {
t.Logf("Update image of sidecar %s in group %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Update side car image with new pull policy
spec.GroupSideCars(coordinators)[0].ImagePullPolicy = v1.PullPolicy("Always")
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to update %s in group %s with new image pull policy", name, coordinators)
} else {
t.Logf("Update %s in group %s with new image pull policy ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Remove all sidecars again
spec.ClearGroupSideCars(coordinators)
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
})
if err != nil {
t.Fatalf("Failed to remove all sidecars from group %s", coordinators)
} else {
t.Logf("Remove all sidecars from group %s ...", coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Adding containers to coordinators and db servers
image = "busybox"
name = "sleeper"
spec.AddSideCar(coordinators, v1.Container{Image: image, Name: name, Command: cmd1})
spec.AddSideCar(dbservers, v1.Container{Image: image, Name: name, Command: cmd1})
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
depl.DBServers.Sidecars = spec.GroupSideCars(dbservers)
})
if err != nil {
t.Fatalf("Failed to add a container to both coordinators and db servers")
} else {
t.Logf("Add %s sidecar to %s and %s ...", name, coordinators, dbservers)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Clear containers from both groups
spec.ClearGroupSideCars(coordinators)
spec.ClearGroupSideCars(dbservers)
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Coordinators.Sidecars = spec.GroupSideCars(coordinators)
depl.DBServers.Sidecars = spec.GroupSideCars(dbservers)
})
if err != nil {
t.Fatalf("Failed to delete all containers from both coordinators and db servers")
} else {
t.Logf("Remove all sidecars ...")
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Adding containers to agents
spec.AddSideCar(agents, v1.Container{Image: image, Name: name, Command: cmd1})
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Agents.Sidecars = spec.GroupSideCars(agents)
})
if err != nil {
t.Fatalf("Failed to add a %s sidecar to %s", name, agents)
} else {
t.Logf("Add a %s sidecar to %s ...", name, agents)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Clear containers from agents and add to db servers
spec.ClearGroupSideCars(agents)
spec.AddSideCar(dbservers, v1.Container{Image: image, Name: name, Command: cmd1})
deployment, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Agents.Sidecars = spec.GroupSideCars(agents)
depl.DBServers.Sidecars = spec.GroupSideCars(dbservers)
})
if err != nil {
t.Fatalf("Failed to delete %s containers and add %s sidecars to %s", agents, name, dbservers)
} else {
t.Logf("Delete %s containers and add %s sidecars to %s", agents, name, dbservers)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
t.Log("... done")
}
// Clean up
removeDeployment(c, depl.GetName(), ns)
}


@@ -28,6 +28,7 @@ import (
"fmt"
"net"
"os"
"reflect"
"strconv"
"strings"
"sync"
@@ -49,6 +50,7 @@ import (
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
rapi "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1alpha"
cl "github.com/arangodb/kube-arangodb/pkg/client"
"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/arangod"
@@ -157,7 +159,7 @@ func getEnterpriseImageOrSkip(t *testing.T) string {
return image
}
const TestEnterpriseLicenseKeySecretName = "arangodb-jenkins-license-key"
const testEnterpriseLicenseKeySecretName = "arangodb-jenkins-license-key"
func getEnterpriseLicenseKey() string {
return strings.TrimSpace(os.Getenv("ENTERPRISELICENSE"))
@@ -267,7 +269,7 @@ func newDeployment(name string) *api.ArangoDeployment {
Spec: api.DeploymentSpec{
ImagePullPolicy: util.NewPullPolicy(v1.PullAlways),
License: api.LicenseSpec{
SecretName: util.NewString(TestEnterpriseLicenseKeySecretName),
SecretName: util.NewString(testEnterpriseLicenseKeySecretName),
},
},
}
@@ -558,6 +560,55 @@ func createEqualVersionsPredicate(version driver.Version) func(driver.VersionInf
}
}
// waitUntilClusterSidecarsEqualSpec waits until the sidecars of all members match the deployment spec, returning nil on success
func waitUntilClusterSidecarsEqualSpec(t *testing.T, spec api.DeploymentMode, depl api.ArangoDeployment) error {
c := cl.MustNewInCluster()
ns := getNamespace(t)
var noGood int
for start := time.Now(); time.Since(start) < 600*time.Second; {
// Fetch latest status so we know all member details
apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Get(depl.GetName(), metav1.GetOptions{})
if err != nil {
t.Fatalf("Failed to get deployment: %v", err)
}
// Count the pods whose sidecars do not match the spec
noGood = 0
// Check one member after another
apiObject.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
for _, m := range *status {
if len(m.SideCarSpecs) != len(spec.GetSidecars()) {
noGood++
continue
}
for _, scar := range spec.GetSidecars() {
mcar, found := m.SideCarSpecs[scar.Name]
if found {
if !reflect.DeepEqual(mcar, scar) {
noGood++
}
} else {
noGood++
}
}
}
return nil
}, &apiObject.Status)
if noGood == 0 {
return nil
}
time.Sleep(2 * time.Second)
}
return maskAny(fmt.Errorf("%d pods with unmatched sidecars", noGood))
}
// clusterHealthEqualsSpec returns nil when the given health matches
// with the given deployment spec.
func clusterHealthEqualsSpec(h driver.ClusterHealth, spec api.DeploymentSpec) error {