Mirror of https://github.com/arangodb/kube-arangodb.git

Merge pull request #347 from arangodb/feature/upgrade-tests
Feature/upgrade tests
Commit: 781be47861

6 changed files with 373 additions and 31 deletions

Makefile (8 lines changed)
@@ -92,7 +92,7 @@ RELEASE := $(GOBUILDDIR)/bin/release
 GHRELEASE := $(GOBUILDDIR)/bin/github-release
 
 TESTLENGTHOPTIONS := -test.short
-TESTTIMEOUT := 20m
+TESTTIMEOUT := 30m
 ifeq ($(LONG), 1)
 	TESTLENGTHOPTIONS :=
 	TESTTIMEOUT := 180m
@@ -294,6 +294,10 @@ $(TESTBIN): $(GOBUILDDIR) $(SOURCES)
 docker-test: $(TESTBIN)
 	docker build --quiet -f $(DOCKERTESTFILE) -t $(TESTIMAGE) .
 
+.PHONY: run-upgrade-tests
+run-upgrade-tests:
+	TESTOPTIONS="-test.run=TestUpgrade" make run-tests
+
 .PHONY: run-tests
 run-tests: docker-test
 ifdef PUSHIMAGES
@@ -311,7 +315,7 @@ endif
 	kubectl apply -f $(MANIFESTPATHTEST)
 	$(ROOTDIR)/scripts/kube_create_storage.sh $(DEPLOYMENTNAMESPACE)
 	$(ROOTDIR)/scripts/kube_create_license_key_secret.sh "$(DEPLOYMENTNAMESPACE)" '$(ENTERPRISELICENSE)'
-	$(ROOTDIR)/scripts/kube_run_tests.sh $(DEPLOYMENTNAMESPACE) $(TESTIMAGE) "$(ARANGODIMAGE)" '$(ENTERPRISEIMAGE)' $(TESTTIMEOUT) $(TESTLENGTHOPTIONS)
+	$(ROOTDIR)/scripts/kube_run_tests.sh $(DEPLOYMENTNAMESPACE) $(TESTIMAGE) "$(ARANGODIMAGE)" '$(ENTERPRISEIMAGE)' $(TESTTIMEOUT) $(TESTLENGTHOPTIONS) $(TESTOPTIONS)
 
 $(DURATIONTESTBIN): $(GOBUILDDIR) $(SOURCES)
 	@mkdir -p $(BINDIR)
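Two knobs interact here: TESTLENGTHOPTIONS feeds -test.short to the Go test binary (and is cleared when LONG=1), while the new run-upgrade-tests target uses TESTOPTIONS to restrict the run to tests matching -test.run=TestUpgrade. A minimal sketch, assuming the longOrSkip helper referenced by the tests below simply consults testing.Short():

package tests

import "testing"

// longOrSkip skips a long-running test unless the Makefile cleared
// -test.short via LONG=1. Assumed implementation; the real helper
// lives elsewhere in the tests package.
func longOrSkip(t *testing.T) {
	if testing.Short() {
		t.Skip("skipping long test in -test.short mode")
	}
}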
@@ -13,7 +13,7 @@ rules:
     resources: ["pods", "services", "persistentvolumes", "persistentvolumeclaims", "secrets", "serviceaccounts"]
     verbs: ["*"]
   - apiGroups: ["apps"]
-    resources: ["daemonsets"]
+    resources: ["daemonsets", "deployments"]
     verbs: ["*"]
 
 ---
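The extra "deployments" resource in the apps group is what lets the new operator upgrade test (tests/operator_upgrade_test.go, below) read and rewrite the image of the operator's own Deployment. A minimal sketch of the access pattern that needs this permission; bumpOperatorImage is a hypothetical helper, the real logic is updateOperatorImage below:

package tests

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// bumpOperatorImage shows why the RBAC rule now needs get/update on
// apps/v1 Deployments (hypothetical helper, not part of the PR).
func bumpOperatorImage(kube kubernetes.Interface, ns, image string) error {
	depl, err := kube.AppsV1().Deployments(ns).Get("arango-deployment-operator", metav1.GetOptions{})
	if err != nil {
		return err
	}
	for i := range depl.Spec.Template.Spec.Containers {
		depl.Spec.Template.Spec.Containers[i].Image = image
	}
	_, err = kube.AppsV1().Deployments(ns).Update(depl)
	return err
}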
@@ -62,6 +62,11 @@ func TestEnvironmentProduction(t *testing.T) {
 	depl.Spec.Environment = api.NewEnvironment(api.EnvironmentProduction)
 	depl.Spec.DBServers.Count = util.NewInt(numNodes + 1)
 	depl.Spec.SetDefaults(depl.GetName()) // this must be last
+
+	// This test fails to validate the spec if no image is set explicitly because this is required in production mode
+	if depl.Spec.Image == nil {
+		depl.Spec.Image = util.NewString("arangodb/arangodb:latest")
+	}
 	assert.NoError(t, depl.Spec.Validate())
 
 	dbserverCount := depl.Spec.DBServers.GetCount()
tests/operator_upgrade_test.go (new file, 196 lines)
@@ -0,0 +1,196 @@
package tests

import (
	"fmt"
	"testing"
	"time"

	appsv1 "k8s.io/api/apps/v1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/fields"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
	kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
	"github.com/arangodb/kube-arangodb/pkg/util/retry"
	"github.com/dchest/uniuri"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	watch "k8s.io/apimachinery/pkg/watch"
	"k8s.io/client-go/kubernetes"
)

const (
	operatorTestDeploymentName string = "arango-deployment-operator"
	oldOperatorTestImage       string = "arangodb/kube-arangodb:0.3.7"
)

func TestOperatorUpgradeFrom038(t *testing.T) {
	ns := getNamespace(t)
	kubecli := mustNewKubeClient(t)
	c := kubeArangoClient.MustNewInCluster()

	if err := waitForArangoDBPodsGone(ns, kubecli); err != nil {
		t.Fatalf("Remaining arangodb pods did not vanish, can not start test: %v", err)
	}

	currentimage, err := updateOperatorImage(t, ns, kubecli, oldOperatorTestImage)
	if err != nil {
		t.Fatalf("Could not replace operator with old image: %v", err)
	}
	defer updateOperatorImage(t, ns, kubecli, currentimage)

	if err := waitForOperatorImage(ns, kubecli, oldOperatorTestImage); err != nil {
		t.Fatalf("Old operator not ready in time: %v", err)
	}

	depl := newDeployment(fmt.Sprintf("opup-%s", uniuri.NewLen(4)))
	depl.Spec.TLS = api.TLSSpec{}         // should auto-generate cert
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

	// Create deployment
	if _, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl); err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}
	defer removeDeployment(c, depl.GetName(), ns)

	// Wait for deployment to be ready
	_, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
	if err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	podsWatcher, err := kubecli.CoreV1().Pods(ns).Watch(metav1.ListOptions{
		LabelSelector: fields.OneTermEqualSelector("app", "arangodb").String(),
	})
	if err != nil {
		t.Fatalf("Failed to watch pods: %v", err)
	}
	defer podsWatcher.Stop()

	errorChannel := make(chan error)
	go func() {
		var addedPods []string
		for {
			select {
			case ev, ok := <-podsWatcher.ResultChan():
				if !ok {
					return // Abort
				}
				if pod, ok := ev.Object.(*v1.Pod); ok {
					if k8sutil.IsArangoDBImageIDAndVersionPod(*pod) {
						continue
					}

					switch ev.Type {
					case watch.Modified:
						if !k8sutil.IsPodReady(pod) {
							errorChannel <- fmt.Errorf("Pod no longer ready: %s", pod.GetName())
						}
					case watch.Deleted:
						errorChannel <- fmt.Errorf("Pod was deleted: %s", pod.GetName())
					case watch.Added:
						if len(addedPods) >= 9 {
							errorChannel <- fmt.Errorf("New pod was created: %s", pod.GetName())
						}
						addedPods = append(addedPods, pod.GetName())
					}
				}
			}
		}
	}()

	if _, err := updateOperatorImage(t, ns, kubecli, currentimage); err != nil {
		t.Fatalf("Failed to restore the new operator image: %v", err)
	}

	if err := waitForOperatorImage(ns, kubecli, currentimage); err != nil {
		t.Fatalf("New operator not ready in time: %v", err)
	}

	select {
	case <-time.After(1 * time.Minute):
		// cool
	case err := <-errorChannel:
		// not cool
		t.Errorf("Deployment had error: %v", err)
	}
}

func updateOperatorImage(t *testing.T, ns string, kube kubernetes.Interface, newImage string) (string, error) {
	for {
		depl, err := kube.AppsV1().Deployments(ns).Get(operatorTestDeploymentName, metav1.GetOptions{})
		if err != nil {
			return "", err
		}
		old, err := getOperatorImage(depl)
		if err != nil {
			return "", err
		}
		setOperatorImage(depl, newImage)
		if _, err := kube.AppsV1().Deployments(ns).Update(depl); k8sutil.IsConflict(err) {
			continue
		} else if err != nil {
			return "", err
		}
		return old, nil
	}
}
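// Note (assumption, not part of the PR): Kubernetes updates use optimistic
// concurrency via the object's resourceVersion, so a 409 Conflict from
// Update means the Deployment changed after the Get; the loop above simply
// re-reads and retries. k8sutil.IsConflict presumably wraps the standard
// apimachinery helper, roughly:
//
//	import apierrors "k8s.io/apimachinery/pkg/api/errors"
//
//	func isConflict(err error) bool { return apierrors.IsConflict(err) }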

func updateOperatorDeployment(ns string, kube kubernetes.Interface) (*appsv1.Deployment, error) {
	return kube.AppsV1().Deployments(ns).Get(operatorTestDeploymentName, metav1.GetOptions{})
}

func getOperatorImage(depl *appsv1.Deployment) (string, error) {
	for _, c := range depl.Spec.Template.Spec.Containers {
		if c.Name == "operator" {
			return c.Image, nil
		}
	}

	return "", fmt.Errorf("Operator container not found")
}

func setOperatorImage(depl *appsv1.Deployment, image string) {
	for i := range depl.Spec.Template.Spec.Containers {
		c := &depl.Spec.Template.Spec.Containers[i]
		if c.Name == "operator" {
			c.Image = image
		}
	}
}

func waitForArangoDBPodsGone(ns string, kube kubernetes.Interface) error {
	return retry.Retry(func() error {
		pods, err := kube.CoreV1().Pods(ns).List(metav1.ListOptions{
			LabelSelector: fields.OneTermEqualSelector("app", "arangodb").String(),
		})
		if k8sutil.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
		if len(pods.Items) > 0 {
			return fmt.Errorf("%d arangodb pods still present", len(pods.Items))
		}
		return nil
	}, deploymentReadyTimeout)
}

func waitForOperatorImage(ns string, kube kubernetes.Interface, image string) error {
	return retry.Retry(func() error {
		pods, err := kube.CoreV1().Pods(ns).List(metav1.ListOptions{
			LabelSelector: fields.OneTermEqualSelector("app", operatorTestDeploymentName).String(),
		})
		if err != nil {
			return err
		}
		for _, pod := range pods.Items {
			for _, c := range pod.Spec.Containers {
				if c.Name == "operator" {
					if c.Image != image {
						return fmt.Errorf("in pod %s found image %s, expected %s", pod.Name, c.Image, image)
					}
				}
			}
		}
		return nil
	}, deploymentReadyTimeout)
}
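Both wait helpers funnel through retry.Retry(op, timeout) from pkg/util/retry. A rough sketch of the assumed contract (retryUntil is a hypothetical stand-in; the real helper likely uses backoff rather than a fixed interval):

package tests

import "time"

// retryUntil keeps calling op until it returns nil or the timeout elapses,
// mirroring how retry.Retry is used above (assumed semantics).
func retryUntil(op func() error, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	for {
		err := op()
		if err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err
		}
		time.Sleep(2 * time.Second) // fixed interval; the real helper may back off
	}
}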
@@ -56,7 +56,8 @@ import (
 )
 
 const (
-	deploymentReadyTimeout = time.Minute * 4
+	deploymentReadyTimeout   = time.Minute * 4
+	deploymentUpgradeTimeout = time.Minute * 20
 )
 
 var (
@@ -344,7 +345,7 @@ func waitUntilSecretNotFound(cli kubernetes.Interface, secretName, ns string, ti
 
 // waitUntilClusterHealth waits until an arango cluster
 // has reached a state where the given predicate returns nil.
-func waitUntilClusterHealth(cli driver.Client, predicate func(driver.ClusterHealth) error) error {
+func waitUntilClusterHealth(cli driver.Client, predicate func(driver.ClusterHealth) error, timeout ...time.Duration) error {
 	ctx := context.Background()
 	op := func() error {
 		cluster, err := cli.Cluster(ctx)
@@ -362,12 +363,30 @@ func waitUntilClusterHealth(cli driver.Client, predicate func(driver.ClusterHealth) error, timeout ...time.Duration) error {
 		}
 		return nil
 	}
-	if err := retry.Retry(op, deploymentReadyTimeout); err != nil {
+	actualTimeout := deploymentReadyTimeout
+	if len(timeout) > 0 {
+		actualTimeout = timeout[0]
+	}
+	if err := retry.Retry(op, actualTimeout); err != nil {
 		return maskAny(err)
 	}
 	return nil
 }
 
+// waitUntilClusterVersionUp waits until an arango cluster is healthy and
+// all servers are running the given version.
+func waitUntilClusterVersionUp(cli driver.Client, version driver.Version) error {
+	return waitUntilClusterHealth(cli, func(h driver.ClusterHealth) error {
+		for s, r := range h.Health {
+			if r.Version.CompareTo(version) != 0 {
+				return maskAny(fmt.Errorf("Member %s has version %s, expecting %s", s, r.Version, version))
+			}
+		}
+
+		return nil
+	}, deploymentUpgradeTimeout)
+}
+
 // waitUntilVersionUp waits until the arango database responds to
 // an `/_api/version` request without an error. An additional Predicate
 // can do a check on the VersionInfo object returned by the server.
@@ -552,16 +571,24 @@ func removeSecret(cli kubernetes.Interface, secretName, ns string) error {
 
 // check if a deployment is up and has reached a state where it is able to answer to /_api/version requests.
 // Optionally the returned version can be checked against a user provided version
-func waitUntilArangoDeploymentHealthy(deployment *api.ArangoDeployment, DBClient driver.Client, k8sClient kubernetes.Interface, versionString string) error {
+func waitUntilArangoDeploymentHealthy(deployment *api.ArangoDeployment, DBClient driver.Client, k8sClient kubernetes.Interface, versionString driver.Version) error {
 	// deployment checks
 	var checkVersionPredicate func(driver.VersionInfo) error
 	if len(versionString) > 0 {
-		checkVersionPredicate = createEqualVersionsPredicate(driver.Version(versionString))
+		checkVersionPredicate = createEqualVersionsPredicate(versionString)
 	}
 	switch mode := deployment.Spec.GetMode(); mode {
 	case api.DeploymentModeCluster:
 		// Wait for cluster to be completely ready
 		if err := waitUntilClusterHealth(DBClient, func(h driver.ClusterHealth) error {
+			if len(versionString) > 0 {
+				for s, r := range h.Health {
+					if r.Version.CompareTo(versionString) != 0 {
+						return maskAny(fmt.Errorf("Member %s has version %s, expecting %s", s, r.Version, versionString))
+					}
+				}
+			}
+
 			return clusterHealthEqualsSpec(h, deployment.Spec)
 		}); err != nil {
 			return maskAny(fmt.Errorf("Cluster not running in expected health in time: %s", err))
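Switching versionString from a plain string to driver.Version matters because go-driver's Version.CompareTo performs a component-aware version comparison rather than raw string equality. A small illustration, assuming that documented behavior:

package main

import (
	"fmt"

	driver "github.com/arangodb/go-driver"
)

func main() {
	a := driver.Version("3.4.2")
	b := driver.Version("3.4.10")
	// Numeric component comparison: 3.4.10 is newer than 3.4.2,
	// which a plain lexicographic string comparison would get backwards.
	fmt.Println(a.CompareTo(b) < 0) // expected: true
}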
@@ -27,6 +27,7 @@ import (
 	"strings"
 	"testing"
 
+	driver "github.com/arangodb/go-driver"
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
 	kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
 	"github.com/arangodb/kube-arangodb/pkg/util"
@@ -82,19 +83,138 @@ func TestUpgradeActiveFailoverRocksDB33to34(t *testing.T) {
 // 	upgradeSubTest(t, api.DeploymentModeCluster, api.StorageEngineRocksDB, "arangodb/arangodb:3.3.16", "arangodb/arangodb:3.3.17")
 // }
 
-func upgradeSubTest(t *testing.T, mode api.DeploymentMode, engine api.StorageEngine, fromImage, toImage string) error {
-	// check environment
-	longOrSkip(t)
+func TestUpgradeClusterRocksDB3322Cto342C(t *testing.T) {
+	runUpgradeTest(t, &upgradeTest{
+		fromVersion: "3.3.22",
+		toVersion:   "3.4.2-1",
+		shortTest:   true,
+	})
+}
+
+func TestUpgradeClusterRocksDB3318Cto3322C(t *testing.T) {
+	runUpgradeTest(t, &upgradeTest{
+		fromVersion: "3.3.16",
+		toVersion:   "3.3.22",
+		shortTest:   false,
+	})
+}
+
+func TestUpgradeClusterRocksDB341Cto342C(t *testing.T) {
+	runUpgradeTest(t, &upgradeTest{
+		fromVersion: "3.4.1",
+		toImageTag:  "3.4.2",
+		toVersion:   "3.4.2-1",
+		shortTest:   true,
+	})
+}
+
+type upgradeTest struct {
+	fromVersion string
+	toVersion   string
+
+	// mode describes the deployment mode of the upgrade test, defaults to Cluster
+	mode api.DeploymentMode
+	// engine describes the deployment storage engine, defaults to RocksDB
+	engine api.StorageEngine
+
+	// fromImage describes the image of the version from which the upgrade should start, defaults to "arangodb/arangodb:<fromVersion>"
+	fromImage    string
+	fromImageTag string
+
+	// toImage describes the image of the version to which the upgrade should lead, defaults to "arangodb/arangodb:<toVersion>"
+	toImage    string
+	toImageTag string
+
+	name      string
+	shortTest bool
+}
+
+type UpgradeTest interface {
+	FromVersion() driver.Version
+	ToVersion() driver.Version
+
+	Name() string
+	FromImage() string
+	ToImage() string
+
+	Mode() api.DeploymentMode
+	Engine() api.StorageEngine
+
+	IsShortTest() bool
+}
+
+func (u *upgradeTest) FromImage() string {
+	imageName := "arangodb/arangodb"
+	if u.fromImage != "" {
+		imageName = u.fromImage
+	}
+	imageTag := u.fromVersion
+	if u.fromImageTag != "" {
+		imageTag = u.fromImageTag
+	}
+	return fmt.Sprintf("%s:%s", imageName, imageTag)
+}
+
+func (u *upgradeTest) ToImage() string {
+	imageName := "arangodb/arangodb"
+	if u.toImage != "" {
+		imageName = u.toImage
+	}
+	imageTag := u.toVersion
+	if u.toImageTag != "" {
+		imageTag = u.toImageTag
+	}
+	return fmt.Sprintf("%s:%s", imageName, imageTag)
+}
+
+func (u *upgradeTest) Mode() api.DeploymentMode {
+	if u.mode != "" {
+		return u.mode
+	}
+	return api.DeploymentModeCluster
+}
+
+func (u *upgradeTest) Engine() api.StorageEngine {
+	if u.engine != "" {
+		return u.engine
+	}
+	return api.StorageEngineRocksDB
+}
+
+func (u *upgradeTest) Name() string {
+	if u.name != "" {
+		return u.name
+	}
+
+	return strings.Replace(fmt.Sprintf("%s-to-%s", u.FromVersion(), u.ToVersion()), ".", "-", -1)
+}
+
+func (u *upgradeTest) FromVersion() driver.Version {
+	return driver.Version(u.fromVersion)
+}
+
+func (u *upgradeTest) ToVersion() driver.Version {
+	return driver.Version(u.toVersion)
+}
+
+func (u *upgradeTest) IsShortTest() bool {
+	return u.shortTest
+}
+
+func runUpgradeTest(t *testing.T, spec UpgradeTest) {
+	if !spec.IsShortTest() {
+		longOrSkip(t)
+	}
 
 	ns := getNamespace(t)
 	kubecli := mustNewKubeClient(t)
 	c := kubeArangoClient.MustNewInCluster()
 
-	depl := newDeployment(strings.Replace(fmt.Sprintf("tu-%s-%s-%s", mode[:2], engine[:2], uniuri.NewLen(4)), ".", "", -1))
-	depl.Spec.Mode = api.NewMode(mode)
-	depl.Spec.StorageEngine = api.NewStorageEngine(engine)
+	depl := newDeployment(fmt.Sprintf("tu-%s-%s", spec.Name(), uniuri.NewLen(4)))
+	depl.Spec.Mode = api.NewMode(spec.Mode())
+	depl.Spec.StorageEngine = api.NewStorageEngine(spec.Engine())
 	depl.Spec.TLS = api.TLSSpec{} // should auto-generate cert
-	depl.Spec.Image = util.NewString(fromImage)
+	depl.Spec.Image = util.NewString(spec.FromImage())
 	depl.Spec.SetDefaults(depl.GetName()) // this must be last
 
 	// Create deployment
@@ -113,37 +233,27 @@ func upgradeSubTest(t *testing.T, mode api.DeploymentMode, engine api.StorageEngine, fromImage, toImage string) error {
 	// Create a database client
 	ctx := context.Background()
 	DBClient := mustNewArangodDatabaseClient(ctx, kubecli, deployment, t, nil)
 
-	if err := waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, ""); err != nil {
+	if err := waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, spec.FromVersion()); err != nil {
 		t.Fatalf("Deployment not healthy in time: %v", err)
 	}
 
 	// Try to change image version
 	deployment, err = updateDeployment(c, depl.GetName(), ns,
-		func(spec *api.DeploymentSpec) {
-			spec.Image = util.NewString(toImage)
+		func(depl *api.DeploymentSpec) {
+			depl.Image = util.NewString(spec.ToImage())
 		})
 	if err != nil {
-		t.Fatalf("Failed to upgrade the Image from version : " + fromImage + " to version: " + toImage)
+		t.Fatalf("Failed to upgrade the image from version %s to version %s", spec.FromImage(), spec.ToImage())
 	} else {
 		t.Log("Updated deployment")
 	}
 
 	deployment, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
 	if err != nil {
 		t.Fatalf("Deployment not running in time: %v", err)
 	} else {
 		t.Log("Deployment running")
 	}
 
-	if err := waitUntilArangoDeploymentHealthy(deployment, DBClient, kubecli, toImage); err != nil {
-		t.Fatalf("Deployment not healthy in time: %v", err)
+	if err := waitUntilClusterVersionUp(DBClient, spec.ToVersion()); err != nil {
+		t.Errorf("Deployment not healthy in time: %v", err)
 	} else {
 		t.Log("Deployment healthy")
 	}
 
 	// Cleanup
 	removeDeployment(c, depl.GetName(), ns)
-
-	return nil
 }
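With the table-driven setup above, covering another upgrade path is one small test function. A hypothetical example (version numbers illustrative, not part of the PR):

func TestUpgradeClusterRocksDB342Cto343C(t *testing.T) {
	runUpgradeTest(t, &upgradeTest{
		fromVersion: "3.4.2",
		toVersion:   "3.4.3",
		shortTest:   false, // runs only when LONG=1 clears -test.short
	})
}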