package tests

import (
	"context"
	"fmt"
	"os"
	"testing"
	"time"

	driver "github.com/arangodb/go-driver"
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
	"github.com/arangodb/kube-arangodb/pkg/client"
	"github.com/dchest/uniuri"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
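
// The resilience tests below all follow the same pattern: create a cluster
// deployment, wait until the operator reports it as running and the cluster
// health matches the spec, then delete one kind of resource (pods, persistent
// volume claims or services) one at a time, verifying after each deletion
// that the operator brings the cluster back to the expected health.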
// TestResiliencePod tests handling of individual pod deletions.
func TestResiliencePod(t *testing.T) {
	longOrSkip(t)
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-pod-resilience" + uniuri.NewLen(4))
	depl.Spec.Mode = api.DeploymentModeCluster
	depl.Spec.SetDefaults(depl.GetName()) // this must be last
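	// (SetDefaults derives the remaining spec fields, e.g. the per-role
	// server counts, from what has been set explicitly above, which is why
	// it has to run after Mode is assigned.)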

	// Create deployment
	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}

	// Wait for deployment to be ready
	if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}
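	// (waitUntilClusterHealth polls the cluster health until the given
	// predicate returns nil; clusterHealthEqualsSpec is a test helper that,
	// judging by its name, accepts the health once the healthy servers match
	// the counts in the deployment spec.)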

	// Delete one pod after the other
	pods, err := kubecli.CoreV1().Pods(ns).List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Could not list pods in the %s namespace: %v", ns, err)
	}
	fmt.Fprintf(os.Stderr, "There are %d pods in the %s namespace\n", len(pods.Items), ns)
	for _, pod := range pods.Items {
		// Do not delete the pod that runs this test itself.
		if pod.GetName() == "arangodb-operator-test" {
			continue
		}
		fmt.Fprintf(os.Stderr, "Deleting pod %s in the %s namespace\n", pod.GetName(), ns)
		if err := kubecli.CoreV1().Pods(ns).Delete(pod.GetName(), &metav1.DeleteOptions{}); err != nil {
			t.Fatalf("Failed to delete pod %s: %v", pod.GetName(), err)
		}
		time.Sleep(30 * time.Second) // wait for problem to arise
		// Wait for cluster to be completely ready
		if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
			return clusterHealthEqualsSpec(h, apiObject.Spec)
		}); err != nil {
			t.Fatalf("Cluster not running in expected health in time: %v", err)
		}
	}

	// Cleanup
	removeDeployment(c, depl.GetName(), ns)
}

// TestResiliencePVC tests handling of individual persistent volume claim
// deletions.
func TestResiliencePVC(t *testing.T) {
	longOrSkip(t)
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-pvc-resilience" + uniuri.NewLen(4))
	depl.Spec.Mode = api.DeploymentModeCluster
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

	// Create deployment
	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}

	// Wait for deployment to be ready
	if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}

	// Delete one persistent volume claim after the other
	pvcs, err := kubecli.CoreV1().PersistentVolumeClaims(ns).List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Could not list persistent volume claims in the %s namespace: %v", ns, err)
	}
	fmt.Fprintf(os.Stderr, "There are %d persistent volume claims in the %s namespace\n", len(pvcs.Items), ns)
	for _, pvc := range pvcs.Items {
		// Do not delete the test's own volume claim, if there is one.
		if pvc.GetName() == "arangodb-operator-test" {
			continue
		}
		fmt.Fprintf(os.Stderr, "Deleting persistent volume claim %s in the %s namespace\n", pvc.GetName(), ns)
		if err := kubecli.CoreV1().PersistentVolumeClaims(ns).Delete(pvc.GetName(), &metav1.DeleteOptions{}); err != nil {
			t.Fatalf("Failed to delete persistent volume claim %s: %v", pvc.GetName(), err)
		}
		time.Sleep(30 * time.Second) // wait for problem to arise
		// Wait for cluster to be completely ready
		if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
			return clusterHealthEqualsSpec(h, apiObject.Spec)
		}); err != nil {
			t.Fatalf("Cluster not running in expected health in time: %v", err)
		}
	}

	// Cleanup
	removeDeployment(c, depl.GetName(), ns)
}

// TestResilienceService tests handling of individual service deletions.
func TestResilienceService(t *testing.T) {
	longOrSkip(t)
	c := client.MustNewInCluster()
	kubecli := mustNewKubeClient(t)
	ns := getNamespace(t)

	// Prepare deployment config
	depl := newDeployment("test-service-resilience" + uniuri.NewLen(4))
	depl.Spec.Mode = api.DeploymentModeCluster
	depl.Spec.SetDefaults(depl.GetName()) // this must be last

	// Create deployment
	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
	if err != nil {
		t.Fatalf("Create deployment failed: %v", err)
	}

	// Wait for deployment to be ready
	if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
		t.Fatalf("Deployment not running in time: %v", err)
	}

	// Create a database client
	ctx := context.Background()
	client := mustNewArangodDatabaseClient(ctx, kubecli, apiObject, t)

	// Wait for cluster to be completely ready
	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
		return clusterHealthEqualsSpec(h, apiObject.Spec)
	}); err != nil {
		t.Fatalf("Cluster not running in expected health in time: %v", err)
	}

	// Delete one service after the other
	services, err := kubecli.CoreV1().Services(ns).List(metav1.ListOptions{})
	if err != nil {
		t.Fatalf("Could not list services in the %s namespace: %v", ns, err)
	}
	fmt.Fprintf(os.Stderr, "There are %d services in the %s namespace\n", len(services.Items), ns)
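	// Unlike the pod and PVC tests there is nothing to skip here: every
	// service in the namespace is deleted in turn, and the operator is
	// expected to recreate each of them.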
	for _, service := range services.Items {
		fmt.Fprintf(os.Stderr, "Deleting service %s in the %s namespace\n", service.GetName(), ns)
		if err := kubecli.CoreV1().Services(ns).Delete(service.GetName(), &metav1.DeleteOptions{}); err != nil {
			t.Fatalf("Failed to delete service %s: %v", service.GetName(), err)
		}
		time.Sleep(30 * time.Second) // wait for problem to arise
		// Wait for cluster to be completely ready
		if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
			return clusterHealthEqualsSpec(h, apiObject.Spec)
		}); err != nil {
			t.Fatalf("Cluster not running in expected health in time: %v", err)
		}
	}

	// Cleanup
	removeDeployment(c, depl.GetName(), ns)
}