mirror of https://github.com/arangodb/kube-arangodb.git
synced 2024-12-14 11:57:37 +00:00

Added scaling test

parent 84d071e337
commit 32e8000143

4 changed files with 202 additions and 8 deletions

@@ -23,12 +23,17 @@
 package tests
 
 import (
+	"fmt"
+
 	api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha"
 )
 
-// deploymentHasState creates a predicate that returns true when the deployment has the given state.
-func deploymentHasState(state api.DeploymentState) func(*api.ArangoDeployment) bool {
-	return func(obj *api.ArangoDeployment) bool {
-		return obj.Status.State == state
+// deploymentHasState creates a predicate that returns nil when the deployment has the given state.
+func deploymentHasState(state api.DeploymentState) func(*api.ArangoDeployment) error {
+	return func(obj *api.ArangoDeployment) error {
+		if obj.Status.State == state {
+			return nil
+		}
+		return fmt.Errorf("Expected state %s, got %s", state, obj.Status.State)
 	}
 }
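
The switch from a bool predicate to an error predicate is what lets the wait loop further down report a real failure cause instead of "Predicate returns false". A minimal standalone sketch of the idea, with generic names rather than the operator's retry package:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitUntil polls predicate until it returns nil or the timeout expires.
// On timeout it wraps the last predicate error, preserving the diagnosis.
func waitUntil(predicate func() error, timeout, interval time.Duration) error {
	deadline := time.Now().Add(timeout)
	var lastErr error
	for time.Now().Before(deadline) {
		if lastErr = predicate(); lastErr == nil {
			return nil
		}
		time.Sleep(interval)
	}
	return fmt.Errorf("condition not met within %s: %w", timeout, lastErr)
}

func main() {
	state := "Creating" // stand-in for a deployment's observed state
	err := waitUntil(func() error {
		if state == "Running" {
			return nil
		}
		return errors.New("Expected state Running, got " + state)
	}, 50*time.Millisecond, 10*time.Millisecond)
	fmt.Println(err) // the timeout message explains what was still wrong
}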

tests/scale_test.go (new file, 80 lines)
@@ -0,0 +1,80 @@
+package tests
+
+import (
+	"testing"
+
+	"github.com/dchest/uniuri"
+
+	driver "github.com/arangodb/go-driver"
+	api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha"
+	"github.com/arangodb/k8s-operator/pkg/client"
+)
+
+// TestScaleCluster tests scaling up/down the number of DBServers & coordinators
+// of a cluster.
+func TestScaleCluster(t *testing.T) {
+	c := client.MustNewInCluster()
+	kubecli := mustNewKubeClient(t)
+	ns := getNamespace(t)
+
+	// Prepare deployment config
+	depl := newDeployment("test-scale-" + uniuri.NewLen(4))
+	depl.Spec.Mode = api.DeploymentModeCluster
+	depl.Spec.SetDefaults()
+
+	// Create deployment
+	apiObject, err := c.DatabaseV1alpha().ArangoDeployments(ns).Create(depl)
+	if err != nil {
+		t.Fatalf("Create deployment failed: %v", err)
+	}
+
+	// Wait for deployment to be ready
+	if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
+		t.Fatalf("Deployment not running in time: %v", err)
+	}
+
+	// Create a database client
+	client := mustNewArangodDatabaseClient(kubecli, apiObject, t)
+
+	// Wait for cluster to be completely ready
+	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
+		return clusterHealthEqualsSpec(h, apiObject.Spec)
+	}); err != nil {
+		t.Fatalf("Cluster not running in expected health in time: %v", err)
+	}
+
+	// Add 2 DBServers, 1 coordinator
+	updated, err := updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
+		spec.DBServers.Count = 5
+		spec.Coordinators.Count = 4
+	})
+	if err != nil {
+		t.Fatalf("Failed to update deployment: %v", err)
+	}
+
+	// Wait for cluster to reach new size
+	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
+		return clusterHealthEqualsSpec(h, updated.Spec)
+	}); err != nil {
+		t.Fatalf("Cluster not running, after scale-up, in expected health in time: %v", err)
+	}
+
+	// Remove 2 DBServers, 2 coordinators
+	updated, err = updateDeployment(c, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
+		spec.DBServers.Count = 3
+		spec.Coordinators.Count = 2
+	})
+	if err != nil {
+		t.Fatalf("Failed to update deployment: %v", err)
+	}
+
+	// Wait for cluster to reach new size
+	if err := waitUntilClusterHealth(client, func(h driver.ClusterHealth) error {
+		return clusterHealthEqualsSpec(h, updated.Spec)
+	}); err != nil {
+		t.Fatalf("Cluster not running, after scale-down, in expected health in time: %v", err)
+	}
+
+	// Cleanup
+	removeDeployment(c, depl.GetName(), ns)
+}
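
The test asserts the cluster's size through clusterHealthEqualsSpec (defined in the utility diff further down). As a self-contained illustration of that counting rule, here is a sketch using mock roles and statuses rather than the go-driver constants:

package main

import "fmt"

type server struct{ Role, Status string }

type spec struct{ Agents, DBServers, Coordinators int }

// healthMatchesSpec mirrors clusterHealthEqualsSpec: agents are counted by
// role alone, while DBServers and coordinators must also report a good status.
func healthMatchesSpec(health []server, want spec) error {
	agents, goodDBServers, goodCoordinators := 0, 0, 0
	for _, s := range health {
		if s.Role == "Agent" {
			agents++
		} else if s.Status == "GOOD" {
			switch s.Role {
			case "DBServer":
				goodDBServers++
			case "Coordinator":
				goodCoordinators++
			}
		}
	}
	if agents == want.Agents && goodDBServers == want.DBServers && goodCoordinators == want.Coordinators {
		return nil
	}
	return fmt.Errorf("expected %d,%d,%d got %d,%d,%d",
		want.Agents, want.DBServers, want.Coordinators,
		agents, goodDBServers, goodCoordinators)
}

func main() {
	health := []server{
		{"Agent", "GOOD"}, {"Agent", "GOOD"}, {"Agent", "GOOD"},
		{"DBServer", "GOOD"}, {"DBServer", "GOOD"}, {"DBServer", "FAILED"},
		{"Coordinator", "GOOD"}, {"Coordinator", "GOOD"},
	}
	// One DBServer is unhealthy, so a desired size of 3/3/2 is not yet satisfied.
	fmt.Println(healthMatchesSpec(health, spec{Agents: 3, DBServers: 3, Coordinators: 2}))
}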

@@ -28,4 +28,7 @@ func TestSimpleSingle(t *testing.T) {
 	if _, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentHasState(api.DeploymentStateRunning)); err != nil {
 		t.Errorf("Deployment not running in time: %v", err)
 	}
+
+	// Cleanup
+	removeDeployment(c, depl.GetName(), ns)
 }

@@ -23,17 +23,23 @@
 package tests
 
 import (
+	"context"
 	"fmt"
 	"os"
 	"strings"
 	"testing"
 	"time"
+
+	"k8s.io/client-go/kubernetes"
+
 	"github.com/pkg/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
+	driver "github.com/arangodb/go-driver"
 	api "github.com/arangodb/k8s-operator/pkg/apis/arangodb/v1alpha"
 	"github.com/arangodb/k8s-operator/pkg/generated/clientset/versioned"
+	"github.com/arangodb/k8s-operator/pkg/util/arangod"
 	"github.com/arangodb/k8s-operator/pkg/util/k8sutil"
 	"github.com/arangodb/k8s-operator/pkg/util/retry"
 )
 
@ -45,6 +51,26 @@ var (
|
|||
maskAny = errors.WithStack
|
||||
)
|
||||
|
||||
// mustNewKubeClient creates a kubernetes client
|
||||
// failing the test on errors.
|
||||
func mustNewKubeClient(t *testing.T) kubernetes.Interface {
|
||||
c, err := k8sutil.NewKubeClient()
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create kube cli: %v", err)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// mustNewArangodDatabaseClient creates a new database client,
|
||||
// failing the test on errors.
|
||||
func mustNewArangodDatabaseClient(kubecli kubernetes.Interface, apiObject *api.ArangoDeployment, t *testing.T) driver.Client {
|
||||
c, err := arangod.CreateArangodDatabaseClient(kubecli, apiObject)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to create arango database client: %v", err)
|
||||
}
|
||||
return c
|
||||
}
|
||||
|
||||
// getNamespace returns the kubernetes namespace in which to run tests.
|
||||
func getNamespace(t *testing.T) string {
|
||||
ns := os.Getenv("TEST_NAMESPACE")
|
||||
|
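
The mustNew* helpers follow the common Go test convention of turning a (value, error) constructor into a fatal-on-error helper so call sites stay flat. A generic sketch of the pattern with hypothetical names, runnable as a _test.go file:

package tests

import "testing"

// newClient is a stand-in constructor that may fail.
func newClient() (string, error) { return "client", nil }

// mustNewClient fails the test immediately when the constructor errors,
// letting tests write `c := mustNewClient(t)` without error plumbing.
func mustNewClient(t *testing.T) string {
	t.Helper() // report failures at the caller's line
	c, err := newClient()
	if err != nil {
		t.Fatalf("Failed to create client: %v", err)
	}
	return c
}

func TestMustNewClient(t *testing.T) {
	if c := mustNewClient(t); c == "" {
		t.Fatal("empty client")
	}
}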
@ -70,7 +96,7 @@ func newDeployment(name string) *api.ArangoDeployment {
|
|||
|
||||
// waitUntilDeployment waits until a deployment with given name in given namespace
|
||||
// reached a state where the given predicate returns true.
|
||||
func waitUntilDeployment(cli versioned.Interface, deploymentName, ns string, predicate func(*api.ArangoDeployment) bool) (*api.ArangoDeployment, error) {
|
||||
func waitUntilDeployment(cli versioned.Interface, deploymentName, ns string, predicate func(*api.ArangoDeployment) error) (*api.ArangoDeployment, error) {
|
||||
var result *api.ArangoDeployment
|
||||
op := func() error {
|
||||
obj, err := cli.DatabaseV1alpha().ArangoDeployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
|
@ -79,13 +105,93 @@ func waitUntilDeployment(cli versioned.Interface, deploymentName, ns string, pre
|
|||
return maskAny(err)
|
||||
}
|
||||
result = obj
|
||||
if predicate(obj) {
|
||||
return nil
|
||||
if err := predicate(obj); err != nil {
|
||||
return maskAny(err)
|
||||
}
|
||||
return fmt.Errorf("Predicate returns false")
|
||||
return nil
|
||||
}
|
||||
if err := retry.Retry(op, deploymentReadyTimeout); err != nil {
|
||||
return nil, maskAny(err)
|
||||
}
|
||||
return result, nil
|
||||
}
|
||||
|
||||
// waitUntilClusterHealth waits until an arango cluster
|
||||
// reached a state where the given predicate returns nil.
|
||||
func waitUntilClusterHealth(cli driver.Client, predicate func(driver.ClusterHealth) error) error {
|
||||
ctx := context.Background()
|
||||
op := func() error {
|
||||
cluster, err := cli.Cluster(ctx)
|
||||
if err != nil {
|
||||
return maskAny(err)
|
||||
}
|
||||
h, err := cluster.Health(ctx)
|
||||
if err != nil {
|
||||
return maskAny(err)
|
||||
}
|
||||
if err := predicate(h); err != nil {
|
||||
return maskAny(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if err := retry.Retry(op, deploymentReadyTimeout); err != nil {
|
||||
return maskAny(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// clusterHealthEqualsSpec returns nil when the given health matches
|
||||
// with the given deployment spec.
|
||||
func clusterHealthEqualsSpec(h driver.ClusterHealth, spec api.DeploymentSpec) error {
|
||||
agents := 0
|
||||
goodDBServers := 0
|
||||
goodCoordinators := 0
|
||||
for _, s := range h.Health {
|
||||
if s.Role == driver.ServerRoleAgent {
|
||||
agents++
|
||||
} else if s.Status == driver.ServerStatusGood {
|
||||
switch s.Role {
|
||||
case driver.ServerRoleDBServer:
|
||||
goodDBServers++
|
||||
case driver.ServerRoleCoordinator:
|
||||
goodCoordinators++
|
||||
}
|
||||
}
|
||||
}
|
||||
if spec.Agents.Count == agents &&
|
||||
spec.DBServers.Count == goodDBServers &&
|
||||
spec.Coordinators.Count == goodCoordinators {
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("Expected %d,%d,%d got %d,%d,%d",
|
||||
spec.Agents.Count, spec.DBServers.Count, spec.Coordinators.Count,
|
||||
agents, goodDBServers, goodCoordinators,
|
||||
)
|
||||
}
|
||||
|
||||
// updateDeployment updates a deployment
|
||||
func updateDeployment(cli versioned.Interface, deploymentName, ns string, update func(*api.DeploymentSpec)) (*api.ArangoDeployment, error) {
|
||||
for {
|
||||
// Get current version
|
||||
current, err := cli.Database().ArangoDeployments(ns).Get(deploymentName, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return nil, maskAny(err)
|
||||
}
|
||||
update(¤t.Spec)
|
||||
current, err = cli.Database().ArangoDeployments(ns).Update(current)
|
||||
if k8sutil.IsConflict(err) {
|
||||
// Retry
|
||||
} else if err != nil {
|
||||
return nil, maskAny(err)
|
||||
}
|
||||
return current, nil
|
||||
}
|
||||
}
|
||||
|
||||
// removeDeployment removes a deployment
|
||||
func removeDeployment(cli versioned.Interface, deploymentName, ns string) error {
|
||||
if err := cli.Database().ArangoDeployments(ns).Delete(deploymentName, nil); err != nil && k8sutil.IsNotFound(err) {
|
||||
return maskAny(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
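
updateDeployment above is a read-modify-write loop that retries on write conflicts, the usual optimistic-concurrency pattern for Kubernetes resources. A standalone sketch of that loop against a mock versioned store (not the real clientset):

package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict: resource version changed")

type object struct {
	Version int
	Count   int
}

type store struct{ obj object }

func (s *store) get() object { return s.obj }

// update rejects writes based on a stale version, like the Kubernetes API.
func (s *store) update(o object) (object, error) {
	if o.Version != s.obj.Version {
		return object{}, errConflict // someone else wrote in between
	}
	o.Version++
	s.obj = o
	return o, nil
}

// updateObject retries the read-modify-write cycle until it commits cleanly.
func updateObject(s *store, mutate func(*object)) (object, error) {
	for {
		current := s.get() // read the latest version
		mutate(&current)   // apply the caller's change
		updated, err := s.update(current)
		if errors.Is(err, errConflict) {
			continue // lost the race: re-read and try again
		} else if err != nil {
			return object{}, err
		}
		return updated, nil
	}
}

func main() {
	s := &store{obj: object{Version: 1, Count: 3}}
	updated, _ := updateObject(s, func(o *object) { o.Count = 5 })
	fmt.Printf("count=%d version=%d\n", updated.Count, updated.Version)
}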