mirror of https://github.com/arangodb/kube-arangodb.git
Create control pod and run syncer tests when deployment is ready.
parent 0231e7eae0
commit efaf29899b
12 changed files with 592 additions and 43 deletions
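For context, the targets added to the Makefile below are intended to be driven roughly as follows. This is a minimal usage sketch, not part of the commit; the namespace and image tag are example values, while ARANGOSYNCSRCDIR, ARANGODIMAGE, DEPLOYMENTNAMESPACE and PUSHIMAGES are the variables the changed Makefile actually checks.

# Point the build at a local arangosync checkout and an Enterprise image (example values),
# then build the sync/sync-test images plus the test-control image and run the syncer tests.
export ARANGOSYNCSRCDIR=$HOME/src/arangosync     # assumed location of an arangosync checkout
export ARANGODIMAGE=arangodb/enterprise:latest   # assumed Enterprise image
make run-sync-tests DEPLOYMENTNAMESPACE=sync-tests PUSHIMAGES=1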
Makefile (80 changed lines)
@@ -104,6 +104,22 @@ endif
SOURCES := $(shell find $(SRCDIR) -name '*.go' -not -path './test/*')
DASHBOARDSOURCES := $(shell find $(DASHBOARDDIR)/src -name '*.js' -not -path './test/*') $(DASHBOARDDIR)/package.json

ifndef ARANGOSYNCSRCDIR
	ARANGOSYNCSRCDIR := $(SCRIPTDIR)/arangosync
endif
DOCKERARANGOSYNCCTRLFILE=tests/sync/Dockerfile
ifndef ARANGOSYNCTESTCTRLIMAGE
	ARANGOSYNCTESTCTRLIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-sync-test-ctrl$(IMAGESUFFIX)
endif
ifndef ARANGOSYNCTESTIMAGE
	ARANGOSYNCTESTIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-sync-test$(IMAGESUFFIX)
endif
ifndef ARANGOSYNCIMAGE
	ARANGOSYNCIMAGE := $(DOCKERNAMESPACE)/kube-arangodb-sync$(IMAGESUFFIX)
endif
ARANGOSYNCTESTCTRLBINNAME := $(PROJECT)_sync_test_ctrl
ARANGOSYNCTESTCTRLBIN := $(BINDIR)/$(ARANGOSYNCTESTCTRLBINNAME)

.PHONY: all
all: verify-generated build

@@ -298,6 +314,23 @@ docker-test: $(TESTBIN)
run-upgrade-tests:
	TESTOPTIONS="-test.run=TestUpgrade" make run-tests

.PHONY: prepare-run-tests
prepare-run-tests:
ifdef PUSHIMAGES
	docker push $(OPERATORIMAGE)
endif
ifneq ($(DEPLOYMENTNAMESPACE), default)
	$(ROOTDIR)/scripts/kube_delete_namespace.sh $(DEPLOYMENTNAMESPACE)
	kubectl create namespace $(DEPLOYMENTNAMESPACE)
endif
	kubectl apply -f $(MANIFESTPATHCRD)
	kubectl apply -f $(MANIFESTPATHSTORAGE)
	kubectl apply -f $(MANIFESTPATHDEPLOYMENT)
	kubectl apply -f $(MANIFESTPATHDEPLOYMENTREPLICATION)
	kubectl apply -f $(MANIFESTPATHTEST)
	$(ROOTDIR)/scripts/kube_create_storage.sh $(DEPLOYMENTNAMESPACE)
	$(ROOTDIR)/scripts/kube_create_license_key_secret.sh "$(DEPLOYMENTNAMESPACE)" '$(ENTERPRISELICENSE)'

.PHONY: run-tests
run-tests: docker-test
ifdef PUSHIMAGES
@@ -424,3 +457,50 @@ redeploy-operator: delete-operator manifests
	kubectl apply -f $(MANIFESTPATHDEPLOYMENTREPLICATION)
	kubectl apply -f $(MANIFESTPATHTEST)
	kubectl get pods

## ArangoSync Tests

$(ARANGOSYNCTESTCTRLBIN): $(GOBUILDDIR) $(SOURCES)
	@mkdir -p $(BINDIR)
	docker run \
		--rm \
		-v $(SRCDIR):/usr/code \
		-v $(CACHEVOL):/usr/gocache \
		-e GOCACHE=/usr/gocache \
		-e GOPATH=/usr/code/.gobuild \
		-e GOOS=linux \
		-e GOARCH=amd64 \
		-e CGO_ENABLED=0 \
		-w /usr/code/ \
		golang:$(GOVERSION) \
		go build -installsuffix cgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o /usr/code/bin/$(ARANGOSYNCTESTCTRLBINNAME) $(REPOPATH)/tests/sync

.PHONY: check-sync-vars
check-sync-vars:
ifndef ARANGOSYNCSRCDIR
	@echo ARANGOSYNCSRCDIR must point to the arangosync source directory
	@exit 1
endif
ifndef ARANGODIMAGE
	@echo ARANGODIMAGE must point to a usable ArangoDB Enterprise image
	@exit 1
endif
	@echo Using ArangoSync source at $(ARANGOSYNCSRCDIR)
	@echo Using ArangoDB image $(ARANGODIMAGE)

.PHONY: docker-sync
docker-sync: check-sync-vars
	SYNCIMAGE=$(ARANGOSYNCIMAGE) $(MAKE) -C $(ARANGOSYNCSRCDIR) docker docker-test

.PHONY: docker-sync-test-ctrl
docker-sync-test-ctrl: $(ARANGOSYNCTESTCTRLBIN)
	docker build --quiet -f $(DOCKERARANGOSYNCCTRLFILE) -t $(ARANGOSYNCTESTCTRLIMAGE) .

.PHONY: run-sync-tests
run-sync-tests: check-vars docker-sync docker-sync-test-ctrl prepare-run-tests
ifdef PUSHIMAGES
	docker push $(ARANGOSYNCTESTCTRLIMAGE)
	docker push $(ARANGOSYNCTESTIMAGE)
	docker push $(ARANGOSYNCIMAGE)
endif
	$(ROOTDIR)/scripts/kube_run_sync_tests.sh $(DEPLOYMENTNAMESPACE) '$(ARANGODIMAGE)' '$(ARANGOSYNCIMAGE)' '$(ARANGOSYNCTESTIMAGE)' '$(ARANGOSYNCTESTCTRLIMAGE)'

@@ -10,7 +10,7 @@ rules:
  resources: ["nodes"]
  verbs: ["list"]
- apiGroups: [""]
  resources: ["pods", "services", "persistentvolumes", "persistentvolumeclaims", "secrets", "serviceaccounts"]
  resources: ["pods", "services", "persistentvolumes", "persistentvolumeclaims", "secrets", "serviceaccounts", "pods/log"]
  verbs: ["*"]
- apiGroups: ["apps"]
  resources: ["daemonsets", "deployments"]

@@ -97,6 +97,14 @@ func (s DeploymentSpec) GetImage() string {
	return util.StringOrDefault(s.Image)
}

// GetSyncImage returns Sync.Image if set, otherwise the default image.
func (s DeploymentSpec) GetSyncImage() string {
	if s.Sync.HasSyncImage() {
		return s.Sync.GetSyncImage()
	}
	return s.GetImage()
}

// GetImagePullPolicy returns the value of imagePullPolicy.
func (s DeploymentSpec) GetImagePullPolicy() v1.PullPolicy {
	return util.PullPolicyOrDefault(s.ImagePullPolicy)

@@ -36,6 +36,7 @@ type SyncSpec struct {
	Authentication SyncAuthenticationSpec `json:"auth"`
	TLS            TLSSpec                `json:"tls"`
	Monitoring     MonitoringSpec         `json:"monitoring"`
	Image          *string                `json:"image"`
}

// IsEnabled returns the value of enabled.
@@ -43,6 +44,16 @@ func (s SyncSpec) IsEnabled() bool {
	return util.BoolOrDefault(s.Enabled)
}

// GetSyncImage returns the syncer image or an empty string.
func (s SyncSpec) GetSyncImage() string {
	return util.StringOrDefault(s.Image)
}

// HasSyncImage returns whether a dedicated sync image is set.
func (s SyncSpec) HasSyncImage() bool {
	return s.GetSyncImage() != ""
}

// Validate the given spec
func (s SyncSpec) Validate(mode DeploymentMode) error {
	if s.IsEnabled() && !mode.SupportsSync() {
@@ -78,6 +89,9 @@ func (s *SyncSpec) SetDefaultsFrom(source SyncSpec) {
	if s.Enabled == nil {
		s.Enabled = util.NewBoolOrNil(source.Enabled)
	}
	if s.Image == nil {
		s.Image = util.NewStringOrNil(source.Image)
	}
	s.ExternalAccess.SetDefaultsFrom(source.ExternalAccess)
	s.Authentication.SetDefaultsFrom(source.Authentication)
	s.TLS.SetDefaultsFrom(source.TLS)
@@ -95,5 +109,8 @@ func (s SyncSpec) ResetImmutableFields(fieldPrefix string, target *SyncSpec) []s
	if list := s.Authentication.ResetImmutableFields(fieldPrefix+".auth", &target.Authentication); len(list) > 0 {
		resetFields = append(resetFields, list...)
	}
	if s.GetSyncImage() != target.GetSyncImage() {
		resetFields = append(resetFields, fieldPrefix+".image")
	}
	return resetFields
}

@@ -753,6 +753,11 @@ func (in *SyncSpec) DeepCopyInto(out *SyncSpec) {
	in.Authentication.DeepCopyInto(&out.Authentication)
	in.TLS.DeepCopyInto(&out.TLS)
	in.Monitoring.DeepCopyInto(&out.Monitoring)
	if in.Image != nil {
		in, out := &in.Image, &out.Image
		*out = new(string)
		**out = **in
	}
	return
}

@@ -196,6 +196,8 @@ func (d *Deployment) ensureAccessPackage(apSecretName string) error {
		},
		Data: map[string][]byte{
			constants.SecretAccessPackageYaml: []byte(allYaml),
			constants.SecretCACertificate:     []byte(tlsCACert),
			constants.SecretTLSKeyfile:        []byte(keyfile),
		},
	}
	// Attach secret to owner

@@ -598,6 +598,11 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
			log.Debug().Str("image", spec.GetImage()).Msg("Image is not an enterprise image")
			return maskAny(fmt.Errorf("Image '%s' does not contain an Enterprise version of ArangoDB", spec.GetImage()))
		}
		// Check if the sync image is overridden by the SyncSpec
		imageID := imageInfo.ImageID
		if spec.Sync.HasSyncImage() {
			imageID = spec.Sync.GetSyncImage()
		}
		var tlsKeyfileSecretName, clientAuthCASecretName, masterJWTSecretName, clusterJWTSecretName string
		// Check master JWT secret
		masterJWTSecretName = spec.Sync.Authentication.GetJWTSecretName()
@@ -664,7 +669,7 @@ func (r *Resources) createPodForMember(spec api.DeploymentSpec, memberID string,
		if group == api.ServerGroupSyncWorkers {
			affinityWithRole = api.ServerGroupDBServers.AsRole()
		}
		if err := k8sutil.CreateArangoSyncPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, imageInfo.ImageID, lifecycleImage, spec.GetImagePullPolicy(), terminationGracePeriod, args, env,
		if err := k8sutil.CreateArangoSyncPod(kubecli, spec.IsDevelopment(), apiObject, role, m.ID, m.PodName, imageID, lifecycleImage, spec.GetImagePullPolicy(), terminationGracePeriod, args, env,
			livenessProbe, tolerations, serviceAccountName, tlsKeyfileSecretName, clientAuthCASecretName, masterJWTSecretName, clusterJWTSecretName, affinityWithRole, groupSpec.GetNodeSelector()); err != nil {
			return maskAny(err)
		}

@@ -44,6 +44,15 @@ func NewKubeClient() (kubernetes.Interface, error) {
	return c, nil
}

// MustNewKubeClient calls NewKubeClient and panics if it fails.
func MustNewKubeClient() kubernetes.Interface {
	i, err := NewKubeClient()
	if err != nil {
		panic(err)
	}
	return i
}

// NewKubeExtClient creates a new k8s api extensions client
func NewKubeExtClient() (apiextensionsclient.Interface, error) {
	cfg, err := InClusterConfig()

scripts/kube_run_sync_tests.sh (new executable file, 23 lines)
@@ -0,0 +1,23 @@
#!/bin/bash

# Run the sync integration tests via kubectl run.

DEPLOYMENTNAMESPACE=$1
ARANGODIMAGE=$2
ARANGOSYNCIMAGE=$3
ARANGOSYNCTESTIMAGE=$4
ARANGOSYNCTESTCTRLIMAGE=$5

ARANGOSYNCIMAGEID=$(docker inspect ${ARANGOSYNCIMAGE} '--format={{index .RepoDigests 0}}')
ARANGOSYNCTESTIMAGEID=$(docker inspect ${ARANGOSYNCTESTIMAGE} '--format={{index .RepoDigests 0}}')
ARANGOSYNCTESTCTRLIMAGEID=$(docker inspect ${ARANGOSYNCTESTCTRLIMAGE} '--format={{index .RepoDigests 0}}')

kubectl --namespace ${DEPLOYMENTNAMESPACE} \
    run arangodb-sync-test-controller -i --rm --quiet --restart=Never \
    --image=${ARANGOSYNCTESTCTRLIMAGEID} \
    -- \
    --arango-image=${ARANGODIMAGE} \
    --arango-sync-image=${ARANGOSYNCIMAGEID} \
    --arango-sync-test-image=${ARANGOSYNCTESTIMAGEID} \
    --license-key-secret-name=arangodb-jenkins-license-key \
    --namespace=${DEPLOYMENTNAMESPACE}
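For reference, the run-sync-tests target above invokes this script with five positional arguments. Called by hand it would look roughly like the sketch below; the namespace and image names are placeholders, not values taken from this commit. Each image has to carry a repository digest (i.e. have been pushed to or pulled from a registry), because the script resolves it via docker inspect.

scripts/kube_run_sync_tests.sh sync-tests \
    arangodb/enterprise:latest \
    example/kube-arangodb-sync:dev \
    example/kube-arangodb-sync-test:dev \
    example/kube-arangodb-sync-test-ctrl:dev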
tests/sync/Dockerfile (new file, 5 lines)
@@ -0,0 +1,5 @@
FROM scratch

ADD bin/arangodb_operator_sync_test_ctrl /usr/bin/

ENTRYPOINT [ "/usr/bin/arangodb_operator_sync_test_ctrl" ]
tests/sync/main.go (new file, 431 lines)
@@ -0,0 +1,431 @@
package main

import (
	"context"
	"crypto/tls"
	"flag"
	"fmt"
	"io"
	"log"
	"net"
	"os"
	"strconv"
	"time"

	sync "github.com/arangodb/arangosync/client"
	"github.com/arangodb/kube-arangodb/pkg/client"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
	"github.com/arangodb/kube-arangodb/pkg/util/retry"
	"github.com/pkg/errors"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	dapi "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
	rapi "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1alpha"
	"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
	"github.com/arangodb/kube-arangodb/pkg/util"
)

var (
	arangoImage          string
	arangoSyncTestImage  string
	arangoSyncImage      string
	licenseKeySecretName string
	namespace            string
)

const (
	accessPackageSecretName = "dst-access-package"
	dstDeploymentName       = "dc-dst"
	srcDeploymentName       = "dc-src"
	replicationResourceName = "dc-dst-src-replication"
	arangosyncTestPodName   = "kube-arango-sync-tests"
)

func init() {
	flag.StringVar(&arangoImage, "arango-image", "arangodb/enterprise:latest", "ArangoDB Enterprise image used for test")
	flag.StringVar(&arangoSyncTestImage, "arango-sync-test-image", "", "ArangoSync test image")
	flag.StringVar(&arangoSyncImage, "arango-sync-image", "", "ArangoSync Image used for testing")
	flag.StringVar(&licenseKeySecretName, "license-key-secret-name", "arangodb-license-key", "Secret name of the license key used for the deployments")
	flag.StringVar(&namespace, "namespace", "default", "Testing namespace")
}

// newDeployment creates a bare ArangoDeployment using the configured image and license key secret.
func newDeployment(ns, name string) *dapi.ArangoDeployment {
	return &dapi.ArangoDeployment{
		TypeMeta: metav1.TypeMeta{
			APIVersion: dapi.SchemeGroupVersion.String(),
			Kind:       dapi.ArangoDeploymentResourceKind,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
			// OwnerReferences: []metav1.OwnerReference{
			// 	metav1.OwnerReference{
			// 	},
			// },
		},
		Spec: dapi.DeploymentSpec{
			Image: util.NewString(arangoImage),
			License: dapi.LicenseSpec{
				SecretName: util.NewString(licenseKeySecretName),
			},
		},
	}
}

// newSyncDeployment creates a deployment with sync enabled and, if requested, an access package secret.
func newSyncDeployment(ns, name string, accessPackage bool) *dapi.ArangoDeployment {
	d := newDeployment(ns, name)
	d.Spec.Sync = dapi.SyncSpec{
		Enabled: util.NewBool(true),
		ExternalAccess: dapi.SyncExternalAccessSpec{
			ExternalAccessSpec: dapi.ExternalAccessSpec{
				Type: dapi.NewExternalAccessType(dapi.ExternalAccessTypeNone),
			},
		},
	}

	if accessPackage {
		d.Spec.Sync.ExternalAccess.AccessPackageSecretNames = []string{accessPackageSecretName}
	}

	if arangoSyncImage != "" {
		d.Spec.Sync.Image = util.NewString(arangoSyncImage)
	}
	return d
}

// newReplication creates a replication resource from the source to the destination deployment,
// authenticated via the access package secret.
func newReplication(ns, name string) *rapi.ArangoDeploymentReplication {
	return &rapi.ArangoDeploymentReplication{
		TypeMeta: metav1.TypeMeta{
			APIVersion: rapi.SchemeGroupVersion.String(),
			Kind:       rapi.ArangoDeploymentReplicationResourceKind,
		},
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: rapi.DeploymentReplicationSpec{
			Source: rapi.EndpointSpec{
				DeploymentName: util.NewString(srcDeploymentName),
				Authentication: rapi.EndpointAuthenticationSpec{
					KeyfileSecretName: util.NewString(accessPackageSecretName),
				},
				TLS: rapi.EndpointTLSSpec{
					CASecretName: util.NewString(accessPackageSecretName),
				},
			},
			Destination: rapi.EndpointSpec{
				DeploymentName: util.NewString(dstDeploymentName),
			},
		},
	}
}

// newArangoSyncTestJob creates a bare job object for the sync tests.
func newArangoSyncTestJob(ns, name string) *batchv1.Job {
	return &batchv1.Job{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
	}
}

// waitForSyncDeploymentReady waits until the sync master of the given deployment reports a running sync status.
func waitForSyncDeploymentReady(ctx context.Context, ns, name string, kubecli kubernetes.Interface, c versioned.Interface) error {
	return retry.Retry(func() error {
		deployment, err := c.Database().ArangoDeployments(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}

		sc, err := mustNewArangoDBSyncClient(ctx, kubecli, deployment)
		if err != nil {
			log.Printf("failed to create sync client: %s", err.Error())
			return err
		}

		info, err := sc.Master().Status(ctx)
		if err != nil {
			log.Printf("failed to fetch status: %s", err.Error())
			return err
		}

		if info.Status != sync.SyncStatusRunning {
			log.Printf("SyncStatus not running: %s", info.Status)
			return fmt.Errorf("SyncStatus not running: %s", info.Status)
		}

		return nil
	}, 5*time.Minute)
}

// setupArangoDBCluster creates the source and destination deployments plus the replication
// resource between them and waits until the destination reports a running sync status.
func setupArangoDBCluster(ctx context.Context, kube kubernetes.Interface, c versioned.Interface) error {

	dstSpec := newSyncDeployment(namespace, dstDeploymentName, false)
	srcSpec := newSyncDeployment(namespace, srcDeploymentName, true)

	if _, err := c.Database().ArangoDeployments(namespace).Create(srcSpec); err != nil {
		return err
	}
	if _, err := c.Database().ArangoDeployments(namespace).Create(dstSpec); err != nil {
		return err
	}

	replSpec := newReplication(namespace, replicationResourceName)
	if _, err := c.Replication().ArangoDeploymentReplications(namespace).Create(replSpec); err != nil {
		return err
	}

	log.Print("Deployments and Replication created")

	//if err := waitForSyncDeploymentReady(ctx, namespace, srcSpec.GetName(), kube, c); err != nil {
	//	return errors.Wrap(err, "Source Cluster not ready")
	//}

	if err := waitForSyncDeploymentReady(ctx, namespace, dstSpec.GetName(), kube, c); err != nil {
		return errors.Wrap(err, "Destination Cluster not ready")
	}

	log.Print("Deployments and Replication ready")

	return nil
}

// waitForReplicationGone waits until the replication resource has been removed.
func waitForReplicationGone(ns, name string, c versioned.Interface) error {
	return retry.Retry(func() error {
		if _, err := c.Replication().ArangoDeploymentReplications(ns).Get(name, metav1.GetOptions{}); k8sutil.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
		return fmt.Errorf("Replication resource not gone")
	}, 1*time.Minute)
}

// waitForDeploymentGone waits until the deployment resource has been removed.
func waitForDeploymentGone(ns, name string, c versioned.Interface) error {
	return retry.Retry(func() error {
		if _, err := c.Database().ArangoDeployments(ns).Get(name, metav1.GetOptions{}); k8sutil.IsNotFound(err) {
			return nil
		} else if err != nil {
			return err
		}
		return fmt.Errorf("Deployment resource %s not gone", name)
	}, 1*time.Minute)
}

// removeReplicationWaitForCompletion deletes the replication resource and waits until it is gone.
func removeReplicationWaitForCompletion(ns, name string, c versioned.Interface) error {
	if err := c.Replication().ArangoDeploymentReplications(ns).Delete(name, &metav1.DeleteOptions{}); err != nil {
		if k8sutil.IsNotFound(err) {
			return nil
		}
		return err
	}
	if err := waitForReplicationGone(ns, name, c); err != nil {
		return err
	}
	return nil
}

// removeDeploymentWaitForCompletion deletes the deployment resource and waits until it is gone.
func removeDeploymentWaitForCompletion(ns, name string, c versioned.Interface) error {
	if err := c.Database().ArangoDeployments(ns).Delete(name, &metav1.DeleteOptions{}); err != nil {
		if k8sutil.IsNotFound(err) {
			return nil
		}
		return err
	}
	if err := waitForDeploymentGone(ns, name, c); err != nil {
		return err
	}
	return nil
}

// cleanupArangoDBCluster removes the replication resource and both deployments.
func cleanupArangoDBCluster(ctx context.Context, kube kubernetes.Interface, c versioned.Interface) error {
	if err := removeReplicationWaitForCompletion(namespace, replicationResourceName, c); err != nil {
		return err
	}
	if err := removeDeploymentWaitForCompletion(namespace, dstDeploymentName, c); err != nil {
		return err
	}
	if err := removeDeploymentWaitForCompletion(namespace, srcDeploymentName, c); err != nil {
		return err
	}
	return nil
}

// waitForPodRunning waits until the given pod is ready.
func waitForPodRunning(ns, name string, kube kubernetes.Interface) error {
	return retry.Retry(func() error {
		pod, err := kube.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
		if err != nil {
			return err
		}

		if !k8sutil.IsPodReady(pod) {
			return fmt.Errorf("pod not ready")
		}
		return nil

	}, 1*time.Minute)
}

// copyPodLogs streams the logs of the given pod to stdout until the pod terminates.
func copyPodLogs(ns, name string, kube kubernetes.Interface) error {
	logs, err := kube.CoreV1().Pods(ns).GetLogs(name, &corev1.PodLogOptions{
		Follow: true,
	}).Stream()
	if err != nil {
		return err
	}

	defer logs.Close()
	if _, err := io.Copy(os.Stdout, logs); err != nil {
		return err
	}
	return nil
}

// createArangoSyncTestPod builds the pod spec that runs the ArangoSync test suite against both deployments.
func createArangoSyncTestPod(ns, name string) *corev1.Pod {
	return &corev1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      name,
			Namespace: ns,
		},
		Spec: corev1.PodSpec{
			RestartPolicy: corev1.RestartPolicyNever,
			Containers: []corev1.Container{
				corev1.Container{
					Name:            "tests",
					Image:           arangoSyncTestImage,
					ImagePullPolicy: corev1.PullAlways,
					Args:            []string{"-test.v"},
					Env: []corev1.EnvVar{
						corev1.EnvVar{
							Name:  "MASTERAENDPOINTS",
							Value: fmt.Sprintf("https://%s-sync.%s.svc:8629/", srcDeploymentName, namespace),
						},
						corev1.EnvVar{
							Name:  "MASTERBENDPOINTS",
							Value: fmt.Sprintf("https://%s-sync.%s.svc:8629/", dstDeploymentName, namespace),
						},
						corev1.EnvVar{
							Name:  "CLUSTERAENDPOINTS",
							Value: fmt.Sprintf("https://%s.%s.svc:8529/", srcDeploymentName, namespace),
						},
						corev1.EnvVar{
							Name:  "CLUSTERBENDPOINTS",
							Value: fmt.Sprintf("https://%s.%s.svc:8529/", dstDeploymentName, namespace),
						},
						corev1.EnvVar{
							Name:  "CLUSTERACACERT",
							Value: "/data/access/ca.crt",
						},
						corev1.EnvVar{
							Name:  "CLUSTERACLIENTCERT",
							Value: "/data/access/tls.keyfile",
						},
						corev1.EnvVar{
							Name:  "CLUSTERMANAGED",
							Value: "yes",
						},
					},
					VolumeMounts: []corev1.VolumeMount{
						corev1.VolumeMount{
							MountPath: "/data/access",
							Name:      "access",
						},
					},
				},
			},
			Volumes: []corev1.Volume{
				corev1.Volume{
					Name: "access",
					VolumeSource: corev1.VolumeSource{
						Secret: &corev1.SecretVolumeSource{
							SecretName: accessPackageSecretName,
						},
					},
				},
			},
		},
	}
}

// runArangoSyncTests starts the test pod, streams its log, and reports whether it succeeded.
func runArangoSyncTests(kube kubernetes.Interface) error {

	// Start a new pod with the test image
	defer kube.CoreV1().Pods(namespace).Delete(arangosyncTestPodName, &metav1.DeleteOptions{})
	podspec := createArangoSyncTestPod(namespace, arangosyncTestPodName)
	if _, err := kube.CoreV1().Pods(namespace).Create(podspec); err != nil {
		return err
	}

	log.Printf("Test pod created")

	if err := waitForPodRunning(namespace, arangosyncTestPodName, kube); err != nil {
		return err
	}

	log.Printf("Test pod running, receiving log")

	if err := copyPodLogs(namespace, arangosyncTestPodName, kube); err != nil {
		return err
	}

	pod, err := kube.CoreV1().Pods(namespace).Get(arangosyncTestPodName, metav1.GetOptions{})
	if err != nil {
		return err
	}

	if !k8sutil.IsPodSucceeded(pod) {
		return fmt.Errorf("Pod not succeeded")
	}

	return nil
}

func main() {
	flag.Parse()
	ctx := context.Background()
	kube := k8sutil.MustNewKubeClient()
	c := client.MustNewInCluster()

	defer removeReplicationWaitForCompletion(namespace, replicationResourceName, c)
	defer removeDeploymentWaitForCompletion(namespace, dstDeploymentName, c)
	defer removeDeploymentWaitForCompletion(namespace, srcDeploymentName, c)
	if err := setupArangoDBCluster(ctx, kube, c); err != nil {
		log.Printf("Failed to set up deployment: %s", err.Error())
		return
	}

	exitCode := 0

	if err := runArangoSyncTests(kube); err != nil {
		log.Printf("ArangoSync tests failed: %s", err.Error())
		exitCode = 1
	}

	if err := cleanupArangoDBCluster(ctx, kube, c); err != nil {
		log.Printf("Failed to clean up deployments: %s", err.Error())
	}

	os.Exit(exitCode)
}

// mustNewArangoDBSyncClient creates an ArangoSync client for the sync master of the given
// deployment, authenticated with the deployment's JWT secret.
func mustNewArangoDBSyncClient(ctx context.Context, kubecli kubernetes.Interface, deployment *dapi.ArangoDeployment) (sync.API, error) {
	ns := deployment.GetNamespace()
	secrets := kubecli.CoreV1().Secrets(ns)
	secretName := deployment.Spec.Sync.Authentication.GetJWTSecretName()
	jwtSecret, err := k8sutil.GetTokenSecret(secrets, secretName)
	if err != nil {
		return nil, err
	}

	// Fetch service DNS name
	dnsName := k8sutil.CreateSyncMasterClientServiceDNSName(deployment)
	ep := sync.Endpoint{"https://" + net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoSyncMasterPort))}

	api, err := sync.NewArangoSyncClient(ep, sync.AuthenticationConfig{JWTSecret: jwtSecret}, &tls.Config{InsecureSkipVerify: true})
	if err != nil {
		return nil, err
	}
	return api, nil
}

@@ -25,53 +25,21 @@ package tests
import (
	"context"
	"fmt"
	"strings"
	"testing"
	"time"

	"github.com/ghodss/yaml"

	"github.com/dchest/uniuri"
	"k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	driver "github.com/arangodb/go-driver"
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1alpha"
	"github.com/arangodb/kube-arangodb/pkg/client"
	"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
	"github.com/arangodb/kube-arangodb/pkg/util"
	"github.com/arangodb/kube-arangodb/pkg/util/constants"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
	"github.com/arangodb/kube-arangodb/pkg/util/retry"
)

// deployAccessPackage unpacks the secrets from an access package and deploys them
func deployAccessPackage(ap *v1.Secret, kube kubernetes.Interface) error {
	if allyaml, ok := ap.Data[constants.SecretAccessPackageYaml]; ok {
		secrets := strings.Split(string(allyaml), "---")
		for _, secretyaml := range secrets {
			var secret v1.Secret
			if err := yaml.Unmarshal([]byte(secretyaml), &secret); err != nil {
				return err
			}

			if _, err := kube.Core().Secrets(ap.GetNamespace()).Create(&secret); err != nil {
				return err
			}
		}

		return nil
	}
	return fmt.Errorf("Failed to read access package")
}

// removeAccessPackage deletes (fire and forget) the secrets related to an access package
func removeAccessPackage(name, ns string, kube kubernetes.Interface) {
	kube.Core().Secrets(ns).Delete(name+"-auth", &metav1.DeleteOptions{})
	kube.Core().Secrets(ns).Delete(name+"-ca", &metav1.DeleteOptions{})
}

// waitUntilReplicationNotFound waits until a replication resource is deleted
func waitUntilReplicationNotFound(ns, name string, cli versioned.Interface) error {
	return retry.Retry(func() error {
@@ -84,9 +52,9 @@ func waitUntilReplicationNotFound(ns, name string, cli versioned.Interface) erro
	}, time.Minute)
}

// TestSyncSameDC creates two clusters and configures sync between them.
// TestSyncSimple creates two clusters and configures sync between them.
// Then it creates a test collection in source and waits for it to appear in dest.
func TestSyncSameDC(t *testing.T) {
func TestSyncSimple(t *testing.T) {
	longOrSkip(t)
	img := getEnterpriseImageOrSkip(t)
	c := client.MustNewInCluster()
@@ -127,20 +95,16 @@ func TestSyncSameDC(t *testing.T) {
	// Wait for deployments to be ready
	// Wait for access package
	// Deploy access package
	ap, err := waitUntilSecret(kubecli, apname, ns, nil, deploymentReadyTimeout)
	_, err = waitUntilSecret(kubecli, apname, ns, nil, deploymentReadyTimeout)
	if err != nil {
		t.Fatalf("Failed to get access package: %v", err)
	}
	if err := deployAccessPackage(ap, kubecli); err != nil {
		t.Fatalf("Failed to deploy access package: %v", err)
	}
	defer removeAccessPackage(apname, ns, kubecli)

	// Deploy Replication Resource
	repl := newReplication("test-sync-sdc-repl")
	repl.Spec.Source.DeploymentName = util.NewString(depla.GetName())
	repl.Spec.Source.Authentication.KeyfileSecretName = util.NewString("test-syn-sdc-a-access-package-auth")
	repl.Spec.Source.TLS.CASecretName = util.NewString("test-syn-sdc-a-access-package-ca")
	repl.Spec.Source.Authentication.KeyfileSecretName = util.NewString(apname)
	repl.Spec.Source.TLS.CASecretName = util.NewString(apname)
	repl.Spec.Destination.DeploymentName = util.NewString(deplb.GetName())
	_, err = c.ReplicationV1alpha().ArangoDeploymentReplications(ns).Create(repl)
	if err != nil {