mirror of https://github.com/arangodb/kube-arangodb.git

[Cleanup] Linter code cleaning (#718)

Tomasz Mielech 2021-05-07 16:13:15 +02:00 committed by GitHub
parent e559f210f7
commit c5cc68d94c
88 changed files with 371 additions and 764 deletions


@@ -13,6 +13,6 @@ install:
- make init
script:
- make license-verify fmt-verify
- make license-verify fmt-verify linter
- make run-unit-tests
- make bin


@@ -138,9 +138,9 @@ ifdef VERBOSE
TESTVERBOSEOPTIONS := -v
endif
EXCLUDE_DIRS := tests vendor .gobuild deps tools
SOURCES_QUERY := find $(SRCDIR) -name '*.go' -type f -not -path '$(SRCDIR)/tests/*' -not -path '$(SRCDIR)/vendor/*' -not -path '$(SRCDIR)/.gobuild/*' -not -path '$(SRCDIR)/deps/*' -not -path '$(SRCDIR)/tools/*'
SOURCES := $(shell $(SOURCES_QUERY))
SOURCES_PACKAGES := $(shell $(SOURCES_QUERY) -exec dirname {} \; | sort | uniq)
DASHBOARDSOURCES := $(shell find $(DASHBOARDDIR)/src -name '*.js' -not -path './test/*') $(DASHBOARDDIR)/package.json
ifndef ARANGOSYNCSRCDIR
@@ -174,10 +174,8 @@ allall: all
# Tip: Run `eval $(minikube docker-env)` before calling make if you're developing on minikube.
#
GOLANGCI_ENABLED=deadcode gocyclo golint varcheck structcheck maligned errcheck \
ineffassign interfacer unconvert goconst \
megacheck
GOLANGCI_ENABLED=deadcode govet ineffassign staticcheck structcheck typecheck unconvert unparam unused varcheck
#GOLANGCI_ENABLED=gocyclo goconst golint maligned errcheck interfacer megacheck
#GOLANGCI_ENABLED+=dupl - disable dupl check
.PHONY: license-verify
@@ -201,11 +199,10 @@ fmt-verify: license-verify
@if [ X"$$(go run golang.org/x/tools/cmd/goimports -l $(SOURCES) | wc -l)" != X"0" ]; then echo ">> Style errors"; go run golang.org/x/tools/cmd/goimports -l $(SOURCES); exit 1; fi
.PHONY: linter
linter: fmt
@golangci-lint run --no-config --issues-exit-code=1 --deadline=30m --disable-all \
$(foreach MODE,$(GOLANGCI_ENABLED),--enable $(MODE) ) \
--exclude-use-default=false \
$(SOURCES_PACKAGES)
linter:
$(GOPATH)/bin/golangci-lint run --no-config --issues-exit-code=1 --deadline=30m --exclude-use-default=false \
--disable-all $(foreach EXCLUDE_DIR,$(EXCLUDE_DIRS),--skip-dirs $(EXCLUDE_DIR)) \
$(foreach MODE,$(GOLANGCI_ENABLED),--enable $(MODE)) ./...
.PHONY: build
build: docker manifests
@@ -599,6 +596,8 @@ init: tools update-generated $(GHRELEASE) $(RELEASE) $(TESTBIN) $(BIN) vendor
.PHONY: tools
tools: update-vendor
@echo ">> Fetching golangci-lint linter"
@go install github.com/golangci/golangci-lint/cmd/golangci-lint
@echo ">> Fetching goimports"
@go get -u golang.org/x/tools/cmd/goimports
@echo ">> Fetching license check"
@@ -653,4 +652,4 @@ synchronize-v2alpha1-with-v1:
@for file in $$(find "$(ROOT)/pkg/apis/deployment/v1/" -type f -exec realpath --relative-to "$(ROOT)/pkg/apis/deployment/v1/" {} \;); do cat "$(ROOT)/pkg/apis/deployment/v1/$${file}" | sed "s#package v1#package v2alpha1#g" | sed 's#ArangoDeploymentVersion = "v1"#ArangoDeploymentVersion = "v2alpha1"#g' > "$(ROOT)/pkg/apis/deployment/v2alpha1/$${file}"; done
@make update-generated
@make set-deployment-api-version-v2alpha1 bin
@make set-deployment-api-version-v1 bin
@make set-deployment-api-version-v1 bin
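The `linter` target above now drives golangci-lint with an explicit allow-list (deadcode, govet, ineffassign, staticcheck, structcheck, typecheck, unconvert, unparam, unused, varcheck). A contrived Go sketch, not taken from this repository, of the kind of findings those checks report:

```go
package main

import "fmt"

var answer int // varcheck/unused: package-level variable that is never read

func deadHelper() {} // deadcode/unused: declared but never called

// unparam: verbose is false at every call site, so it could be dropped.
func work(n int, verbose bool) int {
	x := 0
	x = n       // ineffassign: the previous value of x was never used
	y := int(x) // unconvert: x is already an int
	if verbose {
		fmt.Println("working on", y)
	}
	return y
}

func main() {
	fmt.Println(work(2, false))
}
```

Most of the hunks below are mechanical responses to exactly these categories: unused parameters removed, always-constant arguments inlined, and dead declarations deleted.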


@@ -28,10 +28,6 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
addMemberTimeout = time.Minute * 5
)
type Timeouts struct {
AddMember *Timeout `json:"addMember,omitempty"`
}


@@ -28,10 +28,6 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
addMemberTimeout = time.Minute * 5
)
type Timeouts struct {
AddMember *Timeout `json:"addMember,omitempty"`
}


@@ -264,7 +264,6 @@ func wrapperProgressMissing(t *testing.T, state state.State) {
newObj := refreshArangoBackup(t, handler, obj)
require.Equal(t, newObj.Status.State, backupApi.ArangoBackupStateFailed)
require.Equal(t, newObj.Status.Message, createStateMessage(state, backupApi.ArangoBackupStateFailed, fmt.Sprintf("missing field .status.backup")))
require.Equal(t, newObj.Status.Message, createStateMessage(state, backupApi.ArangoBackupStateFailed, "missing field .status.backup"))
})
}


@@ -175,7 +175,7 @@ func hasFinalizers(backup *backupApi.ArangoBackup) bool {
}
for _, finalizer := range backupApi.FinalizersArangoBackup {
if !hasFinalizer(backup, finalizer) {
if !hasFinalizer(finalizer) {
return false
}
}
@@ -183,7 +183,7 @@ func hasFinalizers(backup *backupApi.ArangoBackup) bool {
return true
}
func hasFinalizer(backup *backupApi.ArangoBackup, finalizer string) bool {
func hasFinalizer(finalizer string) bool {
if backupApi.FinalizersArangoBackup == nil {
return false
}
@@ -209,7 +209,7 @@ func appendFinalizers(backup *backupApi.ArangoBackup) []string {
old := backup.Finalizers
for _, finalizer := range backupApi.FinalizersArangoBackup {
if !hasFinalizer(backup, finalizer) {
if !hasFinalizer(finalizer) {
old = append(old, finalizer)
}
}


@@ -118,7 +118,7 @@ func setFailedState(backup *backupApi.ArangoBackup, err error) (*backupApi.Arang
updateStatusAvailable(false))
}
func createStateMessage(from, to state.State, message string) string {
func createStateMessage(from, to state.State, message string) string { // nolint:unparam
return fmt.Sprintf("Transiting from %s to %s: %s", from, to, message)
}
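The trailing directive above suppresses a single finding instead of changing the signature. A minimal standalone sketch of the same mechanism (a plain string stands in for state.State):

```go
package main

import "fmt"

// "from" is currently identical at every call site, but the signature is
// kept for symmetry; the inline comment tells golangci-lint's unparam
// check to skip this function.
func createStateMessage(from, to, message string) string { // nolint:unparam
	return fmt.Sprintf("Transiting from %s to %s: %s", from, to, message)
}

func main() {
	fmt.Println(createStateMessage("Scheduled", "Failed", "missing field .status.backup"))
}
```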


@@ -74,11 +74,7 @@ func (h *handler) Handle(item operation.Item) error {
return err
}
status, err := h.processBackupPolicy(policy.DeepCopy())
if err != nil {
return err
}
status := h.processBackupPolicy(policy.DeepCopy())
// Nothing to update, objects are equal
if reflect.DeepEqual(policy.Status, status) {
return nil
@@ -94,13 +90,13 @@ func (h *handler) Handle(item operation.Item) error {
return nil
}
func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) (backupApi.ArangoBackupPolicyStatus, error) {
func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) backupApi.ArangoBackupPolicyStatus {
if err := policy.Validate(); err != nil {
h.eventRecorder.Warning(policy, policyError, "Policy Error: %s", err.Error())
return backupApi.ArangoBackupPolicyStatus{
Message: fmt.Sprintf("Validation error: %s", err.Error()),
}, nil
}
}
now := time.Now()
@@ -111,7 +107,7 @@ func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) (bac
return backupApi.ArangoBackupPolicyStatus{
Message: fmt.Sprintf("error while parsing expr: %s", err.Error()),
}, nil
}
}
if policy.Status.Scheduled.IsZero() {
@@ -121,7 +117,7 @@ func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) (bac
Scheduled: meta.Time{
Time: next,
},
}, nil
}
}
// Check if schedule is required
@@ -135,10 +131,10 @@ func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) (bac
Scheduled: meta.Time{
Time: next,
},
}, nil
}
}
return policy.Status, nil
return policy.Status
}
// Schedule new deployments
@@ -159,7 +155,7 @@ func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) (bac
return backupApi.ArangoBackupPolicyStatus{
Scheduled: policy.Status.Scheduled,
Message: fmt.Sprintf("deployments listing failed: %s", err.Error()),
}, nil
}
}
for _, deployment := range deployments.Items {
@@ -171,7 +167,7 @@ func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) (bac
return backupApi.ArangoBackupPolicyStatus{
Scheduled: policy.Status.Scheduled,
Message: fmt.Sprintf("backup creation failed: %s", err.Error()),
}, nil
}
}
h.eventRecorder.Normal(policy, backupCreated, "Created ArangoBackup: %s/%s", b.Namespace, b.Name)
@@ -185,7 +181,7 @@ func (h *handler) processBackupPolicy(policy *backupApi.ArangoBackupPolicy) (bac
Scheduled: meta.Time{
Time: next,
},
}, nil
}
}
func (*handler) CanBeHandled(item operation.Item) bool {
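Every return path of processBackupPolicy yielded a nil error, so the hunks above drop the error result and the dead check at its call site. A reduced sketch of the pattern, with the ArangoBackupPolicy types replaced by strings:

```go
package main

import "fmt"

// Before: func process(p string) (string, error) — the error was nil on
// every path, which unparam reports. After: the error result is removed.
func process(p string) string {
	return "status of " + p
}

func main() {
	// The caller loses its now-unreachable `if err != nil` branch as well.
	status := process("nightly-policy")
	fmt.Println(status)
}
```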


@@ -69,7 +69,7 @@ func newItem(o operation.Operation, namespace, name string) operation.Item {
}
}
func newItemFromBackupPolicy(operation operation.Operation, policy *backupApi.ArangoBackupPolicy) operation.Item {
func newItemFromBackupPolicy(operation operation.Operation, policy *backupApi.ArangoBackupPolicy) operation.Item { // nolint:unparam
return newItem(operation, policy.Namespace, policy.Name)
}


@@ -102,8 +102,8 @@ func mockSimpleObject(name string, canBeHandled bool) (Handler, chan operation.I
})
}
func waitForItems(t *testing.T, i <-chan operation.Item, expectedSize int, timeout time.Duration) []operation.Item {
tmout := time.NewTimer(timeout)
func waitForItems(t *testing.T, i <-chan operation.Item, expectedSize int) []operation.Item {
tmout := time.NewTimer(time.Second)
defer tmout.Stop()
received := make([]operation.Item, 0, expectedSize)
for {


@@ -79,7 +79,7 @@ func Test_Operator_InformerProcessing(t *testing.T) {
}
// Assert
res := waitForItems(t, i, size, time.Second)
res := waitForItems(t, i, size)
assert.Len(t, res, size)
time.Sleep(50 * time.Millisecond)
@@ -140,7 +140,7 @@ func Test_Operator_MultipleInformers(t *testing.T) {
}
// Assert
res := waitForItems(t, i, size*2, time.Second)
res := waitForItems(t, i, size*2)
assert.Len(t, res, size*2)
time.Sleep(50 * time.Millisecond)
@@ -200,7 +200,7 @@ func Test_Operator_MultipleInformers_IgnoredTypes(t *testing.T) {
}
// Assert
res := waitForItems(t, i, size, time.Second)
res := waitForItems(t, i, size)
assert.Len(t, res, size)
time.Sleep(50 * time.Millisecond)
@@ -300,10 +300,10 @@ func Test_Operator_MultipleInformers_MultipleHandlers(t *testing.T) {
}
// Assert
assert.Len(t, waitForItems(t, ip, size, time.Second), size)
assert.Len(t, waitForItems(t, in, size, time.Second), size)
assert.Len(t, waitForItems(t, is, size, time.Second), size)
assert.Len(t, waitForItems(t, id, size, time.Second), size)
assert.Len(t, waitForItems(t, ip, size), size)
assert.Len(t, waitForItems(t, in, size), size)
assert.Len(t, waitForItems(t, is, size), size)
assert.Len(t, waitForItems(t, id, size), size)
time.Sleep(50 * time.Millisecond)
assert.Len(t, ip, 0)
@@ -358,7 +358,7 @@ func Test_Operator_InformerProcessing_Namespaced(t *testing.T) {
}
// Assert
res := waitForItems(t, i, 1, time.Second)
res := waitForItems(t, i, 1)
assert.Len(t, res, 1)
time.Sleep(50 * time.Millisecond)


@@ -69,7 +69,7 @@ func (cc *cache) extendHost(host string) string {
return scheme + "://" + net.JoinHostPort(host, strconv.Itoa(k8sutil.ArangoPort))
}
func (cc *cache) getClient(ctx context.Context, group api.ServerGroup, id string) (driver.Client, error) {
func (cc *cache) getClient(group api.ServerGroup, id string) (driver.Client, error) {
m, _, _ := cc.apiObjectGetter().Status.Members.ElementByID(id)
c, err := cc.factory.Client(cc.extendHost(m.GetEndpoint(k8sutil.CreatePodDNSName(cc.apiObjectGetter(), group.AsRole(), id))))
@@ -80,7 +80,7 @@ func (cc *cache) getClient(ctx context.Context, group api.ServerGroup, id string
}
func (cc *cache) get(ctx context.Context, group api.ServerGroup, id string) (driver.Client, error) {
client, err := cc.getClient(ctx, group, id)
client, err := cc.getClient(group, id)
if err != nil {
return nil, errors.WithStack(err)
}
@@ -88,7 +88,7 @@ func (cc *cache) get(ctx context.Context, group api.ServerGroup, id string) (dri
if _, err := client.Version(ctx); err == nil {
return client, nil
} else if driver.IsUnauthorized(err) {
return cc.getClient(ctx, group, id)
return cc.getClient(group, id)
} else {
return client, nil
}
@@ -103,7 +103,7 @@ func (cc *cache) Get(ctx context.Context, group api.ServerGroup, id string) (dri
return cc.get(ctx, group, id)
}
func (cc cache) GetAuth() conn.Auth {
func (cc *cache) GetAuth() conn.Auth {
return cc.factory.GetAuth()
}
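Two recurring fixes meet in this file: an unused context.Context parameter is removed from getClient, and GetAuth moves from a value receiver to a pointer receiver so the cache type uses one receiver kind throughout (mixed receivers are flagged by the staticcheck/stylecheck family). A compressed sketch under those assumptions, with the driver types reduced to strings:

```go
package main

import "fmt"

type cache struct{ auth string }

// All methods now share the pointer-receiver form; the ctx parameter
// was removed because the body never used it.
func (cc *cache) getClient(id string) string {
	return "client-for-" + id
}

// Before: func (cc cache) GetAuth() string — the lone value receiver.
func (cc *cache) GetAuth() string {
	return cc.auth
}

func main() {
	cc := &cache{auth: "jwt-token"}
	fmt.Println(cc.getClient("PRMR-1"), cc.GetAuth())
}
```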


@@ -31,9 +31,8 @@ import (
core "k8s.io/api/core/v1"
)
func modifyAffinity(name, group string, required bool, role string, mods ...func(a *core.Affinity)) *core.Affinity {
affinity := k8sutil.CreateAffinity(name, group,
required, role)
func modifyAffinity(group string, required bool, role string, mods ...func(a *core.Affinity)) *core.Affinity { // nolint:unparam
affinity := k8sutil.CreateAffinity(testDeploymentName, group, required, role)
for _, mod := range mods {
mod(affinity)
@@ -108,7 +107,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity)
}),
@@ -170,7 +169,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight)
}),
@@ -235,7 +234,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight)
a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity)
@@ -305,7 +304,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, weight, weight, weight, weight)
a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, testAffinity, testAffinity)
@@ -384,7 +383,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
@@ -449,7 +448,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
@@ -517,7 +516,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
@@ -590,7 +589,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
if a.PodAffinity == nil {
a.PodAffinity = &core.PodAffinity{}
@@ -671,7 +670,7 @@ func TestEnsurePod_ArangoDB_NodeAffinity(t *testing.T) {
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
firstDBServerStatus.ID,
Subdomain: testDeploymentName + "-int",
Affinity: modifyAffinity(testDeploymentName, api.ServerGroupDBServersString,
Affinity: modifyAffinity(api.ServerGroupDBServersString,
false, "", func(a *core.Affinity) {
f := a.NodeAffinity.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms[0]


@@ -1344,7 +1344,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, true, false),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, true),
Ports: createTestPorts(),
ImagePullPolicy: core.PullIfNotPresent,
Resources: emptyResources,
@@ -1410,7 +1410,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForSingleMode(singleStatus.ID, true, true, false),
Command: createTestCommandForSingleMode(true, true),
Ports: createTestPorts(),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),


@@ -187,7 +187,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, false, func() k8sutil.OptionPairs {
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, func() k8sutil.OptionPairs {
args := k8sutil.NewOptionPair()
args.Add("--foxx.queues", true)
@@ -250,7 +250,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, false, func() k8sutil.OptionPairs {
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, func() k8sutil.OptionPairs {
args := k8sutil.NewOptionPair()
args.Add("--foxx.queues", false)
@@ -313,7 +313,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, false, func() k8sutil.OptionPairs {
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, func() k8sutil.OptionPairs {
args := k8sutil.NewOptionPair()
args.Add("--foxx.queues", true)
@@ -375,7 +375,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForSingleMode(singleStatus.ID, false, false, false, func() k8sutil.OptionPairs {
Command: createTestCommandForSingleMode(false, false, func() k8sutil.OptionPairs {
args := k8sutil.NewOptionPair()
args.Add("--foxx.queues", true)
@@ -440,7 +440,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForSingleMode(singleStatus.ID, false, false, false, func() k8sutil.OptionPairs {
Command: createTestCommandForSingleMode(false, false, func() k8sutil.OptionPairs {
args := k8sutil.NewOptionPair()
args.Add("--foxx.queues", false)
@@ -505,7 +505,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForSingleMode(singleStatus.ID, false, false, false, func() k8sutil.OptionPairs {
Command: createTestCommandForSingleMode(false, false, func() k8sutil.OptionPairs {
args := k8sutil.NewOptionPair()
args.Add("--foxx.queues", true)


@@ -185,7 +185,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
},
Resources: emptyResources,
LivenessProbe: createTestLivenessProbe(httpProbe, false, "", k8sutil.ArangoPort),
ReadinessProbe: createTestReadinessSimpleProbe(httpProbe, false, "", k8sutil.ArangoPort),
ReadinessProbe: createTestReadinessSimpleProbe(httpProbe, false, ""),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
@@ -292,7 +292,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
},
Resources: emptyResources,
LivenessProbe: createTestLivenessProbe(httpProbe, false, "", k8sutil.ArangoPort),
ReadinessProbe: createTestReadinessSimpleProbe(httpProbe, false, "", k8sutil.ArangoPort),
ReadinessProbe: createTestReadinessSimpleProbe(httpProbe, false, ""),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
},
@@ -336,7 +336,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, false),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false),
Ports: createTestPorts(),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@@ -392,7 +392,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false, false),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false),
Ports: createTestPorts(),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),


@@ -37,7 +37,7 @@ import (
type envFunc func() []core.EnvVar
func withEnvs(t *testing.T, f ...envFunc) []core.EnvVar {
func withEnvs(f ...envFunc) []core.EnvVar {
var e []core.EnvVar
for _, c := range f {
@@ -47,14 +47,12 @@ func withEnvs(t *testing.T, f ...envFunc) []core.EnvVar {
return e
}
func withDefaultEnvs(t *testing.T, requirements core.ResourceRequirements, f ...envFunc) []core.EnvVar {
func withDefaultEnvs(t *testing.T, requirements core.ResourceRequirements) []core.EnvVar {
var q []envFunc
q = append(q, resourceLimitAsEnv(t, requirements))
q = append(q, f...)
return withEnvs(t, q...)
return withEnvs(q...)
}
func resourceLimitAsEnv(t *testing.T, requirements core.ResourceRequirements) envFunc {
@ -196,7 +194,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
Env: withEnvs(t, resourceCPULimitAsEnv(t, resourcesUnfiltered)),
Env: withEnvs(resourceCPULimitAsEnv(t, resourcesUnfiltered)),
LivenessProbe: createTestLivenessProbe(httpProbe, false, "", k8sutil.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),


@@ -38,7 +38,7 @@ import (
core "k8s.io/api/core/v1"
)
func createTLSSNISecret(t *testing.T, client kubernetes.Interface, name, namespace, key, value string) {
func createTLSSNISecret(t *testing.T, client kubernetes.Interface, name, namespace string) {
secret := core.Secret{
ObjectMeta: meta.ObjectMeta{
Name: name,
@@ -47,10 +47,7 @@ func createTLSSNISecret(t *testing.T, client kubernetes.Interface, name, namespa
Type: core.SecretTypeOpaque,
Data: map[string][]byte{},
}
if key != "" {
secret.Data[key] = []byte(value)
}
secret.Data[constants.SecretTLSKeyfile] = []byte("")
_, err := client.CoreV1().Secrets(namespace).Create(context.Background(), &secret, meta.CreateOptions{})
require.NoError(t, err)
@@ -88,8 +85,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@@ -113,7 +110,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false, false),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false),
Ports: createTestPorts(),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@@ -163,8 +160,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@@ -188,7 +185,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false, false),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false),
Ports: createTestPorts(),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@@ -238,8 +235,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@@ -263,7 +260,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
{
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false, false),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false),
Ports: createTestPorts(),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@@ -313,8 +310,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@@ -355,7 +352,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
Name: k8sutil.ServerContainerName,
Image: testImage,
Command: func() []string {
args := createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false, false)
args := createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false)
args = append(args, fmt.Sprintf("--ssl.server-name-indication=a=%s/sni1/tls.keyfile", k8sutil.TLSSNIKeyfileVolumeMountDir),
fmt.Sprintf("--ssl.server-name-indication=b=%s/sni1/tls.keyfile", k8sutil.TLSSNIKeyfileVolumeMountDir),
fmt.Sprintf("--ssl.server-name-indication=c=%s/sni2/tls.keyfile", k8sutil.TLSSNIKeyfileVolumeMountDir),
@@ -421,8 +418,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace(), constants.SecretTLSKeyfile, "")
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{


@@ -120,7 +120,7 @@ func modTestLivenessProbe(mode string, secure bool, authorization string, port i
return probe
}
func createTestReadinessSimpleProbe(mode string, secure bool, authorization string, port int) *core.Probe {
func createTestReadinessSimpleProbe(mode string, secure bool, authorization string) *core.Probe {
probe := createTestReadinessProbe(mode, secure, authorization)
probe.InitialDelaySeconds = 15
@@ -255,7 +255,7 @@ func createTestCommandForDBServer(name string, tls, auth, encryptionRocksDB bool
return append(command, args.Unique().AsArgs()...)
}
func createTestCommandForCoordinator(name string, tls, auth, encryptionRocksDB bool, mods ...func() k8sutil.OptionPairs) []string {
func createTestCommandForCoordinator(name string, tls, auth bool, mods ...func() k8sutil.OptionPairs) []string {
command := []string{resources.ArangoDExecutor}
args := k8sutil.OptionPairs{}
@@ -277,11 +277,6 @@ func createTestCommandForCoordinator(name string, tls, auth, encryptionRocksDB b
args.Add("--foxx.queues", "true")
args.Add("--log.level", "INFO")
args.Add("--log.output", "+")
if encryptionRocksDB {
args.Add("--rocksdb.encryption-keyfile", "/secrets/rocksdb/encryption/key")
}
args.Add("--server.authentication", auth)
if tls {
@@ -309,7 +304,7 @@ func createTestCommandForCoordinator(name string, tls, auth, encryptionRocksDB b
return append(command, args.Unique().AsArgs()...)
}
func createTestCommandForSingleMode(name string, tls, auth, encryptionRocksDB bool, mods ...func() k8sutil.OptionPairs) []string {
func createTestCommandForSingleMode(tls, auth bool, mods ...func() k8sutil.OptionPairs) []string {
command := []string{resources.ArangoDExecutor}
args := k8sutil.OptionPairs{}
@@ -318,11 +313,6 @@ func createTestCommandForSingleMode(name string, tls, auth, encryptionRocksDB bo
args.Add("--foxx.queues", "true")
args.Add("--log.level", "INFO")
args.Add("--log.output", "+")
if encryptionRocksDB {
args.Add("--rocksdb.encryption-keyfile", "/secrets/rocksdb/encryption/key")
}
args.Add("--server.authentication", auth)
if tls {


@@ -101,9 +101,9 @@ func cmdRun(cmd *cobra.Command, args []string) {
}
if feature.EnterpriseRequired() {
println(fmt.Sprintf("ArangoDB Edition Required: Enterprise"))
println("ArangoDB Edition Required: Enterprise")
} else {
println(fmt.Sprintf("ArangoDB Edition Required: Community, Enterprise"))
println("ArangoDB Edition Required: Community, Enterprise")
}
println()
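fmt.Sprintf with no format verbs is just its literal argument, which is what the static analysis flags in the hunk above. A small sketch:

```go
package main

import "fmt"

func main() {
	enterprise := false
	if enterprise {
		// Sprintf earns its keep only when there is a verb to expand:
		fmt.Println(fmt.Sprintf("ArangoDB Edition Required: %s", "Enterprise"))
	} else {
		// Without verbs the wrapper is redundant; pass the literal directly.
		fmt.Println("ArangoDB Edition Required: Community, Enterprise")
	}
}
```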


@@ -116,17 +116,17 @@ func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) {
// listenForSecretEvents keep listening for changes in Secrets's until the given channel is closed.
func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) {
getSecret := func(obj interface{}) (*v1.Secret, bool) {
secret, ok := obj.(*v1.Secret)
getSecret := func(obj interface{}) bool {
_, ok := obj.(*v1.Secret)
if !ok {
tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
if !ok {
return nil, false
return false
}
secret, ok = tombstone.Obj.(*v1.Secret)
return secret, ok
_, ok = tombstone.Obj.(*v1.Secret)
return ok
}
return secret, true
return true
}
rw := k8sutil.NewResourceWatcher(
@@ -138,17 +138,17 @@ func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) {
cache.ResourceEventHandlerFuncs{
// Note: For secrets we look at all of them because they do not have to be owned by this deployment.
AddFunc: func(obj interface{}) {
if _, ok := getSecret(obj); ok {
if getSecret(obj) {
d.triggerInspection()
}
},
UpdateFunc: func(oldObj, newObj interface{}) {
if _, ok := getSecret(newObj); ok {
if getSecret(newObj) {
d.triggerInspection()
}
},
DeleteFunc: func(obj interface{}) {
if _, ok := getSecret(obj); ok {
if getSecret(obj) {
d.triggerInspection()
}
},
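Callers of the getSecret closure only ever tested the boolean, so the secret value itself was dead weight; the closure now reports membership only. A standalone sketch with simplified stand-in types for the client-go cache machinery:

```go
package main

import "fmt"

type secret struct{ name string }

// deletedFinalStateUnknown mimics the cache tombstone wrapper.
type deletedFinalStateUnknown struct{ obj interface{} }

// isSecret keeps only the part the callers used: the yes/no answer.
func isSecret(obj interface{}) bool {
	if _, ok := obj.(*secret); ok {
		return true
	}
	if t, ok := obj.(deletedFinalStateUnknown); ok {
		_, ok := t.obj.(*secret)
		return ok
	}
	return false
}

func main() {
	fmt.Println(isSecret(&secret{name: "tls-ca"}))                  // true
	fmt.Println(isSecret(deletedFinalStateUnknown{obj: &secret{}})) // true
	fmt.Println(isSecret("not a secret"))                           // false
}
```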


@@ -159,8 +159,8 @@ func (a actionBootstrapSetPassword) ensureUserPasswordSecret(ctx context.Context
return token, nil
} else {
user, pass, err := k8sutil.GetSecretAuthCredentials(auth)
if err == nil && user == user {
_, pass, err := k8sutil.GetSecretAuthCredentials(auth)
if err == nil {
return pass, nil
}
return "", errors.Newf("invalid secret format in secret %s", secret)


@@ -69,7 +69,7 @@ func (a *encryptionKeyStatusUpdateAction) Start(ctx context.Context) (bool, erro
return true, nil
}
keyHashes := secretKeysToListWithPrefix("sha256:", f)
keyHashes := secretKeysToListWithPrefix(f)
if err = a.actionCtx.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
if len(keyHashes) == 0 {


@@ -66,10 +66,6 @@ func newActionImpl(log zerolog.Logger, action api.Action, actionCtx ActionContex
return newBaseActionImpl(log, action, actionCtx, NewTimeoutFetcher(timeout), memberIDRef)
}
func newBaseActionImplDefRef(log zerolog.Logger, action api.Action, actionCtx ActionContext, timeout TimeoutFetcher) actionImpl {
return newBaseActionImpl(log, action, actionCtx, timeout, &action.MemberID)
}
func newBaseActionImpl(log zerolog.Logger, action api.Action, actionCtx ActionContext, timeout TimeoutFetcher, memberIDRef *string) actionImpl {
if memberIDRef == nil {
panic("Action cannot have nil reference to member!")


@@ -64,7 +64,7 @@ func (t *tlsSNIUpdate) CheckProgress(ctx context.Context) (bool, bool, error) {
return true, false, nil
}
fetchedSecrets, err := mapTLSSNIConfig(t.log, *sni, t.actionCtx.GetCachedStatus())
fetchedSecrets, err := mapTLSSNIConfig(*sni, t.actionCtx.GetCachedStatus())
if err != nil {
t.log.Warn().Err(err).Msg("Unable to get SNI desired state")
return true, false, nil


@@ -66,7 +66,7 @@ func (a *tlsKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) {
return true, nil
}
keyHashes := secretKeysToListWithPrefix("sha256:", f)
keyHashes := secretKeysToListWithPrefix(f)
if err = a.actionCtx.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
r := false


@@ -62,14 +62,14 @@ func (a *actionWaitForMemberInSync) Start(ctx context.Context) (bool, error) {
// CheckProgress checks the progress of the action.
// Returns true if the action is completely finished, false otherwise.
func (a *actionWaitForMemberInSync) CheckProgress(ctx context.Context) (bool, bool, error) {
func (a *actionWaitForMemberInSync) CheckProgress(_ context.Context) (bool, bool, error) {
member, ok := a.actionCtx.GetMemberStatusByID(a.MemberID())
if !ok || member.Phase == api.MemberPhaseFailed {
a.log.Debug().Msg("Member in failed phase")
return true, false, nil
}
ready, err := a.check(ctx)
ready, err := a.check()
if err != nil {
return false, false, err
}
@@ -77,7 +77,7 @@ func (a *actionWaitForMemberInSync) CheckProgress(ctx context.Context) (bool, bo
return ready, false, nil
}
func (a *actionWaitForMemberInSync) check(ctx context.Context) (bool, error) {
func (a *actionWaitForMemberInSync) check() (bool, error) {
spec := a.actionCtx.GetSpec()
groupSpec := spec.GetServerGroupSpec(a.action.Group)
@@ -88,13 +88,13 @@ func (a *actionWaitForMemberInSync) check(ctx context.Context) (bool, error) {
switch spec.Mode.Get() {
case api.DeploymentModeCluster:
return a.checkCluster(ctx, spec, groupSpec)
return a.checkCluster()
default:
return true, nil
}
}
func (a *actionWaitForMemberInSync) checkCluster(ctx context.Context, spec api.DeploymentSpec, groupSpec api.ServerGroupSpec) (bool, error) {
func (a *actionWaitForMemberInSync) checkCluster() (bool, error) {
if !a.actionCtx.GetShardSyncStatus() {
a.log.Info().Str("mode", "cluster").Msgf("Shards are not in sync")
return false, nil
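CheckProgress above keeps its context.Context parameter to satisfy the action interface but no longer uses it, so the parameter is renamed to `_`. A minimal sketch of that idiom (interface and types are illustrative, not the operator's real API):

```go
package main

import (
	"context"
	"fmt"
)

type action interface {
	CheckProgress(ctx context.Context) (bool, bool, error)
}

type waitAction struct{}

// The underscore documents that the parameter exists only for the
// interface contract; the body no longer needs the context.
func (a waitAction) CheckProgress(_ context.Context) (bool, bool, error) {
	return true, false, nil
}

func main() {
	var act action = waitAction{}
	done, abort, err := act.CheckProgress(context.Background())
	fmt.Println(done, abort, err)
}
```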


@@ -96,7 +96,7 @@ func (a *actionWaitForMemberUp) CheckProgress(ctx context.Context) (bool, bool,
if a.action.Group == api.ServerGroupAgents {
return a.checkProgressAgent(ctxChild)
}
return a.checkProgressCluster(ctxChild)
return a.checkProgressCluster()
}
}
@@ -162,7 +162,7 @@ func (a *actionWaitForMemberUp) checkProgressAgent(ctx context.Context) (bool, b
// checkProgressCluster checks the progress of the action in the case
// of a cluster deployment (coordinator/dbserver).
func (a *actionWaitForMemberUp) checkProgressCluster(ctx context.Context) (bool, bool, error) {
func (a *actionWaitForMemberUp) checkProgressCluster() (bool, bool, error) {
log := a.log
h, err := a.actionCtx.GetDeploymentHealth()
if err != nil {


@@ -35,11 +35,9 @@ import (
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/client"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/rs/zerolog"
)
func mapTLSSNIConfig(log zerolog.Logger, sni api.TLSSNISpec, cachedStatus inspectorInterface.Inspector) (map[string]string, error) {
func mapTLSSNIConfig(sni api.TLSSNISpec, cachedStatus inspectorInterface.Inspector) (map[string]string, error) {
fetchedSecrets := map[string]string{}
mapping := sni.Mapping


@@ -91,9 +91,8 @@ func (d *Reconciler) CreatePlan(ctx context.Context, cachedStatus inspectorInter
return nil, true
}
func fetchAgency(ctx context.Context, log zerolog.Logger,
spec api.DeploymentSpec, status api.DeploymentStatus,
cache inspectorInterface.Inspector, context PlanBuilderContext) (*agency.ArangoPlanDatabases, error) {
func fetchAgency(ctx context.Context, spec api.DeploymentSpec, status api.DeploymentStatus,
context PlanBuilderContext) (*agency.ArangoPlanDatabases, error) {
if spec.GetMode() != api.DeploymentModeCluster && spec.GetMode() != api.DeploymentModeActiveFailover {
return nil, nil
} else if status.Members.Agents.MembersReady() > 0 {
@@ -119,7 +118,7 @@ func createPlan(ctx context.Context, log zerolog.Logger, apiObject k8sutil.APIOb
}
// Fetch agency plan
agencyPlan, agencyErr := fetchAgency(ctx, log, spec, status, cachedStatus, builderCtx)
agencyPlan, agencyErr := fetchAgency(ctx, spec, status, builderCtx)
// Check for various scenario's
var plan api.Plan


@@ -132,7 +132,7 @@ func createEncryptionKey(ctx context.Context,
}
}
plan, failed := areEncryptionKeysUpToDate(ctx, log, apiObject, spec, status, cachedStatus, context, keyfolder)
plan, failed := areEncryptionKeysUpToDate(ctx, log, spec, status, context, keyfolder)
if !plan.IsEmpty() {
return plan
}
@@ -154,7 +154,7 @@ func createEncryptionKeyStatusUpdate(ctx context.Context,
return nil
}
if createEncryptionKeyStatusUpdateRequired(ctx, log, apiObject, spec, status, cachedStatus, context) {
if createEncryptionKeyStatusUpdateRequired(log, spec, status, cachedStatus, context) {
return api.Plan{api.NewAction(api.ActionTypeEncryptionKeyStatusUpdate, api.ServerGroupUnknown, "")}
}
@@ -162,9 +162,7 @@ }
}
func createEncryptionKeyStatusUpdateRequired(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus,
cachedStatus inspectorInterface.Inspector, context PlanBuilderContext) bool {
if skipEncryptionPlan(spec, status) {
return false
@@ -176,7 +174,7 @@ func createEncryptionKeyStatusUpdateRequired(ctx context.Context,
return false
}
keyHashes := secretKeysToListWithPrefix("sha256:", keyfolder)
keyHashes := secretKeysToListWithPrefix(keyfolder)
if !util.CompareStringArray(keyHashes, status.Hashes.Encryption.Keys) {
return true
@@ -241,11 +239,8 @@ func createEncryptionKeyCleanPlan(ctx context.Context,
return api.Plan{}
}
func areEncryptionKeysUpToDate(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
cachedStatus inspectorInterface.Inspector, context PlanBuilderContext,
folder *core.Secret) (plan api.Plan, failed bool) {
func areEncryptionKeysUpToDate(ctx context.Context, log zerolog.Logger, spec api.DeploymentSpec,
status api.DeploymentStatus, context PlanBuilderContext, folder *core.Secret) (plan api.Plan, failed bool) {
status.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error {
if !pod.GroupEncryptionSupported(spec.Mode.Get(), group) {
@@ -253,7 +248,7 @@ func areEncryptionKeysUpToDate(ctx context.Context,
}
for _, m := range list {
if updateRequired, failedMember := isEncryptionKeyUpToDate(ctx, log, apiObject, spec, status, cachedStatus, context, group, m, folder); failedMember {
if updateRequired, failedMember := isEncryptionKeyUpToDate(ctx, log, status, context, group, m, folder); failedMember {
failed = true
continue
} else if updateRequired {
@@ -269,9 +264,8 @@ }
}
func isEncryptionKeyUpToDate(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
cachedStatus inspectorInterface.Inspector, planCtx PlanBuilderContext,
log zerolog.Logger, status api.DeploymentStatus,
planCtx PlanBuilderContext,
group api.ServerGroup, m api.MemberStatus,
folder *core.Secret) (updateRequired bool, failed bool) {
if m.Phase != api.MemberPhaseCreated {


@@ -88,7 +88,7 @@ func createJWTKeyUpdate(ctx context.Context,
return addJWTPropagatedPlanAction(status, api.NewAction(api.ActionTypeJWTSetActive, api.ServerGroupUnknown, "", "Set active key and add token field").AddParam(checksum, jwtSha))
}
plan, failed := areJWTTokensUpToDate(ctx, log, apiObject, spec, status, cachedStatus, context, folder)
plan, failed := areJWTTokensUpToDate(ctx, log, status, context, folder)
if len(plan) > 0 {
return plan
}
@@ -125,17 +125,15 @@ func createJWTStatusUpdate(ctx context.Context,
return nil
}
if createJWTStatusUpdateRequired(ctx, log, apiObject, spec, status, cachedStatus, context) {
if createJWTStatusUpdateRequired(log, apiObject, spec, status, cachedStatus) {
return addJWTPropagatedPlanAction(status, api.NewAction(api.ActionTypeJWTStatusUpdate, api.ServerGroupUnknown, "", "Update status"))
}
return nil
}
func createJWTStatusUpdateRequired(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
cachedStatus inspectorInterface.Inspector, context PlanBuilderContext) bool {
func createJWTStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec,
status api.DeploymentStatus, cachedStatus inspectorInterface.Inspector) bool {
folder, err := ensureJWTFolderSupport(spec, status)
if err != nil {
log.Error().Err(err).Msgf("Action not supported")
@@ -218,11 +216,8 @@ func createJWTStatusUpdateRequired(ctx context.Context,
return false
}
func areJWTTokensUpToDate(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
cachedStatus inspectorInterface.Inspector, planCtx PlanBuilderContext,
folder *core.Secret) (plan api.Plan, failed bool) {
func areJWTTokensUpToDate(ctx context.Context, log zerolog.Logger, status api.DeploymentStatus,
planCtx PlanBuilderContext, folder *core.Secret) (plan api.Plan, failed bool) {
gCtx, c := context.WithTimeout(ctx, 2*time.Second)
defer c()
@@ -230,7 +225,7 @@ func areJWTTokensUpToDate(ctx context.Context,
for _, m := range list {
nCtx, c := context.WithTimeout(gCtx, 500*time.Millisecond)
defer c()
if updateRequired, failedMember := isJWTTokenUpToDate(nCtx, log, apiObject, spec, status, cachedStatus, planCtx, group, m, folder); failedMember {
if updateRequired, failedMember := isJWTTokenUpToDate(nCtx, log, status, planCtx, group, m, folder); failedMember {
failed = true
continue
} else if updateRequired {
@@ -245,12 +240,8 @@
return
}
func isJWTTokenUpToDate(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
cachedStatus inspectorInterface.Inspector, context PlanBuilderContext,
group api.ServerGroup, m api.MemberStatus,
folder *core.Secret) (updateRequired bool, failed bool) {
func isJWTTokenUpToDate(ctx context.Context, log zerolog.Logger, status api.DeploymentStatus, context PlanBuilderContext,
group api.ServerGroup, m api.MemberStatus, folder *core.Secret) (updateRequired bool, failed bool) {
if m.Phase != api.MemberPhaseCreated {
return false, true
}


@@ -30,7 +30,6 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
backupv1 "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/pod"
"github.com/rs/zerolog"
@@ -61,7 +60,7 @@ func createRestorePlan(ctx context.Context,
}
if spec.RocksDB.IsEncrypted() {
if ok, p := createRestorePlanEncryption(ctx, log, spec, status, context, backup); !ok {
if ok, p := createRestorePlanEncryption(ctx, log, spec, status, context); !ok {
return nil
} else if !p.IsEmpty() {
return p
@@ -94,7 +93,7 @@ func restorePlan(mode api.DeploymentMode) api.Plan {
return p
}
func createRestorePlanEncryption(ctx context.Context, log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext, backup *backupv1.ArangoBackup) (bool, api.Plan) {
func createRestorePlanEncryption(ctx context.Context, log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (bool, api.Plan) {
if spec.RestoreEncryptionSecret != nil {
if !spec.RocksDB.IsEncrypted() {
return true, nil


@@ -36,7 +36,6 @@ import (
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
"github.com/rs/zerolog"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var (
@@ -116,7 +115,7 @@ func createRotateOrUpgradePlanInternal(ctx context.Context, log zerolog.Logger,
!decision.AutoUpgradeNeeded)
} else {
// Use new level of rotate logic
rotNeeded, reason := podNeedsRotation(ctx, log, pod, apiObject, spec, group, status, m, cachedStatus, context)
rotNeeded, reason := podNeedsRotation(ctx, log, pod, spec, group, status, m, cachedStatus, context)
if rotNeeded {
newPlan = createRotateMemberPlan(log, m, group, reason)
}
@@ -283,7 +282,7 @@ func memberImageInfo(spec api.DeploymentSpec, status api.MemberStatus, images ap
// given pod differs from what it should be according to the
// given deployment spec.
// When true is returned, a reason for the rotation is already returned.
func podNeedsRotation(ctx context.Context, log zerolog.Logger, p *core.Pod, apiObject metav1.Object, spec api.DeploymentSpec,
func podNeedsRotation(ctx context.Context, log zerolog.Logger, p *core.Pod, spec api.DeploymentSpec,
group api.ServerGroup, status api.DeploymentStatus, m api.MemberStatus,
cachedStatus inspectorInterface.Inspector, planCtx PlanBuilderContext) (bool, string) {
if m.PodUID != p.UID {


@@ -59,6 +59,8 @@ import (
core "k8s.io/api/core/v1"
)
const pvcName = "pvc_test"
var _ PlanBuilderContext = &testContext{}
var _ Context = &testContext{}
@@ -614,7 +616,7 @@ func TestCreatePlan(t *testing.T) {
{
Name: "Change Storage for DBServers",
PVCS: map[string]*core.PersistentVolumeClaim{
"pvc_test": {
pvcName: {
Spec: core.PersistentVolumeClaimSpec{
StorageClassName: util.NewString("oldStorage"),
},
@@ -633,7 +635,7 @@
},
}
ad.Status.Members.DBServers[0].Phase = api.MemberPhaseCreated
ad.Status.Members.DBServers[0].PersistentVolumeClaimName = "pvc_test"
ad.Status.Members.DBServers[0].PersistentVolumeClaimName = pvcName
},
ExpectedPlan: []api.Action{
api.NewAction(api.ActionTypeDisableClusterScaling, api.ServerGroupDBServers, ""),
@@ -649,7 +651,7 @@
{
Name: "Change Storage for Agents with deprecated storage class name",
PVCS: map[string]*core.PersistentVolumeClaim{
"pvc_test": {
pvcName: {
Spec: core.PersistentVolumeClaimSpec{
StorageClassName: util.NewString("oldStorage"),
},
@@ -664,7 +666,7 @@
StorageClassName: util.NewString("newStorage"),
}
ad.Status.Members.Agents[0].Phase = api.MemberPhaseCreated
ad.Status.Members.Agents[0].PersistentVolumeClaimName = "pvc_test"
ad.Status.Members.Agents[0].PersistentVolumeClaimName = pvcName
},
ExpectedPlan: []api.Action{
api.NewAction(api.ActionTypeShutdownMember, api.ServerGroupAgents, ""),
@@ -677,7 +679,7 @@
{
Name: "Storage for Coordinators is not possible",
PVCS: map[string]*core.PersistentVolumeClaim{
"pvc_test": {
pvcName: {
Spec: core.PersistentVolumeClaimSpec{
StorageClassName: util.NewString("oldStorage"),
},
@@ -696,7 +698,7 @@
},
}
ad.Status.Members.Coordinators[0].Phase = api.MemberPhaseCreated
ad.Status.Members.Coordinators[0].PersistentVolumeClaimName = "pvc_test"
ad.Status.Members.Coordinators[0].PersistentVolumeClaimName = pvcName
},
ExpectedPlan: []api.Action{},
ExpectedLog: "Storage class has changed - pod needs replacement",


@@ -91,7 +91,7 @@ func createTLSStatusUpdate(ctx context.Context,
return nil
}
if createTLSStatusUpdateRequired(ctx, log, apiObject, spec, status, cachedStatus, context) {
if createTLSStatusUpdateRequired(log, apiObject, spec, status, cachedStatus) {
return api.Plan{api.NewAction(api.ActionTypeTLSKeyStatusUpdate, api.ServerGroupUnknown, "", "Update status")}
}
@@ -116,10 +116,8 @@ func createTLSStatusPropagated(ctx context.Context,
return nil
}
func createTLSStatusUpdateRequired(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
cachedStatus inspectorInterface.Inspector, context PlanBuilderContext) bool {
func createTLSStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec,
status api.DeploymentStatus, cachedStatus inspectorInterface.Inspector) bool {
if !spec.TLS.IsSecure() {
return false
}
@@ -130,7 +128,7 @@ func createTLSStatusUpdateRequired(ctx context.Context,
return false
}
keyHashes := secretKeysToListWithPrefix("sha256:", trusted)
keyHashes := secretKeysToListWithPrefix(trusted)
if len(keyHashes) == 0 {
return false
@@ -310,7 +308,7 @@ func createKeyfileRenewalPlanDefault(ctx context.Context,
lCtx, c := context.WithTimeout(ctx, 500*time.Millisecond)
defer c()
if renew, recreate := keyfileRenewalRequired(lCtx, log, apiObject, spec, status, cachedStatus, planCtx, group, member, api.TLSRotateModeRecreate); renew {
if renew, recreate := keyfileRenewalRequired(lCtx, log, apiObject, spec, cachedStatus, planCtx, group, member, api.TLSRotateModeRecreate); renew {
log.Info().Msg("Renewal of keyfile required - Recreate")
if recreate {
plan = append(plan, api.NewAction(api.ActionTypeCleanTLSKeyfileCertificate, group, member.ID, "Remove server keyfile and enforce renewal"))
@@ -344,7 +342,7 @@ func createKeyfileRenewalPlanInPlace(ctx context.Context,
lCtx, c := context.WithTimeout(ctx, 500*time.Millisecond)
defer c()
if renew, recreate := keyfileRenewalRequired(lCtx, log, apiObject, spec, status, cachedStatus, planCtx, group, member, api.TLSRotateModeInPlace); renew {
if renew, recreate := keyfileRenewalRequired(lCtx, log, apiObject, spec, cachedStatus, planCtx, group, member, api.TLSRotateModeInPlace); renew {
log.Info().Msg("Renewal of keyfile required - InPlace")
if recreate {
plan = append(plan, api.NewAction(api.ActionTypeCleanTLSKeyfileCertificate, group, member.ID, "Remove server keyfile and enforce renewal"))
@@ -448,7 +446,7 @@ func checkServerValidCertRequest(ctx context.Context, context PlanBuilderContext
func keyfileRenewalRequired(ctx context.Context,
log zerolog.Logger, apiObject k8sutil.APIObject,
spec api.DeploymentSpec, status api.DeploymentStatus,
spec api.DeploymentSpec,
cachedStatus inspectorInterface.Inspector, context PlanBuilderContext,
group api.ServerGroup, member api.MemberStatus, mode api.TLSRotateMode) (bool, bool) {
if !spec.TLS.IsSecure() {


@@ -57,7 +57,7 @@ func createRotateTLSServerSNIPlan(ctx context.Context,
return nil
}
fetchedSecrets, err := mapTLSSNIConfig(log, *sni, cachedStatus)
fetchedSecrets, err := mapTLSSNIConfig(*sni, cachedStatus)
if err != nil {
log.Warn().Err(err).Msg("Unable to get SNI desired state")
return nil


@@ -54,7 +54,7 @@ func (d *Reconciler) ExecutePlan(ctx context.Context, cachedStatus inspectorInte
}
return !firstLoop, nil
}
firstLoop = false
firstLoop = false // nolint:ineffassign
// Take first action
planAction := loopStatus.Plan[0]


@@ -29,8 +29,6 @@ const (
cleanoutMemberTimeout = time.Hour * 12
removeMemberTimeout = time.Minute * 15
recreateMemberTimeout = time.Minute * 15
renewTLSCertificateTimeout = time.Minute * 30
renewTLSCACertificateTimeout = time.Minute * 30
operationTLSCACertificateTimeout = time.Minute * 30
rotateMemberTimeout = time.Minute * 15
pvcResizeTimeout = time.Minute * 30


@@ -29,8 +29,8 @@ import (
core "k8s.io/api/core/v1"
)
func secretKeysToListWithPrefix(prefix string, s *core.Secret) []string {
return util.PrefixStringArray(secretKeysToList(s), prefix)
func secretKeysToListWithPrefix(s *core.Secret) []string {
return util.PrefixStringArray(secretKeysToList(s), "sha256:")
}
func secretKeysToList(s *core.Secret) []string {
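Every caller passed the same "sha256:" prefix, so unparam flags the parameter and the constant moves into the helper. A self-contained sketch of the same refactor, with the Kubernetes Secret reduced to a plain map:

```go
package main

import "fmt"

func prefixStringArray(in []string, prefix string) []string {
	out := make([]string, 0, len(in))
	for _, s := range in {
		out = append(out, prefix+s)
	}
	return out
}

func secretKeysToList(data map[string][]byte) []string {
	keys := make([]string, 0, len(data))
	for k := range data {
		keys = append(keys, k)
	}
	return keys
}

// Before: secretKeysToListWithPrefix(prefix string, s *core.Secret);
// after, the only prefix ever used is baked in.
func secretKeysToListWithPrefix(data map[string][]byte) []string {
	return prefixStringArray(secretKeysToList(data), "sha256:")
}

func main() {
	fmt.Println(secretKeysToListWithPrefix(map[string][]byte{"key1": nil}))
}
```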


@@ -147,7 +147,6 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto
deployment.ArangoDeploymentResourceKind,
r.context.GetAPIObject().GetName(),
r.context.GetAPIObject().GetNamespace(),
r.context.GetSpec().Annotations,
r.context.GetSpec()); err != nil {
return err
}
@@ -264,7 +263,7 @@ func getObjectGroup(obj meta.Object) api.ServerGroup {
return api.ServerGroupFromRole(group)
}
func ensurePodsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, annotations map[string]string, spec api.DeploymentSpec) error {
func ensurePodsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error {
if err := cachedStatus.IteratePods(func(pod *core.Pod) error {
ensureGroupAnnotationsMap(pod.Kind, pod, spec, patch)
return nil
@@ -315,7 +314,7 @@ func ensureLabelsMap(kind string, obj meta.Object, spec api.DeploymentSpec,
}
func ensureGroupAnnotationsMap(kind string, obj meta.Object, spec api.DeploymentSpec,
patchCmd func(name string, d []byte) error) bool {
patchCmd func(name string, d []byte) error) {
group := getObjectGroup(obj)
groupSpec := spec.GetServerGroupSpec(group)
expected := collection.MergeAnnotations(spec.Annotations, groupSpec.Annotations)
@@ -324,16 +323,16 @@ func ensureGroupAnnotationsMap(kind string, obj meta.Object, spec api.Deployment
mode := groupSpec.AnnotationsMode.Get(spec.AnnotationsMode.Get(getDefaultMode(expected)))
return ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignoredList...)
ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignoredList...)
}
func ensureAnnotationsMap(kind string, obj meta.Object, spec api.DeploymentSpec, patchCmd PatchFunc) bool {
func ensureAnnotationsMap(kind string, obj meta.Object, spec api.DeploymentSpec, patchCmd PatchFunc) {
expected := spec.Annotations
ignored := spec.AnnotationsIgnoreList
mode := spec.AnnotationsMode.Get(getDefaultMode(expected))
return ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignored...)
ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignored...)
}
func ensureObjectMap(kind string, obj meta.Object, mode api.LabelsMode,


@@ -26,17 +26,14 @@ package resources
import (
"context"
"fmt"
"strings"
"time"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
certificates "github.com/arangodb-helper/go-certificates"
"github.com/rs/zerolog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
certificates "github.com/arangodb-helper/go-certificates"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)
@@ -72,46 +69,3 @@ func createClientAuthCACertificate(ctx context.Context, log zerolog.Logger, secr
log.Debug().Msg("Created CA Secret")
return nil
}
// createClientAuthCertificateKeyfile creates a client authentication certificate for a specific user and stores
// it in a secret with the given name.
func createClientAuthCertificateKeyfile(log zerolog.Logger, secrets v1.SecretInterface, commonName string, ttl time.Duration, spec api.SyncAuthenticationSpec, secretName string, ownerRef *metav1.OwnerReference) error {
log = log.With().Str("secret", secretName).Logger()
// Load CA certificate
caCert, caKey, _, err := k8sutil.GetCASecret(context.TODO(), secrets, spec.GetClientCASecretName(), nil)
if err != nil {
log.Debug().Err(err).Msg("Failed to load CA certificate")
return errors.WithStack(err)
}
ca, err := certificates.LoadCAFromPEM(caCert, caKey)
if err != nil {
log.Debug().Err(err).Msg("Failed to decode CA certificate")
return errors.WithStack(err)
}
options := certificates.CreateCertificateOptions{
CommonName: commonName,
ValidFrom: time.Now(),
ValidFor: ttl,
IsCA: false,
IsClientAuth: true,
ECDSACurve: clientAuthECDSACurve,
}
cert, priv, err := certificates.CreateCertificate(options, &ca)
if err != nil {
log.Debug().Err(err).Msg("Failed to create server certificate")
return errors.WithStack(err)
}
keyfile := strings.TrimSpace(cert) + "\n" +
strings.TrimSpace(priv)
if err := k8sutil.CreateTLSKeyfileSecret(context.TODO(), secrets, secretName, keyfile, ownerRef); err != nil {
if k8sutil.IsAlreadyExists(err) {
log.Debug().Msg("Server Secret already exists")
} else {
log.Debug().Err(err).Msg("Failed to create server Secret")
}
return errors.WithStack(err)
}
log.Debug().Msg("Created server Secret")
return nil
}

View file

@ -126,10 +126,6 @@ type inspector struct {
podDisruptionBudgets map[string]*policy.PodDisruptionBudget
serviceMonitors map[string]*monitoring.ServiceMonitor
arangoMembers map[string]*api.ArangoMember
ns string
k kubernetes.Interface
m monitoringClient.MonitoringV1Interface
}
func (i *inspector) Refresh(ctx context.Context, k kubernetes.Interface, m monitoringClient.MonitoringV1Interface,

View file

@ -79,10 +79,7 @@ func (i *inspector) ServiceMonitor(name string) (*monitoring.ServiceMonitor, boo
}
func serviceMonitorsToMap(ctx context.Context, m monitoringClient.MonitoringV1Interface, namespace string) (map[string]*monitoring.ServiceMonitor, error) {
serviceMonitors, err := getServiceMonitors(ctx, m, namespace, "")
if err != nil {
return nil, err
}
serviceMonitors := getServiceMonitors(ctx, m, namespace, "")
serviceMonitorMap := map[string]*monitoring.ServiceMonitor{}
@ -98,7 +95,7 @@ func serviceMonitorsToMap(ctx context.Context, m monitoringClient.MonitoringV1In
return serviceMonitorMap, nil
}
func getServiceMonitors(ctx context.Context, m monitoringClient.MonitoringV1Interface, namespace, cont string) ([]*monitoring.ServiceMonitor, error) {
func getServiceMonitors(ctx context.Context, m monitoringClient.MonitoringV1Interface, namespace, cont string) []*monitoring.ServiceMonitor {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
serviceMonitors, err := m.ServiceMonitors(namespace).List(ctxChild, meta.ListOptions{
@ -107,10 +104,10 @@ func getServiceMonitors(ctx context.Context, m monitoringClient.MonitoringV1Inte
})
if err != nil {
return []*monitoring.ServiceMonitor{}, nil
return []*monitoring.ServiceMonitor{}
}
return serviceMonitors.Items, nil
return serviceMonitors.Items
}
func FilterServiceMonitorsByLabels(labels map[string]string) servicemonitor.ServiceMonitorFilter {
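
Here the cleanup goes a step further: getServiceMonitors could only ever return a nil error (its error branch swallowed err and returned nil), so the error result is removed from the signature entirely. A self-contained sketch of that unparam pattern, with invented names:

    package main

    import "fmt"

    // Before (flagged by unparam): the error was nil on every path, so
    // every caller's error check was dead code:
    //
    //	func listMonitors() ([]string, error) { return []string{"svc"}, nil }

    // After: the impossible error is gone from the signature.
    func listMonitors() []string { return []string{"svc"} }

    func main() { fmt.Println(listMonitors()) }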

View file

@ -64,7 +64,7 @@ func versionHasAdvertisedEndpoint(v driver.Version) bool {
}
// createArangodArgsWithUpgrade creates command line arguments for an arangod server upgrade in the given group.
func createArangodArgsWithUpgrade(input pod.Input, additionalOptions ...k8sutil.OptionPair) []string {
func createArangodArgsWithUpgrade(input pod.Input) []string {
return createArangodArgs(input, pod.AutoUpgrade().Args(input)...)
}
@ -171,7 +171,7 @@ func createArangodArgs(input pod.Input, additionalOptions ...k8sutil.OptionPair)
options.Add("--rocksdb.encryption-key-rotation", "true")
}
args := append(options.Copy().Sort().AsArgs())
args := options.Copy().Sort().AsArgs()
if len(input.GroupSpec.Args) > 0 {
args = append(args, input.GroupSpec.Args...)
}
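
The args change above is a staticcheck finding: append called with a single argument copies nothing and returns its argument unchanged, so the call is a no-op wrapper. A tiny runnable illustration (slice contents are made up):

    package main

    import "fmt"

    func main() {
    	opts := []string{"--server.endpoint", "--log.level"}

    	// Before (flagged): append with no elements to add returns its
    	// argument unchanged, so staticcheck reports the call as a no-op:
    	//
    	//	args := append(opts)
    	//
    	// After: assign directly.
    	args := opts
    	fmt.Println(args)
    }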

View file

@ -43,9 +43,9 @@ import (
)
const (
ArangoDExecutor string = "/usr/sbin/arangod"
ArangoDBOverrideDetectedTotalMemoryEnv = "ARANGODB_OVERRIDE_DETECTED_TOTAL_MEMORY"
ArangoDBOverrideDetectedNumberOfCoresEnv = "ARANGODB_OVERRIDE_DETECTED_NUMBER_OF_CORES"
ArangoDExecutor = "/usr/sbin/arangod"
ArangoDBOverrideDetectedTotalMemoryEnv = "ARANGODB_OVERRIDE_DETECTED_TOTAL_MEMORY"
ArangoDBOverrideDetectedNumberOfCoresEnv = "ARANGODB_OVERRIDE_DETECTED_NUMBER_OF_CORES"
)
var _ interfaces.PodCreator = &MemberArangoDPod{}
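
In the const block above, only the first constant carried an explicit string type, which broke the block's alignment and added nothing: untyped string constants convert implicitly wherever a string is expected. A short sketch with the same values but illustrative names:

    package main

    import "fmt"

    const (
    	Executor  = "/usr/sbin/arangod"
    	MemoryEnv = "ARANGODB_OVERRIDE_DETECTED_TOTAL_MEMORY"
    	CoresEnv  = "ARANGODB_OVERRIDE_DETECTED_NUMBER_OF_CORES"
    )

    func main() {
    	var path string = Executor // untyped constant converts implicitly
    	fmt.Println(path, MemoryEnv, CoresEnv)
    }
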
@ -325,8 +325,6 @@ func (m *MemberArangoDPod) GetSidecars(pod *core.Pod) {
if len(sidecars) > 0 {
pod.Spec.Containers = append(pod.Spec.Containers, sidecars...)
}
return
}
func (m *MemberArangoDPod) GetVolumes() ([]core.Volume, []core.VolumeMount) {
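
The bare return deleted from GetSidecars is the classic redundant-control-flow finding (gosimple knows it as S1023): a trailing return in a function without results does nothing. Sketch with an invented function:

    package main

    import "fmt"

    // Before (flagged): the final bare return is redundant:
    //
    //	func report(msg string) {
    //		fmt.Println(msg)
    //		return
    //	}

    // After:
    func report(msg string) {
    	fmt.Println(msg)
    }

    func main() { report("sidecars attached") }
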
@ -490,10 +488,6 @@ func (m *MemberArangoDPod) GetContainerCreator() interfaces.ContainerCreator {
}
}
func (m *MemberArangoDPod) isMetricsEnabledForGroup() bool {
return m.spec.Metrics.IsEnabled() && m.group.IsExportMetrics()
}
func (m *MemberArangoDPod) createMetricsExporterSidecar() *core.Container {
image := m.context.GetMetricsExporterImage()
if m.spec.Metrics.HasImage() {

View file

@ -58,7 +58,7 @@ func nilProbeBuilder(spec api.DeploymentSpec, group api.ServerGroup, version dri
}
func (r *Resources) getReadinessProbe(spec api.DeploymentSpec, group api.ServerGroup, version driver.Version) (Probe, error) {
if !r.isReadinessProbeEnabled(spec, group, version) {
if !r.isReadinessProbeEnabled(spec, group) {
return nil, nil
}
@ -88,7 +88,7 @@ func (r *Resources) getReadinessProbe(spec api.DeploymentSpec, group api.ServerG
}
func (r *Resources) getLivenessProbe(spec api.DeploymentSpec, group api.ServerGroup, version driver.Version) (Probe, error) {
if !r.isLivenessProbeEnabled(spec, group, version) {
if !r.isLivenessProbeEnabled(spec, group) {
return nil, nil
}
@ -117,7 +117,7 @@ func (r *Resources) getLivenessProbe(spec api.DeploymentSpec, group api.ServerGr
return config, nil
}
func (r *Resources) isReadinessProbeEnabled(spec api.DeploymentSpec, group api.ServerGroup, version driver.Version) bool {
func (r *Resources) isReadinessProbeEnabled(spec api.DeploymentSpec, group api.ServerGroup) bool {
probe := pod.ReadinessSpec(group)
groupSpec := spec.GetServerGroupSpec(group)
@ -131,7 +131,7 @@ func (r *Resources) isReadinessProbeEnabled(spec api.DeploymentSpec, group api.S
return probe.CanBeEnabled && probe.EnabledByDefault
}
func (r *Resources) isLivenessProbeEnabled(spec api.DeploymentSpec, group api.ServerGroup, version driver.Version) bool {
func (r *Resources) isLivenessProbeEnabled(spec api.DeploymentSpec, group api.ServerGroup) bool {
probe := pod.LivenessSpec(group)
groupSpec := spec.GetServerGroupSpec(group)
@ -174,7 +174,7 @@ func (r *Resources) probeBuilders() map[api.ServerGroup]probeCheckBuilder {
}
}
func (r *Resources) probeCommand(spec api.DeploymentSpec, group api.ServerGroup, version driver.Version, endpoint string) ([]string, error) {
func (r *Resources) probeCommand(spec api.DeploymentSpec, endpoint string) ([]string, error) {
binaryPath, err := os.Executable()
if err != nil {
return nil, err
@ -207,7 +207,7 @@ func (r *Resources) probeBuilderLivenessCoreSelect() probeBuilder {
}
func (r *Resources) probeBuilderLivenessCoreOperator(spec api.DeploymentSpec, group api.ServerGroup, version driver.Version) (Probe, error) {
args, err := r.probeCommand(spec, group, version, "/_api/version")
args, err := r.probeCommand(spec, "/_api/version")
if err != nil {
return nil, err
}
@ -300,7 +300,7 @@ func (r *Resources) probeBuilderReadinessCoreOperator(spec api.DeploymentSpec, g
localPath = "/_admin/server/availability"
}
args, err := r.probeCommand(spec, group, version, localPath)
args, err := r.probeCommand(spec, localPath)
if err != nil {
return nil, err
}

View file

@ -37,8 +37,6 @@ import (
"github.com/arangodb/kube-arangodb/pkg/deployment/pod"
"github.com/arangodb/go-driver"
v1 "k8s.io/api/core/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@ -233,40 +231,6 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe
return nil
}
func changeUserPassword(c Context, secret *v1.Secret) error {
username, password, err := k8sutil.GetSecretAuthCredentials(secret)
if err != nil {
return nil
}
ctx := context.Background()
client, err := c.GetDatabaseClient(ctx)
if err != nil {
return errors.WithStack(err)
}
user, err := client.User(ctx, username)
if err != nil {
if driver.IsNotFound(err) {
options := &driver.UserOptions{
Password: password,
Active: new(bool),
}
*options.Active = true
_, err = client.CreateUser(ctx, username, options)
return errors.WithStack(err)
}
return err
}
err = user.Update(ctx, driver.UserOptions{
Password: password,
})
return errors.WithStack(err)
}
// getSecretHash fetches a secret with given name and returns a hash over its value.
func (r *Resources) getSecretHash(cachedStatus inspectorInterface.Inspector, secretName string) (*v1.Secret, string, bool) {
s, exists := cachedStatus.Secret(secretName)

View file

@ -329,32 +329,6 @@ func (r *Resources) ensureTokenSecret(ctx context.Context, cachedStatus inspecto
return nil
}
func (r *Resources) ensureSecret(cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, secretName string) error {
if _, exists := cachedStatus.Secret(secretName); !exists {
return r.createSecret(secrets, secretName)
}
return nil
}
func (r *Resources) createSecret(secrets k8sutil.SecretInterface, secretName string) error {
// Create secret
secret := &core.Secret{
ObjectMeta: meta.ObjectMeta{
Name: secretName,
},
}
// Attach secret to owner
owner := r.context.GetAPIObject().AsOwner()
k8sutil.AddOwnerRefToObject(secret, &owner)
if _, err := secrets.Create(context.Background(), secret, meta.CreateOptions{}); err != nil {
// Failed to create secret
return errors.WithStack(err)
}
return operatorErrors.Reconcile()
}
func (r *Resources) ensureSecretWithEmptyKey(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, secretName, keyName string) error {
if _, exists := cachedStatus.Secret(secretName); !exists {
return r.createSecretWithKey(ctx, secrets, secretName, keyName, nil)
@ -363,14 +337,6 @@ func (r *Resources) ensureSecretWithEmptyKey(ctx context.Context, cachedStatus i
return nil
}
func (r *Resources) ensureSecretWithKey(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, secretName, keyName string, value []byte) error {
if _, exists := cachedStatus.Secret(secretName); !exists {
return r.createSecretWithKey(ctx, secrets, secretName, keyName, value)
}
return nil
}
func (r *Resources) createSecretWithMod(ctx context.Context, secrets k8sutil.SecretInterface, secretName string, f func(s *core.Secret)) error {
// Create secret
secret := &core.Secret{
@ -581,43 +547,6 @@ func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStat
return nil
}
// ensureTLSCACertificateSecret checks if a secret with given name exists in the namespace
// of the deployment. If not, it will add such a secret with a generated CA certificate.
func (r *Resources) ensureTLSCAFolderSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, spec api.TLSSpec, folderSecretName string) error {
if spec.CASecretName == nil {
return errors.Newf("CA Secret Name is nil")
}
caSecret, ok := cachedStatus.Secret(*spec.CASecretName)
if !ok {
return errors.Newf("CA Secret is missing")
}
if _, exists := cachedStatus.Secret(spec.GetCASecretName()); !exists {
ca, _, err := GetKeyCertFromSecret(r.log, caSecret, CACertName, CAKeyName)
if err != nil {
return errors.WithStack(err)
}
if len(ca) == 0 {
return errors.WithStack(err)
}
caData, err := ca.ToPem()
if err != nil {
return errors.WithStack(err)
}
certSha := util.SHA256(caData)
// Secret not found, create it
return r.createSecretWithMod(ctx, secrets, folderSecretName, func(s *core.Secret) {
s.Data[certSha] = caData
})
}
return nil
}
// ensureClientAuthCACertificateSecret checks if a secret with given name exists in the namespace
// of the deployment. If not, it will add such a secret with a generated CA certificate.
func (r *Resources) ensureClientAuthCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, spec api.SyncAuthenticationSpec) error {

View file

@ -40,7 +40,6 @@ import (
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/metrics"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/prometheus/client_golang/prometheus"
"github.com/rs/zerolog"
)
@ -193,7 +192,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
if single {
role = "single"
}
if err := r.ensureExternalAccessServices(ctx, cachedStatus, svcs, eaServiceName, ns, role, "database", k8sutil.ArangoPort, false, spec.ExternalAccess, apiObject, log, counterMetric); err != nil {
if err := r.ensureExternalAccessServices(ctx, cachedStatus, svcs, eaServiceName, role, "database", k8sutil.ArangoPort, false, spec.ExternalAccess, apiObject, log); err != nil {
return errors.WithStack(err)
}
@ -202,7 +201,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
counterMetric.Inc()
eaServiceName := k8sutil.CreateSyncMasterClientServiceName(deploymentName)
role := "syncmaster"
if err := r.ensureExternalAccessServices(ctx, cachedStatus, svcs, eaServiceName, ns, role, "sync", k8sutil.ArangoSyncMasterPort, true, spec.Sync.ExternalAccess.ExternalAccessSpec, apiObject, log, counterMetric); err != nil {
if err := r.ensureExternalAccessServices(ctx, cachedStatus, svcs, eaServiceName, role, "sync", k8sutil.ArangoSyncMasterPort, true, spec.Sync.ExternalAccess.ExternalAccessSpec, apiObject, log); err != nil {
return errors.WithStack(err)
}
status, lastVersion := r.context.GetStatus()
@ -234,7 +233,9 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
}
// EnsureServices creates all services needed to service the deployment
func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStatus inspectorInterface.Inspector, svcs k8sutil.ServiceInterface, eaServiceName, ns, svcRole, title string, port int, noneIsClusterIP bool, spec api.ExternalAccessSpec, apiObject k8sutil.APIObject, log zerolog.Logger, counterMetric prometheus.Counter) error {
func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStatus inspectorInterface.Inspector,
svcs k8sutil.ServiceInterface, eaServiceName, svcRole, title string, port int, noneIsClusterIP bool,
spec api.ExternalAccessSpec, apiObject k8sutil.APIObject, log zerolog.Logger) error {
// Database external access service
createExternalAccessService := false
deleteExternalAccessService := false

View file

@ -35,7 +35,6 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
"k8s.io/client-go/tools/record"
api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha"
@ -95,8 +94,6 @@ type LocalStorage struct {
stopCh chan struct{}
stopped int32
eventsCli corev1.EventInterface
image string
imagePullPolicy v1.PullPolicy
inspectTrigger trigger.Trigger

View file

@ -84,10 +84,10 @@ func (p *Provisioner) GetInfo(ctx context.Context, localPath string) (provisione
}
// Available is blocks available * fragment size
available := int64(statfs.Bavail) * int64(statfs.Bsize)
available := int64(statfs.Bavail) * statfs.Bsize
// Capacity is total block count * fragment size
capacity := int64(statfs.Blocks) * int64(statfs.Bsize)
capacity := int64(statfs.Blocks) * statfs.Bsize
log.Debug().
Str("node-name", p.NodeName).

View file

@ -81,7 +81,7 @@ func getNodeInfoHandler(api provisioner.API) func(w http.ResponseWriter, r *http
if err != nil {
handleError(w, err)
} else {
sendJSON(w, http.StatusOK, result)
sendJSON(w, result)
}
}
}
@ -97,7 +97,7 @@ func getInfoHandler(api provisioner.API) func(w http.ResponseWriter, r *http.Req
if err != nil {
handleError(w, err)
} else {
sendJSON(w, http.StatusOK, result)
sendJSON(w, result)
}
}
}
@ -113,7 +113,7 @@ func getPrepareHandler(api provisioner.API) func(w http.ResponseWriter, r *http.
if err := api.Prepare(ctx, input.LocalPath); err != nil {
handleError(w, err)
} else {
sendJSON(w, http.StatusOK, struct{}{})
sendJSON(w, struct{}{})
}
}
}
@ -129,16 +129,16 @@ func getRemoveHandler(api provisioner.API) func(w http.ResponseWriter, r *http.R
if err := api.Remove(ctx, input.LocalPath); err != nil {
handleError(w, err)
} else {
sendJSON(w, http.StatusOK, struct{}{})
sendJSON(w, struct{}{})
}
}
}
}
// sendJSON encodes given body as JSON and sends it to the given writer with given HTTP status.
func sendJSON(w http.ResponseWriter, status int, body interface{}) error {
func sendJSON(w http.ResponseWriter, body interface{}) error {
w.Header().Set("Content-Type", contentTypeJSON)
w.WriteHeader(status)
w.WriteHeader(http.StatusOK)
if body == nil {
w.Write([]byte("{}"))
} else {

View file

@ -74,10 +74,6 @@ type agencyJob struct {
Type string `json:"type,omitempty"`
}
const (
agencyJobTypeCleanOutServer = "cleanOutServer"
)
// CleanoutServerJobStatus checks the status of a cleanout-server job with given ID.
func CleanoutServerJobStatus(ctx context.Context, jobID string, client driver.Client, agencyClient agency.Agency) (CleanoutJobStatus, error) {
for _, keyPrefix := range agencyJobStateKeyPrefixes {

View file

@ -34,7 +34,6 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/errors"
driver "github.com/arangodb/go-driver"
"github.com/arangodb/go-driver/agency"
"github.com/arangodb/go-driver/http"
"github.com/arangodb/go-driver/jwt"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
@ -167,57 +166,6 @@ func CreateArangodDatabaseClient(ctx context.Context, cli corev1.CoreV1Interface
return c, nil
}
func CreateArangodAgencyConnection(ctx context.Context, apiObject *api.ArangoDeployment) (driver.Connection, error) {
var dnsNames []string
for _, m := range apiObject.Status.Members.Agents {
dnsName := k8sutil.CreatePodDNSNameWithDomain(apiObject, apiObject.Spec.ClusterDomain, api.ServerGroupAgents.AsRole(), m.ID)
dnsNames = append(dnsNames, dnsName)
}
shortTimeout := false
connConfig, err := createArangodHTTPConfigForDNSNames(ctx, apiObject, dnsNames, shortTimeout)
if err != nil {
return nil, errors.WithStack(err)
}
agencyConn, err := agency.NewAgencyConnection(connConfig)
if err != nil {
return nil, errors.WithStack(err)
}
return agencyConn, nil
}
// CreateArangodAgencyClient creates a go-driver client for accessing the agents of the given deployment.
func CreateArangodAgencyClient(ctx context.Context, cli corev1.CoreV1Interface, apiObject *api.ArangoDeployment) (agency.Agency, error) {
var dnsNames []string
for _, m := range apiObject.Status.Members.Agents {
dnsName := k8sutil.CreatePodDNSNameWithDomain(apiObject, apiObject.Spec.ClusterDomain, api.ServerGroupAgents.AsRole(), m.ID)
dnsNames = append(dnsNames, dnsName)
}
shortTimeout := false
connConfig, err := createArangodHTTPConfigForDNSNames(ctx, apiObject, dnsNames, shortTimeout)
if err != nil {
return nil, errors.WithStack(err)
}
agencyConn, err := agency.NewAgencyConnection(connConfig)
if err != nil {
return nil, errors.WithStack(err)
}
auth, err := createArangodClientAuthentication(ctx, cli, apiObject)
if err != nil {
return nil, errors.WithStack(err)
}
if auth != nil {
agencyConn, err = agencyConn.SetAuthentication(auth)
if err != nil {
return nil, errors.WithStack(err)
}
}
a, err := agency.NewAgency(agencyConn)
if err != nil {
return nil, errors.WithStack(err)
}
return a, nil
}
// CreateArangodImageIDClient creates a go-driver client for an ArangoDB instance
// running in an Image-ID pod.
func CreateArangodImageIDClient(ctx context.Context, deployment k8sutil.APIObject, role, id string) (driver.Client, error) {
@ -232,10 +180,7 @@ func CreateArangodImageIDClient(ctx context.Context, deployment k8sutil.APIObjec
// CreateArangodClientForDNSName creates a go-driver client for a given DNS name.
func createArangodClientForDNSName(ctx context.Context, cli corev1.CoreV1Interface, apiObject *api.ArangoDeployment, dnsName string, shortTimeout bool) (driver.Client, error) {
connConfig, err := createArangodHTTPConfigForDNSNames(ctx, apiObject, []string{dnsName}, shortTimeout)
if err != nil {
return nil, errors.WithStack(err)
}
connConfig := createArangodHTTPConfigForDNSNames(apiObject, []string{dnsName}, shortTimeout)
// TODO deal with TLS with proper CA checking
conn, err := http.NewConnection(connConfig)
if err != nil {
@ -259,7 +204,7 @@ func createArangodClientForDNSName(ctx context.Context, cli corev1.CoreV1Interfa
}
// createArangodHTTPConfigForDNSNames creates a go-driver HTTP connection config for a given DNS names.
func createArangodHTTPConfigForDNSNames(ctx context.Context, apiObject *api.ArangoDeployment, dnsNames []string, shortTimeout bool) (http.ConnectionConfig, error) {
func createArangodHTTPConfigForDNSNames(apiObject *api.ArangoDeployment, dnsNames []string, shortTimeout bool) http.ConnectionConfig {
scheme := "http"
transport := sharedHTTPTransport
if shortTimeout {
@ -279,7 +224,7 @@ func createArangodHTTPConfigForDNSNames(ctx context.Context, apiObject *api.Aran
for _, dnsName := range dnsNames {
connConfig.Endpoints = append(connConfig.Endpoints, scheme+"://"+net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoPort)))
}
return connConfig, nil
return connConfig
}
// createArangodClientAuthentication creates a go-driver authentication for the servers in the given deployment.

View file

@ -35,10 +35,10 @@ import (
)
var (
conflictError = apierrors.NewConflict(schema.GroupResource{"groupName", "resourceName"}, "something", os.ErrInvalid)
existsError = apierrors.NewAlreadyExists(schema.GroupResource{"groupName", "resourceName"}, "something")
invalidError = apierrors.NewInvalid(schema.GroupKind{"groupName", "kindName"}, "something", field.ErrorList{})
notFoundError = apierrors.NewNotFound(schema.GroupResource{"groupName", "resourceName"}, "something")
conflictError = apierrors.NewConflict(schema.GroupResource{Group: "groupName", Resource: "resourceName"}, "something", os.ErrInvalid)
existsError = apierrors.NewAlreadyExists(schema.GroupResource{Group: "groupName", Resource: "resourceName"}, "something")
invalidError = apierrors.NewInvalid(schema.GroupKind{Group: "groupName", Kind: "kindName"}, "something", field.ErrorList{})
notFoundError = apierrors.NewNotFound(schema.GroupResource{Group: "groupName", Resource: "resourceName"}, "something")
)
func TestIsAlreadyExists(t *testing.T) {
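
These test fixtures gain field names because vet's composite-literal check flags unkeyed literals of structs imported from other packages; positional literals break silently when the upstream type adds or reorders fields. Minimal sketch; GroupResource here is a local stand-in for the apimachinery type:

    package main

    import "fmt"

    // GroupResource stands in for a struct imported from another
    // package, e.g. k8s.io/apimachinery's schema.GroupResource.
    type GroupResource struct {
    	Group    string
    	Resource string
    }

    func main() {
    	// Unkeyed form (flagged): GroupResource{"apps", "deployments"}
    	// would compile, but breaks silently on field reordering.
    	gr := GroupResource{Group: "apps", Resource: "deployments"}
    	fmt.Println(gr)
    }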

View file

@ -21,6 +21,8 @@
package mocks
import (
"context"
"github.com/stretchr/testify/mock"
v1 "k8s.io/api/core/v1"
meta_v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -60,42 +62,43 @@ func (s *secrets) AsMock() *mock.Mock {
return &s.Mock
}
func (s *secrets) Create(x *v1.Secret) (*v1.Secret, error) {
func (s *secrets) Create(_ context.Context, x *v1.Secret, _ meta_v1.CreateOptions) (*v1.Secret, error) {
args := s.Called(x)
return nilOrSecret(args.Get(0)), args.Error(1)
}
func (s *secrets) Update(x *v1.Secret) (*v1.Secret, error) {
func (s *secrets) Update(_ context.Context, x *v1.Secret, _ meta_v1.UpdateOptions) (*v1.Secret, error) {
args := s.Called(x)
return nilOrSecret(args.Get(0)), args.Error(1)
}
func (s *secrets) Delete(name string, options *meta_v1.DeleteOptions) error {
func (s *secrets) Delete(_ context.Context, name string, options meta_v1.DeleteOptions) error {
args := s.Called(name, options)
return args.Error(0)
}
func (s *secrets) DeleteCollection(options *meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
func (s *secrets) DeleteCollection(_ context.Context, options meta_v1.DeleteOptions, listOptions meta_v1.ListOptions) error {
args := s.Called(options, listOptions)
return args.Error(0)
}
func (s *secrets) Get(name string, options meta_v1.GetOptions) (*v1.Secret, error) {
func (s *secrets) Get(_ context.Context, name string, options meta_v1.GetOptions) (*v1.Secret, error) {
args := s.Called(name, options)
return nilOrSecret(args.Get(0)), args.Error(1)
}
func (s *secrets) List(opts meta_v1.ListOptions) (*v1.SecretList, error) {
func (s *secrets) List(_ context.Context, opts meta_v1.ListOptions) (*v1.SecretList, error) {
args := s.Called(opts)
return nilOrSecretList(args.Get(0)), args.Error(1)
}
func (s *secrets) Watch(opts meta_v1.ListOptions) (watch.Interface, error) {
func (s *secrets) Watch(_ context.Context, opts meta_v1.ListOptions) (watch.Interface, error) {
args := s.Called(opts)
return nilOrWatch(args.Get(0)), args.Error(1)
}
func (s *secrets) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.Secret, err error) {
func (s *secrets) Patch(_ context.Context, name string, pt types.PatchType, data []byte,
options meta_v1.PatchOptions, subresources ...string) (result *v1.Secret, err error) {
args := s.Called(name, pt, data, subresources)
return nilOrSecret(args.Get(0)), args.Error(1)
}
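
These mock methods track client-go's typed clients, which added context.Context and options parameters to every verb around v0.18; the mocks ignore the new arguments with _ so the existing expectations keep working. A self-contained sketch of the shape (interface and types invented for illustration):

    package main

    import (
    	"context"
    	"fmt"
    )

    // Getter mirrors the shape of a context-aware client-go verb.
    type Getter interface {
    	Get(ctx context.Context, name string) (string, error)
    }

    // mockGetter ignores the context with _, as the mocks above do.
    type mockGetter struct{}

    func (mockGetter) Get(_ context.Context, name string) (string, error) {
    	return "secret:" + name, nil
    }

    func main() {
    	var g Getter = mockGetter{}
    	v, _ := g.Get(context.Background(), "jwt")
    	fmt.Println(v)
    }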

View file

@ -132,7 +132,7 @@ func CreateHeadlessService(ctx context.Context, svcs ServiceInterface, deploymen
}
publishNotReadyAddresses := true
serviceType := core.ServiceTypeClusterIP
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, deployment.GetNamespace(), ClusterIPNone, "", serviceType, ports, "", nil, publishNotReadyAddresses, owner)
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, ClusterIPNone, "", serviceType, ports, "", nil, publishNotReadyAddresses, owner)
if err != nil {
return "", false, errors.WithStack(err)
}
@ -162,7 +162,7 @@ func CreateDatabaseClientService(ctx context.Context, svcs ServiceInterface, dep
}
serviceType := core.ServiceTypeClusterIP
publishNotReadyAddresses := false
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, deployment.GetNamespace(), "", role, serviceType, ports, "", nil, publishNotReadyAddresses, owner)
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, "", role, serviceType, ports, "", nil, publishNotReadyAddresses, owner)
if err != nil {
return "", false, errors.WithStack(err)
}
@ -186,7 +186,7 @@ func CreateExternalAccessService(ctx context.Context, svcs ServiceInterface, svc
},
}
publishNotReadyAddresses := false
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, deployment.GetNamespace(), "", role, serviceType, ports, loadBalancerIP, loadBalancerSourceRanges, publishNotReadyAddresses, owner)
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, "", role, serviceType, ports, loadBalancerIP, loadBalancerSourceRanges, publishNotReadyAddresses, owner)
if err != nil {
return "", false, errors.WithStack(err)
}
@ -197,7 +197,7 @@ func CreateExternalAccessService(ctx context.Context, svcs ServiceInterface, svc
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func createService(ctx context.Context, svcs ServiceInterface, svcName, deploymentName, ns, clusterIP, role string,
func createService(ctx context.Context, svcs ServiceInterface, svcName, deploymentName, clusterIP, role string,
serviceType core.ServiceType, ports []core.ServicePort, loadBalancerIP string, loadBalancerSourceRanges []string,
publishNotReadyAddresses bool, owner metav1.OwnerReference) (bool, error) {
labels := LabelsForDeployment(deploymentName, role)

View file

@ -108,7 +108,7 @@ func retry(ctx context.Context, op func() error, timeout time.Duration) error {
// Retry the given operation until it succeeds,
// has a permanent failure or times out.
func Retry(op func() error, timeout time.Duration) error {
return retry(nil, op, timeout)
return retry(nil, op, timeout) // nolint:staticcheck
}
// RetryWithContext retries the given operation until it succeeds,
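
Retry deliberately passes a nil context to keep its legacy signature, which staticcheck (SA1012: do not pass a nil Context) would otherwise report, hence the targeted nolint directive on that line. A hedged sketch of the same pattern; retry below is invented and nil-tolerant by construction:

    package main

    import (
    	"context"
    	"fmt"
    )

    // retry tolerates a nil context for legacy callers (hypothetical).
    func retry(ctx context.Context, op func() error) error {
    	if ctx == nil {
    		ctx = context.Background()
    	}
    	select {
    	case <-ctx.Done():
    		return ctx.Err()
    	default:
    		return op()
    	}
    }

    func main() {
    	// The nil is intentional, so the finding is silenced in place.
    	err := retry(nil, func() error { return nil }) // nolint:staticcheck
    	fmt.Println("err:", err)
    }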

View file

@ -78,10 +78,7 @@ func cmdStorageProvisionerRun(cmd *cobra.Command, args []string) {
cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorNodeName)
}
config, deps, err := newProvisionerConfigAndDeps(nodeName)
if err != nil {
cliLog.Fatal().Err(err).Msg("Failed to create provisioner config & dependencies")
}
config, deps := newProvisionerConfigAndDeps(nodeName)
p, err := service.New(config, deps)
if err != nil {
cliLog.Fatal().Err(err).Msg("Failed to create provisioner")
@ -92,7 +89,7 @@ func cmdStorageProvisionerRun(cmd *cobra.Command, args []string) {
}
// newProvisionerConfigAndDeps creates storage provisioner config & dependencies.
func newProvisionerConfigAndDeps(nodeName string) (service.Config, service.Dependencies, error) {
func newProvisionerConfigAndDeps(nodeName string) (service.Config, service.Dependencies) {
cfg := service.Config{
Address: net.JoinHostPort("0.0.0.0", strconv.Itoa(storageProvisioner.port)),
NodeName: nodeName,
@ -101,5 +98,5 @@ func newProvisionerConfigAndDeps(nodeName string) (service.Config, service.Depen
Log: logService.MustGetLogger("provisioner"),
}
return cfg, deps, nil
return cfg, deps
}

View file

@ -25,11 +25,12 @@ package tests
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"strings"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/dchest/uniuri"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@ -65,7 +66,7 @@ func TestAuthenticationSingleDefaultSecret(t *testing.T) {
}
// Secret must now exist
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, nil, time.Second); err != nil {
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil {
t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err)
}
@ -133,7 +134,7 @@ func TestAuthenticationSingleCustomSecret(t *testing.T) {
removeDeployment(c, depl.GetName(), ns)
// Secret must still exist
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, nil, time.Second); err != nil {
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil {
t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err)
}
}
@ -203,7 +204,7 @@ func TestAuthenticationClusterDefaultSecret(t *testing.T) {
}
// Secret must now exist
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, nil, time.Second); err != nil {
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil {
t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err)
}
@ -270,7 +271,7 @@ func TestAuthenticationClusterCustomSecret(t *testing.T) {
removeDeployment(c, depl.GetName(), ns)
// Secret must still exist
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, nil, time.Second); err != nil {
if _, err := waitUntilSecret(kubecli, depl.Spec.Authentication.GetJWTSecretName(), ns, time.Second); err != nil {
t.Fatalf("JWT secret '%s' not found: %v", depl.Spec.Authentication.GetJWTSecretName(), err)
}

View file

@ -52,8 +52,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
var backupAPIAvailable *bool
func waitUntilBackup(ci versioned.Interface, name, ns string, predicate func(*backupApi.ArangoBackup, error) error, timeout ...time.Duration) (*backupApi.ArangoBackup, error) {
var result *backupApi.ArangoBackup
op := func() error {
@ -219,7 +217,7 @@ func skipIfBackupUnavailable(t *testing.T, client driver.Client) {
func statBackupMeta(client driver.Client, backupID driver.BackupID) (bool, driver.BackupMeta, error) {
list, err := client.Backup().List(nil, &driver.BackupListOptions{ID: backupID})
list, err := client.Backup().List(context.Background(), &driver.BackupListOptions{ID: backupID})
if err != nil {
if driver.IsNotFound(err) {
return false, driver.BackupMeta{}, nil
@ -228,7 +226,7 @@ func statBackupMeta(client driver.Client, backupID driver.BackupID) (bool, drive
return false, driver.BackupMeta{}, err
}
if meta, ok := list[driver.BackupID(backupID)]; ok {
if meta, ok := list[backupID]; ok {
return true, meta, nil
}
@ -250,12 +248,11 @@ func ensureBackup(t *testing.T, deployment, ns string, deploymentClient versione
return backup, name, driver.BackupID(backupID)
}
func skipOrRemotePath(t *testing.T) (repoPath string) {
repoPath = os.Getenv("TEST_REMOTE_REPOSITORY")
func skipOrRemotePath(t *testing.T) {
repoPath := os.Getenv("TEST_REMOTE_REPOSITORY")
if repoPath == "" {
t.Skip("TEST_REMOTE_REPOSITORY not set")
}
return repoPath
}
func newOperation() *backupApi.ArangoBackupSpecOperation {
@ -419,7 +416,7 @@ func TestBackupCluster(t *testing.T) {
t.Run("create backup", func(t *testing.T) {
backup := newBackup(fmt.Sprintf("my-backup-%s", uniuri.NewLen(4)), depl.GetName(), nil)
_, err := backupClient.Create(context.Background(), backup,metav1.CreateOptions{})
_, err := backupClient.Create(context.Background(), backup, metav1.CreateOptions{})
require.NoError(t, err, "failed to create backup: %s", err)
defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{})
@ -438,7 +435,7 @@ func TestBackupCluster(t *testing.T) {
skipOrRemotePath(t)
backup := newBackup(fmt.Sprintf("my-backup-%s", uniuri.NewLen(4)), depl.GetName(), nil)
_, err := backupClient.Create(context.Background(), backup,metav1.CreateOptions{})
_, err := backupClient.Create(context.Background(), backup, metav1.CreateOptions{})
require.NoError(t, err, "failed to create backup: %s", err)
defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{})
@ -492,7 +489,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err, "Backup test failed: %s", err)
// check that the actual backup has been deleted
found, _, err = statBackupMeta(databaseClient, id)
found, _, _ = statBackupMeta(databaseClient, id)
require.False(t, found)
})
@ -501,7 +498,7 @@ func TestBackupCluster(t *testing.T) {
defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{})
// now remove the backup locally
err := databaseClient.Backup().Delete(nil, id)
err := databaseClient.Backup().Delete(context.Background(), id)
require.NoError(t, err, "Failed to delete backup: %s", err)
// wait for the backup to become unavailable
@ -512,9 +509,9 @@ func TestBackupCluster(t *testing.T) {
t.Run("handle existing backups", func(t *testing.T) {
// create a local backup manually
id, _, err := databaseClient.Backup().Create(nil, nil)
id, _, err := databaseClient.Backup().Create(context.Background(), nil)
require.NoError(t, err, "Creating backup failed: %s", err)
found, meta, err := statBackupMeta(databaseClient, driver.BackupID(id))
found, meta, err := statBackupMeta(databaseClient, id)
require.NoError(t, err, "Backup test failed: %s", err)
require.True(t, found)
@ -582,7 +579,7 @@ func TestBackupCluster(t *testing.T) {
backup.Labels = labels.MatchLabels
_, err := backupClient.Create(context.Background(), backup,metav1.CreateOptions{})
_, err := backupClient.Create(context.Background(), backup, metav1.CreateOptions{})
require.NoError(t, err, "failed to create backup: %s", err)
defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{})
}
@ -601,13 +598,13 @@ func TestBackupCluster(t *testing.T) {
backup.Labels = labels.MatchLabels
_, err = backupClient.Create(context.Background(), backup,metav1.CreateOptions{})
_, err = backupClient.Create(context.Background(), backup, metav1.CreateOptions{})
require.NoError(t, err, "failed to create backup: %s", err)
defer backupClient.Delete(context.Background(), backup.GetName(), metav1.DeleteOptions{})
name := backup.Name
err = timeout(time.Second, 5*time.Minute, timeoutWaitForBackups(t, backupClient, labels, size+1))
_ = timeout(time.Second, 5*time.Minute, timeoutWaitForBackups(t, backupClient, labels, size+1))
// insert yet another document
meta2, err := col.CreateDocument(ctx, &Book{Title: "Bad book title", Author: "Lars"})
@ -615,7 +612,7 @@ func TestBackupCluster(t *testing.T) {
// now restore the backup
_, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
spec.RestoreFrom = util.NewString(string(name))
spec.RestoreFrom = util.NewString(name)
})
require.NoError(t, err, "Failed to update deployment: %s", err)
@ -708,7 +705,7 @@ func TestBackupCluster(t *testing.T) {
// now restore the backup
_, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
spec.RestoreFrom = util.NewString(string(name))
spec.RestoreFrom = util.NewString(name)
})
require.NoError(t, err, "Failed to update deployment: %s", err)
@ -858,7 +855,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err)
// Wait for uploaded flag to appear
backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded)
_, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded)
require.NoError(t, err, "backup did not become ready: %s", err)
})
@ -892,7 +889,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err, "Backup test failed: %s", err)
// check that the actual backup has been deleted
found, _, err = statBackupMeta(databaseClient, id)
found, _, _ = statBackupMeta(databaseClient, id)
require.False(t, found)
// create backup with download operation
@ -945,7 +942,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err, "Backup test failed: %s", err)
// check that the actual backup has been deleted
found, _, err = statBackupMeta(databaseClient, id)
found, _, _ = statBackupMeta(databaseClient, id)
require.False(t, found)
// create backup with download operation
@ -978,7 +975,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err)
// Wait for uploaded flag to appear
backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded)
_, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsUploaded)
require.NoError(t, err, "backup did not become ready: %s", err)
})
@ -1016,7 +1013,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err, "failed to create document: %s", err)
// now remove the backup locally
err = databaseClient.Backup().Delete(nil, id)
err = databaseClient.Backup().Delete(context.Background(), id)
require.NoError(t, err, "Failed to delete backup: %s", err)
// wait for the backup to become unavailable
@ -1034,12 +1031,12 @@ func TestBackupCluster(t *testing.T) {
defer backupClient.Delete(context.Background(), name, metav1.DeleteOptions{})
// wait until the backup becomes ready
backup, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsReady)
_, err = waitUntilBackup(deploymentClient, backup.GetName(), ns, backupIsReady)
require.NoError(t, err, "backup did not become ready: %s", err)
// now restore the backup
_, err = updateDeployment(deploymentClient, depl.GetName(), ns, func(spec *api.DeploymentSpec) {
spec.RestoreFrom = util.NewString(string(name))
spec.RestoreFrom = util.NewString(name)
})
require.NoError(t, err, "Failed to update deployment: %s", err)
@ -1106,7 +1103,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err)
require.Len(t, list.Items, 0, "unexpected matching ArangoBackup objects")
_, err = backupPolicyClient.Create(context.Background(), policy,metav1.CreateOptions{})
_, err = backupPolicyClient.Create(context.Background(), policy, metav1.CreateOptions{})
require.NoError(t, err)
defer backupPolicyClient.Delete(context.Background(), policy.Name, metav1.DeleteOptions{})
@ -1184,7 +1181,7 @@ func TestBackupCluster(t *testing.T) {
require.NoError(t, err)
require.Len(t, list.Items, 0, "unexpected matching ArangoBackup objects")
_, err = backupPolicyClient.Create(context.Background(), policy,metav1.CreateOptions{})
_, err = backupPolicyClient.Create(context.Background(), policy, metav1.CreateOptions{})
require.NoError(t, err)
defer backupPolicyClient.Delete(context.Background(), policy.Name, metav1.DeleteOptions{})

View file

@ -24,11 +24,12 @@ package tests
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"reflect"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/dchest/uniuri"
"github.com/stretchr/testify/require"
@ -296,17 +297,17 @@ func runCursorTests(t *testing.T, client driver.Client) {
contexts := []queryTestContext{
queryTestContext{nil, false},
queryTestContext{context.Background(), false},
queryTestContext{driver.WithQueryCount(nil), true},
queryTestContext{driver.WithQueryCount(nil, true), true},
queryTestContext{driver.WithQueryCount(nil, false), false},
queryTestContext{driver.WithQueryBatchSize(nil, 1), false},
queryTestContext{driver.WithQueryCache(nil), false},
queryTestContext{driver.WithQueryCache(nil, true), false},
queryTestContext{driver.WithQueryCache(nil, false), false},
queryTestContext{driver.WithQueryMemoryLimit(nil, 600000), false},
queryTestContext{driver.WithQueryTTL(nil, time.Minute), false},
queryTestContext{driver.WithQueryBatchSize(driver.WithQueryCount(nil), 1), true},
queryTestContext{driver.WithQueryCache(driver.WithQueryCount(driver.WithQueryBatchSize(nil, 2))), true},
queryTestContext{driver.WithQueryCount(context.Background()), true},
queryTestContext{driver.WithQueryCount(context.Background(), true), true},
queryTestContext{driver.WithQueryCount(context.Background(), false), false},
queryTestContext{driver.WithQueryBatchSize(context.Background(), 1), false},
queryTestContext{driver.WithQueryCache(context.Background()), false},
queryTestContext{driver.WithQueryCache(context.Background(), true), false},
queryTestContext{driver.WithQueryCache(context.Background(), false), false},
queryTestContext{driver.WithQueryMemoryLimit(context.Background(), 600000), false},
queryTestContext{driver.WithQueryTTL(context.Background(), time.Minute), false},
queryTestContext{driver.WithQueryBatchSize(driver.WithQueryCount(context.Background()), 1), true},
queryTestContext{driver.WithQueryCache(driver.WithQueryCount(driver.WithQueryBatchSize(context.Background(), 2))), true},
}
// Run tests for every context alternative
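
Where the retry helper earlier silenced the nil-context finding, this test fixes it: each driver.WithQuery* helper now receives context.Background() as the root instead of nil. staticcheck flags nil contexts either way, and the stdlib's own constructors panic on a nil parent, as this sketch notes (the typed key is an illustration, not driver API):

    package main

    import (
    	"context"
    	"fmt"
    )

    type ctxKey string

    func main() {
    	// context.WithValue(nil, ...) panics with "cannot create context
    	// from nil parent"; Background() is the correct root context.
    	ctx := context.WithValue(context.Background(), ctxKey("count"), true)
    	fmt.Println(ctx.Value(ctxKey("count")))
    }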

View file

@ -24,9 +24,10 @@ package tests
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/dchest/uniuri"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -67,7 +68,7 @@ func TestDeploymentClusterRocksDB(t *testing.T) {
deploymentSubTest(t, api.DeploymentModeCluster, api.StorageEngineRocksDB)
}
func deploymentSubTest(t *testing.T, mode api.DeploymentMode, engine api.StorageEngine) error {
func deploymentSubTest(t *testing.T, mode api.DeploymentMode, engine api.StorageEngine) {
// check environment
longOrSkip(t)
@ -98,8 +99,6 @@ func deploymentSubTest(t *testing.T, mode api.DeploymentMode, engine api.Storage
// Cleanup
removeDeployment(c, depl.GetName(), ns)
return nil
}
// test a setup containing multiple deployments

View file

@ -74,7 +74,7 @@ func main() {
// Start running tests
ctx, cancel := context.WithCancel(context.Background())
sigChannel := make(chan os.Signal)
sigChannel := make(chan os.Signal, 1)
signal.Notify(sigChannel, os.Interrupt, syscall.SIGTERM)
go handleSignal(sigChannel, cancel)
runTestLoop(ctx, client, testDuration)
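
Buffering the signal channel is a real bug fix surfaced by the linters (vet's sigchanyzer, SA1017 in staticcheck): signal.Notify sends without blocking, so a signal delivered before the receiver is ready is silently dropped on an unbuffered channel. Minimal runnable form:

    package main

    import (
    	"fmt"
    	"os"
    	"os/signal"
    	"syscall"
    )

    func main() {
    	// A buffer of 1 guarantees the non-blocking send from
    	// signal.Notify has somewhere to land.
    	sigChannel := make(chan os.Signal, 1)
    	signal.Notify(sigChannel, os.Interrupt, syscall.SIGTERM)
    	fmt.Println("waiting for SIGINT or SIGTERM...")
    	fmt.Println("received:", <-sigChannel)
    }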

View file

@ -1,50 +0,0 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//
package simple
import "context"
// isDocumentEqualTo reads an existing document and checks that it is equal to the given document.
// Returns: (isEqual,currentRevision,error)
func (t *simpleTest) isDocumentEqualTo(c *collection, key string, expected UserDocument) (bool, string, error) {
ctx := context.Background()
var result UserDocument
t.log.Info().Msgf("Checking existing document '%s' from '%s'...", key, c.name)
col, err := t.db.Collection(ctx, c.name)
if err != nil {
return false, "", maskAny(err)
}
m, err := col.ReadDocument(ctx, key, &result)
if err != nil {
// This is a failure
t.log.Error().Msgf("Failed to read document '%s' from '%s': %v", key, c.name, err)
return false, "", maskAny(err)
}
// Compare document against expected document
if result.Equals(expected) {
// Found an exact match
return true, m.Rev, nil
}
t.log.Info().Msgf("Document '%s' in '%s' returned different values: got %q expected %q", key, c.name, result, expected)
return false, m.Rev, nil
}

View file

@ -350,7 +350,7 @@ func (t *simpleTest) testLoop() {
// Now try to read it, it must exist
//t.client.SetCoordinator("")
if _, err := t.readExistingDocument(c, userDoc.Key, rev, false, false); err != nil {
if err := t.readExistingDocument(c, userDoc.Key, false); err != nil {
t.log.Error().Msgf("Failed to read just-created document '%s': %#v", userDoc.Key, err)
}
}
@ -363,8 +363,8 @@ func (t *simpleTest) testLoop() {
if len(t.collections) > 0 {
c := t.selectRandomCollection()
if len(c.existingDocs) > 0 {
randomKey, rev := c.selectRandomKey()
if _, err := t.readExistingDocument(c, randomKey, rev, false, false); err != nil {
randomKey := c.selectRandomKey()
if err := t.readExistingDocument(c, randomKey, false); err != nil {
t.log.Error().Msgf("Failed to read existing document '%s': %#v", randomKey, err)
}
}
@ -391,8 +391,8 @@ func (t *simpleTest) testLoop() {
if len(t.collections) > 0 {
c := t.selectRandomCollection()
if len(c.existingDocs) > 0 {
randomKey, rev := c.selectRandomKey()
if err := t.removeExistingDocument(c.name, randomKey, rev); err != nil {
randomKey := c.selectRandomKey()
if err := t.removeExistingDocument(c.name, randomKey); err != nil {
t.log.Error().Msgf("Failed to remove existing document '%s': %#v", randomKey, err)
} else {
// Remove succeeded, key should no longer exist
@ -428,13 +428,13 @@ func (t *simpleTest) testLoop() {
if len(t.collections) > 0 {
c := t.selectRandomCollection()
if len(c.existingDocs) > 0 {
randomKey, rev := c.selectRandomKey()
if newRev, err := t.updateExistingDocument(c, randomKey, rev); err != nil {
randomKey := c.selectRandomKey()
if _, err := t.updateExistingDocument(c, randomKey); err != nil {
t.log.Error().Msgf("Failed to update existing document '%s': %#v", randomKey, err)
} else {
// Updated succeeded, now try to read it, it should exist and be updated
//t.client.SetCoordinator("")
if _, err := t.readExistingDocument(c, randomKey, newRev, false, false); err != nil {
if err := t.readExistingDocument(c, randomKey, false); err != nil {
t.log.Error().Msgf("Failed to read just-updated document '%s': %#v", randomKey, err)
}
}
@ -462,13 +462,13 @@ func (t *simpleTest) testLoop() {
if len(t.collections) > 0 {
c := t.selectRandomCollection()
if len(c.existingDocs) > 0 {
randomKey, rev := c.selectRandomKey()
if newRev, err := t.replaceExistingDocument(c, randomKey, rev); err != nil {
randomKey := c.selectRandomKey()
if _, err := t.replaceExistingDocument(c, randomKey); err != nil {
t.log.Error().Msgf("Failed to replace existing document '%s': %#v", randomKey, err)
} else {
// Replace succeeded, now try to read it, it should exist and be replaced
//t.client.SetCoordinator("")
if _, err := t.readExistingDocument(c, randomKey, newRev, false, false); err != nil {
if err := t.readExistingDocument(c, randomKey, false); err != nil {
t.log.Error().Msgf("Failed to read just-replaced document '%s': %#v", randomKey, err)
}
}
@ -517,13 +517,13 @@ func (t *simpleTest) testLoop() {
if len(t.collections) > 0 {
c := t.selectRandomCollection()
if len(c.existingDocs) > 0 {
randomKey, _ := c.selectRandomKey()
if newRev, err := t.queryUpdateDocuments(c, randomKey); err != nil {
randomKey := c.selectRandomKey()
if _, err := t.queryUpdateDocuments(c, randomKey); err != nil {
t.log.Error().Msgf("Failed to update document using AQL query: %#v", err)
} else {
// Updated succeeded, now try to read it (anywhere), it should exist and be updated
//t.client.SetCoordinator("")
if _, err := t.readExistingDocument(c, randomKey, newRev, false, false); err != nil {
if err := t.readExistingDocument(c, randomKey, false); err != nil {
t.log.Error().Msgf("Failed to read just-updated document '%s': %#v", randomKey, err)
}
}
@ -536,13 +536,13 @@ func (t *simpleTest) testLoop() {
if len(t.collections) > 0 {
c := t.selectRandomCollection()
if len(c.existingDocs) > 0 {
randomKey, _ := c.selectRandomKey()
if newRev, err := t.queryUpdateDocumentsLongRunning(c, randomKey); err != nil {
randomKey := c.selectRandomKey()
if _, err := t.queryUpdateDocumentsLongRunning(c, randomKey); err != nil {
t.log.Error().Msgf("Failed to update document using long running AQL query: %#v", err)
} else {
// Updated succeeded, now try to read it (anywhere), it should exist and be updated
//t.client.SetCoordinator("")
if _, err := t.readExistingDocument(c, randomKey, newRev, false, false); err != nil {
if err := t.readExistingDocument(c, randomKey, false); err != nil {
t.log.Error().Msgf("Failed to read just-updated document '%s': %#v", randomKey, err)
}
}
@ -622,7 +622,7 @@ func (t *simpleTest) createAndInitCollection() error {
if t.shouldStop() || t.pauseRequested {
return nil
}
if _, err := t.readExistingDocument(c, k, "", true, false); err != nil {
if err := t.readExistingDocument(c, k, true); err != nil {
t.reportFailure(test.NewFailure("Failed to read existing document '%s': %#v", k, err))
}
t.actions++
@ -667,23 +667,13 @@ func (c *collection) removeExistingKey(key string) {
delete(c.existingDocs, key)
}
func (c *collection) selectRandomKey() (string, string) {
func (c *collection) selectRandomKey() string {
index := rand.Intn(len(c.existingDocs))
for k, v := range c.existingDocs {
for k := range c.existingDocs {
if index == 0 {
return k, v.rev
return k
}
index--
}
return "", "" // This should never be reached when len(t.existingDocs) > 0
}
func (c *collection) selectWrongRevision(key string) (string, bool) {
correctRev := c.existingDocs[key].rev
for _, v := range c.existingDocs {
if v.rev != correctRev && v.rev != "" {
return v.rev, true
}
}
return "", false // This should never be reached when len(t.existingDocs) > 1
return "" // This should never be reached when len(t.existingDocs) > 0
}
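
selectRandomKey now returns only the key, dropping the revision value that callers stopped using (and taking selectWrongRevision with it as dead code). The map-walk trick itself is unchanged: Go randomizes map iteration order, but rand.Intn over the walk gives a properly uniform pick. Runnable sketch with invented data:

    package main

    import (
    	"fmt"
    	"math/rand"
    )

    // selectRandomKey picks a uniformly random key by walking the map;
    // it assumes len(m) > 0, as the original does.
    func selectRandomKey(m map[string]struct{}) string {
    	index := rand.Intn(len(m))
    	for k := range m {
    		if index == 0 {
    			return k
    		}
    		index--
    	}
    	return "" // unreachable when len(m) > 0
    }

    func main() {
    	docs := map[string]struct{}{"doc1": {}, "doc2": {}, "doc3": {}}
    	fmt.Println(selectRandomKey(docs))
    }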

View file

@ -33,37 +33,37 @@ import (
// readExistingDocument reads an existing document with an optional explicit revision.
// The operation is expected to succeed.
func (t *simpleTest) readExistingDocument(c *collection, key, rev string, updateRevision, skipExpectedValueCheck bool) (string, error) {
func (t *simpleTest) readExistingDocument(c *collection, key string, updateRevision bool) error {
ctx := context.Background()
var result UserDocument
col, err := t.db.Collection(ctx, c.name)
if err != nil {
return "", maskAny(err)
return maskAny(err)
}
m, err := col.ReadDocument(ctx, key, &result)
_, err = col.ReadDocument(ctx, key, &result)
if err != nil {
// This is a failure
t.readExistingCounter.failed++
t.reportFailure(test.NewFailure("Failed to read existing document '%s' in collection '%s': %v", key, c.name, err))
return "", maskAny(err)
return maskAny(err)
}
// Compare document against expected document
if !skipExpectedValueCheck {
expected := c.existingDocs[key]
if result.Value != expected.Value || result.Name != expected.Name || result.Odd != expected.Odd {
// This is a failure
t.readExistingCounter.failed++
t.reportFailure(test.NewFailure("Read existing document '%s' returned different values '%s': got %q expected %q", key, c.name, result, expected))
return "", maskAny(fmt.Errorf("Read returned invalid values"))
}
expected := c.existingDocs[key]
if result.Value != expected.Value || result.Name != expected.Name || result.Odd != expected.Odd {
// This is a failure
t.readExistingCounter.failed++
t.reportFailure(test.NewFailure("Read existing document '%s' returned different values '%s': got %v expected %v", key, c.name, result, expected))
return maskAny(fmt.Errorf("Read returned invalid values"))
}
if updateRevision {
// Store read document so we have the last revision
c.existingDocs[key] = result
}
t.readExistingCounter.succeeded++
t.log.Info().Msgf("Reading existing document '%s' from '%s' succeeded", key, c.name)
return m.Rev, nil
return nil
}
// readNonExistingDocument reads a non-existing document.

View file

@ -32,7 +32,7 @@ import (
// removeExistingDocument removes an existing document with an optional explicit revision.
// The operation is expected to succeed.
func (t *simpleTest) removeExistingDocument(collectionName string, key, rev string) error {
func (t *simpleTest) removeExistingDocument(collectionName string, key string) error {
ctx := context.Background()
col, err := t.db.Collection(ctx, collectionName)
if err != nil {

View file

@ -35,7 +35,7 @@ import (
// replaceExistingDocument replaces an existing document with an optional explicit revision.
// The operation is expected to succeed.
func (t *simpleTest) replaceExistingDocument(c *collection, key, rev string) (string, error) {
func (t *simpleTest) replaceExistingDocument(c *collection, key string) (string, error) {
ctx := context.Background()
col, err := t.db.Collection(ctx, c.name)
if err != nil {

View file

@ -34,7 +34,7 @@ import (
// updateExistingDocument updates an existing document with an optional explicit revision.
// The operation is expected to succeed.
func (t *simpleTest) updateExistingDocument(c *collection, key, rev string) (string, error) {
func (t *simpleTest) updateExistingDocument(c *collection, key string) (string, error) {
ctx := context.Background()
col, err := t.db.Collection(ctx, c.name)
if err != nil {

View file

@ -35,18 +35,7 @@ import (
)
var (
delayBeforeCompare = time.Minute
testPeriod = time.Minute * 2
systemCollectionsToIgnore = map[string]bool{
"_appbundles": true,
"_apps": true,
"_jobs": true,
"_queues": true,
"_routing": true,
"_statistics": true,
"_statisticsRaw": true,
"_statistics15": true,
}
testPeriod = time.Minute * 2
)
// runTestLoop keeps running tests until the given context is canceled.

View file

@ -115,7 +115,7 @@ func TestLoadBalancingSourceRanges(t *testing.T) {
}
// Now change the deployment spec to use different ranges:
depl, err = updateDeployment(c, depl.GetName(), ns,
_, err = updateDeployment(c, depl.GetName(), ns,
func(spec *api.DeploymentSpec) {
spec.ExternalAccess.LoadBalancerSourceRanges = []string{"4.5.0.0/16"}
})

View file

@ -24,11 +24,12 @@ package tests
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"reflect"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/dchest/uniuri"
driver "github.com/arangodb/go-driver"
@ -150,7 +151,7 @@ func loadBalancingCursorSubtest(t *testing.T, useVst bool) {
var r driver.Response
// Setup context
ctx = driver.WithResponse(driver.WithQueryBatchSize(nil, 1), &r)
ctx = driver.WithResponse(driver.WithQueryBatchSize(context.Background(), 1), &r)
// keep track of whether at least one request was forwarded internally to the
// correct coordinator behind the load balancer

View file

@ -57,12 +57,12 @@ func TestAddingMetrics(t *testing.T) {
depl.Spec.SetDefaults(depl.GetName()) // this must be last
// Create deployment
deployment, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
_, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
require.NoErrorf(t, err, "Create deployment failed")
defer deferedCleanupDeployment(c, depl.GetName(), ns)
// Wait for deployment to be ready
deployment, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
deployment, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
require.NoErrorf(t, err, "Deployment not running in time")
// Create a database client

View file

@ -34,11 +34,11 @@ func TestOperatorUpgradeFrom038(t *testing.T) {
t.Fatalf("Remaining arangodb pods did not vanish, can not start test: %v", err)
}
currentimage, err := updateOperatorImage(t, ns, kubecli, oldOperatorTestImage)
currentimage, err := updateOperatorImage(ns, kubecli, oldOperatorTestImage)
if err != nil {
t.Fatalf("Could not replace operator with old image: %v", err)
}
defer updateOperatorImage(t, ns, kubecli, currentimage)
defer updateOperatorImage(ns, kubecli, currentimage)
if err := waitForOperatorImage(ns, kubecli, oldOperatorTestImage); err != nil {
t.Fatalf("Old Operator not ready in time: %v", err)
@ -87,23 +87,20 @@ func TestOperatorUpgradeFrom038(t *testing.T) {
if !k8sutil.IsPodReady(pod) {
errorChannel <- fmt.Errorf("Pod no longer ready: %s", pod.GetName())
}
break
case watch.Deleted:
errorChannel <- fmt.Errorf("Pod was deleted: %s", pod.GetName())
break
case watch.Added:
if len(addedPods) >= 9 {
errorChannel <- fmt.Errorf("New pod was created: %s", pod.GetName())
}
addedPods = append(addedPods, pod.GetName())
break
}
}
}
}
}()
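Go switch cases never fall through, so the deleted break statements were dead code, the kind of redundant control flow gosimple reports as S1023. A compact illustration:

package main

import "fmt"

func classify(event string) string {
	switch event {
	case "modified":
		// Control leaves the switch here on its own, so an explicit
		// break (like the ones deleted above) is redundant.
		return "check readiness"
	case "deleted":
		return "report deletion"
	default:
		return "ignore"
	}
}

func main() {
	fmt.Println(classify("modified"))
}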
if _, err := updateOperatorImage(t, ns, kubecli, currentimage); err != nil {
if _, err := updateOperatorImage(ns, kubecli, currentimage); err != nil {
t.Fatalf("Failed to replace new ")
}
@ -120,7 +117,7 @@ func TestOperatorUpgradeFrom038(t *testing.T) {
}
}
func updateOperatorImage(t *testing.T, ns string, kube kubernetes.Interface, newImage string) (string, error) {
func updateOperatorImage(ns string, kube kubernetes.Interface, newImage string) (string, error) {
for {
depl, err := kube.AppsV1().Deployments(ns).Get(context.Background(), operatorTestDeploymentName, metav1.GetOptions{})
if err != nil {
@ -140,10 +137,6 @@ func updateOperatorImage(t *testing.T, ns string, kube kubernetes.Interface, new
}
}
func updateOperatorDeployment(ns string, kube kubernetes.Interface) (*appsv1.Deployment, error) {
return kube.AppsV1().Deployments(ns).Get(context.Background(), operatorTestDeploymentName, metav1.GetOptions{})
}
func getOperatorImage(depl *appsv1.Deployment) (string, error) {
for _, c := range depl.Spec.Template.Spec.Containers {
if c.Name == "operator" {

View file

@ -35,12 +35,6 @@ func isPDBAsExpected(kube kubernetes.Interface, name, ns string, expectedMinAvai
return nil
}
func waitForPDBAsExpected(kube kubernetes.Interface, name, ns string, expectedMinAvailable int) error {
return retry.Retry(func() error {
return isPDBAsExpected(kube, name, ns, expectedMinAvailable)
}, 20*time.Second)
}
func waitForPDBsOfDeployment(kube kubernetes.Interface, apiObject *api.ArangoDeployment) error {
spec := apiObject.Spec
return retry.Retry(func() error {

View file

@ -24,9 +24,10 @@ package tests
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/dchest/uniuri"
driver "github.com/arangodb/go-driver"
@ -223,7 +224,7 @@ func TestScaleClusterWithSync(t *testing.T) {
}
// Wait for syncmasters to be available
if err := waitUntilSyncVersionUp(syncClient, nil); err != nil {
if err := waitUntilSyncVersionUp(syncClient); err != nil {
t.Fatalf("SyncMasters not running returning version in time: %v", err)
}

View file

@ -101,7 +101,7 @@ func TestSecretHashesRootUser(t *testing.T) {
rootHashSecret := depl.Status.SecretHashes.Users[api.UserNameRoot]
secretRootName := string(depl.Spec.Bootstrap.PasswordSecretNames[api.UserNameRoot])
secretRoot, err := waitUntilSecret(kubecli, secretRootName, ns, nil, time.Second)
secretRoot, err := waitUntilSecret(kubecli, secretRootName, ns, time.Second)
if err != nil {
t.Fatalf("Root secret '%s' not found: %v", secretRootName, err)
}

View file

@ -241,7 +241,7 @@ func TestServiceAccountClusterWithSync(t *testing.T) {
syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t)
// Wait for syncmasters to be available
if err := waitUntilSyncVersionUp(syncClient, nil); err != nil {
if err := waitUntilSyncVersionUp(syncClient); err != nil {
t.Fatalf("SyncMasters not running returning version in time: %v", err)
}

View file

@ -24,10 +24,11 @@ package tests
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
"time"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
driver "github.com/arangodb/go-driver"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
@ -162,7 +163,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Add %s sidecar to group %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -189,7 +190,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Add sidecar %s to group %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -209,7 +210,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Update %s in group %s with new command line ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -230,7 +231,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Update %s in group %s with new command line arguments ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -251,7 +252,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Change environment variables of %s sidecars for %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -272,7 +273,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Update image of sidecar %s in group %s ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -292,7 +293,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Update %s in group %s with new image pull policy ...", name, coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -312,7 +313,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Remove all sidecars from group %s ...", coordinators)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -336,7 +337,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Add %s sidecar to %s and %s ...", name, coordinators, dbservers)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -370,7 +371,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Remove all sidecars ...")
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -390,7 +391,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Add a %s sidecar to %s ...", name, agents)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {
@ -412,7 +413,7 @@ func runSideCarTest(t *testing.T, spec SideCarTest) {
} else {
t.Logf("Delete %s containers and add %s sidecars to %s", agents, name, dbservers)
}
err = waitUntilClusterSidecarsEqualSpec(t, spec.Mode(), *depl)
err = waitUntilClusterSidecarsEqualSpec(t, *depl)
if err != nil {
t.Fatalf("... failed: %v", err)
} else {

View file

@ -24,9 +24,10 @@ package tests
import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/dchest/uniuri"
"github.com/stretchr/testify/assert"
@ -191,7 +192,7 @@ func TestSimpleClusterWithSync(t *testing.T) {
syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t)
// Wait for syncmasters to be available
if err := waitUntilSyncVersionUp(syncClient, nil); err != nil {
if err := waitUntilSyncVersionUp(syncClient); err != nil {
t.Fatalf("SyncMasters not running returning version in time: %v", err)
}

View file

@ -21,7 +21,6 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/retry"
"github.com/pkg/errors"
batchv1 "k8s.io/api/batch/v1"
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
@ -132,15 +131,6 @@ func newReplication(ns, name string) *rapi.ArangoDeploymentReplication {
}
}
func newArangoSyncTestJob(ns, name string) *batchv1.Job {
return &batchv1.Job{
ObjectMeta: metav1.ObjectMeta{
Name: name,
Namespace: ns,
},
}
}
func waitForSyncDeploymentReady(ctx context.Context, ns, name string, kubecli kubernetes.Interface, c versioned.Interface) error {
return retry.Retry(func() error {
deployment, err := c.DatabaseV1().ArangoDeployments(ns).Get(ctx, name, metav1.GetOptions{})
@ -171,15 +161,15 @@ func setupArangoDBCluster(ctx context.Context, kube kubernetes.Interface, c vers
dstSpec := newSyncDeployment(namespace, dstDeploymentName, false)
srcSpec := newSyncDeployment(namespace, srcDeploymentName, true)
if _, err := c.DatabaseV1().ArangoDeployments(namespace).Create(srcSpec); err != nil {
if _, err := c.DatabaseV1().ArangoDeployments(namespace).Create(ctx, srcSpec, metav1.CreateOptions{}); err != nil {
return err
}
if _, err := c.DatabaseV1().ArangoDeployments(namespace).Create(dstSpec); err != nil {
if _, err := c.DatabaseV1().ArangoDeployments(namespace).Create(ctx, dstSpec, metav1.CreateOptions{}); err != nil {
return err
}
replSpec := newReplication(namespace, replicationResourceName)
if _, err := c.ReplicationV1().ArangoDeploymentReplications(namespace).Create(replSpec); err != nil {
if _, err := c.ReplicationV1().ArangoDeploymentReplications(namespace).Create(ctx, replSpec, metav1.CreateOptions{}); err != nil {
return err
}
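The Create/Get/Delete rewrites in this file track the client-go v0.18 API, which added a leading context.Context and value-typed option structs to every typed client (and moved Stream to take a context as well). A hedged sketch against the fake clientset:

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// createPod shows the post-0.18 call shape: context first, options by value.
func createPod(kube kubernetes.Interface, ns string, pod *corev1.Pod) error {
	_, err := kube.CoreV1().Pods(ns).Create(context.Background(), pod, metav1.CreateOptions{})
	return err
}

func main() {
	kube := fake.NewSimpleClientset()
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "demo"}}
	if err := createPod(kube, "default", pod); err != nil {
		fmt.Println("create failed:", err)
		return
	}
	// Delete now takes metav1.DeleteOptions by value, not a pointer.
	if err := kube.CoreV1().Pods("default").Delete(context.Background(), "demo", metav1.DeleteOptions{}); err != nil {
		fmt.Println("delete failed:", err)
	}
}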
@ -200,7 +190,7 @@ func setupArangoDBCluster(ctx context.Context, kube kubernetes.Interface, c vers
func waitForReplicationGone(ns, name string, c versioned.Interface) error {
return retry.Retry(func() error {
if _, err := c.ReplicationV1().ArangoDeploymentReplications(ns).Get(name, metav1.GetOptions{}); k8sutil.IsNotFound(err) {
if _, err := c.ReplicationV1().ArangoDeploymentReplications(ns).Get(context.Background(), name, metav1.GetOptions{}); k8sutil.IsNotFound(err) {
return nil
} else if err != nil {
return err
@ -211,7 +201,7 @@ func waitForReplicationGone(ns, name string, c versioned.Interface) error {
func waitForDeploymentGone(ns, name string, c versioned.Interface) error {
return retry.Retry(func() error {
if _, err := c.DatabaseV1().ArangoDeployments(ns).Get(name, metav1.GetOptions{}); k8sutil.IsNotFound(err) {
if _, err := c.DatabaseV1().ArangoDeployments(ns).Get(context.Background(), name, metav1.GetOptions{}); k8sutil.IsNotFound(err) {
return nil
} else if err != nil {
return err
@ -221,7 +211,7 @@ func waitForDeploymentGone(ns, name string, c versioned.Interface) error {
}
func removeReplicationWaitForCompletion(ns, name string, c versioned.Interface) error {
if err := c.ReplicationV1().ArangoDeploymentReplications(ns).Delete(name, &metav1.DeleteOptions{}); err != nil {
if err := c.ReplicationV1().ArangoDeploymentReplications(ns).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
if k8sutil.IsNotFound(err) {
return nil
}
@ -234,7 +224,7 @@ func removeReplicationWaitForCompletion(ns, name string, c versioned.Interface)
}
func removeDeploymentWaitForCompletion(ns, name string, c versioned.Interface) error {
if err := c.DatabaseV1().ArangoDeployments(ns).Delete(name, &metav1.DeleteOptions{}); err != nil {
if err := c.DatabaseV1().ArangoDeployments(ns).Delete(context.Background(), name, metav1.DeleteOptions{}); err != nil {
if k8sutil.IsNotFound(err) {
return nil
}
@ -246,7 +236,7 @@ func removeDeploymentWaitForCompletion(ns, name string, c versioned.Interface) e
return nil
}
func cleanupArangoDBCluster(ctx context.Context, kube kubernetes.Interface, c versioned.Interface) error {
func cleanupArangoDBCluster(c versioned.Interface) error {
if err := removeReplicationWaitForCompletion(namespace, replicationResourceName, c); err != nil {
return err
}
@ -261,7 +251,7 @@ func cleanupArangoDBCluster(ctx context.Context, kube kubernetes.Interface, c ve
func waitForPodRunning(ns, name string, kube kubernetes.Interface) error {
return retry.Retry(func() error {
pod, err := kube.CoreV1().Pods(ns).Get(name, metav1.GetOptions{})
pod, err := kube.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
if err != nil {
return err
}
@ -277,7 +267,7 @@ func waitForPodRunning(ns, name string, kube kubernetes.Interface) error {
func copyPodLogs(ns, name string, kube kubernetes.Interface) error {
logs, err := kube.CoreV1().Pods(ns).GetLogs(name, &corev1.PodLogOptions{
Follow: true,
}).Stream()
}).Stream(context.Background())
if err != nil {
return err
}
@ -358,9 +348,9 @@ func createArangoSyncTestPod(ns, name string) *corev1.Pod {
func runArangoSyncTests(kube kubernetes.Interface) error {
// Start a new pod with the test image
defer kube.CoreV1().Pods(namespace).Delete(arangosyncTestPodName, &metav1.DeleteOptions{})
defer kube.CoreV1().Pods(namespace).Delete(context.Background(), arangosyncTestPodName, metav1.DeleteOptions{})
podspec := createArangoSyncTestPod(namespace, arangosyncTestPodName)
if _, err := kube.CoreV1().Pods(namespace).Create(podspec); err != nil {
if _, err := kube.CoreV1().Pods(namespace).Create(context.Background(), podspec, metav1.CreateOptions{}); err != nil {
return err
}
@ -376,7 +366,7 @@ func runArangoSyncTests(kube kubernetes.Interface) error {
return err
}
pod, err := kube.CoreV1().Pods(namespace).Get(arangosyncTestPodName, metav1.GetOptions{})
pod, err := kube.CoreV1().Pods(namespace).Get(context.Background(), arangosyncTestPodName, metav1.GetOptions{})
if err != nil {
return err
}
@ -409,7 +399,7 @@ func main() {
exitCode = 1
}
if err := cleanupArangoDBCluster(ctx, kube, c); err != nil {
if err := cleanupArangoDBCluster(c); err != nil {
log.Printf("Failed to clean up deployments: %s", err.Error())
}

View file

@ -95,7 +95,7 @@ func TestSyncSimple(t *testing.T) {
// Wait for deployments to be ready
// Wait for access package
// Deploy access package
_, err = waitUntilSecret(kubecli, apname, ns, nil, deploymentReadyTimeout)
_, err = waitUntilSecret(kubecli, apname, ns, deploymentReadyTimeout)
if err != nil {
t.Fatalf("Failed to get access package: %v", err)
}
@ -202,7 +202,7 @@ func TestSyncToggleEnabled(t *testing.T) {
}
// Wait until sync jwt secret has been created
if _, err := waitUntilSecret(kubecli, updated.Spec.Sync.Authentication.GetJWTSecretName(), ns, nil, deploymentReadyTimeout); err != nil {
if _, err := waitUntilSecret(kubecli, updated.Spec.Sync.Authentication.GetJWTSecretName(), ns, deploymentReadyTimeout); err != nil {
t.Fatalf("Sync JWT secret not created in time: %v", err)
}
@ -210,7 +210,7 @@ func TestSyncToggleEnabled(t *testing.T) {
syncClient := mustNewArangoSyncClient(ctx, kubecli, apiObject, t)
// Wait for syncmasters to be available
if err := waitUntilSyncVersionUp(syncClient, nil); err != nil {
if err := waitUntilSyncVersionUp(syncClient); err != nil {
t.Fatalf("SyncMasters not running returning version in time: %v", err)
}

View file

@ -52,7 +52,6 @@ import (
"github.com/pkg/errors"
"github.com/rs/zerolog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
rapi "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1"
@ -75,13 +74,10 @@ var (
showEnterpriseImageOnce sync.Once
)
// CreateArangodClientForDNSName creates a go-driver client for a given DNS name.
func createArangodVSTClientForDNSName(ctx context.Context, cli corev1.CoreV1Interface, apiObject *api.ArangoDeployment, dnsName string, shortTimeout bool) (driver.Client, error) {
// createArangodVSTClientForDNSName creates a go-driver client for a given DNS name.
func createArangodVSTClientForDNSName(apiObject *api.ArangoDeployment, dnsName string, shortTimeout bool) (driver.Client, error) {
config := driver.ClientConfig{}
connConfig, err := createArangodVSTConfigForDNSNames(ctx, cli, apiObject, []string{dnsName}, shortTimeout)
if err != nil {
return nil, maskAny(err)
}
connConfig := createArangodVSTConfigForDNSNames(apiObject, []string{dnsName}, shortTimeout)
// TODO deal with TLS with proper CA checking
conn, err := vst.NewConnection(connConfig)
if err != nil {
@ -106,7 +102,7 @@ func createArangodVSTClientForDNSName(ctx context.Context, cli corev1.CoreV1Inte
}
// createArangodVSTConfigForDNSNames creates a go-driver VST connection config for a given DNS names.
func createArangodVSTConfigForDNSNames(ctx context.Context, cli corev1.CoreV1Interface, apiObject *api.ArangoDeployment, dnsNames []string, shortTimeout bool) (vst.ConnectionConfig, error) {
func createArangodVSTConfigForDNSNames(apiObject *api.ArangoDeployment, dnsNames []string, shortTimeout bool) vst.ConnectionConfig {
scheme := "http"
tlsConfig := &tls.Config{}
timeout := 90 * time.Second
@ -128,14 +124,14 @@ func createArangodVSTConfigForDNSNames(ctx context.Context, cli corev1.CoreV1Int
for _, dnsName := range dnsNames {
connConfig.Endpoints = append(connConfig.Endpoints, scheme+"://"+net.JoinHostPort(dnsName, strconv.Itoa(k8sutil.ArangoPort)))
}
return connConfig, nil
return connConfig
}
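With the ctx and cli plumbing gone, this function can no longer fail, so the always-nil error result is dropped as well; unparam reports such results. A minimal analogue, echoing the JoinHostPort pattern above:

package example

import (
	"net"
	"strconv"
)

// buildEndpoints can never fail, so it returns just the slice; keeping an
// always-nil error would force pointless err checks at every call site.
func buildEndpoints(scheme string, hosts []string, port int) []string {
	endpoints := make([]string, 0, len(hosts))
	for _, h := range hosts {
		endpoints = append(endpoints, scheme+"://"+net.JoinHostPort(h, strconv.Itoa(port)))
	}
	return endpoints
}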
// CreateArangodDatabaseVSTClient creates a go-driver client for accessing the entire cluster (or single server) via VST
func createArangodDatabaseVSTClient(ctx context.Context, cli corev1.CoreV1Interface, apiObject *api.ArangoDeployment, shortTimeout bool) (driver.Client, error) {
// createArangodDatabaseVSTClient creates a go-driver client for accessing the entire cluster (or single server) via VST
func createArangodDatabaseVSTClient(apiObject *api.ArangoDeployment, shortTimeout bool) (driver.Client, error) {
// Create connection
dnsName := k8sutil.CreateDatabaseClientServiceDNSName(apiObject)
c, err := createArangodVSTClientForDNSName(ctx, cli, apiObject, dnsName, shortTimeout)
c, err := createArangodVSTClientForDNSName(apiObject, dnsName, shortTimeout)
if err != nil {
return nil, maskAny(err)
}
@ -168,10 +164,6 @@ func getEnterpriseImageOrSkip(t *testing.T) string {
const testEnterpriseLicenseKeySecretName = "arangodb-jenkins-license-key"
const testBackupRemoteSecretName = "arangodb-backup-remote-secret"
func getEnterpriseLicenseKey() string {
return strings.TrimSpace(os.Getenv("ENTERPRISELICENSE"))
}
// shouldCleanDeployments returns true when deployments created
// by tests should be removed, even when the test fails.
func shouldCleanDeployments() bool {
@ -202,7 +194,7 @@ func mustNewArangodDatabaseClient(ctx context.Context, kubecli kubernetes.Interf
shortTimeout := options != nil && options.ShortTimeout
useVST := options != nil && options.UseVST
if useVST {
c, err = createArangodDatabaseVSTClient(ctx, kubecli.CoreV1(), apiObject, shortTimeout)
c, err = createArangodDatabaseVSTClient(apiObject, shortTimeout)
} else {
c, err = arangod.CreateArangodDatabaseClient(ctx, kubecli.CoreV1(), apiObject, shortTimeout)
}
@ -349,7 +341,7 @@ func waitUntilDeployment(cli versioned.Interface, deploymentName, ns string, pre
// waitUntilSecret waits until a secret with the given name exists
// in the given namespace.
func waitUntilSecret(cli kubernetes.Interface, secretName, ns string, predicate func(*v1.Secret) error, timeout time.Duration) (*v1.Secret, error) {
func waitUntilSecret(cli kubernetes.Interface, secretName, ns string, timeout time.Duration) (*v1.Secret, error) {
var result *v1.Secret
op := func() error {
obj, err := cli.CoreV1().Secrets(ns).Get(context.Background(), secretName, metav1.GetOptions{})
@ -358,11 +350,6 @@ func waitUntilSecret(cli kubernetes.Interface, secretName, ns string, predicate
return maskAny(err)
}
result = obj
if predicate != nil {
if err := predicate(obj); err != nil {
return maskAny(err)
}
}
return nil
}
if err := retry.Retry(op, timeout); err != nil {
@ -517,14 +504,12 @@ func waitUntilVersionUp(cli driver.Client, predicate func(driver.VersionInfo) er
// waitUntilSyncVersionUp waits until the syncmasters respond to
// an `/_api/version` request without an error.
func waitUntilSyncVersionUp(cli client.API, predicate func(client.VersionInfo) error) error {
func waitUntilSyncVersionUp(cli client.API) error {
ctx := context.Background()
op := func() error {
if version, err := cli.Version(ctx); err != nil {
if _, err := cli.Version(ctx); err != nil {
return maskAny(err)
} else if predicate != nil {
return predicate(version)
}
return nil
}
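The removed predicate argument was nil at every call site, the same unparam pattern as elsewhere in this commit, seen here at the definition side. A hypothetical before/after sketch:

package main

import "fmt"

// waitVersionUp mirrors the pre-cleanup helper: predicate was nil at every
// call site, so unparam suggested deleting the parameter outright.
func waitVersionUp(predicate func(version string) error) error {
	version := "3.7.0"
	if predicate != nil {
		return predicate(version)
	}
	return nil
}

func waitVersionUpSimple() error { return nil } // after cleanup: no parameter at all

func main() {
	// Every caller looked like this, which is the signal unparam keys on.
	fmt.Println(waitVersionUp(nil), waitVersionUpSimple())
}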
@ -589,7 +574,7 @@ func createEqualVersionsPredicate(version driver.Version) func(driver.VersionInf
}
// waitUntilClusterSidecarsEqualSpec returns nil once the sidecars in the cluster match the deployment spec
func waitUntilClusterSidecarsEqualSpec(t *testing.T, spec api.DeploymentMode, depl api.ArangoDeployment) error {
func waitUntilClusterSidecarsEqualSpec(t *testing.T, depl api.ArangoDeployment) error {
c := cl.MustNewClient()
ns := getNamespace(t)
@ -678,6 +663,7 @@ func updateDeployment(cli versioned.Interface, deploymentName, ns string, update
current, err = cli.DatabaseV1().ArangoDeployments(ns).Update(context.Background(), current, metav1.UpdateOptions{})
if k8sutil.IsConflict(err) {
// Retry
continue
} else if err != nil {
return nil, maskAny(err)
}
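The added continue fixes a real bug: on a ResourceVersion conflict the loop previously fell through and returned the stale object as if the update had succeeded. A runnable sketch of the retry loop with the fix in place (simulated conflict, not the Kubernetes client):

package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("resource version conflict")

// update succeeds only on the third attempt, standing in for an
// optimistic-concurrency update that loses the race twice.
func update(attempt int) error {
	if attempt < 2 {
		return errConflict
	}
	return nil
}

func main() {
	for attempt := 0; ; attempt++ {
		err := update(attempt)
		if errors.Is(err, errConflict) {
			continue // without this, a conflict fell through and was treated as success
		} else if err != nil {
			fmt.Println("fatal:", err)
			return
		}
		fmt.Println("updated after", attempt+1, "attempts")
		return
	}
}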
@ -927,7 +913,6 @@ func (d *DocumentGenerator) generate(t *testing.T, generator func(int) interface
d.documentsMeta, errorSlice, err = collection.CreateDocuments(context.Background(), items)
require.NoError(t, err, "failed to create documents")
require.Equal(t, errorSlice, errorSliceExpected)
return
}
func (d *DocumentGenerator) check(t *testing.T) {

View file

@ -34,7 +34,6 @@ func timeout(interval, timeout time.Duration, action func() error) error {
}
return err
}
break
case <-timeoutT.C:
return fmt.Errorf("function timeouted")
}

View file

@ -24,10 +24,11 @@ package tests
import (
"context"
"fmt"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"strings"
"testing"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
driver "github.com/arangodb/go-driver"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
kubeArangoClient "github.com/arangodb/kube-arangodb/pkg/client"
@ -238,14 +239,14 @@ func runUpgradeTest(t *testing.T, spec UpgradeTest) {
depl.Spec.SetDefaults(depl.GetName()) // this must be last
// Create deployment
deployment, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
_, err := c.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), depl, metav1.CreateOptions{})
if err != nil {
t.Fatalf("Create deployment failed: %v", err)
}
defer deferedCleanupDeployment(c, depl.GetName(), ns)
// Wait for deployment to be ready
deployment, err = waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
deployment, err := waitUntilDeployment(c, depl.GetName(), ns, deploymentIsReady())
if err != nil {
t.Fatalf("Deployment not running in time: %v", err)
}
@ -258,7 +259,7 @@ func runUpgradeTest(t *testing.T, spec UpgradeTest) {
}
// Try to change image version
deployment, err = updateDeployment(c, depl.GetName(), ns,
_, err = updateDeployment(c, depl.GetName(), ns,
func(depl *api.DeploymentSpec) {
depl.Image = util.NewString(spec.ToImage())
})

View file

@ -19,5 +19,3 @@
//
package tools
import _ "github.com/jessevdk/go-assets-builder"