Mirror of https://github.com/arangodb/kube-arangodb.git (synced 2024-12-14 11:57:37 +00:00)

[Feature] Add customizable volumes (#533)

Parent: 26088637dc
Commit: d8c9f5a536

25 changed files with 2977 additions and 1209 deletions
@@ -1,6 +1,9 @@
# Change Log

## [master](https://github.com/arangodb/kube-arangodb/tree/master) (N/A)
- Added customizable Volumes and VolumeMounts for the ArangoDB server container
- Added MemoryOverride flag for ArangoDB >= 3.6.3
- Improved Rotation discovery process
- Added annotation to rotate ArangoDeployment in a secure way

## [1.0.0](https://github.com/arangodb/kube-arangodb/tree/1.0.0) (2020-03-03)
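For orientation, a minimal Go sketch (not part of this commit) of how the new fields described above might be filled in; the volume name, ConfigMap name, and mount path are made up for illustration:

package example

import (
  api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
  core "k8s.io/api/core/v1"
)

// customVolumesSpec sketches a ServerGroupSpec that mounts an extra ConfigMap
// into the server container via the new Volumes/VolumeMounts fields.
func customVolumesSpec() api.ServerGroupSpec {
  return api.ServerGroupSpec{
    Volumes: api.ServerGroupSpecVolumes{
      {
        Name: "extra-config", // hypothetical volume name
        ConfigMap: &api.ServerGroupSpecVolumeConfigMap{
          LocalObjectReference: core.LocalObjectReference{Name: "extra-config-map"}, // hypothetical ConfigMap
        },
      },
    },
    VolumeMounts: api.ServerGroupSpecVolumeMounts{
      {Name: "extra-config", MountPath: "/etc/arangodb/extra"},
    },
  }
}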
Makefile (28 lines changed)

@@ -177,7 +177,11 @@ linter: fmt
  $(SOURCES_PACKAGES)

.PHONY: build
-build: docker docker-ubi manifests
+build: docker manifests
+
+ifndef IGNORE_UBI
+build: docker-ubi
+endif

.PHONY: clean
clean:
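(With this change the UBI image is built only when IGNORE_UBI is unset; presumably it can be skipped by invoking make with that variable set, e.g. "make build IGNORE_UBI=1".)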
@@ -204,12 +208,19 @@ update-generated:
  @mkdir -p $(ORGDIR)
  @ln -s -f $(SCRIPTDIR) $(ORGDIR)/kube-arangodb
  GOPATH=$(GOBUILDDIR) $(VENDORDIR)/k8s.io/code-generator/generate-groups.sh \
    "all" \
    "github.com/arangodb/kube-arangodb/pkg/generated" \
    "github.com/arangodb/kube-arangodb/pkg/apis" \
    "deployment:v1 replication:v1 storage:v1alpha backup:v1" \
    --go-header-file "./tools/codegen/boilerplate.go.txt" \
    $(VERIFYARGS)
+  GOPATH=$(GOBUILDDIR) $(VENDORDIR)/k8s.io/code-generator/generate-groups.sh \
+    "deepcopy" \
+    "github.com/arangodb/kube-arangodb/pkg/generated" \
+    "github.com/arangodb/kube-arangodb/pkg/apis" \
+    "shared:v1" \
+    --go-header-file "./tools/codegen/boilerplate.go.txt" \
+    $(VERIFYARGS)

.PHONY: verify-generated
verify-generated:

@@ -226,6 +237,9 @@ dashboard/assets.go: $(DASHBOARDSOURCES) $(DASHBOARDDIR)/Dockerfile.build
  $(DASHBOARDBUILDIMAGE)
  go run github.com/jessevdk/go-assets-builder -s /dashboard/build/ -o dashboard/assets.go -p dashboard dashboard/build

.PHONY: bin
bin: $(BIN)

$(BIN): $(SOURCES) dashboard/assets.go VERSION
  @mkdir -p $(BINDIR)
  CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -installsuffix netgo -ldflags "-X main.projectVersion=$(VERSION) -X main.projectBuild=$(COMMIT)" -o $(BIN) $(REPOPATH)
go.mod (1 line changed)

@@ -44,6 +44,7 @@ require (
  golang.org/x/sys v0.0.0-20200116001909-b77594299b42
  gopkg.in/go-playground/assert.v1 v1.2.1 // indirect
  gopkg.in/go-playground/validator.v8 v8.18.2 // indirect
  gopkg.in/yaml.v2 v2.2.8
  k8s.io/api v0.17.3
  k8s.io/apiextensions-apiserver v0.17.3
  k8s.io/apimachinery v0.17.3
@@ -45,8 +45,8 @@ type ArangoDeploymentList struct {
type ArangoDeployment struct {
  metav1.TypeMeta   `json:",inline"`
  metav1.ObjectMeta `json:"metadata,omitempty"`
-  Spec   DeploymentSpec   `json:"spec"`
-  Status DeploymentStatus `json:"status"`
+  Spec   DeploymentSpec   `json:"spec,omitempty"`
+  Status DeploymentStatus `json:"status,omitempty"`
}

type ServerGroupFunc func(ServerGroup, ServerGroupSpec, *MemberStatusList) error
@@ -29,7 +29,7 @@ import (
// DeploymentStatus contains the status part of a Cluster resource.
type DeploymentStatus struct {
  // Phase holds the current lifetime phase of the deployment
-  Phase DeploymentPhase `json:"phase"`
+  Phase DeploymentPhase `json:"phase,omitempty"`
  // Reason contains a human readable reason for reaching the current state (can be empty)
  Reason string `json:"reason,omitempty"` // Reason for current state
@@ -26,6 +26,8 @@ import (
  "math"
  "strings"

+  "github.com/arangodb/kube-arangodb/pkg/apis/shared"
+
  "github.com/pkg/errors"
  v1 "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/api/resource"

@@ -72,6 +74,10 @@ type ServerGroupSpec struct {
  Sidecars []v1.Container `json:"sidecars,omitempty"`
  // SecurityContext specifies security context for group
  SecurityContext *ServerGroupSpecSecurityContext `json:"securityContext,omitempty"`
+  // Volumes define list of volumes mounted to pod
+  Volumes ServerGroupSpecVolumes `json:"volumes,omitempty"`
+  // VolumeMounts define list of volume mounts mounted into server container
+  VolumeMounts ServerGroupSpecVolumeMounts `json:"volumeMounts,omitempty"`
}

// ServerGroupSpecSecurityContext contains specification for pod security context

@@ -372,12 +378,44 @@ func (s ServerGroupSpec) Validate(group ServerGroup, used bool, mode DeploymentMode) error {
      }
    }
  }

    if err := s.validate(); err != nil {
      return maskAny(err)
    }
  } else if s.GetCount() != 0 {
    return maskAny(errors.Wrapf(ValidationError, "Invalid count value %d for un-used group. Expected 0", s.GetCount()))
  }
  return nil
}

func (s *ServerGroupSpec) validate() error {
  if s == nil {
    return nil
  }

  return shared.WithErrors(
    shared.PrefixResourceError("volumes", s.Volumes.Validate()),
    shared.PrefixResourceError("volumeMounts", s.VolumeMounts.Validate()),
    s.validateVolumes(),
  )
}

func (s *ServerGroupSpec) validateVolumes() error {
  volumes := map[string]bool{}

  for _, volume := range s.Volumes {
    volumes[volume.Name] = true
  }

  for _, mount := range s.VolumeMounts {
    if _, ok := volumes[mount.Name]; !ok {
      return errors.Errorf("Volume %s is not defined, but required by mount", mount.Name)
    }
  }

  return nil
}

// SetDefaults fills in missing defaults
func (s *ServerGroupSpec) SetDefaults(group ServerGroup, used bool, mode DeploymentMode) {
  if s.GetCount() == 0 && used {
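To illustrate the new cross-check, a small sketch (hypothetical names, not from the commit) of a spec that validateVolumes() rejects:

package example

import api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"

// badMountSpec declares a mount whose name has no matching entry in Volumes,
// which the new validateVolumes() check rejects. Names are illustrative only.
func badMountSpec() api.ServerGroupSpec {
  return api.ServerGroupSpec{
    VolumeMounts: api.ServerGroupSpecVolumeMounts{
      {Name: "missing", MountPath: "/data/extra"},
    },
  }
}

// Validating a used group with this spec returns an error like:
//   "Volume missing is not defined, but required by mount"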
pkg/apis/deployment/v1/server_group_volume.go (new file, 176 lines)

@@ -0,0 +1,176 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package v1

import (
  "fmt"

  "github.com/arangodb/kube-arangodb/pkg/apis/shared"
  sharedv1 "github.com/arangodb/kube-arangodb/pkg/apis/shared/v1"

  "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
  "github.com/pkg/errors"
  core "k8s.io/api/core/v1"
)

var (
  restrictedVolumeNames = []string{
    k8sutil.ArangodVolumeName,
    k8sutil.TlsKeyfileVolumeName,
    k8sutil.RocksdbEncryptionVolumeName,
    k8sutil.ExporterJWTVolumeName,
    k8sutil.ClusterJWTSecretVolumeName,
    "lifecycle",
  }
)

// IsRestrictedVolumeName checks if a volume name is restricted, for example for originally mounted volumes
func IsRestrictedVolumeName(name string) bool {
  for _, restrictedVolumeName := range restrictedVolumeNames {
    if restrictedVolumeName == name {
      return true
    }
  }

  return false
}

// ServerGroupSpecVolumes is the list of volumes which need to be mounted to the Pod
type ServerGroupSpecVolumes []ServerGroupSpecVolume

// Validate checks that the volumes are valid and do not collide
func (s ServerGroupSpecVolumes) Validate() error {
  var validationErrors []error

  mappedVolumes := map[string]int{}

  for id, volume := range s {
    if i, ok := mappedVolumes[volume.Name]; ok {
      mappedVolumes[volume.Name] = i + 1
    } else {
      mappedVolumes[volume.Name] = 1
    }

    if err := volume.Validate(); err != nil {
      validationErrors = append(validationErrors, shared.PrefixResourceErrors(fmt.Sprintf("%d", id), err))
    }
  }

  for volumeName, count := range mappedVolumes {
    if IsRestrictedVolumeName(volumeName) {
      validationErrors = append(validationErrors, errors.Errorf("volume with name %s is restricted", volumeName))
    }

    if count == 1 {
      continue
    }

    validationErrors = append(validationErrors, errors.Errorf("volume with name %s defined more than once: %d", volumeName, count))
  }

  return shared.WithErrors(validationErrors...)
}

// Volumes creates the Pod Volume objects
func (s ServerGroupSpecVolumes) Volumes() []core.Volume {
  volumes := make([]core.Volume, len(s))

  for id, volume := range s {
    volumes[id] = volume.Volume()
  }

  return volumes
}

// ServerGroupSpecVolume defines a volume which needs to be mounted to the Pod
type ServerGroupSpecVolume struct {
  // Name of volume
  Name string `json:"name"`

  // Secret which should be mounted into pod
  Secret *ServerGroupSpecVolumeSecret `json:"secret,omitempty"`

  // ConfigMap which should be mounted into pod
  ConfigMap *ServerGroupSpecVolumeConfigMap `json:"configMap,omitempty"`
}

// Validate checks that the volume definition is valid
func (s *ServerGroupSpecVolume) Validate() error {
  if s == nil {
    return nil
  }

  return shared.WithErrors(
    shared.PrefixResourceErrors("name", sharedv1.AsKubernetesResourceName(&s.Name).Validate()),
    shared.PrefixResourceErrors("secret", s.Secret.Validate()),
    shared.PrefixResourceErrors("configMap", s.ConfigMap.Validate()),
    s.validate(),
  )
}

// Volume creates the Pod Volume object
func (s ServerGroupSpecVolume) Volume() core.Volume {
  return core.Volume{
    Name: s.Name,
    VolumeSource: core.VolumeSource{
      ConfigMap: (*core.ConfigMapVolumeSource)(s.ConfigMap),
      Secret:    (*core.SecretVolumeSource)(s.Secret),
    },
  }
}

func (s *ServerGroupSpecVolume) validate() error {
  if s.ConfigMap == nil && s.Secret == nil {
    return errors.Errorf("at least one option need to be defined: secret or configMap")
  }

  if s.ConfigMap != nil && s.Secret != nil {
    return errors.Errorf("only one option can be defined: secret or configMap")
  }

  return nil
}

type ServerGroupSpecVolumeSecret core.SecretVolumeSource

func (s *ServerGroupSpecVolumeSecret) Validate() error {
  if s == nil {
    return nil
  }

  return shared.WithErrors(
    shared.PrefixResourceError("secretName", sharedv1.AsKubernetesResourceName(&s.SecretName).Validate()),
  )
}

type ServerGroupSpecVolumeConfigMap core.ConfigMapVolumeSource

func (s *ServerGroupSpecVolumeConfigMap) Validate() error {
  if s == nil {
    return nil
  }

  return shared.WithErrors(
    shared.PrefixResourceError("name", sharedv1.AsKubernetesResourceName(&s.Name).Validate()),
  )
}
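A short sketch (illustrative only, not from the commit) of the list-level checks this file introduces:

package example

import (
  api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
  "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

// Both entries below are rejected by ServerGroupSpecVolumes.Validate():
// the first reuses a reserved operator volume name, the second defines
// neither a secret nor a configMap source. Names are illustrative only.
var invalidVolumes = api.ServerGroupSpecVolumes{
  {
    Name:   k8sutil.ArangodVolumeName, // restricted name
    Secret: &api.ServerGroupSpecVolumeSecret{SecretName: "some-secret"},
  },
  {
    Name: "no-source", // fails: "at least one option need to be defined: secret or configMap"
  },
}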
pkg/apis/deployment/v1/server_group_volume_mount.go (new file, 74 lines)

@@ -0,0 +1,74 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package v1

import (
  "fmt"

  "github.com/arangodb/kube-arangodb/pkg/apis/shared"
  sharedv1 "github.com/arangodb/kube-arangodb/pkg/apis/shared/v1"

  core "k8s.io/api/core/v1"
)

type ServerGroupSpecVolumeMounts []ServerGroupSpecVolumeMount

func (s ServerGroupSpecVolumeMounts) VolumeMounts() []core.VolumeMount {
  mounts := make([]core.VolumeMount, len(s))

  for id, mount := range s {
    mounts[id] = mount.VolumeMount()
  }

  return mounts
}

func (s ServerGroupSpecVolumeMounts) Validate() error {
  if s == nil {
    return nil
  }

  validateErrors := make([]error, len(s))

  for id, mount := range s {
    validateErrors[id] = shared.PrefixResourceErrors(fmt.Sprintf("%d", id), mount.Validate())
  }

  return shared.WithErrors(validateErrors...)
}

type ServerGroupSpecVolumeMount core.VolumeMount

func (s ServerGroupSpecVolumeMount) VolumeMount() core.VolumeMount {
  return core.VolumeMount(s)
}

func (s *ServerGroupSpecVolumeMount) Validate() error {
  if s == nil {
    return nil
  }

  return shared.WithErrors(
    shared.PrefixResourceError("name", sharedv1.AsKubernetesResourceName(&s.Name).Validate()),
  )
}
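These two helpers are the bridge into the core API types; a sketch (assumed helper name, not the operator's actual pod builder) of the conversion they enable:

package example

import (
  api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
  core "k8s.io/api/core/v1"
)

// applyCustomVolumes appends user-defined volumes to the Pod spec and their
// mounts to the server container, using the Volumes()/VolumeMounts()
// conversions above. This is only an illustration of the conversion step.
func applyCustomVolumes(pod *core.PodSpec, server *core.Container, g api.ServerGroupSpec) {
  pod.Volumes = append(pod.Volumes, g.Volumes.Volumes()...)
  server.VolumeMounts = append(server.VolumeMounts, g.VolumeMounts.VolumeMounts()...)
}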
pkg/apis/deployment/v1/server_group_volume_test.go (new file, 184 lines)

@@ -0,0 +1,184 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package v1

import (
  "fmt"
  "testing"

  "github.com/arangodb/kube-arangodb/pkg/apis/shared"

  "github.com/stretchr/testify/require"
  core "k8s.io/api/core/v1"
)

const (
  labelValidationError = "Validation of label failed: a DNS-1123 label must consist of lower case alphanumeric characters or '-', and must start and end with an alphanumeric character (e.g. 'my-name', or '123-abc', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?')"
  invalidName = "-invalid"
  validName = "valid"
)

func Test_Volume_Validation(t *testing.T) {
  cases := []struct {
    name string
    volumes ServerGroupSpecVolumes
    fail bool
    failedFields map[string]string
  }{
    {
      name: "Nil definition",
    },
    {
      name: "Invalid name",

      fail: true,
      failedFields: map[string]string{
        "0.name": labelValidationError,
        "0.secret.secretName": labelValidationError,
      },

      volumes: []ServerGroupSpecVolume{
        {
          Name: invalidName,
          Secret: &ServerGroupSpecVolumeSecret{
            SecretName: invalidName,
          },
        },
      },
    },
    {
      name: "Restricted name",

      fail: true,
      failedFields: map[string]string{
        "": fmt.Sprintf("volume with name %s is restricted", restrictedVolumeNames[0]),
      },

      volumes: []ServerGroupSpecVolume{
        {
          Name: restrictedVolumeNames[0],
          Secret: &ServerGroupSpecVolumeSecret{
            SecretName: validName,
          },
        },
      },
    },
    {
      name: "Defined multiple sources",

      fail: true,
      failedFields: map[string]string{
        "0": "only one option can be defined: secret or configMap",
      },

      volumes: []ServerGroupSpecVolume{
        {
          Name: validName,
          Secret: &ServerGroupSpecVolumeSecret{
            SecretName: validName,
          },
          ConfigMap: &ServerGroupSpecVolumeConfigMap{
            LocalObjectReference: core.LocalObjectReference{
              Name: validName,
            },
          },
        },
      },
    },
    {
      name: "Defined multiple volumes with same name",

      fail: true,
      failedFields: map[string]string{
        "": "volume with name valid defined more than once: 2",
      },

      volumes: []ServerGroupSpecVolume{
        {
          Name: validName,
          Secret: &ServerGroupSpecVolumeSecret{
            SecretName: validName,
          },
        },
        {
          Name: validName,
          Secret: &ServerGroupSpecVolumeSecret{
            SecretName: validName,
          },
        },
      },
    },
    {
      name: "Defined multiple volumes",

      volumes: []ServerGroupSpecVolume{
        {
          Name: validName,
          Secret: &ServerGroupSpecVolumeSecret{
            SecretName: validName,
          },
        },
        {
          Name: "valid-2",
          ConfigMap: &ServerGroupSpecVolumeConfigMap{
            LocalObjectReference: core.LocalObjectReference{
              Name: validName,
            },
          },
        },
      },
    },
  }

  for _, c := range cases {
    t.Run(c.name, func(t *testing.T) {
      err := c.volumes.Validate()

      if c.fail {
        require.Error(t, err)

        mergedErr, ok := err.(shared.MergedErrors)
        require.True(t, ok, "Is not MergedError type")

        require.Equal(t, len(mergedErr.Errors()), len(c.failedFields), "Count of expected fields and merged errors does not match")

        for _, fieldError := range mergedErr.Errors() {
          resourceErr, ok := fieldError.(shared.ResourceError)
          if !ok {
            resourceErr = shared.ResourceError{
              Prefix: "",
              Err: fieldError,
            }
          }

          errValue, ok := c.failedFields[resourceErr.Prefix]
          require.True(t, ok, "unexpected prefix %s", resourceErr.Prefix)

          require.EqualError(t, resourceErr.Err, errValue)
        }
      } else {
        require.NoError(t, err)
      }
    })
  }
}
pkg/apis/deployment/v1/zz_generated.deepcopy.go (generated, 172 lines changed)

@@ -972,6 +972,20 @@ func (in *ServerGroupSpec) DeepCopyInto(out *ServerGroupSpec) {
    *out = new(ServerGroupSpecSecurityContext)
    (*in).DeepCopyInto(*out)
  }
  if in.Volumes != nil {
    in, out := &in.Volumes, &out.Volumes
    *out = make(ServerGroupSpecVolumes, len(*in))
    for i := range *in {
      (*in)[i].DeepCopyInto(&(*out)[i])
    }
  }
  if in.VolumeMounts != nil {
    in, out := &in.VolumeMounts, &out.VolumeMounts
    *out = make(ServerGroupSpecVolumeMounts, len(*in))
    for i := range *in {
      (*in)[i].DeepCopyInto(&(*out)[i])
    }
  }
  return
}

@@ -1011,6 +1025,164 @@ func (in *ServerGroupSpecSecurityContext) DeepCopy() *ServerGroupSpecSecurityContext {
  return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerGroupSpecVolume) DeepCopyInto(out *ServerGroupSpecVolume) {
  *out = *in
  if in.Secret != nil {
    in, out := &in.Secret, &out.Secret
    *out = new(ServerGroupSpecVolumeSecret)
    (*in).DeepCopyInto(*out)
  }
  if in.ConfigMap != nil {
    in, out := &in.ConfigMap, &out.ConfigMap
    *out = new(ServerGroupSpecVolumeConfigMap)
    (*in).DeepCopyInto(*out)
  }
  return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroupSpecVolume.
func (in *ServerGroupSpecVolume) DeepCopy() *ServerGroupSpecVolume {
  if in == nil {
    return nil
  }
  out := new(ServerGroupSpecVolume)
  in.DeepCopyInto(out)
  return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerGroupSpecVolumeConfigMap) DeepCopyInto(out *ServerGroupSpecVolumeConfigMap) {
  *out = *in
  out.LocalObjectReference = in.LocalObjectReference
  if in.Items != nil {
    in, out := &in.Items, &out.Items
    *out = make([]corev1.KeyToPath, len(*in))
    for i := range *in {
      (*in)[i].DeepCopyInto(&(*out)[i])
    }
  }
  if in.DefaultMode != nil {
    in, out := &in.DefaultMode, &out.DefaultMode
    *out = new(int32)
    **out = **in
  }
  if in.Optional != nil {
    in, out := &in.Optional, &out.Optional
    *out = new(bool)
    **out = **in
  }
  return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroupSpecVolumeConfigMap.
func (in *ServerGroupSpecVolumeConfigMap) DeepCopy() *ServerGroupSpecVolumeConfigMap {
  if in == nil {
    return nil
  }
  out := new(ServerGroupSpecVolumeConfigMap)
  in.DeepCopyInto(out)
  return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerGroupSpecVolumeMount) DeepCopyInto(out *ServerGroupSpecVolumeMount) {
  *out = *in
  if in.MountPropagation != nil {
    in, out := &in.MountPropagation, &out.MountPropagation
    *out = new(corev1.MountPropagationMode)
    **out = **in
  }
  return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroupSpecVolumeMount.
func (in *ServerGroupSpecVolumeMount) DeepCopy() *ServerGroupSpecVolumeMount {
  if in == nil {
    return nil
  }
  out := new(ServerGroupSpecVolumeMount)
  in.DeepCopyInto(out)
  return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ServerGroupSpecVolumeMounts) DeepCopyInto(out *ServerGroupSpecVolumeMounts) {
  {
    in := &in
    *out = make(ServerGroupSpecVolumeMounts, len(*in))
    for i := range *in {
      (*in)[i].DeepCopyInto(&(*out)[i])
    }
    return
  }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroupSpecVolumeMounts.
func (in ServerGroupSpecVolumeMounts) DeepCopy() ServerGroupSpecVolumeMounts {
  if in == nil {
    return nil
  }
  out := new(ServerGroupSpecVolumeMounts)
  in.DeepCopyInto(out)
  return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ServerGroupSpecVolumeSecret) DeepCopyInto(out *ServerGroupSpecVolumeSecret) {
  *out = *in
  if in.Items != nil {
    in, out := &in.Items, &out.Items
    *out = make([]corev1.KeyToPath, len(*in))
    for i := range *in {
      (*in)[i].DeepCopyInto(&(*out)[i])
    }
  }
  if in.DefaultMode != nil {
    in, out := &in.DefaultMode, &out.DefaultMode
    *out = new(int32)
    **out = **in
  }
  if in.Optional != nil {
    in, out := &in.Optional, &out.Optional
    *out = new(bool)
    **out = **in
  }
  return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroupSpecVolumeSecret.
func (in *ServerGroupSpecVolumeSecret) DeepCopy() *ServerGroupSpecVolumeSecret {
  if in == nil {
    return nil
  }
  out := new(ServerGroupSpecVolumeSecret)
  in.DeepCopyInto(out)
  return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in ServerGroupSpecVolumes) DeepCopyInto(out *ServerGroupSpecVolumes) {
  {
    in := &in
    *out = make(ServerGroupSpecVolumes, len(*in))
    for i := range *in {
      (*in)[i].DeepCopyInto(&(*out)[i])
    }
    return
  }
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ServerGroupSpecVolumes.
func (in ServerGroupSpecVolumes) DeepCopy() ServerGroupSpecVolumes {
  if in == nil {
    return nil
  }
  out := new(ServerGroupSpecVolumes)
  in.DeepCopyInto(out)
  return *out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *SyncAuthenticationSpec) DeepCopyInto(out *SyncAuthenticationSpec) {
  *out = *in
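(The functions above are generated; per the Makefile change earlier in this commit, they are presumably refreshed via the update-generated target, which now also runs deepcopy generation for the new shared:v1 package.)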
pkg/apis/shared/errors.go (new file, 144 lines)

@@ -0,0 +1,144 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package shared

import (
  "fmt"
  "io"
  "strings"
)

type ResourceError struct {
  Prefix string
  Err error
}

// Error returns the string representation of the error
func (p ResourceError) Error() string {
  return fmt.Sprintf("%s: %s", p.Prefix, p.Err.Error())
}

// Format formats the error for the given verb
func (p *ResourceError) Format(s fmt.State, verb rune) {
  switch verb {
  case 'v':
    if s.Flag('+') {
      fmt.Fprintf(s, "%s\n", p.Error())
      fmt.Fprintf(s, "%+v", p.Err)
      return
    }
    fallthrough
  case 's':
    io.WriteString(s, p.Error())
  case 'q':
    fmt.Fprintf(s, "%q", p.Error())
  }
}

// PrefixResourceError creates a new prefixed error. If the error is already prefixed, the current key is prepended
func PrefixResourceError(prefix string, err error) error {
  if err == nil {
    return nil
  }

  switch e := err.(type) {
  case ResourceError:
    return ResourceError{
      Prefix: fmt.Sprintf("%s.%s", prefix, e.Prefix),
      Err: e.Err,
    }
  default:
    return ResourceError{
      Prefix: prefix,
      Err: err,
    }
  }
}

// PrefixResourceErrors creates new prefixed errors. If an error is already prefixed, the current key is prepended
func PrefixResourceErrors(prefix string, errs ...error) error {
  prefixed := make([]error, 0, len(errs))

  for _, err := range errs {
    switch errType := err.(type) {
    case MergedErrors:
      for _, subError := range errType.errors {
        prefixed = append(prefixed, PrefixResourceError(prefix, subError))
      }
    default:
      prefixed = append(prefixed, PrefixResourceError(prefix, err))
    }
  }

  return WithErrors(prefixed...)
}

type MergedErrors struct {
  errors []error
}

func (m MergedErrors) Error() string {
  errStrings := make([]string, 0, len(m.errors))

  for _, err := range m.errors {
    if err == nil {
      continue
    }

    errStrings = append(errStrings, err.Error())
  }

  return fmt.Sprintf("Received %d errors: %s", len(errStrings), strings.Join(errStrings, ", "))
}

func (m MergedErrors) Errors() []error {
  return m.errors
}

// WithErrors merges errors, filtering out nil entries
func WithErrors(errs ...error) error {
  filteredErrs := make([]error, 0, len(errs))

  for _, err := range errs {
    if err == nil {
      continue
    }

    switch errType := err.(type) {
    case MergedErrors:
      for _, subError := range errType.errors {
        filteredErrs = append(filteredErrs, subError)
      }
    default:
      filteredErrs = append(filteredErrs, err)
    }
  }

  if len(filteredErrs) == 0 {
    return nil
  }

  return MergedErrors{
    errors: filteredErrs,
  }
}
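To make the prefixing behaviour concrete, a small sketch (illustrative values only) of how nested prefixes compose into field paths:

package main

import (
  "fmt"

  "github.com/arangodb/kube-arangodb/pkg/apis/shared"
  "github.com/pkg/errors"
)

func main() {
  inner := shared.PrefixResourceError("name", errors.New("cannot be empty"))
  outer := shared.PrefixResourceError("0", inner) // prefix becomes "0.name"
  merged := shared.WithErrors(nil, shared.PrefixResourceError("volumes", outer))

  // Prints something like: Received 1 errors: volumes.0.name: cannot be empty
  fmt.Println(merged)
}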
pkg/apis/shared/v1/doc.go (new file, 24 lines)

@@ -0,0 +1,24 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Ewout Prangsma
//

// +k8s:deepcopy-gen=package
package v1
pkg/apis/shared/v1/resource.go (new file, 112 lines)

@@ -0,0 +1,112 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package v1

import (
  "strings"

  "github.com/pkg/errors"
  "k8s.io/apimachinery/pkg/util/validation"
)

// KubernetesResourceName defines the name of a kubernetes resource, including its validation function
type KubernetesResourceName string

// AsKubernetesResourceName wraps a string into a KubernetesResourceName for validation purposes
func AsKubernetesResourceName(s *string) *KubernetesResourceName {
  if s == nil {
    return nil
  }

  value := KubernetesResourceName(*s)

  return &value
}

// StringP returns a string pointer to the resource name
func (n *KubernetesResourceName) StringP() *string {
  if n == nil {
    return nil
  }

  value := string(*n)

  return &value
}

// String returns the string value of the name
func (n *KubernetesResourceName) String() string {
  value := n.StringP()

  if value == nil {
    return ""
  }

  return *value
}

// Validate checks if the name is a valid kubernetes DNS_LABEL
func (n *KubernetesResourceName) Validate() error {
  if n == nil {
    return errors.Errorf("cannot be undefined")
  }

  name := *n

  if name == "" {
    return errors.Errorf("cannot be empty")
  }

  if err := IsValidName(name.String()); err != nil {
    return err
  }

  return nil
}

// Immutable verifies that the field did not change
func (n *KubernetesResourceName) Immutable(o *KubernetesResourceName) error {
  if o == nil && n == nil {
    return nil
  }

  if o == nil || n == nil {
    return errors.Errorf("field is immutable")
  }

  if *o != *n {
    return errors.Errorf("field is immutable")
  }

  return nil
}

// IsValidName validates that the name is a DNS_LABEL.
// More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
func IsValidName(name string) error {
  if res := validation.IsDNS1123Label(name); len(res) > 0 {
    return errors.Errorf("Validation of label failed: %s", strings.Join(res, ", "))
  }

  return nil
}
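A quick sketch of the intended use (values are illustrative):

package example

import sharedv1 "github.com/arangodb/kube-arangodb/pkg/apis/shared/v1"

// examples shows the three outcomes of Validate(): a DNS-1123 violation,
// a valid label, and a nil pointer.
func examples() []error {
  bad := "-invalid"
  good := "my-volume"

  return []error{
    sharedv1.AsKubernetesResourceName(&bad).Validate(),  // DNS-1123 label validation error
    sharedv1.AsKubernetesResourceName(&good).Validate(), // nil
    sharedv1.AsKubernetesResourceName(nil).Validate(),   // "cannot be undefined"
  }
}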
pkg/apis/shared/v1/zz_generated.deepcopy.go (generated, new file, 25 lines)

@@ -0,0 +1,25 @@
// +build !ignore_autogenerated

//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1
(One file's diff is suppressed because it is too large.)
pkg/deployment/deployment_definitions_test.go (new file, 123 lines)

@@ -0,0 +1,123 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Tomasz Mielech <tomasz@arangodb.com>
//

package deployment

import (
  api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
  "github.com/arangodb/kube-arangodb/pkg/util"
  core "k8s.io/api/core/v1"
  "k8s.io/apimachinery/pkg/api/resource"
)

var (
  defaultAgentTerminationTimeout = int64(api.ServerGroupAgents.DefaultTerminationGracePeriod().Seconds())
  defaultDBServerTerminationTimeout = int64(api.ServerGroupDBServers.DefaultTerminationGracePeriod().Seconds())
  defaultCoordinatorTerminationTimeout = int64(api.ServerGroupCoordinators.DefaultTerminationGracePeriod().Seconds())
  defaultSingleTerminationTimeout = int64(api.ServerGroupSingle.DefaultTerminationGracePeriod().Seconds())
  defaultSyncMasterTerminationTimeout = int64(api.ServerGroupSyncMasters.DefaultTerminationGracePeriod().Seconds())
  defaultSyncWorkerTerminationTimeout = int64(api.ServerGroupSyncWorkers.DefaultTerminationGracePeriod().Seconds())

  securityContext api.ServerGroupSpecSecurityContext

  nodeSelectorTest = map[string]string{
    "test": "test",
  }

  firstAgentStatus = api.MemberStatus{
    ID: "agent1",
    Phase: api.MemberPhaseNone,
  }

  firstCoordinatorStatus = api.MemberStatus{
    ID: "coordinator1",
    Phase: api.MemberPhaseNone,
  }

  singleStatus = api.MemberStatus{
    ID: "single1",
    Phase: api.MemberPhaseNone,
  }

  firstSyncMaster = api.MemberStatus{
    ID: "syncMaster1",
    Phase: api.MemberPhaseNone,
  }

  firstSyncWorker = api.MemberStatus{
    ID: "syncWorker1",
    Phase: api.MemberPhaseNone,
  }

  firstDBServerStatus = api.MemberStatus{
    ID: "DBserver1",
    Phase: api.MemberPhaseNone,
  }

  noAuthentication = api.AuthenticationSpec{
    JWTSecretName: util.NewString(api.JWTSecretNameDisabled),
  }

  noTLS = api.TLSSpec{
    CASecretName: util.NewString(api.CASecretNameDisabled),
  }

  authenticationSpec = api.AuthenticationSpec{
    JWTSecretName: util.NewString(testJWTSecretName),
  }
  tlsSpec = api.TLSSpec{
    CASecretName: util.NewString(testCASecretName),
  }

  rocksDBSpec = api.RocksDBSpec{
    Encryption: api.RocksDBEncryptionSpec{
      KeySecretName: util.NewString(testRocksDBEncryptionKey),
    },
  }

  metricsSpec = api.MetricsSpec{
    Enabled: util.NewBool(true),
    Image: util.NewString(testExporterImage),
    Authentication: api.MetricsAuthenticationSpec{
      JWTTokenSecretName: util.NewString(testExporterToken),
    },
  }

  resourcesUnfiltered = core.ResourceRequirements{
    Limits: core.ResourceList{
      core.ResourceCPU: resource.MustParse("500m"),
      core.ResourceMemory: resource.MustParse("2Gi"),
    },
    Requests: core.ResourceList{
      core.ResourceCPU: resource.MustParse("100m"),
      core.ResourceMemory: resource.MustParse("1Gi"),
    },
  }

  emptyResources = core.ResourceRequirements{
    Limits: make(core.ResourceList),
    Requests: make(core.ResourceList),
  }

  sidecarName1 = "sidecar1"
  sidecarName2 = "sidecar2"
)
pkg/deployment/deployment_pod_resources_test.go (new file, 225 lines)

@@ -0,0 +1,225 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package deployment

import (
  "fmt"
  "testing"

  "github.com/arangodb/kube-arangodb/pkg/deployment/resources"
  "github.com/stretchr/testify/require"

  api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
  "github.com/arangodb/kube-arangodb/pkg/util"
  "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
  core "k8s.io/api/core/v1"
)

func resourceLimitAsEnv(t *testing.T, requirements core.ResourceRequirements) core.EnvVar {
  value, ok := requirements.Limits[core.ResourceMemory]
  require.True(t, ok)

  return core.EnvVar{
    Name: resources.ArangoDBOverrideDetectedTotalMemoryEnv,
    Value: fmt.Sprintf("%d", value.Value()),
  }
}

func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
  testCases := []testCaseStruct{
    {
      Name: "DBserver POD with resource requirements",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Authentication: noAuthentication,
          TLS: noTLS,
          DBServers: api.ServerGroupSpec{
            Resources: resourcesUnfiltered,
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            DBServers: api.MemberStatusList{
              firstDBServerStatus,
            },
          },
          Images: createTestImages(false),
        }
        deployment.status.last.Members.DBServers[0].IsInitialized = true

        testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
      },
      ExpectedEvent: "member dbserver is created",
      ExpectedPod: core.Pod{
        Spec: core.PodSpec{
          Volumes: []core.Volume{
            k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
          },
          Containers: []core.Container{
            {
              Name: k8sutil.ServerContainerName,
              Image: testImage,
              Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
              Ports: createTestPorts(),
              Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
              VolumeMounts: []core.VolumeMount{
                k8sutil.ArangodVolumeMount(),
              },
              LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
              ImagePullPolicy: core.PullIfNotPresent,
              SecurityContext: securityContext.NewSecurityContext(),
            },
          },
          RestartPolicy: core.RestartPolicyNever,
          TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
          Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
            firstDBServerStatus.ID,
          Subdomain: testDeploymentName + "-int",
          Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupDBServersString,
            false, ""),
        },
      },
    },
    {
      Name: "DBserver POD with resource requirements, with override flag",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Authentication: noAuthentication,
          TLS: noTLS,
          DBServers: api.ServerGroupSpec{
            Resources: resourcesUnfiltered,
            OverrideDetectedTotalMemory: util.NewBool(true),
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            DBServers: api.MemberStatusList{
              firstDBServerStatus,
            },
          },
          Images: createTestImages(false),
        }
        deployment.status.last.Members.DBServers[0].IsInitialized = true

        testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
      },
      ExpectedEvent: "member dbserver is created",
      ExpectedPod: core.Pod{
        Spec: core.PodSpec{
          Volumes: []core.Volume{
            k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
          },
          Containers: []core.Container{
            {
              Name: k8sutil.ServerContainerName,
              Image: testImage,
              Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
              Ports: createTestPorts(),
              Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
              VolumeMounts: []core.VolumeMount{
                k8sutil.ArangodVolumeMount(),
              },
              Env: []core.EnvVar{
                resourceLimitAsEnv(t, resourcesUnfiltered),
              },
              LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
              ImagePullPolicy: core.PullIfNotPresent,
              SecurityContext: securityContext.NewSecurityContext(),
            },
          },
          RestartPolicy: core.RestartPolicyNever,
          TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
          Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
            firstDBServerStatus.ID,
          Subdomain: testDeploymentName + "-int",
          Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupDBServersString,
            false, ""),
        },
      },
    },
    {
      Name: "DBserver POD without resource requirements, with override flag",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Authentication: noAuthentication,
          TLS: noTLS,
          DBServers: api.ServerGroupSpec{
            OverrideDetectedTotalMemory: util.NewBool(true),
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            DBServers: api.MemberStatusList{
              firstDBServerStatus,
            },
          },
          Images: createTestImages(false),
        }
        deployment.status.last.Members.DBServers[0].IsInitialized = true

        testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
      },
      ExpectedEvent: "member dbserver is created",
      ExpectedPod: core.Pod{
        Spec: core.PodSpec{
          Volumes: []core.Volume{
            k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
          },
          Containers: []core.Container{
            {
              Name: k8sutil.ServerContainerName,
              Image: testImage,
              Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
              Ports: createTestPorts(),
              Resources: emptyResources,
              VolumeMounts: []core.VolumeMount{
                k8sutil.ArangodVolumeMount(),
              },
              LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
              ImagePullPolicy: core.PullIfNotPresent,
              SecurityContext: securityContext.NewSecurityContext(),
            },
          },
          RestartPolicy: core.RestartPolicyNever,
          TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
          Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
            firstDBServerStatus.ID,
          Subdomain: testDeploymentName + "-int",
          Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupDBServersString,
            false, ""),
        },
      },
    },
  }

  runTestCases(t, testCases...)
}
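(These cases exercise the new OverrideDetectedTotalMemory flag: when it is set and a memory limit is present, the limit is injected into the server container as the override environment variable built by resourceLimitAsEnv above; without resource limits the container falls back to empty resources.)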
pkg/deployment/deployment_pod_sync_test.go (new file, 468 lines)

@@ -0,0 +1,468 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Tomasz Mielech <tomasz@arangodb.com>
//

package deployment

import (
  "testing"

  api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
  "github.com/arangodb/kube-arangodb/pkg/util"
  "github.com/arangodb/kube-arangodb/pkg/util/constants"
  "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
  "github.com/pkg/errors"
  "github.com/stretchr/testify/require"
  core "k8s.io/api/core/v1"
  metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func TestEnsurePod_Sync_Error(t *testing.T) {
  testCases := []testCaseStruct{
    {
      Name: "Sync Pod does not work for enterprise image",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            SyncMasters: api.MemberStatusList{
              firstSyncMaster,
            },
          },
          Images: createTestImages(false),
        }
      },
      ExpectedError: errors.New("Image '" + testImage + "' does not contain an Enterprise version of ArangoDB"),
    },
    {
      Name: "Sync Pod cannot get master JWT secret",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            SyncMasters: api.MemberStatusList{
              firstSyncMaster,
            },
          },
          Images: createTestImages(true),
        }
      },
      ExpectedError: errors.New("Master JWT secret validation failed: secrets \"" +
        testDeploymentName + "-sync-jwt\" not found"),
    },
    {
      Name: "Sync Pod cannot get monitoring token secret",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Sync: api.SyncSpec{
            Enabled: util.NewBool(true),
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            SyncMasters: api.MemberStatusList{
              firstSyncMaster,
            },
          },
          Images: createTestImages(true),
        }

        secretName := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
        err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(secretName, &metav1.DeleteOptions{})
        require.NoError(t, err)
      },
      ExpectedError: errors.New("Monitoring token secret validation failed: secrets \"" +
        testDeploymentName + "-sync-mt\" not found"),
    },
  }

  runTestCases(t, testCases...)
}

func TestEnsurePod_Sync_Master(t *testing.T) {
  testCases := []testCaseStruct{
    {
      Name: "Sync Master Pod cannot create TLS keyfile secret",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Sync: api.SyncSpec{
            Enabled: util.NewBool(true),
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            SyncMasters: api.MemberStatusList{
              firstSyncMaster,
            },
          },
          Images: createTestImages(true),
        }

        secretName := testCase.ArangoDeployment.Spec.Sync.TLS.GetCASecretName()
        err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(secretName, &metav1.DeleteOptions{})
        require.NoError(t, err)
      },
      ExpectedError: errors.New("Failed to create TLS keyfile secret: secrets \"" +
        testDeploymentName + "-sync-ca\" not found"),
    },
    {
      Name: "Sync Master Pod cannot get cluster JWT secret",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Authentication: authenticationSpec,
          Sync: api.SyncSpec{
            Enabled: util.NewBool(true),
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            SyncMasters: api.MemberStatusList{
              firstSyncMaster,
            },
          },
          Images: createTestImages(true),
        }

        secretName := testCase.ArangoDeployment.Spec.Authentication.GetJWTSecretName()
        err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(secretName, &metav1.DeleteOptions{})
        require.NoError(t, err)
      },
      ExpectedError: errors.New("Cluster JWT secret validation failed: secrets \"" +
        testJWTSecretName + "\" not found"),
    },
    {
      Name: "Sync Master Pod cannot get authentication CA certificate",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Authentication: authenticationSpec,
          Sync: api.SyncSpec{
            Enabled: util.NewBool(true),
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            SyncMasters: api.MemberStatusList{
              firstSyncMaster,
            },
          },
          Images: createTestImages(true),
        }

        secretName := testCase.ArangoDeployment.Spec.Sync.Authentication.GetClientCASecretName()
        err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(secretName, &metav1.DeleteOptions{})
        require.NoError(t, err)
      },
      ExpectedError: errors.New("Client authentication CA certificate secret validation failed: " +
        "secrets \"" + testDeploymentName + "-sync-client-auth-ca\" not found"),
    },
    {
      Name: "Sync Master Pod with authentication, monitoring, tls, service account, node selector, " +
        "liveness probe, priority class name, resource requirements",
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Authentication: authenticationSpec,
          Sync: api.SyncSpec{
            Enabled: util.NewBool(true),
          },
          SyncMasters: api.ServerGroupSpec{
            ServiceAccountName: util.NewString(testServiceAccountName),
            NodeSelector: nodeSelectorTest,
            PriorityClassName: testPriorityClassName,
            Resources: resourcesUnfiltered,
          },
        },
      },
      Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
        deployment.status.last = api.DeploymentStatus{
          Members: api.DeploymentStatusMembers{
            SyncMasters: api.MemberStatusList{
              firstSyncMaster,
            },
          },
          Images: createTestImages(true),
        }

        testCase.createTestPodData(deployment, api.ServerGroupSyncMasters, firstSyncMaster)

        name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
        auth, err := k8sutil.GetTokenSecret(deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
        require.NoError(t, err)

        testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe(
          true, "bearer "+auth, k8sutil.ArangoSyncMasterPort)
      },
      ExpectedEvent: "member syncmaster is created",
      ExpectedPod: core.Pod{
        Spec: core.PodSpec{
          Volumes: []core.Volume{
            createTestTLSVolume(api.ServerGroupSyncMastersString, firstSyncMaster.ID),
            k8sutil.CreateVolumeWithSecret(k8sutil.ClientAuthCAVolumeName, "test-sync-client-auth-ca"),
            k8sutil.CreateVolumeWithSecret(k8sutil.MasterJWTSecretVolumeName, "test-sync-jwt"),
            k8sutil.CreateVolumeWithSecret(k8sutil.ClusterJWTSecretVolumeName, testJWTSecretName),
          },
          Containers: []core.Container{
            {
              Name: k8sutil.ServerContainerName,
              Image: testImage,
              Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, true, true),
              Ports: createTestPorts(),
              Env: []core.EnvVar{
                k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
                  testDeploymentName+"-sync-mt", constants.SecretKeyToken),
              },
              ImagePullPolicy: core.PullIfNotPresent,
              Resources: resourcesUnfiltered,
              SecurityContext: securityContext.NewSecurityContext(),
              VolumeMounts: []core.VolumeMount{
                k8sutil.TlsKeyfileVolumeMount(),
                k8sutil.ClientAuthCACertificateVolumeMount(),
                k8sutil.MasterJWTVolumeMount(),
                k8sutil.ClusterJWTVolumeMount(),
              },
            },
          },
          PriorityClassName: testPriorityClassName,
          RestartPolicy: core.RestartPolicyNever,
          ServiceAccountName: testServiceAccountName,
          NodeSelector: nodeSelectorTest,
          TerminationGracePeriodSeconds: &defaultSyncMasterTerminationTimeout,
          Hostname: testDeploymentName + "-" + api.ServerGroupSyncMastersString + "-" +
            firstSyncMaster.ID,
          Subdomain: testDeploymentName + "-int",
          Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupSyncMastersString,
            false, ""),
        },
      },
    },
    {
      Name: "Sync Master Pod with lifecycle, license, monitoring without authentication and alpine",
      config: Config{
        LifecycleImage: testImageLifecycle,
        AlpineImage: testImageAlpine,
      },
      ArangoDeployment: &api.ArangoDeployment{
        Spec: api.DeploymentSpec{
          Image: util.NewString(testImage),
          Authentication: noAuthentication,
          Environment: api.NewEnvironment(api.EnvironmentProduction),
          Sync: api.SyncSpec{
            Enabled: util.NewBool(true),
          },
          License: api.LicenseSpec{
            SecretName: util.NewString(testLicense),
|
||||
},
|
||||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
SyncMasters: api.MemberStatusList{
|
||||
firstSyncMaster,
|
||||
},
|
||||
},
|
||||
Images: createTestImages(true),
|
||||
}
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupSyncMasters, firstSyncMaster)
|
||||
name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
|
||||
auth, err := k8sutil.GetTokenSecret(deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe(
|
||||
true, "bearer "+auth, k8sutil.ArangoSyncMasterPort)
|
||||
},
|
||||
ExpectedEvent: "member syncmaster is created",
|
||||
ExpectedPod: core.Pod{
|
||||
Spec: core.PodSpec{
|
||||
Volumes: []core.Volume{
|
||||
k8sutil.LifecycleVolume(),
|
||||
createTestTLSVolume(api.ServerGroupSyncMastersString, firstSyncMaster.ID),
|
||||
k8sutil.CreateVolumeWithSecret(k8sutil.ClientAuthCAVolumeName,
|
||||
testDeploymentName+"-sync-client-auth-ca"),
|
||||
k8sutil.CreateVolumeWithSecret(k8sutil.MasterJWTSecretVolumeName,
|
||||
testDeploymentName+"-sync-jwt"),
|
||||
},
|
||||
InitContainers: []core.Container{
|
||||
createTestLifecycleContainer(emptyResources),
|
||||
},
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: k8sutil.ServerContainerName,
|
||||
Image: testImage,
|
||||
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true),
|
||||
Ports: createTestPorts(),
|
||||
Env: []core.EnvVar{
|
||||
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
|
||||
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
|
||||
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey,
|
||||
testLicense, constants.SecretKeyToken),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorPodName, "metadata.name"),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorPodNamespace, "metadata.namespace"),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorNodeName, "spec.nodeName"),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorNodeNameArango, "spec.nodeName"),
|
||||
},
|
||||
Resources: emptyResources,
|
||||
ImagePullPolicy: core.PullIfNotPresent,
|
||||
Lifecycle: createTestLifecycle(),
|
||||
SecurityContext: securityContext.NewSecurityContext(),
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
k8sutil.LifecycleVolumeMount(),
|
||||
k8sutil.TlsKeyfileVolumeMount(),
|
||||
k8sutil.ClientAuthCACertificateVolumeMount(),
|
||||
k8sutil.MasterJWTVolumeMount(),
|
||||
},
|
||||
},
|
||||
},
|
||||
RestartPolicy: core.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &defaultSyncMasterTerminationTimeout,
|
||||
Hostname: testDeploymentName + "-" + api.ServerGroupSyncMastersString + "-" +
|
||||
firstSyncMaster.ID,
|
||||
Subdomain: testDeploymentName + "-int",
|
||||
Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupSyncMastersString,
|
||||
true, ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runTestCases(t, testCases...)
|
||||
}
|
||||
|
||||
func TestEnsurePod_Sync_Worker(t *testing.T) {
|
||||
testCases := []testCaseStruct{
|
||||
{
|
||||
Name: "Sync Worker Pod with monitoring, service account, node selector, lifecycle, license " +
|
||||
"liveness probe, priority class name, resource requirements without alpine",
|
||||
config: Config{
|
||||
LifecycleImage: testImageLifecycle,
|
||||
AlpineImage: testImageAlpine,
|
||||
},
|
||||
ArangoDeployment: &api.ArangoDeployment{
|
||||
Spec: api.DeploymentSpec{
|
||||
Image: util.NewString(testImage),
|
||||
Authentication: noAuthentication,
|
||||
Sync: api.SyncSpec{
|
||||
Enabled: util.NewBool(true),
|
||||
},
|
||||
SyncWorkers: api.ServerGroupSpec{
|
||||
ServiceAccountName: util.NewString(testServiceAccountName),
|
||||
NodeSelector: nodeSelectorTest,
|
||||
PriorityClassName: testPriorityClassName,
|
||||
Resources: resourcesUnfiltered,
|
||||
},
|
||||
License: api.LicenseSpec{
|
||||
SecretName: util.NewString(testLicense),
|
||||
},
|
||||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
SyncWorkers: api.MemberStatusList{
|
||||
firstSyncWorker,
|
||||
},
|
||||
},
|
||||
Images: createTestImages(true),
|
||||
}
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupSyncWorkers, firstSyncWorker)
|
||||
|
||||
name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
|
||||
auth, err := k8sutil.GetTokenSecret(deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
|
||||
require.NoError(t, err)
|
||||
|
||||
testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe(
|
||||
true, "bearer "+auth, k8sutil.ArangoSyncWorkerPort)
|
||||
},
|
||||
ExpectedEvent: "member syncworker is created",
|
||||
ExpectedPod: core.Pod{
|
||||
Spec: core.PodSpec{
|
||||
Volumes: []core.Volume{
|
||||
k8sutil.LifecycleVolume(),
|
||||
k8sutil.CreateVolumeWithSecret(k8sutil.MasterJWTSecretVolumeName, testDeploymentName+"-sync-jwt"),
|
||||
},
|
||||
InitContainers: []core.Container{
|
||||
createTestLifecycleContainer(emptyResources),
|
||||
},
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: k8sutil.ServerContainerName,
|
||||
Image: testImage,
|
||||
Command: createTestCommandForSyncWorker(firstSyncWorker.ID, true, true),
|
||||
Ports: createTestPorts(),
|
||||
Env: []core.EnvVar{
|
||||
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
|
||||
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
|
||||
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey,
|
||||
testLicense, constants.SecretKeyToken),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorPodName, "metadata.name"),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorPodNamespace, "metadata.namespace"),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorNodeName, "spec.nodeName"),
|
||||
k8sutil.CreateEnvFieldPath(constants.EnvOperatorNodeNameArango, "spec.nodeName"),
|
||||
},
|
||||
Lifecycle: createTestLifecycle(),
|
||||
ImagePullPolicy: core.PullIfNotPresent,
|
||||
Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
|
||||
SecurityContext: securityContext.NewSecurityContext(),
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
k8sutil.LifecycleVolumeMount(),
|
||||
k8sutil.MasterJWTVolumeMount(),
|
||||
},
|
||||
},
|
||||
},
|
||||
PriorityClassName: testPriorityClassName,
|
||||
RestartPolicy: core.RestartPolicyNever,
|
||||
ServiceAccountName: testServiceAccountName,
|
||||
NodeSelector: nodeSelectorTest,
|
||||
TerminationGracePeriodSeconds: &defaultSyncWorkerTerminationTimeout,
|
||||
Hostname: testDeploymentName + "-" + api.ServerGroupSyncWorkersString + "-" +
|
||||
firstSyncWorker.ID,
|
||||
Subdomain: testDeploymentName + "-int",
|
||||
Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupSyncWorkersString,
|
||||
false, api.ServerGroupDBServersString),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runTestCases(t, testCases...)
|
||||
}
|
242
pkg/deployment/deployment_pod_volumes_test.go
Normal file
|
@ -0,0 +1,242 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Author Adam Janikowski
|
||||
//
|
||||
|
||||
package deployment
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
|
||||
core "k8s.io/api/core/v1"
|
||||
)
|
||||
|
||||
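// createExampleVolume returns a ConfigMap-backed volume definition whose volume name and ConfigMap name are both set to the given name.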
func createExampleVolume(name string) api.ServerGroupSpecVolume {
|
||||
return api.ServerGroupSpecVolume{
|
||||
Name: name,
|
||||
ConfigMap: &api.ServerGroupSpecVolumeConfigMap{
|
||||
LocalObjectReference: core.LocalObjectReference{
|
||||
Name: name,
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
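// createExampleVolumeMount returns a mount for the named volume under /mount/<name>.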
func createExampleVolumeMount(name string) api.ServerGroupSpecVolumeMount {
|
||||
return api.ServerGroupSpecVolumeMount{
|
||||
Name: name,
|
||||
MountPath: fmt.Sprintf("/mount/%s", name),
|
||||
}
|
||||
}
|
||||
|
||||
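For orientation, a group spec using the customizable volumes exercised by these tests could be declared roughly as follows. This is only a sketch assembled from the same API types; the "extra-config" name is illustrative and not part of the change.

package deployment

import (
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    core "k8s.io/api/core/v1"
)

// exampleGroupSpec mounts the (illustrative) "extra-config" ConfigMap into the server container.
var exampleGroupSpec = api.ServerGroupSpec{
    Volumes: []api.ServerGroupSpecVolume{
        {
            Name: "extra-config",
            ConfigMap: &api.ServerGroupSpecVolumeConfigMap{
                LocalObjectReference: core.LocalObjectReference{Name: "extra-config"},
            },
        },
    },
    VolumeMounts: []api.ServerGroupSpecVolumeMount{
        {
            Name:      "extra-config",
            MountPath: "/mount/extra-config",
        },
    },
}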
func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
|
||||
testCases := []testCaseStruct{
|
||||
{
|
||||
Name: "DBserver POD with Volume",
|
||||
ArangoDeployment: &api.ArangoDeployment{
|
||||
Spec: api.DeploymentSpec{
|
||||
Image: util.NewString(testImage),
|
||||
Authentication: noAuthentication,
|
||||
TLS: noTLS,
|
||||
DBServers: api.ServerGroupSpec{
|
||||
Volumes: []api.ServerGroupSpecVolume{
|
||||
createExampleVolume("volume"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
},
|
||||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
ExpectedEvent: "member dbserver is created",
|
||||
ExpectedPod: core.Pod{
|
||||
Spec: core.PodSpec{
|
||||
Volumes: []core.Volume{
|
||||
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
|
||||
createExampleVolume("volume").Volume(),
|
||||
},
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: k8sutil.ServerContainerName,
|
||||
Image: testImage,
|
||||
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
|
||||
Ports: createTestPorts(),
|
||||
Resources: emptyResources,
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
k8sutil.ArangodVolumeMount(),
|
||||
},
|
||||
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
|
||||
ImagePullPolicy: core.PullIfNotPresent,
|
||||
SecurityContext: securityContext.NewSecurityContext(),
|
||||
},
|
||||
},
|
||||
RestartPolicy: core.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
|
||||
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
|
||||
firstDBServerStatus.ID,
|
||||
Subdomain: testDeploymentName + "-int",
|
||||
Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupDBServersString,
|
||||
false, ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "DBserver POD with Volumes",
|
||||
ArangoDeployment: &api.ArangoDeployment{
|
||||
Spec: api.DeploymentSpec{
|
||||
Image: util.NewString(testImage),
|
||||
Authentication: noAuthentication,
|
||||
TLS: noTLS,
|
||||
DBServers: api.ServerGroupSpec{
|
||||
Volumes: []api.ServerGroupSpecVolume{
|
||||
createExampleVolume("volume"),
|
||||
createExampleVolume("volume2"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
},
|
||||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
ExpectedEvent: "member dbserver is created",
|
||||
ExpectedPod: core.Pod{
|
||||
Spec: core.PodSpec{
|
||||
Volumes: []core.Volume{
|
||||
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
|
||||
createExampleVolume("volume").Volume(),
|
||||
createExampleVolume("volume2").Volume(),
|
||||
},
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: k8sutil.ServerContainerName,
|
||||
Image: testImage,
|
||||
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
|
||||
Ports: createTestPorts(),
|
||||
Resources: emptyResources,
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
k8sutil.ArangodVolumeMount(),
|
||||
},
|
||||
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
|
||||
ImagePullPolicy: core.PullIfNotPresent,
|
||||
SecurityContext: securityContext.NewSecurityContext(),
|
||||
},
|
||||
},
|
||||
RestartPolicy: core.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
|
||||
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
|
||||
firstDBServerStatus.ID,
|
||||
Subdomain: testDeploymentName + "-int",
|
||||
Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupDBServersString,
|
||||
false, ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
{
|
||||
Name: "DBserver POD with Volume Mount",
|
||||
ArangoDeployment: &api.ArangoDeployment{
|
||||
Spec: api.DeploymentSpec{
|
||||
Image: util.NewString(testImage),
|
||||
Authentication: noAuthentication,
|
||||
TLS: noTLS,
|
||||
DBServers: api.ServerGroupSpec{
|
||||
Volumes: []api.ServerGroupSpecVolume{
|
||||
createExampleVolume("volume"),
|
||||
},
|
||||
VolumeMounts: []api.ServerGroupSpecVolumeMount{
|
||||
createExampleVolumeMount("volume"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
},
|
||||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
ExpectedEvent: "member dbserver is created",
|
||||
ExpectedPod: core.Pod{
|
||||
Spec: core.PodSpec{
|
||||
Volumes: []core.Volume{
|
||||
k8sutil.CreateVolumeEmptyDir(k8sutil.ArangodVolumeName),
|
||||
createExampleVolume("volume").Volume(),
|
||||
},
|
||||
Containers: []core.Container{
|
||||
{
|
||||
Name: k8sutil.ServerContainerName,
|
||||
Image: testImage,
|
||||
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
|
||||
Ports: createTestPorts(),
|
||||
Resources: emptyResources,
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
k8sutil.ArangodVolumeMount(),
|
||||
createExampleVolumeMount("volume").VolumeMount(),
|
||||
},
|
||||
LivenessProbe: createTestLivenessProbe(false, "", k8sutil.ArangoPort),
|
||||
ImagePullPolicy: core.PullIfNotPresent,
|
||||
SecurityContext: securityContext.NewSecurityContext(),
|
||||
},
|
||||
},
|
||||
RestartPolicy: core.RestartPolicyNever,
|
||||
TerminationGracePeriodSeconds: &defaultDBServerTerminationTimeout,
|
||||
Hostname: testDeploymentName + "-" + api.ServerGroupDBServersString + "-" +
|
||||
firstDBServerStatus.ID,
|
||||
Subdomain: testDeploymentName + "-int",
|
||||
Affinity: k8sutil.CreateAffinity(testDeploymentName, api.ServerGroupDBServersString,
|
||||
false, ""),
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
runTestCases(t, testCases...)
|
||||
}
|
108
pkg/deployment/deployment_run_test.go
Normal file
|
@ -0,0 +1,108 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Author Adam Janikowski
|
||||
//
|
||||
|
||||
package deployment
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"testing"
|
||||
|
||||
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
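// runTestCases runs every provided test case through runTestCase.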
func runTestCases(t *testing.T, testCases ...testCaseStruct) {
|
||||
for _, testCase := range testCases {
|
||||
runTestCase(t, testCase)
|
||||
}
|
||||
}
|
||||
|
||||
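// runTestCase arranges a fake deployment, ensures secrets and pods, and then asserts the expected error, pod spec, event and member phase for a single test case.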
func runTestCase(t *testing.T, testCase testCaseStruct) {
|
||||
t.Run(testCase.Name, func(t *testing.T) {
|
||||
// Arrange
|
||||
d, eventRecorder := createTestDeployment(testCase.config, testCase.ArangoDeployment)
|
||||
|
||||
err := d.resources.EnsureSecrets()
|
||||
require.NoError(t, err)
|
||||
|
||||
if testCase.Helper != nil {
|
||||
testCase.Helper(t, d, &testCase)
|
||||
}
|
||||
|
||||
// Create custom resource in the fake kubernetes API
|
||||
_, err = d.deps.DatabaseCRCli.DatabaseV1().ArangoDeployments(testNamespace).Create(d.apiObject)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Act
|
||||
err = d.resources.EnsurePods()
|
||||
|
||||
// Assert
|
||||
if testCase.ExpectedError != nil {
|
||||
|
||||
if !assert.EqualError(t, err, testCase.ExpectedError.Error()) {
|
||||
println(fmt.Sprintf("%+v", err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
require.NoError(t, err)
|
||||
pods, err := d.deps.KubeCli.CoreV1().Pods(testNamespace).List(metav1.ListOptions{})
|
||||
require.NoError(t, err)
|
||||
require.Len(t, pods.Items, 1)
|
||||
require.Equal(t, testCase.ExpectedPod.Spec, pods.Items[0].Spec)
|
||||
require.Equal(t, testCase.ExpectedPod.ObjectMeta, pods.Items[0].ObjectMeta)
|
||||
|
||||
if len(testCase.ExpectedEvent) > 0 {
|
||||
select {
|
||||
case msg := <-eventRecorder.Events:
|
||||
assert.Contains(t, msg, testCase.ExpectedEvent)
|
||||
default:
|
||||
assert.Fail(t, "expected event", "expected event with message '%s'", testCase.ExpectedEvent)
|
||||
}
|
||||
|
||||
status, version := d.GetStatus()
|
||||
assert.Equal(t, int32(1), version)
|
||||
|
||||
checkEachMember := func(group api.ServerGroup, groupSpec api.ServerGroupSpec, status *api.MemberStatusList) error {
|
||||
for _, m := range *status {
|
||||
require.Equal(t, api.MemberPhaseCreated, m.Phase)
|
||||
|
||||
_, exist := m.Conditions.Get(api.ConditionTypeReady)
|
||||
require.Equal(t, false, exist)
|
||||
_, exist = m.Conditions.Get(api.ConditionTypeTerminated)
|
||||
require.Equal(t, false, exist)
|
||||
_, exist = m.Conditions.Get(api.ConditionTypeTerminating)
|
||||
require.Equal(t, false, exist)
|
||||
_, exist = m.Conditions.Get(api.ConditionTypeAgentRecoveryNeeded)
|
||||
require.Equal(t, false, exist)
|
||||
_, exist = m.Conditions.Get(api.ConditionTypeAutoUpgrade)
|
||||
require.Equal(t, false, exist)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
d.GetServerGroupIterator().ForeachServerGroup(checkEachMember, &status)
|
||||
}
|
||||
})
|
||||
}
|
481
pkg/deployment/deployment_suite_test.go
Normal file
|
@ -0,0 +1,481 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Author Adam Janikowski
|
||||
//
|
||||
|
||||
package deployment
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/arangodb/go-driver/jwt"
|
||||
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||
"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
|
||||
arangofake "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned/fake"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/constants"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
|
||||
"github.com/rs/zerolog"
|
||||
core "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/client-go/kubernetes/fake"
|
||||
recordfake "k8s.io/client-go/tools/record"
|
||||
)
|
||||
|
||||
const (
|
||||
testNamespace = "default"
|
||||
testDeploymentName = "test"
|
||||
testVersion = "3.5.2"
|
||||
testImage = "arangodb/arangodb:" + testVersion
|
||||
testCASecretName = "testCA"
|
||||
testJWTSecretName = "testJWT"
|
||||
testExporterToken = "testExporterToken"
|
||||
testRocksDBEncryptionKey = "testRocksDB"
|
||||
testPersistentVolumeClaimName = "testClaim"
|
||||
testLicense = "testLicense"
|
||||
testServiceAccountName = "testServiceAccountName"
|
||||
testPriorityClassName = "testPriority"
|
||||
testImageLifecycle = "arangodb/kube-arangodb:0.3.16"
|
||||
testExporterImage = "arangodb/arangodb-exporter:0.1.6"
|
||||
testImageAlpine = "alpine:3.7"
|
||||
|
||||
testYes = "yes"
|
||||
)
|
||||
|
||||
type testCaseStruct struct {
|
||||
Name string
|
||||
ArangoDeployment *api.ArangoDeployment
|
||||
Helper func(*testing.T, *Deployment, *testCaseStruct)
|
||||
config Config
|
||||
ExpectedError error
|
||||
ExpectedEvent string
|
||||
ExpectedPod core.Pod
|
||||
}
|
||||
|
||||
func createTestTLSVolume(serverGroupString, ID string) core.Volume {
|
||||
return k8sutil.CreateVolumeWithSecret(k8sutil.TlsKeyfileVolumeName,
|
||||
k8sutil.CreateTLSKeyfileSecretName(testDeploymentName, serverGroupString, ID))
|
||||
}
|
||||
|
||||
func createTestLifecycle() *core.Lifecycle {
|
||||
lifecycle, _ := k8sutil.NewLifecycle()
|
||||
return lifecycle
|
||||
}
|
||||
|
||||
func createTestToken(deployment *Deployment, testCase *testCaseStruct, paths []string) (string, error) {
|
||||
|
||||
name := testCase.ArangoDeployment.Spec.Authentication.GetJWTSecretName()
|
||||
s, err := k8sutil.GetTokenSecret(deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return jwt.CreateArangodJwtAuthorizationHeaderAllowedPaths(s, "kube-arangodb", paths)
|
||||
}
|
||||
|
||||
func createTestLivenessProbe(secure bool, authorization string, port int) *core.Probe {
|
||||
return k8sutil.HTTPProbeConfig{
|
||||
LocalPath: "/_api/version",
|
||||
Secure: secure,
|
||||
Authorization: authorization,
|
||||
Port: port,
|
||||
}.Create()
|
||||
}
|
||||
|
||||
func createTestReadinessProbe(secure bool, authorization string) *core.Probe {
|
||||
return k8sutil.HTTPProbeConfig{
|
||||
LocalPath: "/_admin/server/availability",
|
||||
Secure: secure,
|
||||
Authorization: authorization,
|
||||
InitialDelaySeconds: 2,
|
||||
PeriodSeconds: 2,
|
||||
}.Create()
|
||||
}
|
||||
|
||||
func createTestCommandForDBServer(name string, tls, auth, encryptionRocksDB bool) []string {
|
||||
command := []string{resources.ArangoDExecutor}
|
||||
if tls {
|
||||
command = append(command, "--cluster.my-address=ssl://"+testDeploymentName+"-"+
|
||||
api.ServerGroupDBServersString+"-"+name+".test-int.default.svc:8529")
|
||||
} else {
|
||||
command = append(command, "--cluster.my-address=tcp://"+testDeploymentName+"-"+
|
||||
api.ServerGroupDBServersString+"-"+name+".test-int.default.svc:8529")
|
||||
}
|
||||
|
||||
command = append(command, "--cluster.my-role=PRIMARY", "--database.directory=/data",
|
||||
"--foxx.queues=false", "--log.level=INFO", "--log.output=+")
|
||||
|
||||
if encryptionRocksDB {
|
||||
command = append(command, "--rocksdb.encryption-keyfile=/secrets/rocksdb/encryption/key")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.authentication=true")
|
||||
} else {
|
||||
command = append(command, "--server.authentication=false")
|
||||
}
|
||||
|
||||
if tls {
|
||||
command = append(command, "--server.endpoint=ssl://[::]:8529")
|
||||
} else {
|
||||
command = append(command, "--server.endpoint=tcp://[::]:8529")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.jwt-secret-keyfile=/secrets/cluster/jwt/token")
|
||||
}
|
||||
|
||||
command = append(command, "--server.statistics=true", "--server.storage-engine=rocksdb")
|
||||
|
||||
if tls {
|
||||
command = append(command, "--ssl.ecdh-curve=", "--ssl.keyfile=/secrets/tls/tls.keyfile")
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func createTestCommandForCoordinator(name string, tls, auth, encryptionRocksDB bool) []string {
|
||||
command := []string{resources.ArangoDExecutor}
|
||||
if tls {
|
||||
command = append(command, "--cluster.my-address=ssl://"+testDeploymentName+"-"+
|
||||
api.ServerGroupCoordinatorsString+"-"+name+".test-int.default.svc:8529")
|
||||
} else {
|
||||
command = append(command, "--cluster.my-address=tcp://"+testDeploymentName+"-"+
|
||||
api.ServerGroupCoordinatorsString+"-"+name+".test-int.default.svc:8529")
|
||||
}
|
||||
|
||||
command = append(command, "--cluster.my-role=COORDINATOR", "--database.directory=/data",
|
||||
"--foxx.queues=true", "--log.level=INFO", "--log.output=+")
|
||||
|
||||
if encryptionRocksDB {
|
||||
command = append(command, "--rocksdb.encryption-keyfile=/secrets/rocksdb/encryption/key")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.authentication=true")
|
||||
} else {
|
||||
command = append(command, "--server.authentication=false")
|
||||
}
|
||||
|
||||
if tls {
|
||||
command = append(command, "--server.endpoint=ssl://[::]:8529")
|
||||
} else {
|
||||
command = append(command, "--server.endpoint=tcp://[::]:8529")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.jwt-secret-keyfile=/secrets/cluster/jwt/token")
|
||||
}
|
||||
|
||||
command = append(command, "--server.statistics=true", "--server.storage-engine=rocksdb")
|
||||
|
||||
if tls {
|
||||
command = append(command, "--ssl.ecdh-curve=", "--ssl.keyfile=/secrets/tls/tls.keyfile")
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func createTestCommandForSingleMode(name string, tls, auth, encryptionRocksDB bool) []string {
|
||||
command := []string{resources.ArangoDExecutor}
|
||||
|
||||
command = append(command, "--database.directory=/data", "--foxx.queues=true", "--log.level=INFO",
|
||||
"--log.output=+")
|
||||
|
||||
if encryptionRocksDB {
|
||||
command = append(command, "--rocksdb.encryption-keyfile=/secrets/rocksdb/encryption/key")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.authentication=true")
|
||||
} else {
|
||||
command = append(command, "--server.authentication=false")
|
||||
}
|
||||
|
||||
if tls {
|
||||
command = append(command, "--server.endpoint=ssl://[::]:8529")
|
||||
} else {
|
||||
command = append(command, "--server.endpoint=tcp://[::]:8529")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.jwt-secret-keyfile=/secrets/cluster/jwt/token")
|
||||
}
|
||||
|
||||
command = append(command, "--server.statistics=true", "--server.storage-engine=rocksdb")
|
||||
|
||||
if tls {
|
||||
command = append(command, "--ssl.ecdh-curve=", "--ssl.keyfile=/secrets/tls/tls.keyfile")
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func createTestCommandForAgent(name string, tls, auth, encryptionRocksDB bool) []string {
|
||||
command := []string{
|
||||
resources.ArangoDExecutor,
|
||||
"--agency.activate=true",
|
||||
"--agency.disaster-recovery-id=" + name}
|
||||
|
||||
if tls {
|
||||
command = append(command, "--agency.my-address=ssl://"+testDeploymentName+"-"+
|
||||
api.ServerGroupAgentsString+"-"+name+"."+testDeploymentName+"-int."+testNamespace+".svc:8529")
|
||||
} else {
|
||||
command = append(command, "--agency.my-address=tcp://"+testDeploymentName+"-"+
|
||||
api.ServerGroupAgentsString+"-"+name+"."+testDeploymentName+"-int."+testNamespace+".svc:8529")
|
||||
}
|
||||
|
||||
command = append(command,
|
||||
"--agency.size=3",
|
||||
"--agency.supervision=true",
|
||||
"--database.directory=/data",
|
||||
"--foxx.queues=false",
|
||||
"--log.level=INFO",
|
||||
"--log.output=+")
|
||||
|
||||
if encryptionRocksDB {
|
||||
command = append(command, "--rocksdb.encryption-keyfile=/secrets/rocksdb/encryption/key")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.authentication=true")
|
||||
} else {
|
||||
command = append(command, "--server.authentication=false")
|
||||
}
|
||||
|
||||
if tls {
|
||||
command = append(command, "--server.endpoint=ssl://[::]:8529")
|
||||
} else {
|
||||
command = append(command, "--server.endpoint=tcp://[::]:8529")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--server.jwt-secret-keyfile=/secrets/cluster/jwt/token")
|
||||
}
|
||||
|
||||
command = append(command, "--server.statistics=false", "--server.storage-engine=rocksdb")
|
||||
|
||||
if tls {
|
||||
command = append(command, "--ssl.ecdh-curve=", "--ssl.keyfile=/secrets/tls/tls.keyfile")
|
||||
}
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func createTestCommandForSyncMaster(name string, tls, auth, monitoring bool) []string {
|
||||
command := []string{resources.ArangoSyncExecutor, "run", "master"}
|
||||
|
||||
if tls {
|
||||
command = append(command, "--cluster.endpoint=https://"+testDeploymentName+":8529")
|
||||
} else {
|
||||
command = append(command, "--cluster.endpoint=http://"+testDeploymentName+":8529")
|
||||
}
|
||||
|
||||
if auth {
|
||||
command = append(command, "--cluster.jwt-secret=/secrets/cluster/jwt/token")
|
||||
}
|
||||
|
||||
command = append(command, "--master.endpoint=https://"+testDeploymentName+"-sync.default.svc:8629")
|
||||
|
||||
command = append(command, "--master.jwt-secret=/secrets/master/jwt/token")
|
||||
|
||||
if monitoring {
|
||||
command = append(command, "--monitoring.token="+"$("+constants.EnvArangoSyncMonitoringToken+")")
|
||||
}
|
||||
|
||||
command = append(command, "--mq.type=direct", "--server.client-cafile=/secrets/client-auth/ca/ca.crt")
|
||||
|
||||
command = append(command, "--server.endpoint=https://"+testDeploymentName+
|
||||
"-syncmaster-"+name+".test-int."+testNamespace+".svc:8629",
|
||||
"--server.keyfile=/secrets/tls/tls.keyfile", "--server.port=8629")
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
func createTestCommandForSyncWorker(name string, tls, monitoring bool) []string {
|
||||
command := []string{resources.ArangoSyncExecutor, "run", "worker"}
|
||||
|
||||
scheme := "http"
|
||||
if tls {
|
||||
scheme = "https"
|
||||
}
|
||||
|
||||
command = append(command,
|
||||
"--master.endpoint=https://"+testDeploymentName+"-sync:8629",
|
||||
"--master.jwt-secret=/secrets/master/jwt/token")
|
||||
|
||||
if monitoring {
|
||||
command = append(command, "--monitoring.token="+"$("+constants.EnvArangoSyncMonitoringToken+")")
|
||||
}
|
||||
|
||||
command = append(command,
|
||||
"--server.endpoint="+scheme+"://"+testDeploymentName+"-syncworker-"+name+".test-int."+testNamespace+".svc:8729",
|
||||
"--server.port=8729")
|
||||
|
||||
return command
|
||||
}
|
||||
|
||||
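// createTestDeployment builds a Deployment backed by fake Kubernetes and ArangoDeployment clientsets, together with a fake event recorder for assertions.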
func createTestDeployment(config Config, arangoDeployment *api.ArangoDeployment) (*Deployment, *recordfake.FakeRecorder) {
|
||||
|
||||
eventRecorder := recordfake.NewFakeRecorder(10)
|
||||
kubernetesClientSet := fake.NewSimpleClientset()
|
||||
|
||||
arangoDeployment.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: testDeploymentName,
|
||||
Namespace: testNamespace,
|
||||
}
|
||||
|
||||
deps := Dependencies{
|
||||
Log: zerolog.New(ioutil.Discard),
|
||||
KubeCli: kubernetesClientSet,
|
||||
DatabaseCRCli: arangofake.NewSimpleClientset(&api.ArangoDeployment{}),
|
||||
EventRecorder: eventRecorder,
|
||||
}
|
||||
|
||||
d := &Deployment{
|
||||
apiObject: arangoDeployment,
|
||||
config: config,
|
||||
deps: deps,
|
||||
eventCh: make(chan *deploymentEvent, deploymentEventQueueSize),
|
||||
stopCh: make(chan struct{}),
|
||||
clientCache: newClientCache(deps.KubeCli, arangoDeployment),
|
||||
}
|
||||
|
||||
arangoDeployment.Spec.SetDefaults(arangoDeployment.GetName())
|
||||
d.resources = resources.NewResources(deps.Log, d)
|
||||
|
||||
return d, eventRecorder
|
||||
}
|
||||
|
||||
func createTestPorts() []core.ContainerPort {
|
||||
return []core.ContainerPort{
|
||||
{
|
||||
Name: "server",
|
||||
ContainerPort: 8529,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createTestImages(enterprise bool) api.ImageInfoList {
|
||||
return api.ImageInfoList{
|
||||
{
|
||||
Image: testImage,
|
||||
ArangoDBVersion: testVersion,
|
||||
ImageID: testImage,
|
||||
Enterprise: enterprise,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createTestExporterPorts() []core.ContainerPort {
|
||||
return []core.ContainerPort{
|
||||
{
|
||||
Name: "exporter",
|
||||
ContainerPort: 9101,
|
||||
Protocol: "TCP",
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
func createTestExporterCommand(secure bool) []string {
|
||||
command := []string{
|
||||
"/app/arangodb-exporter",
|
||||
}
|
||||
|
||||
if secure {
|
||||
command = append(command, "--arangodb.endpoint=https://localhost:8529")
|
||||
} else {
|
||||
command = append(command, "--arangodb.endpoint=http://localhost:8529")
|
||||
}
|
||||
|
||||
command = append(command, "--arangodb.jwt-file=/secrets/exporter/jwt/token")
|
||||
|
||||
if secure {
|
||||
command = append(command, "--ssl.keyfile=/secrets/tls/tls.keyfile")
|
||||
}
|
||||
return command
|
||||
}
|
||||
|
||||
func createTestExporterLivenessProbe(secure bool) *core.Probe {
|
||||
return k8sutil.HTTPProbeConfig{
|
||||
LocalPath: "/",
|
||||
Port: k8sutil.ArangoExporterPort,
|
||||
Secure: secure,
|
||||
}.Create()
|
||||
}
|
||||
|
||||
func createTestLifecycleContainer(resources core.ResourceRequirements) core.Container {
|
||||
binaryPath, _ := os.Executable()
|
||||
var securityContext api.ServerGroupSpecSecurityContext
|
||||
|
||||
return core.Container{
|
||||
Name: "init-lifecycle",
|
||||
Image: testImageLifecycle,
|
||||
Command: []string{binaryPath, "lifecycle", "copy", "--target", "/lifecycle/tools"},
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
k8sutil.LifecycleVolumeMount(),
|
||||
},
|
||||
ImagePullPolicy: "IfNotPresent",
|
||||
Resources: resources,
|
||||
SecurityContext: securityContext.NewSecurityContext(),
|
||||
}
|
||||
}
|
||||
|
||||
func createTestAlpineContainer(name string, requireUUID bool) core.Container {
|
||||
var securityContext api.ServerGroupSpecSecurityContext
|
||||
return k8sutil.ArangodInitContainer("uuid", name, "rocksdb", testImageAlpine, requireUUID, securityContext.NewSecurityContext())
|
||||
}
|
||||
|
||||
func (testCase *testCaseStruct) createTestPodData(deployment *Deployment, group api.ServerGroup,
|
||||
memberStatus api.MemberStatus) {
|
||||
|
||||
podName := k8sutil.CreatePodName(testDeploymentName, group.AsRoleAbbreviated(), memberStatus.ID,
|
||||
resources.CreatePodSuffix(testCase.ArangoDeployment.Spec))
|
||||
|
||||
testCase.ExpectedPod.ObjectMeta = metav1.ObjectMeta{
|
||||
Name: podName,
|
||||
Namespace: testNamespace,
|
||||
Labels: k8sutil.LabelsForDeployment(testDeploymentName, group.AsRole()),
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
testCase.ArangoDeployment.AsOwner(),
|
||||
},
|
||||
Finalizers: deployment.resources.CreatePodFinalizers(group),
|
||||
}
|
||||
|
||||
groupSpec := testCase.ArangoDeployment.Spec.GetServerGroupSpec(group)
|
||||
testCase.ExpectedPod.Spec.Tolerations = deployment.resources.CreatePodTolerations(group, groupSpec)
|
||||
}
|
||||
|
||||
func testCreateExporterContainer(secure bool, resources core.ResourceRequirements) core.Container {
|
||||
var securityContext api.ServerGroupSpecSecurityContext
|
||||
|
||||
return core.Container{
|
||||
Name: k8sutil.ExporterContainerName,
|
||||
Image: testExporterImage,
|
||||
Command: createTestExporterCommand(secure),
|
||||
Ports: createTestExporterPorts(),
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
k8sutil.ExporterJWTVolumeMount(),
|
||||
},
|
||||
Resources: k8sutil.ExtractPodResourceRequirement(resources),
|
||||
LivenessProbe: createTestExporterLivenessProbe(secure),
|
||||
ImagePullPolicy: core.PullIfNotPresent,
|
||||
SecurityContext: securityContext.NewSecurityContext(),
|
||||
}
|
||||
}
|
@ -23,9 +23,12 @@
package reconcile

import (
    "fmt"
    "reflect"
    "strings"

    "k8s.io/apimachinery/pkg/api/equality"

    "github.com/arangodb/kube-arangodb/pkg/apis/deployment"
    "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
@ -358,6 +361,15 @@ func podNeedsRotation(log zerolog.Logger, p core.Pod, apiObject metav1.Object, s
        return rotate, reason
    }

    // Volumes
    if rotate, reason := compareVolumes(groupSpec, p); rotate {
        return rotate, reason
    }

    if rotate, reason := compareVolumeMounts(groupSpec, c); rotate {
        return rotate, reason
    }

    return false, ""
}
@ -417,6 +429,63 @@ func compareProbes(probe pod.Probe, groupProbeDisabled *bool, groupProbeSpec *ap
    return false, ""
}

func compareVolumes(spec api.ServerGroupSpec, pod core.Pod) (bool, string) {
    if len(spec.Volumes) == 0 {
        return false, ""
    }

    currentVolumes := map[string]core.Volume{}

    for _, volume := range pod.Spec.Volumes {
        currentVolumes[volume.Name] = volume
    }

    for _, expectedVolumeTemplate := range spec.Volumes {
        expectedVolume := expectedVolumeTemplate.Volume()

        currentVolume, ok := currentVolumes[expectedVolume.Name]
        if !ok {
            return true, fmt.Sprintf("Volume %s is not mounted. Rotating", expectedVolume.Name)
        }

        if !equality.Semantic.DeepDerivative(expectedVolume, currentVolume) {
            return true, fmt.Sprintf("Volume %s needs to be updated", expectedVolume.Name)
        }
    }

    return false, ""
}

func compareVolumeMounts(spec api.ServerGroupSpec, container core.Container) (bool, string) {
    if len(container.VolumeMounts) < len(spec.VolumeMounts) {
        return true, "Missing volume mounts in container"
    }

    // Get compared mounts
    mounts := container.VolumeMounts
    if len(mounts) > 0 {
        if container.VolumeMounts[len(mounts)-1].MountPath == "/var/run/secrets/kubernetes.io/serviceaccount" {
            // Remove last mount added by ServiceAccount from compare
            mounts = mounts[0 : len(mounts)-1]
        }
    }

    if len(spec.VolumeMounts) > len(mounts) {
        return true, "Missing volume mounts in container"
    }

    // Compare only the trailing mounts; the custom mounts are appended after the default ones
    mounts = mounts[len(mounts)-len(spec.VolumeMounts):]

    // Now we can compare lists
    for id, mount := range mounts {
        if !equality.Semantic.DeepDerivative(spec.VolumeMounts[id].VolumeMount(), mount) {
            return true, fmt.Sprintf("Mount with id %d does not match - got %s, expected %s", id, mount.Name, spec.VolumeMounts[id].Name)
        }
    }

    return false, ""
}
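The comparisons above use DeepDerivative rather than DeepEqual: it ignores fields that are unset in its first argument, so server-side defaults on the live pod (for example a ConfigMap volume's DefaultMode) do not by themselves trigger a rotation, while anything the spec actually sets still has to match. A minimal standalone sketch of that behaviour, assuming only the k8s.io/apimachinery equality helper already imported here:

package main

import (
    "fmt"

    core "k8s.io/api/core/v1"
    "k8s.io/apimachinery/pkg/api/equality"
)

func main() {
    // Volume as declared in the group spec: only the fields the user set.
    expected := core.Volume{
        Name: "volume",
        VolumeSource: core.VolumeSource{
            ConfigMap: &core.ConfigMapVolumeSource{
                LocalObjectReference: core.LocalObjectReference{Name: "volume"},
            },
        },
    }

    // Volume as observed on the running pod: the API server added DefaultMode.
    mode := int32(420)
    actual := expected
    actual.ConfigMap = &core.ConfigMapVolumeSource{
        LocalObjectReference: core.LocalObjectReference{Name: "volume"},
        DefaultMode:          &mode,
    }

    // Unset fields in the first argument are ignored, so defaulting alone
    // does not report a difference.
    fmt.Println(equality.Semantic.DeepDerivative(expected, actual)) // true

    // A real difference (another ConfigMap name) does.
    changed := expected
    changed.ConfigMap = &core.ConfigMapVolumeSource{
        LocalObjectReference: core.LocalObjectReference{Name: "other"},
    }
    fmt.Println(equality.Semantic.DeepDerivative(changed, actual)) // false
}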

// clusterReadyForUpgrade returns true if the cluster is ready for the next update, that is:
// - all shards are in sync
// - all members are ready and fine
@ -23,6 +23,8 @@
package reconcile

import (
    "strings"

    "github.com/rs/zerolog"
    core "k8s.io/api/core/v1"

@ -141,6 +143,11 @@ func compareCapabilityLists(a, b []core.Capability) bool {
            return false
        }

        // If we got ALL on list and expect ALL to be present then it is equal for us
        if strings.EqualFold(string(capability), "ALL") {
            return true
        }

        checked[capability] = true
    }
@ -121,6 +121,7 @@ func createRotateServerStoragePlan(log zerolog.Logger, apiObject k8sutil.APIObje
        }
        return nil
    })

    return plan
}
@ -253,6 +253,14 @@ func (m *MemberArangoDPod) GetVolumes() ([]core.Volume, []core.VolumeMount) {
        volumes = append(volumes, k8sutil.LifecycleVolume())
    }

    if len(m.groupSpec.Volumes) > 0 {
        volumes = append(volumes, m.groupSpec.Volumes.Volumes()...)
    }

    if len(m.groupSpec.VolumeMounts) > 0 {
        volumeMounts = append(volumeMounts, m.groupSpec.VolumeMounts.VolumeMounts()...)
    }

    return volumes, volumeMounts
}