Mirror of https://github.com/arangodb/kube-arangodb.git

[Feature] Parallel backup uploads limit (#859)

Authored by Adam Janikowski on 2021-12-13 15:37:39 +01:00, committed by GitHub
parent 39035437d5
commit 1cef3cf511
7 changed files with 172 additions and 8 deletions

@@ -8,6 +8,7 @@
 - Fix Exporter in Deployments without authentication
 - Allow to disable ClusterScalingIntegration and add proper Scheduled label to pods
 - Add additional timeout parameters and kubernetes batch size
+- Limit parallel Backup uploads

 ## [1.2.5](https://github.com/arangodb/kube-arangodb/tree/1.2.5) (2021-10-25)
 - Split & Unify Lifecycle management functionality

@@ -126,6 +126,9 @@ var (
 	operatorKubernetesOptions struct {
 		maxBatchSize int64
 	}
+	operatorBackup struct {
+		concurrentUploads int
+	}
 	operatorTimeouts struct {
 		k8s time.Duration
 		arangoD time.Duration

@@ -167,6 +170,7 @@ func init() {
 	f.DurationVar(&operatorTimeouts.reconciliation, "timeout.reconciliation", globals.DefaultReconciliationTimeout, "The reconciliation timeout to the ArangoDB CR")
 	f.BoolVar(&operatorOptions.scalingIntegrationEnabled, "internal.scaling-integration", false, "Enable Scaling Integration")
 	f.Int64Var(&operatorKubernetesOptions.maxBatchSize, "kubernetes.max-batch-size", globals.DefaultKubernetesRequestBatchSize, "Size of batch during objects read")
+	f.IntVar(&operatorBackup.concurrentUploads, "backup-concurrent-uploads", globals.DefaultBackupConcurrentUploads, "Number of concurrent uploads per deployment")
 	features.Init(&cmdMain)
 }

@@ -196,6 +200,7 @@ func cmdMainRun(cmd *cobra.Command, args []string) {
 	globals.GetGlobalTimeouts().ArangoD().Set(operatorTimeouts.arangoD)
 	globals.GetGlobalTimeouts().Reconciliation().Set(operatorTimeouts.reconciliation)
 	globals.GetGlobals().Kubernetes().RequestBatchSize().Set(operatorKubernetesOptions.maxBatchSize)
+	globals.GetGlobals().Backup().ConcurrentUploads().Set(operatorBackup.concurrentUploads)
 	// Prepare log service
 	var err error
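
For illustration only (not part of this commit): a minimal standalone sketch of how the new backup-concurrent-uploads value flows into the shared globals at startup, mirroring the init() and cmdMainRun() hunks above. The globals identifiers come from this change; the standalone main and the direct use of the spf13/pflag package are hypothetical.

package main

import (
	"fmt"

	"github.com/arangodb/kube-arangodb/pkg/util/globals"
	flag "github.com/spf13/pflag"
)

func main() {
	// Register the option with the default introduced by this commit (4).
	concurrentUploads := flag.Int("backup-concurrent-uploads",
		globals.DefaultBackupConcurrentUploads,
		"Number of concurrent uploads per deployment")
	flag.Parse()

	// Propagate the parsed value into the process-wide globals, as cmdMainRun does.
	globals.GetGlobals().Backup().ConcurrentUploads().Set(*concurrentUploads)

	fmt.Println("effective upload limit:", globals.GetGlobals().Backup().ConcurrentUploads().Get())
}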

@@ -26,6 +26,8 @@ import (
 	"sync"
 	"testing"
+
+	"github.com/arangodb/kube-arangodb/pkg/util/globals"

 	"github.com/arangodb/go-driver"
 	"github.com/arangodb/kube-arangodb/pkg/util"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -464,8 +466,8 @@ func Test_State_Ready_KeepPendingWithForcedRunning(t *testing.T) {
 		}
 	}

-	require.Equal(t, size, upload)
-	require.Equal(t, 0, ready)
+	require.Equal(t, globals.DefaultBackupConcurrentUploads, upload)
+	require.Equal(t, size-globals.DefaultBackupConcurrentUploads, ready)
 }

 func Test_State_Ready_KeepPendingWithForcedRunningSameId(t *testing.T) {
@@ -531,3 +533,103 @@ func Test_State_Ready_KeepPendingWithForcedRunningSameId(t *testing.T) {
 	require.Equal(t, 1, upload)
 	require.Equal(t, size-1, ready)
 }
+
+func Test_State_Ready_Concurrent_Queued(t *testing.T) {
+	// Arrange
+	handler, mock := newErrorsFakeHandler(mockErrorsArangoClientBackup{})
+
+	createResponse, err := mock.Create()
+	require.NoError(t, err)
+
+	obj, deployment := newObjectSet(backupApi.ArangoBackupStateReady)
+
+	backupMeta, err := mock.Get(createResponse.ID)
+	require.NoError(t, err)
+
+	obj.Status.Backup = createBackupFromMeta(backupMeta, nil)
+	obj.Spec.Upload = &backupApi.ArangoBackupSpecOperation{
+		RepositoryURL: "Any",
+	}
+
+	size := globals.DefaultBackupConcurrentUploads
+
+	objects := make([]*backupApi.ArangoBackup, size)
+
+	for id := range objects {
+		createResponse, err := mock.Create()
+		require.NoError(t, err)
+
+		backupMeta, err := mock.Get(createResponse.ID)
+		require.NoError(t, err)
+
+		obj := newArangoBackup(deployment.GetName(), deployment.GetNamespace(), string(uuid.NewUUID()), backupApi.ArangoBackupStateUploading)
+		obj.Status.Backup = createBackupFromMeta(backupMeta, nil)
+		obj.Spec.Upload = &backupApi.ArangoBackupSpecOperation{
+			RepositoryURL: "s3://test",
+		}
+		obj.Status.Available = true
+
+		objects[id] = obj
+	}
+
+	// Act
+	createArangoDeployment(t, handler, deployment)
+	createArangoBackup(t, handler, objects...)
+	createArangoBackup(t, handler, obj)
+
+	require.NoError(t, handler.Handle(newItemFromBackup(operation.Update, obj)))
+
+	// Assert
+	newObj := refreshArangoBackup(t, handler, obj)
+	checkBackup(t, newObj, backupApi.ArangoBackupStateReady, true)
+	compareBackupMeta(t, backupMeta, newObj)
+}
+
+func Test_State_Ready_Concurrent_Started(t *testing.T) {
+	// Arrange
+	handler, mock := newErrorsFakeHandler(mockErrorsArangoClientBackup{})
+
+	createResponse, err := mock.Create()
+	require.NoError(t, err)
+
+	obj, deployment := newObjectSet(backupApi.ArangoBackupStateReady)
+
+	backupMeta, err := mock.Get(createResponse.ID)
+	require.NoError(t, err)
+
+	obj.Status.Backup = createBackupFromMeta(backupMeta, nil)
+	obj.Spec.Upload = &backupApi.ArangoBackupSpecOperation{
+		RepositoryURL: "Any",
+	}
+
+	size := globals.DefaultBackupConcurrentUploads - 1
+
+	objects := make([]*backupApi.ArangoBackup, size)
+
+	for id := range objects {
+		createResponse, err := mock.Create()
+		require.NoError(t, err)
+
+		backupMeta, err := mock.Get(createResponse.ID)
+		require.NoError(t, err)
+
+		obj := newArangoBackup(deployment.GetName(), deployment.GetNamespace(), string(uuid.NewUUID()), backupApi.ArangoBackupStateUploading)
+		obj.Status.Backup = createBackupFromMeta(backupMeta, nil)
+		obj.Spec.Upload = &backupApi.ArangoBackupSpecOperation{
+			RepositoryURL: "s3://test",
+		}
+		obj.Status.Available = true
+
+		objects[id] = obj
+	}
+
+	// Act
+	createArangoDeployment(t, handler, deployment)
+	createArangoBackup(t, handler, objects...)
+	createArangoBackup(t, handler, obj)
+
+	require.NoError(t, handler.Handle(newItemFromBackup(operation.Update, obj)))
+
+	// Assert
+	newObj := refreshArangoBackup(t, handler, obj)
+	checkBackup(t, newObj, backupApi.ArangoBackupStateUpload, true)
+	compareBackupMeta(t, backupMeta, newObj)
+}

@@ -26,6 +26,8 @@ import (
 	"context"
 	"strings"
+
+	"github.com/arangodb/kube-arangodb/pkg/util/globals"

 	clientBackup "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned/typed/backup/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -62,6 +64,8 @@ func isBackupRunning(backup *backupApi.ArangoBackup, client clientBackup.ArangoB
 		return false, newTemporaryError(err)
 	}

+	currentUploads := 0
+
 	for _, existingBackup := range backups.Items {
 		if existingBackup.Name == backup.Name {
 			continue

@@ -70,6 +74,7 @@ func isBackupRunning(backup *backupApi.ArangoBackup, client clientBackup.ArangoB
 		// We can upload multiple uploads from same deployment in same time
 		if backup.Status.State == backupApi.ArangoBackupStateReady &&
 			(existingBackup.Status.State == backupApi.ArangoBackupStateUpload || existingBackup.Status.State == backupApi.ArangoBackupStateUploading) {
+			currentUploads++
 			if backupUpload := backup.Status.Backup; backupUpload != nil {
 				if existingBackupUpload := existingBackup.Status.Backup; existingBackupUpload != nil {
 					if strings.EqualFold(backupUpload.ID, existingBackupUpload.ID) {

@@ -88,5 +93,9 @@ func isBackupRunning(backup *backupApi.ArangoBackup, client clientBackup.ArangoB
 		}
 	}

+	if backup.Status.State == backupApi.ArangoBackupStateReady {
+		return currentUploads >= globals.GetGlobals().Backup().ConcurrentUploads().Get(), nil
+	}
+
 	return false, nil
 }
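
For illustration only (not part of this commit): a small hypothetical helper that isolates the rule isBackupRunning now applies to backups in the Ready state. It counts sibling backups of the same deployment that are already in the Upload or Uploading state and reports whether the new upload has to stay queued once the configured limit is reached.

package main

import (
	"fmt"

	"github.com/arangodb/kube-arangodb/pkg/util/globals"
)

// uploadMustWait is a hypothetical helper: given the number of sibling backups
// currently in the Upload or Uploading state, it reports whether a Ready backup
// with an upload spec should remain queued.
func uploadMustWait(currentUploads int) bool {
	return currentUploads >= globals.GetGlobals().Backup().ConcurrentUploads().Get()
}

func main() {
	// With the default limit of 4 (DefaultBackupConcurrentUploads):
	fmt.Println(uploadMustWait(3)) // false: a fourth upload may start
	fmt.Println(uploadMustWait(4)) // true: the backup stays Ready (queued)
}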

@@ -28,6 +28,8 @@ const (
 	DefaultReconciliationTimeout = time.Minute

 	DefaultKubernetesRequestBatchSize = 256
+
+	DefaultBackupConcurrentUploads = 4
 )

@@ -37,7 +39,10 @@ var globalObj = &globals{
 		reconciliation: NewTimeout(DefaultReconciliationTimeout),
 	},
 	kubernetes: &globalKubernetes{
-		requestBatchSize: NewInt(DefaultKubernetesRequestBatchSize),
+		requestBatchSize: NewInt64(DefaultKubernetesRequestBatchSize),
+	},
+	backup: &globalBackup{
+		concurrentUploads: NewInt(DefaultBackupConcurrentUploads),
 	},
 }

@@ -52,11 +57,17 @@ func GetGlobalTimeouts() GlobalTimeouts {
 type Globals interface {
 	Timeouts() GlobalTimeouts
 	Kubernetes() GlobalKubernetes
+	Backup() GlobalBackup
 }

 type globals struct {
 	timeouts *globalTimeouts
 	kubernetes *globalKubernetes
+	backup *globalBackup
+}
+
+func (g globals) Backup() GlobalBackup {
+	return g.backup
 }

 func (g globals) Kubernetes() GlobalKubernetes {

@@ -79,6 +90,18 @@ func (g *globalKubernetes) RequestBatchSize() Int64 {
 	return g.requestBatchSize
 }

+type GlobalBackup interface {
+	ConcurrentUploads() Int
+}
+
+type globalBackup struct {
+	concurrentUploads Int
+}
+
+func (g *globalBackup) ConcurrentUploads() Int {
+	return g.concurrentUploads
+}
+
 type GlobalTimeouts interface {
 	Reconciliation() Timeout

@@ -32,6 +32,7 @@ func Test_Globals(t *testing.T) {
 		require.EqualValues(t, DefaultKubernetesTimeout, GetGlobals().Timeouts().Kubernetes().Get())
 		require.EqualValues(t, DefaultArangoDTimeout, GetGlobals().Timeouts().ArangoD().Get())
 		require.EqualValues(t, DefaultReconciliationTimeout, GetGlobals().Timeouts().Reconciliation().Get())
+		require.EqualValues(t, DefaultBackupConcurrentUploads, GetGlobals().Backup().ConcurrentUploads().Get())
 	})

 	t.Run("Override", func(t *testing.T) {

@@ -39,6 +40,7 @@
 		GetGlobals().Timeouts().Kubernetes().Set(0)
 		GetGlobals().Timeouts().ArangoD().Set(0)
 		GetGlobals().Timeouts().Reconciliation().Set(0)
+		GetGlobals().Backup().ConcurrentUploads().Set(0)
 	})

 	t.Run("Check", func(t *testing.T) {

@@ -46,5 +48,6 @@
 		require.EqualValues(t, 0, GetGlobals().Timeouts().Kubernetes().Get())
 		require.EqualValues(t, 0, GetGlobals().Timeouts().ArangoD().Get())
 		require.EqualValues(t, 0, GetGlobals().Timeouts().Reconciliation().Get())
+		require.EqualValues(t, 0, GetGlobals().Backup().ConcurrentUploads().Get())
 	})
 }

@@ -20,23 +20,44 @@
 package globals

+type Int interface {
+	Set(in int)
+	Get() int
+}
+
+func NewInt(def int) Int {
+	return &intObj{i: def}
+}
+
+type intObj struct {
+	i int
+}
+
+func (i *intObj) Set(in int) {
+	i.i = in
+}
+
+func (i *intObj) Get() int {
+	return i.i
+}
+
 type Int64 interface {
 	Set(in int64)
 	Get() int64
 }

-func NewInt(def int64) Int64 {
-	return &intObj{i: def}
+func NewInt64(def int64) Int64 {
+	return &int64Obj{i: def}
 }

-type intObj struct {
+type int64Obj struct {
 	i int64
 }

-func (i *intObj) Set(in int64) {
+func (i *int64Obj) Set(in int64) {
 	i.i = in
 }

-func (i *intObj) Get() int64 {
+func (i *int64Obj) Get() int64 {
 	return i.i
 }
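
For illustration only (not part of this commit): a short usage sketch of the split helpers above, assuming the globals package is imported as elsewhere in this change. NewInt backs int-valued settings such as ConcurrentUploads, while NewInt64 now backs int64-valued settings such as RequestBatchSize.

package main

import (
	"fmt"

	"github.com/arangodb/kube-arangodb/pkg/util/globals"
)

func main() {
	// Int wraps a plain int, as used for the backup upload limit.
	uploads := globals.NewInt(globals.DefaultBackupConcurrentUploads)
	uploads.Set(2)

	// Int64 wraps an int64, as used for the Kubernetes request batch size.
	batch := globals.NewInt64(globals.DefaultKubernetesRequestBatchSize)

	fmt.Println(uploads.Get(), batch.Get()) // 2 256
}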