mirror of https://github.com/arangodb/kube-arangodb.git (synced 2024-12-14 11:57:37 +00:00)

[Feature] Unify logging system (#1007)
parent ccf6f1148b · commit 1b6f0476c7
211 changed files with 2973 additions and 2548 deletions
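Every hunk in this commit applies one migration: ad-hoc zerolog loggers (cliLog, log.Logger) are replaced by loggers obtained from a shared registry in pkg/logging. A before/after sketch of the call-style change, using only calls that appear verbatim in the hunks below:

    // Before: zerolog chained events, terminated by Msg/Msgf.
    cliLog.Fatal().Err(err).Msg("failed to create basic data for the connection")

    // After: the error and fields chain first; the level method
    // (Debug/Info/Warn/Error/Fatal) terminates and takes printf-style arguments.
    logger.Err(err).Fatal("failed to create basic data for the connection")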
cmd/admin.go (14 changes)
@@ -107,11 +107,11 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) {
 	ctx := getInterruptionContext()
 	d, certCA, auth, err := getDeploymentAndCredentials(ctx, deploymentName)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("failed to create basic data for the connection")
+		logger.Err(err).Fatal("failed to create basic data for the connection")
 	}

 	if d.Spec.GetMode() != api.DeploymentModeCluster {
-		cliLog.Fatal().Msgf("agency state does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(),
+		logger.Fatal("agency state does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(),
 			d.GetName())
 	}

@@ -120,7 +120,7 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) {
 	conn := createClient([]string{endpoint}, certCA, auth, connection.ApplicationJSON)
 	leaderID, err := getAgencyLeader(ctx, conn)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("failed to get leader ID")
+		logger.Err(err).Fatal("failed to get leader ID")
 	}

 	dnsLeaderName := k8sutil.CreatePodDNSName(d.GetObjectMeta(), api.ServerGroupAgents.AsRole(), leaderID)
@@ -131,7 +131,7 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) {
 		defer body.Close()
 	}
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("can not get state of the agency")
+		logger.Err(err).Fatal("can not get state of the agency")
 	}

 	// Print and receive parallelly.
@@ -143,11 +143,11 @@ func cmdGetAgencyDump(cmd *cobra.Command, _ []string) {
 	ctx := getInterruptionContext()
 	d, certCA, auth, err := getDeploymentAndCredentials(ctx, deploymentName)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("failed to create basic data for the connection")
+		logger.Err(err).Fatal("failed to create basic data for the connection")
 	}

 	if d.Spec.GetMode() != api.DeploymentModeCluster {
-		cliLog.Fatal().Msgf("agency dump does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(),
+		logger.Fatal("agency dump does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(),
 			d.GetName())
 	}

@@ -158,7 +158,7 @@ func cmdGetAgencyDump(cmd *cobra.Command, _ []string) {
 		defer body.Close()
 	}
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("can not get dump")
+		logger.Err(err).Fatal("can not get dump")
 	}

 	// Print and receive parallelly.
cmd/cmd.go (96 changes)
@@ -31,29 +31,33 @@ import (
 	"strings"
 	"time"

+	"github.com/arangodb/kube-arangodb/pkg/util/globals"
+
+	"github.com/gin-gonic/gin"
+
+	operatorHTTP "github.com/arangodb/kube-arangodb/pkg/util/http"
+
+	"github.com/arangodb/kube-arangodb/pkg/version"
+
+	"github.com/arangodb/kube-arangodb/pkg/operator/scope"
+
+	"github.com/arangodb/kube-arangodb/pkg/deployment/features"
+
 	deploymentApi "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 	"github.com/arangodb/kube-arangodb/pkg/crd"
-	"github.com/arangodb/kube-arangodb/pkg/deployment/features"
 	"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned/scheme"
 	"github.com/arangodb/kube-arangodb/pkg/logging"
 	"github.com/arangodb/kube-arangodb/pkg/operator"
-	"github.com/arangodb/kube-arangodb/pkg/operator/scope"
 	"github.com/arangodb/kube-arangodb/pkg/server"
 	"github.com/arangodb/kube-arangodb/pkg/util"
 	"github.com/arangodb/kube-arangodb/pkg/util/constants"
 	utilsError "github.com/arangodb/kube-arangodb/pkg/util/errors"
-	"github.com/arangodb/kube-arangodb/pkg/util/globals"
-	operatorHTTP "github.com/arangodb/kube-arangodb/pkg/util/http"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 	"github.com/arangodb/kube-arangodb/pkg/util/kclient"
 	"github.com/arangodb/kube-arangodb/pkg/util/probe"
 	"github.com/arangodb/kube-arangodb/pkg/util/retry"
-	"github.com/arangodb/kube-arangodb/pkg/version"
-
-	"github.com/gin-gonic/gin"
 	"github.com/pkg/errors"
 	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 	flag "github.com/spf13/pflag"
 	appsv1 "k8s.io/api/apps/v1"
@@ -83,14 +87,15 @@ const (
 )

 var (
+	logger        = logging.Global().RegisterAndGetLogger("root", logging.Info)
+	eventRecorder = logging.Global().RegisterAndGetLogger("root-event-recorder", logging.Info)
+
 	cmdMain = cobra.Command{
 		Use: "arangodb_operator",
 		Run: executeMain,
 	}

 	logLevels []string
-	cliLog    = logging.NewRootLogger()
-	logService logging.Service
 	serverOptions struct {
 		host string
 		port int
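Hunks like the one above register named loggers once, at package scope, against a process-global registry. A minimal self-contained sketch of that pattern; the registry calls are exactly those the diff uses, while the topic name, field, and message here are illustrative:

    package main

    import "github.com/arangodb/kube-arangodb/pkg/logging"

    // Register once at package scope, as the diff does for "root" and
    // "root-event-recorder"; logging.Info is the logger's default level.
    var logger = logging.Global().RegisterAndGetLogger("example", logging.Info)

    func main() {
    	// Fields chain first; the terminating level call takes printf-style args.
    	logger.Str("component", "demo").Info("started %d workers", 4)
    }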
@@ -157,13 +162,14 @@ func init() {
 	f.StringVar(&serverOptions.tlsSecretName, "server.tls-secret-name", "", "Name of secret containing tls.crt & tls.key for HTTPS server (if empty, self-signed certificate is used)")
 	f.StringVar(&serverOptions.adminSecretName, "server.admin-secret-name", defaultAdminSecretName, "Name of secret containing username + password for login to the dashboard")
 	f.BoolVar(&serverOptions.allowAnonymous, "server.allow-anonymous-access", false, "Allow anonymous access to the dashboard")
-	f.StringArrayVar(&logLevels, "log.level", []string{defaultLogLevel}, fmt.Sprintf("Set log levels in format <level> or <logger>=<level>. Possible loggers: %s", strings.Join(logging.LoggerNames(), ", ")))
+	f.StringArrayVar(&logLevels, "log.level", []string{defaultLogLevel}, fmt.Sprintf("Set log levels in format <level> or <logger>=<level>. Possible loggers: %s", strings.Join(logging.Global().Names(), ", ")))
 	f.BoolVar(&operatorOptions.enableDeployment, "operator.deployment", false, "Enable to run the ArangoDeployment operator")
 	f.BoolVar(&operatorOptions.enableDeploymentReplication, "operator.deployment-replication", false, "Enable to run the ArangoDeploymentReplication operator")
 	f.BoolVar(&operatorOptions.enableStorage, "operator.storage", false, "Enable to run the ArangoLocalStorage operator")
 	f.BoolVar(&operatorOptions.enableBackup, "operator.backup", false, "Enable to run the ArangoBackup operator")
 	f.BoolVar(&operatorOptions.enableApps, "operator.apps", false, "Enable to run the ArangoApps operator")
 	f.BoolVar(&operatorOptions.enableK2KClusterSync, "operator.k2k-cluster-sync", false, "Enable to run the ListSimple operator")
+	f.MarkDeprecated("operator.k2k-cluster-sync", "Enabled within deployment operator")
 	f.BoolVar(&operatorOptions.versionOnly, "operator.version", false, "Enable only version endpoint in Operator")
 	f.StringVar(&operatorOptions.alpineImage, "operator.alpine-image", UBIImageEnv.GetOrDefault(defaultAlpineImage), "Docker image used for alpine containers")
 	f.MarkDeprecated("operator.alpine-image", "Value is not used anymore")
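As the help text above states, --log.level may be repeated and accepts either a bare level or a <logger>=<level> pair scoped to one registered logger, for example --log.level=info --log.level=operator=debug (the logger name here is illustrative; valid names come from logging.Global().Names()).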
@@ -205,9 +211,6 @@ func executeUsage(cmd *cobra.Command, args []string) {

 // Run the operator
 func executeMain(cmd *cobra.Command, args []string) {
-	// Set global logger
-	log.Logger = logging.NewRootLogger()
-
 	// Get environment
 	namespace := os.Getenv(constants.EnvOperatorPodNamespace)
 	name := os.Getenv(constants.EnvOperatorPodName)
@@ -228,20 +231,23 @@ func executeMain(cmd *cobra.Command, args []string) {

 	// Prepare log service
 	var err error
-	if err := logging.InitGlobalLogger(defaultLogLevel, logLevels); err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to initialize log service")
+	levels, err := logging.ParseLogLevelsFromArgs(logLevels)
+	if err != nil {
+		logger.Err(err).Fatal("Unable to parse log level")
 	}

-	logService = logging.GlobalLogger()
+	logging.Global().ApplyLogLevels(levels)

-	logService.ConfigureRootLogger(func(log zerolog.Logger) zerolog.Logger {
-		podNameParts := strings.Split(name, "-")
-		operatorID := podNameParts[len(podNameParts)-1]
-		cliLog = cliLog.With().Str("operator-id", operatorID).Logger()
-		return log.With().Str("operator-id", operatorID).Logger()
+	podNameParts := strings.Split(name, "-")
+	operatorID := podNameParts[len(podNameParts)-1]
+	logging.Global().RegisterWrappers(func(in *zerolog.Event) *zerolog.Event {
+		return in.Str("operator-id", operatorID)
 	})

-	klog.SetOutput(logService.MustGetLogger(logging.LoggerNameKLog))
+	kl := logging.Global().RegisterAndGetLogger("klog", logging.Info)
+
+	klog.SetOutput(kl.InfoIO())
 	klog.Info("nice to meet you")
 	klog.Flush()
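A condensed sketch of the initialization sequence this hunk arrives at. All registry calls are taken from the hunk itself; the hard-coded argument values and the klog import path are assumptions for the sake of a runnable example:

    package main

    import (
    	"github.com/arangodb/kube-arangodb/pkg/logging"
    	"github.com/rs/zerolog"
    	"k8s.io/klog/v2"
    )

    var logger = logging.Global().RegisterAndGetLogger("root", logging.Info)

    func main() {
    	// Apply "<level>" / "<logger>=<level>" arguments to the registry.
    	levels, err := logging.ParseLogLevelsFromArgs([]string{"info"})
    	if err != nil {
    		logger.Err(err).Fatal("Unable to parse log level")
    	}
    	logging.Global().ApplyLogLevels(levels)

    	// Stamp every event emitted through the registry with an extra field.
    	logging.Global().RegisterWrappers(func(in *zerolog.Event) *zerolog.Event {
    		return in.Str("operator-id", "abc123") // value is illustrative
    	})

    	// Bridge klog into the registry: InfoIO() exposes the logger as an io.Writer.
    	kl := logging.Global().RegisterAndGetLogger("klog", logging.Info)
    	klog.SetOutput(kl.InfoIO())
    	klog.Info("nice to meet you")
    	klog.Flush()
    }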
@@ -249,46 +255,46 @@ func executeMain(cmd *cobra.Command, args []string) {
 	if !operatorOptions.enableDeployment && !operatorOptions.enableDeploymentReplication && !operatorOptions.enableStorage &&
 		!operatorOptions.enableBackup && !operatorOptions.enableApps && !operatorOptions.enableK2KClusterSync {
 		if !operatorOptions.versionOnly {
-			cliLog.Fatal().Err(err).Msg("Turn on --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync or any combination of these")
+			logger.Err(err).Fatal("Turn on --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync or any combination of these")
 		}
 	} else if operatorOptions.versionOnly {
-		cliLog.Fatal().Err(err).Msg("Options --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync cannot be enabled together with --operator.version")
+		logger.Err(err).Fatal("Options --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync cannot be enabled together with --operator.version")
 	}

 	// Log version
-	cliLog.Info().
+	logger.
 		Str("pod-name", name).
 		Str("pod-namespace", namespace).
-		Msgf("Starting arangodb-operator (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)
+		Info("Starting arangodb-operator (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)

 	// Check environment
 	if !operatorOptions.versionOnly {
 		if len(namespace) == 0 {
-			cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodNamespace)
+			logger.Fatal("%s environment variable missing", constants.EnvOperatorPodNamespace)
 		}
 		if len(name) == 0 {
-			cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodName)
+			logger.Fatal("%s environment variable missing", constants.EnvOperatorPodName)
 		}
 		if len(ip) == 0 {
-			cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodIP)
+			logger.Fatal("%s environment variable missing", constants.EnvOperatorPodIP)
 		}

 		// Get host name
 		id, err := os.Hostname()
 		if err != nil {
-			cliLog.Fatal().Err(err).Msg("Failed to get hostname")
+			logger.Err(err).Fatal("Failed to get hostname")
 		}

 		client, ok := kclient.GetDefaultFactory().Client()
 		if !ok {
-			cliLog.Fatal().Msg("Failed to get client")
+			logger.Fatal("Failed to get client")
 		}

 		if crdOptions.install {
 			ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
 			defer cancel()

-			crd.EnsureCRD(ctx, logService.MustGetLogger("crd"), client)
+			crd.EnsureCRD(ctx, client)
 		}

 		secrets := client.Kubernetes().CoreV1().Secrets(namespace)
@@ -296,11 +302,11 @@ func executeMain(cmd *cobra.Command, args []string) {
 	// Create operator
 	cfg, deps, err := newOperatorConfigAndDeps(id+"-"+name, namespace, name)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to create operator config & deps")
+		logger.Err(err).Fatal("Failed to create operator config & deps")
 	}
 	o, err := operator.NewOperator(cfg, deps)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to create operator")
+		logger.Err(err).Fatal("Failed to create operator")
 	}

 	listenAddr := net.JoinHostPort(serverOptions.host, strconv.Itoa(serverOptions.port))
@@ -314,7 +320,6 @@ func executeMain(cmd *cobra.Command, args []string) {
 		AdminSecretName: serverOptions.adminSecretName,
 		AllowAnonymous:  serverOptions.allowAnonymous,
 	}, server.Dependencies{
-		Log:           logService.MustGetLogger(logging.LoggerNameServer),
 		LivenessProbe: &livenessProbe,
 		Deployment: server.OperatorDependency{
 			Enabled: cfg.EnableDeployment,
@@ -344,9 +349,9 @@ func executeMain(cmd *cobra.Command, args []string) {

 		Secrets: secrets,
 	}); err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to create HTTP server")
+		logger.Err(err).Fatal("Failed to create HTTP server")
 	} else {
-		go utilsError.LogError(cliLog, "error while starting service", svr.Run)
+		go utilsError.LogError(logger, "error while starting service", svr.Run)
 	}

 	// startChaos(context.Background(), cfg.KubeCli, cfg.Namespace, chaosLevel)
@@ -355,7 +360,7 @@ func executeMain(cmd *cobra.Command, args []string) {
 		o.Run()
 	} else {
 		if err := startVersionProcess(); err != nil {
-			cliLog.Fatal().Err(err).Msg("Failed to create HTTP server")
+			logger.Err(err).Fatal("Failed to create HTTP server")
 		}
 	}
 }
@@ -363,7 +368,7 @@ func executeMain(cmd *cobra.Command, args []string) {
 func startVersionProcess() error {
 	// Just expose version
 	listenAddr := net.JoinHostPort(serverOptions.host, strconv.Itoa(serverOptions.port))
-	cliLog.Info().Str("addr", listenAddr).Msgf("Starting version endpoint")
+	logger.Str("addr", listenAddr).Info("Starting version endpoint")

 	gin.SetMode(gin.ReleaseMode)
 	r := gin.New()
@@ -396,7 +401,7 @@ func newOperatorConfigAndDeps(id, namespace, name string) (operator.Config, operator.Dependencies, error) {
 		return operator.Config{}, operator.Dependencies{}, errors.WithStack(fmt.Errorf("Failed to get my pod's service account: %s", err))
 	}

-	eventRecorder := createRecorder(cliLog, client.Kubernetes(), name, namespace)
+	eventRecorder := createRecorder(client.Kubernetes(), name, namespace)

 	scope, ok := scope.AsScope(operatorOptions.scope)
 	if !ok {
@@ -424,7 +429,6 @@ func newOperatorConfigAndDeps(id, namespace, name string) (operator.Config, operator.Dependencies, error) {
 		ShutdownTimeout: shutdownOptions.timeout,
 	}
 	deps := operator.Dependencies{
-		LogService:    logService,
 		Client:        client,
 		EventRecorder: eventRecorder,
 		LivenessProbe: &livenessProbe,
@@ -446,10 +450,10 @@ func getMyPodInfo(kubecli kubernetes.Interface, namespace, name string) (string, string, error) {
 	op := func() error {
 		pod, err := kubecli.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
-			cliLog.Error().
+			logger.
 				Err(err).
 				Str("name", name).
-				Msg("Failed to get operator pod")
+				Error("Failed to get operator pod")
 			return errors.WithStack(err)
 		}
 		sa = pod.Spec.ServiceAccountName
@@ -468,10 +472,10 @@ func getMyPodInfo(kubecli kubernetes.Interface, namespace, name string) (string, string, error) {
 	return image, sa, nil
 }

-func createRecorder(log zerolog.Logger, kubecli kubernetes.Interface, name, namespace string) record.EventRecorder {
+func createRecorder(kubecli kubernetes.Interface, name, namespace string) record.EventRecorder {
 	eventBroadcaster := record.NewBroadcaster()
 	eventBroadcaster.StartLogging(func(format string, args ...interface{}) {
-		log.Info().Msgf(format, args...)
+		eventRecorder.Info(format, args...)
 	})
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.CoreV1().RESTClient()).Events(namespace)})
 	combinedScheme := runtime.NewScheme()
@@ -98,23 +98,22 @@ func init() {

 // Wait until all finalizers of the current pod have been removed.
 func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) {
-	cliLog.Info().Msgf("Starting arangodb-operator (%s), lifecycle preStop, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)
-
+	logger.Info("Starting arangodb-operator (%s), lifecycle preStop, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)

 	// Get environment
 	namespace := os.Getenv(constants.EnvOperatorPodNamespace)
 	if len(namespace) == 0 {
-		cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodNamespace)
+		logger.Fatal("%s environment variable missing", constants.EnvOperatorPodNamespace)
 	}
 	name := os.Getenv(constants.EnvOperatorPodName)
 	if len(name) == 0 {
-		cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodName)
+		logger.Fatal("%s environment variable missing", constants.EnvOperatorPodName)
 	}

 	// Create kubernetes client
 	client, ok := kclient.GetDefaultFactory().Client()
 	if !ok {
-		cliLog.Fatal().Msg("Client not initialised")
+		logger.Fatal("Client not initialised")
 	}

 	pods := client.Kubernetes().CoreV1().Pods(namespace)
@@ -122,13 +121,13 @@ func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) {
 	for {
 		p, err := pods.Get(context.Background(), name, metav1.GetOptions{})
 		if k8sutil.IsNotFound(err) {
-			cliLog.Warn().Msg("Pod not found")
+			logger.Warn("Pod not found")
 			return
 		} else if err != nil {
 			recentErrors++
-			cliLog.Error().Err(err).Msg("Failed to get pod")
+			logger.Err(err).Error("Failed to get pod")
 			if recentErrors > 20 {
-				cliLog.Fatal().Err(err).Msg("Too many recent errors")
+				logger.Err(err).Fatal("Too many recent errors")
 				return
 			}
 		} else {
@@ -136,10 +135,10 @@ func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) {
 			finalizerCount := len(p.GetFinalizers())
 			if finalizerCount == 0 {
 				// No more finalizers, we're done
-				cliLog.Info().Msg("All finalizers gone, we can stop now")
+				logger.Info("All finalizers gone, we can stop now")
 				return
 			}
-			cliLog.Info().Msgf("Waiting for %d more finalizers to be removed", finalizerCount)
+			logger.Info("Waiting for %d more finalizers to be removed", finalizerCount)
 		}
 		// Wait a bit
 		time.Sleep(time.Second)
@@ -148,17 +147,17 @@ func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) {

 // Copy the executable to a given place.
 func cmdLifecycleCopyRun(cmd *cobra.Command, args []string) {
-	cliLog.Info().Msgf("Starting arangodb-operator (%s), lifecycle copy, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)
+	logger.Info("Starting arangodb-operator (%s), lifecycle copy, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)

 	exePath, err := os.Executable()
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to get executable path")
+		logger.Err(err).Fatal("Failed to get executable path")
 	}

 	// Open source
 	rd, err := os.Open(exePath)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to open executable file")
+		logger.Err(err).Fatal("Failed to open executable file")
 	}
 	defer rd.Close()

@@ -166,20 +165,20 @@ func cmdLifecycleCopyRun(cmd *cobra.Command, args []string) {
 	targetPath := filepath.Join(lifecycleCopyOptions.TargetDir, filepath.Base(exePath))
 	wr, err := os.Create(targetPath)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to create target file")
+		logger.Err(err).Fatal("Failed to create target file")
 	}
 	defer wr.Close()

 	if _, err := io.Copy(wr, rd); err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to copy")
+		logger.Err(err).Fatal("Failed to copy")
 	}

 	// Set file mode
 	if err := os.Chmod(targetPath, 0755); err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to chmod")
+		logger.Err(err).Fatal("Failed to chmod")
 	}

-	cliLog.Info().Msgf("Executable copied to %s", targetPath)
+	logger.Info("Executable copied to %s", targetPath)
 }

 type cmdLifecyclePreStopRunPort struct {
@@ -193,17 +192,17 @@ func (c *cmdLifecyclePreStopRunPort) run(cmd *cobra.Command, args []string) error {
 	// Get environment
 	namespace := os.Getenv(constants.EnvOperatorPodNamespace)
 	if len(namespace) == 0 {
-		cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodNamespace)
+		logger.Fatal("%s environment variable missing", constants.EnvOperatorPodNamespace)
 	}
 	name := os.Getenv(constants.EnvOperatorPodName)
 	if len(name) == 0 {
-		cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodName)
+		logger.Fatal("%s environment variable missing", constants.EnvOperatorPodName)
 	}

 	// Create kubernetes client
 	client, ok := kclient.GetDefaultFactory().Client()
 	if !ok {
-		cliLog.Fatal().Msg("Client not initialised")
+		logger.Fatal("Client not initialised")
 	}

 	pods := client.Kubernetes().CoreV1().Pods(namespace)
@@ -221,13 +220,13 @@ func (c *cmdLifecyclePreStopRunPort) run(cmd *cobra.Command, args []string) error {

 		p, err := pods.Get(context.Background(), name, metav1.GetOptions{})
 		if k8sutil.IsNotFound(err) {
-			cliLog.Warn().Msg("Pod not found")
+			logger.Warn("Pod not found")
 			return nil
 		} else if err != nil {
 			recentErrors++
-			cliLog.Error().Err(err).Msg("Failed to get pod")
+			logger.Err(err).Error("Failed to get pod")
 			if recentErrors > 20 {
-				cliLog.Fatal().Err(err).Msg("Too many recent errors")
+				logger.Err(err).Fatal("Too many recent errors")
 				return nil
 			}
 		} else {
@@ -63,26 +63,26 @@ func cmdLifecycleWaitCheck(cmd *cobra.Command, _ []string) {

 	deploymentName, err := cmd.Flags().GetString(ArgDeploymentName)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg(fmt.Sprintf("error parsing argument: %s", ArgDeploymentName))
+		logger.Err(err).Fatal("error parsing argument: %s", ArgDeploymentName)
 	}
 	watchTimeout, err := cmd.Flags().GetDuration(ArgDeploymentWatchTimeout)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg(fmt.Sprintf("error parsing argument: %s", ArgDeploymentWatchTimeout))
+		logger.Err(err).Fatal("error parsing argument: %s", ArgDeploymentWatchTimeout)
 	}

 	for {
 		d, err := getDeployment(ctx, os.Getenv(constants.EnvOperatorPodNamespace), deploymentName)
 		if err != nil {
-			cliLog.Fatal().Err(err).Msg(fmt.Sprintf("error getting ArangoDeployment: %s", d.Name))
+			logger.Err(err).Fatal(fmt.Sprintf("error getting ArangoDeployment: %s", d.Name))
 		}

 		isUpToDate, err := d.IsUpToDate()
 		if err != nil {
-			cliLog.Err(err).Msg(fmt.Sprintf("error checking Status for ArangoDeployment: %s", d.Name))
+			logger.Err(err).Error(fmt.Sprintf("error checking Status for ArangoDeployment: %s", d.Name))
 		}

 		if isUpToDate {
-			cliLog.Info().Msg(fmt.Sprintf("ArangoDeployment: %s is %s", d.Name, v1.ConditionTypeUpToDate))
+			logger.Info(fmt.Sprintf("ArangoDeployment: %s is %s", d.Name, v1.ConditionTypeUpToDate))
 			return
 		}

@@ -90,10 +90,10 @@ func cmdLifecycleWaitCheck(cmd *cobra.Command, _ []string) {
 		case <-ctx.Done():
 			return
 		case <-time.After(WatchCheckInterval):
-			cliLog.Info().Msg(fmt.Sprintf("ArangoDeployment: %s is not ready yet. Waiting...", d.Name))
+			logger.Info("ArangoDeployment: %s is not ready yet. Waiting...", d.Name)
 			continue
 		case <-time.After(watchTimeout):
-			cliLog.Error().Msg(fmt.Sprintf("ArangoDeployment: %s is not %s yet - operation timed out!", d.Name, v1.ConditionTypeUpToDate))
+			logger.Error("ArangoDeployment: %s is not %s yet - operation timed out!", d.Name, v1.ConditionTypeUpToDate)
 			return
 		}
 	}
@@ -129,7 +129,7 @@ func runVolumeInspector(ctx context.Context, kube kubernetes.Interface, ns, name
 	}
 	defer func() {
 		if deletePVC {
-			cliLog.Debug().Str("pvc-name", claimname).Msg("deleting pvc")
+			logger.Str("pvc-name", claimname).Debug("deleting pvc")
 			kube.CoreV1().PersistentVolumeClaims(ns).Delete(context.Background(), claimname, metav1.DeleteOptions{})
 		}
 	}()
@@ -383,7 +383,7 @@ func cmdRebootRun(cmd *cobra.Command, args []string) {
 	// Create kubernetes client
 	client, ok := kclient.GetDefaultFactory().Client()
 	if !ok {
-		cliLog.Fatal().Msg("Failed to get client")
+		logger.Fatal("Failed to get client")
 	}

 	kubecli := client.Kubernetes()
@@ -392,12 +392,12 @@ func cmdRebootRun(cmd *cobra.Command, args []string) {

 	image, err := getMyImage(kubecli, namespace, podname)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("failed to get my image")
+		logger.Err(err).Fatal("failed to get my image")
 	}

 	vinfo, err := preflightChecks(kubecli, volumes)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("preflight checks failed")
+		logger.Err(err).Fatal("preflight checks failed")
 	}

 	var wg sync.WaitGroup
@@ -406,7 +406,7 @@ func cmdRebootRun(cmd *cobra.Command, args []string) {
 	received := 0

 	for _, volumeName := range volumes {
-		cliLog.Debug().Str("volume", volumeName).Msg("Starting inspection")
+		logger.Str("volume", volumeName).Debug("Starting inspection")
 		wg.Add(1)
 		go func(vn string) {
 			defer wg.Done()
@@ -424,9 +424,9 @@ func cmdRebootRun(cmd *cobra.Command, args []string) {
 		select {
 		case res := <-resultChan:
 			if res.Error != nil {
-				cliLog.Error().Err(res.Error).Msg("Inspection failed")
+				logger.Err(res.Error).Error("Inspection failed")
 			} else {
-				cliLog.Info().Str("claim", res.Claim).Str("uuid", res.UUID).Msg("Inspection completed")
+				logger.Str("claim", res.Claim).Str("uuid", res.UUID).Info("Inspection completed")
 			}
 			members[res.UUID] = res
 			received++
@@ -435,13 +435,13 @@ func cmdRebootRun(cmd *cobra.Command, args []string) {
 		}
 	}

-	cliLog.Debug().Msg("results complete - generating ArangoDeployment resource")
+	logger.Debug("results complete - generating ArangoDeployment resource")

 	if err := createArangoDeployment(extcli, namespace, rebootOptions.DeploymentName, rebootOptions.ImageName, members); err != nil {
-		cliLog.Error().Err(err).Msg("failed to create deployment")
+		logger.Err(err).Error("failed to create deployment")
 	}

-	cliLog.Info().Msg("ArangoDeployment created.")
+	logger.Info("ArangoDeployment created.")

 	// Wait for everyone to be completed
 	wg.Wait()
@@ -481,6 +481,6 @@ func cmdRebootInspectRun(cmd *cobra.Command, args []string) {
 	})

 	if http.ListenAndServe(":8080", nil); err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to listen and serve")
+		logger.Err(err).Fatal("Failed to listen and serve")
 	}
 }
@@ -30,7 +30,6 @@ import (

 	"github.com/spf13/cobra"

-	"github.com/arangodb/kube-arangodb/pkg/logging"
 	"github.com/arangodb/kube-arangodb/pkg/storage/provisioner"
 	"github.com/arangodb/kube-arangodb/pkg/storage/provisioner/service"
 	"github.com/arangodb/kube-arangodb/pkg/util/constants"
@@ -63,26 +62,21 @@
 // Run the provisioner
 func cmdStorageProvisionerRun(cmd *cobra.Command, args []string) {
 	var err error
-	if err := logging.InitGlobalLogger(defaultLogLevel, logLevels); err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to initialize log service")
-	}
-
-	logService = logging.GlobalLogger()
-
 	// Log version
-	cliLog.Info().Msgf("Starting arangodb local storage provisioner (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)
+	logger.Info("Starting arangodb local storage provisioner (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build)

 	// Get environment
 	nodeName := os.Getenv(constants.EnvOperatorNodeName)
 	if len(nodeName) == 0 {
-		cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorNodeName)
+		logger.Fatal("%s environment variable missing", constants.EnvOperatorNodeName)
 	}

-	config, deps := newProvisionerConfigAndDeps(nodeName)
-	p, err := service.New(config, deps)
+	config := newProvisionerConfigAndDeps(nodeName)
+	p, err := service.New(config)
 	if err != nil {
-		cliLog.Fatal().Err(err).Msg("Failed to create provisioner")
+		logger.Err(err).Fatal("Failed to create provisioner")
 	}

 	ctx := context.TODO()
@@ -90,14 +84,11 @@ func cmdStorageProvisionerRun(cmd *cobra.Command, args []string) {
 }

 // newProvisionerConfigAndDeps creates storage provisioner config & dependencies.
-func newProvisionerConfigAndDeps(nodeName string) (service.Config, service.Dependencies) {
+func newProvisionerConfigAndDeps(nodeName string) service.Config {
 	cfg := service.Config{
 		Address:  net.JoinHostPort("0.0.0.0", strconv.Itoa(storageProvisioner.port)),
 		NodeName: nodeName,
 	}
-	deps := service.Dependencies{
-		Log: logService.MustGetLogger(logging.LoggerNameProvisioner),
-	}

-	return cfg, deps
+	return cfg
 }
@@ -21,7 +21,6 @@
 package cmd

 import (
-	"github.com/rs/zerolog/log"
 	"github.com/spf13/cobra"
 )

@@ -57,9 +56,9 @@ var cmdTaskState = &cobra.Command{
 }

 func taskCreate(cmd *cobra.Command, args []string) {
-	log.Info().Msgf("TODO: create task")
+	logger.Info("TODO: create task")
 }

 func taskState(cmd *cobra.Command, args []string) {
-	log.Info().Msgf("TODO: check task state")
+	logger.Info("TODO: check task state")
 }
@@ -25,15 +25,17 @@ import (
 	"fmt"

 	"github.com/arangodb/go-driver"
+	"github.com/arangodb/kube-arangodb/pkg/logging"
 	"github.com/arangodb/kube-arangodb/pkg/util/kclient"
-	"github.com/rs/zerolog"
 	authorization "k8s.io/api/authorization/v1"
 	apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	"k8s.io/apimachinery/pkg/api/errors"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

-func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) {
+var logger = logging.Global().RegisterAndGetLogger("crd", logging.Info)
+
+func EnsureCRD(ctx context.Context, client kclient.Client) {
 	crdsLock.Lock()
 	defer crdsLock.Unlock()

@@ -41,21 +43,21 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) {
 		getAccess := verifyCRDAccess(ctx, client, crd, "get")

 		if !getAccess.Allowed {
-			log.Info().Str("crd", crd).Msgf("Get Operations is not allowed. Continue")
+			logger.Str("crd", crd).Info("Get Operations is not allowed. Continue")
 			continue
 		}

 		c, err := client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd, meta.GetOptions{})
 		if err != nil {
 			if !errors.IsNotFound(err) {
-				log.Warn().Err(err).Str("crd", crd).Msgf("Get Operations is not allowed due to error. Continue")
+				logger.Err(err).Str("crd", crd).Warn("Get Operations is not allowed due to error. Continue")
 				continue
 			}

 			createAccess := verifyCRDAccess(ctx, client, crd, "create")

 			if !createAccess.Allowed {
-				log.Info().Str("crd", crd).Msgf("Create Operations is not allowed but CRD is missing. Continue")
+				logger.Str("crd", crd).Info("Create Operations is not allowed but CRD is missing. Continue")
 				continue
 			}

@@ -70,18 +72,18 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) {
 			}

 			if _, err := client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Create(ctx, c, meta.CreateOptions{}); err != nil {
-				log.Warn().Err(err).Str("crd", crd).Msgf("Create Operations is not allowed due to error. Continue")
+				logger.Err(err).Str("crd", crd).Warn("Create Operations is not allowed due to error. Continue")
 				continue
 			}

-			log.Info().Str("crd", crd).Msgf("CRD Created")
+			logger.Str("crd", crd).Info("CRD Created")
 			continue
 		}

 		updateAccess := verifyCRDAccess(ctx, client, crd, "update")

 		if !updateAccess.Allowed {
-			log.Info().Str("crd", crd).Msgf("Update Operations is not allowed. Continue")
+			logger.Str("crd", crd).Info("Update Operations is not allowed. Continue")
 			continue
 		}

@@ -92,7 +94,7 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) {
 		if v, ok := c.ObjectMeta.Labels[Version]; ok {
 			if v != "" {
 				if !isUpdateRequired(spec.version, driver.Version(v)) {
-					log.Info().Str("crd", crd).Msgf("CRD Update not required")
+					logger.Str("crd", crd).Info("CRD Update not required")
 					continue
 				}
 			}
@@ -103,10 +105,10 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) {
 		c.Spec = spec.spec

 		if _, err := client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Update(ctx, c, meta.UpdateOptions{}); err != nil {
-			log.Warn().Err(err).Str("crd", crd).Msgf("Create Operations is not allowed due to error. Continue")
+			logger.Err(err).Str("crd", crd).Warn("Create Operations is not allowed due to error. Continue")
 			continue
 		}
-		log.Info().Str("crd", crd).Msgf("CRD Updated")
+		logger.Str("crd", crd).Info("CRD Updated")
 	}
 }

@@ -25,7 +25,6 @@ import (
 	"testing"

 	"github.com/arangodb/kube-arangodb/pkg/util/kclient"
-	"github.com/rs/zerolog/log"
 	"github.com/stretchr/testify/require"
 )

@@ -34,6 +33,6 @@ func Test_Apply(t *testing.T) {
 		c, ok := kclient.GetDefaultFactory().Client()
 		require.True(t, ok)

-		EnsureCRD(context.Background(), log.Logger, c)
+		EnsureCRD(context.Background(), c)
 	})
 }
@@ -47,7 +47,7 @@ const (
 // createAccessPackages creates a arangosync access packages specified
 // in spec.sync.externalAccess.accessPackageSecretNames.
 func (d *Deployment) createAccessPackages(ctx context.Context) error {
-	log := d.deps.Log
+	log := d.sectionLogger("access-package")
 	spec := d.apiObject.Spec

 	if !spec.Sync.IsEnabled() {
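The deployment code swaps its shared dependency logger (d.deps.Log) for per-section sub-loggers. Only the call site above is verbatim; the helper itself is not part of this diff, so the following is a hypothetical sketch of its shape:

    // Hypothetical helper (assumption, not shown in this commit): derive a
    // sub-logger that tags every event with the code path it came from.
    func (d *Deployment) sectionLogger(section string) logging.Logger {
    	return d.log.Str("section", section) // assumes Str returns a chainable logger, as seen elsewhere in the diff
    }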
@@ -78,12 +78,12 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error {
 			})
 		})
 		if err != nil && !k8sutil.IsNotFound(err) {
-			// Not serious enough to stop everything now, just log and create an event
-			log.Warn().Err(err).Msg("Failed to remove obsolete access package secret")
+			// Not serious enough to stop everything now, just sectionLogger and create an event
+			log.Err(err).Warn("Failed to remove obsolete access package secret")
 			d.CreateEvent(k8sutil.NewErrorEvent("Access Package cleanup failed", err, d.apiObject))
 		} else {
 			// Access package removed, notify user
-			log.Info().Str("secret-name", secret.GetName()).Msg("Removed access package Secret")
+			log.Str("secret-name", secret.GetName()).Info("Removed access package Secret")
 			d.CreateEvent(k8sutil.NewAccessPackageDeletedEvent(d.apiObject, secret.GetName()))
 		}
 	}
@@ -97,7 +97,7 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error {
 // ensureAccessPackage creates an arangosync access package with given name
 // it is does not already exist.
 func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error {
-	log := d.deps.Log
+	log := d.sectionLogger("access-package")
 	spec := d.apiObject.Spec

 	_, err := d.acs.CurrentClusterCache().Secret().V1().Read().Get(ctx, apSecretName, metav1.GetOptions{})
@@ -105,7 +105,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error {
 		// Secret already exists
 		return nil
 	} else if !k8sutil.IsNotFound(err) {
-		log.Debug().Err(err).Str("name", apSecretName).Msg("Failed to get arangosync access package secret")
+		log.Err(err).Str("name", apSecretName).Debug("Failed to get arangosync access package secret")
 		return errors.WithStack(err)
 	}

@@ -113,7 +113,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error {
 	clientAuthSecretName := spec.Sync.Authentication.GetClientCASecretName()
 	clientAuthCert, clientAuthKey, _, err := k8sutil.GetCASecret(ctx, d.acs.CurrentClusterCache().Secret().V1().Read(), clientAuthSecretName, nil)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to get client-auth CA secret")
+		log.Err(err).Debug("Failed to get client-auth CA secret")
 		return errors.WithStack(err)
 	}

@@ -121,14 +121,14 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error {
 	tlsCASecretName := spec.Sync.TLS.GetCASecretName()
 	tlsCACert, err := k8sutil.GetCACertficateSecret(ctx, d.acs.CurrentClusterCache().Secret().V1().Read(), tlsCASecretName)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to get TLS CA secret")
+		log.Err(err).Debug("Failed to get TLS CA secret")
 		return errors.WithStack(err)
 	}

 	// Create keyfile
 	ca, err := certificates.LoadCAFromPEM(clientAuthCert, clientAuthKey)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to parse client-auth CA")
+		log.Err(err).Debug("Failed to parse client-auth CA")
 		return errors.WithStack(err)
 	}

@@ -140,7 +140,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error {
 	}
 	cert, key, err := certificates.CreateCertificate(options, &ca)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to create client-auth keyfile")
+		log.Err(err).Debug("Failed to create client-auth keyfile")
 		return errors.WithStack(err)
 	}
 	keyfile := strings.TrimSpace(cert) + "\n" + strings.TrimSpace(key)
@@ -182,12 +182,12 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error {
 	// Serialize secrets
 	keyfileYaml, err := yaml.Marshal(keyfileSecret)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to encode client-auth keyfile Secret")
+		log.Err(err).Debug("Failed to encode client-auth keyfile Secret")
 		return errors.WithStack(err)
 	}
 	tlsCAYaml, err := yaml.Marshal(tlsCASecret)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to encode TLS CA Secret")
+		log.Err(err).Debug("Failed to encode TLS CA Secret")
 		return errors.WithStack(err)
 	}
 	allYaml := strings.TrimSpace(string(keyfileYaml)) + "\n---\n" + strings.TrimSpace(string(tlsCAYaml))
@ -211,12 +211,12 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
|
||||||
})
|
})
|
||||||
if err != nil {
|
if err != nil {
|
||||||
// Failed to create secret
|
// Failed to create secret
|
||||||
log.Debug().Err(err).Str("secret-name", apSecretName).Msg("Failed to create access package Secret")
|
log.Err(err).Str("secret-name", apSecretName).Debug("Failed to create access package Secret")
|
||||||
return errors.WithStack(err)
|
return errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
// Write log entry & create event
|
// Write sectionLogger entry & create event
|
||||||
log.Info().Str("secret-name", apSecretName).Msg("Created access package Secret")
|
log.Str("secret-name", apSecretName).Info("Created access package Secret")
|
||||||
d.CreateEvent(k8sutil.NewAccessPackageCreatedEvent(d.apiObject, apSecretName))
|
d.CreateEvent(k8sutil.NewAccessPackageCreatedEvent(d.apiObject, apSecretName))
|
||||||
|
|
||||||
return nil
|
return nil
|
||||||
|
|
|
@@ -25,27 +25,38 @@ import (
     "math/rand"
     "time"

-    "github.com/rs/zerolog"
-    "github.com/rs/zerolog/log"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"

+    "github.com/arangodb/kube-arangodb/pkg/logging"
     "github.com/arangodb/kube-arangodb/pkg/util/errors"
+
+    "github.com/rs/zerolog"
 )

+var (
+    chaosMonkeyLogger = logging.Global().RegisterAndGetLogger("chaos-monkey", logging.Info)
+)

 // Monkey is the service that introduces chaos in the deployment
 // if allowed and enabled.
 type Monkey struct {
-    log     zerolog.Logger
-    context Context
+    namespace, name string
+    log             logging.Logger
+    context         Context
 }

+func (m Monkey) WrapLogger(in *zerolog.Event) *zerolog.Event {
+    return in.Str("namespace", m.namespace).Str("name", m.name)
+}
+
 // NewMonkey creates a new chaos monkey with given context.
-func NewMonkey(log zerolog.Logger, context Context) *Monkey {
-    log = log.With().Str("component", "chaos-monkey").Logger()
-    return &Monkey{
-        log:     log,
-        context: context,
+func NewMonkey(namespace, name string, context Context) *Monkey {
+    m := &Monkey{
+        context:   context,
+        namespace: namespace,
+        name:      name,
     }
+    m.log = chaosMonkeyLogger.WrapObj(m)
+    return m
 }

 // Run the monkey until the given channel is closed.

@@ -61,7 +72,7 @@ func (m Monkey) Run(stopCh <-chan struct{}) {
     if rand.Float64() < chance {
         // Let's introduce pod chaos
         if err := m.killRandomPod(ctx); err != nil {
-            log.Info().Err(err).Msg("Failed to kill random pod")
+            m.log.Err(err).Info("Failed to kill random pod")
         }
     }
 }

@@ -87,7 +98,7 @@ func (m Monkey) killRandomPod(ctx context.Context) error {
         return nil
     }
     p := pods[rand.Intn(len(pods))]
-    m.log.Info().Str("pod-name", p.GetName()).Msg("Killing pod")
+    m.log.Str("pod-name", p.GetName()).Info("Killing pod")
     if err := m.context.DeletePod(ctx, p.GetName(), meta.DeleteOptions{}); err != nil {
         return errors.WithStack(err)
     }
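
The chaos-monkey hunks above capture the commit's core pattern: components no longer receive a zerolog.Logger through their constructor, they fetch a named logger once from a global registry via logging.Global().RegisterAndGetLogger("chaos-monkey", logging.Info). The sketch below is a minimal reconstruction of such a registry on top of zerolog; the names and behaviour are assumptions, not the actual pkg/logging implementation.

package main

import (
	"os"
	"sync"

	"github.com/rs/zerolog"
)

// registry maps a topic ("chaos-monkey", "deployment-ci", ...) to one
// zerolog.Logger so the level of each component can be tuned on its own.
type registry struct {
	mu      sync.Mutex
	loggers map[string]zerolog.Logger
}

// RegisterAndGetLogger creates (or returns) the logger for a topic,
// tagging every event with that topic name.
func (r *registry) RegisterAndGetLogger(topic string, level zerolog.Level) zerolog.Logger {
	r.mu.Lock()
	defer r.mu.Unlock()
	if l, ok := r.loggers[topic]; ok {
		return l
	}
	l := zerolog.New(os.Stdout).Level(level).With().
		Timestamp().Str("topic", topic).Logger()
	r.loggers[topic] = l
	return l
}

var global = &registry{loggers: map[string]zerolog.Logger{}}

func main() {
	log := global.RegisterAndGetLogger("chaos-monkey", zerolog.InfoLevel)
	log.Info().Str("pod-name", "agent-1").Msg("Killing pod")
}

Registering by topic lets the per-component log level be adjusted in one place instead of re-plumbing a logger through every constructor, which is exactly what the removed NewMonkey(log zerolog.Logger, ...) signature used to require.
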
@@ -38,14 +38,14 @@ import (

 // removePodFinalizers removes all finalizers from all pods owned by us.
 func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) (bool, error) {
-    log := d.deps.Log
+    log := d.sectionLogger("pod-finalizer")

     found := false

     if err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error {
-        log.Info().Str("pod", pod.GetName()).Msgf("Removing Pod Finalizer")
-        if count, err := k8sutil.RemovePodFinalizers(ctx, cachedStatus, log, d.PodsModInterface(), pod, constants.ManagedFinalizers(), true); err != nil {
-            log.Warn().Err(err).Msg("Failed to remove pod finalizers")
+        log.Str("pod", pod.GetName()).Info("Removing Pod Finalizer")
+        if count, err := k8sutil.RemovePodFinalizers(ctx, cachedStatus, d.PodsModInterface(), pod, constants.ManagedFinalizers(), true); err != nil {
+            log.Err(err).Warn("Failed to remove pod finalizers")
             return err
         } else if count > 0 {
             found = true

@@ -58,7 +58,7 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe
             GracePeriodSeconds: util.NewInt64(0),
         }); err != nil {
             if !k8sutil.IsNotFound(err) {
-                log.Warn().Err(err).Msg("Failed to remove pod")
+                log.Err(err).Warn("Failed to remove pod")
                 return err
             }
         }

@@ -72,14 +72,14 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe

 // removePVCFinalizers removes all finalizers from all PVCs owned by us.
 func (d *Deployment) removePVCFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) (bool, error) {
-    log := d.deps.Log
+    log := d.sectionLogger("pvc-finalizer")

     found := false

     if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(pvc *core.PersistentVolumeClaim) error {
-        log.Info().Str("pvc", pvc.GetName()).Msgf("Removing PVC Finalizer")
-        if count, err := k8sutil.RemovePVCFinalizers(ctx, cachedStatus, log, d.PersistentVolumeClaimsModInterface(), pvc, constants.ManagedFinalizers(), true); err != nil {
-            log.Warn().Err(err).Msg("Failed to remove PVC finalizers")
+        log.Str("pvc", pvc.GetName()).Info("Removing PVC Finalizer")
+        if count, err := k8sutil.RemovePVCFinalizers(ctx, cachedStatus, d.PersistentVolumeClaimsModInterface(), pvc, constants.ManagedFinalizers(), true); err != nil {
+            log.Err(err).Warn("Failed to remove PVC finalizers")
             return err
         } else if count > 0 {
             found = true
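
Both finalizer helpers now draw their logger from d.sectionLogger(...), which is defined outside this diff. A minimal sketch of what such a helper could look like, under the assumption that it merely tags entries with a section field (the real helper returns the operator's own logging.Logger type, not a raw zerolog.Logger):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

type Deployment struct {
	log zerolog.Logger
}

// sectionLogger tags every entry from one logical phase of the
// reconciliation, so "pod-finalizer" and "pvc-finalizer" output can be
// filtered apart. Assumed shape; not taken from this diff.
func (d *Deployment) sectionLogger(section string) zerolog.Logger {
	return d.log.With().Str("section", section).Logger()
}

func main() {
	d := &Deployment{log: zerolog.New(os.Stdout)}
	log := d.sectionLogger("pod-finalizer")
	log.Info().Str("pod", "agent-1").Msg("Removing Pod Finalizer")
}
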
@@ -33,15 +33,18 @@ import (
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
+    "github.com/arangodb/kube-arangodb/pkg/logging"
     "github.com/arangodb/kube-arangodb/pkg/util"
     "github.com/arangodb/kube-arangodb/pkg/util/arangod"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 )

+var ciLogger = logging.Global().RegisterAndGetLogger("deployment-ci", logging.Info)
+
 // clusterScalingIntegration is a helper to communicate with the clusters
 // scaling UI.
 type clusterScalingIntegration struct {
-    log  zerolog.Logger
+    log  logging.Logger
     depl *Deployment
     pendingUpdate struct {
         mutex sync.Mutex

@@ -57,6 +60,10 @@ type clusterScalingIntegration struct {
     }
 }

+func (ci *clusterScalingIntegration) WrapLogger(in *zerolog.Event) *zerolog.Event {
+    return in.Str("namespace", ci.depl.GetNamespace()).Str("name", ci.depl.Name())
+}
+
 const (
     maxClusterBootstrapTime = time.Minute * 2 // Time we allow a cluster bootstrap to take, before we can do cluster inspections.
 )

@@ -64,9 +71,9 @@ const (
 // newClusterScalingIntegration creates a new clusterScalingIntegration.
 func newClusterScalingIntegration(depl *Deployment) *clusterScalingIntegration {
     ci := &clusterScalingIntegration{
-        log:  depl.deps.Log,
         depl: depl,
     }
+    ci.log = ciLogger.WrapObj(ci)
     ci.scaleEnabled.enabled = true
     return ci
 }

@@ -108,13 +115,13 @@ func (ci *clusterScalingIntegration) checkScalingCluster(ctx context.Context, ex
     safeToAskCluster, err := ci.updateClusterServerCount(ctx, expectSuccess)
     if err != nil {
         if expectSuccess {
-            ci.log.Debug().Err(err).Msg("Cluster update failed")
+            ci.log.Err(err).Debug("Cluster update failed")
         }
     } else if safeToAskCluster {
         // Inspect once
         if err := ci.inspectCluster(ctx, expectSuccess); err != nil {
             if expectSuccess {
-                ci.log.Debug().Err(err).Msg("Cluster inspection failed")
+                ci.log.Err(err).Debug("Cluster inspection failed")
             }
         } else {
             return true

@@ -163,7 +170,7 @@ func (ci *clusterScalingIntegration) inspectCluster(ctx context.Context, expectS
     req, err := arangod.GetNumberOfServers(ctxChild, c.Connection())
     if err != nil {
         if expectSuccess {
-            log.Debug().Err(err).Msg("Failed to get number of servers")
+            log.Err(err).Debug("Failed to get number of servers")
         }
         return errors.WithStack(err)
     }

@@ -220,13 +227,13 @@ func (ci *clusterScalingIntegration) inspectCluster(ctx context.Context, expectS
     // min <= count <= max holds for the given server groups
     if err := newSpec.Validate(); err != nil {
         // Log failure & create event
-        log.Warn().Err(err).Msg("Validation of updated spec has failed")
+        log.Err(err).Warn("Validation of updated spec has failed")
         ci.depl.CreateEvent(k8sutil.NewErrorEvent("Validation failed", err, apiObject))
         // Restore original spec in cluster
         ci.SendUpdateToCluster(current.Spec)
     } else {
         if err := ci.depl.updateCRSpec(ctx, *newSpec); err != nil {
-            log.Warn().Err(err).Msg("Failed to update current deployment")
+            log.Err(err).Warn("Failed to update current deployment")
             return errors.WithStack(err)
         }
     }

@@ -269,7 +276,7 @@ func (ci *clusterScalingIntegration) updateClusterServerCount(ctx context.Contex
     if coordinatorCount != lastNumberOfServers.GetCoordinators() || dbserverCount != lastNumberOfServers.GetDBServers() {
         if err := ci.depl.SetNumberOfServers(ctx, coordinatorCountPtr, dbserverCountPtr); err != nil {
             if expectSuccess {
-                log.Debug().Err(err).Msg("Failed to set number of servers")
+                log.Err(err).Debug("Failed to set number of servers")
             }
             return false, errors.WithStack(err)
         }
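
The WrapLogger/WrapObj pair above is the mechanism that replaces the old log.With().Str(...) chains: the component implements WrapLogger, and the wrapped logger runs that decorator on every event, so namespace and name are attached automatically. A hedged sketch of that contract; the types here only mirror what the diff shows and are not the actual pkg/logging code.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// LoggerWrapper is the contract the diff relies on: a component
// decorates every event with its own contextual fields.
type LoggerWrapper interface {
	WrapLogger(in *zerolog.Event) *zerolog.Event
}

// Logger applies the decorator right before each event is emitted.
type Logger struct {
	base zerolog.Logger
	wrap LoggerWrapper
}

// WrapObj binds a decorator to the logger, mirroring ciLogger.WrapObj(ci).
func (l Logger) WrapObj(w LoggerWrapper) Logger {
	return Logger{base: l.base, wrap: w}
}

func (l Logger) Info(msg string) {
	l.wrap.WrapLogger(l.base.Info()).Msg(msg)
}

// ci stands in for clusterScalingIntegration.
type ci struct{ namespace, name string }

func (c ci) WrapLogger(in *zerolog.Event) *zerolog.Event {
	return in.Str("namespace", c.namespace).Str("name", c.name)
}

func main() {
	base := Logger{base: zerolog.New(os.Stdout)}
	log := base.WrapObj(ci{namespace: "default", name: "example"})
	log.Info("Cluster update failed") // carries namespace/name automatically
}

Because the decorator reads the fields at emit time, a component whose identity is known only after construction (as in newClusterScalingIntegration, where ci.log is set after ci exists) still logs the right context.
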
@@ -52,7 +52,6 @@ import (

     apiErrors "k8s.io/apimachinery/pkg/api/errors"

-    "github.com/rs/zerolog/log"
     core "k8s.io/api/core/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -75,6 +74,7 @@ import (
     serviceaccountv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1"
     servicemonitorv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1"
     "github.com/arangodb/kube-arangodb/pkg/util/kclient"
+    "github.com/rs/zerolog/log"
 )

 var _ resources.Context = &Deployment{}

@@ -146,10 +146,10 @@ func (d *Deployment) UpdateStatus(ctx context.Context, status api.DeploymentStat
 func (d *Deployment) updateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error {
     if d.status.version != lastVersion {
         // Status is obsolete
-        d.deps.Log.Error().
+        d.log.
             Int32("expected-version", lastVersion).
             Int32("actual-version", d.status.version).
-            Msg("UpdateStatus version conflict error.")
+            Error("UpdateStatus version conflict error.")
         return errors.WithStack(errors.Newf("Status conflict error. Expected version %d, got %d", lastVersion, d.status.version))
     }

@@ -174,7 +174,7 @@ func (d *Deployment) UpdateMember(ctx context.Context, member api.MemberStatus)
         return errors.WithStack(err)
     }
     if err := d.UpdateStatus(ctx, status, lastVersion); err != nil {
-        d.deps.Log.Debug().Err(err).Msg("Updating CR status failed")
+        d.log.Err(err).Debug("Updating CR status failed")
         return errors.WithStack(err)
     }
     return nil

@@ -307,7 +307,7 @@ func (d *Deployment) getJWTFolderToken() (string, bool) {
     if i := d.apiObject.Status.CurrentImage; i == nil || features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) {
         s, err := d.GetCachedStatus().Secret().V1().Read().Get(context.Background(), pod.JWTSecretFolder(d.GetName()), meta.GetOptions{})
         if err != nil {
-            d.deps.Log.Error().Err(err).Msgf("Unable to get secret")
+            d.log.Err(err).Error("Unable to get secret")
             return "", false
         }

@@ -344,11 +344,10 @@ func (d *Deployment) getJWTToken() (string, bool) {
 // GetSyncServerClient returns a cached client for a specific arangosync server.
 func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGroup, id string) (client.API, error) {
     // Fetch monitoring token
-    log := d.deps.Log
     secretName := d.apiObject.Spec.Sync.Monitoring.GetTokenSecretName()
     monitoringToken, err := k8sutil.GetTokenSecret(ctx, d.GetCachedStatus().Secret().V1().Read(), secretName)
     if err != nil {
-        log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync monitoring secret")
+        d.log.Err(err).Str("secret-name", secretName).Debug("Failed to get sync monitoring secret")
         return nil, errors.WithStack(err)
     }

@@ -368,7 +367,8 @@ func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGr
     }
     auth := client.NewAuthentication(tlsAuth, "")
     insecureSkipVerify := true
-    c, err := d.syncClientCache.GetClient(d.deps.Log, source, auth, insecureSkipVerify)
+    // TODO: Change logging system in sync client
+    c, err := d.syncClientCache.GetClient(log.Logger, source, auth, insecureSkipVerify)
     if err != nil {
         return nil, errors.WithStack(err)
     }

@@ -378,11 +378,10 @@ func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGr
 // CreateMember adds a new member to the given group.
 // If ID is non-empty, it will be used, otherwise a new ID is created.
 func (d *Deployment) CreateMember(ctx context.Context, group api.ServerGroup, id string, mods ...reconcile.CreateMemberMod) (string, error) {
-    log := d.deps.Log
     if err := d.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) {
-        nid, err := createMember(log, s, group, id, d.apiObject, mods...)
+        nid, err := d.createMember(s, group, id, d.apiObject, mods...)
         if err != nil {
-            log.Debug().Err(err).Str("group", group.AsRole()).Msg("Failed to create member")
+            d.log.Err(err).Str("group", group.AsRole()).Debug("Failed to create member")
             return false, errors.WithStack(err)
         }

@@ -407,12 +406,12 @@ func (d *Deployment) GetPod(ctx context.Context, podName string) (*core.Pod, err
 // DeletePod deletes a pod with given name in the namespace
 // of the deployment. If the pod does not exist, the error is ignored.
 func (d *Deployment) DeletePod(ctx context.Context, podName string, options meta.DeleteOptions) error {
-    log := d.deps.Log
+    log := d.log
     err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
         return d.PodsModInterface().Delete(ctxChild, podName, options)
     })
     if err != nil && !k8sutil.IsNotFound(err) {
-        log.Debug().Err(err).Str("pod", podName).Msg("Failed to remove pod")
+        log.Err(err).Str("pod", podName).Debug("Failed to remove pod")
         return errors.WithStack(err)
     }
     return nil

@@ -421,7 +420,7 @@ func (d *Deployment) DeletePod(ctx context.Context, podName string, options meta
 // CleanupPod deletes a given pod with force and explicit UID.
 // If the pod does not exist, the error is ignored.
 func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error {
-    log := d.deps.Log
+    log := d.log
     podName := p.GetName()
     options := meta.NewDeleteOptions(0)
     options.Preconditions = meta.NewUIDPreconditions(string(p.GetUID()))

@@ -429,7 +428,7 @@ func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error {
         return d.PodsModInterface().Delete(ctxChild, podName, *options)
     })
     if err != nil && !k8sutil.IsNotFound(err) {
-        log.Debug().Err(err).Str("pod", podName).Msg("Failed to cleanup pod")
+        log.Err(err).Str("pod", podName).Debug("Failed to cleanup pod")
         return errors.WithStack(err)
     }
     return nil

@@ -438,8 +437,6 @@ func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error {
 // RemovePodFinalizers removes all the finalizers from the Pod with given name in the namespace
 // of the deployment. If the pod does not exist, the error is ignored.
 func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) error {
-    log := d.deps.Log
-
     ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
     defer cancel()
     p, err := d.GetCachedStatus().Pod().V1().Read().Get(ctxChild, podName, meta.GetOptions{})

@@ -450,7 +447,7 @@ func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) er
         return errors.WithStack(err)
     }

-    _, err = k8sutil.RemovePodFinalizers(ctx, d.GetCachedStatus(), log, d.PodsModInterface(), p, p.GetFinalizers(), true)
+    _, err = k8sutil.RemovePodFinalizers(ctx, d.GetCachedStatus(), d.PodsModInterface(), p, p.GetFinalizers(), true)
     if err != nil {
         return errors.WithStack(err)
     }

@@ -460,12 +457,12 @@ func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) er
 // DeletePvc deletes a persistent volume claim with given name in the namespace
 // of the deployment. If the pvc does not exist, the error is ignored.
 func (d *Deployment) DeletePvc(ctx context.Context, pvcName string) error {
-    log := d.deps.Log
+    log := d.log
     err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
         return d.PersistentVolumeClaimsModInterface().Delete(ctxChild, pvcName, meta.DeleteOptions{})
     })
     if err != nil && !k8sutil.IsNotFound(err) {
-        log.Debug().Err(err).Str("pvc", pvcName).Msg("Failed to remove pvc")
+        log.Err(err).Str("pvc", pvcName).Debug("Failed to remove pvc")
         return errors.WithStack(err)
     }
     return nil

@@ -509,7 +506,7 @@ func (d *Deployment) GetPvc(ctx context.Context, pvcName string) (*core.Persiste

     pvc, err := d.GetCachedStatus().PersistentVolumeClaim().V1().Read().Get(ctxChild, pvcName, meta.GetOptions{})
     if err != nil {
-        log.Debug().Err(err).Str("pvc-name", pvcName).Msg("Failed to get PVC")
+        d.log.Err(err).Str("pvc-name", pvcName).Debug("Failed to get PVC")
         return nil, errors.WithStack(err)
     }
     return pvc, nil
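
Throughout this file the call order flips: zerolog picks the level first (log.Debug().Err(err).Msg(...)), while the new wrapper chains fields first and makes the level method terminal (d.log.Err(err).Debug(...)), absorbing the old Msgf formatting into the level call. A minimal sketch of a wrapper with that shape, under assumed types that are not part of this diff:

package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

// Logger accumulates fields; the level method is terminal and emits.
type Logger struct {
	base zerolog.Logger
}

func (l Logger) Err(err error) Logger {
	return Logger{base: l.base.With().AnErr("error", err).Logger()}
}

func (l Logger) Str(key, value string) Logger {
	return Logger{base: l.base.With().Str(key, value).Logger()}
}

// Debug chooses the severity last and takes printf-style arguments,
// which is why Msgf calls collapse into the level call in the diff.
func (l Logger) Debug(format string, args ...interface{}) {
	l.base.Debug().Msgf(format, args...)
}

func main() {
	log := Logger{base: zerolog.New(os.Stdout)}
	// zerolog order: log.Debug().Err(err).Str("pod", n).Msg("...")
	// wrapped order: log.Err(err).Str("pod", n).Debug("...")
	log.Err(errors.New("boom")).Str("pod", "agent-1").Debug("Failed to remove pod")
}

A side effect visible in hunks like handleArangoDeploymentUpdatedEvent: because Str returns a Logger rather than an event, a partially applied logger (log := d.log.Str("deployment", ...)) can be stored and reused across many calls.
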
@@ -27,7 +27,6 @@ import (
     "sync/atomic"
     "time"

-    "github.com/rs/zerolog"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/client-go/tools/record"

@@ -48,6 +47,7 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/deployment/resilience"
     "github.com/arangodb/kube-arangodb/pkg/deployment/resources"
     "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector"
+    "github.com/arangodb/kube-arangodb/pkg/logging"
     "github.com/arangodb/kube-arangodb/pkg/operator/scope"
     "github.com/arangodb/kube-arangodb/pkg/util"
     "github.com/arangodb/kube-arangodb/pkg/util/arangod"

@@ -72,7 +72,6 @@ type Config struct {

 // Dependencies holds dependent services for a Deployment
 type Dependencies struct {
-    Log           zerolog.Logger
     EventRecorder record.EventRecorder

     Client kclient.Client

@@ -104,6 +103,8 @@ type deploymentStatusObject struct {

 // Deployment is the in process state of an ArangoDeployment.
 type Deployment struct {
+    log logging.Logger
+
     name      string
     namespace string

@@ -237,14 +238,16 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De
         acs: acs.NewACS(apiObject.GetUID(), i),
     }

+    d.log = logger.WrapObj(d)
+
     d.memberState = memberState.NewStateInspector(d)

     d.clientCache = deploymentClient.NewClientCache(d, conn.NewFactory(d.getAuth, d.getConnConfig))

     d.status.last = *(apiObject.Status.DeepCopy())
-    d.reconciler = reconcile.NewReconciler(deps.Log, d)
-    d.resilience = resilience.NewResilience(deps.Log, d)
-    d.resources = resources.NewResources(deps.Log, d)
+    d.reconciler = reconcile.NewReconciler(apiObject.GetNamespace(), apiObject.GetName(), d)
+    d.resilience = resilience.NewResilience(apiObject.GetNamespace(), apiObject.GetName(), d)
+    d.resources = resources.NewResources(apiObject.GetNamespace(), apiObject.GetName(), d)
     if d.status.last.AcceptedSpec == nil {
         // We've validated the spec, so let's use it from now.
         d.status.last.AcceptedSpec = apiObject.Spec.DeepCopy()

@@ -264,7 +267,7 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De
         go ci.ListenForClusterEvents(d.stopCh)
     }
     if config.AllowChaos {
-        d.chaosMonkey = chaos.NewMonkey(deps.Log, d)
+        d.chaosMonkey = chaos.NewMonkey(apiObject.GetNamespace(), apiObject.GetName(), d)
         go d.chaosMonkey.Run(d.stopCh)
     }

@@ -283,7 +286,7 @@ func (d *Deployment) Update(apiObject *api.ArangoDeployment) {
 // Delete the deployment.
 // Called when the deployment was deleted by the user.
 func (d *Deployment) Delete() {
-    d.deps.Log.Info().Msg("deployment is deleted by user")
+    d.log.Info("deployment is deleted by user")
     if atomic.CompareAndSwapInt32(&d.stopped, 0, 1) {
         close(d.stopCh)
     }

@@ -295,10 +298,10 @@ func (d *Deployment) send(ev *deploymentEvent) {
     case d.eventCh <- ev:
         l, ecap := len(d.eventCh), cap(d.eventCh)
         if l > int(float64(ecap)*0.8) {
-            d.deps.Log.Warn().
+            d.log.
                 Int("used", l).
                 Int("capacity", ecap).
-                Msg("event queue buffer is almost full")
+                Warn("event queue buffer is almost full")
         }
     case <-d.stopCh:
     }

@@ -308,7 +311,7 @@ func (d *Deployment) send(ev *deploymentEvent) {
 // It processes the event queue and polls the state of generated
 // resource on a regular basis.
 func (d *Deployment) run() {
-    log := d.deps.Log
+    log := d.log

     // Create agency mapping
     if err := d.createAgencyMapping(context.TODO()); err != nil {

@@ -331,32 +334,32 @@ func (d *Deployment) run() {
         status, lastVersion := d.GetStatus()
         status.Phase = api.DeploymentPhaseRunning
         if err := d.UpdateStatus(context.TODO(), status, lastVersion); err != nil {
-            log.Warn().Err(err).Msg("update initial CR status failed")
+            log.Err(err).Warn("update initial CR status failed")
         }
-        log.Info().Msg("start running...")
+        log.Info("start running...")
     }

     d.lookForServiceMonitorCRD()

     // Execute inspection for first time without delay of 10s
-    log.Debug().Msg("Initially inspect deployment...")
+    log.Debug("Initially inspect deployment...")
     inspectionInterval := d.inspectDeployment(minInspectionInterval)
-    log.Debug().Str("interval", inspectionInterval.String()).Msg("...deployment inspect started")
+    log.Str("interval", inspectionInterval.String()).Debug("...deployment inspect started")

     for {
         select {
         case <-d.stopCh:
             err := d.acs.CurrentClusterCache().Refresh(context.Background())
             if err != nil {
-                log.Error().Err(err).Msg("Unable to get resources")
+                log.Err(err).Error("Unable to get resources")
             }
             // Remove finalizers from created resources
-            log.Info().Msg("Deployment removed, removing finalizers to prevent orphaned resources")
+            log.Info("Deployment removed, removing finalizers to prevent orphaned resources")
             if _, err := d.removePodFinalizers(context.TODO(), d.GetCachedStatus()); err != nil {
-                log.Warn().Err(err).Msg("Failed to remove Pod finalizers")
+                log.Err(err).Warn("Failed to remove Pod finalizers")
             }
             if _, err := d.removePVCFinalizers(context.TODO(), d.GetCachedStatus()); err != nil {
-                log.Warn().Err(err).Msg("Failed to remove PVC finalizers")
+                log.Err(err).Warn("Failed to remove PVC finalizers")
             }
             // We're being stopped.
             return

@@ -371,9 +374,9 @@ func (d *Deployment) run() {
         }

         case <-d.inspectTrigger.Done():
-            log.Debug().Msg("Inspect deployment...")
+            log.Debug("Inspect deployment...")
             inspectionInterval = d.inspectDeployment(inspectionInterval)
-            log.Debug().Str("interval", inspectionInterval.String()).Msg("...inspected deployment")
+            log.Str("interval", inspectionInterval.String()).Debug("...inspected deployment")

         case <-d.inspectCRDTrigger.Done():
             d.lookForServiceMonitorCRD()

@@ -394,7 +397,7 @@ func (d *Deployment) run() {

 // handleArangoDeploymentUpdatedEvent is called when the deployment is updated by the user.
 func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) error {
-    log := d.deps.Log.With().Str("deployment", d.apiObject.GetName()).Logger()
+    log := d.log.Str("deployment", d.apiObject.GetName())

     // Get the most recent version of the deployment from the API server
     ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)

@@ -402,7 +405,7 @@ func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) err

     current, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.apiObject.GetNamespace()).Get(ctxChild, d.apiObject.GetName(), meta.GetOptions{})
     if err != nil {
-        log.Debug().Err(err).Msg("Failed to get current version of deployment from API server")
+        log.Err(err).Debug("Failed to get current version of deployment from API server")
         if k8sutil.IsNotFound(err) {
             return nil
         }

@@ -420,21 +423,21 @@ func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) err

     resetFields := specBefore.ResetImmutableFields(&newAPIObject.Spec)
     if len(resetFields) > 0 {
-        log.Debug().Strs("fields", resetFields).Msg("Found modified immutable fields")
+        log.Strs("fields", resetFields...).Debug("Found modified immutable fields")
         newAPIObject.Spec.SetDefaults(d.apiObject.GetName())
     }
     if err := newAPIObject.Spec.Validate(); err != nil {
         d.CreateEvent(k8sutil.NewErrorEvent("Validation failed", err, d.apiObject))
         // Try to reset object
         if err := d.updateCRSpec(ctx, d.apiObject.Spec); err != nil {
-            log.Error().Err(err).Msg("Restore original spec failed")
+            log.Err(err).Error("Restore original spec failed")
             d.CreateEvent(k8sutil.NewErrorEvent("Restore original failed", err, d.apiObject))
         }
         return nil
     }
     if len(resetFields) > 0 {
         for _, fieldName := range resetFields {
-            log.Debug().Str("field", fieldName).Msg("Reset immutable field")
+            log.Str("field", fieldName).Debug("Reset immutable field")
             d.CreateEvent(k8sutil.NewImmutableFieldEvent(fieldName, d.apiObject))
         }
     }

@@ -447,7 +450,7 @@ func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) err
     {
         status, lastVersion := d.GetStatus()
         if newAPIObject.Status.IsForceReload() {
-            log.Warn().Msg("Forced status reload!")
+            log.Warn("Forced status reload!")
             status = newAPIObject.Status
             status.ForceStatusReload = nil
         }

@@ -516,7 +519,7 @@ func (d *Deployment) updateCRStatus(ctx context.Context, force ...bool) error {
             continue
         }
         if err != nil {
-            d.deps.Log.Debug().Err(err).Msg("failed to patch ArangoDeployment status")
+            d.log.Err(err).Debug("failed to patch ArangoDeployment status")
             return errors.WithStack(errors.Newf("failed to patch ArangoDeployment status: %v", err))
         }
     }

@@ -529,7 +532,7 @@ func (d *Deployment) updateCRSpec(ctx context.Context, newSpec api.DeploymentSpe

     if len(force) == 0 || !force[0] {
         if d.apiObject.Spec.Equal(&newSpec) {
-            d.deps.Log.Debug().Msg("Nothing to update in updateCRSpec")
+            d.log.Debug("Nothing to update in updateCRSpec")
             // Nothing to update
             return nil
         }

@@ -572,7 +575,7 @@ func (d *Deployment) updateCRSpec(ctx context.Context, newSpec api.DeploymentSpe
         }
     }
     if err != nil {
-        d.deps.Log.Debug().Err(err).Msg("failed to patch ArangoDeployment spec")
+        d.log.Err(err).Debug("failed to patch ArangoDeployment spec")
         return errors.WithStack(errors.Newf("failed to patch ArangoDeployment spec: %v", err))
     }
 }

@@ -601,23 +604,23 @@ func (d *Deployment) lookForServiceMonitorCRD() {
     } else {
         _, err = d.deps.Client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "servicemonitors.monitoring.coreos.com", meta.GetOptions{})
     }
-    log := d.deps.Log
-    log.Debug().Msgf("Looking for ServiceMonitor CRD...")
+    log := d.log
+    log.Debug("Looking for ServiceMonitor CRD...")
     if err == nil {
         if !d.haveServiceMonitorCRD {
-            log.Info().Msgf("...have discovered ServiceMonitor CRD")
+            log.Info("...have discovered ServiceMonitor CRD")
         }
         d.haveServiceMonitorCRD = true
         d.triggerInspection()
         return
     } else if k8sutil.IsNotFound(err) {
         if d.haveServiceMonitorCRD {
-            log.Info().Msgf("...ServiceMonitor CRD no longer there")
+            log.Info("...ServiceMonitor CRD no longer there")
         }
         d.haveServiceMonitorCRD = false
         return
     }
-    log.Warn().Err(err).Msgf("Error when looking for ServiceMonitor CRD")
+    log.Err(err).Warn("Error when looking for ServiceMonitor CRD")
 }

 // SetNumberOfServers adjust number of DBservers and coordinators in arangod
@@ -29,7 +29,6 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    "github.com/rs/zerolog"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"

@@ -54,7 +53,6 @@ func ensureFinalizers(depl *api.ArangoDeployment) bool {

 // runDeploymentFinalizers goes through the list of ArangoDeployoment finalizers to see if they can be removed.
 func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
-    log := d.deps.Log
     var removalList []string

     depls := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.GetNamespace())

@@ -67,20 +65,20 @@ func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus i
     for _, f := range updated.ObjectMeta.GetFinalizers() {
         switch f {
         case constants.FinalizerDeplRemoveChildFinalizers:
-            log.Debug().Msg("Inspecting 'remove child finalizers' finalizer")
-            if retry, err := d.inspectRemoveChildFinalizers(ctx, log, updated, cachedStatus); err == nil && !retry {
+            d.log.Debug("Inspecting 'remove child finalizers' finalizer")
+            if retry, err := d.inspectRemoveChildFinalizers(ctx, updated, cachedStatus); err == nil && !retry {
                 removalList = append(removalList, f)
             } else if retry {
-                log.Debug().Str("finalizer", f).Msg("Retry on finalizer removal")
+                d.log.Str("finalizer", f).Debug("Retry on finalizer removal")
             } else {
-                log.Debug().Err(err).Str("finalizer", f).Msg("Cannot remove finalizer yet")
+                d.log.Err(err).Str("finalizer", f).Debug("Cannot remove finalizer yet")
             }
         }
     }
     // Remove finalizers (if needed)
     if len(removalList) > 0 {
-        if err := removeDeploymentFinalizers(ctx, log, d.deps.Client.Arango(), updated, removalList); err != nil {
-            log.Debug().Err(err).Msg("Failed to update ArangoDeployment (to remove finalizers)")
+        if err := removeDeploymentFinalizers(ctx, d.deps.Client.Arango(), updated, removalList); err != nil {
+            d.log.Err(err).Debug("Failed to update ArangoDeployment (to remove finalizers)")
             return errors.WithStack(err)
         }
     }

@@ -89,7 +87,7 @@ func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus i

 // inspectRemoveChildFinalizers checks the finalizer condition for remove-child-finalizers.
 // It returns nil if the finalizer can be removed.
-func (d *Deployment) inspectRemoveChildFinalizers(ctx context.Context, _ zerolog.Logger, _ *api.ArangoDeployment, cachedStatus inspectorInterface.Inspector) (bool, error) {
+func (d *Deployment) inspectRemoveChildFinalizers(ctx context.Context, _ *api.ArangoDeployment, cachedStatus inspectorInterface.Inspector) (bool, error) {
     retry := false

     if found, err := d.removePodFinalizers(ctx, cachedStatus); err != nil {

@@ -107,7 +105,7 @@ func (d *Deployment) inspectRemoveChildFinalizers(ctx context.Context, _ zerolog
 }

 // removeDeploymentFinalizers removes the given finalizers from the given PVC.
-func removeDeploymentFinalizers(ctx context.Context, log zerolog.Logger, cli versioned.Interface,
+func removeDeploymentFinalizers(ctx context.Context, cli versioned.Interface,
     depl *api.ArangoDeployment, finalizers []string) error {
     depls := cli.DatabaseV1().ArangoDeployments(depl.GetNamespace())
     getFunc := func() (metav1.Object, error) {

@@ -133,7 +131,7 @@ func removeDeploymentFinalizers(ctx context.Context, log zerolog.Logger, cli ver
         return nil
     }
     ignoreNotFound := false
-    if _, err := k8sutil.RemoveFinalizers(log, finalizers, getFunc, updateFunc, ignoreNotFound); err != nil {
+    if _, err := k8sutil.RemoveFinalizers(finalizers, getFunc, updateFunc, ignoreNotFound); err != nil {
         return errors.WithStack(err)
     }
     return nil
@@ -57,13 +57,12 @@ var (
 // - once in a while
 // Returns the delay until this function should be called again.
 func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval {
-    log := d.deps.Log
     start := time.Now()

     ctxReconciliation, cancelReconciliation := globals.GetGlobalTimeouts().Reconciliation().WithTimeout(context.Background())
     defer cancelReconciliation()
     defer func() {
-        d.deps.Log.Info().Msgf("Inspect loop took %s", time.Since(start))
+        d.log.Info("Inspect loop took %s", time.Since(start))
     }()

     nextInterval := lastInterval

@@ -74,7 +73,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval

     err := d.acs.CurrentClusterCache().Refresh(ctxReconciliation)
     if err != nil {
-        log.Error().Err(err).Msg("Unable to get resources")
+        d.log.Err(err).Error("Unable to get resources")
         return minInspectionInterval // Retry ASAP
     }

@@ -82,7 +81,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
     updated, err := d.acs.CurrentClusterCache().GetCurrentArangoDeployment()
     if k8sutil.IsNotFound(err) {
         // Deployment is gone
-        log.Info().Msg("Deployment is gone")
+        d.log.Info("Deployment is gone")
         d.Delete()
         return nextInterval
     } else if updated != nil && updated.GetDeletionTimestamp() != nil {

@@ -96,20 +95,20 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
     if updated != nil && updated.Annotations != nil {
         if v, ok := updated.Annotations[deployment.ArangoDeploymentPodMaintenanceAnnotation]; ok && v == "true" {
             // Disable checks if we will enter maintenance mode
-            log.Info().Str("deployment", deploymentName).Msg("Deployment in maintenance mode")
+            d.log.Str("deployment", deploymentName).Info("Deployment in maintenance mode")
             return nextInterval
         }
     }
     // Is the deployment in failed state, if so, give up.
     if d.GetPhase() == api.DeploymentPhaseFailed {
-        log.Debug().Msg("Deployment is in Failed state.")
+        d.log.Debug("Deployment is in Failed state.")
         return nextInterval
     }

     d.apiObject = updated

     d.GetMembersState().RefreshState(ctxReconciliation, updated.Status.Members.AsList())
-    d.GetMembersState().Log(d.deps.Log)
+    d.GetMembersState().Log(d.log)
     if err := d.WithStatusUpdateErr(ctxReconciliation, func(s *api.DeploymentStatus) (bool, error) {
         if changed, err := upgrade.RunUpgrade(*updated, s, d.GetCachedStatus()); err != nil {
             return false, err

@@ -153,7 +152,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
     t := time.Now()

     defer func() {
-        d.deps.Log.Info().Msgf("Reconciliation loop took %s", time.Since(t))
+        d.log.Info("Reconciliation loop took %s", time.Since(t))
     }()

     // Ensure that spec and status checksum are same

@@ -178,7 +177,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
     }

     if err := d.acs.Inspect(ctx, d.apiObject, d.deps.Client, d.GetCachedStatus()); err != nil {
-        d.deps.Log.Warn().Err(err).Msgf("Unable to handle ACS objects")
+        d.log.Err(err).Warn("Unable to handle ACS objects")
     }

     // Cleanup terminated pods on the beginning of loop

@@ -200,7 +199,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
         return minInspectionInterval, errors.Wrapf(err, "Service creation failed")
     }

-    if err := d.resources.EnsureSecrets(ctx, d.deps.Log, d.GetCachedStatus()); err != nil {
+    if err := d.resources.EnsureSecrets(ctx, d.GetCachedStatus()); err != nil {
         return minInspectionInterval, errors.Wrapf(err, "Secret creation failed")
     }

@@ -258,7 +257,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
     inspectDeploymentAgencyFetches.WithLabelValues(d.GetName()).Inc()
     if offset, err := d.RefreshAgencyCache(ctx); err != nil {
         inspectDeploymentAgencyErrors.WithLabelValues(d.GetName()).Inc()
-        d.deps.Log.Err(err).Msgf("Unable to refresh agency")
+        d.log.Err(err).Error("Unable to refresh agency")
     } else {
         inspectDeploymentAgencyIndex.WithLabelValues(d.GetName()).Set(float64(offset))
     }

@@ -278,10 +277,10 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
         }, true); err != nil {
             return minInspectionInterval, errors.Wrapf(err, "Unable clean plan")
         }
-    } else if err, updated := d.reconciler.CreatePlan(ctx, d.GetCachedStatus()); err != nil {
+    } else if err, updated := d.reconciler.CreatePlan(ctx); err != nil {
         return minInspectionInterval, errors.Wrapf(err, "Plan creation failed")
     } else if updated {
-        d.deps.Log.Info().Msgf("Plan generated, reconciling")
+        d.log.Info("Plan generated, reconciling")
         return minInspectionInterval, nil
     }

@@ -331,7 +330,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
     }

     // Execute current step of scale/update plan
-    retrySoon, err := d.reconciler.ExecutePlan(ctx, d.GetCachedStatus())
+    retrySoon, err := d.reconciler.ExecutePlan(ctx)
     if err != nil {
         return minInspectionInterval, errors.Wrapf(err, "Plan execution failed")
     }

@@ -420,14 +419,14 @@ func (d *Deployment) refreshMaintenanceTTL(ctx context.Context) {
             if err := d.SetAgencyMaintenanceMode(ctx, true); err != nil {
                 return
             }
-            d.deps.Log.Info().Msgf("Refreshed maintenance lock")
+            d.log.Info("Refreshed maintenance lock")
         }
     } else {
         if condition.LastUpdateTime.Add(d.apiObject.Spec.Timeouts.GetMaintenanceGracePeriod()).Before(time.Now()) {
             if err := d.SetAgencyMaintenanceMode(ctx, true); err != nil {
                 return
             }
-            d.deps.Log.Info().Msgf("Refreshed maintenance lock")
+            d.log.Info("Refreshed maintenance lock")
         }
     }
 }

@@ -475,7 +474,7 @@ func (d *Deployment) triggerCRDInspection() {
 }

 func (d *Deployment) updateConditionWithHash(ctx context.Context, conditionType api.ConditionType, status bool, reason, message, hash string) error {
-    d.deps.Log.Info().Str("condition", string(conditionType)).Bool("status", status).Str("reason", reason).Str("message", message).Str("hash", hash).Msg("Updated condition")
+    d.log.Str("condition", string(conditionType)).Bool("status", status).Str("reason", reason).Str("message", message).Str("hash", hash).Info("Updated condition")
|
||||||
if err := d.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
|
if err := d.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
|
||||||
return s.Conditions.UpdateWithHash(conditionType, status, reason, message, hash)
|
return s.Conditions.UpdateWithHash(conditionType, status, reason, message, hash)
|
||||||
}); err != nil {
|
}); err != nil {
|
||||||
|
|
|
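The change in this file is the commit's core pattern: a zerolog chain such as `d.deps.Log.Warn().Err(err).Msgf("...")` becomes `d.log.Err(err).Warn("...")`. Fields and errors are attached first, and the level becomes the terminal call, which also absorbs the printf-style arguments that previously went to `Msgf`. Below is a minimal sketch of a wrapper with that shape, assuming zerolog underneath; it is illustrative only, not the actual `pkg/logging` implementation.

```go
package logging

import "github.com/rs/zerolog"

// Logger is a sketch of a fluent wrapper: field setters return the wrapper,
// and the level methods terminate the chain. Simplified for illustration.
type Logger struct {
	base   zerolog.Logger
	fields []func(*zerolog.Event) *zerolog.Event
}

// Str queues a string field to be applied when the line is emitted.
func (l Logger) Str(key, value string) Logger {
	l.fields = append(l.fields, func(e *zerolog.Event) *zerolog.Event { return e.Str(key, value) })
	return l
}

// Err queues an error field, mirroring the d.log.Err(err).Warn(...) call sites.
func (l Logger) Err(err error) Logger {
	l.fields = append(l.fields, func(e *zerolog.Event) *zerolog.Event { return e.Err(err) })
	return l
}

func (l Logger) apply(e *zerolog.Event) *zerolog.Event {
	for _, f := range l.fields {
		e = f(e)
	}
	return e
}

// The level call is terminal and takes the format arguments directly.
func (l Logger) Info(msg string, args ...interface{})  { l.apply(l.base.Info()).Msgf(msg, args...) }
func (l Logger) Warn(msg string, args ...interface{})  { l.apply(l.base.Warn()).Msgf(msg, args...) }
func (l Logger) Debug(msg string, args ...interface{}) { l.apply(l.base.Debug()).Msgf(msg, args...) }
func (l Logger) Error(msg string, args ...interface{}) { l.apply(l.base.Error()).Msgf(msg, args...) }
```

Making the level call terminal means a half-built line does nothing until a level method fires, which is why every `Msg`/`Msgf` call site in this diff collapses into the level call.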
@@ -31,8 +31,6 @@ import (

"github.com/arangodb/kube-arangodb/pkg/util/errors"

-"github.com/rs/zerolog/log"
-
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
core "k8s.io/api/core/v1"
@@ -68,7 +66,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
errs := 0
for {
require.NoError(t, d.acs.CurrentClusterCache().Refresh(context.Background()))
-err := d.resources.EnsureSecrets(context.Background(), log.Logger, d.GetCachedStatus())
+err := d.resources.EnsureSecrets(context.Background(), d.GetCachedStatus())
if err == nil {
break
}
@@ -23,7 +23,6 @@ package deployment
import (
"context"
"fmt"
-"io/ioutil"
"os"
"path/filepath"
"sort"
@@ -34,7 +33,6 @@ import (
"github.com/arangodb/go-driver/jwt"
"github.com/arangodb/kube-arangodb/pkg/deployment/client"
monitoringFakeClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
-"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -475,7 +473,6 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara
arangoDeployment.Status.CurrentImage = &arangoDeployment.Status.Images[0]

deps := Dependencies{
-Log: zerolog.New(ioutil.Discard),
EventRecorder: eventRecorder,
Client: kclient.NewStaticClient(kubernetesClientSet, kubernetesExtClientSet, arangoClientSet, monitoringClientSet),
}
@@ -490,6 +487,7 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara
deps: deps,
eventCh: make(chan *deploymentEvent, deploymentEventQueueSize),
stopCh: make(chan struct{}),
+log: logger,
}
d.clientCache = client.NewClientCache(d, conn.NewFactory(d.getAuth, d.getConnConfig))
d.acs = acs.NewACS("", i)
@@ -497,7 +495,7 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara
require.NoError(t, d.acs.CurrentClusterCache().Refresh(context.Background()))

arangoDeployment.Spec.SetDefaults(arangoDeployment.GetName())
-d.resources = resources.NewResources(deps.Log, d)
+d.resources = resources.NewResources(arangoDeployment.GetNamespace(), arangoDeployment.GetName(), d)

return d, eventRecorder
}
@@ -27,7 +27,6 @@ import (
"strings"
"time"

-"github.com/rs/zerolog"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -37,6 +36,7 @@ import (
"github.com/arangodb/kube-arangodb/pkg/deployment/pod"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
"github.com/arangodb/kube-arangodb/pkg/handlers/utils"
+"github.com/arangodb/kube-arangodb/pkg/logging"
"github.com/arangodb/kube-arangodb/pkg/util/arangod"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
@@ -76,11 +76,11 @@ type ArangoSyncIdentity struct {
}

type imagesBuilder struct {
+Log logging.Logger
Context resources.Context
APIObject k8sutil.APIObject
Spec api.DeploymentSpec
Status api.DeploymentStatus
-Log zerolog.Logger
UpdateCRStatus func(status api.DeploymentStatus) error
}

@@ -93,7 +93,7 @@ func (d *Deployment) ensureImages(ctx context.Context, apiObject *api.ArangoDepl
APIObject: apiObject,
Spec: apiObject.Spec,
Status: status,
-Log: d.deps.Log,
+Log: d.log,
UpdateCRStatus: func(status api.DeploymentStatus) error {
if err := d.UpdateStatus(ctx, status, lastVersion); err != nil {
return errors.WithStack(err)
@@ -132,10 +132,9 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
role := shared.ImageIDAndVersionRole
id := fmt.Sprintf("%0x", sha1.Sum([]byte(image)))[:6]
podName := k8sutil.CreatePodName(ib.APIObject.GetName(), role, id, "")
-log := ib.Log.With().
+log := ib.Log.
Str("pod", podName).
-Str("image", image).
+Str("image", image)
-Logger()

// Check if pod exists
ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
@@ -150,20 +149,20 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
-log.Warn().Err(err).Msg("Failed to delete Image ID Pod")
+log.Err(err).Warn("Failed to delete Image ID Pod")
return false, nil
}
}
return false, nil
}
if !k8sutil.IsPodReady(pod) {
-log.Debug().Msg("Image ID Pod is not yet ready")
+log.Debug("Image ID Pod is not yet ready")
return true, nil
}

imageID, err := k8sutil.GetArangoDBImageIDFromPod(pod)
if err != nil {
-log.Warn().Err(err).Msg("failed to get image ID from pod")
+log.Err(err).Warn("failed to get image ID from pod")
return true, nil
}
if imageID == "" {
@@ -174,14 +173,14 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
// Try fetching the ArangoDB version
client, err := arangod.CreateArangodImageIDClient(ctx, ib.APIObject, pod.Status.PodIP)
if err != nil {
-log.Warn().Err(err).Msg("Failed to create Image ID Pod client")
+log.Err(err).Warn("Failed to create Image ID Pod client")
return true, nil
}
ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
defer cancel()
v, err := client.Version(ctxChild)
if err != nil {
-log.Debug().Err(err).Msg("Failed to fetch version from Image ID Pod")
+log.Err(err).Debug("Failed to fetch version from Image ID Pod")
return true, nil
}
version := v.Version
@@ -192,7 +191,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
-log.Warn().Err(err).Msg("Failed to delete Image ID Pod")
+log.Err(err).Warn("Failed to delete Image ID Pod")
return true, nil
}

@@ -204,14 +203,14 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
}
ib.Status.Images.AddOrUpdate(info)
if err := ib.UpdateCRStatus(ib.Status); err != nil {
-log.Warn().Err(err).Msg("Failed to save Image Info in CR status")
+log.Err(err).Warn("Failed to save Image Info in CR status")
return true, errors.WithStack(err)
}
// We're done
-log.Debug().
+log.
Str("image-id", imageID).
Str("arangodb-version", string(version)).
-Msg("Found image ID and ArangoDB version")
+Debug("Found image ID and ArangoDB version")
return false, nil
}

@@ -231,7 +230,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac

pod, err = resources.RenderArangoPod(ctx, cachedStatus, ib.APIObject, role, id, podName, &imagePod)
if err != nil {
-log.Debug().Err(err).Msg("Failed to render image ID pod")
+log.Err(err).Debug("Failed to render image ID pod")
return true, errors.WithStack(err)
}

@@ -240,7 +239,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
return err
})
if err != nil {
-log.Debug().Err(err).Msg("Failed to create image ID pod")
+log.Err(err).Debug("Failed to create image ID pod")
return true, errors.WithStack(err)
}
// Come back soon to inspect the pod
@@ -44,7 +44,6 @@ func (d *Deployment) listenForPodEvents(stopCh <-chan struct{}) {
}

rw := k8sutil.NewResourceWatcher(
-d.deps.Log,
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"pods",
d.apiObject.GetNamespace(),
@@ -89,7 +88,6 @@ func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) {
}

rw := k8sutil.NewResourceWatcher(
-d.deps.Log,
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"persistentvolumeclaims",
d.apiObject.GetNamespace(),
@@ -134,7 +132,6 @@ func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) {
}

rw := k8sutil.NewResourceWatcher(
-d.deps.Log,
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"secrets",
d.apiObject.GetNamespace(),
@@ -180,7 +177,6 @@ func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) {
}

rw := k8sutil.NewResourceWatcher(
-d.deps.Log,
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"services",
d.apiObject.GetNamespace(),
@@ -212,7 +208,6 @@ func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) {
// listenForCRDEvents keep listening for changes in CRDs until the given channel is closed.
func (d *Deployment) listenForCRDEvents(stopCh <-chan struct{}) {
rw := k8sutil.NewResourceWatcher(
-d.deps.Log,
d.deps.Client.KubernetesExtensions().ApiextensionsV1().RESTClient(),
"customresourcedefinitions",
"",
38
pkg/deployment/logger.go
Normal file
@@ -0,0 +1,38 @@
+//
+// DISCLAIMER
+//
+// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+
+package deployment
+
+import (
+"github.com/arangodb/kube-arangodb/pkg/logging"
+
+"github.com/rs/zerolog"
+)
+
+var (
+logger = logging.Global().RegisterAndGetLogger("deployment", logging.Info)
+)
+
+func (d *Deployment) sectionLogger(section string) logging.Logger {
+return d.log.Str("section", section)
+}
+
+func (d *Deployment) WrapLogger(in *zerolog.Event) *zerolog.Event {
+return in.Str("namespace", d.namespace).Str("name", d.name)
+}
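The new `logger.go` above is the template that packages follow after this commit: register a named logger once at package init, then derive per-object loggers from it (here `d.log`, with `sectionLogger` adding a `section` field). A hedged sketch of the same pattern in some other package; the component name and the `run` function are illustrative, not code from this commit:

```go
package mycomponent

import "github.com/arangodb/kube-arangodb/pkg/logging"

var (
	// Registered once per package; "my-component" is an illustrative name,
	// logging.Info is the default level as in the deployment package above.
	logger = logging.Global().RegisterAndGetLogger("my-component", logging.Info)
)

func run() {
	// Fields first, terminal level call last, as everywhere in this diff.
	logger.Str("section", "startup").Info("component started")
}
```

Registering through `logging.Global()` gives one registry that knows every named logger, which is presumably what allows per-component log levels to be adjusted centrally.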
@@ -29,6 +29,7 @@ import (
"github.com/arangodb/go-driver"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/reconciler"
+"github.com/arangodb/kube-arangodb/pkg/logging"
"github.com/arangodb/kube-arangodb/pkg/util/globals"
)

@@ -44,7 +45,7 @@ type StateInspector interface {

State() State

-Log(logger zerolog.Logger)
+Log(logger logging.Logger)
}

func NewStateInspector(client reconciler.DeploymentClient) StateInspector {
@@ -73,13 +74,13 @@ func (s *stateInspector) State() State {
return s.state
}

-func (s *stateInspector) Log(logger zerolog.Logger) {
+func (s *stateInspector) Log(log logging.Logger) {
s.lock.Lock()
defer s.lock.Unlock()

for m, s := range s.members {
if !s.IsReachable() {
-s.Log(logger.Info()).Str("member", m).Msgf("Member is in invalid state")
+log.WrapObj(s).Str("member", m).Info("Member is in invalid state")
}
}
}
@@ -211,6 +212,6 @@ func (s State) IsReachable() bool {
return s.NotReachableErr == nil
}

-func (s State) Log(event *zerolog.Event) *zerolog.Event {
+func (s State) WrapLogger(event *zerolog.Event) *zerolog.Event {
return event.Bool("reachable", s.IsReachable()).AnErr("reachableError", s.NotReachableErr)
}
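The rename of `State.Log` to `State.WrapLogger`, together with the new `log.WrapObj(s)` call site, suggests a small convention: an object that knows its own log fields implements `WrapLogger(*zerolog.Event) *zerolog.Event`, and the logging wrapper applies any such object to the line being built. A self-contained sketch of that convention; the `ObjWrapper` interface name is an assumption, since the diff shows only the method and the call site:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// ObjWrapper is an assumed name for the contract implied by WrapObj:
// the object decorates a pending event with its own fields.
type ObjWrapper interface {
	WrapLogger(event *zerolog.Event) *zerolog.Event
}

type memberState struct {
	reachable bool
	err       error
}

// WrapLogger mirrors State.WrapLogger from the diff above.
func (s memberState) WrapLogger(event *zerolog.Event) *zerolog.Event {
	return event.Bool("reachable", s.reachable).AnErr("reachableError", s.err)
}

func main() {
	log := zerolog.New(os.Stdout)
	s := memberState{reachable: false}
	// Equivalent in spirit to log.WrapObj(s).Str("member", m).Info(...):
	s.WrapLogger(log.Info()).Str("member", "AGNT-1").Msg("Member is in invalid state")
}
```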
@@ -31,7 +31,6 @@ import (

"github.com/arangodb/kube-arangodb/pkg/util/errors"

-"github.com/rs/zerolog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -83,8 +82,8 @@ func (d *Deployment) createAgencyMapping(ctx context.Context) error {
// createMember creates member and adds it to the applicable member list.
// Note: This does not create any pods of PVCs
// Note: The updated status is not yet written to the apiserver.
-func createMember(log zerolog.Logger, status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment, mods ...reconcile.CreateMemberMod) (string, error) {
+func (d *Deployment) createMember(status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment, mods ...reconcile.CreateMemberMod) (string, error) {
-m, err := renderMember(log, status, group, id, apiObject)
+m, err := d.renderMember(status, group, id, apiObject)
if err != nil {
return "", err
}
@@ -102,7 +101,7 @@ func createMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se
return m.ID, nil
}

-func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment) (*api.MemberStatus, error) {
+func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment) (*api.MemberStatus, error) {
if group == api.ServerGroupAgents {
if status.Agency == nil {
return nil, errors.New("Agency is not yet defined")
@@ -136,7 +135,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se

switch group {
case api.ServerGroupSingle:
-log.Debug().Str("id", id).Msg("Adding single server")
+d.log.Str("id", id).Debug("Adding single server")
return &api.MemberStatus{
ID: id,
UID: uuid.NewUUID(),
@@ -148,7 +147,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se
Architecture: &arch,
}, nil
case api.ServerGroupAgents:
-log.Debug().Str("id", id).Msg("Adding agent")
+d.log.Str("id", id).Debug("Adding agent")
return &api.MemberStatus{
ID: id,
UID: uuid.NewUUID(),
@@ -160,7 +159,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se
Architecture: &arch,
}, nil
case api.ServerGroupDBServers:
-log.Debug().Str("id", id).Msg("Adding dbserver")
+d.log.Str("id", id).Debug("Adding dbserver")
return &api.MemberStatus{
ID: id,
UID: uuid.NewUUID(),
@@ -172,7 +171,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se
Architecture: &arch,
}, nil
case api.ServerGroupCoordinators:
-log.Debug().Str("id", id).Msg("Adding coordinator")
+d.log.Str("id", id).Debug("Adding coordinator")
return &api.MemberStatus{
ID: id,
UID: uuid.NewUUID(),
@@ -184,7 +183,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se
Architecture: &arch,
}, nil
case api.ServerGroupSyncMasters:
-log.Debug().Str("id", id).Msg("Adding syncmaster")
+d.log.Str("id", id).Debug("Adding syncmaster")
return &api.MemberStatus{
ID: id,
UID: uuid.NewUUID(),
@@ -196,7 +195,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se
Architecture: &arch,
}, nil
case api.ServerGroupSyncWorkers:
-log.Debug().Str("id", id).Msg("Adding syncworker")
+d.log.Str("id", id).Debug("Adding syncworker")
return &api.MemberStatus{
ID: id,
UID: uuid.NewUUID(),
@@ -26,8 +26,6 @@ import (
"sync"
"time"

-"github.com/rs/zerolog"
-
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle"
"k8s.io/apimachinery/pkg/types"
@@ -114,8 +112,8 @@ func wrapActionStartFailureGracePeriod(action Action, failureGracePeriod time.Du
}

func withActionStartFailureGracePeriod(in actionFactory, failureGracePeriod time.Duration) actionFactory {
-return func(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+return func(action api.Action, actionCtx ActionContext) Action {
-return wrapActionStartFailureGracePeriod(in(log, action, actionCtx), failureGracePeriod)
+return wrapActionStartFailureGracePeriod(in(action, actionCtx), failureGracePeriod)
}
}

@@ -154,7 +152,7 @@ func getActionPlanAppender(a Action, plan api.Plan) (api.Plan, bool) {
}
}

-type actionFactory func(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action
+type actionFactory func(action api.Action, actionCtx ActionContext) Action

var (
definedActions = map[api.ActionType]actionFactory{}
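With the `zerolog.Logger` parameter gone from `actionFactory`, registering a new action shrinks to the pair below. `registerAction`, `newActionImplDefRef`, `ActionContext` and `defaultTimeout` are identifiers that appear elsewhere in this diff; the noop action type and struct are illustrative stand-ins for any concrete action:

```go
func init() {
	// api.ActionTypeNoop is a hypothetical action type used for illustration.
	registerAction(api.ActionTypeNoop, newNoopAction, defaultTimeout)
}

// newNoopAction matches the new actionFactory signature: no logger argument.
func newNoopAction(action api.Action, actionCtx ActionContext) Action {
	a := &actionNoop{}
	// The embedded actionImpl now carries the logger, so methods log via
	// a.log.Err(err).Debug(...) instead of a package-level zerolog logger.
	a.actionImpl = newActionImplDefRef(action, actionCtx)
	return a
}

type actionNoop struct {
	actionImpl
}
```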
@@ -29,8 +29,6 @@ import (

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
-"github.com/rs/zerolog"
-"github.com/rs/zerolog/log"
)

func init() {
@@ -39,10 +37,10 @@ func init() {

// newAddMemberAction creates a new Action that implements the given
// planned AddMember action.
-func newAddMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newAddMemberAction(action api.Action, actionCtx ActionContext) Action {
a := &actionAddMember{}

-a.actionImpl = newBaseActionImpl(log, action, actionCtx, &a.newMemberID)
+a.actionImpl = newBaseActionImpl(action, actionCtx, &a.newMemberID)

return a
}
@@ -66,7 +64,7 @@ type actionAddMember struct {
func (a *actionAddMember) Start(ctx context.Context) (bool, error) {
newID, err := a.actionCtx.CreateMember(ctx, a.action.Group, a.action.MemberID, topology.WithTopologyMod)
if err != nil {
-log.Debug().Err(err).Msg("Failed to create member")
+a.log.Err(err).Debug("Failed to create member")
return false, errors.WithStack(err)
}
a.newMemberID = newID
@@ -23,13 +23,10 @@ package reconcile
import (
"context"

-"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
-"github.com/arangodb/kube-arangodb/pkg/util/errors"
-"github.com/rs/zerolog/log"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
+"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
"github.com/arangodb/kube-arangodb/pkg/util"
-"github.com/rs/zerolog"
+"github.com/arangodb/kube-arangodb/pkg/util/errors"
)

func init() {
@@ -38,10 +35,10 @@ func init() {

// newArangoMemberUpdatePodSpecAction creates a new Action that implements the given
// planned ArangoMemberUpdatePodSpec action.
-func newArangoMemberUpdatePodSpecAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newArangoMemberUpdatePodSpecAction(action api.Action, actionCtx ActionContext) Action {
a := &actionArangoMemberUpdatePodSpec{}

-a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+a.actionImpl = newActionImplDefRef(action, actionCtx)

return a
}
@@ -64,20 +61,20 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro

m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
if !found {
-log.Error().Msg("No such member")
+a.log.Error("No such member")
return true, nil
}

member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group))
if !ok {
err := errors.Newf("ArangoMember not found")
-log.Error().Err(err).Msg("ArangoMember not found")
+a.log.Err(err).Error("ArangoMember not found")
return false, err
}

endpoint, err := a.actionCtx.GenerateMemberEndpoint(a.action.Group, m)
if err != nil {
-log.Error().Err(err).Msg("Unable to render endpoint")
+a.log.Err(err).Error("Unable to render endpoint")
return false, err
}

@@ -85,7 +82,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro
// Update endpoint
m.Endpoint = util.NewString(endpoint)
if err := status.Members.Update(m, a.action.Group); err != nil {
-log.Error().Err(err).Msg("Unable to update endpoint")
+a.log.Err(err).Error("Unable to update endpoint")
return false, err
}
}
@@ -104,19 +101,19 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro

renderedPod, err := a.actionCtx.RenderPodTemplateForMember(ctx, a.actionCtx.ACS(), spec, status, a.action.MemberID, imageInfo)
if err != nil {
-log.Err(err).Msg("Error while rendering pod")
+a.log.Err(err).Error("Error while rendering pod")
return false, err
}

checksum, err := resources.ChecksumArangoPod(groupSpec, resources.CreatePodFromTemplate(renderedPod))
if err != nil {
-log.Err(err).Msg("Error while getting pod checksum")
+a.log.Err(err).Error("Error while getting pod checksum")
return false, err
}

template, err := api.GetArangoMemberPodTemplate(renderedPod, checksum)
if err != nil {
-log.Err(err).Msg("Error while getting pod template")
+a.log.Err(err).Error("Error while getting pod template")
return false, err
}

@@ -135,7 +132,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro

return false
}); err != nil {
-log.Err(err).Msg("Error while updating member")
+a.log.Err(err).Error("Error while updating member")
return false, err
}

@@ -146,7 +143,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro

return true
}); err != nil {
-log.Err(err).Msg("Error while updating member status")
+a.log.Err(err).Error("Error while updating member status")
return false, err
}
@@ -23,11 +23,8 @@ package reconcile
import (
"context"

-"github.com/arangodb/kube-arangodb/pkg/util/errors"
-"github.com/rs/zerolog/log"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-"github.com/rs/zerolog"
+"github.com/arangodb/kube-arangodb/pkg/util/errors"
)

func init() {
@@ -40,10 +37,10 @@ const (

// newArangoMemberUpdatePodStatusAction creates a new Action that implements the given
// planned ArangoMemberUpdatePodStatus action.
-func newArangoMemberUpdatePodStatusAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newArangoMemberUpdatePodStatusAction(action api.Action, actionCtx ActionContext) Action {
a := &actionArangoMemberUpdatePodStatus{}

-a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+a.actionImpl = newActionImplDefRef(action, actionCtx)

return a
}
@@ -63,14 +60,14 @@ type actionArangoMemberUpdatePodStatus struct {
func (a *actionArangoMemberUpdatePodStatus) Start(ctx context.Context) (bool, error) {
m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
if !found {
-log.Error().Msg("No such member")
+a.log.Error("No such member")
return true, nil
}

member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group))
if !ok {
err := errors.Newf("ArangoMember not found")
-log.Error().Err(err).Msg("ArangoMember not found")
+a.log.Err(err).Error("ArangoMember not found")
return false, err
}

@@ -93,7 +90,7 @@ func (a *actionArangoMemberUpdatePodStatus) Start(ctx context.Context) (bool, er
}
return false
}); err != nil {
-log.Err(err).Msg("Error while updating member")
+a.log.Err(err).Error("Error while updating member")
return false, err
}
}
@@ -26,8 +26,6 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/globals"

"github.com/arangodb/go-driver"
-"github.com/rs/zerolog"
-
backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util/arangod/conn"
@@ -43,10 +41,10 @@ const (
actionBackupRestoreLocalBackupName api.PlanLocalKey = "backupName"
)

-func newBackupRestoreAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newBackupRestoreAction(action api.Action, actionCtx ActionContext) Action {
a := &actionBackupRestore{}

-a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+a.actionImpl = newActionImplDefRef(action, actionCtx)

return a
}
@@ -66,18 +64,18 @@ func (a actionBackupRestore) Start(ctx context.Context) (bool, error) {
}

if status.Restore != nil {
-a.log.Warn().Msg("Backup restore status should not be nil")
+a.log.Warn("Backup restore status should not be nil")
return true, nil
}

backupResource, err := a.actionCtx.GetBackup(ctx, *spec.RestoreFrom)
if err != nil {
-a.log.Error().Err(err).Msg("Unable to find backup")
+a.log.Err(err).Error("Unable to find backup")
return true, nil
}

if backupResource.Status.Backup == nil {
-a.log.Error().Msg("Backup ID is not set")
+a.log.Error("Backup ID is not set")
return true, nil
}

@@ -137,14 +135,14 @@ func (a actionBackupRestore) restoreSync(ctx context.Context, backup *backupApi.
defer cancel()
dbc, err := a.actionCtx.GetDatabaseClient(ctxChild)
if err != nil {
-a.log.Debug().Err(err).Msg("Failed to create database client")
+a.log.Err(err).Debug("Failed to create database client")
return false, nil
}

// The below action can take a while so the full parent timeout context is used.
restoreError := dbc.Backup().Restore(ctx, driver.BackupID(backup.Status.Backup.ID), nil)
if restoreError != nil {
-a.log.Error().Err(restoreError).Msg("Restore failed")
+a.log.Err(restoreError).Error("Restore failed")
}

if err := a.actionCtx.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
@@ -163,7 +161,7 @@ func (a actionBackupRestore) restoreSync(ctx context.Context, backup *backupApi.

return true
}); err != nil {
-a.log.Error().Err(err).Msg("Unable to set restored state")
+a.log.Err(err).Error("Unable to set restored state")
return false, err
}

@@ -186,7 +184,7 @@ func (a actionBackupRestore) CheckProgress(ctx context.Context) (bool, bool, err

dbc, err := a.actionCtx.GetDatabaseAsyncClient(ctxChild)
if err != nil {
-a.log.Debug().Err(err).Msg("Failed to create database client")
+a.log.Err(err).Debug("Failed to create database client")
return false, false, nil
}

@@ -224,7 +222,7 @@ func (a actionBackupRestore) CheckProgress(ctx context.Context) (bool, bool, err

return true
}); err != nil {
-a.log.Error().Err(err).Msg("Unable to set restored state")
+a.log.Err(err).Error("Unable to set restored state")
return false, false, err
}
@@ -24,17 +24,16 @@ import (
"context"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-"github.com/rs/zerolog"
)

func init() {
registerAction(api.ActionTypeBackupRestoreClean, newBackupRestoreCleanAction, backupRestoreTimeout)
}

-func newBackupRestoreCleanAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newBackupRestoreCleanAction(action api.Action, actionCtx ActionContext) Action {
a := &actionBackupRestoreClean{}

-a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+a.actionImpl = newActionImplDefRef(action, actionCtx)

return a
}
@@ -33,17 +33,16 @@ import (
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-"github.com/rs/zerolog"
)

func init() {
registerAction(api.ActionTypeBootstrapSetPassword, newBootstrapSetPasswordAction, defaultTimeout)
}

-func newBootstrapSetPasswordAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newBootstrapSetPasswordAction(action api.Action, actionCtx ActionContext) Action {
a := &actionBootstrapSetPassword{}

-a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+a.actionImpl = newActionImplDefRef(action, actionCtx)

return a
}
@@ -59,11 +58,11 @@ func (a actionBootstrapSetPassword) Start(ctx context.Context) (bool, error) {
spec := a.actionCtx.GetSpec()

if user, ok := a.action.GetParam("user"); !ok {
-a.log.Warn().Msgf("User param is not set in action")
+a.log.Warn("User param is not set in action")
return true, nil
} else {
if secret, ok := spec.Bootstrap.PasswordSecretNames[user]; !ok {
-a.log.Warn().Msgf("User does not exist in password hashes")
+a.log.Warn("User does not exist in password hashes")
return true, nil
} else {
ctxChild, cancel := globals.GetGlobals().Timeouts().ArangoD().WithTimeout(ctx)
@@ -98,7 +97,7 @@ func (a actionBootstrapSetPassword) Start(ctx context.Context) (bool, error) {
}

func (a actionBootstrapSetPassword) setUserPassword(ctx context.Context, user, secret string) (string, error) {
-a.log.Debug().Msgf("Bootstrapping user %s, secret %s", user, secret)
+a.log.Debug("Bootstrapping user %s, secret %s", user, secret)

ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
defer cancel()
@@ -24,17 +24,16 @@ import (
"context"

api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-"github.com/rs/zerolog"
)

func init() {
registerAction(api.ActionTypeBootstrapUpdate, newBootstrapUpdateAction, defaultTimeout)
}

-func newBootstrapUpdateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newBootstrapUpdateAction(action api.Action, actionCtx ActionContext) Action {
a := &actionBootstrapUpdate{}

-a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+a.actionImpl = newActionImplDefRef(action, actionCtx)

return a
}
@ -29,8 +29,6 @@ import (
|
||||||
|
|
||||||
driver "github.com/arangodb/go-driver"
|
driver "github.com/arangodb/go-driver"
|
||||||
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||||
"github.com/rs/zerolog"
|
|
||||||
|
|
||||||
"github.com/arangodb/kube-arangodb/pkg/util/arangod"
|
"github.com/arangodb/kube-arangodb/pkg/util/arangod"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -40,10 +38,10 @@ func init() {
|
||||||
|
|
||||||
// newCleanOutMemberAction creates a new Action that implements the given
|
// newCleanOutMemberAction creates a new Action that implements the given
|
||||||
// planned CleanOutMember action.
|
// planned CleanOutMember action.
|
||||||
func newCleanOutMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
|
func newCleanOutMemberAction(action api.Action, actionCtx ActionContext) Action {
|
||||||
a := &actionCleanoutMember{}
|
a := &actionCleanoutMember{}
|
||||||
|
|
||||||
a.actionImpl = newActionImplDefRef(log, action, actionCtx)
|
a.actionImpl = newActionImplDefRef(action, actionCtx)
|
||||||
|
|
||||||
return a
|
return a
|
||||||
}
|
}
|
||||||
|
@ -68,13 +66,12 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) {
|
||||||
// We wanted to remove and it is already gone. All ok
|
// We wanted to remove and it is already gone. All ok
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
log := a.log
|
|
||||||
|
|
||||||
ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
|
ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
|
||||||
defer cancel()
|
defer cancel()
|
||||||
c, err := a.actionCtx.GetDatabaseClient(ctxChild)
|
c, err := a.actionCtx.GetDatabaseClient(ctxChild)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debug().Err(err).Msg("Failed to create member client")
|
a.log.Err(err).Debug("Failed to create member client")
|
||||||
return false, errors.WithStack(err)
|
return false, errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -82,7 +79,7 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) {
|
||||||
defer cancel()
|
defer cancel()
|
||||||
cluster, err := c.Cluster(ctxChild)
|
cluster, err := c.Cluster(ctxChild)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debug().Err(err).Msg("Failed to access cluster")
|
a.log.Err(err).Debug("Failed to access cluster")
|
||||||
return false, errors.WithStack(err)
|
return false, errors.WithStack(err)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -95,10 +92,10 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) {
|
||||||
// Member not found, it could be that it never connected to the cluster
|
// Member not found, it could be that it never connected to the cluster
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
log.Debug().Err(err).Msg("Failed to cleanout member")
|
a.log.Err(err).Debug("Failed to cleanout member")
|
||||||
return false, errors.WithStack(err)
|
return false, errors.WithStack(err)
|
||||||
}
|
}
|
||||||
log.Debug().Str("job-id", jobID).Msg("Cleanout member started")
|
a.log.Str("job-id", jobID).Debug("Cleanout member started")
|
||||||
// Update status
|
// Update status
|
||||||
m.Phase = api.MemberPhaseCleanOut
|
m.Phase = api.MemberPhaseCleanOut
|
||||||
m.CleanoutJobID = jobID
|
m.CleanoutJobID = jobID
|
||||||
|
@ -111,7 +108,6 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) {
|
||||||
// CheckProgress checks the progress of the action.
|
// CheckProgress checks the progress of the action.
|
||||||
// Returns: ready, abort, error.
|
// Returns: ready, abort, error.
|
||||||
func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, error) {
|
func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, error) {
|
||||||
log := a.log
|
|
||||||
m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
|
m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
|
||||||
if !ok {
|
if !ok {
|
||||||
// We wanted to remove and it is already gone. All ok
|
// We wanted to remove and it is already gone. All ok
|
||||||
|
@ -126,7 +122,7 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, e
|
||||||
defer cancel()
|
defer cancel()
|
||||||
c, err := a.actionCtx.GetDatabaseClient(ctxChild)
|
c, err := a.actionCtx.GetDatabaseClient(ctxChild)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debug().Err(err).Msg("Failed to create database client")
|
a.log.Err(err).Debug("Failed to create database client")
|
||||||
return false, false, nil
|
return false, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -134,7 +130,7 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, e
|
||||||
defer cancel()
|
defer cancel()
|
||||||
cluster, err := c.Cluster(ctxChild)
|
cluster, err := c.Cluster(ctxChild)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Debug().Err(err).Msg("Failed to access cluster")
|
a.log.Err(err).Debug("Failed to access cluster")
|
||||||
return false, false, nil
|
return false, false, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -142,18 +138,18 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, error) {
     defer cancel()
     cleanedOut, err := cluster.IsCleanedOut(ctxChild, a.action.MemberID)
     if err != nil {
-        log.Debug().Err(err).Msg("IsCleanedOut failed")
+        a.log.Err(err).Debug("IsCleanedOut failed")
         return false, false, nil
     }
     if !cleanedOut {
         // We're not done yet, check job status
-        log.Debug().Msg("IsCleanedOut returned false")
+        a.log.Debug("IsCleanedOut returned false")
 
         ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
         defer cancel()
         c, err := a.actionCtx.GetDatabaseClient(ctxChild)
         if err != nil {
-            log.Debug().Err(err).Msg("Failed to create database client")
+            a.log.Err(err).Debug("Failed to create database client")
             return false, false, nil
         }
 
@@ -161,7 +157,7 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, error) {
         defer cancel()
         agency, err := a.actionCtx.GetAgency(ctxChild)
         if err != nil {
-            log.Debug().Err(err).Msg("Failed to create agency client")
+            a.log.Err(err).Debug("Failed to create agency client")
             return false, false, nil
         }
 
@@ -169,11 +165,11 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, error) {
         defer cancel()
         jobStatus, err := arangod.CleanoutServerJobStatus(ctxChild, m.CleanoutJobID, c, agency)
         if err != nil {
-            log.Debug().Err(err).Msg("Failed to fetch cleanout job status")
+            a.log.Err(err).Debug("Failed to fetch cleanout job status")
             return false, false, nil
         }
         if jobStatus.IsFailed() {
-            log.Warn().Str("reason", jobStatus.Reason()).Msg("Cleanout Job failed. Aborting plan")
+            a.log.Str("reason", jobStatus.Reason()).Warn("Cleanout Job failed. Aborting plan")
             // Revert cleanout state
             m.Phase = api.MemberPhaseCreated
             m.CleanoutJobID = ""
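The hunks above show the call-style change this commit applies everywhere: zerolog picked the level first and closed the chain with Msg (log.Debug().Err(err).Msg(...)), while the unified logger accumulates fields first and the level method emits the message (a.log.Err(err).Debug(...)). A minimal, compilable sketch of that field-accumulating shape; the Logger below is an illustration of the call shape, not the real pkg/logging type:

    package main

    import "fmt"

    // Logger sketches the field-first style used after this commit: fields
    // are chained, then the level method formats and writes the message.
    type Logger struct{ fields []string }

    func (l Logger) Str(k, v string) Logger {
        l.fields = append(l.fields, k+"="+v)
        return l
    }

    func (l Logger) Err(err error) Logger {
        if err != nil {
            return l.Str("error", err.Error())
        }
        return l
    }

    func (l Logger) Debug(format string, args ...interface{}) {
        fmt.Printf("DBG "+format+" %v\n", append(args, l.fields)...)
    }

    func main() {
        var log Logger
        log.Str("job-id", "123").Debug("Cleanout member started")
        // DBG Cleanout member started [job-id=123]
    }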
@@ -28,7 +28,6 @@ import (
     "github.com/arangodb/go-driver"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
@@ -37,10 +36,10 @@ func init() {
 
 // newClusterMemberCleanupAction creates a new Action that implements the given
 // planned ClusterMemberCleanup action.
-func newClusterMemberCleanupAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newClusterMemberCleanupAction(action api.Action, actionCtx ActionContext) Action {
     a := &actionClusterMemberCleanup{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -59,7 +58,7 @@ type actionClusterMemberCleanup struct {
 // the start time needs to be recorded and a ready condition needs to be checked.
 func (a *actionClusterMemberCleanup) Start(ctx context.Context) (bool, error) {
     if err := a.start(ctx); err != nil {
-        a.log.Warn().Err(err).Msgf("Unable to clean cluster member")
+        a.log.Err(err).Warn("Unable to clean cluster member")
     }
 
     return true, nil
@@ -26,8 +26,6 @@ import (
 
     "github.com/arangodb/arangosync-client/client"
     "github.com/arangodb/go-driver/agency"
-    "github.com/rs/zerolog"
-    "github.com/rs/zerolog/log"
     core "k8s.io/api/core/v1"
 
     "github.com/arangodb/go-driver"
@@ -37,6 +35,7 @@ import (
     agencyCache "github.com/arangodb/kube-arangodb/pkg/deployment/agency"
     "github.com/arangodb/kube-arangodb/pkg/deployment/member"
     "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler"
+    "github.com/arangodb/kube-arangodb/pkg/logging"
     "github.com/arangodb/kube-arangodb/pkg/util/errors"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
     inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
@@ -108,7 +107,7 @@ type ActionLocalsContext interface {
 }
 
 // newActionContext creates a new ActionContext implementation.
-func newActionContext(log zerolog.Logger, context Context) ActionContext {
+func newActionContext(log logging.Logger, context Context) ActionContext {
     return &actionContext{
         log:     log,
         context: context,
@@ -118,7 +117,7 @@ func newActionContext(log zerolog.Logger, context Context) ActionContext {
 // actionContext implements ActionContext
 type actionContext struct {
     context      Context
-    log          zerolog.Logger
+    log          logging.Logger
     cachedStatus inspectorInterface.Inspector
     locals       api.PlanLocals
 }
@@ -340,7 +339,7 @@ func (ac *actionContext) UpdateMember(ctx context.Context, member api.MemberStatus
         return errors.WithStack(err)
     }
     if err := ac.context.UpdateStatus(ctx, status, lastVersion); err != nil {
-        log.Debug().Err(err).Msg("Updating CR status failed")
+        ac.log.Err(err).Debug("Updating CR status failed")
         return errors.WithStack(err)
     }
     return nil
@@ -354,7 +353,7 @@ func (ac *actionContext) RemoveMemberByID(ctx context.Context, id string) error
         return nil
     }
     if err := status.Members.RemoveByID(id, group); err != nil {
-        log.Debug().Err(err).Str("group", group.AsRole()).Msg("Failed to remove member")
+        ac.log.Err(err).Str("group", group.AsRole()).Debug("Failed to remove member")
         return errors.WithStack(err)
     }
     // Save removed member
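In action_context.go the logger stops being a package-global zerolog value and becomes a typed field on the context, so every log call is bound to the deployment that owns it. A sketch of that pattern under stated assumptions (the Logger interface below only names the methods visible in this diff; the real logging.Logger is richer):

    package main

    import (
        "errors"
        "fmt"
    )

    // Logger names only the methods this diff shows on logging.Logger.
    type Logger interface {
        Err(err error) Logger
        Str(key, value string) Logger
        Debug(format string, args ...interface{})
    }

    type printLogger struct{ prefix string }

    func (p printLogger) Err(err error) Logger {
        return printLogger{p.prefix + " error=" + err.Error()}
    }

    func (p printLogger) Str(k, v string) Logger {
        return printLogger{p.prefix + " " + k + "=" + v}
    }

    func (p printLogger) Debug(format string, args ...interface{}) {
        fmt.Printf("DBG%s "+format+"\n", append([]interface{}{p.prefix}, args...)...)
    }

    // actionContext mirrors the struct in the hunk above: the logger is a
    // stored, already-decorated value instead of a global.
    type actionContext struct{ log Logger }

    func (ac *actionContext) updateStatus() error {
        err := errors.New("conflict")
        ac.log.Err(err).Debug("Updating CR status failed")
        return err
    }

    func main() {
        _ = (&actionContext{log: printLogger{}}).updateStatus()
    }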
@@ -25,7 +25,6 @@ import (
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/util"
-    "github.com/rs/zerolog"
 )
 
 func init() {
@@ -33,10 +32,10 @@ func init() {
 }
 
 // newDisableScalingCluster creates the new action with disabling scaling DBservers and coordinators.
-func newDisableScalingCluster(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newDisableScalingCluster(action api.Action, actionCtx ActionContext) Action {
     a := &actionDisableScalingCluster{}
 
-    a.actionImpl = newActionImpl(log, action, actionCtx, util.NewString(""))
+    a.actionImpl = newActionImpl(action, actionCtx, util.NewString(""))
 
     return a
 }
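The same two-line change repeats for every action constructor in the package: the zerolog.Logger parameter disappears from the factory and from the newActionImpl / newActionImplDefRef calls. The net effect is a uniform, logger-free factory signature, sketched below with stand-in types (the actionFactory name is an assumption for illustration; only api.Action and ActionContext come from this diff):

    package reconcile

    // Stand-ins for the repository's real types.
    type (
        Action        interface{}
        ActionContext interface{}
        APIAction     struct{}
    )

    // Before this commit every factory looked like:
    //   func newX(log zerolog.Logger, action APIAction, actionCtx ActionContext) Action
    // After it, all factories share this logger-free shape, so registerAction
    // can treat them uniformly:
    type actionFactory func(action APIAction, actionCtx ActionContext) Action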
@@ -25,7 +25,6 @@ import (
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/util"
-    "github.com/rs/zerolog"
 )
 
 func init() {
@@ -33,10 +32,10 @@ func init() {
 }
 
 // newEnableScalingCluster creates the new action with enabling scaling DBservers and coordinators.
-func newEnableScalingCluster(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newEnableScalingCluster(action api.Action, actionCtx ActionContext) Action {
     a := &actionEnableScalingCluster{}
 
-    a.actionImpl = newActionImpl(log, action, actionCtx, util.NewString(""))
+    a.actionImpl = newActionImpl(action, actionCtx, util.NewString(""))
 
     return a
 }
@@ -38,7 +38,6 @@ import (
     "k8s.io/apimachinery/pkg/types"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func ensureEncryptionSupport(actionCtx ActionContext) error {
@@ -60,10 +59,10 @@ func init() {
     registerAction(api.ActionTypeEncryptionKeyAdd, newEncryptionKeyAdd, defaultTimeout)
 }
 
-func newEncryptionKeyAdd(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newEncryptionKeyAdd(action api.Action, actionCtx ActionContext) Action {
     a := &encryptionKeyAddAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -76,7 +75,7 @@ type encryptionKeyAddAction struct {
 
 func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) {
     if err := ensureEncryptionSupport(a.actionCtx); err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
@@ -87,7 +86,7 @@ func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) {
 
     sha, d, exists, err := pod.GetEncryptionKey(ctx, a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read(), secret)
     if err != nil {
-        a.log.Error().Err(err).Msgf("Unable to fetch current encryption key")
+        a.log.Err(err).Error("Unable to fetch current encryption key")
         return true, nil
     }
 
@@ -100,7 +99,7 @@ func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) {
 
     patch, err := p.Marshal()
     if err != nil {
-        a.log.Error().Err(err).Msgf("Unable to encrypt patch")
+        a.log.Err(err).Error("Unable to encrypt patch")
         return true, nil
     }
 
@@ -24,17 +24,16 @@ import (
     "context"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeEncryptionKeyPropagated, newEncryptionKeyPropagated, defaultTimeout)
 }
 
-func newEncryptionKeyPropagated(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newEncryptionKeyPropagated(action api.Action, actionCtx ActionContext) Action {
     a := &encryptionKeyPropagatedAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -48,7 +47,7 @@ type encryptionKeyPropagatedAction struct {
 func (a *encryptionKeyPropagatedAction) Start(ctx context.Context) (bool, error) {
     propagatedFlag, exists := a.action.Params[propagated]
     if !exists {
-        a.log.Error().Msgf("Propagated flag is missing")
+        a.log.Error("Propagated flag is missing")
         return true, nil
     }
 
@@ -25,7 +25,6 @@ import (
 
     "github.com/arangodb/kube-arangodb/pkg/util/globals"
 
-    "github.com/rs/zerolog"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -37,10 +36,10 @@ func init() {
     registerAction(api.ActionTypeEncryptionKeyRefresh, newEncryptionKeyRefresh, defaultTimeout)
 }
 
-func newEncryptionKeyRefresh(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newEncryptionKeyRefresh(action api.Action, actionCtx ActionContext) Action {
     a := &encryptionKeyRefreshAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -59,7 +58,7 @@ func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error) {
     defer cancel()
     keyfolder, err := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetName()), meta.GetOptions{})
     if err != nil {
-        a.log.Err(err).Msgf("Unable to fetch encryption folder")
+        a.log.Err(err).Error("Unable to fetch encryption folder")
         return true, false, nil
     }
 
@@ -67,7 +66,7 @@ func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error) {
     defer cancel()
     c, err := a.actionCtx.GetServerClient(ctxChild, a.action.Group, a.action.MemberID)
     if err != nil {
-        a.log.Warn().Err(err).Msg("Unable to get client")
+        a.log.Err(err).Warn("Unable to get client")
         return true, false, nil
     }
 
@@ -76,7 +75,7 @@ func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error) {
     defer cancel()
     e, err := client.RefreshEncryption(ctxChild)
     if err != nil {
-        a.log.Warn().Err(err).Msg("Unable to refresh encryption")
+        a.log.Err(err).Warn("Unable to refresh encryption")
         return true, false, nil
     }
 
@@ -36,17 +36,16 @@ import (
     "k8s.io/apimachinery/pkg/types"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeEncryptionKeyRemove, newEncryptionKeyRemove, defaultTimeout)
 }
 
-func newEncryptionKeyRemove(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newEncryptionKeyRemove(action api.Action, actionCtx ActionContext) Action {
     a := &encryptionKeyRemoveAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -59,7 +58,7 @@ type encryptionKeyRemoveAction struct {
 
 func (a *encryptionKeyRemoveAction) Start(ctx context.Context) (bool, error) {
     if err := ensureEncryptionSupport(a.actionCtx); err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
@@ -77,7 +76,7 @@ func (a *encryptionKeyRemoveAction) Start(ctx context.Context) (bool, error) {
 
     patch, err := p.Marshal()
     if err != nil {
-        a.log.Error().Err(err).Msgf("Unable to encrypt patch")
+        a.log.Err(err).Error("Unable to encrypt patch")
         return true, nil
     }
 
@@ -30,17 +30,16 @@ import (
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeEncryptionKeyStatusUpdate, newEncryptionKeyStatusUpdate, defaultTimeout)
 }
 
-func newEncryptionKeyStatusUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newEncryptionKeyStatusUpdate(action api.Action, actionCtx ActionContext) Action {
     a := &encryptionKeyStatusUpdateAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -53,7 +52,7 @@ type encryptionKeyStatusUpdateAction struct {
 
 func (a *encryptionKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) {
     if err := ensureEncryptionSupport(a.actionCtx); err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
@@ -62,7 +61,7 @@ func (a *encryptionKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) {
 
     f, err := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), meta.GetOptions{})
     if err != nil {
-        a.log.Error().Err(err).Msgf("Unable to get folder info")
+        a.log.Err(err).Error("Unable to get folder info")
         return true, nil
     }
 
@@ -26,6 +26,11 @@ import (
     "github.com/rs/zerolog"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
+    "github.com/arangodb/kube-arangodb/pkg/logging"
 )
 
+var (
+    logger = logging.Global().RegisterAndGetLogger("action", logging.Info)
+)
 
 type actionEmpty struct {
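Instead of receiving a logger from its caller, the reconcile package now registers a named logger once with the global logging registry (logging.Global().RegisterAndGetLogger("action", logging.Info)). A plausible sketch of such a registry follows; RegisterAndGetLogger and the "topic plus default level" idea come from the diff, while the internals below are assumptions about the design, not the real pkg/logging code:

    package logging

    import "sync"

    type Level int

    const Info Level = 1

    type Logger struct {
        name  string
        level Level
    }

    // Service keeps one logger per topic so levels can be inspected or
    // changed centrally at runtime.
    type Service struct {
        mu      sync.Mutex
        loggers map[string]*Logger
    }

    var global = &Service{loggers: map[string]*Logger{}}

    func Global() *Service { return global }

    func (s *Service) RegisterAndGetLogger(name string, lvl Level) *Logger {
        s.mu.Lock()
        defer s.mu.Unlock()
        if l, ok := s.loggers[name]; ok {
            return l
        }
        l := &Logger{name: name, level: lvl}
        s.loggers[name] = l
        return l
    }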
@@ -50,43 +55,70 @@ func (e actionEmptyStart) Start(_ context.Context) (bool, error) {
     return false, nil
 }
 
-func newActionImplDefRef(log zerolog.Logger, action api.Action, actionCtx ActionContext) actionImpl {
-    return newActionImpl(log, action, actionCtx, &action.MemberID)
+func newActionImplDefRef(action api.Action, actionCtx ActionContext) actionImpl {
+    return newActionImpl(action, actionCtx, &action.MemberID)
 }
 
-func newActionImpl(log zerolog.Logger, action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl {
+func newActionImpl(action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl {
     if memberIDRef == nil {
         panic("Action cannot have nil reference to member!")
     }
 
-    return newBaseActionImpl(log, action, actionCtx, memberIDRef)
+    return newBaseActionImpl(action, actionCtx, memberIDRef)
 }
 
-func newBaseActionImplDefRef(log zerolog.Logger, action api.Action, actionCtx ActionContext) actionImpl {
-    return newBaseActionImpl(log, action, actionCtx, &action.MemberID)
+func newBaseActionImplDefRef(action api.Action, actionCtx ActionContext) actionImpl {
+    return newBaseActionImpl(action, actionCtx, &action.MemberID)
 }
 
-func newBaseActionImpl(log zerolog.Logger, action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl {
+func newBaseActionImpl(action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl {
     if memberIDRef == nil {
         panic("Action cannot have nil reference to member!")
     }
 
-    return actionImpl{
-        log:         log,
+    a := actionImpl{
         action:      action,
         actionCtx:   actionCtx,
         memberIDRef: memberIDRef,
     }
 
+    a.log = logger.Wrap(a.wrap)
+
+    return a
 }
 
 type actionImpl struct {
-    log       zerolog.Logger
+    log       logging.Logger
     action    api.Action
     actionCtx ActionContext
 
     memberIDRef *string
 }
 
+func (a actionImpl) wrap(in *zerolog.Event) *zerolog.Event {
+    in = in.
+        Str("action-id", a.action.ID).
+        Str("action-type", string(a.action.Type)).
+        Str("group", a.action.Group.AsRole()).
+        Str("member-id", a.action.MemberID)
+
+    if status, _ := a.actionCtx.GetStatus(); status.Members.ContainsID(a.action.MemberID) {
+        if member, _, ok := status.Members.ElementByID(a.action.MemberID); ok {
+            in = in.Str("phase", string(member.Phase))
+        }
+    }
+
+    for k, v := range a.action.Params {
+        in = in.Str("param."+k, v)
+    }
+
+    for k, v := range a.action.Locals {
+        in = in.Str("local."+k.String(), v)
+    }
+
+    return in
+}
+
 // MemberID returns the member ID used / created in the current action.
 func (a actionImpl) MemberID() string {
     return *a.memberIDRef
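The wrap callback is what keeps per-action context on every log line without threading a pre-configured logger through the call graph: the package-level logger stays generic, and logger.Wrap(a.wrap) returns a logger that runs the callback on each zerolog event, stamping in the action ID, type, group, member ID, phase, params and locals. A compilable sketch of that decorator idea, using only the real zerolog API plus an assumed, simplified wrapper type:

    package main

    import (
        "os"

        "github.com/rs/zerolog"
    )

    // wrapFn matches the signature in the hunk above: it may add fields to a
    // zerolog event before the message is written.
    type wrapFn func(*zerolog.Event) *zerolog.Event

    // wrapped is an assumed stand-in for what logger.Wrap returns.
    type wrapped struct {
        base zerolog.Logger
        wrap wrapFn
    }

    func (l wrapped) Debug(msg string) {
        // Every emitted event is first passed through the callback.
        l.wrap(l.base.Debug()).Msg(msg)
    }

    func main() {
        l := wrapped{
            base: zerolog.New(os.Stdout),
            wrap: func(e *zerolog.Event) *zerolog.Event {
                return e.Str("action-id", "42").Str("action-type", "CleanOutMember")
            },
        }
        l.Debug("Cleanout member started")
        // {"level":"debug","action-id":"42","action-type":"CleanOutMember","message":"Cleanout member started"}
    }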
@@ -24,7 +24,6 @@ import (
     "context"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
@@ -33,10 +32,10 @@ func init() {
 
 // newIdleAction creates a new Action that implements the given
 // planned Idle action.
-func newIdleAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newIdleAction(action api.Action, actionCtx ActionContext) Action {
     a := &actionIdle{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -37,7 +37,6 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
     "github.com/arangodb/kube-arangodb/pkg/util"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    "github.com/rs/zerolog"
     "k8s.io/apimachinery/pkg/types"
 )
 
@@ -45,10 +44,10 @@ func init() {
     registerAction(api.ActionTypeJWTAdd, newJWTAdd, defaultTimeout)
 }
 
-func newJWTAdd(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newJWTAdd(action api.Action, actionCtx ActionContext) Action {
     a := &jwtAddAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -62,48 +61,48 @@ type jwtAddAction struct {
 func (a *jwtAddAction) Start(ctx context.Context) (bool, error) {
     folder, err := ensureJWTFolderSupportFromAction(a.actionCtx)
     if err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
     if !folder {
-        a.log.Error().Msgf("Action not supported")
+        a.log.Error("Action not supported")
         return true, nil
     }
 
     appendToken, exists := a.action.Params[checksum]
     if !exists {
-        a.log.Warn().Msgf("Key %s is missing in action", checksum)
+        a.log.Warn("Key %s is missing in action", checksum)
         return true, nil
     }
 
     s, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().Authentication.GetJWTSecretName())
     if !ok {
-        a.log.Error().Msgf("JWT Secret is missing, no rotation will take place")
+        a.log.Error("JWT Secret is missing, no rotation will take place")
         return true, nil
     }
 
     jwt, ok := s.Data[constants.SecretKeyToken]
     if !ok {
-        a.log.Error().Msgf("JWT Secret is invalid, no rotation will take place")
+        a.log.Error("JWT Secret is invalid, no rotation will take place")
         return true, nil
     }
 
     jwtSha := util.SHA256(jwt)
 
     if appendToken != jwtSha {
-        a.log.Error().Msgf("JWT Secret changed")
+        a.log.Error("JWT Secret changed")
         return true, nil
     }
 
     f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName()))
     if !ok {
-        a.log.Error().Msgf("Unable to get JWT folder info")
+        a.log.Error("Unable to get JWT folder info")
         return true, nil
     }
 
     if _, ok := f.Data[jwtSha]; ok {
-        a.log.Info().Msgf("JWT Already exists")
+        a.log.Info("JWT Already exists")
         return true, nil
     }
 
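Another detail visible throughout this file: zerolog needed Msgf for printf-style messages, while the new logger's level methods take the format string and arguments directly (a.log.Warn("Key %s is missing in action", checksum)). A self-contained sketch of a level method with that signature, assuming fmt.Sprintf semantics; the real implementation may differ in details:

    package main

    import "fmt"

    type Logger struct{ topic string }

    func (l Logger) emit(level, msg string) {
        fmt.Printf("%s [%s] %s\n", level, l.topic, msg)
    }

    // Warn formats like fmt.Sprintf, matching the call sites in this diff.
    func (l Logger) Warn(format string, args ...interface{}) {
        l.emit("WRN", fmt.Sprintf(format, args...))
    }

    func main() {
        Logger{topic: "action"}.Warn("Key %s is missing in action", "checksum")
        // WRN [action] Key checksum is missing in action
    }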
@@ -112,7 +111,7 @@ func (a *jwtAddAction) Start(ctx context.Context) (bool, error) {
 
     patch, err := p.Marshal()
     if err != nil {
-        a.log.Error().Err(err).Msgf("Unable to encrypt patch")
+        a.log.Err(err).Error("Unable to encrypt patch")
         return true, nil
     }
 
@@ -34,7 +34,6 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
     "github.com/arangodb/kube-arangodb/pkg/util"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    "github.com/rs/zerolog"
     "k8s.io/apimachinery/pkg/types"
 )
 
@@ -42,10 +41,10 @@ func init() {
     registerAction(api.ActionTypeJWTClean, newJWTClean, defaultTimeout)
 }
 
-func newJWTClean(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newJWTClean(action api.Action, actionCtx ActionContext) Action {
     a := &jwtCleanAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -59,42 +58,42 @@ type jwtCleanAction struct {
 func (a *jwtCleanAction) Start(ctx context.Context) (bool, error) {
     folder, err := ensureJWTFolderSupportFromAction(a.actionCtx)
     if err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
     if !folder {
-        a.log.Error().Msgf("Action not supported")
+        a.log.Error("Action not supported")
         return true, nil
     }
 
     cleanToken, exists := a.action.Params[checksum]
     if !exists {
-        a.log.Warn().Msgf("Key %s is missing in action", checksum)
+        a.log.Warn("Key %s is missing in action", checksum)
         return true, nil
     }
 
     if cleanToken == pod.ActiveJWTKey {
-        a.log.Error().Msgf("Unable to remove active key")
+        a.log.Error("Unable to remove active key")
         return true, nil
     }
 
     f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName()))
     if !ok {
-        a.log.Error().Msgf("Unable to get JWT folder info")
+        a.log.Error("Unable to get JWT folder info")
         return true, nil
     }
 
     if key, ok := f.Data[pod.ActiveJWTKey]; !ok {
-        a.log.Info().Msgf("Active Key is required")
+        a.log.Info("Active Key is required")
         return true, nil
     } else if util.SHA256(key) == cleanToken {
-        a.log.Info().Msgf("Unable to remove active key")
+        a.log.Info("Unable to remove active key")
         return true, nil
     }
 
     if _, ok := f.Data[cleanToken]; !ok {
-        a.log.Info().Msgf("KEy to be removed does not exist")
+        a.log.Info("KEy to be removed does not exist")
         return true, nil
     }
 
@@ -103,7 +102,7 @@ func (a *jwtCleanAction) Start(ctx context.Context) (bool, error) {
 
     patch, err := p.Marshal()
     if err != nil {
-        a.log.Error().Err(err).Msgf("Unable to encrypt patch")
+        a.log.Err(err).Error("Unable to encrypt patch")
         return true, nil
     }
 
@@ -24,17 +24,16 @@ import (
     "context"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeJWTPropagated, newJWTPropagated, defaultTimeout)
 }
 
-func newJWTPropagated(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newJWTPropagated(action api.Action, actionCtx ActionContext) Action {
     a := &jwtPropagatedAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -48,13 +47,13 @@ type jwtPropagatedAction struct {
 func (a *jwtPropagatedAction) Start(ctx context.Context) (bool, error) {
     _, err := ensureJWTFolderSupportFromAction(a.actionCtx)
     if err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
     propagatedFlag, exists := a.action.Params[propagated]
     if !exists {
-        a.log.Error().Err(err).Msgf("Propagated flag is missing")
+        a.log.Err(err).Error("Propagated flag is missing")
         return true, nil
     }
 
@@ -28,17 +28,16 @@ import (
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/deployment/client"
     "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeJWTRefresh, newJWTRefresh, defaultTimeout)
 }
 
-func newJWTRefresh(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newJWTRefresh(action api.Action, actionCtx ActionContext) Action {
     a := &jwtRefreshAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -54,7 +53,7 @@ func (a *jwtRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error) {
 
     folder, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetAPIObject().GetName()))
     if !ok {
-        a.log.Error().Msgf("Unable to get JWT folder info")
+        a.log.Error("Unable to get JWT folder info")
         return true, false, nil
     }
 
@@ -62,14 +61,14 @@ func (a *jwtRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error) {
     defer cancel()
     c, err := a.actionCtx.GetServerClient(ctxChild, a.action.Group, a.action.MemberID)
     if err != nil {
-        a.log.Warn().Err(err).Msg("Unable to get client")
+        a.log.Err(err).Warn("Unable to get client")
         return true, false, nil
     }
 
     ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
     defer cancel()
     if invalid, err := isMemberJWTTokenInvalid(ctxChild, client.NewClient(c.Connection()), folder.Data, true); err != nil {
-        a.log.Warn().Err(err).Msg("Error while getting JWT Status")
+        a.log.Err(err).Warn("Error while getting JWT Status")
         return true, false, nil
     } else if invalid {
         return false, false, nil
@@ -37,7 +37,6 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
     "github.com/arangodb/kube-arangodb/pkg/util"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    "github.com/rs/zerolog"
     "k8s.io/apimachinery/pkg/types"
 )
 
@@ -45,10 +44,10 @@ func init() {
     registerAction(api.ActionTypeJWTSetActive, newJWTSetActive, defaultTimeout)
 }
 
-func newJWTSetActive(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newJWTSetActive(action api.Action, actionCtx ActionContext) Action {
     a := &jwtSetActiveAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -62,30 +61,30 @@ type jwtSetActiveAction struct {
 func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) {
     folder, err := ensureJWTFolderSupportFromAction(a.actionCtx)
     if err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
     if !folder {
-        a.log.Error().Msgf("Action not supported")
+        a.log.Error("Action not supported")
         return true, nil
     }
 
     toActiveChecksum, exists := a.action.Params[checksum]
     if !exists {
-        a.log.Warn().Msgf("Key %s is missing in action", checksum)
+        a.log.Warn("Key %s is missing in action", checksum)
         return true, nil
     }
 
     f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName()))
     if !ok {
-        a.log.Error().Msgf("Unable to get JWT folder info")
+        a.log.Error("Unable to get JWT folder info")
         return true, nil
     }
 
     toActiveData, toActivePresent := f.Data[toActiveChecksum]
     if !toActivePresent {
-        a.log.Error().Msgf("JWT key which is desired to be active is not anymore in secret")
+        a.log.Error("JWT key which is desired to be active is not anymore in secret")
         return true, nil
     }
 
@@ -93,7 +92,7 @@ func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) {
     tokenKeyData, token := f.Data[constants.SecretKeyToken]
 
     if util.SHA256(activeKeyData) == toActiveChecksum && util.SHA256(activeKeyData) == util.SHA256(tokenKeyData) {
-        a.log.Info().Msgf("Desired JWT is already active")
+        a.log.Info("Desired JWT is already active")
         return true, nil
     }
 
@@ -114,7 +113,7 @@ func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) {
 
     patch, err := p.Marshal()
     if err != nil {
-        a.log.Error().Err(err).Msgf("Unable to encrypt patch")
+        a.log.Err(err).Error("Unable to encrypt patch")
         return true, nil
     }
 
@@ -34,7 +34,6 @@ import (
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
     "github.com/arangodb/kube-arangodb/pkg/util"
-    "github.com/rs/zerolog"
 )
 
 const (
@@ -68,10 +67,10 @@ func init() {
     registerAction(api.ActionTypeJWTStatusUpdate, newJWTStatusUpdate, defaultTimeout)
 }
 
-func newJWTStatusUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newJWTStatusUpdate(action api.Action, actionCtx ActionContext) Action {
     a := &jwtStatusUpdateAction{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -85,20 +84,20 @@ type jwtStatusUpdateAction struct {
 func (a *jwtStatusUpdateAction) Start(ctx context.Context) (bool, error) {
     folder, err := ensureJWTFolderSupportFromAction(a.actionCtx)
     if err != nil {
-        a.log.Error().Err(err).Msgf("Action not supported")
+        a.log.Err(err).Error("Action not supported")
         return true, nil
     }
 
     if !folder {
         f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().Authentication.GetJWTSecretName())
         if !ok {
-            a.log.Error().Msgf("Unable to get JWT secret info")
+            a.log.Error("Unable to get JWT secret info")
             return true, nil
         }
 
         key, ok := f.Data[constants.SecretKeyToken]
         if !ok {
-            a.log.Error().Msgf("JWT Token is invalid")
+            a.log.Error("JWT Token is invalid")
             return true, nil
         }
 
@@ -125,7 +124,7 @@ func (a *jwtStatusUpdateAction) Start(ctx context.Context) (bool, error) {
 
     f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName()))
     if !ok {
-        a.log.Error().Msgf("Unable to get JWT folder info")
+        a.log.Error("Unable to get JWT folder info")
         return true, nil
     }
 
@@ -23,7 +23,6 @@ package reconcile
 import (
     "context"
 
-    "github.com/rs/zerolog"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -39,10 +38,10 @@ func init() {
 
 // newKillMemberPodAction creates a new Action that implements the given
 // planned KillMemberPod action.
-func newKillMemberPodAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newKillMemberPodAction(action api.Action, actionCtx ActionContext) Action {
     a := &actionKillMemberPod{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -61,10 +60,9 @@ func (a *actionKillMemberPod) Start(ctx context.Context) (bool, error) {
         return true, nil
     }
 
-    log := a.log
     m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
     if !ok {
-        log.Error().Msg("No such member")
+        a.log.Error("No such member")
         return true, nil
     }
 
@@ -74,12 +72,12 @@ func (a *actionKillMemberPod) Start(ctx context.Context) (bool, error) {
     }
 
     if ifPodUIDMismatch(m, a.action, cache) {
-        log.Error().Msg("Member UID is changed")
+        a.log.Error("Member UID is changed")
         return true, nil
     }
 
     if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil {
-        log.Error().Err(err).Msg("Unable to kill pod")
+        a.log.Err(err).Error("Unable to kill pod")
         return true, nil
     }
 
@@ -92,11 +90,9 @@ func (a *actionKillMemberPod) CheckProgress(ctx context.Context) (bool, bool, error) {
     if !features.GracefulShutdown().Enabled() {
         return true, false, nil
     }
 
-    log := a.log
     m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
     if !ok {
-        log.Error().Msg("No such member")
+        a.log.Error("No such member")
         return true, false, nil
     }
 
@@ -107,7 +103,7 @@ func (a *actionKillMemberPod) CheckProgress(ctx context.Context) (bool, bool, error) {
 
     p, ok := cache.Pod().V1().GetSimple(m.PodName)
     if !ok {
-        log.Error().Msg("No such member")
+        a.log.Error("No such member")
         return true, false, nil
     }
 
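The kill-member-pod hunks also drop the local log := a.log re-binding that several methods used: because a.log is already the fully decorated wrapped logger, methods can call it directly. A small before/after sketch with stand-in types:

    package main

    import "fmt"

    type logger struct{}

    func (logger) Error(format string, args ...interface{}) {
        fmt.Printf("ERR "+format+"\n", args...)
    }

    type actionImpl struct{ log logger }

    // Before: log := a.log; log.Error().Msg("No such member")
    // After: the already-decorated field is used in place.
    func (a actionImpl) start() {
        a.log.Error("No such member")
    }

    func main() { actionImpl{}.start() }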
@@ -22,17 +22,16 @@ package reconcile
 
 import (
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeSetMaintenanceCondition, newSetMaintenanceConditionAction, addMemberTimeout)
 }
 
-func newSetMaintenanceConditionAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newSetMaintenanceConditionAction(action api.Action, actionCtx ActionContext) Action {
     a := &actionSetMaintenanceCondition{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -24,17 +24,16 @@ import (
     "context"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeDisableMaintenance, newDisableMaintenanceAction, addMemberTimeout)
 }
 
-func newDisableMaintenanceAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newDisableMaintenanceAction(action api.Action, actionCtx ActionContext) Action {
     a := &actionDisableMaintenance{}
 
-    a.actionImpl = newActionImpl(log, action, actionCtx, &a.newMemberID)
+    a.actionImpl = newActionImpl(action, actionCtx, &a.newMemberID)
 
     return a
 }
@@ -55,7 +54,7 @@ func (a *actionDisableMaintenance) Start(ctx context.Context) (bool, error) {
     }
 
     if err := a.actionCtx.SetAgencyMaintenanceMode(ctx, false); err != nil {
-        a.log.Error().Err(err).Msgf("Unable to disable maintenance")
+        a.log.Err(err).Error("Unable to disable maintenance")
         return true, nil
     }
 
@@ -24,17 +24,16 @@ import (
     "context"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeEnableMaintenance, newEnableMaintenanceAction, addMemberTimeout)
 }
 
-func newEnableMaintenanceAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newEnableMaintenanceAction(action api.Action, actionCtx ActionContext) Action {
     a := &actionEnableMaintenance{}
 
-    a.actionImpl = newActionImpl(log, action, actionCtx, &a.newMemberID)
+    a.actionImpl = newActionImpl(action, actionCtx, &a.newMemberID)
 
     return a
 }
@@ -55,7 +54,7 @@ func (a *actionEnableMaintenance) Start(ctx context.Context) (bool, error) {
     }
 
     if err := a.actionCtx.SetAgencyMaintenanceMode(ctx, true); err != nil {
-        a.log.Error().Err(err).Msgf("Unable to enable maintenance")
+        a.log.Err(err).Error("Unable to enable maintenance")
         return true, nil
     }
 
@@ -24,17 +24,16 @@ import (
     "context"
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    "github.com/rs/zerolog"
 )
 
 func init() {
     registerAction(api.ActionTypeMarkToRemoveMember, newMarkToRemoveMemberAction, addMemberTimeout)
 }
 
-func newMarkToRemoveMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newMarkToRemoveMemberAction(action api.Action, actionCtx ActionContext) Action {
     a := &actionMarkToRemove{}
 
-    a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+    a.actionImpl = newActionImplDefRef(action, actionCtx)
 
     return a
 }
@@ -67,7 +66,7 @@ func (a *actionMarkToRemove) Start(ctx context.Context) (bool, error) {
     }
 
     if err := s.Members.Update(member, group); err != nil {
-        a.log.Warn().Err(err).Str("Member", member.ID).Msgf("Unable to update member")
+        a.log.Err(err).Str("Member", member.ID).Warn("Unable to update member")
         return false
     }
 
@@ -27,7 +27,6 @@ import (
 
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/errors"
-    "github.com/rs/zerolog"
 )
 
 func init() {
@ -38,10 +37,10 @@ const (
|
||||||
actionTypeMemberPhaseUpdatePhaseKey string = "phase"
|
actionTypeMemberPhaseUpdatePhaseKey string = "phase"
|
||||||
)
|
)
|
||||||
|
|
||||||
func newMemberPhaseUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
|
func newMemberPhaseUpdate(action api.Action, actionCtx ActionContext) Action {
|
||||||
a := &memberPhaseUpdateAction{}
|
a := &memberPhaseUpdateAction{}
|
||||||
|
|
||||||
a.actionImpl = newActionImplDefRef(log, action, actionCtx)
|
a.actionImpl = newActionImplDefRef(action, actionCtx)
|
||||||
|
|
||||||
return a
|
return a
|
||||||
}
|
}
|
||||||
|
@ -53,22 +52,21 @@ type memberPhaseUpdateAction struct {
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *memberPhaseUpdateAction) Start(ctx context.Context) (bool, error) {
|
func (a *memberPhaseUpdateAction) Start(ctx context.Context) (bool, error) {
|
||||||
log := a.log
|
|
||||||
m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
|
m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
|
||||||
if !ok {
|
if !ok {
|
||||||
log.Error().Msg("No such member")
|
a.log.Error("No such member")
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
phaseString, ok := a.action.Params[actionTypeMemberPhaseUpdatePhaseKey]
|
phaseString, ok := a.action.Params[actionTypeMemberPhaseUpdatePhaseKey]
|
||||||
if !ok {
|
if !ok {
|
||||||
log.Error().Msg("Phase not defined")
|
a.log.Error("Phase not defined")
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
p, ok := api.GetPhase(phaseString)
|
p, ok := api.GetPhase(phaseString)
|
||||||
if !ok {
|
if !ok {
|
||||||
log.Error().Msgf("Phase %s unknown", p)
|
a.log.Error("Phase %s unknown", p)
|
||||||
return true, nil
|
return true, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -22,17 +22,16 @@ package reconcile
 
 import (
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/rs/zerolog"
 )
 
 func init() {
 	registerAction(api.ActionTypeMemberRIDUpdate, newMemberRIDUpdate, defaultTimeout)
 }
 
-func newMemberRIDUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newMemberRIDUpdate(action api.Action, actionCtx ActionContext) Action {
 	a := &memberRIDUpdateAction{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }
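Every action factory in this commit loses its zerolog.Logger parameter, as in
the newMemberRIDUpdate hunk above; the logger now lives inside actionImpl. A
compilable sketch of that shape, with simplified stand-ins (Action,
ActionContext and actionImpl here are not the operator's real definitions):

package main

import "fmt"

// Stand-ins for api.Action and ActionContext, reduced to what the sketch needs.
type Action struct{ MemberID string }
type ActionContext struct{ DeploymentName string }

// actionImpl carries everything an action needs, including its logger, so
// factories no longer thread a zerolog.Logger through every call site.
type actionImpl struct {
	action    Action
	actionCtx ActionContext
}

func newActionImplDefRef(action Action, actionCtx ActionContext) actionImpl {
	return actionImpl{action: action, actionCtx: actionCtx}
}

type memberRIDUpdateAction struct{ actionImpl }

// The two-argument factory shape used by every registerAction target after
// this change.
func newMemberRIDUpdate(action Action, actionCtx ActionContext) *memberRIDUpdateAction {
	a := &memberRIDUpdateAction{}
	a.actionImpl = newActionImplDefRef(action, actionCtx)
	return a
}

func main() {
	a := newMemberRIDUpdate(Action{MemberID: "PRMR-1"}, ActionContext{DeploymentName: "example"})
	fmt.Println(a.action.MemberID)
}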
@@ -27,7 +27,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 	"github.com/arangodb/kube-arangodb/pkg/util/globals"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-	"github.com/rs/zerolog"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -38,10 +37,10 @@ func init() {
 
 // newRotateMemberAction creates a new Action that implements the given
 // planned RotateMember action.
-func newPVCResizeAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newPVCResizeAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionPVCResize{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -56,12 +55,11 @@ type actionPVCResize struct {
 // Returns true if the action is completely finished, false in case
 // the start time needs to be recorded and a ready condition needs to be checked.
 func (a *actionPVCResize) Start(ctx context.Context) (bool, error) {
-	log := a.log
 	group := a.action.Group
 	groupSpec := a.actionCtx.GetSpec().GetServerGroupSpec(group)
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		log.Error().Msg("No such member")
+		a.log.Error("No such member")
 		return true, nil
 	}
 

@@ -111,16 +109,15 @@ func (a *actionPVCResize) Start(ctx context.Context) (bool, error) {
 // Returns: ready, abort, error.
 func (a *actionPVCResize) CheckProgress(ctx context.Context) (bool, bool, error) {
 	// Check that pod is removed
-	log := a.log
 	m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !found {
-		log.Error().Msg("No such member")
+		a.log.Error("No such member")
 		return true, false, nil
 	}
 
 	cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID)
 	if !ok {
-		log.Warn().Msg("Cluster is not ready")
+		a.log.Warn("Cluster is not ready")
 		return false, false, nil
 	}
 
@@ -25,7 +25,6 @@ import (
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-	"github.com/rs/zerolog"
 	core "k8s.io/api/core/v1"
 )
 

@@ -35,10 +34,10 @@ func init() {
 
 // newRotateMemberAction creates a new Action that implements the given
 // planned RotateMember action.
-func newPVCResizedAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newPVCResizedAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionPVCResized{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -56,16 +55,15 @@ type actionPVCResized struct {
 // Returns: ready, abort, error.
 func (a *actionPVCResized) CheckProgress(ctx context.Context) (bool, bool, error) {
 	// Check that pod is removed
-	log := a.log
 	m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !found {
-		log.Error().Msg("No such member")
+		a.log.Error("No such member")
 		return true, false, nil
 	}
 
 	cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID)
 	if !ok {
-		log.Warn().Msg("Cluster is not ready")
+		a.log.Warn("Cluster is not ready")
 		return false, false, nil
 	}
 
@@ -25,8 +25,6 @@ import (
 
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
-	"github.com/rs/zerolog"
-
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 )
 

@@ -36,10 +34,10 @@ func init() {
 
 // newRecreateMemberAction creates a new Action that implements the given
 // planned RecreateMember action.
-func newRecreateMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newRecreateMemberAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionRecreateMember{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }
@@ -23,7 +23,6 @@ package reconcile
 import (
 	"context"
 
-	"github.com/rs/zerolog"
 	apiErrors "k8s.io/apimachinery/pkg/api/errors"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 

@@ -40,10 +39,10 @@ func init() {
 
 // newRemoveMemberAction creates a new Action that implements the given
 // planned RemoveMember action.
-func newRemoveMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newRemoveMemberAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionRemoveMember{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -85,12 +84,12 @@ func (a *actionRemoveMember) Start(ctx context.Context) (bool, error) {
 		defer cancel()
 		if err := arangod.RemoveServerFromCluster(ctxChild, client.Connection(), driver.ServerID(m.ID)); err != nil {
 			if !driver.IsNotFound(err) && !driver.IsPreconditionFailed(err) {
-				a.log.Err(err).Str("member-id", m.ID).Msgf("Failed to remove server from cluster")
+				a.log.Err(err).Str("member-id", m.ID).Error("Failed to remove server from cluster")
 				// ignore this error, maybe all coordinators are failed and no connction to cluster is possible
 			} else if driver.IsPreconditionFailed(err) {
 				health := a.actionCtx.GetMembersState().Health()
 				if health.Error != nil {
-					a.log.Err(err).Str("member-id", m.ID).Msgf("Failed get cluster health")
+					a.log.Err(err).Str("member-id", m.ID).Error("Failed get cluster health")
 				}
 				// We don't care if not found
 				if record, ok := health.Members[driver.ServerID(m.ID)]; ok {

@@ -102,11 +101,11 @@ func (a *actionRemoveMember) Start(ctx context.Context) (bool, error) {
 						return false, errors.WithStack(errors.Newf("can not remove server from cluster. Not yet terminated. Retry later"))
 					}
 
-					a.log.Debug().Msg("dbserver has shut down")
+					a.log.Debug("dbserver has shut down")
 				}
 			}
 		} else {
-			a.log.Warn().Msgf("ignoring error: %s", err.Error())
+			a.log.Warn("ignoring error: %s", err.Error())
 		}
 	}
 }
@@ -30,7 +30,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/rs/zerolog"
 )
 
 func init() {

@@ -39,10 +38,10 @@ func init() {
 
 // newResignLeadershipAction creates a new Action that implements the given
 // planned ResignLeadership action.
-func newResignLeadershipAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newResignLeadershipAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionResignLeadership{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -54,16 +53,15 @@ type actionResignLeadership struct {
 
 // Start performs the start of the ReasignLeadership process on DBServer.
 func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) {
-	log := a.log
 	group := a.action.Group
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		log.Error().Msg("No such member")
+		a.log.Error("No such member")
 		return true, nil
 	}
 
 	if a.actionCtx.GetSpec().Mode.Get() != api.DeploymentModeCluster {
-		log.Debug().Msg("Resign only allowed in cluster mode")
+		a.log.Debug("Resign only allowed in cluster mode")
 		return true, nil
 	}
 

@@ -71,18 +69,18 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) {
 	defer cancel()
 	client, err := a.actionCtx.GetDatabaseClient(ctxChild)
 	if err != nil {
-		log.Error().Err(err).Msgf("Unable to get client")
+		a.log.Err(err).Error("Unable to get client")
 		return true, errors.WithStack(err)
 	}
 
 	switch group {
 	case api.ServerGroupDBServers:
 		if agencyState, agencyOK := a.actionCtx.GetAgencyCache(); !agencyOK {
-			log.Warn().Err(err).Msgf("Maintenance is enabled, skipping action")
+			a.log.Err(err).Warn("Maintenance is enabled, skipping action")
 			return true, errors.WithStack(err)
 		} else if agencyState.Supervision.Maintenance.Exists() {
 			// We are done, action cannot be handled on maintenance mode
-			log.Warn().Msgf("Maintenance is enabled, skipping action")
+			a.log.Warn("Maintenance is enabled, skipping action")
 			return true, nil
 		}
 

@@ -90,7 +88,7 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) {
 		defer cancel()
 		cluster, err := client.Cluster(ctxChild)
 		if err != nil {
-			log.Error().Err(err).Msgf("Unable to get cluster client")
+			a.log.Err(err).Error("Unable to get cluster client")
 			return true, errors.WithStack(err)
 		}
 
@@ -98,9 +96,9 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) {
 		ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
 		defer cancel()
 		jobCtx := driver.WithJobIDResponse(ctxChild, &jobID)
-		log.Debug().Msg("Temporary shutdown, resign leadership")
+		a.log.Debug("Temporary shutdown, resign leadership")
 		if err := cluster.ResignServer(jobCtx, m.ID); err != nil {
-			log.Debug().Err(err).Msg("Failed to resign server")
+			a.log.Err(err).Debug("Failed to resign server")
 			return true, errors.WithStack(err)
 		}
 

@@ -118,19 +116,17 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) {
 
 // CheckProgress checks if Job is completed.
 func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool, error) {
-	log := a.log
-
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		log.Error().Msg("No such member")
+		a.log.Error("No such member")
 		return true, false, nil
 	}
 
 	if agencyState, agencyOK := a.actionCtx.GetAgencyCache(); !agencyOK {
-		log.Error().Msgf("Unable to get maintenance mode")
+		a.log.Error("Unable to get maintenance mode")
 		return false, false, nil
 	} else if agencyState.Supervision.Maintenance.Exists() {
-		log.Warn().Msgf("Maintenance is enabled, skipping action")
+		a.log.Warn("Maintenance is enabled, skipping action")
 		// We are done, action cannot be handled on maintenance mode
 		m.CleanoutJobID = ""
 		if err := a.actionCtx.UpdateMember(ctx, m); err != nil {

@@ -143,7 +139,7 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool,
 	defer cancel()
 	agency, err := a.actionCtx.GetAgency(ctxChild)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to create agency client")
+		a.log.Err(err).Debug("Failed to create agency client")
 		return false, false, nil
 	}
 

@@ -151,7 +147,7 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool,
 	defer cancel()
 	c, err := a.actionCtx.GetDatabaseClient(ctxChild)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to create member client")
+		a.log.Err(err).Debug("Failed to create member client")
 		return false, false, nil
 	}
 

@@ -160,10 +156,10 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool,
 	jobStatus, err := arangod.CleanoutServerJobStatus(ctxChild, m.CleanoutJobID, c, agency)
 	if err != nil {
 		if driver.IsNotFound(err) {
-			log.Debug().Err(err).Msg("Job not found, but proceeding")
+			a.log.Err(err).Debug("Job not found, but proceeding")
 			return true, false, nil
 		}
-		log.Debug().Err(err).Msg("Failed to fetch job status")
+		a.log.Err(err).Debug("Failed to fetch job status")
 		return false, false, errors.WithStack(err)
 	}
 

@@ -172,7 +168,7 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool,
 		if err := a.actionCtx.UpdateMember(ctx, m); err != nil {
 			return false, false, errors.WithStack(err)
 		}
-		log.Error().Msg("Resign server job failed")
+		a.log.Error("Resign server job failed")
 		return true, false, nil
 	}
 
@@ -23,7 +23,6 @@ package reconcile
 import (
 	"context"
 
-	"github.com/rs/zerolog"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"time"

@@ -40,10 +39,10 @@ func init() {
 
 // newRotateMemberAction creates a new Action that implements the given
 // planned RotateMember action.
-func newRotateMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newRotateMemberAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionRotateMember{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -58,7 +57,7 @@ type actionRotateMember struct {
 // Returns true if the action is completely finished, false in case
 // the start time needs to be recorded and a ready condition needs to be checked.
 func (a *actionRotateMember) Start(ctx context.Context) (bool, error) {
-	shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log)
+	shutdown, m, ok := getShutdownHelper(a.actionImpl)
 	if !ok {
 		return true, nil
 	}

@@ -82,8 +81,7 @@ func (a *actionRotateMember) Start(ctx context.Context) (bool, error) {
 // Returns: ready, abort, error.
 func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, error) {
 	// Check that pod is removed
-	log := a.log
-	shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log)
+	shutdown, m, ok := getShutdownHelper(a.actionImpl)
 	if !ok {
 		return true, false, nil
 	}

@@ -96,7 +94,7 @@ func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, err
 
 	cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID)
 	if !ok {
-		log.Warn().Msg("Cluster is not ready")
+		a.log.Warn("Cluster is not ready")
 		return false, false, nil
 	}
 

@@ -106,7 +104,7 @@ func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, err
 	// Pod is terminated, we can now remove it
 	if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctxChild, m.PodName, meta.DeleteOptions{}); err != nil {
 		if !k8sutil.IsNotFound(err) {
-			log.Error().Err(err).Msg("Unable to delete pod")
+			a.log.Err(err).Error("Unable to delete pod")
 			return false, false, nil
 		}
 	}
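The rotate hunks above also change getShutdownHelper: instead of passing the
action, the context and the logger as three separate arguments, the helper now
receives the actionImpl value that already bundles them. A sketch of that
refactor with simplified stand-in types (not the reconcile package's real
ones):

package main

import "fmt"

type Action struct{ MemberID string }
type ActionContext struct{}
type MemberStatus struct{ ID string }
type shutdownHelper struct{ member MemberStatus }

// actionImpl bundles what the helper previously took piecemeal.
type actionImpl struct {
	action    Action
	actionCtx ActionContext
}

// Before: getShutdownHelper(&a.action, a.actionCtx, a.log)
// After:  getShutdownHelper(a.actionImpl)
func getShutdownHelper(a actionImpl) (shutdownHelper, MemberStatus, bool) {
	m := MemberStatus{ID: a.action.MemberID}
	return shutdownHelper{member: m}, m, true
}

func main() {
	impl := actionImpl{action: Action{MemberID: "PRMR-2"}}
	shutdown, m, ok := getShutdownHelper(impl)
	fmt.Println(shutdown.member.ID, m.ID, ok)
}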
@@ -23,7 +23,6 @@ package reconcile
 import (
 	"context"
 
-	"github.com/rs/zerolog"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"time"

@@ -39,10 +38,10 @@ func init() {
 
 // newRotateStartMemberAction creates a new Action that implements the given
 // planned RotateStartMember action.
-func newRotateStartMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newRotateStartMemberAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionRotateStartMember{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -57,7 +56,7 @@ type actionRotateStartMember struct {
 // Returns true if the action is completely finished, false in case
 // the start time needs to be recorded and a ready condition needs to be checked.
 func (a *actionRotateStartMember) Start(ctx context.Context) (bool, error) {
-	shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log)
+	shutdown, m, ok := getShutdownHelper(a.actionImpl)
 	if !ok {
 		return true, nil
 	}

@@ -81,15 +80,14 @@ func (a *actionRotateStartMember) Start(ctx context.Context) (bool, error) {
 // Returns: ready, abort, error.
 func (a *actionRotateStartMember) CheckProgress(ctx context.Context) (bool, bool, error) {
 	// Check that pod is removed
-	log := a.log
-	shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log)
+	shutdown, m, ok := getShutdownHelper(a.actionImpl)
 	if !ok {
 		return true, false, nil
 	}
 
 	cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID)
 	if !ok {
-		log.Warn().Msg("Cluster is not ready")
+		a.log.Warn("Cluster is not ready")
 		return false, false, nil
 	}
 

@@ -102,7 +100,7 @@ func (a *actionRotateStartMember) CheckProgress(ctx context.Context) (bool, bool
 	// Pod is terminated, we can now remove it
 	if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil {
 		if !k8sutil.IsNotFound(err) {
-			log.Error().Err(err).Msg("Unable to delete pod")
+			a.log.Err(err).Error("Unable to delete pod")
 			return false, false, nil
 		}
 	}
@@ -26,7 +26,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/rs/zerolog"
 )
 
 func init() {

@@ -35,10 +34,10 @@ func init() {
 
 // newRotateStopMemberAction creates a new Action that implements the given
 // planned RotateStopMember action.
-func newRotateStopMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newRotateStopMemberAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionRotateStopMember{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -56,10 +55,9 @@ type actionRotateStopMember struct {
 // Returns true if the action is completely finished, false in case
 // the start time needs to be recorded and a ready condition needs to be checked.
 func (a *actionRotateStopMember) Start(ctx context.Context) (bool, error) {
-	log := a.log
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		log.Error().Msg("No such member")
+		a.log.Error("No such member")
 	}
 
 	m.Phase = api.MemberPhaseNone
@@ -28,7 +28,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/globals"
 
 	"github.com/pkg/errors"
-	"github.com/rs/zerolog"
 	core "k8s.io/api/core/v1"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"

@@ -41,10 +40,10 @@ func init() {
 	registerAction(api.ActionTypeRuntimeContainerArgsLogLevelUpdate, runtimeContainerArgsUpdate, defaultTimeout)
 }
 
-func runtimeContainerArgsUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func runtimeContainerArgsUpdate(action api.Action, actionCtx ActionContext) Action {
 	a := &actionRuntimeContainerArgsUpdate{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -60,7 +59,7 @@ type actionRuntimeContainerArgsUpdate struct {
 func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error {
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		a.log.Info().Msg("member is gone already")
+		a.log.Info("member is gone already")
 		return nil
 	}
 

@@ -77,37 +76,37 @@ func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error {
 
 	containerName, ok := a.action.GetParam(rotation.ContainerName)
 	if !ok {
-		a.log.Warn().Msgf("Unable to find action's param %s", rotation.ContainerName)
+		a.log.Warn("Unable to find action's param %s", rotation.ContainerName)
 		return nil
 	}
 
-	log := a.log.With().Str("containerName", containerName).Logger()
+	log := a.log.Str("containerName", containerName)
 	updateMemberStatusArgs := func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool {
 		if obj.Spec.Template == nil || s.Template == nil ||
 			obj.Spec.Template.PodSpec == nil || s.Template.PodSpec == nil {
-			log.Info().Msgf("Nil Member definition")
+			log.Info("Nil Member definition")
 			return false
 		}
 
 		if len(obj.Spec.Template.PodSpec.Spec.Containers) != len(s.Template.PodSpec.Spec.Containers) {
-			log.Info().Msgf("Invalid size of containers")
+			log.Info("Invalid size of containers")
 			return false
 		}
 
 		for id := range obj.Spec.Template.PodSpec.Spec.Containers {
 			if obj.Spec.Template.PodSpec.Spec.Containers[id].Name == containerName {
 				if s.Template.PodSpec.Spec.Containers[id].Name != containerName {
-					log.Info().Msgf("Invalid order of containers")
+					log.Info("Invalid order of containers")
 					return false
 				}
 
 				s.Template.PodSpec.Spec.Containers[id].Command = obj.Spec.Template.PodSpec.Spec.Containers[id].Command
-				log.Info().Msgf("Updating container args")
+				log.Info("Updating container args")
 				return true
 			}
 		}
 
-		log.Info().Msgf("can not find the container")
+		log.Info("can not find the container")
 
 		return false
 	}
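One more shape worth calling out from the hunk above: deriving a logger with a
bound field collapses from zerolog's With().Str(...).Logger() dance into a
single chained Str call that returns a logger value. An illustrative sketch
(the Logger type is a stand-in, not the operator's real one):

package main

import "fmt"

type Logger struct{ fields []string }

// Str returns a derived logger value; copying the slice keeps the parent
// logger unaffected by later appends.
func (l Logger) Str(key, value string) Logger {
	fields := append(append([]string{}, l.fields...), key+"="+value)
	return Logger{fields: fields}
}

func (l Logger) Info(format string, args ...interface{}) {
	fmt.Printf("INFO %s %v\n", fmt.Sprintf(format, args...), l.fields)
}

func main() {
	var base Logger
	// Old: log := a.log.With().Str("containerName", name).Logger()
	// New: log := a.log.Str("containerName", name)
	log := base.Str("containerName", "server")
	log.Info("Updating container args")
}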
@@ -130,7 +129,7 @@ func (a *actionRuntimeContainerArgsUpdate) ReloadComponents() []throttle.Compone
 func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, error) {
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		a.log.Info().Msg("member is gone already")
+		a.log.Info("member is gone already")
 		return true, nil
 	}
 

@@ -140,7 +139,7 @@ func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro
 	}
 
 	if !m.Phase.IsReady() {
-		a.log.Info().Msg("Member is not ready, unable to run update operation")
+		a.log.Info("Member is not ready, unable to run update operation")
 		return true, nil
 	}
 

@@ -157,7 +156,7 @@ func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro
 
 	pod, ok := cache.Pod().V1().GetSimple(m.PodName)
 	if !ok {
-		a.log.Info().Str("podName", m.PodName).Msg("pod is not present")
+		a.log.Str("podName", m.PodName).Info("pod is not present")
 		return true, nil
 	}
 

@@ -175,7 +174,7 @@ func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro
 		return errors.WithMessage(err, "can not set log level")
 	}
 
-	a.log.Info().Interface("topics", topicsLogLevel).Msg("send log level to the ArangoDB")
+	a.log.Interface("topics", topicsLogLevel).Info("send log level to the ArangoDB")
 	return nil
 }
@@ -29,7 +29,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 
-	"github.com/rs/zerolog"
 	core "k8s.io/api/core/v1"
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -38,10 +37,10 @@ func init() {
 	registerAction(api.ActionTypeRuntimeContainerImageUpdate, runtimeContainerImageUpdate, defaultTimeout)
 }
 
-func runtimeContainerImageUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func runtimeContainerImageUpdate(action api.Action, actionCtx ActionContext) Action {
 	a := &actionRuntimeContainerImageUpdate{}
 
-	a.actionImpl = newBaseActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newBaseActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -54,47 +53,47 @@ type actionRuntimeContainerImageUpdate struct {
 }
 
 func (a actionRuntimeContainerImageUpdate) Post(ctx context.Context) error {
-	a.log.Info().Msgf("Updating container image")
+	a.log.Info("Updating container image")
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		a.log.Info().Msg("member is gone already")
+		a.log.Info("member is gone already")
 		return nil
 	}
 
 	name, image, ok := a.getContainerDetails()
 	if !ok {
-		a.log.Info().Msg("Unable to find container details")
+		a.log.Info("Unable to find container details")
 		return nil
 	}
 
 	member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group))
 	if !ok {
 		err := errors.Newf("ArangoMember not found")
-		a.log.Error().Err(err).Msg("ArangoMember not found")
+		a.log.Err(err).Error("ArangoMember not found")
 		return err
 	}
 
 	return a.actionCtx.WithCurrentArangoMember(member.GetName()).UpdateStatus(ctx, func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool {
 		if obj.Spec.Template == nil || s.Template == nil ||
 			obj.Spec.Template.PodSpec == nil || s.Template.PodSpec == nil {
-			a.log.Info().Msgf("Nil Member definition")
+			a.log.Info("Nil Member definition")
 			return false
 		}
 
 		if len(obj.Spec.Template.PodSpec.Spec.Containers) != len(s.Template.PodSpec.Spec.Containers) {
-			a.log.Info().Msgf("Invalid size of containers")
+			a.log.Info("Invalid size of containers")
 			return false
 		}
 
 		for id := range obj.Spec.Template.PodSpec.Spec.Containers {
 			if obj.Spec.Template.PodSpec.Spec.Containers[id].Name == name {
 				if s.Template.PodSpec.Spec.Containers[id].Name != name {
-					a.log.Info().Msgf("Invalid order of containers")
+					a.log.Info("Invalid order of containers")
 					return false
 				}
 
 				if obj.Spec.Template.PodSpec.Spec.Containers[id].Image != image {
-					a.log.Info().Str("got", obj.Spec.Template.PodSpec.Spec.Containers[id].Image).Str("expected", image).Msgf("Invalid spec image of container")
+					a.log.Str("got", obj.Spec.Template.PodSpec.Spec.Containers[id].Image).Str("expected", image).Info("Invalid spec image of container")
 					return false
 				}
 

@@ -127,7 +126,7 @@ func (a actionRuntimeContainerImageUpdate) getContainerDetails() (string, string
 func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, error) {
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		a.log.Info().Msg("member is gone already")
+		a.log.Info("member is gone already")
 		return true, nil
 	}
 
@@ -138,45 +137,45 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err
 
 	name, image, ok := a.getContainerDetails()
 	if !ok {
-		a.log.Info().Msg("Unable to find container details")
+		a.log.Info("Unable to find container details")
 		return true, nil
 	}
 
 	if !m.Phase.IsReady() {
-		a.log.Info().Msg("Member is not ready, unable to run update operation")
+		a.log.Info("Member is not ready, unable to run update operation")
 		return true, nil
 	}
 
 	member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group))
 	if !ok {
 		err := errors.Newf("ArangoMember not found")
-		a.log.Error().Err(err).Msg("ArangoMember not found")
+		a.log.Err(err).Error("ArangoMember not found")
 		return false, err
 	}
 
 	pod, ok := cache.Pod().V1().GetSimple(m.PodName)
 	if !ok {
-		a.log.Info().Msg("pod is not present")
+		a.log.Info("pod is not present")
 		return true, nil
 	}
 
 	if member.Spec.Template == nil || member.Spec.Template.PodSpec == nil {
-		a.log.Info().Msg("pod spec is not present")
+		a.log.Info("pod spec is not present")
 		return true, nil
 	}
 
 	if member.Status.Template == nil || member.Status.Template.PodSpec == nil {
-		a.log.Info().Msg("pod status is not present")
+		a.log.Info("pod status is not present")
 		return true, nil
 	}
 
 	if len(pod.Spec.Containers) != len(member.Spec.Template.PodSpec.Spec.Containers) {
-		a.log.Info().Msg("spec container count is not equal")
+		a.log.Info("spec container count is not equal")
 		return true, nil
 	}
 
 	if len(pod.Spec.Containers) != len(member.Status.Template.PodSpec.Spec.Containers) {
-		a.log.Info().Msg("status container count is not equal")
+		a.log.Info("status container count is not equal")
 		return true, nil
 	}
 

@@ -209,45 +208,45 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err
 }
 
 func (a actionRuntimeContainerImageUpdate) CheckProgress(ctx context.Context) (bool, bool, error) {
-	a.log.Info().Msgf("Update Progress")
+	a.log.Info("Update Progress")
 	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		a.log.Info().Msg("member is gone already")
+		a.log.Info("member is gone already")
 		return true, false, nil
 	}
 
 	cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID)
 	if !ok {
-		a.log.Info().Msg("Cluster is not ready")
+		a.log.Info("Cluster is not ready")
 		return false, false, nil
 	}
 
 	pod, ok := cache.Pod().V1().GetSimple(m.PodName)
 	if !ok {
-		a.log.Info().Msg("pod is not present")
+		a.log.Info("pod is not present")
 		return true, false, nil
 	}
 
 	name, image, ok := a.getContainerDetails()
 	if !ok {
-		a.log.Info().Msg("Unable to find container details")
+		a.log.Info("Unable to find container details")
 		return true, false, nil
 	}
 
 	cspec, ok := k8sutil.GetContainerByName(pod, name)
 	if !ok {
-		a.log.Info().Msg("Unable to find container spec")
+		a.log.Info("Unable to find container spec")
 		return true, false, nil
 	}
 
 	cstatus, ok := k8sutil.GetContainerStatusByName(pod, name)
 	if !ok {
-		a.log.Info().Msg("Unable to find container status")
+		a.log.Info("Unable to find container status")
 		return true, false, nil
 	}
 
 	if cspec.Image != image {
-		a.log.Info().Msg("Image changed")
+		a.log.Info("Image changed")
 		return true, false, nil
 	}
 

@@ -269,7 +268,7 @@ func (a actionRuntimeContainerImageUpdate) CheckProgress(ctx context.Context) (b
 			if lastTermination.FinishedAt.Time.Before(allowedRestartPeriod) {
 				return true, false, errors.Newf("Container %s continuously failing during image replacement: (%d) %s: %s", name, lastTermination.ExitCode, lastTermination.Reason, lastTermination.Message)
 			} else {
-				a.log.Debug().Str("pod-name", pod.GetName()).Msg("pod is restarting - we are not marking it as terminated yet..")
+				a.log.Str("pod-name", pod.GetName()).Debug("pod is restarting - we are not marking it as terminated yet..")
 			}
 		}
 	}
@@ -24,8 +24,6 @@ import (
 	"context"
 	"strconv"
 
-	"github.com/rs/zerolog"
-
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 )
 

@@ -33,10 +31,10 @@ func init() {
 	registerAction(api.ActionTypeSetCondition, setCondition, defaultTimeout)
 }
 
-func setCondition(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func setCondition(action api.Action, actionCtx ActionContext) Action {
 	a := &actionSetCondition{}
 
-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)
 
 	return a
 }

@@ -51,7 +49,7 @@ type actionSetCondition struct {
 // Start starts the action for changing conditions on the provided member.
 func (a actionSetCondition) Start(ctx context.Context) (bool, error) {
 	if len(a.action.Params) == 0 {
-		a.log.Info().Msg("can not start the action with the empty list of conditions")
+		a.log.Info("can not start the action with the empty list of conditions")
 		return true, nil
 	}
 

@@ -59,7 +57,7 @@ func (a actionSetCondition) Start(ctx context.Context) (bool, error) {
 		changed := false
 		for condition, value := range a.action.Params {
 			if value == "" {
-				a.log.Debug().Msg("remove the condition")
+				a.log.Debug("remove the condition")
 
 				if s.Conditions.Remove(api.ConditionType(condition)) {
 					changed = true

@@ -67,11 +65,11 @@ func (a actionSetCondition) Start(ctx context.Context) (bool, error) {
 			} else {
 				set, err := strconv.ParseBool(value)
 				if err != nil {
-					a.log.Error().Err(err).Str("value", value).Msg("can not parse string to boolean")
+					a.log.Err(err).Str("value", value).Error("can not parse string to boolean")
 					continue
 				}
 
-				a.log.Debug().Msg("set the condition")
+				a.log.Debug("set the condition")
 
 				if s.Conditions.Update(api.ConditionType(condition), set, a.action.Reason, "action set the member condition") {
 					changed = true

@@ -80,7 +78,7 @@ func (a actionSetCondition) Start(ctx context.Context) (bool, error) {
 		}
 		return changed
 	}); err != nil {
-		a.log.Warn().Err(err).Msgf("Unable to set condition")
+		a.log.Err(err).Warn("Unable to set condition")
 		return true, nil
 	}
 
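For reference, the actionSetCondition hunks above encode a small param
convention: an empty value removes the named condition, anything else is
parsed with strconv.ParseBool and set, and unparsable values are logged and
skipped. A standalone sketch of that dispatch (the conditions map stands in
for the deployment status conditions; this is an illustration, not the
operator's code):

package main

import (
	"fmt"
	"strconv"
)

func applyConditionParams(params map[string]string, conditions map[string]bool) {
	for condition, value := range params {
		if value == "" {
			// empty value: remove the condition
			delete(conditions, condition)
			continue
		}
		set, err := strconv.ParseBool(value)
		if err != nil {
			// logged and skipped, as the action does
			fmt.Printf("ERROR can not parse string to boolean value=%s\n", value)
			continue
		}
		conditions[condition] = set
	}
}

func main() {
	conditions := map[string]bool{"Terminating": true}
	applyConditionParams(map[string]string{
		"Terminating": "",     // removed
		"UpToDate":    "true", // set
	}, conditions)
	fmt.Println(conditions)
}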
@@ -23,7 +23,6 @@ package reconcile
import (
    "context"

-   "github.com/rs/zerolog"
    core "k8s.io/api/core/v1"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -45,10 +44,10 @@ const (
    setConditionActionV2KeyHash string = "hash"
)

-func setConditionV2(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func setConditionV2(action api.Action, actionCtx ActionContext) Action {
    a := &actionSetConditionV2{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -64,13 +63,13 @@ type actionSetConditionV2 struct {
func (a actionSetConditionV2) Start(ctx context.Context) (bool, error) {
    at, ok := a.action.Params[setConditionActionV2KeyType]
    if !ok {
-       a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyType)
+       a.log.Info("key %s is missing in action definition", setConditionActionV2KeyType)
        return true, nil
    }

    aa, ok := a.action.Params[setConditionActionV2KeyAction]
    if !ok {
-       a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyAction)
+       a.log.Info("key %s is missing in action definition", setConditionActionV2KeyAction)
        return true, nil
    }

@@ -84,18 +83,18 @@ func (a actionSetConditionV2) Start(ctx context.Context) (bool, error) {
        if err := a.actionCtx.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) {
            return s.Conditions.UpdateWithHash(api.ConditionType(aa), as, ar, am, ah), nil
        }); err != nil {
-           a.log.Warn().Err(err).Msgf("unable to update status")
+           a.log.Err(err).Warn("unable to update status")
            return true, nil
        }
    case setConditionActionV2KeyTypeRemove:
        if err := a.actionCtx.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) {
            return s.Conditions.Remove(api.ConditionType(aa)), nil
        }); err != nil {
-           a.log.Warn().Err(err).Msgf("unable to update status")
+           a.log.Err(err).Warn("unable to update status")
            return true, nil
        }
    default:
-       a.log.Info().Msgf("unknown type %s", at)
+       a.log.Info("unknown type %s", at)
        return true, nil
    }
    return true, nil
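For orientation: the V2 condition actions are driven entirely by the plan entry's `Params` map, keyed by the `setConditionActionV2Key*` constants used above. A hedged sketch of what such a plan entry could look like, assuming `Params` is a plain `map[string]string`; the literal key and value strings are assumptions for illustration, only the lookups above come from the source:

// Hypothetical plan entry for illustration only.
action := api.Action{
    Params: map[string]string{
        "type":   "add",      // setConditionActionV2KeyType: selects the add vs remove branch
        "action": "UpToDate", // setConditionActionV2KeyAction: the condition name
        "hash":   "<sha256>", // setConditionActionV2KeyHash: carried into UpdateWithHash
    },
}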
@@ -26,7 +26,6 @@ import (
    "github.com/arangodb/kube-arangodb/pkg/util/errors"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
@@ -35,10 +34,10 @@ func init() {

// newSetCurrentImageAction creates a new Action that implements the given
// planned SetCurrentImage action.
-func newSetCurrentMemberImageAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newSetCurrentMemberImageAction(action api.Action, actionCtx ActionContext) Action {
    a := &setCurrentMemberImageAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -63,18 +62,16 @@ func (a *setCurrentMemberImageAction) Start(ctx context.Context) (bool, error) {
// CheckProgress checks the progress of the action.
// Returns true if the action is completely finished, false otherwise.
func (a *setCurrentMemberImageAction) CheckProgress(ctx context.Context) (bool, bool, error) {
-   log := a.log
-
    imageInfo, found := a.actionCtx.GetImageInfo(a.action.Image)
    if !found {
-       log.Info().Msgf("Image not found")
+       a.log.Info("Image not found")
        return true, false, nil
    }

    if err := a.actionCtx.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
        m, g, found := s.Members.ElementByID(a.action.MemberID)
        if !found {
-           log.Error().Msg("No such member")
+           a.log.Error("No such member")
            return false
        }

@@ -84,13 +81,13 @@ func (a *setCurrentMemberImageAction) CheckProgress(ctx context.Context) (bool,
        m.Image = &imageInfo

        if err := s.Members.Update(m, g); err != nil {
-           log.Error().Msg("Member update failed")
+           a.log.Error("Member update failed")
            return false
        }

        return true
    }); err != nil {
-       log.Error().Msg("Member failed")
+       a.log.Error("Member failed")
        return true, false, nil
    }

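The constructor change above repeats across every action file in this commit: the factory drops its `zerolog.Logger` parameter, and `newActionImplDefRef` now resolves the logger internally instead of receiving one. Read as a type, the registry's factory signature shrinks roughly like this (a sketch of the pattern, not the package's literal declaration):

// Before: every factory had to be handed a logger.
type actionFactory func(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action

// After: the logger lives in actionImpl, so only the plan entry and
// its context travel through the registry.
type actionFactory func(action api.Action, actionCtx ActionContext) Action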
@@ -29,17 +29,16 @@ import (
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeLicenseSet, newLicenseSet, defaultTimeout)
}

-func newLicenseSet(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newLicenseSet(action api.Action, actionCtx ActionContext) Action {
    a := &licenseSetAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -53,12 +52,9 @@ type licenseSetAction struct {
func (a *licenseSetAction) Start(ctx context.Context) (bool, error) {
    ctxChild, cancel := globals.GetGlobals().Timeouts().ArangoD().WithTimeout(ctx)
    defer cancel()

-   log := a.log
-
    spec := a.actionCtx.GetSpec()
    if !spec.License.HasSecretName() {
-       log.Error().Msg("License is not set")
+       a.log.Error("License is not set")
        return true, nil
    }

@@ -75,20 +71,20 @@ func (a *licenseSetAction) Start(ctx context.Context) (bool, error) {
    group := a.action.Group
    m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
    if !ok {
-       log.Error().Msg("No such member")
+       a.log.Error("No such member")
        return true, nil
    }

    c, err := a.actionCtx.GetServerClient(ctxChild, group, m.ID)
    if !ok {
-       log.Error().Err(err).Msg("Unable to get client")
+       a.log.Err(err).Error("Unable to get client")
        return true, nil
    }

    client := client.NewClient(c.Connection())

    if ok, err := licenseV2Compare(ctxChild, client, l.V2); err != nil {
-       log.Error().Err(err).Msg("Unable to verify license")
+       a.log.Err(err).Error("Unable to verify license")
        return true, nil
    } else if ok {
        // Already latest license
@@ -96,7 +92,7 @@ func (a *licenseSetAction) Start(ctx context.Context) (bool, error) {
    }

    if err := client.SetLicense(ctxChild, string(l.V2), true); err != nil {
-       log.Error().Err(err).Msg("Unable to set license")
+       a.log.Err(err).Error("Unable to set license")
        return true, nil
    }

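Worth noting in the hunks above: the action verifies before it writes. `licenseV2Compare` short-circuits when the server already runs the desired V2 license, and only on a mismatch does `SetLicense` run (its final `true` argument reads as a force flag; that interpretation is an assumption). Condensed, the idempotency pattern is:

// Sketch of the flow above; the helpers are the ones already in scope there.
if ok, err := licenseV2Compare(ctx, client, l.V2); err != nil {
    return true, nil // verification failed: log and let the plan continue
} else if ok {
    return true, nil // already the latest license: nothing to write
}
if err := client.SetLicense(ctx, string(l.V2), true); err != nil {
    return true, nil // write failed: log and let the plan continue
}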
@@ -24,8 +24,6 @@ import (
    "context"
    "strconv"

-   "github.com/rs/zerolog"
-
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/errors"
)
@@ -34,10 +32,10 @@ func init() {
    registerAction(api.ActionTypeSetMemberCondition, setMemberCondition, defaultTimeout)
}

-func setMemberCondition(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func setMemberCondition(action api.Action, actionCtx ActionContext) Action {
    a := &actionSetMemberCondition{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -53,28 +51,28 @@ type actionSetMemberCondition struct {
func (a actionSetMemberCondition) Start(ctx context.Context) (bool, error) {
    m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
    if !ok {
-       a.log.Info().Msg("can not set the condition because the member is gone already")
+       a.log.Info("can not set the condition because the member is gone already")
        return true, nil
    }

    if len(a.action.Params) == 0 {
-       a.log.Info().Msg("can not start the action with the empty list of conditions")
+       a.log.Info("can not start the action with the empty list of conditions")
        return true, nil
    }

    for condition, value := range a.action.Params {
        if value == "" {
-           a.log.Debug().Msg("remove the condition")
+           a.log.Debug("remove the condition")

            m.Conditions.Remove(api.ConditionType(condition))
        } else {
            set, err := strconv.ParseBool(value)
            if err != nil {
-               a.log.Error().Err(err).Str("value", value).Msg("can not parse string to boolean")
+               a.log.Err(err).Str("value", value).Error("can not parse string to boolean")
                continue
            }

-           a.log.Debug().Msg("set the condition")
+           a.log.Debug("set the condition")

            m.Conditions.Update(api.ConditionType(condition), set, a.action.Reason, "action set the member condition")
        }
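The member-condition action treats each params entry as condition name to boolean: an empty string removes the condition, anything else goes through `strconv.ParseBool`, and parse failures are logged and skipped. A small, runnable illustration of exactly that parsing contract (condition names are illustrative):

package main

import (
    "fmt"
    "strconv"
)

func main() {
    // Mirrors the loop above: "" removes a condition, anything else is
    // parsed as a boolean and sets it; parse errors are skipped.
    params := map[string]string{
        "Ready":         "true",
        "Terminated":    "",
        "UpgradeFailed": "not-a-bool",
    }
    for condition, value := range params {
        if value == "" {
            fmt.Printf("remove condition %s\n", condition)
            continue
        }
        set, err := strconv.ParseBool(value)
        if err != nil {
            fmt.Printf("skip %s: %v\n", condition, err)
            continue
        }
        fmt.Printf("set condition %s = %v\n", condition, set)
    }
}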
@@ -23,7 +23,6 @@ package reconcile
import (
    "context"

-   "github.com/rs/zerolog"
    core "k8s.io/api/core/v1"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -33,10 +32,10 @@ func init() {
    registerAction(api.ActionTypeSetMemberConditionV2, setMemberConditionV2, defaultTimeout)
}

-func setMemberConditionV2(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func setMemberConditionV2(action api.Action, actionCtx ActionContext) Action {
    a := &actionSetMemberConditionV2{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -52,13 +51,13 @@ type actionSetMemberConditionV2 struct {
func (a actionSetMemberConditionV2) Start(ctx context.Context) (bool, error) {
    at, ok := a.action.Params[setConditionActionV2KeyType]
    if !ok {
-       a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyType)
+       a.log.Info("key %s is missing in action definition", setConditionActionV2KeyType)
        return true, nil
    }

    aa, ok := a.action.Params[setConditionActionV2KeyAction]
    if !ok {
-       a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyAction)
+       a.log.Info("key %s is missing in action definition", setConditionActionV2KeyAction)
        return true, nil
    }

@@ -80,14 +79,14 @@ func (a actionSetMemberConditionV2) Start(ctx context.Context) (bool, error) {
                }
            }

-           a.log.Info().Msg("can not set the condition because the member is gone already")
+           a.log.Info("can not set the condition because the member is gone already")
            return nil
        }, a.action.Group)

        // If not found then false is returned.
        return changed, nil
    }); err != nil {
-       a.log.Warn().Err(err).Msgf("unable to update status")
+       a.log.Err(err).Warn("unable to update status")
        return true, nil
    }
    case setConditionActionV2KeyTypeRemove:
@@ -102,18 +101,18 @@ func (a actionSetMemberConditionV2) Start(ctx context.Context) (bool, error) {
                }
            }

-           a.log.Info().Msg("can not remove the condition because the member is gone already")
+           a.log.Info("can not remove the condition because the member is gone already")
            return nil
        }, a.action.Group)

        // If not found then false is returned.
        return changed, nil
    }); err != nil {
-       a.log.Warn().Err(err).Msgf("unable to update status")
+       a.log.Err(err).Warn("unable to update status")
        return true, nil
    }
    default:
-       a.log.Info().Msgf("unknown type %s", at)
+       a.log.Info("unknown type %s", at)
        return true, nil
    }
    return true, nil
@@ -28,7 +28,6 @@ import (
    "time"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
@@ -37,10 +36,10 @@ func init() {

// newShutdownMemberAction creates a new Action that implements the given
// planned ShutdownMember action.
-func newShutdownMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newShutdownMemberAction(action api.Action, actionCtx ActionContext) Action {
    a := &actionShutdownMember{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -55,7 +54,7 @@ type actionShutdownMember struct {
// Returns true if the action is completely finished, false in case
// the start time needs to be recorded and a ready condition needs to be checked.
func (a *actionShutdownMember) Start(ctx context.Context) (bool, error) {
-   shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log)
+   shutdown, m, ok := getShutdownHelper(a.actionImpl)
    if !ok {
        return true, nil
    }
@@ -77,7 +76,7 @@ func (a *actionShutdownMember) Start(ctx context.Context) (bool, error) {
// CheckProgress checks the progress of the action.
// Returns: ready, abort, error.
func (a *actionShutdownMember) CheckProgress(ctx context.Context) (bool, bool, error) {
-   shutdown, _, ok := getShutdownHelper(&a.action, a.actionCtx, a.log)
+   shutdown, _, ok := getShutdownHelper(a.actionImpl)
    if !ok {
        return true, false, nil
    }
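`getShutdownHelper` previously took `(&a.action, a.actionCtx, a.log)`; it now takes the embedded `actionImpl` value, which already carries all three. A sketch of why every call site stays a one-liner; the struct shapes below are assumed from the calls in this diff, not copied from the source:

// Assumed shape of the shared action base.
type actionImpl struct {
    log       Logger        // the unified logger
    action    api.Action    // the plan entry
    actionCtx ActionContext // deployment-scoped accessors
}

// Hypothetical helper result, for illustration.
type shutdownHelper struct {
    log    Logger
    member api.MemberStatus
}

// One argument replaces the old (&a.action, a.actionCtx, a.log) triple,
// which could drift apart at call sites.
func getShutdownHelper(a actionImpl) (shutdownHelper, api.MemberStatus, bool) {
    m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
    return shutdownHelper{log: a.log, member: m}, m, ok
}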
@@ -38,17 +38,16 @@ import (

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/deployment/resources"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeAppendTLSCACertificate, newAppendTLSCACertificateAction, operationTLSCACertificateTimeout)
}

-func newAppendTLSCACertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newAppendTLSCACertificateAction(action api.Action, actionCtx ActionContext) Action {
    a := &appendTLSCACertificateAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -66,43 +65,43 @@ func (a *appendTLSCACertificateAction) Start(ctx context.Context) (bool, error)

    certChecksum, exists := a.action.Params[checksum]
    if !exists {
-       a.log.Warn().Msgf("Key %s is missing in action", checksum)
+       a.log.Warn("Key %s is missing in action", checksum)
        return true, nil
    }

    caSecret, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().TLS.GetCASecretName())
    if !exists {
-       a.log.Warn().Msgf("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName())
+       a.log.Warn("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName())
        return true, nil
    }

    caFolder, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(resources.GetCASecretName(a.actionCtx.GetAPIObject()))
    if !exists {
-       a.log.Warn().Msgf("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
+       a.log.Warn("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
        return true, nil
    }

-   ca, _, err := resources.GetKeyCertFromSecret(a.log, caSecret, resources.CACertName, resources.CAKeyName)
+   ca, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName)
    if err != nil {
-       a.log.Warn().Err(err).Msgf("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
+       a.log.Err(err).Warn("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
        return true, nil
    }

    caData, err := ca.ToPem()
    if err != nil {
-       a.log.Warn().Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Msgf("Unable to parse ca into pem")
+       a.log.Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Warn("Unable to parse ca into pem")
        return true, nil
    }

    caSha := util.SHA256(caData)

    if caSha != certChecksum {
-       a.log.Warn().Msgf("Cert changed")
+       a.log.Warn("Cert changed")
        return true, nil
    }

    if _, exists := caFolder.Data[caSha]; exists {
-       a.log.Warn().Msgf("Cert already exists")
+       a.log.Warn("Cert already exists")
        return true, nil
    }

@@ -111,7 +110,7 @@ func (a *appendTLSCACertificateAction) Start(ctx context.Context) (bool, error)

    patch, err := p.Marshal()
    if err != nil {
-       a.log.Error().Err(err).Msgf("Unable to encrypt patch")
+       a.log.Err(err).Error("Unable to encrypt patch")
        return true, nil
    }

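The append action's guardrails all hinge on one value: the SHA-256 of the CA certificate's PEM encoding, which must equal the `checksum` the planner put into the action's params and must not already be a key in the truststore secret. A runnable sketch of that fingerprinting step, standard library only; `util.SHA256` above presumably computes the equivalent hex digest:

package main

import (
    "crypto/sha256"
    "encoding/hex"
    "fmt"
)

func main() {
    caPEM := []byte("-----BEGIN CERTIFICATE-----\n...demo...\n-----END CERTIFICATE-----\n")

    // Fingerprint the PEM bytes; the action compares this against the
    // "checksum" param and against existing keys in the CA truststore secret.
    sum := sha256.Sum256(caPEM)
    caSha := hex.EncodeToString(sum[:])

    truststore := map[string][]byte{} // stand-in for caFolder.Data
    if _, exists := truststore[caSha]; exists {
        fmt.Println("cert already exists")
        return
    }
    truststore[caSha] = caPEM
    fmt.Println("appended cert", caSha)
}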
@@ -37,17 +37,16 @@ import (

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/deployment/resources"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeCleanTLSCACertificate, newCleanTLSCACertificateAction, operationTLSCACertificateTimeout)
}

-func newCleanTLSCACertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newCleanTLSCACertificateAction(action api.Action, actionCtx ActionContext) Action {
    a := &cleanTLSCACertificateAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -59,51 +58,51 @@ type cleanTLSCACertificateAction struct {
}

func (a *cleanTLSCACertificateAction) Start(ctx context.Context) (bool, error) {
-   a.log.Info().Msgf("Clean TLS Ca")
+   a.log.Info("Clean TLS Ca")
    if !a.actionCtx.GetSpec().TLS.IsSecure() {
-       a.log.Info().Msgf("Insecure deployment")
+       a.log.Info("Insecure deployment")
        return true, nil
    }

    certChecksum, exists := a.action.Params[checksum]
    if !exists {
-       a.log.Warn().Msgf("Key %s is missing in action", checksum)
+       a.log.Warn("Key %s is missing in action", checksum)
        return true, nil
    }

    caSecret, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().TLS.GetCASecretName())
    if !exists {
-       a.log.Warn().Msgf("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName())
+       a.log.Warn("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName())
        return true, nil
    }

    caFolder, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(resources.GetCASecretName(a.actionCtx.GetAPIObject()))
    if !exists {
-       a.log.Warn().Msgf("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
+       a.log.Warn("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
        return true, nil
    }

-   ca, _, err := resources.GetKeyCertFromSecret(a.log, caSecret, resources.CACertName, resources.CAKeyName)
+   ca, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName)
    if err != nil {
-       a.log.Warn().Err(err).Msgf("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
+       a.log.Err(err).Warn("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject()))
        return true, nil
    }

    caData, err := ca.ToPem()
    if err != nil {
-       a.log.Warn().Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Msgf("Unable to parse ca into pem")
+       a.log.Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Warn("Unable to parse ca into pem")
        return true, nil
    }

    caSha := util.SHA256(caData)

    if caSha == certChecksum {
-       a.log.Warn().Msgf("Unable to remove current ca")
+       a.log.Warn("Unable to remove current ca")
        return true, nil
    }

    if _, exists := caFolder.Data[certChecksum]; !exists {
-       a.log.Warn().Msgf("Cert missing")
+       a.log.Warn("Cert missing")
        return true, nil
    }

@@ -112,11 +111,11 @@ func (a *cleanTLSCACertificateAction) Start(ctx context.Context) (bool, error) {

    patch, err := p.Marshal()
    if err != nil {
-       a.log.Error().Err(err).Msgf("Unable to encrypt patch")
+       a.log.Err(err).Error("Unable to encrypt patch")
        return true, nil
    }

-   a.log.Info().Msgf("Removing key %s from truststore", certChecksum)
+   a.log.Info("Removing key %s from truststore", certChecksum)

    err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
        _, err := a.actionCtx.ACS().CurrentClusterCache().SecretsModInterface().V1().Patch(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), types.JSONPatchType, patch, meta.PatchOptions{})
@@ -27,7 +27,6 @@ import (

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-   "github.com/rs/zerolog"
    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -35,10 +34,10 @@ func init() {
    registerAction(api.ActionTypeRenewTLSCACertificate, newRenewTLSCACertificateAction, operationTLSCACertificateTimeout)
}

-func newRenewTLSCACertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newRenewTLSCACertificateAction(action api.Action, actionCtx ActionContext) Action {
    a := &renewTLSCACertificateAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -59,7 +58,7 @@ func (a *renewTLSCACertificateAction) Start(ctx context.Context) (bool, error) {
    })
    if err != nil {
        if !k8sutil.IsNotFound(err) {
-           a.log.Warn().Err(err).Msgf("Unable to clean cert %s", a.actionCtx.GetSpec().TLS.GetCASecretName())
+           a.log.Err(err).Warn("Unable to clean cert %s", a.actionCtx.GetSpec().TLS.GetCASecretName())
            return true, nil
        }
    }
@@ -27,7 +27,6 @@ import (

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/globals"
-   "github.com/rs/zerolog"
    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

@@ -35,10 +34,10 @@ func init() {
    registerAction(api.ActionTypeCleanTLSKeyfileCertificate, newCleanTLSKeyfileCertificateAction, operationTLSCACertificateTimeout)
}

-func newCleanTLSKeyfileCertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newCleanTLSKeyfileCertificateAction(action api.Action, actionCtx ActionContext) Action {
    a := &cleanTLSKeyfileCertificateAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -56,7 +55,7 @@ func (a *cleanTLSKeyfileCertificateAction) Start(ctx context.Context) (bool, err

    member, exists := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
    if !exists {
-       a.log.Warn().Msgf("Member does not exist")
+       a.log.Warn("Member does not exist")
        return true, nil
    }

@@ -66,7 +65,7 @@ func (a *cleanTLSKeyfileCertificateAction) Start(ctx context.Context) (bool, err
    defer cancel()

    if err := c.Client().Kubernetes().CoreV1().Secrets(c.Namespace()).Delete(ctxChild, k8sutil.AppendTLSKeyfileSecretPostfix(member.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)), meta.DeleteOptions{}); err != nil {
-       a.log.Warn().Err(err).Msgf("Unable to remove keyfile")
+       a.log.Err(err).Warn("Unable to remove keyfile")
        if !k8sutil.IsNotFound(err) {
            return false, err
        }
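Note the deletion idiom here: the failure is logged first, but only a non-NotFound error fails the action, so deleting an already-deleted secret still counts as success and the step stays safe to retry. The same shape in isolation (a sketch; `k8sutil.IsNotFound` presumably wraps the apimachinery helper used below):

import apierrors "k8s.io/apimachinery/pkg/api/errors"

// deleteIgnoringNotFound treats "already gone" as success so that a
// retried plan step is idempotent.
func deleteIgnoringNotFound(del func() error) error {
    if err := del(); err != nil && !apierrors.IsNotFound(err) {
        return err // a real failure: surface it to abort or retry the action
    }
    return nil
}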
@@ -32,17 +32,16 @@ import (
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeRefreshTLSKeyfileCertificate, newRefreshTLSKeyfileCertificateAction, operationTLSCACertificateTimeout)
}

-func newRefreshTLSKeyfileCertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newRefreshTLSKeyfileCertificateAction(action api.Action, actionCtx ActionContext) Action {
    a := &refreshTLSKeyfileCertificateAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -56,19 +55,19 @@ func (a *refreshTLSKeyfileCertificateAction) CheckProgress(ctx context.Context)
    defer cancel()
    c, err := a.actionCtx.GetServerClient(ctxChild, a.action.Group, a.action.MemberID)
    if err != nil {
-       a.log.Warn().Err(err).Msg("Unable to get client")
+       a.log.Err(err).Warn("Unable to get client")
        return true, false, nil
    }

    s, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(k8sutil.CreateTLSKeyfileSecretName(a.actionCtx.GetAPIObject().GetName(), a.action.Group.AsRole(), a.action.MemberID))
    if !exists {
-       a.log.Warn().Msg("Keyfile secret is missing")
+       a.log.Warn("Keyfile secret is missing")
        return true, false, nil
    }

    keyfile, ok := s.Data[constants.SecretTLSKeyfile]
    if !ok {
-       a.log.Warn().Msg("Keyfile secret is invalid")
+       a.log.Warn("Keyfile secret is invalid")
        return true, false, nil
    }

@@ -80,7 +79,7 @@ func (a *refreshTLSKeyfileCertificateAction) CheckProgress(ctx context.Context)
    defer cancel()
    e, err := client.RefreshTLS(ctxChild)
    if err != nil {
-       a.log.Warn().Err(err).Msg("Unable to refresh TLS")
+       a.log.Err(err).Warn("Unable to refresh TLS")
        return true, false, nil
    }

@@ -24,17 +24,16 @@ import (
    "context"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeTLSPropagated, newTLSPropagated, defaultTimeout)
}

-func newTLSPropagated(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newTLSPropagated(action api.Action, actionCtx ActionContext) Action {
    a := &tlsPropagatedAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -48,7 +47,7 @@ type tlsPropagatedAction struct {
func (a *tlsPropagatedAction) Start(ctx context.Context) (bool, error) {
    propagatedFlag, exists := a.action.Params[propagated]
    if !exists {
-       a.log.Error().Msgf("Propagated flag is missing")
+       a.log.Error("Propagated flag is missing")
        return true, nil
    }

@@ -26,17 +26,16 @@ import (
    "github.com/arangodb/kube-arangodb/pkg/util/globals"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeUpdateTLSSNI, newTLSSNIUpdate, tlsSNIUpdateTimeout)
}

-func newTLSSNIUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newTLSSNIUpdate(action api.Action, actionCtx ActionContext) Action {
    a := &tlsSNIUpdate{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -64,7 +63,7 @@ func (t *tlsSNIUpdate) CheckProgress(ctx context.Context) (bool, bool, error) {

    fetchedSecrets, err := mapTLSSNIConfig(*sni, t.actionCtx.ACS().CurrentClusterCache())
    if err != nil {
-       t.log.Warn().Err(err).Msg("Unable to get SNI desired state")
+       t.log.Err(err).Warn("Unable to get SNI desired state")
        return true, false, nil
    }

@@ -72,14 +71,14 @@ func (t *tlsSNIUpdate) CheckProgress(ctx context.Context) (bool, bool, error) {
    defer cancel()
    c, err := t.actionCtx.GetServerClient(ctxChild, t.action.Group, t.action.MemberID)
    if err != nil {
-       t.log.Warn().Err(err).Msg("Unable to get client")
+       t.log.Err(err).Warn("Unable to get client")
        return true, false, nil
    }

    ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
    defer cancel()
    if ok, err := compareTLSSNIConfig(ctxChild, c.Connection(), fetchedSecrets, true); err != nil {
-       t.log.Warn().Err(err).Msg("Unable to compare TLS config")
+       t.log.Err(err).Warn("Unable to compare TLS config")
        return true, false, nil
    } else {
        return ok, false, nil
@@ -30,17 +30,16 @@ import (
    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeTLSKeyStatusUpdate, newTLSKeyStatusUpdate, defaultTimeout)
}

-func newTLSKeyStatusUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newTLSKeyStatusUpdate(action api.Action, actionCtx ActionContext) Action {
    a := &tlsKeyStatusUpdateAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -60,7 +59,7 @@ func (a *tlsKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) {
    defer cancel()
    f, err := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read().Get(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), meta.GetOptions{})
    if err != nil {
-       a.log.Error().Err(err).Msgf("Unable to get folder info")
+       a.log.Err(err).Error("Unable to get folder info")
        return true, nil
    }

@@ -22,17 +22,16 @@ package reconcile

import (
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeTopologyDisable, newTopologyDisable, defaultTimeout)
}

-func newTopologyDisable(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newTopologyDisable(action api.Action, actionCtx ActionContext) Action {
    a := &topologyDisable{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -22,17 +22,16 @@ package reconcile

import (
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeTopologyEnable, newTopologyEnable, defaultTimeout)
}

-func newTopologyEnable(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newTopologyEnable(action api.Action, actionCtx ActionContext) Action {
    a := &topologyEnable{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -22,17 +22,16 @@ package reconcile

import (
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeTopologyMemberAssignment, newTopologyMemberAssignment, defaultTimeout)
}

-func newTopologyMemberAssignment(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newTopologyMemberAssignment(action api.Action, actionCtx ActionContext) Action {
    a := &topologyMemberAssignment{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -22,17 +22,16 @@ package reconcile

import (
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
    registerAction(api.ActionTypeTopologyZonesUpdate, newTopologyZonesUpdate, defaultTimeout)
}

-func newTopologyZonesUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newTopologyZonesUpdate(action api.Action, actionCtx ActionContext) Action {
    a := &topologyZonesUpdate{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -26,7 +26,6 @@ import (
    "github.com/arangodb/kube-arangodb/pkg/util/errors"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
@@ -35,10 +34,10 @@ func init() {

// newSetCurrentImageAction creates a new Action that implements the given
// planned SetCurrentImage action.
-func newSetCurrentImageAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newSetCurrentImageAction(action api.Action, actionCtx ActionContext) Action {
    a := &setCurrentImageAction{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -63,16 +62,14 @@ func (a *setCurrentImageAction) Start(ctx context.Context) (bool, error) {
// CheckProgress checks the progress of the action.
// Returns true if the action is completely finished, false otherwise.
func (a *setCurrentImageAction) CheckProgress(ctx context.Context) (bool, bool, error) {
-   log := a.log
-
    imageInfo, found := a.actionCtx.GetImageInfo(a.action.Image)
    if !found {
        return false, false, nil
    }
    if err := a.actionCtx.SetCurrentImage(ctx, imageInfo); err != nil {
-       log.Error().Err(err).Msg("Unable to set current image")
+       a.log.Err(err).Error("Unable to set current image")
        return false, false, nil
    }
-   log.Info().Str("image", a.action.Image).Str("to", imageInfo.Image).Msg("Changed current main image")
+   a.log.Str("image", a.action.Image).Str("to", imageInfo.Image).Info("Changed current main image")
    return true, false, nil
}
@@ -26,7 +26,6 @@ import (
    "github.com/arangodb/kube-arangodb/pkg/util/errors"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-   "github.com/rs/zerolog"
)

func init() {
@@ -35,10 +34,10 @@ func init() {

// newUpgradeMemberAction creates a new Action that implements the given
// planned UpgradeMember action.
-func newUpgradeMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newUpgradeMemberAction(action api.Action, actionCtx ActionContext) Action {
    a := &actionUpgradeMember{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -53,10 +52,9 @@ type actionUpgradeMember struct {
// Returns true if the action is completely finished, false in case
// the start time needs to be recorded and a ready condition needs to be checked.
func (a *actionUpgradeMember) Start(ctx context.Context) (bool, error) {
-   log := a.log
    m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
    if !ok {
-       log.Error().Msg("No such member")
+       a.log.Error("No such member")
    }
    // Set AutoUpgrade condition
    m.Conditions.Update(api.ConditionTypeAutoUpgrade, true, "Upgrading", "AutoUpgrade on first restart")
@@ -76,10 +74,9 @@ func (a *actionUpgradeMember) Start(ctx context.Context) (bool, error) {
// Returns true if the action is completely finished, false otherwise.
func (a *actionUpgradeMember) CheckProgress(ctx context.Context) (bool, bool, error) {
    // Check that pod is removed
-   log := a.log
    m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
    if !found {
-       log.Error().Msg("No such member")
+       a.log.Error("No such member")
        return true, false, nil
    }

@@ -100,7 +97,7 @@ func (a *actionUpgradeMember) CheckProgress(ctx context.Context) (bool, bool, er
    if isUpgrading {
        if m.Conditions.IsTrue(api.ConditionTypeTerminated) {
            if m.Conditions.IsTrue(api.ConditionTypeUpgradeFailed) {
-               a.log.Error().Msgf("Upgrade of member failed")
+               a.log.Error("Upgrade of member failed")
            }
            // Invalidate plan
            m.Phase = ""
@@ -115,15 +112,11 @@ func (a *actionUpgradeMember) CheckProgress(ctx context.Context) (bool, bool, er
                return false, true, nil
            }

-           log.Error().Msgf("Upgrade failed")
+           a.log.Error("Upgrade failed")
            return false, true, nil
        }
    }

-   log = log.With().
-       Str("pod-name", m.PodName).
-       Bool("is-upgrading", isUpgrading).Logger()
-
    act := actionWaitForMemberUp{
        actionImpl: a.actionImpl,
    }
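The hunks above also retire the derived-logger idiom: instead of building `log = log.With().Str("pod-name", m.PodName).Bool("is-upgrading", isUpgrading).Logger()` up front, fields are now chained at each call site. Assuming the wrapper's field methods return a value, as the `Str`/`Int` chains elsewhere in this diff suggest, shared context can still be kept in a variable (a hypothetical fragment; `Bool` is assumed to exist alongside `Str` and `Int`):

// One-shot: fields live only for this message.
a.log.Str("image", a.action.Image).Str("to", imageInfo.Image).Info("Changed current main image")

// Reused: hold the field-carrying value when several messages share context.
log := a.log.Str("pod-name", m.PodName).Bool("is-upgrading", isUpgrading)
log.Info("member terminated")
log.Error("Upgrade failed")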
@@ -25,8 +25,6 @@ import (

    "github.com/arangodb/kube-arangodb/pkg/util"

-   "github.com/rs/zerolog"
-
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/deployment/agency"
)
@@ -37,10 +35,10 @@ func init() {

// newWaitForMemberUpAction creates a new Action that implements the given
// planned WaitForShardInSync action.
-func newWaitForMemberInSync(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newWaitForMemberInSync(action api.Action, actionCtx ActionContext) Action {
    a := &actionWaitForMemberInSync{}

-   a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+   a.actionImpl = newActionImplDefRef(action, actionCtx)

    return a
}
@@ -64,7 +62,7 @@ func (a *actionWaitForMemberInSync) Start(ctx context.Context) (bool, error) {
func (a *actionWaitForMemberInSync) CheckProgress(_ context.Context) (bool, bool, error) {
    member, ok := a.actionCtx.GetMemberStatusByID(a.MemberID())
    if !ok || member.Phase == api.MemberPhaseFailed {
-       a.log.Debug().Msg("Member in failed phase")
+       a.log.Debug("Member in failed phase")
        return true, false, nil
    }

@@ -98,14 +96,14 @@ func (a *actionWaitForMemberInSync) checkCluster() (bool, error) {
    case api.ServerGroupDBServers:
        agencyState, ok := a.actionCtx.GetAgencyCache()
        if !ok {
-           a.log.Info().Str("mode", "cluster").Str("member", a.MemberID()).Msgf("AgencyCache is missing")
+           a.log.Str("mode", "cluster").Str("member", a.MemberID()).Info("AgencyCache is missing")
            return false, nil
        }

        notInSyncShards := agency.GetDBServerShardsNotInSync(agencyState, a.MemberID())

        if len(notInSyncShards) > 0 {
-           a.log.Info().Str("mode", "cluster").Str("member", a.MemberID()).Int("shard", len(notInSyncShards)).Msgf("DBServer contains not in sync shards")
+           a.log.Str("mode", "cluster").Str("member", a.MemberID()).Int("shard", len(notInSyncShards)).Info("DBServer contains not in sync shards")
            return false, nil
        }
    }
@@ -31,8 +31,6 @@ import (
 	driver "github.com/arangodb/go-driver"
 	"github.com/arangodb/go-driver/agency"

-	"github.com/rs/zerolog"
-
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 )
@@ -42,10 +40,10 @@ func init() {
 // newWaitForMemberUpAction creates a new Action that implements the given
 // planned WaitForMemberUp action.
-func newWaitForMemberUpAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action {
+func newWaitForMemberUpAction(action api.Action, actionCtx ActionContext) Action {
 	a := &actionWaitForMemberUp{}

-	a.actionImpl = newActionImplDefRef(log, action, actionCtx)
+	a.actionImpl = newActionImplDefRef(action, actionCtx)

 	return a
 }
@@ -72,7 +70,7 @@ func (a *actionWaitForMemberUp) Start(ctx context.Context) (bool, error) {
 func (a *actionWaitForMemberUp) CheckProgress(ctx context.Context) (bool, bool, error) {
 	member, ok := a.actionCtx.GetMemberStatusByID(a.MemberID())
 	if !ok || member.Phase == api.MemberPhaseFailed {
-		a.log.Debug().Msg("Member in failed phase")
+		a.log.Debug("Member in failed phase")
 		return true, false, nil
 	}

@@ -101,15 +99,13 @@ func (a *actionWaitForMemberUp) CheckProgress(ctx context.Context) (bool, bool,
 // checkProgressSingle checks the progress of the action in the case
 // of a single server.
 func (a *actionWaitForMemberUp) checkProgressSingle(ctx context.Context) (bool, bool, error) {
-	log := a.log
-
 	c, err := a.actionCtx.GetDatabaseClient(ctx)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to create database client")
+		a.log.Err(err).Debug("Failed to create database client")
 		return false, false, nil
 	}
 	if _, err := c.Version(ctx); err != nil {
-		log.Debug().Err(err).Msg("Failed to get version")
+		a.log.Err(err).Debug("Failed to get version")
 		return false, false, nil
 	}
 	return true, false, nil
@@ -118,14 +114,13 @@ func (a *actionWaitForMemberUp) checkProgressSingle(ctx context.Context) (bool,
 // checkProgressSingleInActiveFailover checks the progress of the action in the case
 // of a single server as part of an active failover deployment.
 func (a *actionWaitForMemberUp) checkProgressSingleInActiveFailover(ctx context.Context) (bool, bool, error) {
-	log := a.log
 	c, err := a.actionCtx.GetServerClient(ctx, a.action.Group, a.action.MemberID)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to create database client")
+		a.log.Err(err).Debug("Failed to create database client")
 		return false, false, nil
 	}
 	if _, err := c.Version(ctx); err != nil {
-		log.Debug().Err(err).Msg("Failed to get version")
+		a.log.Err(err).Debug("Failed to get version")
 		return false, false, nil
 	}
 	return true, false, nil
@@ -134,10 +129,9 @@ func (a *actionWaitForMemberUp) checkProgressSingleInActiveFailover(ctx context.
 // checkProgressAgent checks the progress of the action in the case
 // of an agent.
 func (a *actionWaitForMemberUp) checkProgressAgent(ctx context.Context) (bool, bool, error) {
-	log := a.log
 	clients, err := a.actionCtx.GetAgencyClients(ctx)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to create agency clients")
+		a.log.Err(err).Debug("Failed to create agency clients")
 		return false, false, nil
 	}

@@ -151,11 +145,11 @@ func (a *actionWaitForMemberUp) checkProgressAgent(ctx context.Context) (bool, b
 	shortCtx = agency.WithAllowDifferentLeaderEndpoints(shortCtx)

 	if err := agency.AreAgentsHealthy(shortCtx, clients); err != nil {
-		log.Debug().Err(err).Msg("Not all agents are ready")
+		a.log.Err(err).Debug("Not all agents are ready")
 		return false, false, nil
 	}

-	log.Debug().Msg("Agency is happy")
+	a.log.Debug("Agency is happy")

 	return true, false, nil
 }
@@ -163,29 +157,28 @@ func (a *actionWaitForMemberUp) checkProgressAgent(ctx context.Context) (bool, b
 // checkProgressCluster checks the progress of the action in the case
 // of a cluster deployment (coordinator/dbserver).
 func (a *actionWaitForMemberUp) checkProgressCluster() (bool, bool, error) {
-	log := a.log
 	h := a.actionCtx.GetMembersState().Health()
 	if h.Error != nil {
-		log.Debug().Err(h.Error).Msg("Cluster health is missing")
+		a.log.Err(h.Error).Debug("Cluster health is missing")
 		return false, false, nil
 	}
 	sh, found := h.Members[driver.ServerID(a.action.MemberID)]
 	if !found {
-		log.Debug().Msg("Member not yet found in cluster health")
+		a.log.Debug("Member not yet found in cluster health")
 		return false, false, nil
 	}
 	if sh.Status != driver.ServerStatusGood {
-		log.Debug().Str("status", string(sh.Status)).Msg("Member set status not yet good")
+		a.log.Str("status", string(sh.Status)).Debug("Member set status not yet good")
 		return false, false, nil
 	}
 	// Wait for the member to become ready from a kubernetes point of view
 	// otherwise the coordinators may be rotated to fast and thus none of them
 	// is ready resulting in a short downtime
 	if m, found := a.actionCtx.GetMemberStatusByID(a.MemberID()); !found {
-		log.Error().Msg("No such member")
+		a.log.Error("No such member")
 		return false, true, nil
 	} else if !m.Conditions.IsTrue(api.ConditionTypeReady) {
-		log.Debug().Msg("Member not yet ready")
+		a.log.Debug("Member not yet ready")
 		return false, false, nil
 	}

@@ -195,14 +188,13 @@ func (a *actionWaitForMemberUp) checkProgressCluster() (bool, bool, error) {
 // checkProgressArangoSync checks the progress of the action in the case
 // of a sync master / worker.
 func (a *actionWaitForMemberUp) checkProgressArangoSync(ctx context.Context) (bool, bool, error) {
-	log := a.log
 	c, err := a.actionCtx.GetSyncServerClient(ctx, a.action.Group, a.action.MemberID)
 	if err != nil {
-		log.Debug().Err(err).Msg("Failed to create arangosync client")
+		a.log.Err(err).Debug("Failed to create arangosync client")
 		return false, false, nil
 	}
 	if err := c.Health(ctx); err != nil {
-		log.Debug().Err(err).Msg("Health not ok yet")
+		a.log.Err(err).Debug("Health not ok yet")
 		return false, false, nil
 	}
 	return true, false, nil

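The recurring change across these action hunks is the shape of a logging call: zerolog picks the level first and emits with a message method (log.Debug().Err(err).Msg("...")), while the unified logging.Logger wrapper accumulates fields first and emits with the level method last, which takes the message and printf-style arguments directly (a.log.Err(err).Debug("...")). A minimal, self-contained sketch of that fields-first chaining follows; the Logger type here is a toy stand-in, not the operator's actual pkg/logging implementation:

    package main

    import (
        "errors"
        "fmt"
    )

    // Logger is a toy stand-in for the operator's logging.Logger wrapper:
    // field methods return a copy with the field attached, and the level
    // method at the end of the chain formats and emits the message.
    type Logger struct{ fields string }

    func (l Logger) Str(key, value string) Logger {
        return Logger{fields: l.fields + fmt.Sprintf(" %s=%s", key, value)}
    }

    func (l Logger) Err(err error) Logger {
        if err == nil {
            return l
        }
        return Logger{fields: l.fields + " error=" + err.Error()}
    }

    func (l Logger) Debug(msg string, args ...interface{}) {
        fmt.Printf("DBG %s%s\n", fmt.Sprintf(msg, args...), l.fields)
    }

    func main() {
        var log Logger
        // Fields first, level last: the call shape used throughout this commit.
        log.Str("mode", "cluster").Str("member", "PRMR-1").Debug("AgencyCache is missing")
        log.Err(errors.New("timeout")).Debug("Failed to get version")
    }

Because the level method is terminal and variadic, the separate Msg/Msgf calls disappear, which is why each changed line above shrinks by one chained call.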
@@ -25,7 +25,6 @@ import (
 	"fmt"
 	"strings"

-	"github.com/rs/zerolog"
 	core "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"

@@ -35,15 +34,15 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 )

-func createMemberRecreationConditionsPlan(ctx context.Context,
-	log zerolog.Logger, apiObject k8sutil.APIObject,
+func (r *Reconciler) createMemberRecreationConditionsPlan(ctx context.Context,
+	apiObject k8sutil.APIObject,
 	spec api.DeploymentSpec, status api.DeploymentStatus,
 	context PlanBuilderContext) api.Plan {
 	var p api.Plan

 	for _, m := range status.Members.AsList() {
-		message, recreate := EvaluateMemberRecreationCondition(ctx, log, apiObject, spec, status, m.Group, m.Member,
-			context, isStorageClassChanged, isVolumeSizeChanged)
+		message, recreate := EvaluateMemberRecreationCondition(ctx, apiObject, spec, status, m.Group, m.Member,
+			context, r.isStorageClassChanged, r.isVolumeSizeChanged)

 		if !recreate {
 			if _, ok := m.Member.Conditions.Get(api.MemberReplacementRequired); ok {
@@ -62,20 +61,20 @@ func createMemberRecreationConditionsPlan(ctx context.Context,
 }

 type MemberRecreationConditionEvaluator func(ctx context.Context,
-	log zerolog.Logger, apiObject k8sutil.APIObject,
+	apiObject k8sutil.APIObject,
 	spec api.DeploymentSpec, status api.DeploymentStatus,
 	group api.ServerGroup, member api.MemberStatus,
 	context PlanBuilderContext) (bool, string, error)

 func EvaluateMemberRecreationCondition(ctx context.Context,
-	log zerolog.Logger, apiObject k8sutil.APIObject,
+	apiObject k8sutil.APIObject,
 	spec api.DeploymentSpec, status api.DeploymentStatus,
 	group api.ServerGroup, member api.MemberStatus,
 	context PlanBuilderContext, evaluators ...MemberRecreationConditionEvaluator) (string, bool) {
 	args := make([]string, 0, len(evaluators))

 	for _, e := range evaluators {
-		ok, s, err := e(ctx, log, apiObject, spec, status, group, member, context)
+		ok, s, err := e(ctx, apiObject, spec, status, group, member, context)
 		if err != nil {
 			// When one of an evaluator requires pod's replacement then it should be done.
 			continue
@@ -90,7 +89,7 @@ func EvaluateMemberRecreationCondition(ctx context.Context,
 }

 // isStorageClassChanged returns true and reason when the member should be replaced.
-func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec,
+func (r *Reconciler) isStorageClassChanged(_ context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec,
 	_ api.DeploymentStatus, group api.ServerGroup, member api.MemberStatus,
 	context PlanBuilderContext) (bool, string, error) {
 	if spec.GetMode() == api.DeploymentModeSingle {
@@ -122,7 +121,7 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su
 	// Check if a storage class changed.
 	if pvc, ok := cache.PersistentVolumeClaim().V1().GetSimple(member.PersistentVolumeClaimName); !ok {
-		log.Warn().Str("role", group.AsRole()).Str("id", member.ID).Msg("Failed to get PVC")
+		r.log.Str("role", group.AsRole()).Str("id", member.ID).Warn("Failed to get PVC")
 		return false, "", fmt.Errorf("failed to get PVC %s", member.PersistentVolumeClaimName)
 	} else {
 		pvcClassName := util.StringOrDefault(pvc.Spec.StorageClassName)
@@ -147,10 +146,10 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su
 	// If pod does not exist then it will try next time.
 	if pod, ok := cache.Pod().V1().GetSimple(member.PodName); ok {
 		if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok {
-			log.Warn().
+			r.log.
 				Str("pod-name", member.PodName).
 				Str("server-group", group.AsRole()).
-				Msgf("try changing a storage class name, but %s", getRequiredReplaceMessage(member.PodName))
+				Warn("try changing a storage class name, but %s", getRequiredReplaceMessage(member.PodName))
 			// No return here.
 		}
 	} else {
@@ -161,7 +160,7 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su
 }

 // isVolumeSizeChanged returns true and reason when the member should be replaced.
-func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObject, spec api.DeploymentSpec,
+func (r *Reconciler) isVolumeSizeChanged(_ context.Context, _ k8sutil.APIObject, spec api.DeploymentSpec,
 	_ api.DeploymentStatus, group api.ServerGroup, member api.MemberStatus,
 	context PlanBuilderContext) (bool, string, error) {
 	if spec.GetMode() == api.DeploymentModeSingle {
@@ -186,10 +185,10 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj
 	pvc, ok := cache.PersistentVolumeClaim().V1().GetSimple(member.PersistentVolumeClaimName)
 	if !ok {
-		log.Warn().
+		r.log.
 			Str("role", group.AsRole()).
 			Str("id", member.ID).
-			Msg("Failed to get PVC")
+			Warn("Failed to get PVC")

 		return false, "", fmt.Errorf("failed to get PVC %s", member.PersistentVolumeClaimName)
 	}
@@ -201,10 +200,10 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj
 	}

 	if group != api.ServerGroupDBServers {
-		log.Error().
+		r.log.
 			Str("pvc-storage-size", volumeSize.String()).
 			Str("requested-size", requestedSize.String()).
-			Msgf("Volume size should not shrink, because it is not possible for \"%s\"", group.AsRole())
+			Warn("Volume size should not shrink, because it is not possible for \"%s\"", group.AsRole())

 		return false, "", nil
 	}
@@ -213,8 +212,8 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj
 	// If pod does not exist then it will try next time.
 	if pod, ok := cache.Pod().V1().GetSimple(member.PodName); ok {
 		if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok {
-			log.Warn().Str("pod-name", member.PodName).
-				Msgf("try shrinking volume size, but %s", getRequiredReplaceMessage(member.PodName))
+			r.log.Str("pod-name", member.PodName).
+				Warn("try shrinking volume size, but %s", getRequiredReplaceMessage(member.PodName))
 			// No return here.
 		}
 	} else {

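In the plan-builder files, the second half of the unification shows up: builders stop taking a log zerolog.Logger parameter and become methods on Reconciler, so the logger rides along on the receiver as r.log and every evaluator signature shrinks by one parameter. A rough sketch of the before/after shape, assuming only that Reconciler carries a log field (the Logger type and method names below are illustrative, not the repository's real types):

    package main

    import "fmt"

    type Logger struct{}

    func (Logger) Warn(msg string, args ...interface{}) {
        fmt.Printf("WRN "+msg+"\n", args...)
    }

    // Before: every plan builder received the logger explicitly.
    func createSomePlanOld(log Logger, name string) {
        log.Warn("unable to build plan for %s", name)
    }

    // After: builders are methods on Reconciler and share r.log,
    // so callbacks like the recreation-condition evaluators no
    // longer need a logger argument at all.
    type Reconciler struct{ log Logger }

    func (r *Reconciler) createSomePlan(name string) {
        r.log.Warn("unable to build plan for %s", name)
    }

    func main() {
        createSomePlanOld(Logger{}, "old-style")
        (&Reconciler{}).createSomePlan("new-style")
    }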
@@ -25,7 +25,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/deployment"

-	"github.com/rs/zerolog"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -40,68 +39,64 @@ import (
 // Returns true when member status exists.
 // There are 3 possibilities to shut down the pod: immediately, gracefully, standard kubernetes delete API.
 // When pod does not exist then success action (which always successes) is returned.
-func getShutdownHelper(a *api.Action, actionCtx ActionContext, log zerolog.Logger) (ActionCore, api.MemberStatus, bool) {
-	m, ok := actionCtx.GetMemberStatusByID(a.MemberID)
+func getShutdownHelper(a actionImpl) (ActionCore, api.MemberStatus, bool) {
+	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
 	if !ok {
-		log.Warn().Str("pod-name", m.PodName).Msg("member is already gone")
+		a.log.Str("pod-name", m.PodName).Warn("member is already gone")

 		return nil, api.MemberStatus{}, false
 	}

-	cache, ok := actionCtx.ACS().ClusterCache(m.ClusterID)
+	cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID)
 	if !ok {
-		log.Warn().Str("pod-name", m.PodName).Msg("Cluster is not ready")
+		a.log.Str("pod-name", m.PodName).Warn("Cluster is not ready")

 		return nil, api.MemberStatus{}, false
 	}

-	if ifPodUIDMismatch(m, *a, cache) {
-		log.Error().Msg("Member UID is changed")
+	if ifPodUIDMismatch(m, a.action, cache) {
+		a.log.Error("Member UID is changed")
 		return NewActionSuccess(), m, true
 	}

 	pod, ok := cache.Pod().V1().GetSimple(m.PodName)
 	if !ok {
-		log.Warn().Str("pod-name", m.PodName).Msg("pod is already gone")
+		a.log.Str("pod-name", m.PodName).Warn("pod is already gone")
 		// Pod does not exist, so create success action to finish it immediately.
 		return NewActionSuccess(), m, true
 	}

 	if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodDeleteNow]; ok {
 		// The pod contains annotation, so pod must be deleted immediately.
-		return shutdownNow{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true
+		return shutdownNow{actionImpl: a, memberStatus: m}, m, true
 	}

 	if features.GracefulShutdown().Enabled() {
-		return shutdownHelperAPI{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true
+		return shutdownHelperAPI{actionImpl: a, memberStatus: m}, m, true
 	}

-	serverGroup := actionCtx.GetSpec().GetServerGroupSpec(a.Group)
+	serverGroup := a.actionCtx.GetSpec().GetServerGroupSpec(a.action.Group)

 	switch serverGroup.ShutdownMethod.Get() {
 	case api.ServerGroupShutdownMethodDelete:
-		return shutdownHelperDelete{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true
+		return shutdownHelperDelete{actionImpl: a, memberStatus: m}, m, true
 	default:
-		return shutdownHelperAPI{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true
+		return shutdownHelperAPI{actionImpl: a, memberStatus: m}, m, true
 	}
 }

 type shutdownHelperAPI struct {
-	log          zerolog.Logger
-	action       *api.Action
-	actionCtx    ActionContext
+	actionImpl
 	memberStatus api.MemberStatus
 }

 func (s shutdownHelperAPI) Start(ctx context.Context) (bool, error) {
-	log := s.log
-
-	log.Info().Msgf("Using API to shutdown member")
+	s.log.Info("Using API to shutdown member")

 	group := s.action.Group
 	podName := s.memberStatus.PodName
 	if podName == "" {
-		log.Warn().Msgf("Pod is empty")
+		s.log.Warn("Pod is empty")
 		return true, nil
 	}

@@ -131,11 +126,11 @@ func (s shutdownHelperAPI) Start(ctx context.Context) (bool, error) {
 		defer cancel()
 		c, err := s.actionCtx.GetServerClient(ctxChild, group, s.action.MemberID)
 		if err != nil {
-			log.Debug().Err(err).Msg("Failed to create member client")
+			s.log.Err(err).Debug("Failed to create member client")
 			return false, errors.WithStack(err)
 		}
 		removeFromCluster := false
-		log.Debug().Bool("removeFromCluster", removeFromCluster).Msg("Shutting down member")
+		s.log.Bool("removeFromCluster", removeFromCluster).Debug("Shutting down member")
 		ctxChild, cancel = context.WithTimeout(ctx, shutdownTimeout)
 		defer cancel()
 		if err := c.ShutdownV2(ctxChild, removeFromCluster, true); err != nil {
@@ -144,7 +139,7 @@ func (s shutdownHelperAPI) Start(ctx context.Context) (bool, error) {
 				// We're done
 				return true, nil
 			}
-			log.Debug().Err(err).Msg("Failed to shutdown member")
+			s.log.Err(err).Debug("Failed to shutdown member")
 			return false, errors.WithStack(err)
 		}
 	} else if group.IsArangosync() {
@@ -164,20 +159,16 @@ func (s shutdownHelperAPI) CheckProgress(_ context.Context) (bool, bool, error)
 }

 type shutdownHelperDelete struct {
-	log          zerolog.Logger
-	action       *api.Action
-	actionCtx    ActionContext
+	actionImpl
 	memberStatus api.MemberStatus
 }

 func (s shutdownHelperDelete) Start(ctx context.Context) (bool, error) {
-	log := s.log
-
-	log.Info().Msgf("Using Pod Delete to shutdown member")
+	s.log.Info("Using Pod Delete to shutdown member")

 	podName := s.memberStatus.PodName
 	if podName == "" {
-		log.Warn().Msgf("Pod is empty")
+		s.log.Warn("Pod is empty")
 		return true, nil
 	}

@@ -198,23 +189,22 @@ func (s shutdownHelperDelete) Start(ctx context.Context) (bool, error) {
 func (s shutdownHelperDelete) CheckProgress(ctx context.Context) (bool, bool, error) {
 	// Check that pod is removed
-	log := s.log
 	if !s.memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) {
 		// Pod is not yet terminated
-		log.Warn().Msgf("Pod not yet terminated")
+		s.log.Warn("Pod not yet terminated")
 		return false, false, nil
 	}

 	cache, ok := s.actionCtx.ACS().ClusterCache(s.memberStatus.ClusterID)
 	if !ok {
-		log.Warn().Msg("Cluster is not ready")
+		s.log.Warn("Cluster is not ready")
 		return false, false, nil
 	}

 	podName := s.memberStatus.PodName
 	if podName != "" {
 		if _, ok := cache.Pod().V1().GetSimple(podName); ok {
-			log.Warn().Msgf("Pod still exists")
+			s.log.Warn("Pod still exists")
 			return false, false, nil
 		}
 	}
@@ -223,17 +213,15 @@ func (s shutdownHelperDelete) CheckProgress(ctx context.Context) (bool, bool, er
 }

 type shutdownNow struct {
-	action       *api.Action
-	actionCtx    ActionContext
+	actionImpl
 	memberStatus api.MemberStatus
-	log          zerolog.Logger
 }

 // Start starts removing pod forcefully.
 func (s shutdownNow) Start(ctx context.Context) (bool, error) {
 	// Check progress is used here because removing pod can start gracefully,
 	// and then it can be changed to force shutdown.
-	s.log.Info().Msg("Using shutdown now method")
+	s.log.Info("Using shutdown now method")
 	ready, _, err := s.CheckProgress(ctx)
 	return ready, err
 }
@@ -244,18 +232,18 @@ func (s shutdownNow) CheckProgress(ctx context.Context) (bool, bool, error) {
 	cache, ok := s.actionCtx.ACS().ClusterCache(s.memberStatus.ClusterID)
 	if !ok {
-		s.log.Warn().Msg("Cluster is not ready")
+		s.log.Warn("Cluster is not ready")
 		return false, false, nil
 	}

 	pod, ok := cache.Pod().V1().GetSimple(podName)
 	if !ok {
-		s.log.Info().Msg("Using shutdown now method completed because pod is gone")
+		s.log.Info("Using shutdown now method completed because pod is gone")
 		return true, false, nil
 	}

 	if s.memberStatus.PodUID != pod.GetUID() {
-		s.log.Info().Msg("Using shutdown now method completed because it is already rotated")
+		s.log.Info("Using shutdown now method completed because it is already rotated")
 		// The new pod has been started already.
 		return true, false, nil
 	}
@@ -283,6 +271,6 @@ func (s shutdownNow) CheckProgress(ctx context.Context) (bool, bool, error) {
 		}
 	}

-	s.log.Info().Msgf("Using shutdown now method completed")
+	s.log.Info("Using shutdown now method completed")
 	return true, false, nil
 }

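The three shutdown helpers previously duplicated log, action, and actionCtx fields; the refactor embeds actionImpl instead, so each helper inherits all three through Go struct embedding, and getShutdownHelper only needs the actionImpl value. A small sketch of the embedding mechanics, using toy types rather than the operator's real ones:

    package main

    import "fmt"

    // actionImpl is a toy version of the shared action base: it owns the
    // logger and the action context once, instead of per helper struct.
    type actionImpl struct {
        log       string
        actionCtx string
    }

    // Embedding actionImpl promotes its fields onto the helper directly,
    // which is the shape shutdownHelperAPI/Delete/Now take in this commit.
    type shutdownHelperAPI struct {
        actionImpl
        memberStatus string
    }

    func main() {
        h := shutdownHelperAPI{
            actionImpl:   actionImpl{log: "logger", actionCtx: "ctx"},
            memberStatus: "member-1",
        }
        // h.log and h.actionCtx resolve through the embedded struct.
        fmt.Println(h.log, h.actionCtx, h.memberStatus)
    }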
@@ -24,8 +24,6 @@ import (
 	"context"

 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-
-	inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
 )

 const (
@@ -41,6 +39,6 @@ const (
 // CreatePlan considers the current specification & status of the deployment creates a plan to
 // get the status in line with the specification.
 // If a plan already exists, nothing is done.
-func (d *Reconciler) CreatePlan(ctx context.Context, cachedStatus inspectorInterface.Inspector) (error, bool) {
-	return d.generatePlan(ctx, d.generatePlanFunc(createHighPlan, plannerHigh{}), d.generatePlanFunc(createResourcesPlan, plannerResources{}), d.generatePlanFunc(createNormalPlan, plannerNormal{}))
+func (d *Reconciler) CreatePlan(ctx context.Context) (error, bool) {
+	return d.generatePlan(ctx, d.generatePlanFunc(d.createHighPlan, plannerHigh{}), d.generatePlanFunc(d.createResourcesPlan, plannerResources{}), d.generatePlanFunc(d.createNormalPlan, plannerNormal{}))
 }

@@ -24,7 +24,7 @@ import (
 	"time"

 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-	"github.com/rs/zerolog"
+	"github.com/arangodb/kube-arangodb/pkg/logging"
 )

 func newPlanAppender(pb WithPlanBuilder, backoff api.BackOff, current api.Plan) PlanAppender {
@@ -35,7 +35,7 @@ func newPlanAppender(pb WithPlanBuilder, backoff api.BackOff, current api.Plan)
 	}
 }

-func recoverPlanAppender(log zerolog.Logger, p PlanAppender) PlanAppender {
+func recoverPlanAppender(log logging.Logger, p PlanAppender) PlanAppender {
 	return planAppenderRecovery{
 		appender: p,
 		log:      log,
@@ -60,8 +60,8 @@ type PlanAppender interface {
 }

 type planAppenderRecovery struct {
+	log      logging.Logger
 	appender PlanAppender
-	log      zerolog.Logger
 }

 func (p planAppenderRecovery) BackOff() api.BackOff {
@@ -84,7 +84,7 @@ func (p planAppenderRecovery) create(ret func(in PlanAppender) PlanAppender) (r
 	defer func() {
 		if e := recover(); e != nil {
 			r = p
-			p.log.Error().Interface("panic", e).Msgf("Recovering from panic")
+			p.log.Interface("panic", e).Error("Recovering from panic")
 		}
 	}()

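recoverPlanAppender exists so that a panicking plan builder cannot take down the whole reconciliation: planAppenderRecovery.create runs the builder under a deferred recover(), logs the panic value, and falls back to the previous appender state, which is exactly what the test file below exercises. A self-contained sketch of that recover-and-fall-back pattern (plain fmt stands in for the logger here):

    package main

    import "fmt"

    // applySafely runs fn and, if it panics, recovers, reports the panic
    // value, and signals that the caller should keep its previous state -
    // the same pattern planAppenderRecovery.create wraps around builders.
    func applySafely(fn func() int) (result int, ok bool) {
        defer func() {
            if e := recover(); e != nil {
                fmt.Printf("ERR Recovering from panic: %v\n", e)
                ok = false
            }
        }()
        return fn(), true
    }

    func main() {
        if v, ok := applySafely(func() int { return 42 }); ok {
            fmt.Println("appended plan of length", v)
        }
        if _, ok := applySafely(func() int { panic("SomePanic") }); !ok {
            fmt.Println("kept the previous appender")
        }
    }

Note the named return values: the deferred function can overwrite ok after the panic, so the caller sees a clean failure instead of a crash.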
@@ -26,42 +26,40 @@ import (
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-	"github.com/rs/zerolog"
-	"github.com/rs/zerolog/log"
 	"github.com/stretchr/testify/require"
 )

 func Test_PlanBuilderAppender_Recovery(t *testing.T) {
 	t.Run("Recover", func(t *testing.T) {
-		require.Len(t, recoverPlanAppender(log.Logger, newPlanAppender(NewWithPlanBuilder(context.Background(), zerolog.Logger{}, nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)).
-			Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+		require.Len(t, recoverPlanAppender(testLogger, newPlanAppender(NewWithPlanBuilder(context.Background(), nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)).
+			Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				panic("")
 			}).
-			Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+			Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				panic("SomePanic")
 			}).Plan(), 0)
 	})
 	t.Run("Recover with output", func(t *testing.T) {
-		require.Len(t, recoverPlanAppender(log.Logger, newPlanAppender(NewWithPlanBuilder(context.Background(), zerolog.Logger{}, nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)).
-			Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+		require.Len(t, recoverPlanAppender(testLogger, newPlanAppender(NewWithPlanBuilder(context.Background(), nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)).
+			Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				return api.Plan{api.Action{}}
 			}).
-			ApplyIfEmpty(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+			ApplyIfEmpty(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				panic("SomePanic")
 			}).
-			ApplyIfEmpty(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+			ApplyIfEmpty(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				return api.Plan{api.Action{}, api.Action{}}
 			}).Plan(), 1)
 	})
 	t.Run("Recover with multi", func(t *testing.T) {
-		require.Len(t, recoverPlanAppender(log.Logger, newPlanAppender(NewWithPlanBuilder(context.Background(), zerolog.Logger{}, nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)).
-			Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+		require.Len(t, recoverPlanAppender(testLogger, newPlanAppender(NewWithPlanBuilder(context.Background(), nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)).
+			Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				return api.Plan{api.Action{}}
 			}).
-			ApplyIfEmpty(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+			ApplyIfEmpty(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				panic("SomePanic")
 			}).
-			Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
+			Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan {
 				return api.Plan{api.Action{}, api.Action{}}
 			}).Plan(), 3)
 	})

@@ -26,11 +26,9 @@ import (
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 	"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-	"github.com/rs/zerolog"
 )

-func createBootstrapPlan(ctx context.Context,
-	log zerolog.Logger, apiObject k8sutil.APIObject,
+func (r *Reconciler) createBootstrapPlan(ctx context.Context, apiObject k8sutil.APIObject,
 	spec api.DeploymentSpec, status api.DeploymentStatus,
 	context PlanBuilderContext) api.Plan {
 	if !status.Conditions.IsTrue(api.ConditionTypeReady) {

@@ -26,8 +26,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/globals"

-	"github.com/rs/zerolog"
-
 	"github.com/arangodb/go-driver"
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 	"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
@@ -35,7 +33,7 @@ import (
 )

 // createCleanOutPlan creates clean out action if the server is cleaned out and the operator is not aware of it.
-func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIObject, spec api.DeploymentSpec,
+func (r *Reconciler) createCleanOutPlan(ctx context.Context, _ k8sutil.APIObject, spec api.DeploymentSpec,
 	status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan {

 	if spec.GetMode() != api.DeploymentModeCluster {
@@ -49,7 +47,7 @@ func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIOb
 	cluster, err := getCluster(ctx, planCtx)
 	if err != nil {
-		log.Warn().Err(err).Msgf("Unable to get cluster")
+		r.log.Err(err).Warn("Unable to get cluster")
 		return nil
 	}

@@ -57,7 +55,7 @@ func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIOb
 	defer cancel()
 	health, err := cluster.Health(ctxChild)
 	if err != nil {
-		log.Warn().Err(err).Msgf("Unable to get cluster health")
+		r.log.Err(err).Warn("Unable to get cluster health")
 		return nil
 	}

@@ -76,13 +74,13 @@ func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIOb
 	}

 	if isCleanedOut, err := cluster.IsCleanedOut(ctx, string(id)); err != nil {
-		log.Warn().Err(err).Str("id", string(id)).Msgf("Unable to get clean out status")
+		r.log.Err(err).Str("id", string(id)).Warn("Unable to get clean out status")
 		return nil
 	} else if isCleanedOut {
-		log.Info().
+		r.log.
 			Str("role", string(member.Role)).
 			Str("id", string(id)).
-			Msgf("server is cleaned out so operator must do the same")
+			Info("server is cleaned out so operator must do the same")

 		action := actions.NewAction(api.ActionTypeSetMemberCondition, api.ServerGroupDBServers, withPredefinedMember(string(id)),
 			"server is cleaned out so operator must do the same").

@@ -30,13 +30,11 @@ import (
 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 	"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-	"github.com/rs/zerolog"
 )

 const coordinatorHealthFailedTimeout time.Duration = time.Minute

-func createClusterOperationPlan(ctx context.Context,
-	log zerolog.Logger, apiObject k8sutil.APIObject,
+func (r *Reconciler) createClusterOperationPlan(ctx context.Context, apiObject k8sutil.APIObject,
 	spec api.DeploymentSpec, status api.DeploymentStatus,
 	planCtx PlanBuilderContext) api.Plan {

@@ -55,7 +53,7 @@ func createClusterOperationPlan(ctx context.Context,
 	defer cancel()
 	cluster, err := c.Cluster(ctxChild)
 	if err != nil {
-		log.Warn().Err(err).Msgf("Unable to get Cluster client")
+		r.log.Err(err).Warn("Unable to get Cluster client")
 		return nil
 	}

@@ -63,7 +61,7 @@ func createClusterOperationPlan(ctx context.Context,
 	defer cancel()
 	health, err := cluster.Health(ctxChild)
 	if err != nil {
-		log.Warn().Err(err).Msgf("Unable to get Cluster health")
+		r.log.Err(err).Warn("Unable to get Cluster health")
 		return nil
 	}

@@ -29,7 +29,6 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
 	"github.com/arangodb/kube-arangodb/pkg/deployment/features"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-	"github.com/rs/zerolog"
 )

 var (
@@ -38,8 +37,7 @@ var (
 	}
 )

-func cleanupConditions(ctx context.Context,
-	log zerolog.Logger, apiObject k8sutil.APIObject,
+func (r *Reconciler) cleanupConditions(ctx context.Context, apiObject k8sutil.APIObject,
 	spec api.DeploymentSpec, status api.DeploymentStatus,
 	planCtx PlanBuilderContext) api.Plan {
 	var p api.Plan
@@ -53,8 +51,7 @@ func cleanupConditions(ctx context.Context,
 	return p
 }

-func createMaintenanceManagementPlan(ctx context.Context,
-	log zerolog.Logger, apiObject k8sutil.APIObject,
+func (r *Reconciler) createMaintenanceManagementPlan(ctx context.Context, apiObject k8sutil.APIObject,
 	spec api.DeploymentSpec, status api.DeploymentStatus,
 	planCtx PlanBuilderContext) api.Plan {
 	if spec.Mode.Get() == api.DeploymentModeSingle {
@@ -68,12 +65,12 @@ func createMaintenanceManagementPlan(ctx context.Context,
 	agencyState, agencyOK := planCtx.GetAgencyCache()
 	if !agencyOK {
-		log.Error().Msgf("Unable to get agency mode")
+		r.log.Error("Unable to get agency mode")
 		return nil
 	}

 	if agencyState.Target.HotBackup.Create.Exists() {
-		log.Info().Msgf("HotBackup in progress")
+		r.log.Info("HotBackup in progress")
 		return nil
 	}

@@ -82,7 +79,7 @@ func createMaintenanceManagementPlan(ctx context.Context,
 	if (cok && c.IsTrue()) != enabled {
 		// Condition not yet propagated
-		log.Info().Msgf("Condition not yet propagated")
+		r.log.Info("Condition not yet propagated")
 		return nil
 	}

@@ -96,12 +93,12 @@ func createMaintenanceManagementPlan(ctx context.Context,
 	}

 	if !enabled && spec.Database.GetMaintenance() {
-		log.Info().Msgf("Enabling maintenance mode")
+		r.log.Info("Enabling maintenance mode")
 		return api.Plan{actions.NewClusterAction(api.ActionTypeEnableMaintenance)}
 	}

 	if enabled && !spec.Database.GetMaintenance() {
-		log.Info().Msgf("Disabling maintenance mode")
+		r.log.Info("Disabling maintenance mode")
 		return api.Plan{actions.NewClusterAction(api.ActionTypeDisableMaintenance)}
 	}

@ -36,7 +36,6 @@ import (
|
||||||
|
|
||||||
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||||
"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
|
"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
|
||||||
"github.com/rs/zerolog"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
func skipEncryptionPlan(spec api.DeploymentSpec, status api.DeploymentStatus) bool {
|
func skipEncryptionPlan(spec api.DeploymentSpec, status api.DeploymentStatus) bool {
|
||||||
|
@ -51,8 +50,7 @@ func skipEncryptionPlan(spec api.DeploymentSpec, status api.DeploymentStatus) bo
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
func createEncryptionKeyStatusPropagatedFieldUpdate(ctx context.Context,
|
func (r *Reconciler) createEncryptionKeyStatusPropagatedFieldUpdate(ctx context.Context, apiObject k8sutil.APIObject,
|
||||||
log zerolog.Logger, apiObject k8sutil.APIObject,
|
|
||||||
spec api.DeploymentSpec, status api.DeploymentStatus,
|
spec api.DeploymentSpec, status api.DeploymentStatus,
|
||||||
context PlanBuilderContext, w WithPlanBuilder, builders ...planBuilder) api.Plan {
|
context PlanBuilderContext, w WithPlanBuilder, builders ...planBuilder) api.Plan {
|
||||||
if skipEncryptionPlan(spec, status) {
|
if skipEncryptionPlan(spec, status) {
|
||||||
|
@ -88,8 +86,7 @@ func createEncryptionKeyStatusPropagatedFieldUpdate(ctx context.Context,
|
||||||
return plan
|
return plan
|
||||||
}
|
}
|
||||||
|
|
||||||
func createEncryptionKey(ctx context.Context,
|
func (r *Reconciler) createEncryptionKey(ctx context.Context, apiObject k8sutil.APIObject,
|
||||||
log zerolog.Logger, apiObject k8sutil.APIObject,
|
|
||||||
spec api.DeploymentSpec, status api.DeploymentStatus,
|
spec api.DeploymentSpec, status api.DeploymentStatus,
|
||||||
context PlanBuilderContext) api.Plan {
|
context PlanBuilderContext) api.Plan {
|
||||||
if skipEncryptionPlan(spec, status) {
|
if skipEncryptionPlan(spec, status) {
|
||||||
|
@ -103,7 +100,7 @@ func createEncryptionKey(ctx context.Context,
|
||||||
|
|
||||||
name, _, err := pod.GetEncryptionKeyFromSecret(secret)
|
name, _, err := pod.GetEncryptionKeyFromSecret(secret)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
log.Error().Err(err).Msgf("Unable to fetch encryption key")
|
r.log.Err(err).Error("Unable to fetch encryption key")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -113,7 +110,7 @@ func createEncryptionKey(ctx context.Context,
|
||||||
|
|
||||||
keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName()))
|
keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName()))
|
||||||
if !exists {
|
if !exists {
|
||||||
log.Error().Msgf("Encryption key folder does not exist")
|
r.log.Error("Encryption key folder does not exist")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -128,7 +125,7 @@ func createEncryptionKey(ctx context.Context,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
plan, failed := areEncryptionKeysUpToDate(ctx, log, spec, status, context, keyfolder)
|
plan, failed := r.areEncryptionKeysUpToDate(ctx, spec, status, context, keyfolder)
|
||||||
if !plan.IsEmpty() {
|
if !plan.IsEmpty() {
|
||||||
return plan
|
return plan
|
||||||
}
|
}
|
||||||
|
@ -142,15 +139,14 @@ func createEncryptionKey(ctx context.Context,
|
||||||
return api.Plan{}
|
return api.Plan{}
|
||||||
}
|
}
|
||||||
|
|
||||||
func createEncryptionKeyStatusUpdate(ctx context.Context,
|
func (r *Reconciler) createEncryptionKeyStatusUpdate(ctx context.Context, apiObject k8sutil.APIObject,
|
||||||
log zerolog.Logger, apiObject k8sutil.APIObject,
|
|
||||||
spec api.DeploymentSpec, status api.DeploymentStatus,
|
spec api.DeploymentSpec, status api.DeploymentStatus,
|
||||||
context PlanBuilderContext) api.Plan {
|
context PlanBuilderContext) api.Plan {
|
||||||
if skipEncryptionPlan(spec, status) {
|
if skipEncryptionPlan(spec, status) {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
if createEncryptionKeyStatusUpdateRequired(log, spec, status, context) {
|
if r.createEncryptionKeyStatusUpdateRequired(spec, status, context) {
|
||||||
return api.Plan{actions.NewClusterAction(api.ActionTypeEncryptionKeyStatusUpdate)}
|
return api.Plan{actions.NewClusterAction(api.ActionTypeEncryptionKeyStatusUpdate)}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -158,7 +154,7 @@ func createEncryptionKeyStatusUpdate(ctx context.Context,
|
||||||
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus,
|
func (r *Reconciler) createEncryptionKeyStatusUpdateRequired(spec api.DeploymentSpec, status api.DeploymentStatus,
|
||||||
context PlanBuilderContext) bool {
|
context PlanBuilderContext) bool {
|
||||||
if skipEncryptionPlan(spec, status) {
|
if skipEncryptionPlan(spec, status) {
|
||||||
return false
|
return false
|
||||||
|
@ -166,7 +162,7 @@ func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.Deploy
|
||||||
|
|
||||||
keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName()))
|
keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName()))
|
||||||
if !exists {
|
if !exists {
|
||||||
log.Error().Msgf("Encryption key folder does not exist")
|
r.log.Error("Encryption key folder does not exist")
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -175,8 +171,7 @@ func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.Deploy
|
||||||
return !util.CompareStringArray(keyHashes, status.Hashes.Encryption.Keys)
|
return !util.CompareStringArray(keyHashes, status.Hashes.Encryption.Keys)
|
||||||
}
|
}
|
||||||
|
|
||||||
func createEncryptionKeyCleanPlan(ctx context.Context,
|
func (r *Reconciler) createEncryptionKeyCleanPlan(ctx context.Context, apiObject k8sutil.APIObject,
|
||||||
log zerolog.Logger, apiObject k8sutil.APIObject,
|
|
||||||
spec api.DeploymentSpec, status api.DeploymentStatus,
|
spec api.DeploymentSpec, status api.DeploymentStatus,
|
||||||
context PlanBuilderContext) api.Plan {
|
context PlanBuilderContext) api.Plan {
|
||||||
if skipEncryptionPlan(spec, status) {
|
if skipEncryptionPlan(spec, status) {
|
||||||
|
@ -185,7 +180,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context,
|
||||||
|
|
||||||
keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName()))
|
keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName()))
|
||||||
if !exists {
|
if !exists {
|
||||||
log.Error().Msgf("Encryption key folder does not exist")
|
r.log.Error("Encryption key folder does not exist")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@@ -214,7 +209,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context,
 		}

 		if _, ok := keyfolder.Data[name]; !ok {
-			log.Err(err).Msgf("Key from encryption is not in keyfolder - do nothing")
+			r.log.Err(err).Error("Key from encryption is not in keyfolder - do nothing")
 			return nil
 		}

@@ -231,7 +226,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context,
 	return api.Plan{}
 }

-func areEncryptionKeysUpToDate(ctx context.Context, log zerolog.Logger, spec api.DeploymentSpec,
+func (r *Reconciler) areEncryptionKeysUpToDate(ctx context.Context, spec api.DeploymentSpec,
 	status api.DeploymentStatus, context PlanBuilderContext, folder *core.Secret) (plan api.Plan, failed bool) {

 	status.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error {
@@ -240,7 +235,7 @@ func areEncryptionKeysUpToDate(ctx context.Context, log zerolog.Logger, spec api
 		}

 		for _, m := range list {
-			if updateRequired, failedMember := isEncryptionKeyUpToDate(ctx, log, status, context, group, m, folder); failedMember {
+			if updateRequired, failedMember := r.isEncryptionKeyUpToDate(ctx, status, context, group, m, folder); failedMember {
 				failed = true
 				continue
 			} else if updateRequired {
@@ -255,8 +250,7 @@ func areEncryptionKeysUpToDate(ctx context.Context, log zerolog.Logger, spec api
 	return
 }

-func isEncryptionKeyUpToDate(ctx context.Context,
-	log zerolog.Logger, status api.DeploymentStatus,
+func (r *Reconciler) isEncryptionKeyUpToDate(ctx context.Context, status api.DeploymentStatus,
 	planCtx PlanBuilderContext,
 	group api.ServerGroup, m api.MemberStatus,
 	folder *core.Secret) (updateRequired bool, failed bool) {
@@ -268,13 +262,13 @@ func isEncryptionKeyUpToDate(ctx context.Context,
 		return false, false
 	}

-	mlog := log.With().Str("group", group.AsRole()).Str("member", m.ID).Logger()
+	log := r.log.Str("group", group.AsRole()).Str("member", m.ID)

 	ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
 	defer cancel()
 	c, err := planCtx.GetServerClient(ctxChild, group, m.ID)
 	if err != nil {
-		mlog.Warn().Err(err).Msg("Unable to get client")
+		log.Err(err).Warn("Unable to get client")
 		return false, true
 	}
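The mlog rewrite above is the child-logger case: zerolog brackets field attachment between With() and Logger(), while the unified logger chains Str() directly and the result is usable as-is. A runnable zerolog sketch of the old pattern, with the new shape in comments (field values here are illustrative):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	root := zerolog.New(os.Stderr)

	// Old style: With() opens a field context, Logger() closes it,
	// yielding a child logger that stamps every event with the fields.
	mlog := root.With().Str("group", "agent").Str("member", "agent-1").Logger()
	mlog.Info().Msgf("Refresh of encryption keys required")

	// New style per the hunk above (wrapper introduced by this PR):
	//   log := r.log.Str("group", group.AsRole()).Str("member", m.ID)
	//   log.Info("Refresh of encryption keys required")
}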
@@ -284,12 +278,12 @@ func isEncryptionKeyUpToDate(ctx context.Context,
 	defer cancel()
 	e, err := client.GetEncryption(ctxChild)
 	if err != nil {
-		mlog.Error().Err(err).Msgf("Unable to fetch encryption keys")
+		log.Err(err).Error("Unable to fetch encryption keys")
 		return false, true
 	}

 	if !e.Result.KeysPresent(folder.Data) {
-		mlog.Info().Msgf("Refresh of encryption keys required")
+		log.Info("Refresh of encryption keys required")
 		return true, false
 	}

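Taken together, the migration in this file is mechanical and mirrors the rest of the commit: every helper that took a zerolog.Logger becomes a method on Reconciler reading r.log; log.Error().Msgf(msg) becomes r.log.Error(msg); log.Warn().Err(err).Msg(msg) becomes r.log.Err(err).Warn(msg); and log.With().Str(...).Logger() becomes r.log.Str(...).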