diff --git a/cmd/admin.go b/cmd/admin.go index 241554f1c..3f195cb7e 100644 --- a/cmd/admin.go +++ b/cmd/admin.go @@ -107,11 +107,11 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) { ctx := getInterruptionContext() d, certCA, auth, err := getDeploymentAndCredentials(ctx, deploymentName) if err != nil { - cliLog.Fatal().Err(err).Msg("failed to create basic data for the connection") + logger.Err(err).Fatal("failed to create basic data for the connection") } if d.Spec.GetMode() != api.DeploymentModeCluster { - cliLog.Fatal().Msgf("agency state does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(), + logger.Fatal("agency state does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(), d.GetName()) } @@ -120,7 +120,7 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) { conn := createClient([]string{endpoint}, certCA, auth, connection.ApplicationJSON) leaderID, err := getAgencyLeader(ctx, conn) if err != nil { - cliLog.Fatal().Err(err).Msg("failed to get leader ID") + logger.Err(err).Fatal("failed to get leader ID") } dnsLeaderName := k8sutil.CreatePodDNSName(d.GetObjectMeta(), api.ServerGroupAgents.AsRole(), leaderID) @@ -131,7 +131,7 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) { defer body.Close() } if err != nil { - cliLog.Fatal().Err(err).Msg("can not get state of the agency") + logger.Err(err).Fatal("can not get state of the agency") } // Print and receive parallelly. @@ -143,11 +143,11 @@ func cmdGetAgencyDump(cmd *cobra.Command, _ []string) { ctx := getInterruptionContext() d, certCA, auth, err := getDeploymentAndCredentials(ctx, deploymentName) if err != nil { - cliLog.Fatal().Err(err).Msg("failed to create basic data for the connection") + logger.Err(err).Fatal("failed to create basic data for the connection") } if d.Spec.GetMode() != api.DeploymentModeCluster { - cliLog.Fatal().Msgf("agency dump does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(), + logger.Fatal("agency dump does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(), d.GetName()) } @@ -158,7 +158,7 @@ func cmdGetAgencyDump(cmd *cobra.Command, _ []string) { defer body.Close() } if err != nil { - cliLog.Fatal().Err(err).Msg("can not get dump") + logger.Err(err).Fatal("can not get dump") } // Print and receive parallelly. 
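For reference, a minimal sketch (not part of this patch; names and values are illustrative, and the logging API is assumed only from the calls visible in this diff) of the call-chain convention the patch migrates to: field setters such as Str and Err are chained first, and the level method (Debug, Info, Warn, Error, Fatal) terminates the chain with printf-style arguments, replacing zerolog's trailing Msg/Msgf.

package main

import (
	"errors"

	"github.com/arangodb/kube-arangodb/pkg/logging"
)

// Register a named logger once per package, as cmd/admin.go now does with "root".
var logger = logging.Global().RegisterAndGetLogger("root", logging.Info)

func demo() {
	err := errors.New("connection refused")

	// Old style: cliLog.Fatal().Err(err).Msg("failed to create basic data for the connection")
	// New style: attach the error, then emit at the chosen level.
	logger.Err(err).Error("failed to create basic data for the connection")

	// Structured fields still come before the terminating level call,
	// which accepts a format string plus arguments.
	logger.Str("deployment", "example-deployment").Info("agency state fetched for %s", "example-deployment")
}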
diff --git a/cmd/cmd.go b/cmd/cmd.go index 51e077d01..fd98761b2 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -31,29 +31,33 @@ import ( "strings" "time" + "github.com/arangodb/kube-arangodb/pkg/util/globals" + + "github.com/gin-gonic/gin" + + operatorHTTP "github.com/arangodb/kube-arangodb/pkg/util/http" + + "github.com/arangodb/kube-arangodb/pkg/version" + + "github.com/arangodb/kube-arangodb/pkg/operator/scope" + + "github.com/arangodb/kube-arangodb/pkg/deployment/features" + deploymentApi "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/crd" - "github.com/arangodb/kube-arangodb/pkg/deployment/features" "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned/scheme" "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/operator" - "github.com/arangodb/kube-arangodb/pkg/operator/scope" "github.com/arangodb/kube-arangodb/pkg/server" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" utilsError "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/arangodb/kube-arangodb/pkg/util/globals" - operatorHTTP "github.com/arangodb/kube-arangodb/pkg/util/http" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" "github.com/arangodb/kube-arangodb/pkg/util/kclient" "github.com/arangodb/kube-arangodb/pkg/util/probe" "github.com/arangodb/kube-arangodb/pkg/util/retry" - "github.com/arangodb/kube-arangodb/pkg/version" - - "github.com/gin-gonic/gin" "github.com/pkg/errors" "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/spf13/cobra" flag "github.com/spf13/pflag" appsv1 "k8s.io/api/apps/v1" @@ -83,14 +87,15 @@ const ( ) var ( + logger = logging.Global().RegisterAndGetLogger("root", logging.Info) + eventRecorder = logging.Global().RegisterAndGetLogger("root-event-recorder", logging.Info) + cmdMain = cobra.Command{ Use: "arangodb_operator", Run: executeMain, } logLevels []string - cliLog = logging.NewRootLogger() - logService logging.Service serverOptions struct { host string port int @@ -157,13 +162,14 @@ func init() { f.StringVar(&serverOptions.tlsSecretName, "server.tls-secret-name", "", "Name of secret containing tls.crt & tls.key for HTTPS server (if empty, self-signed certificate is used)") f.StringVar(&serverOptions.adminSecretName, "server.admin-secret-name", defaultAdminSecretName, "Name of secret containing username + password for login to the dashboard") f.BoolVar(&serverOptions.allowAnonymous, "server.allow-anonymous-access", false, "Allow anonymous access to the dashboard") - f.StringArrayVar(&logLevels, "log.level", []string{defaultLogLevel}, fmt.Sprintf("Set log levels in format <level> or <logger>=<level>. Possible loggers: %s", strings.Join(logging.LoggerNames(), ", "))) + f.StringArrayVar(&logLevels, "log.level", []string{defaultLogLevel}, fmt.Sprintf("Set log levels in format <level> or <logger>=<level>. 
Possible loggers: %s", strings.Join(logging.Global().Names(), ", "))) f.BoolVar(&operatorOptions.enableDeployment, "operator.deployment", false, "Enable to run the ArangoDeployment operator") f.BoolVar(&operatorOptions.enableDeploymentReplication, "operator.deployment-replication", false, "Enable to run the ArangoDeploymentReplication operator") f.BoolVar(&operatorOptions.enableStorage, "operator.storage", false, "Enable to run the ArangoLocalStorage operator") f.BoolVar(&operatorOptions.enableBackup, "operator.backup", false, "Enable to run the ArangoBackup operator") f.BoolVar(&operatorOptions.enableApps, "operator.apps", false, "Enable to run the ArangoApps operator") f.BoolVar(&operatorOptions.enableK2KClusterSync, "operator.k2k-cluster-sync", false, "Enable to run the ListSimple operator") + f.MarkDeprecated("operator.k2k-cluster-sync", "Enabled within deployment operator") f.BoolVar(&operatorOptions.versionOnly, "operator.version", false, "Enable only version endpoint in Operator") f.StringVar(&operatorOptions.alpineImage, "operator.alpine-image", UBIImageEnv.GetOrDefault(defaultAlpineImage), "Docker image used for alpine containers") f.MarkDeprecated("operator.alpine-image", "Value is not used anymore") @@ -205,9 +211,6 @@ func executeUsage(cmd *cobra.Command, args []string) { // Run the operator func executeMain(cmd *cobra.Command, args []string) { - // Set global logger - log.Logger = logging.NewRootLogger() - // Get environment namespace := os.Getenv(constants.EnvOperatorPodNamespace) name := os.Getenv(constants.EnvOperatorPodName) @@ -228,20 +231,23 @@ func executeMain(cmd *cobra.Command, args []string) { // Prepare log service var err error - if err := logging.InitGlobalLogger(defaultLogLevel, logLevels); err != nil { - cliLog.Fatal().Err(err).Msg("Failed to initialize log service") + + levels, err := logging.ParseLogLevelsFromArgs(logLevels) + if err != nil { + logger.Err(err).Fatal("Unable to parse log level") } - logService = logging.GlobalLogger() + logging.Global().ApplyLogLevels(levels) - logService.ConfigureRootLogger(func(log zerolog.Logger) zerolog.Logger { - podNameParts := strings.Split(name, "-") - operatorID := podNameParts[len(podNameParts)-1] - cliLog = cliLog.With().Str("operator-id", operatorID).Logger() - return log.With().Str("operator-id", operatorID).Logger() + podNameParts := strings.Split(name, "-") + operatorID := podNameParts[len(podNameParts)-1] + logging.Global().RegisterWrappers(func(in *zerolog.Event) *zerolog.Event { + return in.Str("operator-id", operatorID) }) - klog.SetOutput(logService.MustGetLogger(logging.LoggerNameKLog)) + kl := logging.Global().RegisterAndGetLogger("klog", logging.Info) + + klog.SetOutput(kl.InfoIO()) klog.Info("nice to meet you") klog.Flush() @@ -249,46 +255,46 @@ func executeMain(cmd *cobra.Command, args []string) { if !operatorOptions.enableDeployment && !operatorOptions.enableDeploymentReplication && !operatorOptions.enableStorage && !operatorOptions.enableBackup && !operatorOptions.enableApps && !operatorOptions.enableK2KClusterSync { if !operatorOptions.versionOnly { - cliLog.Fatal().Err(err).Msg("Turn on --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync or any combination of these") + logger.Err(err).Fatal("Turn on --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync or any combination of these") } } else if operatorOptions.versionOnly { - 
cliLog.Fatal().Err(err).Msg("Options --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync cannot be enabled together with --operator.version") + logger.Err(err).Fatal("Options --operator.deployment, --operator.deployment-replication, --operator.storage, --operator.backup, --operator.apps, --operator.k2k-cluster-sync cannot be enabled together with --operator.version") } // Log version - cliLog.Info(). + logger. Str("pod-name", name). Str("pod-namespace", namespace). - Msgf("Starting arangodb-operator (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) + Info("Starting arangodb-operator (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) // Check environment if !operatorOptions.versionOnly { if len(namespace) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodNamespace) + logger.Fatal("%s environment variable missing", constants.EnvOperatorPodNamespace) } if len(name) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodName) + logger.Fatal("%s environment variable missing", constants.EnvOperatorPodName) } if len(ip) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodIP) + logger.Fatal("%s environment variable missing", constants.EnvOperatorPodIP) } // Get host name id, err := os.Hostname() if err != nil { - cliLog.Fatal().Err(err).Msg("Failed to get hostname") + logger.Err(err).Fatal("Failed to get hostname") } client, ok := kclient.GetDefaultFactory().Client() if !ok { - cliLog.Fatal().Msg("Failed to get client") + logger.Fatal("Failed to get client") } if crdOptions.install { ctx, cancel := context.WithTimeout(context.Background(), time.Minute) defer cancel() - crd.EnsureCRD(ctx, logService.MustGetLogger("crd"), client) + crd.EnsureCRD(ctx, client) } secrets := client.Kubernetes().CoreV1().Secrets(namespace) @@ -296,11 +302,11 @@ func executeMain(cmd *cobra.Command, args []string) { // Create operator cfg, deps, err := newOperatorConfigAndDeps(id+"-"+name, namespace, name) if err != nil { - cliLog.Fatal().Err(err).Msg("Failed to create operator config & deps") + logger.Err(err).Fatal("Failed to create operator config & deps") } o, err := operator.NewOperator(cfg, deps) if err != nil { - cliLog.Fatal().Err(err).Msg("Failed to create operator") + logger.Err(err).Fatal("Failed to create operator") } listenAddr := net.JoinHostPort(serverOptions.host, strconv.Itoa(serverOptions.port)) @@ -314,7 +320,6 @@ func executeMain(cmd *cobra.Command, args []string) { AdminSecretName: serverOptions.adminSecretName, AllowAnonymous: serverOptions.allowAnonymous, }, server.Dependencies{ - Log: logService.MustGetLogger(logging.LoggerNameServer), LivenessProbe: &livenessProbe, Deployment: server.OperatorDependency{ Enabled: cfg.EnableDeployment, @@ -344,9 +349,9 @@ func executeMain(cmd *cobra.Command, args []string) { Secrets: secrets, }); err != nil { - cliLog.Fatal().Err(err).Msg("Failed to create HTTP server") + logger.Err(err).Fatal("Failed to create HTTP server") } else { - go utilsError.LogError(cliLog, "error while starting service", svr.Run) + go utilsError.LogError(logger, "error while starting service", svr.Run) } // startChaos(context.Background(), cfg.KubeCli, cfg.Namespace, chaosLevel) @@ -355,7 +360,7 @@ func executeMain(cmd *cobra.Command, args 
[]string) { o.Run() } else { if err := startVersionProcess(); err != nil { - cliLog.Fatal().Err(err).Msg("Failed to create HTTP server") + logger.Err(err).Fatal("Failed to create HTTP server") } } } @@ -363,7 +368,7 @@ func executeMain(cmd *cobra.Command, args []string) { func startVersionProcess() error { // Just expose version listenAddr := net.JoinHostPort(serverOptions.host, strconv.Itoa(serverOptions.port)) - cliLog.Info().Str("addr", listenAddr).Msgf("Starting version endpoint") + logger.Str("addr", listenAddr).Info("Starting version endpoint") gin.SetMode(gin.ReleaseMode) r := gin.New() @@ -396,7 +401,7 @@ func newOperatorConfigAndDeps(id, namespace, name string) (operator.Config, oper return operator.Config{}, operator.Dependencies{}, errors.WithStack(fmt.Errorf("Failed to get my pod's service account: %s", err)) } - eventRecorder := createRecorder(cliLog, client.Kubernetes(), name, namespace) + eventRecorder := createRecorder(client.Kubernetes(), name, namespace) scope, ok := scope.AsScope(operatorOptions.scope) if !ok { @@ -424,7 +429,6 @@ func newOperatorConfigAndDeps(id, namespace, name string) (operator.Config, oper ShutdownTimeout: shutdownOptions.timeout, } deps := operator.Dependencies{ - LogService: logService, Client: client, EventRecorder: eventRecorder, LivenessProbe: &livenessProbe, @@ -446,10 +450,10 @@ func getMyPodInfo(kubecli kubernetes.Interface, namespace, name string) (string, op := func() error { pod, err := kubecli.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{}) if err != nil { - cliLog.Error(). + logger. Err(err). Str("name", name). - Msg("Failed to get operator pod") + Error("Failed to get operator pod") return errors.WithStack(err) } sa = pod.Spec.ServiceAccountName @@ -468,10 +472,10 @@ func getMyPodInfo(kubecli kubernetes.Interface, namespace, name string) (string, return image, sa, nil } -func createRecorder(log zerolog.Logger, kubecli kubernetes.Interface, name, namespace string) record.EventRecorder { +func createRecorder(kubecli kubernetes.Interface, name, namespace string) record.EventRecorder { eventBroadcaster := record.NewBroadcaster() eventBroadcaster.StartLogging(func(format string, args ...interface{}) { - log.Info().Msgf(format, args...) + eventRecorder.Info(format, args...) }) eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.CoreV1().RESTClient()).Events(namespace)}) combinedScheme := runtime.NewScheme() diff --git a/cmd/lifecycle.go b/cmd/lifecycle.go index 74e009673..44b6ddd82 100644 --- a/cmd/lifecycle.go +++ b/cmd/lifecycle.go @@ -98,23 +98,22 @@ func init() { // Wait until all finalizers of the current pod have been removed. 
func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) { - - cliLog.Info().Msgf("Starting arangodb-operator (%s), lifecycle preStop, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) + logger.Info("Starting arangodb-operator (%s), lifecycle preStop, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) // Get environment namespace := os.Getenv(constants.EnvOperatorPodNamespace) if len(namespace) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodNamespace) + logger.Fatal("%s environment variable missing", constants.EnvOperatorPodNamespace) } name := os.Getenv(constants.EnvOperatorPodName) if len(name) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodName) + logger.Fatal("%s environment variable missing", constants.EnvOperatorPodName) } // Create kubernetes client client, ok := kclient.GetDefaultFactory().Client() if !ok { - cliLog.Fatal().Msg("Client not initialised") + logger.Fatal("Client not initialised") } pods := client.Kubernetes().CoreV1().Pods(namespace) @@ -122,13 +121,13 @@ func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) { for { p, err := pods.Get(context.Background(), name, metav1.GetOptions{}) if k8sutil.IsNotFound(err) { - cliLog.Warn().Msg("Pod not found") + logger.Warn("Pod not found") return } else if err != nil { recentErrors++ - cliLog.Error().Err(err).Msg("Failed to get pod") + logger.Err(err).Error("Failed to get pod") if recentErrors > 20 { - cliLog.Fatal().Err(err).Msg("Too many recent errors") + logger.Err(err).Fatal("Too many recent errors") return } } else { @@ -136,10 +135,10 @@ func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) { finalizerCount := len(p.GetFinalizers()) if finalizerCount == 0 { // No more finalizers, we're done - cliLog.Info().Msg("All finalizers gone, we can stop now") + logger.Info("All finalizers gone, we can stop now") return } - cliLog.Info().Msgf("Waiting for %d more finalizers to be removed", finalizerCount) + logger.Info("Waiting for %d more finalizers to be removed", finalizerCount) } // Wait a bit time.Sleep(time.Second) @@ -148,17 +147,17 @@ func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) { // Copy the executable to a given place. 
func cmdLifecycleCopyRun(cmd *cobra.Command, args []string) { - cliLog.Info().Msgf("Starting arangodb-operator (%s), lifecycle copy, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) + logger.Info("Starting arangodb-operator (%s), lifecycle copy, version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) exePath, err := os.Executable() if err != nil { - cliLog.Fatal().Err(err).Msg("Failed to get executable path") + logger.Err(err).Fatal("Failed to get executable path") } // Open source rd, err := os.Open(exePath) if err != nil { - cliLog.Fatal().Err(err).Msg("Failed to open executable file") + logger.Err(err).Fatal("Failed to open executable file") } defer rd.Close() @@ -166,20 +165,20 @@ func cmdLifecycleCopyRun(cmd *cobra.Command, args []string) { targetPath := filepath.Join(lifecycleCopyOptions.TargetDir, filepath.Base(exePath)) wr, err := os.Create(targetPath) if err != nil { - cliLog.Fatal().Err(err).Msg("Failed to create target file") + logger.Err(err).Fatal("Failed to create target file") } defer wr.Close() if _, err := io.Copy(wr, rd); err != nil { - cliLog.Fatal().Err(err).Msg("Failed to copy") + logger.Err(err).Fatal("Failed to copy") } // Set file mode if err := os.Chmod(targetPath, 0755); err != nil { - cliLog.Fatal().Err(err).Msg("Failed to chmod") + logger.Err(err).Fatal("Failed to chmod") } - cliLog.Info().Msgf("Executable copied to %s", targetPath) + logger.Info("Executable copied to %s", targetPath) } type cmdLifecyclePreStopRunPort struct { @@ -193,17 +192,17 @@ func (c *cmdLifecyclePreStopRunPort) run(cmd *cobra.Command, args []string) erro // Get environment namespace := os.Getenv(constants.EnvOperatorPodNamespace) if len(namespace) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodNamespace) + logger.Fatal("%s environment variable missing", constants.EnvOperatorPodNamespace) } name := os.Getenv(constants.EnvOperatorPodName) if len(name) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorPodName) + logger.Fatal("%s environment variable missing", constants.EnvOperatorPodName) } // Create kubernetes client client, ok := kclient.GetDefaultFactory().Client() if !ok { - cliLog.Fatal().Msg("Client not initialised") + logger.Fatal("Client not initialised") } pods := client.Kubernetes().CoreV1().Pods(namespace) @@ -221,13 +220,13 @@ func (c *cmdLifecyclePreStopRunPort) run(cmd *cobra.Command, args []string) erro p, err := pods.Get(context.Background(), name, metav1.GetOptions{}) if k8sutil.IsNotFound(err) { - cliLog.Warn().Msg("Pod not found") + logger.Warn("Pod not found") return nil } else if err != nil { recentErrors++ - cliLog.Error().Err(err).Msg("Failed to get pod") + logger.Err(err).Error("Failed to get pod") if recentErrors > 20 { - cliLog.Fatal().Err(err).Msg("Too many recent errors") + logger.Err(err).Fatal("Too many recent errors") return nil } } else { diff --git a/cmd/lifecycle_wait.go b/cmd/lifecycle_wait.go index 1f8cefb71..2f92f8c6c 100644 --- a/cmd/lifecycle_wait.go +++ b/cmd/lifecycle_wait.go @@ -63,26 +63,26 @@ func cmdLifecycleWaitCheck(cmd *cobra.Command, _ []string) { deploymentName, err := cmd.Flags().GetString(ArgDeploymentName) if err != nil { - cliLog.Fatal().Err(err).Msg(fmt.Sprintf("error parsing argument: %s", ArgDeploymentName)) + logger.Err(err).Fatal("error parsing argument: %s", ArgDeploymentName) } watchTimeout, err := 
cmd.Flags().GetDuration(ArgDeploymentWatchTimeout) if err != nil { - cliLog.Fatal().Err(err).Msg(fmt.Sprintf("error parsing argument: %s", ArgDeploymentWatchTimeout)) + logger.Err(err).Fatal("error parsing argument: %s", ArgDeploymentWatchTimeout) } for { d, err := getDeployment(ctx, os.Getenv(constants.EnvOperatorPodNamespace), deploymentName) if err != nil { - cliLog.Fatal().Err(err).Msg(fmt.Sprintf("error getting ArangoDeployment: %s", d.Name)) + logger.Err(err).Fatal(fmt.Sprintf("error getting ArangoDeployment: %s", d.Name)) } isUpToDate, err := d.IsUpToDate() if err != nil { - cliLog.Err(err).Msg(fmt.Sprintf("error checking Status for ArangoDeployment: %s", d.Name)) + logger.Err(err).Error(fmt.Sprintf("error checking Status for ArangoDeployment: %s", d.Name)) } if isUpToDate { - cliLog.Info().Msg(fmt.Sprintf("ArangoDeployment: %s is %s", d.Name, v1.ConditionTypeUpToDate)) + logger.Info(fmt.Sprintf("ArangoDeployment: %s is %s", d.Name, v1.ConditionTypeUpToDate)) return } @@ -90,10 +90,10 @@ func cmdLifecycleWaitCheck(cmd *cobra.Command, _ []string) { case <-ctx.Done(): return case <-time.After(WatchCheckInterval): - cliLog.Info().Msg(fmt.Sprintf("ArangoDeployment: %s is not ready yet. Waiting...", d.Name)) + logger.Info("ArangoDeployment: %s is not ready yet. Waiting...", d.Name) continue case <-time.After(watchTimeout): - cliLog.Error().Msg(fmt.Sprintf("ArangoDeployment: %s is not %s yet - operation timed out!", d.Name, v1.ConditionTypeUpToDate)) + logger.Error("ArangoDeployment: %s is not %s yet - operation timed out!", d.Name, v1.ConditionTypeUpToDate) return } } diff --git a/cmd/reboot.go b/cmd/reboot.go index 79d158b11..b48021acb 100644 --- a/cmd/reboot.go +++ b/cmd/reboot.go @@ -129,7 +129,7 @@ func runVolumeInspector(ctx context.Context, kube kubernetes.Interface, ns, name } defer func() { if deletePVC { - cliLog.Debug().Str("pvc-name", claimname).Msg("deleting pvc") + logger.Str("pvc-name", claimname).Debug("deleting pvc") kube.CoreV1().PersistentVolumeClaims(ns).Delete(context.Background(), claimname, metav1.DeleteOptions{}) } }() @@ -383,7 +383,7 @@ func cmdRebootRun(cmd *cobra.Command, args []string) { // Create kubernetes client client, ok := kclient.GetDefaultFactory().Client() if !ok { - cliLog.Fatal().Msg("Failed to get client") + logger.Fatal("Failed to get client") } kubecli := client.Kubernetes() @@ -392,12 +392,12 @@ func cmdRebootRun(cmd *cobra.Command, args []string) { image, err := getMyImage(kubecli, namespace, podname) if err != nil { - cliLog.Fatal().Err(err).Msg("failed to get my image") + logger.Err(err).Fatal("failed to get my image") } vinfo, err := preflightChecks(kubecli, volumes) if err != nil { - cliLog.Fatal().Err(err).Msg("preflight checks failed") + logger.Err(err).Fatal("preflight checks failed") } var wg sync.WaitGroup @@ -406,7 +406,7 @@ func cmdRebootRun(cmd *cobra.Command, args []string) { received := 0 for _, volumeName := range volumes { - cliLog.Debug().Str("volume", volumeName).Msg("Starting inspection") + logger.Str("volume", volumeName).Debug("Starting inspection") wg.Add(1) go func(vn string) { defer wg.Done() @@ -424,9 +424,9 @@ func cmdRebootRun(cmd *cobra.Command, args []string) { select { case res := <-resultChan: if res.Error != nil { - cliLog.Error().Err(res.Error).Msg("Inspection failed") + logger.Err(res.Error).Error("Inspection failed") } else { - cliLog.Info().Str("claim", res.Claim).Str("uuid", res.UUID).Msg("Inspection completed") + logger.Str("claim", res.Claim).Str("uuid", res.UUID).Info("Inspection completed") } 
members[res.UUID] = res received++ @@ -435,13 +435,13 @@ func cmdRebootRun(cmd *cobra.Command, args []string) { } } - cliLog.Debug().Msg("results complete - generating ArangoDeployment resource") + logger.Debug("results complete - generating ArangoDeployment resource") if err := createArangoDeployment(extcli, namespace, rebootOptions.DeploymentName, rebootOptions.ImageName, members); err != nil { - cliLog.Error().Err(err).Msg("failed to create deployment") + logger.Err(err).Error("failed to create deployment") } - cliLog.Info().Msg("ArangoDeployment created.") + logger.Info("ArangoDeployment created.") // Wait for everyone to be completed wg.Wait() @@ -481,6 +481,6 @@ func cmdRebootInspectRun(cmd *cobra.Command, args []string) { }) if http.ListenAndServe(":8080", nil); err != nil { - cliLog.Fatal().Err(err).Msg("Failed to listen and serve") + logger.Err(err).Fatal("Failed to listen and serve") } } diff --git a/cmd/storage.go b/cmd/storage.go index 007ab10fe..9a374af9c 100644 --- a/cmd/storage.go +++ b/cmd/storage.go @@ -30,7 +30,6 @@ import ( "github.com/spf13/cobra" - "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/storage/provisioner" "github.com/arangodb/kube-arangodb/pkg/storage/provisioner/service" "github.com/arangodb/kube-arangodb/pkg/util/constants" @@ -63,26 +62,21 @@ func init() { // Run the provisioner func cmdStorageProvisionerRun(cmd *cobra.Command, args []string) { var err error - if err := logging.InitGlobalLogger(defaultLogLevel, logLevels); err != nil { - cliLog.Fatal().Err(err).Msg("Failed to initialize log service") - } - - logService = logging.GlobalLogger() // Log version - cliLog.Info().Msgf("Starting arangodb local storage provisioner (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) + logger.Info("Starting arangodb local storage provisioner (%s), version %s build %s", version.GetVersionV1().Edition.Title(), version.GetVersionV1().Version, version.GetVersionV1().Build) // Get environment nodeName := os.Getenv(constants.EnvOperatorNodeName) if len(nodeName) == 0 { - cliLog.Fatal().Msgf("%s environment variable missing", constants.EnvOperatorNodeName) + logger.Fatal("%s environment variable missing", constants.EnvOperatorNodeName) } - config, deps := newProvisionerConfigAndDeps(nodeName) - p, err := service.New(config, deps) + config := newProvisionerConfigAndDeps(nodeName) + p, err := service.New(config) if err != nil { - cliLog.Fatal().Err(err).Msg("Failed to create provisioner") + logger.Err(err).Fatal("Failed to create provisioner") } ctx := context.TODO() @@ -90,14 +84,11 @@ func cmdStorageProvisionerRun(cmd *cobra.Command, args []string) { } // newProvisionerConfigAndDeps creates storage provisioner config & dependencies. 
-func newProvisionerConfigAndDeps(nodeName string) (service.Config, service.Dependencies) { +func newProvisionerConfigAndDeps(nodeName string) service.Config { cfg := service.Config{ Address: net.JoinHostPort("0.0.0.0", strconv.Itoa(storageProvisioner.port)), NodeName: nodeName, } - deps := service.Dependencies{ - Log: logService.MustGetLogger(logging.LoggerNameProvisioner), - } - return cfg, deps + return cfg } diff --git a/cmd/task.go b/cmd/task.go index 28187707e..000afe8df 100644 --- a/cmd/task.go +++ b/cmd/task.go @@ -21,7 +21,6 @@ package cmd import ( - "github.com/rs/zerolog/log" "github.com/spf13/cobra" ) @@ -57,9 +56,9 @@ var cmdTaskState = &cobra.Command{ } func taskCreate(cmd *cobra.Command, args []string) { - log.Info().Msgf("TODO: create task") + logger.Info("TODO: create task") } func taskState(cmd *cobra.Command, args []string) { - log.Info().Msgf("TODO: check task state") + logger.Info("TODO: check task state") } diff --git a/pkg/crd/apply.go b/pkg/crd/apply.go index 8a858f778..0b6e00162 100644 --- a/pkg/crd/apply.go +++ b/pkg/crd/apply.go @@ -25,15 +25,17 @@ import ( "fmt" "github.com/arangodb/go-driver" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/kclient" - "github.com/rs/zerolog" authorization "k8s.io/api/authorization/v1" apiextensions "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1" "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) -func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) { +var logger = logging.Global().RegisterAndGetLogger("crd", logging.Info) + +func EnsureCRD(ctx context.Context, client kclient.Client) { crdsLock.Lock() defer crdsLock.Unlock() @@ -41,21 +43,21 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) { getAccess := verifyCRDAccess(ctx, client, crd, "get") if !getAccess.Allowed { - log.Info().Str("crd", crd).Msgf("Get Operations is not allowed. Continue") + logger.Str("crd", crd).Info("Get Operations is not allowed. Continue") continue } c, err := client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Get(ctx, crd, meta.GetOptions{}) if err != nil { if !errors.IsNotFound(err) { - log.Warn().Err(err).Str("crd", crd).Msgf("Get Operations is not allowed due to error. Continue") + logger.Err(err).Str("crd", crd).Warn("Get Operations is not allowed due to error. Continue") continue } createAccess := verifyCRDAccess(ctx, client, crd, "create") if !createAccess.Allowed { - log.Info().Str("crd", crd).Msgf("Create Operations is not allowed but CRD is missing. Continue") + logger.Str("crd", crd).Info("Create Operations is not allowed but CRD is missing. Continue") continue } @@ -70,18 +72,18 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) { } if _, err := client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Create(ctx, c, meta.CreateOptions{}); err != nil { - log.Warn().Err(err).Str("crd", crd).Msgf("Create Operations is not allowed due to error. Continue") + logger.Err(err).Str("crd", crd).Warn("Create Operations is not allowed due to error. Continue") continue } - log.Info().Str("crd", crd).Msgf("CRD Created") + logger.Str("crd", crd).Info("CRD Created") continue } updateAccess := verifyCRDAccess(ctx, client, crd, "update") if !updateAccess.Allowed { - log.Info().Str("crd", crd).Msgf("Update Operations is not allowed. Continue") + logger.Str("crd", crd).Info("Update Operations is not allowed. 
Continue") continue } @@ -92,7 +94,7 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) { if v, ok := c.ObjectMeta.Labels[Version]; ok { if v != "" { if !isUpdateRequired(spec.version, driver.Version(v)) { - log.Info().Str("crd", crd).Msgf("CRD Update not required") + logger.Str("crd", crd).Info("CRD Update not required") continue } } @@ -103,10 +105,10 @@ func EnsureCRD(ctx context.Context, log zerolog.Logger, client kclient.Client) { c.Spec = spec.spec if _, err := client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Update(ctx, c, meta.UpdateOptions{}); err != nil { - log.Warn().Err(err).Str("crd", crd).Msgf("Create Operations is not allowed due to error. Continue") + logger.Err(err).Str("crd", crd).Warn("Create Operations is not allowed due to error. Continue") continue } - log.Info().Str("crd", crd).Msgf("CRD Updated") + logger.Str("crd", crd).Info("CRD Updated") } } diff --git a/pkg/crd/apply_test.go b/pkg/crd/apply_test.go index 40bd62ee9..02ed746ce 100644 --- a/pkg/crd/apply_test.go +++ b/pkg/crd/apply_test.go @@ -25,7 +25,6 @@ import ( "testing" "github.com/arangodb/kube-arangodb/pkg/util/kclient" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" ) @@ -34,6 +33,6 @@ func Test_Apply(t *testing.T) { c, ok := kclient.GetDefaultFactory().Client() require.True(t, ok) - EnsureCRD(context.Background(), log.Logger, c) + EnsureCRD(context.Background(), c) }) } diff --git a/pkg/deployment/access_package.go b/pkg/deployment/access_package.go index f902506f8..7430ce334 100644 --- a/pkg/deployment/access_package.go +++ b/pkg/deployment/access_package.go @@ -47,7 +47,7 @@ const ( // createAccessPackages creates a arangosync access packages specified // in spec.sync.externalAccess.accessPackageSecretNames. func (d *Deployment) createAccessPackages(ctx context.Context) error { - log := d.deps.Log + log := d.sectionLogger("access-package") spec := d.apiObject.Spec if !spec.Sync.IsEnabled() { @@ -78,12 +78,12 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error { }) }) if err != nil && !k8sutil.IsNotFound(err) { - // Not serious enough to stop everything now, just log and create an event - log.Warn().Err(err).Msg("Failed to remove obsolete access package secret") + // Not serious enough to stop everything now, just sectionLogger and create an event + log.Err(err).Warn("Failed to remove obsolete access package secret") d.CreateEvent(k8sutil.NewErrorEvent("Access Package cleanup failed", err, d.apiObject)) } else { // Access package removed, notify user - log.Info().Str("secret-name", secret.GetName()).Msg("Removed access package Secret") + log.Str("secret-name", secret.GetName()).Info("Removed access package Secret") d.CreateEvent(k8sutil.NewAccessPackageDeletedEvent(d.apiObject, secret.GetName())) } } @@ -97,7 +97,7 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error { // ensureAccessPackage creates an arangosync access package with given name // it is does not already exist. 
func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error { - log := d.deps.Log + log := d.sectionLogger("access-package") spec := d.apiObject.Spec _, err := d.acs.CurrentClusterCache().Secret().V1().Read().Get(ctx, apSecretName, metav1.GetOptions{}) @@ -105,7 +105,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin // Secret already exists return nil } else if !k8sutil.IsNotFound(err) { - log.Debug().Err(err).Str("name", apSecretName).Msg("Failed to get arangosync access package secret") + log.Err(err).Str("name", apSecretName).Debug("Failed to get arangosync access package secret") return errors.WithStack(err) } @@ -113,7 +113,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin clientAuthSecretName := spec.Sync.Authentication.GetClientCASecretName() clientAuthCert, clientAuthKey, _, err := k8sutil.GetCASecret(ctx, d.acs.CurrentClusterCache().Secret().V1().Read(), clientAuthSecretName, nil) if err != nil { - log.Debug().Err(err).Msg("Failed to get client-auth CA secret") + log.Err(err).Debug("Failed to get client-auth CA secret") return errors.WithStack(err) } @@ -121,14 +121,14 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin tlsCASecretName := spec.Sync.TLS.GetCASecretName() tlsCACert, err := k8sutil.GetCACertficateSecret(ctx, d.acs.CurrentClusterCache().Secret().V1().Read(), tlsCASecretName) if err != nil { - log.Debug().Err(err).Msg("Failed to get TLS CA secret") + log.Err(err).Debug("Failed to get TLS CA secret") return errors.WithStack(err) } // Create keyfile ca, err := certificates.LoadCAFromPEM(clientAuthCert, clientAuthKey) if err != nil { - log.Debug().Err(err).Msg("Failed to parse client-auth CA") + log.Err(err).Debug("Failed to parse client-auth CA") return errors.WithStack(err) } @@ -140,7 +140,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin } cert, key, err := certificates.CreateCertificate(options, &ca) if err != nil { - log.Debug().Err(err).Msg("Failed to create client-auth keyfile") + log.Err(err).Debug("Failed to create client-auth keyfile") return errors.WithStack(err) } keyfile := strings.TrimSpace(cert) + "\n" + strings.TrimSpace(key) @@ -182,12 +182,12 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin // Serialize secrets keyfileYaml, err := yaml.Marshal(keyfileSecret) if err != nil { - log.Debug().Err(err).Msg("Failed to encode client-auth keyfile Secret") + log.Err(err).Debug("Failed to encode client-auth keyfile Secret") return errors.WithStack(err) } tlsCAYaml, err := yaml.Marshal(tlsCASecret) if err != nil { - log.Debug().Err(err).Msg("Failed to encode TLS CA Secret") + log.Err(err).Debug("Failed to encode TLS CA Secret") return errors.WithStack(err) } allYaml := strings.TrimSpace(string(keyfileYaml)) + "\n---\n" + strings.TrimSpace(string(tlsCAYaml)) @@ -211,12 +211,12 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin }) if err != nil { // Failed to create secret - log.Debug().Err(err).Str("secret-name", apSecretName).Msg("Failed to create access package Secret") + log.Err(err).Str("secret-name", apSecretName).Debug("Failed to create access package Secret") return errors.WithStack(err) } - // Write log entry & create event - log.Info().Str("secret-name", apSecretName).Msg("Created access package Secret") + // Write sectionLogger entry & create event + log.Str("secret-name", apSecretName).Info("Created access package Secret") 
d.CreateEvent(k8sutil.NewAccessPackageCreatedEvent(d.apiObject, apSecretName)) return nil diff --git a/pkg/deployment/chaos/monkey.go b/pkg/deployment/chaos/monkey.go index a7116e789..90bc3fa8a 100644 --- a/pkg/deployment/chaos/monkey.go +++ b/pkg/deployment/chaos/monkey.go @@ -25,27 +25,38 @@ import ( "math/rand" "time" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" meta "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/errors" + "github.com/rs/zerolog" +) + +var ( + chaosMonkeyLogger = logging.Global().RegisterAndGetLogger("chaos-monkey", logging.Info) ) // Monkey is the service that introduces chaos in the deployment // if allowed and enabled. type Monkey struct { - log zerolog.Logger - context Context + namespace, name string + log logging.Logger + context Context +} + +func (m Monkey) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", m.namespace).Str("name", m.name) } // NewMonkey creates a new chaos monkey with given context. -func NewMonkey(log zerolog.Logger, context Context) *Monkey { - log = log.With().Str("component", "chaos-monkey").Logger() - return &Monkey{ - log: log, - context: context, +func NewMonkey(namespace, name string, context Context) *Monkey { + m := &Monkey{ + context: context, + namespace: namespace, + name: name, } + m.log = chaosMonkeyLogger.WrapObj(m) + return m } // Run the monkey until the given channel is closed. @@ -61,7 +72,7 @@ func (m Monkey) Run(stopCh <-chan struct{}) { if rand.Float64() < chance { // Let's introduce pod chaos if err := m.killRandomPod(ctx); err != nil { - log.Info().Err(err).Msg("Failed to kill random pod") + m.log.Err(err).Info("Failed to kill random pod") } } } @@ -87,7 +98,7 @@ func (m Monkey) killRandomPod(ctx context.Context) error { return nil } p := pods[rand.Intn(len(pods))] - m.log.Info().Str("pod-name", p.GetName()).Msg("Killing pod") + m.log.Str("pod-name", p.GetName()).Info("Killing pod") if err := m.context.DeletePod(ctx, p.GetName(), meta.DeleteOptions{}); err != nil { return errors.WithStack(err) } diff --git a/pkg/deployment/cleanup.go b/pkg/deployment/cleanup.go index 1f90f5e22..602bc8f2b 100644 --- a/pkg/deployment/cleanup.go +++ b/pkg/deployment/cleanup.go @@ -38,14 +38,14 @@ import ( // removePodFinalizers removes all finalizers from all pods owned by us. 
func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) (bool, error) { - log := d.deps.Log + log := d.sectionLogger("pod-finalizer") found := false if err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error { - log.Info().Str("pod", pod.GetName()).Msgf("Removing Pod Finalizer") - if count, err := k8sutil.RemovePodFinalizers(ctx, cachedStatus, log, d.PodsModInterface(), pod, constants.ManagedFinalizers(), true); err != nil { - log.Warn().Err(err).Msg("Failed to remove pod finalizers") + log.Str("pod", pod.GetName()).Info("Removing Pod Finalizer") + if count, err := k8sutil.RemovePodFinalizers(ctx, cachedStatus, d.PodsModInterface(), pod, constants.ManagedFinalizers(), true); err != nil { + log.Err(err).Warn("Failed to remove pod finalizers") return err } else if count > 0 { found = true @@ -58,7 +58,7 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe GracePeriodSeconds: util.NewInt64(0), }); err != nil { if !k8sutil.IsNotFound(err) { - log.Warn().Err(err).Msg("Failed to remove pod") + log.Err(err).Warn("Failed to remove pod") return err } } @@ -72,14 +72,14 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe // removePVCFinalizers removes all finalizers from all PVCs owned by us. func (d *Deployment) removePVCFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) (bool, error) { - log := d.deps.Log + log := d.sectionLogger("pvc-finalizer") found := false if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(pvc *core.PersistentVolumeClaim) error { - log.Info().Str("pvc", pvc.GetName()).Msgf("Removing PVC Finalizer") - if count, err := k8sutil.RemovePVCFinalizers(ctx, cachedStatus, log, d.PersistentVolumeClaimsModInterface(), pvc, constants.ManagedFinalizers(), true); err != nil { - log.Warn().Err(err).Msg("Failed to remove PVC finalizers") + log.Str("pvc", pvc.GetName()).Info("Removing PVC Finalizer") + if count, err := k8sutil.RemovePVCFinalizers(ctx, cachedStatus, d.PersistentVolumeClaimsModInterface(), pvc, constants.ManagedFinalizers(), true); err != nil { + log.Err(err).Warn("Failed to remove PVC finalizers") return err } else if count > 0 { found = true diff --git a/pkg/deployment/cluster_scaling_integration.go b/pkg/deployment/cluster_scaling_integration.go index 440225cfd..7e2dbb68d 100644 --- a/pkg/deployment/cluster_scaling_integration.go +++ b/pkg/deployment/cluster_scaling_integration.go @@ -33,15 +33,18 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/arangod" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" ) +var ciLogger = logging.Global().RegisterAndGetLogger("deployment-ci", logging.Info) + // clusterScalingIntegration is a helper to communicate with the clusters // scaling UI. type clusterScalingIntegration struct { - log zerolog.Logger + log logging.Logger depl *Deployment pendingUpdate struct { mutex sync.Mutex @@ -57,6 +60,10 @@ type clusterScalingIntegration struct { } } +func (ci *clusterScalingIntegration) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", ci.depl.GetNamespace()).Str("name", ci.depl.Name()) +} + const ( maxClusterBootstrapTime = time.Minute * 2 // Time we allow a cluster bootstrap to take, before we can do cluster inspections. 
) @@ -64,9 +71,9 @@ const ( // newClusterScalingIntegration creates a new clusterScalingIntegration. func newClusterScalingIntegration(depl *Deployment) *clusterScalingIntegration { ci := &clusterScalingIntegration{ - log: depl.deps.Log, depl: depl, } + ci.log = ciLogger.WrapObj(ci) ci.scaleEnabled.enabled = true return ci } @@ -108,13 +115,13 @@ func (ci *clusterScalingIntegration) checkScalingCluster(ctx context.Context, ex safeToAskCluster, err := ci.updateClusterServerCount(ctx, expectSuccess) if err != nil { if expectSuccess { - ci.log.Debug().Err(err).Msg("Cluster update failed") + ci.log.Err(err).Debug("Cluster update failed") } } else if safeToAskCluster { // Inspect once if err := ci.inspectCluster(ctx, expectSuccess); err != nil { if expectSuccess { - ci.log.Debug().Err(err).Msg("Cluster inspection failed") + ci.log.Err(err).Debug("Cluster inspection failed") } } else { return true @@ -163,7 +170,7 @@ func (ci *clusterScalingIntegration) inspectCluster(ctx context.Context, expectS req, err := arangod.GetNumberOfServers(ctxChild, c.Connection()) if err != nil { if expectSuccess { - log.Debug().Err(err).Msg("Failed to get number of servers") + log.Err(err).Debug("Failed to get number of servers") } return errors.WithStack(err) } @@ -220,13 +227,13 @@ func (ci *clusterScalingIntegration) inspectCluster(ctx context.Context, expectS // min <= count <= max holds for the given server groups if err := newSpec.Validate(); err != nil { // Log failure & create event - log.Warn().Err(err).Msg("Validation of updated spec has failed") + log.Err(err).Warn("Validation of updated spec has failed") ci.depl.CreateEvent(k8sutil.NewErrorEvent("Validation failed", err, apiObject)) // Restore original spec in cluster ci.SendUpdateToCluster(current.Spec) } else { if err := ci.depl.updateCRSpec(ctx, *newSpec); err != nil { - log.Warn().Err(err).Msg("Failed to update current deployment") + log.Err(err).Warn("Failed to update current deployment") return errors.WithStack(err) } } @@ -269,7 +276,7 @@ func (ci *clusterScalingIntegration) updateClusterServerCount(ctx context.Contex if coordinatorCount != lastNumberOfServers.GetCoordinators() || dbserverCount != lastNumberOfServers.GetDBServers() { if err := ci.depl.SetNumberOfServers(ctx, coordinatorCountPtr, dbserverCountPtr); err != nil { if expectSuccess { - log.Debug().Err(err).Msg("Failed to set number of servers") + log.Err(err).Debug("Failed to set number of servers") } return false, errors.WithStack(err) } diff --git a/pkg/deployment/context_impl.go b/pkg/deployment/context_impl.go index b622f6e45..b94cead4e 100644 --- a/pkg/deployment/context_impl.go +++ b/pkg/deployment/context_impl.go @@ -52,7 +52,6 @@ import ( apiErrors "k8s.io/apimachinery/pkg/api/errors" - "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -75,6 +74,7 @@ import ( serviceaccountv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount/v1" servicemonitorv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor/v1" "github.com/arangodb/kube-arangodb/pkg/util/kclient" + "github.com/rs/zerolog/log" ) var _ resources.Context = &Deployment{} @@ -146,10 +146,10 @@ func (d *Deployment) UpdateStatus(ctx context.Context, status api.DeploymentStat func (d *Deployment) updateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error { if d.status.version != lastVersion { // Status is obsolete - d.deps.Log.Error(). + d.log. Int32("expected-version", lastVersion). 
Int32("actual-version", d.status.version). - Msg("UpdateStatus version conflict error.") + Error("UpdateStatus version conflict error.") return errors.WithStack(errors.Newf("Status conflict error. Expected version %d, got %d", lastVersion, d.status.version)) } @@ -174,7 +174,7 @@ func (d *Deployment) UpdateMember(ctx context.Context, member api.MemberStatus) return errors.WithStack(err) } if err := d.UpdateStatus(ctx, status, lastVersion); err != nil { - d.deps.Log.Debug().Err(err).Msg("Updating CR status failed") + d.log.Err(err).Debug("Updating CR status failed") return errors.WithStack(err) } return nil @@ -307,7 +307,7 @@ func (d *Deployment) getJWTFolderToken() (string, bool) { if i := d.apiObject.Status.CurrentImage; i == nil || features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) { s, err := d.GetCachedStatus().Secret().V1().Read().Get(context.Background(), pod.JWTSecretFolder(d.GetName()), meta.GetOptions{}) if err != nil { - d.deps.Log.Error().Err(err).Msgf("Unable to get secret") + d.log.Err(err).Error("Unable to get secret") return "", false } @@ -344,11 +344,10 @@ func (d *Deployment) getJWTToken() (string, bool) { // GetSyncServerClient returns a cached client for a specific arangosync server. func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGroup, id string) (client.API, error) { // Fetch monitoring token - log := d.deps.Log secretName := d.apiObject.Spec.Sync.Monitoring.GetTokenSecretName() monitoringToken, err := k8sutil.GetTokenSecret(ctx, d.GetCachedStatus().Secret().V1().Read(), secretName) if err != nil { - log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync monitoring secret") + d.log.Err(err).Str("secret-name", secretName).Debug("Failed to get sync monitoring secret") return nil, errors.WithStack(err) } @@ -368,7 +367,8 @@ func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGr } auth := client.NewAuthentication(tlsAuth, "") insecureSkipVerify := true - c, err := d.syncClientCache.GetClient(d.deps.Log, source, auth, insecureSkipVerify) + // TODO: Change logging system in sync client + c, err := d.syncClientCache.GetClient(log.Logger, source, auth, insecureSkipVerify) if err != nil { return nil, errors.WithStack(err) } @@ -378,11 +378,10 @@ func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGr // CreateMember adds a new member to the given group. // If ID is non-empty, it will be used, otherwise a new ID is created. func (d *Deployment) CreateMember(ctx context.Context, group api.ServerGroup, id string, mods ...reconcile.CreateMemberMod) (string, error) { - log := d.deps.Log if err := d.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) { - nid, err := createMember(log, s, group, id, d.apiObject, mods...) + nid, err := d.createMember(s, group, id, d.apiObject, mods...) if err != nil { - log.Debug().Err(err).Str("group", group.AsRole()).Msg("Failed to create member") + d.log.Err(err).Str("group", group.AsRole()).Debug("Failed to create member") return false, errors.WithStack(err) } @@ -407,12 +406,12 @@ func (d *Deployment) GetPod(ctx context.Context, podName string) (*core.Pod, err // DeletePod deletes a pod with given name in the namespace // of the deployment. If the pod does not exist, the error is ignored. 
func (d *Deployment) DeletePod(ctx context.Context, podName string, options meta.DeleteOptions) error { - log := d.deps.Log + log := d.log err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { return d.PodsModInterface().Delete(ctxChild, podName, options) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Debug().Err(err).Str("pod", podName).Msg("Failed to remove pod") + log.Err(err).Str("pod", podName).Debug("Failed to remove pod") return errors.WithStack(err) } return nil @@ -421,7 +420,7 @@ func (d *Deployment) DeletePod(ctx context.Context, podName string, options meta // CleanupPod deletes a given pod with force and explicit UID. // If the pod does not exist, the error is ignored. func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error { - log := d.deps.Log + log := d.log podName := p.GetName() options := meta.NewDeleteOptions(0) options.Preconditions = meta.NewUIDPreconditions(string(p.GetUID())) @@ -429,7 +428,7 @@ func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error { return d.PodsModInterface().Delete(ctxChild, podName, *options) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Debug().Err(err).Str("pod", podName).Msg("Failed to cleanup pod") + log.Err(err).Str("pod", podName).Debug("Failed to cleanup pod") return errors.WithStack(err) } return nil @@ -438,8 +437,6 @@ func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error { // RemovePodFinalizers removes all the finalizers from the Pod with given name in the namespace // of the deployment. If the pod does not exist, the error is ignored. func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) error { - log := d.deps.Log - ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() p, err := d.GetCachedStatus().Pod().V1().Read().Get(ctxChild, podName, meta.GetOptions{}) @@ -450,7 +447,7 @@ func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) er return errors.WithStack(err) } - _, err = k8sutil.RemovePodFinalizers(ctx, d.GetCachedStatus(), log, d.PodsModInterface(), p, p.GetFinalizers(), true) + _, err = k8sutil.RemovePodFinalizers(ctx, d.GetCachedStatus(), d.PodsModInterface(), p, p.GetFinalizers(), true) if err != nil { return errors.WithStack(err) } @@ -460,12 +457,12 @@ func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) er // DeletePvc deletes a persistent volume claim with given name in the namespace // of the deployment. If the pvc does not exist, the error is ignored. 
func (d *Deployment) DeletePvc(ctx context.Context, pvcName string) error { - log := d.deps.Log + log := d.log err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { return d.PersistentVolumeClaimsModInterface().Delete(ctxChild, pvcName, meta.DeleteOptions{}) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Debug().Err(err).Str("pvc", pvcName).Msg("Failed to remove pvc") + log.Err(err).Str("pvc", pvcName).Debug("Failed to remove pvc") return errors.WithStack(err) } return nil @@ -509,7 +506,7 @@ func (d *Deployment) GetPvc(ctx context.Context, pvcName string) (*core.Persiste pvc, err := d.GetCachedStatus().PersistentVolumeClaim().V1().Read().Get(ctxChild, pvcName, meta.GetOptions{}) if err != nil { - log.Debug().Err(err).Str("pvc-name", pvcName).Msg("Failed to get PVC") + d.log.Err(err).Str("pvc-name", pvcName).Debug("Failed to get PVC") return nil, errors.WithStack(err) } return pvc, nil diff --git a/pkg/deployment/deployment.go b/pkg/deployment/deployment.go index ef510af38..b309c28fa 100644 --- a/pkg/deployment/deployment.go +++ b/pkg/deployment/deployment.go @@ -27,7 +27,6 @@ import ( "sync/atomic" "time" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/tools/record" @@ -48,6 +47,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/resilience" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/operator/scope" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/arangod" @@ -72,7 +72,6 @@ type Config struct { // Dependencies holds dependent services for a Deployment type Dependencies struct { - Log zerolog.Logger EventRecorder record.EventRecorder Client kclient.Client @@ -104,6 +103,8 @@ type deploymentStatusObject struct { // Deployment is the in process state of an ArangoDeployment. type Deployment struct { + log logging.Logger + name string namespace string @@ -237,14 +238,16 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De acs: acs.NewACS(apiObject.GetUID(), i), } + d.log = logger.WrapObj(d) + d.memberState = memberState.NewStateInspector(d) d.clientCache = deploymentClient.NewClientCache(d, conn.NewFactory(d.getAuth, d.getConnConfig)) d.status.last = *(apiObject.Status.DeepCopy()) - d.reconciler = reconcile.NewReconciler(deps.Log, d) - d.resilience = resilience.NewResilience(deps.Log, d) - d.resources = resources.NewResources(deps.Log, d) + d.reconciler = reconcile.NewReconciler(apiObject.GetNamespace(), apiObject.GetName(), d) + d.resilience = resilience.NewResilience(apiObject.GetNamespace(), apiObject.GetName(), d) + d.resources = resources.NewResources(apiObject.GetNamespace(), apiObject.GetName(), d) if d.status.last.AcceptedSpec == nil { // We've validated the spec, so let's use it from now. d.status.last.AcceptedSpec = apiObject.Spec.DeepCopy() @@ -264,7 +267,7 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De go ci.ListenForClusterEvents(d.stopCh) } if config.AllowChaos { - d.chaosMonkey = chaos.NewMonkey(deps.Log, d) + d.chaosMonkey = chaos.NewMonkey(apiObject.GetNamespace(), apiObject.GetName(), d) go d.chaosMonkey.Run(d.stopCh) } @@ -283,7 +286,7 @@ func (d *Deployment) Update(apiObject *api.ArangoDeployment) { // Delete the deployment. 
// Called when the deployment was deleted by the user. func (d *Deployment) Delete() { - d.deps.Log.Info().Msg("deployment is deleted by user") + d.log.Info("deployment is deleted by user") if atomic.CompareAndSwapInt32(&d.stopped, 0, 1) { close(d.stopCh) } @@ -295,10 +298,10 @@ func (d *Deployment) send(ev *deploymentEvent) { case d.eventCh <- ev: l, ecap := len(d.eventCh), cap(d.eventCh) if l > int(float64(ecap)*0.8) { - d.deps.Log.Warn(). + d.log. Int("used", l). Int("capacity", ecap). - Msg("event queue buffer is almost full") + Warn("event queue buffer is almost full") } case <-d.stopCh: } @@ -308,7 +311,7 @@ func (d *Deployment) send(ev *deploymentEvent) { // It processes the event queue and polls the state of generated // resource on a regular basis. func (d *Deployment) run() { - log := d.deps.Log + log := d.log // Create agency mapping if err := d.createAgencyMapping(context.TODO()); err != nil { @@ -331,32 +334,32 @@ func (d *Deployment) run() { status, lastVersion := d.GetStatus() status.Phase = api.DeploymentPhaseRunning if err := d.UpdateStatus(context.TODO(), status, lastVersion); err != nil { - log.Warn().Err(err).Msg("update initial CR status failed") + log.Err(err).Warn("update initial CR status failed") } - log.Info().Msg("start running...") + log.Info("start running...") } d.lookForServiceMonitorCRD() // Execute inspection for first time without delay of 10s - log.Debug().Msg("Initially inspect deployment...") + log.Debug("Initially inspect deployment...") inspectionInterval := d.inspectDeployment(minInspectionInterval) - log.Debug().Str("interval", inspectionInterval.String()).Msg("...deployment inspect started") + log.Str("interval", inspectionInterval.String()).Debug("...deployment inspect started") for { select { case <-d.stopCh: err := d.acs.CurrentClusterCache().Refresh(context.Background()) if err != nil { - log.Error().Err(err).Msg("Unable to get resources") + log.Err(err).Error("Unable to get resources") } // Remove finalizers from created resources - log.Info().Msg("Deployment removed, removing finalizers to prevent orphaned resources") + log.Info("Deployment removed, removing finalizers to prevent orphaned resources") if _, err := d.removePodFinalizers(context.TODO(), d.GetCachedStatus()); err != nil { - log.Warn().Err(err).Msg("Failed to remove Pod finalizers") + log.Err(err).Warn("Failed to remove Pod finalizers") } if _, err := d.removePVCFinalizers(context.TODO(), d.GetCachedStatus()); err != nil { - log.Warn().Err(err).Msg("Failed to remove PVC finalizers") + log.Err(err).Warn("Failed to remove PVC finalizers") } // We're being stopped. return @@ -371,9 +374,9 @@ func (d *Deployment) run() { } case <-d.inspectTrigger.Done(): - log.Debug().Msg("Inspect deployment...") + log.Debug("Inspect deployment...") inspectionInterval = d.inspectDeployment(inspectionInterval) - log.Debug().Str("interval", inspectionInterval.String()).Msg("...inspected deployment") + log.Str("interval", inspectionInterval.String()).Debug("...inspected deployment") case <-d.inspectCRDTrigger.Done(): d.lookForServiceMonitorCRD() @@ -394,7 +397,7 @@ func (d *Deployment) run() { // handleArangoDeploymentUpdatedEvent is called when the deployment is updated by the user. 
func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) error { - log := d.deps.Log.With().Str("deployment", d.apiObject.GetName()).Logger() + log := d.log.Str("deployment", d.apiObject.GetName()) // Get the most recent version of the deployment from the API server ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) @@ -402,7 +405,7 @@ func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) err current, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.apiObject.GetNamespace()).Get(ctxChild, d.apiObject.GetName(), meta.GetOptions{}) if err != nil { - log.Debug().Err(err).Msg("Failed to get current version of deployment from API server") + log.Err(err).Debug("Failed to get current version of deployment from API server") if k8sutil.IsNotFound(err) { return nil } @@ -420,21 +423,21 @@ func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) err resetFields := specBefore.ResetImmutableFields(&newAPIObject.Spec) if len(resetFields) > 0 { - log.Debug().Strs("fields", resetFields).Msg("Found modified immutable fields") + log.Strs("fields", resetFields...).Debug("Found modified immutable fields") newAPIObject.Spec.SetDefaults(d.apiObject.GetName()) } if err := newAPIObject.Spec.Validate(); err != nil { d.CreateEvent(k8sutil.NewErrorEvent("Validation failed", err, d.apiObject)) // Try to reset object if err := d.updateCRSpec(ctx, d.apiObject.Spec); err != nil { - log.Error().Err(err).Msg("Restore original spec failed") + log.Err(err).Error("Restore original spec failed") d.CreateEvent(k8sutil.NewErrorEvent("Restore original failed", err, d.apiObject)) } return nil } if len(resetFields) > 0 { for _, fieldName := range resetFields { - log.Debug().Str("field", fieldName).Msg("Reset immutable field") + log.Str("field", fieldName).Debug("Reset immutable field") d.CreateEvent(k8sutil.NewImmutableFieldEvent(fieldName, d.apiObject)) } } @@ -447,7 +450,7 @@ func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) err { status, lastVersion := d.GetStatus() if newAPIObject.Status.IsForceReload() { - log.Warn().Msg("Forced status reload!") + log.Warn("Forced status reload!") status = newAPIObject.Status status.ForceStatusReload = nil } @@ -516,7 +519,7 @@ func (d *Deployment) updateCRStatus(ctx context.Context, force ...bool) error { continue } if err != nil { - d.deps.Log.Debug().Err(err).Msg("failed to patch ArangoDeployment status") + d.log.Err(err).Debug("failed to patch ArangoDeployment status") return errors.WithStack(errors.Newf("failed to patch ArangoDeployment status: %v", err)) } } @@ -529,7 +532,7 @@ func (d *Deployment) updateCRSpec(ctx context.Context, newSpec api.DeploymentSpe if len(force) == 0 || !force[0] { if d.apiObject.Spec.Equal(&newSpec) { - d.deps.Log.Debug().Msg("Nothing to update in updateCRSpec") + d.log.Debug("Nothing to update in updateCRSpec") // Nothing to update return nil } @@ -572,7 +575,7 @@ func (d *Deployment) updateCRSpec(ctx context.Context, newSpec api.DeploymentSpe } } if err != nil { - d.deps.Log.Debug().Err(err).Msg("failed to patch ArangoDeployment spec") + d.log.Err(err).Debug("failed to patch ArangoDeployment spec") return errors.WithStack(errors.Newf("failed to patch ArangoDeployment spec: %v", err)) } } @@ -601,23 +604,23 @@ func (d *Deployment) lookForServiceMonitorCRD() { } else { _, err = d.deps.Client.KubernetesExtensions().ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), "servicemonitors.monitoring.coreos.com", 
meta.GetOptions{}) } - log := d.deps.Log - log.Debug().Msgf("Looking for ServiceMonitor CRD...") + log := d.log + log.Debug("Looking for ServiceMonitor CRD...") if err == nil { if !d.haveServiceMonitorCRD { - log.Info().Msgf("...have discovered ServiceMonitor CRD") + log.Info("...have discovered ServiceMonitor CRD") } d.haveServiceMonitorCRD = true d.triggerInspection() return } else if k8sutil.IsNotFound(err) { if d.haveServiceMonitorCRD { - log.Info().Msgf("...ServiceMonitor CRD no longer there") + log.Info("...ServiceMonitor CRD no longer there") } d.haveServiceMonitorCRD = false return } - log.Warn().Err(err).Msgf("Error when looking for ServiceMonitor CRD") + log.Err(err).Warn("Error when looking for ServiceMonitor CRD") } // SetNumberOfServers adjust number of DBservers and coordinators in arangod diff --git a/pkg/deployment/deployment_finalizers.go b/pkg/deployment/deployment_finalizers.go index bdff47b7b..543df3542 100644 --- a/pkg/deployment/deployment_finalizers.go +++ b/pkg/deployment/deployment_finalizers.go @@ -29,7 +29,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -54,7 +53,6 @@ func ensureFinalizers(depl *api.ArangoDeployment) bool { // runDeploymentFinalizers goes through the list of ArangoDeployoment finalizers to see if they can be removed. func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { - log := d.deps.Log var removalList []string depls := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.GetNamespace()) @@ -67,20 +65,20 @@ func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus i for _, f := range updated.ObjectMeta.GetFinalizers() { switch f { case constants.FinalizerDeplRemoveChildFinalizers: - log.Debug().Msg("Inspecting 'remove child finalizers' finalizer") - if retry, err := d.inspectRemoveChildFinalizers(ctx, log, updated, cachedStatus); err == nil && !retry { + d.log.Debug("Inspecting 'remove child finalizers' finalizer") + if retry, err := d.inspectRemoveChildFinalizers(ctx, updated, cachedStatus); err == nil && !retry { removalList = append(removalList, f) } else if retry { - log.Debug().Str("finalizer", f).Msg("Retry on finalizer removal") + d.log.Str("finalizer", f).Debug("Retry on finalizer removal") } else { - log.Debug().Err(err).Str("finalizer", f).Msg("Cannot remove finalizer yet") + d.log.Err(err).Str("finalizer", f).Debug("Cannot remove finalizer yet") } } } // Remove finalizers (if needed) if len(removalList) > 0 { - if err := removeDeploymentFinalizers(ctx, log, d.deps.Client.Arango(), updated, removalList); err != nil { - log.Debug().Err(err).Msg("Failed to update ArangoDeployment (to remove finalizers)") + if err := removeDeploymentFinalizers(ctx, d.deps.Client.Arango(), updated, removalList); err != nil { + d.log.Err(err).Debug("Failed to update ArangoDeployment (to remove finalizers)") return errors.WithStack(err) } } @@ -89,7 +87,7 @@ func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus i // inspectRemoveChildFinalizers checks the finalizer condition for remove-child-finalizers. // It returns nil if the finalizer can be removed. 
-func (d *Deployment) inspectRemoveChildFinalizers(ctx context.Context, _ zerolog.Logger, _ *api.ArangoDeployment, cachedStatus inspectorInterface.Inspector) (bool, error) { +func (d *Deployment) inspectRemoveChildFinalizers(ctx context.Context, _ *api.ArangoDeployment, cachedStatus inspectorInterface.Inspector) (bool, error) { retry := false if found, err := d.removePodFinalizers(ctx, cachedStatus); err != nil { @@ -107,7 +105,7 @@ func (d *Deployment) inspectRemoveChildFinalizers(ctx context.Context, _ zerolog } // removeDeploymentFinalizers removes the given finalizers from the given PVC. -func removeDeploymentFinalizers(ctx context.Context, log zerolog.Logger, cli versioned.Interface, +func removeDeploymentFinalizers(ctx context.Context, cli versioned.Interface, depl *api.ArangoDeployment, finalizers []string) error { depls := cli.DatabaseV1().ArangoDeployments(depl.GetNamespace()) getFunc := func() (metav1.Object, error) { @@ -133,7 +131,7 @@ func removeDeploymentFinalizers(ctx context.Context, log zerolog.Logger, cli ver return nil } ignoreNotFound := false - if _, err := k8sutil.RemoveFinalizers(log, finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { + if _, err := k8sutil.RemoveFinalizers(finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { return errors.WithStack(err) } return nil diff --git a/pkg/deployment/deployment_inspector.go b/pkg/deployment/deployment_inspector.go index e3c60ad6a..1adb2ac12 100644 --- a/pkg/deployment/deployment_inspector.go +++ b/pkg/deployment/deployment_inspector.go @@ -57,13 +57,12 @@ var ( // - once in a while // Returns the delay until this function should be called again. func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval { - log := d.deps.Log start := time.Now() ctxReconciliation, cancelReconciliation := globals.GetGlobalTimeouts().Reconciliation().WithTimeout(context.Background()) defer cancelReconciliation() defer func() { - d.deps.Log.Info().Msgf("Inspect loop took %s", time.Since(start)) + d.log.Info("Inspect loop took %s", time.Since(start)) }() nextInterval := lastInterval @@ -74,7 +73,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval err := d.acs.CurrentClusterCache().Refresh(ctxReconciliation) if err != nil { - log.Error().Err(err).Msg("Unable to get resources") + d.log.Err(err).Error("Unable to get resources") return minInspectionInterval // Retry ASAP } @@ -82,7 +81,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval updated, err := d.acs.CurrentClusterCache().GetCurrentArangoDeployment() if k8sutil.IsNotFound(err) { // Deployment is gone - log.Info().Msg("Deployment is gone") + d.log.Info("Deployment is gone") d.Delete() return nextInterval } else if updated != nil && updated.GetDeletionTimestamp() != nil { @@ -96,20 +95,20 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval if updated != nil && updated.Annotations != nil { if v, ok := updated.Annotations[deployment.ArangoDeploymentPodMaintenanceAnnotation]; ok && v == "true" { // Disable checks if we will enter maintenance mode - log.Info().Str("deployment", deploymentName).Msg("Deployment in maintenance mode") + d.log.Str("deployment", deploymentName).Info("Deployment in maintenance mode") return nextInterval } } // Is the deployment in failed state, if so, give up. 
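The inspector hunks above keep the pre-existing timing idiom — record `time.Now()` at the top of the loop and log the elapsed duration from a deferred call — and only swap the logger that reports it. For reference, the underlying idiom, shown here against plain zerolog with an invented `inspectOnce` placeholder:

```go
package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

// inspectOnce is a placeholder for one pass of the real inspection work.
func inspectOnce() {
	time.Sleep(50 * time.Millisecond)
}

// runInspection times a single pass and logs the duration when it returns,
// mirroring the "Inspect loop took %s" message in the hunk above.
func runInspection(log zerolog.Logger) {
	start := time.Now()
	defer func() {
		log.Info().Msgf("Inspect loop took %s", time.Since(start))
	}()
	inspectOnce()
}

func main() {
	runInspection(zerolog.New(os.Stdout).With().Timestamp().Logger())
}
```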
if d.GetPhase() == api.DeploymentPhaseFailed { - log.Debug().Msg("Deployment is in Failed state.") + d.log.Debug("Deployment is in Failed state.") return nextInterval } d.apiObject = updated d.GetMembersState().RefreshState(ctxReconciliation, updated.Status.Members.AsList()) - d.GetMembersState().Log(d.deps.Log) + d.GetMembersState().Log(d.log) if err := d.WithStatusUpdateErr(ctxReconciliation, func(s *api.DeploymentStatus) (bool, error) { if changed, err := upgrade.RunUpgrade(*updated, s, d.GetCachedStatus()); err != nil { return false, err @@ -153,7 +152,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva t := time.Now() defer func() { - d.deps.Log.Info().Msgf("Reconciliation loop took %s", time.Since(t)) + d.log.Info("Reconciliation loop took %s", time.Since(t)) }() // Ensure that spec and status checksum are same @@ -178,7 +177,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva } if err := d.acs.Inspect(ctx, d.apiObject, d.deps.Client, d.GetCachedStatus()); err != nil { - d.deps.Log.Warn().Err(err).Msgf("Unable to handle ACS objects") + d.log.Err(err).Warn("Unable to handle ACS objects") } // Cleanup terminated pods on the beginning of loop @@ -200,7 +199,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva return minInspectionInterval, errors.Wrapf(err, "Service creation failed") } - if err := d.resources.EnsureSecrets(ctx, d.deps.Log, d.GetCachedStatus()); err != nil { + if err := d.resources.EnsureSecrets(ctx, d.GetCachedStatus()); err != nil { return minInspectionInterval, errors.Wrapf(err, "Secret creation failed") } @@ -258,7 +257,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva inspectDeploymentAgencyFetches.WithLabelValues(d.GetName()).Inc() if offset, err := d.RefreshAgencyCache(ctx); err != nil { inspectDeploymentAgencyErrors.WithLabelValues(d.GetName()).Inc() - d.deps.Log.Err(err).Msgf("Unable to refresh agency") + d.log.Err(err).Error("Unable to refresh agency") } else { inspectDeploymentAgencyIndex.WithLabelValues(d.GetName()).Set(float64(offset)) } @@ -278,10 +277,10 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva }, true); err != nil { return minInspectionInterval, errors.Wrapf(err, "Unable clean plan") } - } else if err, updated := d.reconciler.CreatePlan(ctx, d.GetCachedStatus()); err != nil { + } else if err, updated := d.reconciler.CreatePlan(ctx); err != nil { return minInspectionInterval, errors.Wrapf(err, "Plan creation failed") } else if updated { - d.deps.Log.Info().Msgf("Plan generated, reconciling") + d.log.Info("Plan generated, reconciling") return minInspectionInterval, nil } @@ -331,7 +330,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva } // Execute current step of scale/update plan - retrySoon, err := d.reconciler.ExecutePlan(ctx, d.GetCachedStatus()) + retrySoon, err := d.reconciler.ExecutePlan(ctx) if err != nil { return minInspectionInterval, errors.Wrapf(err, "Plan execution failed") } @@ -420,14 +419,14 @@ func (d *Deployment) refreshMaintenanceTTL(ctx context.Context) { if err := d.SetAgencyMaintenanceMode(ctx, true); err != nil { return } - d.deps.Log.Info().Msgf("Refreshed maintenance lock") + d.log.Info("Refreshed maintenance lock") } } else { if condition.LastUpdateTime.Add(d.apiObject.Spec.Timeouts.GetMaintenanceGracePeriod()).Before(time.Now()) { if err := d.SetAgencyMaintenanceMode(ctx, true); err != nil { return } - 
d.deps.Log.Info().Msgf("Refreshed maintenance lock") + d.log.Info("Refreshed maintenance lock") } } } @@ -475,7 +474,7 @@ func (d *Deployment) triggerCRDInspection() { } func (d *Deployment) updateConditionWithHash(ctx context.Context, conditionType api.ConditionType, status bool, reason, message, hash string) error { - d.deps.Log.Info().Str("condition", string(conditionType)).Bool("status", status).Str("reason", reason).Str("message", message).Str("hash", hash).Msg("Updated condition") + d.log.Str("condition", string(conditionType)).Bool("status", status).Str("reason", reason).Str("message", message).Str("hash", hash).Info("Updated condition") if err := d.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool { return s.Conditions.UpdateWithHash(conditionType, status, reason, message, hash) }); err != nil { diff --git a/pkg/deployment/deployment_run_test.go b/pkg/deployment/deployment_run_test.go index 04957def7..8e7728da3 100644 --- a/pkg/deployment/deployment_run_test.go +++ b/pkg/deployment/deployment_run_test.go @@ -31,8 +31,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog/log" - "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" core "k8s.io/api/core/v1" @@ -68,7 +66,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) { errs := 0 for { require.NoError(t, d.acs.CurrentClusterCache().Refresh(context.Background())) - err := d.resources.EnsureSecrets(context.Background(), log.Logger, d.GetCachedStatus()) + err := d.resources.EnsureSecrets(context.Background(), d.GetCachedStatus()) if err == nil { break } diff --git a/pkg/deployment/deployment_suite_test.go b/pkg/deployment/deployment_suite_test.go index 682f4b3cb..3701b7c30 100644 --- a/pkg/deployment/deployment_suite_test.go +++ b/pkg/deployment/deployment_suite_test.go @@ -23,7 +23,6 @@ package deployment import ( "context" "fmt" - "io/ioutil" "os" "path/filepath" "sort" @@ -34,7 +33,6 @@ import ( "github.com/arangodb/go-driver/jwt" "github.com/arangodb/kube-arangodb/pkg/deployment/client" monitoringFakeClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake" - "github.com/rs/zerolog" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -475,7 +473,6 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara arangoDeployment.Status.CurrentImage = &arangoDeployment.Status.Images[0] deps := Dependencies{ - Log: zerolog.New(ioutil.Discard), EventRecorder: eventRecorder, Client: kclient.NewStaticClient(kubernetesClientSet, kubernetesExtClientSet, arangoClientSet, monitoringClientSet), } @@ -490,6 +487,7 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara deps: deps, eventCh: make(chan *deploymentEvent, deploymentEventQueueSize), stopCh: make(chan struct{}), + log: logger, } d.clientCache = client.NewClientCache(d, conn.NewFactory(d.getAuth, d.getConnConfig)) d.acs = acs.NewACS("", i) @@ -497,7 +495,7 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara require.NoError(t, d.acs.CurrentClusterCache().Refresh(context.Background())) arangoDeployment.Spec.SetDefaults(arangoDeployment.GetName()) - d.resources = resources.NewResources(deps.Log, d) + d.resources = resources.NewResources(arangoDeployment.GetNamespace(), arangoDeployment.GetName(), d) return d, eventRecorder } diff --git a/pkg/deployment/images.go b/pkg/deployment/images.go index 56b7cd3c8..a1e832484 100644 --- 
a/pkg/deployment/images.go +++ b/pkg/deployment/images.go @@ -27,7 +27,6 @@ import ( "strings" "time" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -37,6 +36,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/arangodb/kube-arangodb/pkg/handlers/utils" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/arangod" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/errors" @@ -76,11 +76,11 @@ type ArangoSyncIdentity struct { } type imagesBuilder struct { + Log logging.Logger Context resources.Context APIObject k8sutil.APIObject Spec api.DeploymentSpec Status api.DeploymentStatus - Log zerolog.Logger UpdateCRStatus func(status api.DeploymentStatus) error } @@ -93,7 +93,7 @@ func (d *Deployment) ensureImages(ctx context.Context, apiObject *api.ArangoDepl APIObject: apiObject, Spec: apiObject.Spec, Status: status, - Log: d.deps.Log, + Log: d.log, UpdateCRStatus: func(status api.DeploymentStatus) error { if err := d.UpdateStatus(ctx, status, lastVersion); err != nil { return errors.WithStack(err) @@ -132,10 +132,9 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac role := shared.ImageIDAndVersionRole id := fmt.Sprintf("%0x", sha1.Sum([]byte(image)))[:6] podName := k8sutil.CreatePodName(ib.APIObject.GetName(), role, id, "") - log := ib.Log.With(). + log := ib.Log. Str("pod", podName). - Str("image", image). - Logger() + Str("image", image) // Check if pod exists ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) @@ -150,20 +149,20 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, metav1.DeleteOptions{}) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Warn().Err(err).Msg("Failed to delete Image ID Pod") + log.Err(err).Warn("Failed to delete Image ID Pod") return false, nil } } return false, nil } if !k8sutil.IsPodReady(pod) { - log.Debug().Msg("Image ID Pod is not yet ready") + log.Debug("Image ID Pod is not yet ready") return true, nil } imageID, err := k8sutil.GetArangoDBImageIDFromPod(pod) if err != nil { - log.Warn().Err(err).Msg("failed to get image ID from pod") + log.Err(err).Warn("failed to get image ID from pod") return true, nil } if imageID == "" { @@ -174,14 +173,14 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac // Try fetching the ArangoDB version client, err := arangod.CreateArangodImageIDClient(ctx, ib.APIObject, pod.Status.PodIP) if err != nil { - log.Warn().Err(err).Msg("Failed to create Image ID Pod client") + log.Err(err).Warn("Failed to create Image ID Pod client") return true, nil } ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() v, err := client.Version(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to fetch version from Image ID Pod") + log.Err(err).Debug("Failed to fetch version from Image ID Pod") return true, nil } version := v.Version @@ -192,7 +191,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, metav1.DeleteOptions{}) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Warn().Err(err).Msg("Failed to delete Image ID Pod") + 
log.Err(err).Warn("Failed to delete Image ID Pod") return true, nil } @@ -204,14 +203,14 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac } ib.Status.Images.AddOrUpdate(info) if err := ib.UpdateCRStatus(ib.Status); err != nil { - log.Warn().Err(err).Msg("Failed to save Image Info in CR status") + log.Err(err).Warn("Failed to save Image Info in CR status") return true, errors.WithStack(err) } // We're done - log.Debug(). + log. Str("image-id", imageID). Str("arangodb-version", string(version)). - Msg("Found image ID and ArangoDB version") + Debug("Found image ID and ArangoDB version") return false, nil } @@ -231,7 +230,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac pod, err = resources.RenderArangoPod(ctx, cachedStatus, ib.APIObject, role, id, podName, &imagePod) if err != nil { - log.Debug().Err(err).Msg("Failed to render image ID pod") + log.Err(err).Debug("Failed to render image ID pod") return true, errors.WithStack(err) } @@ -240,7 +239,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac return err }) if err != nil { - log.Debug().Err(err).Msg("Failed to create image ID pod") + log.Err(err).Debug("Failed to create image ID pod") return true, errors.WithStack(err) } // Come back soon to inspect the pod diff --git a/pkg/deployment/informers.go b/pkg/deployment/informers.go index 9568412ee..476578a82 100644 --- a/pkg/deployment/informers.go +++ b/pkg/deployment/informers.go @@ -44,7 +44,6 @@ func (d *Deployment) listenForPodEvents(stopCh <-chan struct{}) { } rw := k8sutil.NewResourceWatcher( - d.deps.Log, d.deps.Client.Kubernetes().CoreV1().RESTClient(), "pods", d.apiObject.GetNamespace(), @@ -89,7 +88,6 @@ func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) { } rw := k8sutil.NewResourceWatcher( - d.deps.Log, d.deps.Client.Kubernetes().CoreV1().RESTClient(), "persistentvolumeclaims", d.apiObject.GetNamespace(), @@ -134,7 +132,6 @@ func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) { } rw := k8sutil.NewResourceWatcher( - d.deps.Log, d.deps.Client.Kubernetes().CoreV1().RESTClient(), "secrets", d.apiObject.GetNamespace(), @@ -180,7 +177,6 @@ func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) { } rw := k8sutil.NewResourceWatcher( - d.deps.Log, d.deps.Client.Kubernetes().CoreV1().RESTClient(), "services", d.apiObject.GetNamespace(), @@ -212,7 +208,6 @@ func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) { // listenForCRDEvents keep listening for changes in CRDs until the given channel is closed. func (d *Deployment) listenForCRDEvents(stopCh <-chan struct{}) { rw := k8sutil.NewResourceWatcher( - d.deps.Log, d.deps.Client.KubernetesExtensions().ApiextensionsV1().RESTClient(), "customresourcedefinitions", "", diff --git a/pkg/deployment/logger.go b/pkg/deployment/logger.go new file mode 100644 index 000000000..c437a7c66 --- /dev/null +++ b/pkg/deployment/logger.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package deployment + +import ( + "github.com/arangodb/kube-arangodb/pkg/logging" + "github.com/rs/zerolog" +) + +var ( + logger = logging.Global().RegisterAndGetLogger("deployment", logging.Info) +) + +func (d *Deployment) sectionLogger(section string) logging.Logger { + return d.log.Str("section", section) +} + +func (d *Deployment) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", d.namespace).Str("name", d.name) +} diff --git a/pkg/deployment/member/state.go b/pkg/deployment/member/state.go index af516ea10..6ad54de9c 100644 --- a/pkg/deployment/member/state.go +++ b/pkg/deployment/member/state.go @@ -29,6 +29,7 @@ import ( "github.com/arangodb/go-driver" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/globals" ) @@ -44,7 +45,7 @@ type StateInspector interface { State() State - Log(logger zerolog.Logger) + Log(logger logging.Logger) } func NewStateInspector(client reconciler.DeploymentClient) StateInspector { @@ -73,13 +74,13 @@ func (s *stateInspector) State() State { return s.state } -func (s *stateInspector) Log(logger zerolog.Logger) { +func (s *stateInspector) Log(log logging.Logger) { s.lock.Lock() defer s.lock.Unlock() for m, s := range s.members { if !s.IsReachable() { - s.Log(logger.Info()).Str("member", m).Msgf("Member is in invalid state") + log.WrapObj(s).Str("member", m).Info("Member is in invalid state") } } } @@ -211,6 +212,6 @@ func (s State) IsReachable() bool { return s.NotReachableErr == nil } -func (s State) Log(event *zerolog.Event) *zerolog.Event { +func (s State) WrapLogger(event *zerolog.Event) *zerolog.Event { return event.Bool("reachable", s.IsReachable()).AnErr("reachableError", s.NotReachableErr) } diff --git a/pkg/deployment/members.go b/pkg/deployment/members.go index 3a592967b..f4c57e02c 100644 --- a/pkg/deployment/members.go +++ b/pkg/deployment/members.go @@ -31,7 +31,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -83,8 +82,8 @@ func (d *Deployment) createAgencyMapping(ctx context.Context) error { // createMember creates member and adds it to the applicable member list. // Note: This does not create any pods of PVCs // Note: The updated status is not yet written to the apiserver. 
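The new `pkg/deployment/logger.go` and the `member/state.go` hunk above share one idea: an object exposes a `WrapLogger(*zerolog.Event) *zerolog.Event` hook, and wrapping a logger with that object (`WrapObj`) makes every emitted event carry the object's fields (namespace and name for the deployment, reachability for a member state). A rough, self-contained sketch of that hook pattern, with invented names (`wrapObj`, `objLogger`) standing in for the parts of the real package not shown in the diff:

```go
package main

import (
	"os"

	"github.com/rs/zerolog"
)

// eventWrapper mirrors the hook used above: any object that knows how to
// decorate a zerolog event with its own identifying fields.
type eventWrapper interface {
	WrapLogger(*zerolog.Event) *zerolog.Event
}

// objLogger is a hypothetical minimal logger that applies the wrapper's fields
// to every emitted event (the role WrapObj plays in the hunks above).
type objLogger struct {
	base zerolog.Logger
	obj  eventWrapper
}

func wrapObj(base zerolog.Logger, obj eventWrapper) objLogger {
	return objLogger{base: base, obj: obj}
}

func (l objLogger) Info(msg string)  { l.obj.WrapLogger(l.base.Info()).Msg(msg) }
func (l objLogger) Debug(msg string) { l.obj.WrapLogger(l.base.Debug()).Msg(msg) }

// deployment is a stand-in for the operator's Deployment: it tags every log
// line with its namespace and name, as WrapLogger does in logger.go above.
type deployment struct {
	namespace, name string
}

func (d deployment) WrapLogger(in *zerolog.Event) *zerolog.Event {
	return in.Str("namespace", d.namespace).Str("name", d.name)
}

func main() {
	base := zerolog.New(os.Stdout)
	d := deployment{namespace: "default", name: "example-cluster"}
	log := wrapObj(base, d)
	log.Info("deployment is deleted by user") // carries namespace and name automatically
}
```

The same hook is why `Dependencies.Log` can disappear: the object itself, not the caller, decides which identifying fields every log line gets.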
-func createMember(log zerolog.Logger, status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment, mods ...reconcile.CreateMemberMod) (string, error) { - m, err := renderMember(log, status, group, id, apiObject) +func (d *Deployment) createMember(status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment, mods ...reconcile.CreateMemberMod) (string, error) { + m, err := d.renderMember(status, group, id, apiObject) if err != nil { return "", err } @@ -102,7 +101,7 @@ func createMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se return m.ID, nil } -func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment) (*api.MemberStatus, error) { +func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.ServerGroup, id string, apiObject *api.ArangoDeployment) (*api.MemberStatus, error) { if group == api.ServerGroupAgents { if status.Agency == nil { return nil, errors.New("Agency is not yet defined") @@ -136,7 +135,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se switch group { case api.ServerGroupSingle: - log.Debug().Str("id", id).Msg("Adding single server") + d.log.Str("id", id).Debug("Adding single server") return &api.MemberStatus{ ID: id, UID: uuid.NewUUID(), @@ -148,7 +147,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se Architecture: &arch, }, nil case api.ServerGroupAgents: - log.Debug().Str("id", id).Msg("Adding agent") + d.log.Str("id", id).Debug("Adding agent") return &api.MemberStatus{ ID: id, UID: uuid.NewUUID(), @@ -160,7 +159,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se Architecture: &arch, }, nil case api.ServerGroupDBServers: - log.Debug().Str("id", id).Msg("Adding dbserver") + d.log.Str("id", id).Debug("Adding dbserver") return &api.MemberStatus{ ID: id, UID: uuid.NewUUID(), @@ -172,7 +171,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se Architecture: &arch, }, nil case api.ServerGroupCoordinators: - log.Debug().Str("id", id).Msg("Adding coordinator") + d.log.Str("id", id).Debug("Adding coordinator") return &api.MemberStatus{ ID: id, UID: uuid.NewUUID(), @@ -184,7 +183,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se Architecture: &arch, }, nil case api.ServerGroupSyncMasters: - log.Debug().Str("id", id).Msg("Adding syncmaster") + d.log.Str("id", id).Debug("Adding syncmaster") return &api.MemberStatus{ ID: id, UID: uuid.NewUUID(), @@ -196,7 +195,7 @@ func renderMember(log zerolog.Logger, status *api.DeploymentStatus, group api.Se Architecture: &arch, }, nil case api.ServerGroupSyncWorkers: - log.Debug().Str("id", id).Msg("Adding syncworker") + d.log.Str("id", id).Debug("Adding syncworker") return &api.MemberStatus{ ID: id, UID: uuid.NewUUID(), diff --git a/pkg/deployment/reconcile/action.go b/pkg/deployment/reconcile/action.go index 44d521f38..444e7d386 100644 --- a/pkg/deployment/reconcile/action.go +++ b/pkg/deployment/reconcile/action.go @@ -26,8 +26,6 @@ import ( "sync" "time" - "github.com/rs/zerolog" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "k8s.io/apimachinery/pkg/types" @@ -114,8 +112,8 @@ func wrapActionStartFailureGracePeriod(action Action, failureGracePeriod time.Du } func withActionStartFailureGracePeriod(in 
actionFactory, failureGracePeriod time.Duration) actionFactory { - return func(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { - return wrapActionStartFailureGracePeriod(in(log, action, actionCtx), failureGracePeriod) + return func(action api.Action, actionCtx ActionContext) Action { + return wrapActionStartFailureGracePeriod(in(action, actionCtx), failureGracePeriod) } } @@ -154,7 +152,7 @@ func getActionPlanAppender(a Action, plan api.Plan) (api.Plan, bool) { } } -type actionFactory func(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action +type actionFactory func(action api.Action, actionCtx ActionContext) Action var ( definedActions = map[api.ActionType]actionFactory{} diff --git a/pkg/deployment/reconcile/action_add_member.go b/pkg/deployment/reconcile/action_add_member.go index e05cf58fe..3c0e0b1d9 100644 --- a/pkg/deployment/reconcile/action_add_member.go +++ b/pkg/deployment/reconcile/action_add_member.go @@ -29,8 +29,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" ) func init() { @@ -39,10 +37,10 @@ func init() { // newAddMemberAction creates a new Action that implements the given // planned AddMember action. -func newAddMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newAddMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionAddMember{} - a.actionImpl = newBaseActionImpl(log, action, actionCtx, &a.newMemberID) + a.actionImpl = newBaseActionImpl(action, actionCtx, &a.newMemberID) return a } @@ -66,7 +64,7 @@ type actionAddMember struct { func (a *actionAddMember) Start(ctx context.Context) (bool, error) { newID, err := a.actionCtx.CreateMember(ctx, a.action.Group, a.action.MemberID, topology.WithTopologyMod) if err != nil { - log.Debug().Err(err).Msg("Failed to create member") + a.log.Err(err).Debug("Failed to create member") return false, errors.WithStack(err) } a.newMemberID = newID diff --git a/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go b/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go index d10f29ab1..05db55488 100644 --- a/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go +++ b/pkg/deployment/reconcile/action_arango_member_update_pod_spec.go @@ -23,13 +23,10 @@ package reconcile import ( "context" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources" - "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog/log" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog" + "github.com/arangodb/kube-arangodb/pkg/util/errors" ) func init() { @@ -38,10 +35,10 @@ func init() { // newArangoMemberUpdatePodSpecAction creates a new Action that implements the given // planned ArangoMemberUpdatePodSpec action. 
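Across the reconcile files above the change is mechanical: `actionFactory` loses its `zerolog.Logger` parameter, and concrete actions log through the embedded base implementation (`a.log`). A compact, hypothetical sketch of that factory/registry shape — `register`, `baseAction`, and the simplified logger below are placeholders, not the package's real identifiers:

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// action is a stand-in for api.Action with only the fields needed here.
type action struct {
	Type     string
	MemberID string
}

// actor is a stand-in for the reconcile Action interface.
type actor interface {
	Start(ctx context.Context) (bool, error)
}

// factory mirrors the new actionFactory shape: no logger parameter; the
// action obtains its logger from its base implementation instead.
type factory func(a action) actor

type registration struct {
	create  factory
	timeout time.Duration
}

var registry = map[string]registration{}

func register(actionType string, f factory, timeout time.Duration) {
	registry[actionType] = registration{create: f, timeout: timeout}
}

// baseAction plays the role of actionImpl: it carries the action and a
// pre-scoped logger, so concrete actions never receive one explicitly.
type baseAction struct {
	a   action
	log func(msg string) // simplified logger stand-in
}

func newBaseAction(a action) baseAction {
	return baseAction{
		a:   a,
		log: func(msg string) { fmt.Printf("[%s/%s] %s\n", a.Type, a.MemberID, msg) },
	}
}

type addMemberAction struct{ baseAction }

func newAddMemberAction(a action) actor { return &addMemberAction{newBaseAction(a)} }

func (x *addMemberAction) Start(_ context.Context) (bool, error) {
	x.log("Starting AddMember") // logger comes from the embedded base, as a.log does above
	return true, nil
}

func main() {
	register("AddMember", newAddMemberAction, 10*time.Minute)
	entry := registry["AddMember"]
	act := entry.create(action{Type: "AddMember", MemberID: "PRMR-1"})
	done, err := act.Start(context.Background())
	fmt.Println(done, err, entry.timeout)
}
```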
-func newArangoMemberUpdatePodSpecAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newArangoMemberUpdatePodSpecAction(action api.Action, actionCtx ActionContext) Action { a := &actionArangoMemberUpdatePodSpec{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -64,20 +61,20 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !found { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, nil } member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") - log.Error().Err(err).Msg("ArangoMember not found") + a.log.Err(err).Error("ArangoMember not found") return false, err } endpoint, err := a.actionCtx.GenerateMemberEndpoint(a.action.Group, m) if err != nil { - log.Error().Err(err).Msg("Unable to render endpoint") + a.log.Err(err).Error("Unable to render endpoint") return false, err } @@ -85,7 +82,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro // Update endpoint m.Endpoint = util.NewString(endpoint) if err := status.Members.Update(m, a.action.Group); err != nil { - log.Error().Err(err).Msg("Unable to update endpoint") + a.log.Err(err).Error("Unable to update endpoint") return false, err } } @@ -104,19 +101,19 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro renderedPod, err := a.actionCtx.RenderPodTemplateForMember(ctx, a.actionCtx.ACS(), spec, status, a.action.MemberID, imageInfo) if err != nil { - log.Err(err).Msg("Error while rendering pod") + a.log.Err(err).Error("Error while rendering pod") return false, err } checksum, err := resources.ChecksumArangoPod(groupSpec, resources.CreatePodFromTemplate(renderedPod)) if err != nil { - log.Err(err).Msg("Error while getting pod checksum") + a.log.Err(err).Error("Error while getting pod checksum") return false, err } template, err := api.GetArangoMemberPodTemplate(renderedPod, checksum) if err != nil { - log.Err(err).Msg("Error while getting pod template") + a.log.Err(err).Error("Error while getting pod template") return false, err } @@ -135,7 +132,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro return false }); err != nil { - log.Err(err).Msg("Error while updating member") + a.log.Err(err).Error("Error while updating member") return false, err } @@ -146,7 +143,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro return true }); err != nil { - log.Err(err).Msg("Error while updating member status") + a.log.Err(err).Error("Error while updating member status") return false, err } diff --git a/pkg/deployment/reconcile/action_arango_member_update_pod_status.go b/pkg/deployment/reconcile/action_arango_member_update_pod_status.go index e73f8eea1..2891cbfd3 100644 --- a/pkg/deployment/reconcile/action_arango_member_update_pod_status.go +++ b/pkg/deployment/reconcile/action_arango_member_update_pod_status.go @@ -23,11 +23,8 @@ package reconcile import ( "context" - "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog/log" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" + "github.com/arangodb/kube-arangodb/pkg/util/errors" ) func init() { @@ -40,10 +37,10 @@ const ( // 
newArangoMemberUpdatePodStatusAction creates a new Action that implements the given // planned ArangoMemberUpdatePodStatus action. -func newArangoMemberUpdatePodStatusAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newArangoMemberUpdatePodStatusAction(action api.Action, actionCtx ActionContext) Action { a := &actionArangoMemberUpdatePodStatus{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -63,14 +60,14 @@ type actionArangoMemberUpdatePodStatus struct { func (a *actionArangoMemberUpdatePodStatus) Start(ctx context.Context) (bool, error) { m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !found { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, nil } member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") - log.Error().Err(err).Msg("ArangoMember not found") + a.log.Err(err).Error("ArangoMember not found") return false, err } @@ -93,7 +90,7 @@ func (a *actionArangoMemberUpdatePodStatus) Start(ctx context.Context) (bool, er } return false }); err != nil { - log.Err(err).Msg("Error while updating member") + a.log.Err(err).Error("Error while updating member") return false, err } } diff --git a/pkg/deployment/reconcile/action_backup_restore.go b/pkg/deployment/reconcile/action_backup_restore.go index fea991a75..c3492a903 100644 --- a/pkg/deployment/reconcile/action_backup_restore.go +++ b/pkg/deployment/reconcile/action_backup_restore.go @@ -26,8 +26,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" "github.com/arangodb/go-driver" - "github.com/rs/zerolog" - backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/arangod/conn" @@ -43,10 +41,10 @@ const ( actionBackupRestoreLocalBackupName api.PlanLocalKey = "backupName" ) -func newBackupRestoreAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newBackupRestoreAction(action api.Action, actionCtx ActionContext) Action { a := &actionBackupRestore{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -66,18 +64,18 @@ func (a actionBackupRestore) Start(ctx context.Context) (bool, error) { } if status.Restore != nil { - a.log.Warn().Msg("Backup restore status should not be nil") + a.log.Warn("Backup restore status should not be nil") return true, nil } backupResource, err := a.actionCtx.GetBackup(ctx, *spec.RestoreFrom) if err != nil { - a.log.Error().Err(err).Msg("Unable to find backup") + a.log.Err(err).Error("Unable to find backup") return true, nil } if backupResource.Status.Backup == nil { - a.log.Error().Msg("Backup ID is not set") + a.log.Error("Backup ID is not set") return true, nil } @@ -137,14 +135,14 @@ func (a actionBackupRestore) restoreSync(ctx context.Context, backup *backupApi. defer cancel() dbc, err := a.actionCtx.GetDatabaseClient(ctxChild) if err != nil { - a.log.Debug().Err(err).Msg("Failed to create database client") + a.log.Err(err).Debug("Failed to create database client") return false, nil } // The below action can take a while so the full parent timeout context is used. 
restoreError := dbc.Backup().Restore(ctx, driver.BackupID(backup.Status.Backup.ID), nil) if restoreError != nil { - a.log.Error().Err(restoreError).Msg("Restore failed") + a.log.Err(restoreError).Error("Restore failed") } if err := a.actionCtx.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool { @@ -163,7 +161,7 @@ func (a actionBackupRestore) restoreSync(ctx context.Context, backup *backupApi. return true }); err != nil { - a.log.Error().Err(err).Msg("Unable to set restored state") + a.log.Err(err).Error("Unable to set restored state") return false, err } @@ -186,7 +184,7 @@ func (a actionBackupRestore) CheckProgress(ctx context.Context) (bool, bool, err dbc, err := a.actionCtx.GetDatabaseAsyncClient(ctxChild) if err != nil { - a.log.Debug().Err(err).Msg("Failed to create database client") + a.log.Err(err).Debug("Failed to create database client") return false, false, nil } @@ -224,7 +222,7 @@ func (a actionBackupRestore) CheckProgress(ctx context.Context) (bool, bool, err return true }); err != nil { - a.log.Error().Err(err).Msg("Unable to set restored state") + a.log.Err(err).Error("Unable to set restored state") return false, false, err } diff --git a/pkg/deployment/reconcile/action_backup_restore_clean.go b/pkg/deployment/reconcile/action_backup_restore_clean.go index fa7408431..c64637401 100644 --- a/pkg/deployment/reconcile/action_backup_restore_clean.go +++ b/pkg/deployment/reconcile/action_backup_restore_clean.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeBackupRestoreClean, newBackupRestoreCleanAction, backupRestoreTimeout) } -func newBackupRestoreCleanAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newBackupRestoreCleanAction(action api.Action, actionCtx ActionContext) Action { a := &actionBackupRestoreClean{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_bootstrap_set_password.go b/pkg/deployment/reconcile/action_bootstrap_set_password.go index 808a66eb4..ad2304496 100644 --- a/pkg/deployment/reconcile/action_bootstrap_set_password.go +++ b/pkg/deployment/reconcile/action_bootstrap_set_password.go @@ -33,17 +33,16 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeBootstrapSetPassword, newBootstrapSetPasswordAction, defaultTimeout) } -func newBootstrapSetPasswordAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newBootstrapSetPasswordAction(action api.Action, actionCtx ActionContext) Action { a := &actionBootstrapSetPassword{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -59,11 +58,11 @@ func (a actionBootstrapSetPassword) Start(ctx context.Context) (bool, error) { spec := a.actionCtx.GetSpec() if user, ok := a.action.GetParam("user"); !ok { - a.log.Warn().Msgf("User param is not set in action") + a.log.Warn("User param is not set in action") return true, nil } else { if secret, ok := spec.Bootstrap.PasswordSecretNames[user]; !ok { - a.log.Warn().Msgf("User does not exist in password hashes") + a.log.Warn("User does not exist in password hashes") return true, nil } else { 
ctxChild, cancel := globals.GetGlobals().Timeouts().ArangoD().WithTimeout(ctx) @@ -98,7 +97,7 @@ func (a actionBootstrapSetPassword) Start(ctx context.Context) (bool, error) { } func (a actionBootstrapSetPassword) setUserPassword(ctx context.Context, user, secret string) (string, error) { - a.log.Debug().Msgf("Bootstrapping user %s, secret %s", user, secret) + a.log.Debug("Bootstrapping user %s, secret %s", user, secret) ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() diff --git a/pkg/deployment/reconcile/action_bootstrap_update.go b/pkg/deployment/reconcile/action_bootstrap_update.go index 4d2900461..9076e5f3b 100644 --- a/pkg/deployment/reconcile/action_bootstrap_update.go +++ b/pkg/deployment/reconcile/action_bootstrap_update.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeBootstrapUpdate, newBootstrapUpdateAction, defaultTimeout) } -func newBootstrapUpdateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newBootstrapUpdateAction(action api.Action, actionCtx ActionContext) Action { a := &actionBootstrapUpdate{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_cleanout_member.go b/pkg/deployment/reconcile/action_cleanout_member.go index 2362e6764..aca38300b 100644 --- a/pkg/deployment/reconcile/action_cleanout_member.go +++ b/pkg/deployment/reconcile/action_cleanout_member.go @@ -29,8 +29,6 @@ import ( driver "github.com/arangodb/go-driver" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" - "github.com/arangodb/kube-arangodb/pkg/util/arangod" ) @@ -40,10 +38,10 @@ func init() { // newCleanOutMemberAction creates a new Action that implements the given // planned CleanOutMember action. -func newCleanOutMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newCleanOutMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionCleanoutMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -68,13 +66,12 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) { // We wanted to remove and it is already gone. 
All ok return true, nil } - log := a.log ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() c, err := a.actionCtx.GetDatabaseClient(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create member client") + a.log.Err(err).Debug("Failed to create member client") return false, errors.WithStack(err) } @@ -82,7 +79,7 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) { defer cancel() cluster, err := c.Cluster(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to access cluster") + a.log.Err(err).Debug("Failed to access cluster") return false, errors.WithStack(err) } @@ -95,10 +92,10 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) { // Member not found, it could be that it never connected to the cluster return true, nil } - log.Debug().Err(err).Msg("Failed to cleanout member") + a.log.Err(err).Debug("Failed to cleanout member") return false, errors.WithStack(err) } - log.Debug().Str("job-id", jobID).Msg("Cleanout member started") + a.log.Str("job-id", jobID).Debug("Cleanout member started") // Update status m.Phase = api.MemberPhaseCleanOut m.CleanoutJobID = jobID @@ -111,7 +108,6 @@ func (a *actionCleanoutMember) Start(ctx context.Context) (bool, error) { // CheckProgress checks the progress of the action. // Returns: ready, abort, error. func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, error) { - log := a.log m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { // We wanted to remove and it is already gone. All ok @@ -126,7 +122,7 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, e defer cancel() c, err := a.actionCtx.GetDatabaseClient(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create database client") + a.log.Err(err).Debug("Failed to create database client") return false, false, nil } @@ -134,7 +130,7 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, e defer cancel() cluster, err := c.Cluster(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to access cluster") + a.log.Err(err).Debug("Failed to access cluster") return false, false, nil } @@ -142,18 +138,18 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, e defer cancel() cleanedOut, err := cluster.IsCleanedOut(ctxChild, a.action.MemberID) if err != nil { - log.Debug().Err(err).Msg("IsCleanedOut failed") + a.log.Err(err).Debug("IsCleanedOut failed") return false, false, nil } if !cleanedOut { // We're not done yet, check job status - log.Debug().Msg("IsCleanedOut returned false") + a.log.Debug("IsCleanedOut returned false") ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() c, err := a.actionCtx.GetDatabaseClient(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create database client") + a.log.Err(err).Debug("Failed to create database client") return false, false, nil } @@ -161,7 +157,7 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, e defer cancel() agency, err := a.actionCtx.GetAgency(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create agency client") + a.log.Err(err).Debug("Failed to create agency client") return false, false, nil } @@ -169,11 +165,11 @@ func (a *actionCleanoutMember) CheckProgress(ctx context.Context) (bool, bool, e defer cancel() jobStatus, err := arangod.CleanoutServerJobStatus(ctxChild, m.CleanoutJobID, c, agency) if err != nil { 
- log.Debug().Err(err).Msg("Failed to fetch cleanout job status") + a.log.Err(err).Debug("Failed to fetch cleanout job status") return false, false, nil } if jobStatus.IsFailed() { - log.Warn().Str("reason", jobStatus.Reason()).Msg("Cleanout Job failed. Aborting plan") + a.log.Str("reason", jobStatus.Reason()).Warn("Cleanout Job failed. Aborting plan") // Revert cleanout state m.Phase = api.MemberPhaseCreated m.CleanoutJobID = "" diff --git a/pkg/deployment/reconcile/action_cluster_member_cleanup.go b/pkg/deployment/reconcile/action_cluster_member_cleanup.go index 137003c81..c895b0699 100644 --- a/pkg/deployment/reconcile/action_cluster_member_cleanup.go +++ b/pkg/deployment/reconcile/action_cluster_member_cleanup.go @@ -28,7 +28,6 @@ import ( "github.com/arangodb/go-driver" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -37,10 +36,10 @@ func init() { // newClusterMemberCleanupAction creates a new Action that implements the given // planned ClusterMemberCleanup action. -func newClusterMemberCleanupAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newClusterMemberCleanupAction(action api.Action, actionCtx ActionContext) Action { a := &actionClusterMemberCleanup{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -59,7 +58,7 @@ type actionClusterMemberCleanup struct { // the start time needs to be recorded and a ready condition needs to be checked. func (a *actionClusterMemberCleanup) Start(ctx context.Context) (bool, error) { if err := a.start(ctx); err != nil { - a.log.Warn().Err(err).Msgf("Unable to clean cluster member") + a.log.Err(err).Warn("Unable to clean cluster member") } return true, nil diff --git a/pkg/deployment/reconcile/action_context.go b/pkg/deployment/reconcile/action_context.go index 1a4b510cd..18bfc0523 100644 --- a/pkg/deployment/reconcile/action_context.go +++ b/pkg/deployment/reconcile/action_context.go @@ -26,8 +26,6 @@ import ( "github.com/arangodb/arangosync-client/client" "github.com/arangodb/go-driver/agency" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" "github.com/arangodb/go-driver" @@ -37,6 +35,7 @@ import ( agencyCache "github.com/arangodb/kube-arangodb/pkg/deployment/agency" "github.com/arangodb/kube-arangodb/pkg/deployment/member" "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" @@ -108,7 +107,7 @@ type ActionLocalsContext interface { } // newActionContext creates a new ActionContext implementation. 
-func newActionContext(log zerolog.Logger, context Context) ActionContext { +func newActionContext(log logging.Logger, context Context) ActionContext { return &actionContext{ log: log, context: context, @@ -118,7 +117,7 @@ func newActionContext(log zerolog.Logger, context Context) ActionContext { // actionContext implements ActionContext type actionContext struct { context Context - log zerolog.Logger + log logging.Logger cachedStatus inspectorInterface.Inspector locals api.PlanLocals } @@ -340,7 +339,7 @@ func (ac *actionContext) UpdateMember(ctx context.Context, member api.MemberStat return errors.WithStack(err) } if err := ac.context.UpdateStatus(ctx, status, lastVersion); err != nil { - log.Debug().Err(err).Msg("Updating CR status failed") + ac.log.Err(err).Debug("Updating CR status failed") return errors.WithStack(err) } return nil @@ -354,7 +353,7 @@ func (ac *actionContext) RemoveMemberByID(ctx context.Context, id string) error return nil } if err := status.Members.RemoveByID(id, group); err != nil { - log.Debug().Err(err).Str("group", group.AsRole()).Msg("Failed to remove member") + ac.log.Err(err).Str("group", group.AsRole()).Debug("Failed to remove member") return errors.WithStack(err) } // Save removed member diff --git a/pkg/deployment/reconcile/action_disable_scaling_cluster.go b/pkg/deployment/reconcile/action_disable_scaling_cluster.go index 74a8da6d0..05a4167e9 100644 --- a/pkg/deployment/reconcile/action_disable_scaling_cluster.go +++ b/pkg/deployment/reconcile/action_disable_scaling_cluster.go @@ -25,7 +25,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog" ) func init() { @@ -33,10 +32,10 @@ func init() { } // newDisableScalingCluster creates the new action with disabling scaling DBservers and coordinators. -func newDisableScalingCluster(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newDisableScalingCluster(action api.Action, actionCtx ActionContext) Action { a := &actionDisableScalingCluster{} - a.actionImpl = newActionImpl(log, action, actionCtx, util.NewString("")) + a.actionImpl = newActionImpl(action, actionCtx, util.NewString("")) return a } diff --git a/pkg/deployment/reconcile/action_enable_scaling_cluster.go b/pkg/deployment/reconcile/action_enable_scaling_cluster.go index d2dfb8c72..c25bccd04 100644 --- a/pkg/deployment/reconcile/action_enable_scaling_cluster.go +++ b/pkg/deployment/reconcile/action_enable_scaling_cluster.go @@ -25,7 +25,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog" ) func init() { @@ -33,10 +32,10 @@ func init() { } // newEnableScalingCluster creates the new action with enabling scaling DBservers and coordinators. 
-func newEnableScalingCluster(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newEnableScalingCluster(action api.Action, actionCtx ActionContext) Action { a := &actionEnableScalingCluster{} - a.actionImpl = newActionImpl(log, action, actionCtx, util.NewString("")) + a.actionImpl = newActionImpl(action, actionCtx, util.NewString("")) return a } diff --git a/pkg/deployment/reconcile/action_encryption_add.go b/pkg/deployment/reconcile/action_encryption_add.go index 4c15af4f6..a6f72b261 100644 --- a/pkg/deployment/reconcile/action_encryption_add.go +++ b/pkg/deployment/reconcile/action_encryption_add.go @@ -38,7 +38,6 @@ import ( "k8s.io/apimachinery/pkg/types" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func ensureEncryptionSupport(actionCtx ActionContext) error { @@ -60,10 +59,10 @@ func init() { registerAction(api.ActionTypeEncryptionKeyAdd, newEncryptionKeyAdd, defaultTimeout) } -func newEncryptionKeyAdd(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newEncryptionKeyAdd(action api.Action, actionCtx ActionContext) Action { a := &encryptionKeyAddAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -76,7 +75,7 @@ type encryptionKeyAddAction struct { func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) { if err := ensureEncryptionSupport(a.actionCtx); err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } @@ -87,7 +86,7 @@ func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) { sha, d, exists, err := pod.GetEncryptionKey(ctx, a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read(), secret) if err != nil { - a.log.Error().Err(err).Msgf("Unable to fetch current encryption key") + a.log.Err(err).Error("Unable to fetch current encryption key") return true, nil } @@ -100,7 +99,7 @@ func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) { patch, err := p.Marshal() if err != nil { - a.log.Error().Err(err).Msgf("Unable to encrypt patch") + a.log.Err(err).Error("Unable to encrypt patch") return true, nil } diff --git a/pkg/deployment/reconcile/action_encryption_propagated.go b/pkg/deployment/reconcile/action_encryption_propagated.go index 6a82bcd69..b48753fb9 100644 --- a/pkg/deployment/reconcile/action_encryption_propagated.go +++ b/pkg/deployment/reconcile/action_encryption_propagated.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeEncryptionKeyPropagated, newEncryptionKeyPropagated, defaultTimeout) } -func newEncryptionKeyPropagated(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newEncryptionKeyPropagated(action api.Action, actionCtx ActionContext) Action { a := &encryptionKeyPropagatedAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -48,7 +47,7 @@ type encryptionKeyPropagatedAction struct { func (a *encryptionKeyPropagatedAction) Start(ctx context.Context) (bool, error) { propagatedFlag, exists := a.action.Params[propagated] if !exists { - a.log.Error().Msgf("Propagated flag is missing") + a.log.Error("Propagated flag is missing") return true, nil } diff --git a/pkg/deployment/reconcile/action_encryption_refresh.go 
b/pkg/deployment/reconcile/action_encryption_refresh.go index 070105b5e..1d90b664f 100644 --- a/pkg/deployment/reconcile/action_encryption_refresh.go +++ b/pkg/deployment/reconcile/action_encryption_refresh.go @@ -25,7 +25,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -37,10 +36,10 @@ func init() { registerAction(api.ActionTypeEncryptionKeyRefresh, newEncryptionKeyRefresh, defaultTimeout) } -func newEncryptionKeyRefresh(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newEncryptionKeyRefresh(action api.Action, actionCtx ActionContext) Action { a := &encryptionKeyRefreshAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -59,7 +58,7 @@ func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, b defer cancel() keyfolder, err := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetName()), meta.GetOptions{}) if err != nil { - a.log.Err(err).Msgf("Unable to fetch encryption folder") + a.log.Err(err).Error("Unable to fetch encryption folder") return true, false, nil } @@ -67,7 +66,7 @@ func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, b defer cancel() c, err := a.actionCtx.GetServerClient(ctxChild, a.action.Group, a.action.MemberID) if err != nil { - a.log.Warn().Err(err).Msg("Unable to get client") + a.log.Err(err).Warn("Unable to get client") return true, false, nil } @@ -76,7 +75,7 @@ func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, b defer cancel() e, err := client.RefreshEncryption(ctxChild) if err != nil { - a.log.Warn().Err(err).Msg("Unable to refresh encryption") + a.log.Err(err).Warn("Unable to refresh encryption") return true, false, nil } diff --git a/pkg/deployment/reconcile/action_encryption_remove.go b/pkg/deployment/reconcile/action_encryption_remove.go index 846858039..81ba017ef 100644 --- a/pkg/deployment/reconcile/action_encryption_remove.go +++ b/pkg/deployment/reconcile/action_encryption_remove.go @@ -36,17 +36,16 @@ import ( "k8s.io/apimachinery/pkg/types" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeEncryptionKeyRemove, newEncryptionKeyRemove, defaultTimeout) } -func newEncryptionKeyRemove(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newEncryptionKeyRemove(action api.Action, actionCtx ActionContext) Action { a := &encryptionKeyRemoveAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -59,7 +58,7 @@ type encryptionKeyRemoveAction struct { func (a *encryptionKeyRemoveAction) Start(ctx context.Context) (bool, error) { if err := ensureEncryptionSupport(a.actionCtx); err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } @@ -77,7 +76,7 @@ func (a *encryptionKeyRemoveAction) Start(ctx context.Context) (bool, error) { patch, err := p.Marshal() if err != nil { - a.log.Error().Err(err).Msgf("Unable to encrypt patch") + a.log.Err(err).Error("Unable to encrypt patch") return true, nil } diff --git a/pkg/deployment/reconcile/action_encryption_status_update.go 
b/pkg/deployment/reconcile/action_encryption_status_update.go index af287a373..203dc0871 100644 --- a/pkg/deployment/reconcile/action_encryption_status_update.go +++ b/pkg/deployment/reconcile/action_encryption_status_update.go @@ -30,17 +30,16 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/pod" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeEncryptionKeyStatusUpdate, newEncryptionKeyStatusUpdate, defaultTimeout) } -func newEncryptionKeyStatusUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newEncryptionKeyStatusUpdate(action api.Action, actionCtx ActionContext) Action { a := &encryptionKeyStatusUpdateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -53,7 +52,7 @@ type encryptionKeyStatusUpdateAction struct { func (a *encryptionKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) { if err := ensureEncryptionSupport(a.actionCtx); err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } @@ -62,7 +61,7 @@ func (a *encryptionKeyStatusUpdateAction) Start(ctx context.Context) (bool, erro f, err := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), meta.GetOptions{}) if err != nil { - a.log.Error().Err(err).Msgf("Unable to get folder info") + a.log.Err(err).Error("Unable to get folder info") return true, nil } diff --git a/pkg/deployment/reconcile/action_helper.go b/pkg/deployment/reconcile/action_helper.go index 3b96fe135..344bc6007 100644 --- a/pkg/deployment/reconcile/action_helper.go +++ b/pkg/deployment/reconcile/action_helper.go @@ -26,6 +26,11 @@ import ( "github.com/rs/zerolog" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" +) + +var ( + logger = logging.Global().RegisterAndGetLogger("action", logging.Info) ) type actionEmpty struct { @@ -50,43 +55,70 @@ func (e actionEmptyStart) Start(_ context.Context) (bool, error) { return false, nil } -func newActionImplDefRef(log zerolog.Logger, action api.Action, actionCtx ActionContext) actionImpl { - return newActionImpl(log, action, actionCtx, &action.MemberID) +func newActionImplDefRef(action api.Action, actionCtx ActionContext) actionImpl { + return newActionImpl(action, actionCtx, &action.MemberID) } -func newActionImpl(log zerolog.Logger, action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl { +func newActionImpl(action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl { if memberIDRef == nil { panic("Action cannot have nil reference to member!") } - return newBaseActionImpl(log, action, actionCtx, memberIDRef) + return newBaseActionImpl(action, actionCtx, memberIDRef) } -func newBaseActionImplDefRef(log zerolog.Logger, action api.Action, actionCtx ActionContext) actionImpl { - return newBaseActionImpl(log, action, actionCtx, &action.MemberID) +func newBaseActionImplDefRef(action api.Action, actionCtx ActionContext) actionImpl { + return newBaseActionImpl(action, actionCtx, &action.MemberID) } -func newBaseActionImpl(log zerolog.Logger, action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl { +func newBaseActionImpl(action api.Action, actionCtx ActionContext, memberIDRef *string) actionImpl { if memberIDRef == 
nil { panic("Action cannot have nil reference to member!") } - return actionImpl{ - log: log, + a := actionImpl{ action: action, actionCtx: actionCtx, memberIDRef: memberIDRef, } + + a.log = logger.Wrap(a.wrap) + + return a } type actionImpl struct { - log zerolog.Logger + log logging.Logger action api.Action actionCtx ActionContext memberIDRef *string } +func (a actionImpl) wrap(in *zerolog.Event) *zerolog.Event { + in = in. + Str("action-id", a.action.ID). + Str("action-type", string(a.action.Type)). + Str("group", a.action.Group.AsRole()). + Str("member-id", a.action.MemberID) + + if status, _ := a.actionCtx.GetStatus(); status.Members.ContainsID(a.action.MemberID) { + if member, _, ok := status.Members.ElementByID(a.action.MemberID); ok { + in = in.Str("phase", string(member.Phase)) + } + } + + for k, v := range a.action.Params { + in = in.Str("param."+k, v) + } + + for k, v := range a.action.Locals { + in = in.Str("local."+k.String(), v) + } + + return in +} + // MemberID returns the member ID used / created in the current action. func (a actionImpl) MemberID() string { return *a.memberIDRef diff --git a/pkg/deployment/reconcile/action_idle.go b/pkg/deployment/reconcile/action_idle.go index 153e2fc30..2cd3c31da 100644 --- a/pkg/deployment/reconcile/action_idle.go +++ b/pkg/deployment/reconcile/action_idle.go @@ -24,7 +24,6 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -33,10 +32,10 @@ func init() { // newIdleAction creates a new Action that implements the given // planned Idle action. -func newIdleAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newIdleAction(action api.Action, actionCtx ActionContext) Action { a := &actionIdle{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_jwt_add.go b/pkg/deployment/reconcile/action_jwt_add.go index 72116d3c7..d847ffd2c 100644 --- a/pkg/deployment/reconcile/action_jwt_add.go +++ b/pkg/deployment/reconcile/action_jwt_add.go @@ -37,7 +37,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" "k8s.io/apimachinery/pkg/types" ) @@ -45,10 +44,10 @@ func init() { registerAction(api.ActionTypeJWTAdd, newJWTAdd, defaultTimeout) } -func newJWTAdd(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newJWTAdd(action api.Action, actionCtx ActionContext) Action { a := &jwtAddAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -62,48 +61,48 @@ type jwtAddAction struct { func (a *jwtAddAction) Start(ctx context.Context) (bool, error) { folder, err := ensureJWTFolderSupportFromAction(a.actionCtx) if err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } if !folder { - a.log.Error().Msgf("Action not supported") + a.log.Error("Action not supported") return true, nil } appendToken, exists := a.action.Params[checksum] if !exists { - a.log.Warn().Msgf("Key %s is missing in action", checksum) + a.log.Warn("Key %s is missing in action", checksum) return true, nil } s, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().Authentication.GetJWTSecretName()) if !ok { - 
a.log.Error().Msgf("JWT Secret is missing, no rotation will take place") + a.log.Error("JWT Secret is missing, no rotation will take place") return true, nil } jwt, ok := s.Data[constants.SecretKeyToken] if !ok { - a.log.Error().Msgf("JWT Secret is invalid, no rotation will take place") + a.log.Error("JWT Secret is invalid, no rotation will take place") return true, nil } jwtSha := util.SHA256(jwt) if appendToken != jwtSha { - a.log.Error().Msgf("JWT Secret changed") + a.log.Error("JWT Secret changed") return true, nil } f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { - a.log.Error().Msgf("Unable to get JWT folder info") + a.log.Error("Unable to get JWT folder info") return true, nil } if _, ok := f.Data[jwtSha]; ok { - a.log.Info().Msgf("JWT Already exists") + a.log.Info("JWT Already exists") return true, nil } @@ -112,7 +111,7 @@ func (a *jwtAddAction) Start(ctx context.Context) (bool, error) { patch, err := p.Marshal() if err != nil { - a.log.Error().Err(err).Msgf("Unable to encrypt patch") + a.log.Err(err).Error("Unable to encrypt patch") return true, nil } diff --git a/pkg/deployment/reconcile/action_jwt_clean.go b/pkg/deployment/reconcile/action_jwt_clean.go index 82e8b4f68..146ecf667 100644 --- a/pkg/deployment/reconcile/action_jwt_clean.go +++ b/pkg/deployment/reconcile/action_jwt_clean.go @@ -34,7 +34,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" "k8s.io/apimachinery/pkg/types" ) @@ -42,10 +41,10 @@ func init() { registerAction(api.ActionTypeJWTClean, newJWTClean, defaultTimeout) } -func newJWTClean(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newJWTClean(action api.Action, actionCtx ActionContext) Action { a := &jwtCleanAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -59,42 +58,42 @@ type jwtCleanAction struct { func (a *jwtCleanAction) Start(ctx context.Context) (bool, error) { folder, err := ensureJWTFolderSupportFromAction(a.actionCtx) if err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } if !folder { - a.log.Error().Msgf("Action not supported") + a.log.Error("Action not supported") return true, nil } cleanToken, exists := a.action.Params[checksum] if !exists { - a.log.Warn().Msgf("Key %s is missing in action", checksum) + a.log.Warn("Key %s is missing in action", checksum) return true, nil } if cleanToken == pod.ActiveJWTKey { - a.log.Error().Msgf("Unable to remove active key") + a.log.Error("Unable to remove active key") return true, nil } f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { - a.log.Error().Msgf("Unable to get JWT folder info") + a.log.Error("Unable to get JWT folder info") return true, nil } if key, ok := f.Data[pod.ActiveJWTKey]; !ok { - a.log.Info().Msgf("Active Key is required") + a.log.Info("Active Key is required") return true, nil } else if util.SHA256(key) == cleanToken { - a.log.Info().Msgf("Unable to remove active key") + a.log.Info("Unable to remove active key") return true, nil } if _, ok := f.Data[cleanToken]; !ok { - a.log.Info().Msgf("KEy to be removed does not exist") + a.log.Info("KEy to be removed does not exist") return true, nil } @@ -103,7 
+102,7 @@ func (a *jwtCleanAction) Start(ctx context.Context) (bool, error) { patch, err := p.Marshal() if err != nil { - a.log.Error().Err(err).Msgf("Unable to encrypt patch") + a.log.Err(err).Error("Unable to encrypt patch") return true, nil } diff --git a/pkg/deployment/reconcile/action_jwt_propagated.go b/pkg/deployment/reconcile/action_jwt_propagated.go index 7ff8fc14a..60f53c246 100644 --- a/pkg/deployment/reconcile/action_jwt_propagated.go +++ b/pkg/deployment/reconcile/action_jwt_propagated.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeJWTPropagated, newJWTPropagated, defaultTimeout) } -func newJWTPropagated(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newJWTPropagated(action api.Action, actionCtx ActionContext) Action { a := &jwtPropagatedAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -48,13 +47,13 @@ type jwtPropagatedAction struct { func (a *jwtPropagatedAction) Start(ctx context.Context) (bool, error) { _, err := ensureJWTFolderSupportFromAction(a.actionCtx) if err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } propagatedFlag, exists := a.action.Params[propagated] if !exists { - a.log.Error().Err(err).Msgf("Propagated flag is missing") + a.log.Err(err).Error("Propagated flag is missing") return true, nil } diff --git a/pkg/deployment/reconcile/action_jwt_refresh.go b/pkg/deployment/reconcile/action_jwt_refresh.go index 23989fa05..2eb2d001e 100644 --- a/pkg/deployment/reconcile/action_jwt_refresh.go +++ b/pkg/deployment/reconcile/action_jwt_refresh.go @@ -28,17 +28,16 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/client" "github.com/arangodb/kube-arangodb/pkg/deployment/pod" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeJWTRefresh, newJWTRefresh, defaultTimeout) } -func newJWTRefresh(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newJWTRefresh(action api.Action, actionCtx ActionContext) Action { a := &jwtRefreshAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -54,7 +53,7 @@ func (a *jwtRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error folder, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetAPIObject().GetName())) if !ok { - a.log.Error().Msgf("Unable to get JWT folder info") + a.log.Error("Unable to get JWT folder info") return true, false, nil } @@ -62,14 +61,14 @@ func (a *jwtRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error defer cancel() c, err := a.actionCtx.GetServerClient(ctxChild, a.action.Group, a.action.MemberID) if err != nil { - a.log.Warn().Err(err).Msg("Unable to get client") + a.log.Err(err).Warn("Unable to get client") return true, false, nil } ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() if invalid, err := isMemberJWTTokenInvalid(ctxChild, client.NewClient(c.Connection()), folder.Data, true); err != nil { - a.log.Warn().Err(err).Msg("Error while getting JWT Status") + a.log.Err(err).Warn("Error while getting JWT Status") return true, false, nil } else if invalid { return 
false, false, nil diff --git a/pkg/deployment/reconcile/action_jwt_set_active.go b/pkg/deployment/reconcile/action_jwt_set_active.go index a34d33aa2..ff56824a3 100644 --- a/pkg/deployment/reconcile/action_jwt_set_active.go +++ b/pkg/deployment/reconcile/action_jwt_set_active.go @@ -37,7 +37,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" "k8s.io/apimachinery/pkg/types" ) @@ -45,10 +44,10 @@ func init() { registerAction(api.ActionTypeJWTSetActive, newJWTSetActive, defaultTimeout) } -func newJWTSetActive(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newJWTSetActive(action api.Action, actionCtx ActionContext) Action { a := &jwtSetActiveAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -62,30 +61,30 @@ type jwtSetActiveAction struct { func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) { folder, err := ensureJWTFolderSupportFromAction(a.actionCtx) if err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } if !folder { - a.log.Error().Msgf("Action not supported") + a.log.Error("Action not supported") return true, nil } toActiveChecksum, exists := a.action.Params[checksum] if !exists { - a.log.Warn().Msgf("Key %s is missing in action", checksum) + a.log.Warn("Key %s is missing in action", checksum) return true, nil } f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { - a.log.Error().Msgf("Unable to get JWT folder info") + a.log.Error("Unable to get JWT folder info") return true, nil } toActiveData, toActivePresent := f.Data[toActiveChecksum] if !toActivePresent { - a.log.Error().Msgf("JWT key which is desired to be active is not anymore in secret") + a.log.Error("JWT key which is desired to be active is not anymore in secret") return true, nil } @@ -93,7 +92,7 @@ func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) { tokenKeyData, token := f.Data[constants.SecretKeyToken] if util.SHA256(activeKeyData) == toActiveChecksum && util.SHA256(activeKeyData) == util.SHA256(tokenKeyData) { - a.log.Info().Msgf("Desired JWT is already active") + a.log.Info("Desired JWT is already active") return true, nil } @@ -114,7 +113,7 @@ func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) { patch, err := p.Marshal() if err != nil { - a.log.Error().Err(err).Msgf("Unable to encrypt patch") + a.log.Err(err).Error("Unable to encrypt patch") return true, nil } diff --git a/pkg/deployment/reconcile/action_jwt_status_update.go b/pkg/deployment/reconcile/action_jwt_status_update.go index a7f202eef..d97877a9a 100644 --- a/pkg/deployment/reconcile/action_jwt_status_update.go +++ b/pkg/deployment/reconcile/action_jwt_status_update.go @@ -34,7 +34,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog" ) const ( @@ -68,10 +67,10 @@ func init() { registerAction(api.ActionTypeJWTStatusUpdate, newJWTStatusUpdate, defaultTimeout) } -func newJWTStatusUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newJWTStatusUpdate(action api.Action, actionCtx ActionContext) Action { a := 
&jwtStatusUpdateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -85,20 +84,20 @@ type jwtStatusUpdateAction struct { func (a *jwtStatusUpdateAction) Start(ctx context.Context) (bool, error) { folder, err := ensureJWTFolderSupportFromAction(a.actionCtx) if err != nil { - a.log.Error().Err(err).Msgf("Action not supported") + a.log.Err(err).Error("Action not supported") return true, nil } if !folder { f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().Authentication.GetJWTSecretName()) if !ok { - a.log.Error().Msgf("Unable to get JWT secret info") + a.log.Error("Unable to get JWT secret info") return true, nil } key, ok := f.Data[constants.SecretKeyToken] if !ok { - a.log.Error().Msgf("JWT Token is invalid") + a.log.Error("JWT Token is invalid") return true, nil } @@ -125,7 +124,7 @@ func (a *jwtStatusUpdateAction) Start(ctx context.Context) (bool, error) { f, ok := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(a.actionCtx.GetName())) if !ok { - a.log.Error().Msgf("Unable to get JWT folder info") + a.log.Error("Unable to get JWT folder info") return true, nil } diff --git a/pkg/deployment/reconcile/action_kill_member_pod.go b/pkg/deployment/reconcile/action_kill_member_pod.go index f1481a0ba..22260c523 100644 --- a/pkg/deployment/reconcile/action_kill_member_pod.go +++ b/pkg/deployment/reconcile/action_kill_member_pod.go @@ -23,7 +23,6 @@ package reconcile import ( "context" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -39,10 +38,10 @@ func init() { // newKillMemberPodAction creates a new Action that implements the given // planned KillMemberPod action. 
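The action_helper.go change above attaches the shared "action" logger to each actionImpl via logger.Wrap(a.wrap), so every event is stamped with action-id, action-type, group, member-id, phase, params and locals without the call sites repeating them. The snippet below shows the same event-decoration idea with plain zerolog; the field values are made up and this is not the operator's code.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// wrapEvent mimics the idea behind actionImpl.wrap above: one function
// decorates every log event with the action's identifying fields.
// The action values below are placeholders for the example.
func wrapEvent(e *zerolog.Event) *zerolog.Event {
	return e.
		Str("action-id", "0f3a6c").
		Str("action-type", "RotateMember").
		Str("group", "dbservers").
		Str("member-id", "PRMR-abc123")
}

func main() {
	log := zerolog.New(os.Stdout)

	// Every emitted event passes through the wrapper before the message is set.
	wrapEvent(log.Info()).Msg("Updating container image")
	wrapEvent(log.Warn()).Msg("Cluster is not ready")
}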
-func newKillMemberPodAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newKillMemberPodAction(action api.Action, actionCtx ActionContext) Action { a := &actionKillMemberPod{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -61,10 +60,9 @@ func (a *actionKillMemberPod) Start(ctx context.Context) (bool, error) { return true, nil } - log := a.log m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, nil } @@ -74,12 +72,12 @@ func (a *actionKillMemberPod) Start(ctx context.Context) (bool, error) { } if ifPodUIDMismatch(m, a.action, cache) { - log.Error().Msg("Member UID is changed") + a.log.Error("Member UID is changed") return true, nil } if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil { - log.Error().Err(err).Msg("Unable to kill pod") + a.log.Err(err).Error("Unable to kill pod") return true, nil } @@ -92,11 +90,9 @@ func (a *actionKillMemberPod) CheckProgress(ctx context.Context) (bool, bool, er if !features.GracefulShutdown().Enabled() { return true, false, nil } - - log := a.log m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, false, nil } @@ -107,7 +103,7 @@ func (a *actionKillMemberPod) CheckProgress(ctx context.Context) (bool, bool, er p, ok := cache.Pod().V1().GetSimple(m.PodName) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, false, nil } diff --git a/pkg/deployment/reconcile/action_maintenance_condition.go b/pkg/deployment/reconcile/action_maintenance_condition.go index 5e5f72c27..ff2855046 100644 --- a/pkg/deployment/reconcile/action_maintenance_condition.go +++ b/pkg/deployment/reconcile/action_maintenance_condition.go @@ -22,17 +22,16 @@ package reconcile import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeSetMaintenanceCondition, newSetMaintenanceConditionAction, addMemberTimeout) } -func newSetMaintenanceConditionAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newSetMaintenanceConditionAction(action api.Action, actionCtx ActionContext) Action { a := &actionSetMaintenanceCondition{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_maintenance_disable.go b/pkg/deployment/reconcile/action_maintenance_disable.go index 04f59bb80..2457b4607 100644 --- a/pkg/deployment/reconcile/action_maintenance_disable.go +++ b/pkg/deployment/reconcile/action_maintenance_disable.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeDisableMaintenance, newDisableMaintenanceAction, addMemberTimeout) } -func newDisableMaintenanceAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newDisableMaintenanceAction(action api.Action, actionCtx ActionContext) Action { a := &actionDisableMaintenance{} - a.actionImpl = newActionImpl(log, action, actionCtx, &a.newMemberID) + a.actionImpl = newActionImpl(action, actionCtx, &a.newMemberID) return a } @@ -55,7 +54,7 @@ func (a 
*actionDisableMaintenance) Start(ctx context.Context) (bool, error) { } if err := a.actionCtx.SetAgencyMaintenanceMode(ctx, false); err != nil { - a.log.Error().Err(err).Msgf("Unable to disable maintenance") + a.log.Err(err).Error("Unable to disable maintenance") return true, nil } diff --git a/pkg/deployment/reconcile/action_maintenance_enable.go b/pkg/deployment/reconcile/action_maintenance_enable.go index b7924ec7a..7808d3fe2 100644 --- a/pkg/deployment/reconcile/action_maintenance_enable.go +++ b/pkg/deployment/reconcile/action_maintenance_enable.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeEnableMaintenance, newEnableMaintenanceAction, addMemberTimeout) } -func newEnableMaintenanceAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newEnableMaintenanceAction(action api.Action, actionCtx ActionContext) Action { a := &actionEnableMaintenance{} - a.actionImpl = newActionImpl(log, action, actionCtx, &a.newMemberID) + a.actionImpl = newActionImpl(action, actionCtx, &a.newMemberID) return a } @@ -55,7 +54,7 @@ func (a *actionEnableMaintenance) Start(ctx context.Context) (bool, error) { } if err := a.actionCtx.SetAgencyMaintenanceMode(ctx, true); err != nil { - a.log.Error().Err(err).Msgf("Unable to enable maintenance") + a.log.Err(err).Error("Unable to enable maintenance") return true, nil } diff --git a/pkg/deployment/reconcile/action_mark_to_remove_member.go b/pkg/deployment/reconcile/action_mark_to_remove_member.go index 0a51c161b..b1f2d8a57 100644 --- a/pkg/deployment/reconcile/action_mark_to_remove_member.go +++ b/pkg/deployment/reconcile/action_mark_to_remove_member.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeMarkToRemoveMember, newMarkToRemoveMemberAction, addMemberTimeout) } -func newMarkToRemoveMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newMarkToRemoveMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionMarkToRemove{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -67,7 +66,7 @@ func (a *actionMarkToRemove) Start(ctx context.Context) (bool, error) { } if err := s.Members.Update(member, group); err != nil { - a.log.Warn().Err(err).Str("Member", member.ID).Msgf("Unable to update member") + a.log.Err(err).Str("Member", member.ID).Warn("Unable to update member") return false } diff --git a/pkg/deployment/reconcile/action_member_phase_update.go b/pkg/deployment/reconcile/action_member_phase_update.go index 2cbff4f73..bf24e454d 100644 --- a/pkg/deployment/reconcile/action_member_phase_update.go +++ b/pkg/deployment/reconcile/action_member_phase_update.go @@ -27,7 +27,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" ) func init() { @@ -38,10 +37,10 @@ const ( actionTypeMemberPhaseUpdatePhaseKey string = "phase" ) -func newMemberPhaseUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newMemberPhaseUpdate(action api.Action, actionCtx ActionContext) Action { a := &memberPhaseUpdateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) 
return a } @@ -53,22 +52,21 @@ type memberPhaseUpdateAction struct { } func (a *memberPhaseUpdateAction) Start(ctx context.Context) (bool, error) { - log := a.log m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, nil } phaseString, ok := a.action.Params[actionTypeMemberPhaseUpdatePhaseKey] if !ok { - log.Error().Msg("Phase not defined") + a.log.Error("Phase not defined") return true, nil } p, ok := api.GetPhase(phaseString) if !ok { - log.Error().Msgf("Phase %s unknown", p) + a.log.Error("Phase %s unknown", p) return true, nil } diff --git a/pkg/deployment/reconcile/action_member_rid_update.go b/pkg/deployment/reconcile/action_member_rid_update.go index 07ade2a54..28a7be6e6 100644 --- a/pkg/deployment/reconcile/action_member_rid_update.go +++ b/pkg/deployment/reconcile/action_member_rid_update.go @@ -22,17 +22,16 @@ package reconcile import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeMemberRIDUpdate, newMemberRIDUpdate, defaultTimeout) } -func newMemberRIDUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newMemberRIDUpdate(action api.Action, actionCtx ActionContext) Action { a := &memberRIDUpdateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_pvc_resize.go b/pkg/deployment/reconcile/action_pvc_resize.go index 2f8b4444a..f05c7828f 100644 --- a/pkg/deployment/reconcile/action_pvc_resize.go +++ b/pkg/deployment/reconcile/action_pvc_resize.go @@ -27,7 +27,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/globals" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -38,10 +37,10 @@ func init() { // newRotateMemberAction creates a new Action that implements the given // planned RotateMember action. -func newPVCResizeAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newPVCResizeAction(action api.Action, actionCtx ActionContext) Action { a := &actionPVCResize{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -56,12 +55,11 @@ type actionPVCResize struct { // Returns true if the action is completely finished, false in case // the start time needs to be recorded and a ready condition needs to be checked. func (a *actionPVCResize) Start(ctx context.Context) (bool, error) { - log := a.log group := a.action.Group groupSpec := a.actionCtx.GetSpec().GetServerGroupSpec(group) m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, nil } @@ -111,16 +109,15 @@ func (a *actionPVCResize) Start(ctx context.Context) (bool, error) { // Returns: ready, abort, error. 
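action_pvc_resize.go above only shows the logging changes; the resize decision itself (not visible in these hunks) comes down to comparing Kubernetes resource quantities. The snippet below is a hypothetical illustration of that kind of check using the apimachinery resource package; the sizes are made up.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// A hypothetical sketch of the size comparison a PVC-resize action has to make:
// Kubernetes quantities must be compared with Cmp, not string or numeric equality.
func main() {
	requested := resource.MustParse("20Gi") // size asked for in the group spec (placeholder)
	current := resource.MustParse("10Gi")   // size currently recorded on the PVC (placeholder)

	if requested.Cmp(current) > 0 {
		fmt.Println("volume needs to grow, patch the PVC")
	} else {
		fmt.Println("nothing to do")
	}
}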
func (a *actionPVCResize) CheckProgress(ctx context.Context) (bool, bool, error) { // Check that pod is removed - log := a.log m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !found { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, false, nil } cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID) if !ok { - log.Warn().Msg("Cluster is not ready") + a.log.Warn("Cluster is not ready") return false, false, nil } diff --git a/pkg/deployment/reconcile/action_pvc_resized.go b/pkg/deployment/reconcile/action_pvc_resized.go index 30cf4be61..87010cc2b 100644 --- a/pkg/deployment/reconcile/action_pvc_resized.go +++ b/pkg/deployment/reconcile/action_pvc_resized.go @@ -25,7 +25,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" ) @@ -35,10 +34,10 @@ func init() { // newRotateMemberAction creates a new Action that implements the given // planned RotateMember action. -func newPVCResizedAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newPVCResizedAction(action api.Action, actionCtx ActionContext) Action { a := &actionPVCResized{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -56,16 +55,15 @@ type actionPVCResized struct { // Returns: ready, abort, error. func (a *actionPVCResized) CheckProgress(ctx context.Context) (bool, bool, error) { // Check that pod is removed - log := a.log m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !found { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, false, nil } cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID) if !ok { - log.Warn().Msg("Cluster is not ready") + a.log.Warn("Cluster is not ready") return false, false, nil } diff --git a/pkg/deployment/reconcile/action_recreate_member.go b/pkg/deployment/reconcile/action_recreate_member.go index 8da145a0d..b25c3862b 100644 --- a/pkg/deployment/reconcile/action_recreate_member.go +++ b/pkg/deployment/reconcile/action_recreate_member.go @@ -25,8 +25,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" ) @@ -36,10 +34,10 @@ func init() { // newRecreateMemberAction creates a new Action that implements the given // planned RecreateMember action. -func newRecreateMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newRecreateMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionRecreateMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_remove_member.go b/pkg/deployment/reconcile/action_remove_member.go index 833c06eeb..b4ecb1ddd 100644 --- a/pkg/deployment/reconcile/action_remove_member.go +++ b/pkg/deployment/reconcile/action_remove_member.go @@ -23,7 +23,6 @@ package reconcile import ( "context" - "github.com/rs/zerolog" apiErrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -40,10 +39,10 @@ func init() { // newRemoveMemberAction creates a new Action that implements the given // planned RemoveMember action. 
-func newRemoveMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newRemoveMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionRemoveMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -85,12 +84,12 @@ func (a *actionRemoveMember) Start(ctx context.Context) (bool, error) { defer cancel() if err := arangod.RemoveServerFromCluster(ctxChild, client.Connection(), driver.ServerID(m.ID)); err != nil { if !driver.IsNotFound(err) && !driver.IsPreconditionFailed(err) { - a.log.Err(err).Str("member-id", m.ID).Msgf("Failed to remove server from cluster") + a.log.Err(err).Str("member-id", m.ID).Error("Failed to remove server from cluster") // ignore this error, maybe all coordinators are failed and no connction to cluster is possible } else if driver.IsPreconditionFailed(err) { health := a.actionCtx.GetMembersState().Health() if health.Error != nil { - a.log.Err(err).Str("member-id", m.ID).Msgf("Failed get cluster health") + a.log.Err(err).Str("member-id", m.ID).Error("Failed get cluster health") } // We don't care if not found if record, ok := health.Members[driver.ServerID(m.ID)]; ok { @@ -102,11 +101,11 @@ func (a *actionRemoveMember) Start(ctx context.Context) (bool, error) { return false, errors.WithStack(errors.Newf("can not remove server from cluster. Not yet terminated. Retry later")) } - a.log.Debug().Msg("dbserver has shut down") + a.log.Debug("dbserver has shut down") } } } else { - a.log.Warn().Msgf("ignoring error: %s", err.Error()) + a.log.Warn("ignoring error: %s", err.Error()) } } } diff --git a/pkg/deployment/reconcile/action_resign_leadership.go b/pkg/deployment/reconcile/action_resign_leadership.go index 2148552e9..74f84bff3 100644 --- a/pkg/deployment/reconcile/action_resign_leadership.go +++ b/pkg/deployment/reconcile/action_resign_leadership.go @@ -30,7 +30,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -39,10 +38,10 @@ func init() { // newResignLeadershipAction creates a new Action that implements the given // planned ResignLeadership action. -func newResignLeadershipAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newResignLeadershipAction(action api.Action, actionCtx ActionContext) Action { a := &actionResignLeadership{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -54,16 +53,15 @@ type actionResignLeadership struct { // Start performs the start of the ReasignLeadership process on DBServer. 
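actionRemoveMember.Start above tolerates several failure modes when removing a server from the cluster: 404 means the server is already gone, 412 means it has not shut down yet (retry later unless the member is reported failed), and other errors are logged and ignored because the coordinators may be unreachable. The helper below is a made-up sketch of that classification using the go-driver error predicates; it is not the operator's code.

package main

import (
	"fmt"

	driver "github.com/arangodb/go-driver"
)

// classifyRemoveError sketches the tolerant error handling shown above.
// The helper name and return strings are invented for this example.
func classifyRemoveError(err error) string {
	switch {
	case err == nil:
		return "removed"
	case driver.IsNotFound(err):
		return "already removed, nothing to do"
	case driver.IsPreconditionFailed(err):
		return "not yet terminated, retry later"
	default:
		return "ignoring error, coordinators may be unreachable"
	}
}

func main() {
	fmt.Println(classifyRemoveError(nil))
}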
func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) { - log := a.log group := a.action.Group m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, nil } if a.actionCtx.GetSpec().Mode.Get() != api.DeploymentModeCluster { - log.Debug().Msg("Resign only allowed in cluster mode") + a.log.Debug("Resign only allowed in cluster mode") return true, nil } @@ -71,18 +69,18 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) { defer cancel() client, err := a.actionCtx.GetDatabaseClient(ctxChild) if err != nil { - log.Error().Err(err).Msgf("Unable to get client") + a.log.Err(err).Error("Unable to get client") return true, errors.WithStack(err) } switch group { case api.ServerGroupDBServers: if agencyState, agencyOK := a.actionCtx.GetAgencyCache(); !agencyOK { - log.Warn().Err(err).Msgf("Maintenance is enabled, skipping action") + a.log.Err(err).Warn("Maintenance is enabled, skipping action") return true, errors.WithStack(err) } else if agencyState.Supervision.Maintenance.Exists() { // We are done, action cannot be handled on maintenance mode - log.Warn().Msgf("Maintenance is enabled, skipping action") + a.log.Warn("Maintenance is enabled, skipping action") return true, nil } @@ -90,7 +88,7 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) { defer cancel() cluster, err := client.Cluster(ctxChild) if err != nil { - log.Error().Err(err).Msgf("Unable to get cluster client") + a.log.Err(err).Error("Unable to get cluster client") return true, errors.WithStack(err) } @@ -98,9 +96,9 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) { ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() jobCtx := driver.WithJobIDResponse(ctxChild, &jobID) - log.Debug().Msg("Temporary shutdown, resign leadership") + a.log.Debug("Temporary shutdown, resign leadership") if err := cluster.ResignServer(jobCtx, m.ID); err != nil { - log.Debug().Err(err).Msg("Failed to resign server") + a.log.Err(err).Debug("Failed to resign server") return true, errors.WithStack(err) } @@ -118,19 +116,17 @@ func (a *actionResignLeadership) Start(ctx context.Context) (bool, error) { // CheckProgress checks if Job is completed. 
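actionResignLeadership.Start above uses the go-driver async pattern: driver.WithJobIDResponse captures the job ID of the resign call so CheckProgress can poll it later instead of blocking. The program below is a minimal sketch of that pattern, assuming a plain go-driver client as the surrounding calls suggest; the endpoint and DBServer ID are placeholders and error handling is reduced to panics.

package main

import (
	"context"
	"fmt"

	driver "github.com/arangodb/go-driver"
	driverhttp "github.com/arangodb/go-driver/http"
)

func main() {
	conn, err := driverhttp.NewConnection(driverhttp.ConnectionConfig{
		Endpoints: []string{"http://localhost:8529"}, // placeholder endpoint
	})
	if err != nil {
		panic(err)
	}
	c, err := driver.NewClient(driver.ClientConfig{Connection: conn})
	if err != nil {
		panic(err)
	}
	cluster, err := c.Cluster(context.Background())
	if err != nil {
		panic(err)
	}

	// WithJobIDResponse asks the driver to hand back the async job ID
	// instead of waiting for the operation to finish.
	var jobID string
	jobCtx := driver.WithJobIDResponse(context.Background(), &jobID)
	if err := cluster.ResignServer(jobCtx, "PRMR-1234"); err != nil { // placeholder DBServer ID
		panic(err)
	}
	fmt.Println("resign leadership started, job:", jobID)
}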
func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool, error) { - log := a.log - m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, false, nil } if agencyState, agencyOK := a.actionCtx.GetAgencyCache(); !agencyOK { - log.Error().Msgf("Unable to get maintenance mode") + a.log.Error("Unable to get maintenance mode") return false, false, nil } else if agencyState.Supervision.Maintenance.Exists() { - log.Warn().Msgf("Maintenance is enabled, skipping action") + a.log.Warn("Maintenance is enabled, skipping action") // We are done, action cannot be handled on maintenance mode m.CleanoutJobID = "" if err := a.actionCtx.UpdateMember(ctx, m); err != nil { @@ -143,7 +139,7 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool, defer cancel() agency, err := a.actionCtx.GetAgency(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create agency client") + a.log.Err(err).Debug("Failed to create agency client") return false, false, nil } @@ -151,7 +147,7 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool, defer cancel() c, err := a.actionCtx.GetDatabaseClient(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create member client") + a.log.Err(err).Debug("Failed to create member client") return false, false, nil } @@ -160,10 +156,10 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool, jobStatus, err := arangod.CleanoutServerJobStatus(ctxChild, m.CleanoutJobID, c, agency) if err != nil { if driver.IsNotFound(err) { - log.Debug().Err(err).Msg("Job not found, but proceeding") + a.log.Err(err).Debug("Job not found, but proceeding") return true, false, nil } - log.Debug().Err(err).Msg("Failed to fetch job status") + a.log.Err(err).Debug("Failed to fetch job status") return false, false, errors.WithStack(err) } @@ -172,7 +168,7 @@ func (a *actionResignLeadership) CheckProgress(ctx context.Context) (bool, bool, if err := a.actionCtx.UpdateMember(ctx, m); err != nil { return false, false, errors.WithStack(err) } - log.Error().Msg("Resign server job failed") + a.log.Error("Resign server job failed") return true, false, nil } diff --git a/pkg/deployment/reconcile/action_rotate_member.go b/pkg/deployment/reconcile/action_rotate_member.go index d6a486aac..970cc356a 100644 --- a/pkg/deployment/reconcile/action_rotate_member.go +++ b/pkg/deployment/reconcile/action_rotate_member.go @@ -23,7 +23,6 @@ package reconcile import ( "context" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "time" @@ -40,10 +39,10 @@ func init() { // newRotateMemberAction creates a new Action that implements the given // planned RotateMember action. -func newRotateMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newRotateMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionRotateMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -58,7 +57,7 @@ type actionRotateMember struct { // Returns true if the action is completely finished, false in case // the start time needs to be recorded and a ready condition needs to be checked. 
func (a *actionRotateMember) Start(ctx context.Context) (bool, error) { - shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log) + shutdown, m, ok := getShutdownHelper(a.actionImpl) if !ok { return true, nil } @@ -82,8 +81,7 @@ func (a *actionRotateMember) Start(ctx context.Context) (bool, error) { // Returns: ready, abort, error. func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, error) { // Check that pod is removed - log := a.log - shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log) + shutdown, m, ok := getShutdownHelper(a.actionImpl) if !ok { return true, false, nil } @@ -96,7 +94,7 @@ func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, err cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID) if !ok { - log.Warn().Msg("Cluster is not ready") + a.log.Warn("Cluster is not ready") return false, false, nil } @@ -106,7 +104,7 @@ func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, err // Pod is terminated, we can now remove it if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctxChild, m.PodName, meta.DeleteOptions{}); err != nil { if !k8sutil.IsNotFound(err) { - log.Error().Err(err).Msg("Unable to delete pod") + a.log.Err(err).Error("Unable to delete pod") return false, false, nil } } diff --git a/pkg/deployment/reconcile/action_rotate_start_member.go b/pkg/deployment/reconcile/action_rotate_start_member.go index 964107364..ec6057cf7 100644 --- a/pkg/deployment/reconcile/action_rotate_start_member.go +++ b/pkg/deployment/reconcile/action_rotate_start_member.go @@ -23,7 +23,6 @@ package reconcile import ( "context" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "time" @@ -39,10 +38,10 @@ func init() { // newRotateStartMemberAction creates a new Action that implements the given // planned RotateStartMember action. -func newRotateStartMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newRotateStartMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionRotateStartMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -57,7 +56,7 @@ type actionRotateStartMember struct { // Returns true if the action is completely finished, false in case // the start time needs to be recorded and a ready condition needs to be checked. func (a *actionRotateStartMember) Start(ctx context.Context) (bool, error) { - shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log) + shutdown, m, ok := getShutdownHelper(a.actionImpl) if !ok { return true, nil } @@ -81,15 +80,14 @@ func (a *actionRotateStartMember) Start(ctx context.Context) (bool, error) { // Returns: ready, abort, error. 
func (a *actionRotateStartMember) CheckProgress(ctx context.Context) (bool, bool, error) { // Check that pod is removed - log := a.log - shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log) + shutdown, m, ok := getShutdownHelper(a.actionImpl) if !ok { return true, false, nil } cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID) if !ok { - log.Warn().Msg("Cluster is not ready") + a.log.Warn("Cluster is not ready") return false, false, nil } @@ -102,7 +100,7 @@ func (a *actionRotateStartMember) CheckProgress(ctx context.Context) (bool, bool // Pod is terminated, we can now remove it if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil { if !k8sutil.IsNotFound(err) { - log.Error().Err(err).Msg("Unable to delete pod") + a.log.Err(err).Error("Unable to delete pod") return false, false, nil } } diff --git a/pkg/deployment/reconcile/action_rotate_stop_member.go b/pkg/deployment/reconcile/action_rotate_stop_member.go index 94773fba4..6513c02ad 100644 --- a/pkg/deployment/reconcile/action_rotate_stop_member.go +++ b/pkg/deployment/reconcile/action_rotate_stop_member.go @@ -26,7 +26,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -35,10 +34,10 @@ func init() { // newRotateStopMemberAction creates a new Action that implements the given // planned RotateStopMember action. -func newRotateStopMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newRotateStopMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionRotateStopMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -56,10 +55,9 @@ type actionRotateStopMember struct { // Returns true if the action is completely finished, false in case // the start time needs to be recorded and a ready condition needs to be checked. 
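The rotate actions above delete the member pod once it has terminated and treat a NotFound error as success, since the pod may already be gone. The snippet below sketches that idempotent delete with client-go; it assumes in-cluster configuration, and the namespace and pod name are placeholders.

package main

import (
	"context"
	"fmt"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/rest"
)

func main() {
	cfg, err := rest.InClusterConfig()
	if err != nil {
		panic(err)
	}
	client, err := kubernetes.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// Delete the pod; "already gone" is treated as success.
	err = client.CoreV1().Pods("arangodb").Delete(context.Background(), "cluster-prmr-abc123", meta.DeleteOptions{})
	if err != nil && !apierrors.IsNotFound(err) {
		panic(err)
	}
	fmt.Println("pod deleted (or was already gone)")
}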
func (a *actionRotateStopMember) Start(ctx context.Context) (bool, error) { - log := a.log m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") } m.Phase = api.MemberPhaseNone diff --git a/pkg/deployment/reconcile/action_runtime_container_args_udpate.go b/pkg/deployment/reconcile/action_runtime_container_args_udpate.go index b5cf18332..c368b6a03 100644 --- a/pkg/deployment/reconcile/action_runtime_container_args_udpate.go +++ b/pkg/deployment/reconcile/action_runtime_container_args_udpate.go @@ -28,7 +28,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" "github.com/pkg/errors" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -41,10 +40,10 @@ func init() { registerAction(api.ActionTypeRuntimeContainerArgsLogLevelUpdate, runtimeContainerArgsUpdate, defaultTimeout) } -func runtimeContainerArgsUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func runtimeContainerArgsUpdate(action api.Action, actionCtx ActionContext) Action { a := &actionRuntimeContainerArgsUpdate{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -60,7 +59,7 @@ type actionRuntimeContainerArgsUpdate struct { func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error { m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - a.log.Info().Msg("member is gone already") + a.log.Info("member is gone already") return nil } @@ -77,37 +76,37 @@ func (a actionRuntimeContainerArgsUpdate) Post(ctx context.Context) error { containerName, ok := a.action.GetParam(rotation.ContainerName) if !ok { - a.log.Warn().Msgf("Unable to find action's param %s", rotation.ContainerName) + a.log.Warn("Unable to find action's param %s", rotation.ContainerName) return nil } - log := a.log.With().Str("containerName", containerName).Logger() + log := a.log.Str("containerName", containerName) updateMemberStatusArgs := func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool { if obj.Spec.Template == nil || s.Template == nil || obj.Spec.Template.PodSpec == nil || s.Template.PodSpec == nil { - log.Info().Msgf("Nil Member definition") + log.Info("Nil Member definition") return false } if len(obj.Spec.Template.PodSpec.Spec.Containers) != len(s.Template.PodSpec.Spec.Containers) { - log.Info().Msgf("Invalid size of containers") + log.Info("Invalid size of containers") return false } for id := range obj.Spec.Template.PodSpec.Spec.Containers { if obj.Spec.Template.PodSpec.Spec.Containers[id].Name == containerName { if s.Template.PodSpec.Spec.Containers[id].Name != containerName { - log.Info().Msgf("Invalid order of containers") + log.Info("Invalid order of containers") return false } s.Template.PodSpec.Spec.Containers[id].Command = obj.Spec.Template.PodSpec.Spec.Containers[id].Command - log.Info().Msgf("Updating container args") + log.Info("Updating container args") return true } } - log.Info().Msgf("can not find the container") + log.Info("can not find the container") return false } @@ -130,7 +129,7 @@ func (a *actionRuntimeContainerArgsUpdate) ReloadComponents() []throttle.Compone func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, error) { m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - a.log.Info().Msg("member is gone already") + a.log.Info("member is gone already") return true, nil } @@ -140,7 +139,7 @@ 
func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro } if !m.Phase.IsReady() { - a.log.Info().Msg("Member is not ready, unable to run update operation") + a.log.Info("Member is not ready, unable to run update operation") return true, nil } @@ -157,7 +156,7 @@ func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro pod, ok := cache.Pod().V1().GetSimple(m.PodName) if !ok { - a.log.Info().Str("podName", m.PodName).Msg("pod is not present") + a.log.Str("podName", m.PodName).Info("pod is not present") return true, nil } @@ -175,7 +174,7 @@ func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro return errors.WithMessage(err, "can not set log level") } - a.log.Info().Interface("topics", topicsLogLevel).Msg("send log level to the ArangoDB") + a.log.Interface("topics", topicsLogLevel).Info("send log level to the ArangoDB") return nil } diff --git a/pkg/deployment/reconcile/action_runtime_container_image_update.go b/pkg/deployment/reconcile/action_runtime_container_image_update.go index db050676d..f69e7975e 100644 --- a/pkg/deployment/reconcile/action_runtime_container_image_update.go +++ b/pkg/deployment/reconcile/action_runtime_container_image_update.go @@ -29,7 +29,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -38,10 +37,10 @@ func init() { registerAction(api.ActionTypeRuntimeContainerImageUpdate, runtimeContainerImageUpdate, defaultTimeout) } -func runtimeContainerImageUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func runtimeContainerImageUpdate(action api.Action, actionCtx ActionContext) Action { a := &actionRuntimeContainerImageUpdate{} - a.actionImpl = newBaseActionImplDefRef(log, action, actionCtx) + a.actionImpl = newBaseActionImplDefRef(action, actionCtx) return a } @@ -54,47 +53,47 @@ type actionRuntimeContainerImageUpdate struct { } func (a actionRuntimeContainerImageUpdate) Post(ctx context.Context) error { - a.log.Info().Msgf("Updating container image") + a.log.Info("Updating container image") m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - a.log.Info().Msg("member is gone already") + a.log.Info("member is gone already") return nil } name, image, ok := a.getContainerDetails() if !ok { - a.log.Info().Msg("Unable to find container details") + a.log.Info("Unable to find container details") return nil } member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") - a.log.Error().Err(err).Msg("ArangoMember not found") + a.log.Err(err).Error("ArangoMember not found") return err } return a.actionCtx.WithCurrentArangoMember(member.GetName()).UpdateStatus(ctx, func(obj *api.ArangoMember, s *api.ArangoMemberStatus) bool { if obj.Spec.Template == nil || s.Template == nil || obj.Spec.Template.PodSpec == nil || s.Template.PodSpec == nil { - a.log.Info().Msgf("Nil Member definition") + a.log.Info("Nil Member definition") return false } if len(obj.Spec.Template.PodSpec.Spec.Containers) != len(s.Template.PodSpec.Spec.Containers) { - a.log.Info().Msgf("Invalid size of containers") + a.log.Info("Invalid size of containers") return false } for id := range obj.Spec.Template.PodSpec.Spec.Containers { if obj.Spec.Template.PodSpec.Spec.Containers[id].Name == name { if 
s.Template.PodSpec.Spec.Containers[id].Name != name { - a.log.Info().Msgf("Invalid order of containers") + a.log.Info("Invalid order of containers") return false } if obj.Spec.Template.PodSpec.Spec.Containers[id].Image != image { - a.log.Info().Str("got", obj.Spec.Template.PodSpec.Spec.Containers[id].Image).Str("expected", image).Msgf("Invalid spec image of container") + a.log.Str("got", obj.Spec.Template.PodSpec.Spec.Containers[id].Image).Str("expected", image).Info("Invalid spec image of container") return false } @@ -127,7 +126,7 @@ func (a actionRuntimeContainerImageUpdate) getContainerDetails() (string, string func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, error) { m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - a.log.Info().Msg("member is gone already") + a.log.Info("member is gone already") return true, nil } @@ -138,45 +137,45 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err name, image, ok := a.getContainerDetails() if !ok { - a.log.Info().Msg("Unable to find container details") + a.log.Info("Unable to find container details") return true, nil } if !m.Phase.IsReady() { - a.log.Info().Msg("Member is not ready, unable to run update operation") + a.log.Info("Member is not ready, unable to run update operation") return true, nil } member, ok := a.actionCtx.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)) if !ok { err := errors.Newf("ArangoMember not found") - a.log.Error().Err(err).Msg("ArangoMember not found") + a.log.Err(err).Error("ArangoMember not found") return false, err } pod, ok := cache.Pod().V1().GetSimple(m.PodName) if !ok { - a.log.Info().Msg("pod is not present") + a.log.Info("pod is not present") return true, nil } if member.Spec.Template == nil || member.Spec.Template.PodSpec == nil { - a.log.Info().Msg("pod spec is not present") + a.log.Info("pod spec is not present") return true, nil } if member.Status.Template == nil || member.Status.Template.PodSpec == nil { - a.log.Info().Msg("pod status is not present") + a.log.Info("pod status is not present") return true, nil } if len(pod.Spec.Containers) != len(member.Spec.Template.PodSpec.Spec.Containers) { - a.log.Info().Msg("spec container count is not equal") + a.log.Info("spec container count is not equal") return true, nil } if len(pod.Spec.Containers) != len(member.Status.Template.PodSpec.Spec.Containers) { - a.log.Info().Msg("status container count is not equal") + a.log.Info("status container count is not equal") return true, nil } @@ -209,45 +208,45 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err } func (a actionRuntimeContainerImageUpdate) CheckProgress(ctx context.Context) (bool, bool, error) { - a.log.Info().Msgf("Update Progress") + a.log.Info("Update Progress") m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - a.log.Info().Msg("member is gone already") + a.log.Info("member is gone already") return true, false, nil } cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID) if !ok { - a.log.Info().Msg("Cluster is not ready") + a.log.Info("Cluster is not ready") return false, false, nil } pod, ok := cache.Pod().V1().GetSimple(m.PodName) if !ok { - a.log.Info().Msg("pod is not present") + a.log.Info("pod is not present") return true, false, nil } name, image, ok := a.getContainerDetails() if !ok { - a.log.Info().Msg("Unable to find container details") + a.log.Info("Unable to find container details") return 
true, false, nil } cspec, ok := k8sutil.GetContainerByName(pod, name) if !ok { - a.log.Info().Msg("Unable to find container spec") + a.log.Info("Unable to find container spec") return true, false, nil } cstatus, ok := k8sutil.GetContainerStatusByName(pod, name) if !ok { - a.log.Info().Msg("Unable to find container status") + a.log.Info("Unable to find container status") return true, false, nil } if cspec.Image != image { - a.log.Info().Msg("Image changed") + a.log.Info("Image changed") return true, false, nil } @@ -269,7 +268,7 @@ func (a actionRuntimeContainerImageUpdate) CheckProgress(ctx context.Context) (b if lastTermination.FinishedAt.Time.Before(allowedRestartPeriod) { return true, false, errors.Newf("Container %s continuously failing during image replacement: (%d) %s: %s", name, lastTermination.ExitCode, lastTermination.Reason, lastTermination.Message) } else { - a.log.Debug().Str("pod-name", pod.GetName()).Msg("pod is restarting - we are not marking it as terminated yet..") + a.log.Str("pod-name", pod.GetName()).Debug("pod is restarting - we are not marking it as terminated yet..") } } } diff --git a/pkg/deployment/reconcile/action_set_condition.go b/pkg/deployment/reconcile/action_set_condition.go index a7268c0d7..9f79dc8c9 100644 --- a/pkg/deployment/reconcile/action_set_condition.go +++ b/pkg/deployment/reconcile/action_set_condition.go @@ -24,8 +24,6 @@ import ( "context" "strconv" - "github.com/rs/zerolog" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" ) @@ -33,10 +31,10 @@ func init() { registerAction(api.ActionTypeSetCondition, setCondition, defaultTimeout) } -func setCondition(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func setCondition(action api.Action, actionCtx ActionContext) Action { a := &actionSetCondition{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -51,7 +49,7 @@ type actionSetCondition struct { // Start starts the action for changing conditions on the provided member. 
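The hunks above all apply one mechanical migration: zerolog's event chain (level method first, then Str/Err fields, then Msg or Msgf) becomes the operator's own chained logger, where fields attach to the logger value itself and the level methods (Debug, Info, Warn, Error) take the printf-style message directly, e.g. a.log.Str("podName", m.PodName).Info("pod is not present") or a.log.Err(err).Warn("Unable to set condition"). The toy program below is not the pkg/logging package; it is a self-contained sketch that only mimics that call shape so the before/after reads side by side.

package main

import (
	"errors"
	"fmt"
	"strings"
)

// chainLogger is a toy stand-in for the chained logger this diff migrates to.
// Fields (Str, Err) chain on the logger value; level methods take a printf-style message.
type chainLogger struct {
	fields []string
}

// Str returns a copy of the logger with an extra key=value field attached.
func (l chainLogger) Str(key, value string) chainLogger {
	fields := make([]string, 0, len(l.fields)+1)
	fields = append(fields, l.fields...)
	fields = append(fields, key+"="+value)
	l.fields = fields
	return l
}

// Err attaches an error as a field, mirroring a.log.Err(err).Warn(...) in the hunks above.
func (l chainLogger) Err(err error) chainLogger {
	if err == nil {
		return l
	}
	return l.Str("error", err.Error())
}

func (l chainLogger) Info(msg string, args ...interface{}) { l.emit("INFO", msg, args...) }
func (l chainLogger) Warn(msg string, args ...interface{}) { l.emit("WARN", msg, args...) }

func (l chainLogger) emit(level, msg string, args ...interface{}) {
	line := fmt.Sprintf(msg, args...)
	if len(l.fields) > 0 {
		line = strings.Join(l.fields, " ") + " " + line
	}
	fmt.Println(level, line)
}

func main() {
	var log chainLogger

	// Old zerolog shape: a.log.Info().Str("podName", m.PodName).Msg("pod is not present")
	// New shape used throughout this diff:
	log.Str("podName", "example-agent-1").Info("pod is not present")

	// Old: a.log.Warn().Err(err).Msgf("Unable to find action's param %s", name)
	// New: a.log.Err(err).Warn("Unable to find action's param %s", name)
	log.Err(errors.New("not found")).Warn("Unable to find action's param %s", "containerName")

	// A partially applied logger can still be kept around, as in
	// log := a.log.Str("containerName", containerName) in the args-update action.
	containerLog := log.Str("containerName", "server")
	containerLog.Info("Updating container args")
}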
func (a actionSetCondition) Start(ctx context.Context) (bool, error) { if len(a.action.Params) == 0 { - a.log.Info().Msg("can not start the action with the empty list of conditions") + a.log.Info("can not start the action with the empty list of conditions") return true, nil } @@ -59,7 +57,7 @@ func (a actionSetCondition) Start(ctx context.Context) (bool, error) { changed := false for condition, value := range a.action.Params { if value == "" { - a.log.Debug().Msg("remove the condition") + a.log.Debug("remove the condition") if s.Conditions.Remove(api.ConditionType(condition)) { changed = true @@ -67,11 +65,11 @@ func (a actionSetCondition) Start(ctx context.Context) (bool, error) { } else { set, err := strconv.ParseBool(value) if err != nil { - a.log.Error().Err(err).Str("value", value).Msg("can not parse string to boolean") + a.log.Err(err).Str("value", value).Error("can not parse string to boolean") continue } - a.log.Debug().Msg("set the condition") + a.log.Debug("set the condition") if s.Conditions.Update(api.ConditionType(condition), set, a.action.Reason, "action set the member condition") { changed = true @@ -80,7 +78,7 @@ func (a actionSetCondition) Start(ctx context.Context) (bool, error) { } return changed }); err != nil { - a.log.Warn().Err(err).Msgf("Unable to set condition") + a.log.Err(err).Warn("Unable to set condition") return true, nil } diff --git a/pkg/deployment/reconcile/action_set_condition_v2.go b/pkg/deployment/reconcile/action_set_condition_v2.go index 97dcff6c8..c7f0fabe8 100644 --- a/pkg/deployment/reconcile/action_set_condition_v2.go +++ b/pkg/deployment/reconcile/action_set_condition_v2.go @@ -23,7 +23,6 @@ package reconcile import ( "context" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -45,10 +44,10 @@ const ( setConditionActionV2KeyHash string = "hash" ) -func setConditionV2(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func setConditionV2(action api.Action, actionCtx ActionContext) Action { a := &actionSetConditionV2{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -64,13 +63,13 @@ type actionSetConditionV2 struct { func (a actionSetConditionV2) Start(ctx context.Context) (bool, error) { at, ok := a.action.Params[setConditionActionV2KeyType] if !ok { - a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyType) + a.log.Info("key %s is missing in action definition", setConditionActionV2KeyType) return true, nil } aa, ok := a.action.Params[setConditionActionV2KeyAction] if !ok { - a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyAction) + a.log.Info("key %s is missing in action definition", setConditionActionV2KeyAction) return true, nil } @@ -84,18 +83,18 @@ func (a actionSetConditionV2) Start(ctx context.Context) (bool, error) { if err := a.actionCtx.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) { return s.Conditions.UpdateWithHash(api.ConditionType(aa), as, ar, am, ah), nil }); err != nil { - a.log.Warn().Err(err).Msgf("unable to update status") + a.log.Err(err).Warn("unable to update status") return true, nil } case setConditionActionV2KeyTypeRemove: if err := a.actionCtx.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) { return s.Conditions.Remove(api.ConditionType(aa)), nil }); err != nil { - a.log.Warn().Err(err).Msgf("unable to update status") + 
a.log.Err(err).Warn("unable to update status") return true, nil } default: - a.log.Info().Msgf("unknown type %s", at) + a.log.Info("unknown type %s", at) return true, nil } return true, nil diff --git a/pkg/deployment/reconcile/action_set_current_image.go b/pkg/deployment/reconcile/action_set_current_image.go index 5300566da..dfad5a801 100644 --- a/pkg/deployment/reconcile/action_set_current_image.go +++ b/pkg/deployment/reconcile/action_set_current_image.go @@ -26,7 +26,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -35,10 +34,10 @@ func init() { // newSetCurrentImageAction creates a new Action that implements the given // planned SetCurrentImage action. -func newSetCurrentMemberImageAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newSetCurrentMemberImageAction(action api.Action, actionCtx ActionContext) Action { a := &setCurrentMemberImageAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -63,18 +62,16 @@ func (a *setCurrentMemberImageAction) Start(ctx context.Context) (bool, error) { // CheckProgress checks the progress of the action. // Returns true if the action is completely finished, false otherwise. func (a *setCurrentMemberImageAction) CheckProgress(ctx context.Context) (bool, bool, error) { - log := a.log - imageInfo, found := a.actionCtx.GetImageInfo(a.action.Image) if !found { - log.Info().Msgf("Image not found") + a.log.Info("Image not found") return true, false, nil } if err := a.actionCtx.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool { m, g, found := s.Members.ElementByID(a.action.MemberID) if !found { - log.Error().Msg("No such member") + a.log.Error("No such member") return false } @@ -84,13 +81,13 @@ func (a *setCurrentMemberImageAction) CheckProgress(ctx context.Context) (bool, m.Image = &imageInfo if err := s.Members.Update(m, g); err != nil { - log.Error().Msg("Member update failed") + a.log.Error("Member update failed") return false } return true }); err != nil { - log.Error().Msg("Member failed") + a.log.Error("Member failed") return true, false, nil } diff --git a/pkg/deployment/reconcile/action_set_license.go b/pkg/deployment/reconcile/action_set_license.go index 2460bf97a..3f8434248 100644 --- a/pkg/deployment/reconcile/action_set_license.go +++ b/pkg/deployment/reconcile/action_set_license.go @@ -29,17 +29,16 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeLicenseSet, newLicenseSet, defaultTimeout) } -func newLicenseSet(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newLicenseSet(action api.Action, actionCtx ActionContext) Action { a := &licenseSetAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -53,12 +52,9 @@ type licenseSetAction struct { func (a *licenseSetAction) Start(ctx context.Context) (bool, error) { ctxChild, cancel := globals.GetGlobals().Timeouts().ArangoD().WithTimeout(ctx) defer cancel() - - log := a.log - spec := a.actionCtx.GetSpec() if !spec.License.HasSecretName() { - log.Error().Msg("License is not set") + a.log.Error("License is not set") return true, nil } @@ -75,20 +71,20 @@ func (a *licenseSetAction) Start(ctx 
context.Context) (bool, error) { group := a.action.Group m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, nil } c, err := a.actionCtx.GetServerClient(ctxChild, group, m.ID) if !ok { - log.Error().Err(err).Msg("Unable to get client") + a.log.Err(err).Error("Unable to get client") return true, nil } client := client.NewClient(c.Connection()) if ok, err := licenseV2Compare(ctxChild, client, l.V2); err != nil { - log.Error().Err(err).Msg("Unable to verify license") + a.log.Err(err).Error("Unable to verify license") return true, nil } else if ok { // Already latest license @@ -96,7 +92,7 @@ func (a *licenseSetAction) Start(ctx context.Context) (bool, error) { } if err := client.SetLicense(ctxChild, string(l.V2), true); err != nil { - log.Error().Err(err).Msg("Unable to set license") + a.log.Err(err).Error("Unable to set license") return true, nil } diff --git a/pkg/deployment/reconcile/action_set_member_condition.go b/pkg/deployment/reconcile/action_set_member_condition.go index 9843e21f9..364d7f91b 100644 --- a/pkg/deployment/reconcile/action_set_member_condition.go +++ b/pkg/deployment/reconcile/action_set_member_condition.go @@ -24,8 +24,6 @@ import ( "context" "strconv" - "github.com/rs/zerolog" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/errors" ) @@ -34,10 +32,10 @@ func init() { registerAction(api.ActionTypeSetMemberCondition, setMemberCondition, defaultTimeout) } -func setMemberCondition(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func setMemberCondition(action api.Action, actionCtx ActionContext) Action { a := &actionSetMemberCondition{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -53,28 +51,28 @@ type actionSetMemberCondition struct { func (a actionSetMemberCondition) Start(ctx context.Context) (bool, error) { m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - a.log.Info().Msg("can not set the condition because the member is gone already") + a.log.Info("can not set the condition because the member is gone already") return true, nil } if len(a.action.Params) == 0 { - a.log.Info().Msg("can not start the action with the empty list of conditions") + a.log.Info("can not start the action with the empty list of conditions") return true, nil } for condition, value := range a.action.Params { if value == "" { - a.log.Debug().Msg("remove the condition") + a.log.Debug("remove the condition") m.Conditions.Remove(api.ConditionType(condition)) } else { set, err := strconv.ParseBool(value) if err != nil { - a.log.Error().Err(err).Str("value", value).Msg("can not parse string to boolean") + a.log.Err(err).Str("value", value).Error("can not parse string to boolean") continue } - a.log.Debug().Msg("set the condition") + a.log.Debug("set the condition") m.Conditions.Update(api.ConditionType(condition), set, a.action.Reason, "action set the member condition") } diff --git a/pkg/deployment/reconcile/action_set_member_condition_v2.go b/pkg/deployment/reconcile/action_set_member_condition_v2.go index ff2766fae..7b51d0416 100644 --- a/pkg/deployment/reconcile/action_set_member_condition_v2.go +++ b/pkg/deployment/reconcile/action_set_member_condition_v2.go @@ -23,7 +23,6 @@ package reconcile import ( "context" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" api 
"github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -33,10 +32,10 @@ func init() { registerAction(api.ActionTypeSetMemberConditionV2, setMemberConditionV2, defaultTimeout) } -func setMemberConditionV2(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func setMemberConditionV2(action api.Action, actionCtx ActionContext) Action { a := &actionSetMemberConditionV2{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -52,13 +51,13 @@ type actionSetMemberConditionV2 struct { func (a actionSetMemberConditionV2) Start(ctx context.Context) (bool, error) { at, ok := a.action.Params[setConditionActionV2KeyType] if !ok { - a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyType) + a.log.Info("key %s is missing in action definition", setConditionActionV2KeyType) return true, nil } aa, ok := a.action.Params[setConditionActionV2KeyAction] if !ok { - a.log.Info().Msgf("key %s is missing in action definition", setConditionActionV2KeyAction) + a.log.Info("key %s is missing in action definition", setConditionActionV2KeyAction) return true, nil } @@ -80,14 +79,14 @@ func (a actionSetMemberConditionV2) Start(ctx context.Context) (bool, error) { } } - a.log.Info().Msg("can not set the condition because the member is gone already") + a.log.Info("can not set the condition because the member is gone already") return nil }, a.action.Group) // If not found then false is returned. return changed, nil }); err != nil { - a.log.Warn().Err(err).Msgf("unable to update status") + a.log.Err(err).Warn("unable to update status") return true, nil } case setConditionActionV2KeyTypeRemove: @@ -102,18 +101,18 @@ func (a actionSetMemberConditionV2) Start(ctx context.Context) (bool, error) { } } - a.log.Info().Msg("can not remove the condition because the member is gone already") + a.log.Info("can not remove the condition because the member is gone already") return nil }, a.action.Group) // If not found then false is returned. return changed, nil }); err != nil { - a.log.Warn().Err(err).Msgf("unable to update status") + a.log.Err(err).Warn("unable to update status") return true, nil } default: - a.log.Info().Msgf("unknown type %s", at) + a.log.Info("unknown type %s", at) return true, nil } return true, nil diff --git a/pkg/deployment/reconcile/action_shutdown_member.go b/pkg/deployment/reconcile/action_shutdown_member.go index 69b4d6f9f..1ac00f0d3 100644 --- a/pkg/deployment/reconcile/action_shutdown_member.go +++ b/pkg/deployment/reconcile/action_shutdown_member.go @@ -28,7 +28,6 @@ import ( "time" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -37,10 +36,10 @@ func init() { // newShutdownMemberAction creates a new Action that implements the given // planned ShutdownMember action. -func newShutdownMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newShutdownMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionShutdownMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -55,7 +54,7 @@ type actionShutdownMember struct { // Returns true if the action is completely finished, false in case // the start time needs to be recorded and a ready condition needs to be checked. 
func (a *actionShutdownMember) Start(ctx context.Context) (bool, error) { - shutdown, m, ok := getShutdownHelper(&a.action, a.actionCtx, a.log) + shutdown, m, ok := getShutdownHelper(a.actionImpl) if !ok { return true, nil } @@ -77,7 +76,7 @@ func (a *actionShutdownMember) Start(ctx context.Context) (bool, error) { // CheckProgress checks the progress of the action. // Returns: ready, abort, error. func (a *actionShutdownMember) CheckProgress(ctx context.Context) (bool, bool, error) { - shutdown, _, ok := getShutdownHelper(&a.action, a.actionCtx, a.log) + shutdown, _, ok := getShutdownHelper(a.actionImpl) if !ok { return true, false, nil } diff --git a/pkg/deployment/reconcile/action_tls_ca_append.go b/pkg/deployment/reconcile/action_tls_ca_append.go index 230db57f1..d699eba1c 100644 --- a/pkg/deployment/reconcile/action_tls_ca_append.go +++ b/pkg/deployment/reconcile/action_tls_ca_append.go @@ -38,17 +38,16 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeAppendTLSCACertificate, newAppendTLSCACertificateAction, operationTLSCACertificateTimeout) } -func newAppendTLSCACertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newAppendTLSCACertificateAction(action api.Action, actionCtx ActionContext) Action { a := &appendTLSCACertificateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -66,43 +65,43 @@ func (a *appendTLSCACertificateAction) Start(ctx context.Context) (bool, error) certChecksum, exists := a.action.Params[checksum] if !exists { - a.log.Warn().Msgf("Key %s is missing in action", checksum) + a.log.Warn("Key %s is missing in action", checksum) return true, nil } caSecret, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().TLS.GetCASecretName()) if !exists { - a.log.Warn().Msgf("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName()) + a.log.Warn("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName()) return true, nil } caFolder, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(resources.GetCASecretName(a.actionCtx.GetAPIObject())) if !exists { - a.log.Warn().Msgf("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject())) + a.log.Warn("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject())) return true, nil } - ca, _, err := resources.GetKeyCertFromSecret(a.log, caSecret, resources.CACertName, resources.CAKeyName) + ca, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName) if err != nil { - a.log.Warn().Err(err).Msgf("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject())) + a.log.Err(err).Warn("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject())) return true, nil } caData, err := ca.ToPem() if err != nil { - a.log.Warn().Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Msgf("Unable to parse ca into pem") + a.log.Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Warn("Unable to parse ca into pem") return true, nil } caSha := util.SHA256(caData) if caSha != certChecksum { - a.log.Warn().Msgf("Cert changed") + a.log.Warn("Cert changed") return true, nil } if _, exists := caFolder.Data[caSha]; exists { - a.log.Warn().Msgf("Cert 
already exists") + a.log.Warn("Cert already exists") return true, nil } @@ -111,7 +110,7 @@ func (a *appendTLSCACertificateAction) Start(ctx context.Context) (bool, error) patch, err := p.Marshal() if err != nil { - a.log.Error().Err(err).Msgf("Unable to encrypt patch") + a.log.Err(err).Error("Unable to encrypt patch") return true, nil } diff --git a/pkg/deployment/reconcile/action_tls_ca_clean.go b/pkg/deployment/reconcile/action_tls_ca_clean.go index cbe56d434..55dfba32b 100644 --- a/pkg/deployment/reconcile/action_tls_ca_clean.go +++ b/pkg/deployment/reconcile/action_tls_ca_clean.go @@ -37,17 +37,16 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeCleanTLSCACertificate, newCleanTLSCACertificateAction, operationTLSCACertificateTimeout) } -func newCleanTLSCACertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newCleanTLSCACertificateAction(action api.Action, actionCtx ActionContext) Action { a := &cleanTLSCACertificateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -59,51 +58,51 @@ type cleanTLSCACertificateAction struct { } func (a *cleanTLSCACertificateAction) Start(ctx context.Context) (bool, error) { - a.log.Info().Msgf("Clean TLS Ca") + a.log.Info("Clean TLS Ca") if !a.actionCtx.GetSpec().TLS.IsSecure() { - a.log.Info().Msgf("Insecure deployment") + a.log.Info("Insecure deployment") return true, nil } certChecksum, exists := a.action.Params[checksum] if !exists { - a.log.Warn().Msgf("Key %s is missing in action", checksum) + a.log.Warn("Key %s is missing in action", checksum) return true, nil } caSecret, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(a.actionCtx.GetSpec().TLS.GetCASecretName()) if !exists { - a.log.Warn().Msgf("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName()) + a.log.Warn("Secret %s is missing", a.actionCtx.GetSpec().TLS.GetCASecretName()) return true, nil } caFolder, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(resources.GetCASecretName(a.actionCtx.GetAPIObject())) if !exists { - a.log.Warn().Msgf("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject())) + a.log.Warn("Secret %s is missing", resources.GetCASecretName(a.actionCtx.GetAPIObject())) return true, nil } - ca, _, err := resources.GetKeyCertFromSecret(a.log, caSecret, resources.CACertName, resources.CAKeyName) + ca, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName) if err != nil { - a.log.Warn().Err(err).Msgf("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject())) + a.log.Err(err).Warn("Cert %s is invalid", resources.GetCASecretName(a.actionCtx.GetAPIObject())) return true, nil } caData, err := ca.ToPem() if err != nil { - a.log.Warn().Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Msgf("Unable to parse ca into pem") + a.log.Err(err).Str("secret", resources.GetCASecretName(a.actionCtx.GetAPIObject())).Warn("Unable to parse ca into pem") return true, nil } caSha := util.SHA256(caData) if caSha == certChecksum { - a.log.Warn().Msgf("Unable to remove current ca") + a.log.Warn("Unable to remove current ca") return true, nil } if _, exists := caFolder.Data[certChecksum]; !exists { - a.log.Warn().Msgf("Cert missing") + a.log.Warn("Cert 
missing") return true, nil } @@ -112,11 +111,11 @@ func (a *cleanTLSCACertificateAction) Start(ctx context.Context) (bool, error) { patch, err := p.Marshal() if err != nil { - a.log.Error().Err(err).Msgf("Unable to encrypt patch") + a.log.Err(err).Error("Unable to encrypt patch") return true, nil } - a.log.Info().Msgf("Removing key %s from truststore", certChecksum) + a.log.Info("Removing key %s from truststore", certChecksum) err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := a.actionCtx.ACS().CurrentClusterCache().SecretsModInterface().V1().Patch(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), types.JSONPatchType, patch, meta.PatchOptions{}) diff --git a/pkg/deployment/reconcile/action_tls_ca_renew.go b/pkg/deployment/reconcile/action_tls_ca_renew.go index 77f116120..4148745b1 100644 --- a/pkg/deployment/reconcile/action_tls_ca_renew.go +++ b/pkg/deployment/reconcile/action_tls_ca_renew.go @@ -27,7 +27,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -35,10 +34,10 @@ func init() { registerAction(api.ActionTypeRenewTLSCACertificate, newRenewTLSCACertificateAction, operationTLSCACertificateTimeout) } -func newRenewTLSCACertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newRenewTLSCACertificateAction(action api.Action, actionCtx ActionContext) Action { a := &renewTLSCACertificateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -59,7 +58,7 @@ func (a *renewTLSCACertificateAction) Start(ctx context.Context) (bool, error) { }) if err != nil { if !k8sutil.IsNotFound(err) { - a.log.Warn().Err(err).Msgf("Unable to clean cert %s", a.actionCtx.GetSpec().TLS.GetCASecretName()) + a.log.Err(err).Warn("Unable to clean cert %s", a.actionCtx.GetSpec().TLS.GetCASecretName()) return true, nil } } diff --git a/pkg/deployment/reconcile/action_tls_keyfile_clean.go b/pkg/deployment/reconcile/action_tls_keyfile_clean.go index daebfd798..f0d22379d 100644 --- a/pkg/deployment/reconcile/action_tls_keyfile_clean.go +++ b/pkg/deployment/reconcile/action_tls_keyfile_clean.go @@ -27,7 +27,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -35,10 +34,10 @@ func init() { registerAction(api.ActionTypeCleanTLSKeyfileCertificate, newCleanTLSKeyfileCertificateAction, operationTLSCACertificateTimeout) } -func newCleanTLSKeyfileCertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newCleanTLSKeyfileCertificateAction(action api.Action, actionCtx ActionContext) Action { a := &cleanTLSKeyfileCertificateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -56,7 +55,7 @@ func (a *cleanTLSKeyfileCertificateAction) Start(ctx context.Context) (bool, err member, exists := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !exists { - a.log.Warn().Msgf("Member does not exist") + a.log.Warn("Member does not exist") return true, nil } @@ -66,7 +65,7 @@ func (a *cleanTLSKeyfileCertificateAction) Start(ctx context.Context) (bool, err defer cancel() if err := 
c.Client().Kubernetes().CoreV1().Secrets(c.Namespace()).Delete(ctxChild, k8sutil.AppendTLSKeyfileSecretPostfix(member.ArangoMemberName(a.actionCtx.GetName(), a.action.Group)), meta.DeleteOptions{}); err != nil { - a.log.Warn().Err(err).Msgf("Unable to remove keyfile") + a.log.Err(err).Warn("Unable to remove keyfile") if !k8sutil.IsNotFound(err) { return false, err } diff --git a/pkg/deployment/reconcile/action_tls_keyfile_refresh.go b/pkg/deployment/reconcile/action_tls_keyfile_refresh.go index a9326cc06..ceb41bcee 100644 --- a/pkg/deployment/reconcile/action_tls_keyfile_refresh.go +++ b/pkg/deployment/reconcile/action_tls_keyfile_refresh.go @@ -32,17 +32,16 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeRefreshTLSKeyfileCertificate, newRefreshTLSKeyfileCertificateAction, operationTLSCACertificateTimeout) } -func newRefreshTLSKeyfileCertificateAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newRefreshTLSKeyfileCertificateAction(action api.Action, actionCtx ActionContext) Action { a := &refreshTLSKeyfileCertificateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -56,19 +55,19 @@ func (a *refreshTLSKeyfileCertificateAction) CheckProgress(ctx context.Context) defer cancel() c, err := a.actionCtx.GetServerClient(ctxChild, a.action.Group, a.action.MemberID) if err != nil { - a.log.Warn().Err(err).Msg("Unable to get client") + a.log.Err(err).Warn("Unable to get client") return true, false, nil } s, exists := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().GetSimple(k8sutil.CreateTLSKeyfileSecretName(a.actionCtx.GetAPIObject().GetName(), a.action.Group.AsRole(), a.action.MemberID)) if !exists { - a.log.Warn().Msg("Keyfile secret is missing") + a.log.Warn("Keyfile secret is missing") return true, false, nil } keyfile, ok := s.Data[constants.SecretTLSKeyfile] if !ok { - a.log.Warn().Msg("Keyfile secret is invalid") + a.log.Warn("Keyfile secret is invalid") return true, false, nil } @@ -80,7 +79,7 @@ func (a *refreshTLSKeyfileCertificateAction) CheckProgress(ctx context.Context) defer cancel() e, err := client.RefreshTLS(ctxChild) if err != nil { - a.log.Warn().Err(err).Msg("Unable to refresh TLS") + a.log.Err(err).Warn("Unable to refresh TLS") return true, false, nil } diff --git a/pkg/deployment/reconcile/action_tls_propagated.go b/pkg/deployment/reconcile/action_tls_propagated.go index 580edb331..0a097604f 100644 --- a/pkg/deployment/reconcile/action_tls_propagated.go +++ b/pkg/deployment/reconcile/action_tls_propagated.go @@ -24,17 +24,16 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeTLSPropagated, newTLSPropagated, defaultTimeout) } -func newTLSPropagated(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newTLSPropagated(action api.Action, actionCtx ActionContext) Action { a := &tlsPropagatedAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -48,7 +47,7 @@ type tlsPropagatedAction struct { func (a *tlsPropagatedAction) Start(ctx context.Context) (bool, error) { propagatedFlag, exists := a.action.Params[propagated] if !exists { - a.log.Error().Msgf("Propagated flag is missing") + 
a.log.Error("Propagated flag is missing") return true, nil } diff --git a/pkg/deployment/reconcile/action_tls_sni_update.go b/pkg/deployment/reconcile/action_tls_sni_update.go index 809dccece..5461d882d 100644 --- a/pkg/deployment/reconcile/action_tls_sni_update.go +++ b/pkg/deployment/reconcile/action_tls_sni_update.go @@ -26,17 +26,16 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeUpdateTLSSNI, newTLSSNIUpdate, tlsSNIUpdateTimeout) } -func newTLSSNIUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newTLSSNIUpdate(action api.Action, actionCtx ActionContext) Action { a := &tlsSNIUpdate{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -64,7 +63,7 @@ func (t *tlsSNIUpdate) CheckProgress(ctx context.Context) (bool, bool, error) { fetchedSecrets, err := mapTLSSNIConfig(*sni, t.actionCtx.ACS().CurrentClusterCache()) if err != nil { - t.log.Warn().Err(err).Msg("Unable to get SNI desired state") + t.log.Err(err).Warn("Unable to get SNI desired state") return true, false, nil } @@ -72,14 +71,14 @@ func (t *tlsSNIUpdate) CheckProgress(ctx context.Context) (bool, bool, error) { defer cancel() c, err := t.actionCtx.GetServerClient(ctxChild, t.action.Group, t.action.MemberID) if err != nil { - t.log.Warn().Err(err).Msg("Unable to get client") + t.log.Err(err).Warn("Unable to get client") return true, false, nil } ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() if ok, err := compareTLSSNIConfig(ctxChild, c.Connection(), fetchedSecrets, true); err != nil { - t.log.Warn().Err(err).Msg("Unable to compare TLS config") + t.log.Err(err).Warn("Unable to compare TLS config") return true, false, nil } else { return ok, false, nil diff --git a/pkg/deployment/reconcile/action_tls_status_update.go b/pkg/deployment/reconcile/action_tls_status_update.go index ea6d0501c..79814cf12 100644 --- a/pkg/deployment/reconcile/action_tls_status_update.go +++ b/pkg/deployment/reconcile/action_tls_status_update.go @@ -30,17 +30,16 @@ import ( meta "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeTLSKeyStatusUpdate, newTLSKeyStatusUpdate, defaultTimeout) } -func newTLSKeyStatusUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newTLSKeyStatusUpdate(action api.Action, actionCtx ActionContext) Action { a := &tlsKeyStatusUpdateAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -60,7 +59,7 @@ func (a *tlsKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) { defer cancel() f, err := a.actionCtx.ACS().CurrentClusterCache().Secret().V1().Read().Get(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), meta.GetOptions{}) if err != nil { - a.log.Error().Err(err).Msgf("Unable to get folder info") + a.log.Err(err).Error("Unable to get folder info") return true, nil } diff --git a/pkg/deployment/reconcile/action_topology_disable.go b/pkg/deployment/reconcile/action_topology_disable.go index 5dd140bc0..15d7248bd 100644 --- a/pkg/deployment/reconcile/action_topology_disable.go +++ b/pkg/deployment/reconcile/action_topology_disable.go @@ -22,17 +22,16 @@ package 
reconcile import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeTopologyDisable, newTopologyDisable, defaultTimeout) } -func newTopologyDisable(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newTopologyDisable(action api.Action, actionCtx ActionContext) Action { a := &topologyDisable{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_topology_enable.go b/pkg/deployment/reconcile/action_topology_enable.go index 1fc36d982..74b8444f8 100644 --- a/pkg/deployment/reconcile/action_topology_enable.go +++ b/pkg/deployment/reconcile/action_topology_enable.go @@ -22,17 +22,16 @@ package reconcile import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeTopologyEnable, newTopologyEnable, defaultTimeout) } -func newTopologyEnable(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newTopologyEnable(action api.Action, actionCtx ActionContext) Action { a := &topologyEnable{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_topology_member_assignment.go b/pkg/deployment/reconcile/action_topology_member_assignment.go index a17d965a2..f46bee16a 100644 --- a/pkg/deployment/reconcile/action_topology_member_assignment.go +++ b/pkg/deployment/reconcile/action_topology_member_assignment.go @@ -22,17 +22,16 @@ package reconcile import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeTopologyMemberAssignment, newTopologyMemberAssignment, defaultTimeout) } -func newTopologyMemberAssignment(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newTopologyMemberAssignment(action api.Action, actionCtx ActionContext) Action { a := &topologyMemberAssignment{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_topology_zones_update.go b/pkg/deployment/reconcile/action_topology_zones_update.go index 466deb604..403282e9d 100644 --- a/pkg/deployment/reconcile/action_topology_zones_update.go +++ b/pkg/deployment/reconcile/action_topology_zones_update.go @@ -22,17 +22,16 @@ package reconcile import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { registerAction(api.ActionTypeTopologyZonesUpdate, newTopologyZonesUpdate, defaultTimeout) } -func newTopologyZonesUpdate(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newTopologyZonesUpdate(action api.Action, actionCtx ActionContext) Action { a := &topologyZonesUpdate{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } diff --git a/pkg/deployment/reconcile/action_upgrade_current_image.go b/pkg/deployment/reconcile/action_upgrade_current_image.go index 09959a8f7..23084e804 100644 --- a/pkg/deployment/reconcile/action_upgrade_current_image.go +++ b/pkg/deployment/reconcile/action_upgrade_current_image.go @@ -26,7 +26,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" api 
"github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -35,10 +34,10 @@ func init() { // newSetCurrentImageAction creates a new Action that implements the given // planned SetCurrentImage action. -func newSetCurrentImageAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newSetCurrentImageAction(action api.Action, actionCtx ActionContext) Action { a := &setCurrentImageAction{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -63,16 +62,14 @@ func (a *setCurrentImageAction) Start(ctx context.Context) (bool, error) { // CheckProgress checks the progress of the action. // Returns true if the action is completely finished, false otherwise. func (a *setCurrentImageAction) CheckProgress(ctx context.Context) (bool, bool, error) { - log := a.log - imageInfo, found := a.actionCtx.GetImageInfo(a.action.Image) if !found { return false, false, nil } if err := a.actionCtx.SetCurrentImage(ctx, imageInfo); err != nil { - log.Error().Err(err).Msg("Unable to set current image") + a.log.Err(err).Error("Unable to set current image") return false, false, nil } - log.Info().Str("image", a.action.Image).Str("to", imageInfo.Image).Msg("Changed current main image") + a.log.Str("image", a.action.Image).Str("to", imageInfo.Image).Info("Changed current main image") return true, false, nil } diff --git a/pkg/deployment/reconcile/action_upgrade_member.go b/pkg/deployment/reconcile/action_upgrade_member.go index f5157a884..a6bc2e32f 100644 --- a/pkg/deployment/reconcile/action_upgrade_member.go +++ b/pkg/deployment/reconcile/action_upgrade_member.go @@ -26,7 +26,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" ) func init() { @@ -35,10 +34,10 @@ func init() { // newUpgradeMemberAction creates a new Action that implements the given // planned UpgradeMember action. -func newUpgradeMemberAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newUpgradeMemberAction(action api.Action, actionCtx ActionContext) Action { a := &actionUpgradeMember{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -53,10 +52,9 @@ type actionUpgradeMember struct { // Returns true if the action is completely finished, false in case // the start time needs to be recorded and a ready condition needs to be checked. func (a *actionUpgradeMember) Start(ctx context.Context) (bool, error) { - log := a.log m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Error().Msg("No such member") + a.log.Error("No such member") } // Set AutoUpgrade condition m.Conditions.Update(api.ConditionTypeAutoUpgrade, true, "Upgrading", "AutoUpgrade on first restart") @@ -76,10 +74,9 @@ func (a *actionUpgradeMember) Start(ctx context.Context) (bool, error) { // Returns true if the action is completely finished, false otherwise. 
func (a *actionUpgradeMember) CheckProgress(ctx context.Context) (bool, bool, error) { // Check that pod is removed - log := a.log m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !found { - log.Error().Msg("No such member") + a.log.Error("No such member") return true, false, nil } @@ -100,7 +97,7 @@ func (a *actionUpgradeMember) CheckProgress(ctx context.Context) (bool, bool, er if isUpgrading { if m.Conditions.IsTrue(api.ConditionTypeTerminated) { if m.Conditions.IsTrue(api.ConditionTypeUpgradeFailed) { - a.log.Error().Msgf("Upgrade of member failed") + a.log.Error("Upgrade of member failed") } // Invalidate plan m.Phase = "" @@ -115,15 +112,11 @@ func (a *actionUpgradeMember) CheckProgress(ctx context.Context) (bool, bool, er return false, true, nil } - log.Error().Msgf("Upgrade failed") + a.log.Error("Upgrade failed") return false, true, nil } } - log = log.With(). - Str("pod-name", m.PodName). - Bool("is-upgrading", isUpgrading).Logger() - act := actionWaitForMemberUp{ actionImpl: a.actionImpl, } diff --git a/pkg/deployment/reconcile/action_wait_for_member_in_sync.go b/pkg/deployment/reconcile/action_wait_for_member_in_sync.go index 9e51828cd..aed0b01ba 100644 --- a/pkg/deployment/reconcile/action_wait_for_member_in_sync.go +++ b/pkg/deployment/reconcile/action_wait_for_member_in_sync.go @@ -25,8 +25,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/agency" ) @@ -37,10 +35,10 @@ func init() { // newWaitForMemberUpAction creates a new Action that implements the given // planned WaitForShardInSync action. -func newWaitForMemberInSync(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newWaitForMemberInSync(action api.Action, actionCtx ActionContext) Action { a := &actionWaitForMemberInSync{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -64,7 +62,7 @@ func (a *actionWaitForMemberInSync) Start(ctx context.Context) (bool, error) { func (a *actionWaitForMemberInSync) CheckProgress(_ context.Context) (bool, bool, error) { member, ok := a.actionCtx.GetMemberStatusByID(a.MemberID()) if !ok || member.Phase == api.MemberPhaseFailed { - a.log.Debug().Msg("Member in failed phase") + a.log.Debug("Member in failed phase") return true, false, nil } @@ -98,14 +96,14 @@ func (a *actionWaitForMemberInSync) checkCluster() (bool, error) { case api.ServerGroupDBServers: agencyState, ok := a.actionCtx.GetAgencyCache() if !ok { - a.log.Info().Str("mode", "cluster").Str("member", a.MemberID()).Msgf("AgencyCache is missing") + a.log.Str("mode", "cluster").Str("member", a.MemberID()).Info("AgencyCache is missing") return false, nil } notInSyncShards := agency.GetDBServerShardsNotInSync(agencyState, a.MemberID()) if len(notInSyncShards) > 0 { - a.log.Info().Str("mode", "cluster").Str("member", a.MemberID()).Int("shard", len(notInSyncShards)).Msgf("DBServer contains not in sync shards") + a.log.Str("mode", "cluster").Str("member", a.MemberID()).Int("shard", len(notInSyncShards)).Info("DBServer contains not in sync shards") return false, nil } } diff --git a/pkg/deployment/reconcile/action_wait_for_member_up.go b/pkg/deployment/reconcile/action_wait_for_member_up.go index e7d7ea2d3..f68a8fc9d 100644 --- a/pkg/deployment/reconcile/action_wait_for_member_up.go +++ b/pkg/deployment/reconcile/action_wait_for_member_up.go @@ -31,8 +31,6 @@ 
import ( driver "github.com/arangodb/go-driver" "github.com/arangodb/go-driver/agency" - "github.com/rs/zerolog" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" ) @@ -42,10 +40,10 @@ func init() { // newWaitForMemberUpAction creates a new Action that implements the given // planned WaitForMemberUp action. -func newWaitForMemberUpAction(log zerolog.Logger, action api.Action, actionCtx ActionContext) Action { +func newWaitForMemberUpAction(action api.Action, actionCtx ActionContext) Action { a := &actionWaitForMemberUp{} - a.actionImpl = newActionImplDefRef(log, action, actionCtx) + a.actionImpl = newActionImplDefRef(action, actionCtx) return a } @@ -72,7 +70,7 @@ func (a *actionWaitForMemberUp) Start(ctx context.Context) (bool, error) { func (a *actionWaitForMemberUp) CheckProgress(ctx context.Context) (bool, bool, error) { member, ok := a.actionCtx.GetMemberStatusByID(a.MemberID()) if !ok || member.Phase == api.MemberPhaseFailed { - a.log.Debug().Msg("Member in failed phase") + a.log.Debug("Member in failed phase") return true, false, nil } @@ -101,15 +99,13 @@ func (a *actionWaitForMemberUp) CheckProgress(ctx context.Context) (bool, bool, // checkProgressSingle checks the progress of the action in the case // of a single server. func (a *actionWaitForMemberUp) checkProgressSingle(ctx context.Context) (bool, bool, error) { - log := a.log - c, err := a.actionCtx.GetDatabaseClient(ctx) if err != nil { - log.Debug().Err(err).Msg("Failed to create database client") + a.log.Err(err).Debug("Failed to create database client") return false, false, nil } if _, err := c.Version(ctx); err != nil { - log.Debug().Err(err).Msg("Failed to get version") + a.log.Err(err).Debug("Failed to get version") return false, false, nil } return true, false, nil @@ -118,14 +114,13 @@ func (a *actionWaitForMemberUp) checkProgressSingle(ctx context.Context) (bool, // checkProgressSingleInActiveFailover checks the progress of the action in the case // of a single server as part of an active failover deployment. func (a *actionWaitForMemberUp) checkProgressSingleInActiveFailover(ctx context.Context) (bool, bool, error) { - log := a.log c, err := a.actionCtx.GetServerClient(ctx, a.action.Group, a.action.MemberID) if err != nil { - log.Debug().Err(err).Msg("Failed to create database client") + a.log.Err(err).Debug("Failed to create database client") return false, false, nil } if _, err := c.Version(ctx); err != nil { - log.Debug().Err(err).Msg("Failed to get version") + a.log.Err(err).Debug("Failed to get version") return false, false, nil } return true, false, nil @@ -134,10 +129,9 @@ func (a *actionWaitForMemberUp) checkProgressSingleInActiveFailover(ctx context. // checkProgressAgent checks the progress of the action in the case // of an agent. 
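Another recurring change above: the local aliases (log := a.log) and derived loggers (log := a.log.With().Str("containerName", ...).Logger(), or the pod-name/is-upgrading logger removed from the upgrade action) disappear, and per-member context is attached at each call site instead. A hypothetical CheckProgress in that style, assuming the chained methods (Str, Bool, Debug, Error) seen elsewhere in this diff; exampleProgressAction is not a real type in the repository.

package reconcile

import (
	"context"
)

type exampleProgressAction struct {
	actionImpl
}

func (a *exampleProgressAction) CheckProgress(ctx context.Context) (bool, bool, error) {
	m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
	if !ok {
		a.log.Error("No such member")
		return true, false, nil
	}

	// Fields travel with the call, not with a stored logger value.
	a.log.Str("pod-name", m.PodName).Bool("is-upgrading", true).Debug("member is still rotating")
	return false, false, nil
}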
func (a *actionWaitForMemberUp) checkProgressAgent(ctx context.Context) (bool, bool, error) { - log := a.log clients, err := a.actionCtx.GetAgencyClients(ctx) if err != nil { - log.Debug().Err(err).Msg("Failed to create agency clients") + a.log.Err(err).Debug("Failed to create agency clients") return false, false, nil } @@ -151,11 +145,11 @@ func (a *actionWaitForMemberUp) checkProgressAgent(ctx context.Context) (bool, b shortCtx = agency.WithAllowDifferentLeaderEndpoints(shortCtx) if err := agency.AreAgentsHealthy(shortCtx, clients); err != nil { - log.Debug().Err(err).Msg("Not all agents are ready") + a.log.Err(err).Debug("Not all agents are ready") return false, false, nil } - log.Debug().Msg("Agency is happy") + a.log.Debug("Agency is happy") return true, false, nil } @@ -163,29 +157,28 @@ func (a *actionWaitForMemberUp) checkProgressAgent(ctx context.Context) (bool, b // checkProgressCluster checks the progress of the action in the case // of a cluster deployment (coordinator/dbserver). func (a *actionWaitForMemberUp) checkProgressCluster() (bool, bool, error) { - log := a.log h := a.actionCtx.GetMembersState().Health() if h.Error != nil { - log.Debug().Err(h.Error).Msg("Cluster health is missing") + a.log.Err(h.Error).Debug("Cluster health is missing") return false, false, nil } sh, found := h.Members[driver.ServerID(a.action.MemberID)] if !found { - log.Debug().Msg("Member not yet found in cluster health") + a.log.Debug("Member not yet found in cluster health") return false, false, nil } if sh.Status != driver.ServerStatusGood { - log.Debug().Str("status", string(sh.Status)).Msg("Member set status not yet good") + a.log.Str("status", string(sh.Status)).Debug("Member set status not yet good") return false, false, nil } // Wait for the member to become ready from a kubernetes point of view // otherwise the coordinators may be rotated to fast and thus none of them // is ready resulting in a short downtime if m, found := a.actionCtx.GetMemberStatusByID(a.MemberID()); !found { - log.Error().Msg("No such member") + a.log.Error("No such member") return false, true, nil } else if !m.Conditions.IsTrue(api.ConditionTypeReady) { - log.Debug().Msg("Member not yet ready") + a.log.Debug("Member not yet ready") return false, false, nil } @@ -195,14 +188,13 @@ func (a *actionWaitForMemberUp) checkProgressCluster() (bool, bool, error) { // checkProgressArangoSync checks the progress of the action in the case // of a sync master / worker. 
func (a *actionWaitForMemberUp) checkProgressArangoSync(ctx context.Context) (bool, bool, error) { - log := a.log c, err := a.actionCtx.GetSyncServerClient(ctx, a.action.Group, a.action.MemberID) if err != nil { - log.Debug().Err(err).Msg("Failed to create arangosync client") + a.log.Err(err).Debug("Failed to create arangosync client") return false, false, nil } if err := c.Health(ctx); err != nil { - log.Debug().Err(err).Msg("Health not ok yet") + a.log.Err(err).Debug("Health not ok yet") return false, false, nil } return true, false, nil diff --git a/pkg/deployment/reconcile/condition_member_recreation.go b/pkg/deployment/reconcile/condition_member_recreation.go index 5fd20d15e..84f584c49 100644 --- a/pkg/deployment/reconcile/condition_member_recreation.go +++ b/pkg/deployment/reconcile/condition_member_recreation.go @@ -25,7 +25,6 @@ import ( "fmt" "strings" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" @@ -35,15 +34,15 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" ) -func createMemberRecreationConditionsPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createMemberRecreationConditionsPlan(ctx context.Context, + apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var p api.Plan for _, m := range status.Members.AsList() { - message, recreate := EvaluateMemberRecreationCondition(ctx, log, apiObject, spec, status, m.Group, m.Member, - context, isStorageClassChanged, isVolumeSizeChanged) + message, recreate := EvaluateMemberRecreationCondition(ctx, apiObject, spec, status, m.Group, m.Member, + context, r.isStorageClassChanged, r.isVolumeSizeChanged) if !recreate { if _, ok := m.Member.Conditions.Get(api.MemberReplacementRequired); ok { @@ -62,20 +61,20 @@ func createMemberRecreationConditionsPlan(ctx context.Context, } type MemberRecreationConditionEvaluator func(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, + apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, group api.ServerGroup, member api.MemberStatus, context PlanBuilderContext) (bool, string, error) func EvaluateMemberRecreationCondition(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, + apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, group api.ServerGroup, member api.MemberStatus, context PlanBuilderContext, evaluators ...MemberRecreationConditionEvaluator) (string, bool) { args := make([]string, 0, len(evaluators)) for _, e := range evaluators { - ok, s, err := e(ctx, log, apiObject, spec, status, group, member, context) + ok, s, err := e(ctx, apiObject, spec, status, group, member, context) if err != nil { // When one of an evaluator requires pod's replacement then it should be done. continue @@ -90,7 +89,7 @@ func EvaluateMemberRecreationCondition(ctx context.Context, } // isStorageClassChanged returns true and reason when the member should be replaced. 
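The member-recreation evaluators stop taking a zerolog.Logger as well: createMemberRecreationConditionsPlan and the evaluators it calls become *Reconciler methods, the MemberRecreationConditionEvaluator type loses its log parameter, and logging goes through r.log. A hypothetical evaluator with the new shape; isExampleChanged is not part of the repository, and r.log is assumed to expose the same chained methods as a.log.

package reconcile

import (
	"context"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

// isExampleChanged matches the post-refactor MemberRecreationConditionEvaluator shape.
func (r *Reconciler) isExampleChanged(_ context.Context, _ k8sutil.APIObject, spec api.DeploymentSpec,
	_ api.DeploymentStatus, group api.ServerGroup, member api.MemberStatus, _ PlanBuilderContext) (bool, string, error) {
	if spec.GetMode() == api.DeploymentModeSingle {
		// This hypothetical check skips single-server deployments.
		return false, "", nil
	}

	r.log.Str("role", group.AsRole()).Str("id", member.ID).Debug("example evaluator saw nothing to replace")
	return false, "", nil
}

Such a method would be passed in the evaluator list next to r.isStorageClassChanged and r.isVolumeSizeChanged in the EvaluateMemberRecreationCondition call shown above.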
-func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, +func (r *Reconciler) isStorageClassChanged(_ context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, _ api.DeploymentStatus, group api.ServerGroup, member api.MemberStatus, context PlanBuilderContext) (bool, string, error) { if spec.GetMode() == api.DeploymentModeSingle { @@ -122,7 +121,7 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su // Check if a storage class changed. if pvc, ok := cache.PersistentVolumeClaim().V1().GetSimple(member.PersistentVolumeClaimName); !ok { - log.Warn().Str("role", group.AsRole()).Str("id", member.ID).Msg("Failed to get PVC") + r.log.Str("role", group.AsRole()).Str("id", member.ID).Warn("Failed to get PVC") return false, "", fmt.Errorf("failed to get PVC %s", member.PersistentVolumeClaimName) } else { pvcClassName := util.StringOrDefault(pvc.Spec.StorageClassName) @@ -147,10 +146,10 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su // If pod does not exist then it will try next time. if pod, ok := cache.Pod().V1().GetSimple(member.PodName); ok { if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok { - log.Warn(). + r.log. Str("pod-name", member.PodName). Str("server-group", group.AsRole()). - Msgf("try changing a storage class name, but %s", getRequiredReplaceMessage(member.PodName)) + Warn("try changing a storage class name, but %s", getRequiredReplaceMessage(member.PodName)) // No return here. } } else { @@ -161,7 +160,7 @@ func isStorageClassChanged(_ context.Context, log zerolog.Logger, apiObject k8su } // isVolumeSizeChanged returns true and reason when the member should be replaced. -func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObject, spec api.DeploymentSpec, +func (r *Reconciler) isVolumeSizeChanged(_ context.Context, _ k8sutil.APIObject, spec api.DeploymentSpec, _ api.DeploymentStatus, group api.ServerGroup, member api.MemberStatus, context PlanBuilderContext) (bool, string, error) { if spec.GetMode() == api.DeploymentModeSingle { @@ -186,10 +185,10 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj pvc, ok := cache.PersistentVolumeClaim().V1().GetSimple(member.PersistentVolumeClaimName) if !ok { - log.Warn(). + r.log. Str("role", group.AsRole()). Str("id", member.ID). - Msg("Failed to get PVC") + Warn("Failed to get PVC") return false, "", fmt.Errorf("failed to get PVC %s", member.PersistentVolumeClaimName) } @@ -201,10 +200,10 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj } if group != api.ServerGroupDBServers { - log.Error(). + r.log. Str("pvc-storage-size", volumeSize.String()). Str("requested-size", requestedSize.String()). - Msgf("Volume size should not shrink, because it is not possible for \"%s\"", group.AsRole()) + Warn("Volume size should not shrink, because it is not possible for \"%s\"", group.AsRole()) return false, "", nil } @@ -213,8 +212,8 @@ func isVolumeSizeChanged(_ context.Context, log zerolog.Logger, _ k8sutil.APIObj // If pod does not exist then it will try next time. if pod, ok := cache.Pod().V1().GetSimple(member.PodName); ok { if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok { - log.Warn().Str("pod-name", member.PodName). - Msgf("try shrinking volume size, but %s", getRequiredReplaceMessage(member.PodName)) + r.log.Str("pod-name", member.PodName). 
+ Warn("try shrinking volume size, but %s", getRequiredReplaceMessage(member.PodName)) // No return here. } } else { diff --git a/pkg/deployment/reconcile/helper_shutdown.go b/pkg/deployment/reconcile/helper_shutdown.go index 6bdc9abe1..e2fe65d96 100644 --- a/pkg/deployment/reconcile/helper_shutdown.go +++ b/pkg/deployment/reconcile/helper_shutdown.go @@ -25,7 +25,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -40,68 +39,64 @@ import ( // Returns true when member status exists. // There are 3 possibilities to shut down the pod: immediately, gracefully, standard kubernetes delete API. // When pod does not exist then success action (which always successes) is returned. -func getShutdownHelper(a *api.Action, actionCtx ActionContext, log zerolog.Logger) (ActionCore, api.MemberStatus, bool) { - m, ok := actionCtx.GetMemberStatusByID(a.MemberID) +func getShutdownHelper(a actionImpl) (ActionCore, api.MemberStatus, bool) { + m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID) if !ok { - log.Warn().Str("pod-name", m.PodName).Msg("member is already gone") + a.log.Str("pod-name", m.PodName).Warn("member is already gone") return nil, api.MemberStatus{}, false } - cache, ok := actionCtx.ACS().ClusterCache(m.ClusterID) + cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID) if !ok { - log.Warn().Str("pod-name", m.PodName).Msg("Cluster is not ready") + a.log.Str("pod-name", m.PodName).Warn("Cluster is not ready") return nil, api.MemberStatus{}, false } - if ifPodUIDMismatch(m, *a, cache) { - log.Error().Msg("Member UID is changed") + if ifPodUIDMismatch(m, a.action, cache) { + a.log.Error("Member UID is changed") return NewActionSuccess(), m, true } pod, ok := cache.Pod().V1().GetSimple(m.PodName) if !ok { - log.Warn().Str("pod-name", m.PodName).Msg("pod is already gone") + a.log.Str("pod-name", m.PodName).Warn("pod is already gone") // Pod does not exist, so create success action to finish it immediately. return NewActionSuccess(), m, true } if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodDeleteNow]; ok { // The pod contains annotation, so pod must be deleted immediately. 
- return shutdownNow{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true + return shutdownNow{actionImpl: a, memberStatus: m}, m, true } if features.GracefulShutdown().Enabled() { - return shutdownHelperAPI{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true + return shutdownHelperAPI{actionImpl: a, memberStatus: m}, m, true } - serverGroup := actionCtx.GetSpec().GetServerGroupSpec(a.Group) + serverGroup := a.actionCtx.GetSpec().GetServerGroupSpec(a.action.Group) switch serverGroup.ShutdownMethod.Get() { case api.ServerGroupShutdownMethodDelete: - return shutdownHelperDelete{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true + return shutdownHelperDelete{actionImpl: a, memberStatus: m}, m, true default: - return shutdownHelperAPI{action: a, actionCtx: actionCtx, log: log, memberStatus: m}, m, true + return shutdownHelperAPI{actionImpl: a, memberStatus: m}, m, true } } type shutdownHelperAPI struct { - log zerolog.Logger - action *api.Action - actionCtx ActionContext + actionImpl memberStatus api.MemberStatus } func (s shutdownHelperAPI) Start(ctx context.Context) (bool, error) { - log := s.log - - log.Info().Msgf("Using API to shutdown member") + s.log.Info("Using API to shutdown member") group := s.action.Group podName := s.memberStatus.PodName if podName == "" { - log.Warn().Msgf("Pod is empty") + s.log.Warn("Pod is empty") return true, nil } @@ -131,11 +126,11 @@ func (s shutdownHelperAPI) Start(ctx context.Context) (bool, error) { defer cancel() c, err := s.actionCtx.GetServerClient(ctxChild, group, s.action.MemberID) if err != nil { - log.Debug().Err(err).Msg("Failed to create member client") + s.log.Err(err).Debug("Failed to create member client") return false, errors.WithStack(err) } removeFromCluster := false - log.Debug().Bool("removeFromCluster", removeFromCluster).Msg("Shutting down member") + s.log.Bool("removeFromCluster", removeFromCluster).Debug("Shutting down member") ctxChild, cancel = context.WithTimeout(ctx, shutdownTimeout) defer cancel() if err := c.ShutdownV2(ctxChild, removeFromCluster, true); err != nil { @@ -144,7 +139,7 @@ func (s shutdownHelperAPI) Start(ctx context.Context) (bool, error) { // We're done return true, nil } - log.Debug().Err(err).Msg("Failed to shutdown member") + s.log.Err(err).Debug("Failed to shutdown member") return false, errors.WithStack(err) } } else if group.IsArangosync() { @@ -164,20 +159,16 @@ func (s shutdownHelperAPI) CheckProgress(_ context.Context) (bool, bool, error) } type shutdownHelperDelete struct { - log zerolog.Logger - action *api.Action - actionCtx ActionContext + actionImpl memberStatus api.MemberStatus } func (s shutdownHelperDelete) Start(ctx context.Context) (bool, error) { - log := s.log - - log.Info().Msgf("Using Pod Delete to shutdown member") + s.log.Info("Using Pod Delete to shutdown member") podName := s.memberStatus.PodName if podName == "" { - log.Warn().Msgf("Pod is empty") + s.log.Warn("Pod is empty") return true, nil } @@ -198,23 +189,22 @@ func (s shutdownHelperDelete) Start(ctx context.Context) (bool, error) { func (s shutdownHelperDelete) CheckProgress(ctx context.Context) (bool, bool, error) { // Check that pod is removed - log := s.log if !s.memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) { // Pod is not yet terminated - log.Warn().Msgf("Pod not yet terminated") + s.log.Warn("Pod not yet terminated") return false, false, nil } cache, ok := s.actionCtx.ACS().ClusterCache(s.memberStatus.ClusterID) if !ok { - log.Warn().Msg("Cluster is not 
ready") + s.log.Warn("Cluster is not ready") return false, false, nil } podName := s.memberStatus.PodName if podName != "" { if _, ok := cache.Pod().V1().GetSimple(podName); ok { - log.Warn().Msgf("Pod still exists") + s.log.Warn("Pod still exists") return false, false, nil } } @@ -223,17 +213,15 @@ func (s shutdownHelperDelete) CheckProgress(ctx context.Context) (bool, bool, er } type shutdownNow struct { - action *api.Action - actionCtx ActionContext + actionImpl memberStatus api.MemberStatus - log zerolog.Logger } // Start starts removing pod forcefully. func (s shutdownNow) Start(ctx context.Context) (bool, error) { // Check progress is used here because removing pod can start gracefully, // and then it can be changed to force shutdown. - s.log.Info().Msg("Using shutdown now method") + s.log.Info("Using shutdown now method") ready, _, err := s.CheckProgress(ctx) return ready, err } @@ -244,18 +232,18 @@ func (s shutdownNow) CheckProgress(ctx context.Context) (bool, bool, error) { cache, ok := s.actionCtx.ACS().ClusterCache(s.memberStatus.ClusterID) if !ok { - s.log.Warn().Msg("Cluster is not ready") + s.log.Warn("Cluster is not ready") return false, false, nil } pod, ok := cache.Pod().V1().GetSimple(podName) if !ok { - s.log.Info().Msg("Using shutdown now method completed because pod is gone") + s.log.Info("Using shutdown now method completed because pod is gone") return true, false, nil } if s.memberStatus.PodUID != pod.GetUID() { - s.log.Info().Msg("Using shutdown now method completed because it is already rotated") + s.log.Info("Using shutdown now method completed because it is already rotated") // The new pod has been started already. return true, false, nil } @@ -283,6 +271,6 @@ func (s shutdownNow) CheckProgress(ctx context.Context) (bool, bool, error) { } } - s.log.Info().Msgf("Using shutdown now method completed") + s.log.Info("Using shutdown now method completed") return true, false, nil } diff --git a/pkg/deployment/reconcile/plan_builder.go b/pkg/deployment/reconcile/plan_builder.go index dd255a2c8..691265380 100644 --- a/pkg/deployment/reconcile/plan_builder.go +++ b/pkg/deployment/reconcile/plan_builder.go @@ -24,8 +24,6 @@ import ( "context" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - - inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" ) const ( @@ -41,6 +39,6 @@ const ( // CreatePlan considers the current specification & status of the deployment creates a plan to // get the status in line with the specification. // If a plan already exists, nothing is done. 
-func (d *Reconciler) CreatePlan(ctx context.Context, cachedStatus inspectorInterface.Inspector) (error, bool) { - return d.generatePlan(ctx, d.generatePlanFunc(createHighPlan, plannerHigh{}), d.generatePlanFunc(createResourcesPlan, plannerResources{}), d.generatePlanFunc(createNormalPlan, plannerNormal{})) +func (d *Reconciler) CreatePlan(ctx context.Context) (error, bool) { + return d.generatePlan(ctx, d.generatePlanFunc(d.createHighPlan, plannerHigh{}), d.generatePlanFunc(d.createResourcesPlan, plannerResources{}), d.generatePlanFunc(d.createNormalPlan, plannerNormal{})) } diff --git a/pkg/deployment/reconcile/plan_builder_appender.go b/pkg/deployment/reconcile/plan_builder_appender.go index 4046cdfcf..294c6eccf 100644 --- a/pkg/deployment/reconcile/plan_builder_appender.go +++ b/pkg/deployment/reconcile/plan_builder_appender.go @@ -24,7 +24,7 @@ import ( "time" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" + "github.com/arangodb/kube-arangodb/pkg/logging" ) func newPlanAppender(pb WithPlanBuilder, backoff api.BackOff, current api.Plan) PlanAppender { @@ -35,7 +35,7 @@ func newPlanAppender(pb WithPlanBuilder, backoff api.BackOff, current api.Plan) } } -func recoverPlanAppender(log zerolog.Logger, p PlanAppender) PlanAppender { +func recoverPlanAppender(log logging.Logger, p PlanAppender) PlanAppender { return planAppenderRecovery{ appender: p, log: log, @@ -60,8 +60,8 @@ type PlanAppender interface { } type planAppenderRecovery struct { + log logging.Logger appender PlanAppender - log zerolog.Logger } func (p planAppenderRecovery) BackOff() api.BackOff { @@ -84,7 +84,7 @@ func (p planAppenderRecovery) create(ret func(in PlanAppender) PlanAppender) (r defer func() { if e := recover(); e != nil { r = p - p.log.Error().Interface("panic", e).Msgf("Recovering from panic") + p.log.Interface("panic", e).Error("Recovering from panic") } }() diff --git a/pkg/deployment/reconcile/plan_builder_appender_test.go b/pkg/deployment/reconcile/plan_builder_appender_test.go index c697c6618..b3f25ec24 100644 --- a/pkg/deployment/reconcile/plan_builder_appender_test.go +++ b/pkg/deployment/reconcile/plan_builder_appender_test.go @@ -26,42 +26,40 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" ) func Test_PlanBuilderAppender_Recovery(t *testing.T) { t.Run("Recover", func(t *testing.T) { - require.Len(t, recoverPlanAppender(log.Logger, newPlanAppender(NewWithPlanBuilder(context.Background(), zerolog.Logger{}, nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)). - Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + require.Len(t, recoverPlanAppender(testLogger, newPlanAppender(NewWithPlanBuilder(context.Background(), nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)). + Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { panic("") }). 
- Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { panic("SomePanic") }).Plan(), 0) }) t.Run("Recover with output", func(t *testing.T) { - require.Len(t, recoverPlanAppender(log.Logger, newPlanAppender(NewWithPlanBuilder(context.Background(), zerolog.Logger{}, nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)). - Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + require.Len(t, recoverPlanAppender(testLogger, newPlanAppender(NewWithPlanBuilder(context.Background(), nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)). + Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { return api.Plan{api.Action{}} }). - ApplyIfEmpty(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + ApplyIfEmpty(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { panic("SomePanic") }). - ApplyIfEmpty(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + ApplyIfEmpty(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { return api.Plan{api.Action{}, api.Action{}} }).Plan(), 1) }) t.Run("Recover with multi", func(t *testing.T) { - require.Len(t, recoverPlanAppender(log.Logger, newPlanAppender(NewWithPlanBuilder(context.Background(), zerolog.Logger{}, nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)). - Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + require.Len(t, recoverPlanAppender(testLogger, newPlanAppender(NewWithPlanBuilder(context.Background(), nil, api.DeploymentSpec{}, api.DeploymentStatus{}, nil), nil, nil)). + Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { return api.Plan{api.Action{}} }). - ApplyIfEmpty(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + ApplyIfEmpty(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { panic("SomePanic") }). 
- Apply(func(_ context.Context, _ zerolog.Logger, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { + Apply(func(_ context.Context, _ k8sutil.APIObject, _ api.DeploymentSpec, _ api.DeploymentStatus, _ PlanBuilderContext) api.Plan { return api.Plan{api.Action{}, api.Action{}} }).Plan(), 3) }) diff --git a/pkg/deployment/reconcile/plan_builder_bootstrap.go b/pkg/deployment/reconcile/plan_builder_bootstrap.go index da25b72a5..64643c936 100644 --- a/pkg/deployment/reconcile/plan_builder_bootstrap.go +++ b/pkg/deployment/reconcile/plan_builder_bootstrap.go @@ -26,11 +26,9 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func createBootstrapPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createBootstrapPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if !status.Conditions.IsTrue(api.ConditionTypeReady) { diff --git a/pkg/deployment/reconcile/plan_builder_clean_out.go b/pkg/deployment/reconcile/plan_builder_clean_out.go index fcf147511..97df34731 100644 --- a/pkg/deployment/reconcile/plan_builder_clean_out.go +++ b/pkg/deployment/reconcile/plan_builder_clean_out.go @@ -26,8 +26,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/globals" - "github.com/rs/zerolog" - "github.com/arangodb/go-driver" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" @@ -35,7 +33,7 @@ import ( ) // createCleanOutPlan creates clean out action if the server is cleaned out and the operator is not aware of it. -func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIObject, spec api.DeploymentSpec, +func (r *Reconciler) createCleanOutPlan(ctx context.Context, _ k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { if spec.GetMode() != api.DeploymentModeCluster { @@ -49,7 +47,7 @@ func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIOb cluster, err := getCluster(ctx, planCtx) if err != nil { - log.Warn().Err(err).Msgf("Unable to get cluster") + r.log.Err(err).Warn("Unable to get cluster") return nil } @@ -57,7 +55,7 @@ func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIOb defer cancel() health, err := cluster.Health(ctxChild) if err != nil { - log.Warn().Err(err).Msgf("Unable to get cluster health") + r.log.Err(err).Warn("Unable to get cluster health") return nil } @@ -76,13 +74,13 @@ func createCleanOutPlan(ctx context.Context, log zerolog.Logger, _ k8sutil.APIOb } if isCleanedOut, err := cluster.IsCleanedOut(ctx, string(id)); err != nil { - log.Warn().Err(err).Str("id", string(id)).Msgf("Unable to get clean out status") + r.log.Err(err).Str("id", string(id)).Warn("Unable to get clean out status") return nil } else if isCleanedOut { - log.Info(). + r.log. Str("role", string(member.Role)). Str("id", string(id)). - Msgf("server is cleaned out so operator must do the same") + Info("server is cleaned out so operator must do the same") action := actions.NewAction(api.ActionTypeSetMemberCondition, api.ServerGroupDBServers, withPredefinedMember(string(id)), "server is cleaned out so operator must do the same"). 
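The hunks above repeat one mechanical rewrite: zerolog call sites that pick the level first and terminate in Msg/Msgf (log.Warn().Err(err).Str("id", id).Msgf("...")) become calls on a stored logging.Logger that chain the fields first and pick the level last, with the level method taking the format string and arguments directly (r.log.Err(err).Str("id", id).Warn("...")); at the same time the free plan-builder functions become methods on *Reconciler so the logger no longer has to be threaded through every signature. The sketch below is a minimal, self-contained stand-in that only mirrors the call shape visible in these hunks; the Logger struct, New, with, emit, the level prefixes and the example field values are illustrative assumptions, not the real pkg/logging implementation.

package main

import (
	"errors"
	"fmt"
)

// Logger is a tiny stand-in mirroring the fluent shape used above:
// fields are chained first, the level method comes last and accepts
// a format string plus arguments (no separate Msg/Msgf terminator).
type Logger struct {
	fields map[string]interface{}
}

func New() Logger { return Logger{fields: map[string]interface{}{}} }

// with returns a copy of the logger with one extra key/value attached.
func (l Logger) with(k string, v interface{}) Logger {
	out := Logger{fields: make(map[string]interface{}, len(l.fields)+1)}
	for key, val := range l.fields {
		out.fields[key] = val
	}
	out.fields[k] = v
	return out
}

func (l Logger) Str(k, v string) Logger                   { return l.with(k, v) }
func (l Logger) Bool(k string, v bool) Logger             { return l.with(k, v) }
func (l Logger) Interface(k string, v interface{}) Logger { return l.with(k, v) }
func (l Logger) Err(err error) Logger                     { return l.with("error", err) }

// emit prints the level, the accumulated fields and the formatted message.
func (l Logger) emit(level, format string, args ...interface{}) {
	fmt.Printf("%-5s %v %s\n", level, l.fields, fmt.Sprintf(format, args...))
}

func (l Logger) Trace(format string, args ...interface{}) { l.emit("TRACE", format, args...) }
func (l Logger) Debug(format string, args ...interface{}) { l.emit("DEBUG", format, args...) }
func (l Logger) Info(format string, args ...interface{})  { l.emit("INFO", format, args...) }
func (l Logger) Warn(format string, args ...interface{})  { l.emit("WARN", format, args...) }
func (l Logger) Error(format string, args ...interface{}) { l.emit("ERROR", format, args...) }

func main() {
	log := New()

	// Old shape (zerolog): log.Warn().Err(err).Str("id", id).Msgf("Unable to get clean out status")
	// New shape (as in the hunks above): fields first, level last.
	log.Err(errors.New("connection refused")).Str("id", "PRMR-1").Warn("Unable to get clean out status")
	log.Str("role", "dbserver").Str("id", "PRMR-1").Info("server is cleaned out so operator must do the same")
}

Because the level method is terminal and variadic, each call site collapses to a single chain, and because the logger value lives on the Reconciler or actionImpl receiver, every plan-builder and shutdown-helper signature in this diff can drop its zerolog.Logger parameter.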
diff --git a/pkg/deployment/reconcile/plan_builder_cluster.go b/pkg/deployment/reconcile/plan_builder_cluster.go index 6c69e5e35..b216188ff 100644 --- a/pkg/deployment/reconcile/plan_builder_cluster.go +++ b/pkg/deployment/reconcile/plan_builder_cluster.go @@ -30,13 +30,11 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) const coordinatorHealthFailedTimeout time.Duration = time.Minute -func createClusterOperationPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createClusterOperationPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { @@ -55,7 +53,7 @@ func createClusterOperationPlan(ctx context.Context, defer cancel() cluster, err := c.Cluster(ctxChild) if err != nil { - log.Warn().Err(err).Msgf("Unable to get Cluster client") + r.log.Err(err).Warn("Unable to get Cluster client") return nil } @@ -63,7 +61,7 @@ func createClusterOperationPlan(ctx context.Context, defer cancel() health, err := cluster.Health(ctxChild) if err != nil { - log.Warn().Err(err).Msgf("Unable to get Cluster health") + r.log.Err(err).Warn("Unable to get Cluster health") return nil } diff --git a/pkg/deployment/reconcile/plan_builder_common.go b/pkg/deployment/reconcile/plan_builder_common.go index d6a4a3ced..378ceb382 100644 --- a/pkg/deployment/reconcile/plan_builder_common.go +++ b/pkg/deployment/reconcile/plan_builder_common.go @@ -29,7 +29,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/deployment/features" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) var ( @@ -38,8 +37,7 @@ var ( } ) -func cleanupConditions(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) cleanupConditions(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { var p api.Plan @@ -53,8 +51,7 @@ func cleanupConditions(ctx context.Context, return p } -func createMaintenanceManagementPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createMaintenanceManagementPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { if spec.Mode.Get() == api.DeploymentModeSingle { @@ -68,12 +65,12 @@ func createMaintenanceManagementPlan(ctx context.Context, agencyState, agencyOK := planCtx.GetAgencyCache() if !agencyOK { - log.Error().Msgf("Unable to get agency mode") + r.log.Error("Unable to get agency mode") return nil } if agencyState.Target.HotBackup.Create.Exists() { - log.Info().Msgf("HotBackup in progress") + r.log.Info("HotBackup in progress") return nil } @@ -82,7 +79,7 @@ func createMaintenanceManagementPlan(ctx context.Context, if (cok && c.IsTrue()) != enabled { // Condition not yet propagated - log.Info().Msgf("Condition not yet propagated") + r.log.Info("Condition not yet propagated") return nil } @@ -96,12 +93,12 @@ func createMaintenanceManagementPlan(ctx context.Context, } if !enabled && spec.Database.GetMaintenance() { - log.Info().Msgf("Enabling maintenance mode") + r.log.Info("Enabling maintenance mode") return api.Plan{actions.NewClusterAction(api.ActionTypeEnableMaintenance)} 
} if enabled && !spec.Database.GetMaintenance() { - log.Info().Msgf("Disabling maintenance mode") + r.log.Info("Disabling maintenance mode") return api.Plan{actions.NewClusterAction(api.ActionTypeDisableMaintenance)} } diff --git a/pkg/deployment/reconcile/plan_builder_encryption.go b/pkg/deployment/reconcile/plan_builder_encryption.go index 73c81b020..6b1c05fbf 100644 --- a/pkg/deployment/reconcile/plan_builder_encryption.go +++ b/pkg/deployment/reconcile/plan_builder_encryption.go @@ -36,7 +36,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" - "github.com/rs/zerolog" ) func skipEncryptionPlan(spec api.DeploymentSpec, status api.DeploymentStatus) bool { @@ -51,8 +50,7 @@ func skipEncryptionPlan(spec api.DeploymentSpec, status api.DeploymentStatus) bo return false } -func createEncryptionKeyStatusPropagatedFieldUpdate(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createEncryptionKeyStatusPropagatedFieldUpdate(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, w WithPlanBuilder, builders ...planBuilder) api.Plan { if skipEncryptionPlan(spec, status) { @@ -88,8 +86,7 @@ func createEncryptionKeyStatusPropagatedFieldUpdate(ctx context.Context, return plan } -func createEncryptionKey(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createEncryptionKey(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if skipEncryptionPlan(spec, status) { @@ -103,7 +100,7 @@ func createEncryptionKey(ctx context.Context, name, _, err := pod.GetEncryptionKeyFromSecret(secret) if err != nil { - log.Error().Err(err).Msgf("Unable to fetch encryption key") + r.log.Err(err).Error("Unable to fetch encryption key") return nil } @@ -113,7 +110,7 @@ func createEncryptionKey(ctx context.Context, keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName())) if !exists { - log.Error().Msgf("Encryption key folder does not exist") + r.log.Error("Encryption key folder does not exist") return nil } @@ -128,7 +125,7 @@ func createEncryptionKey(ctx context.Context, } } - plan, failed := areEncryptionKeysUpToDate(ctx, log, spec, status, context, keyfolder) + plan, failed := r.areEncryptionKeysUpToDate(ctx, spec, status, context, keyfolder) if !plan.IsEmpty() { return plan } @@ -142,15 +139,14 @@ func createEncryptionKey(ctx context.Context, return api.Plan{} } -func createEncryptionKeyStatusUpdate(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createEncryptionKeyStatusUpdate(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if skipEncryptionPlan(spec, status) { return nil } - if createEncryptionKeyStatusUpdateRequired(log, spec, status, context) { + if r.createEncryptionKeyStatusUpdateRequired(spec, status, context) { return api.Plan{actions.NewClusterAction(api.ActionTypeEncryptionKeyStatusUpdate)} } @@ -158,7 +154,7 @@ func createEncryptionKeyStatusUpdate(ctx context.Context, } -func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, +func (r *Reconciler) createEncryptionKeyStatusUpdateRequired(spec 
api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) bool { if skipEncryptionPlan(spec, status) { return false @@ -166,7 +162,7 @@ func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.Deploy keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName())) if !exists { - log.Error().Msgf("Encryption key folder does not exist") + r.log.Error("Encryption key folder does not exist") return false } @@ -175,8 +171,7 @@ func createEncryptionKeyStatusUpdateRequired(log zerolog.Logger, spec api.Deploy return !util.CompareStringArray(keyHashes, status.Hashes.Encryption.Keys) } -func createEncryptionKeyCleanPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createEncryptionKeyCleanPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if skipEncryptionPlan(spec, status) { @@ -185,7 +180,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context, keyfolder, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.GetEncryptionFolderSecretName(context.GetName())) if !exists { - log.Error().Msgf("Encryption key folder does not exist") + r.log.Error("Encryption key folder does not exist") return nil } @@ -214,7 +209,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context, } if _, ok := keyfolder.Data[name]; !ok { - log.Err(err).Msgf("Key from encryption is not in keyfolder - do nothing") + r.log.Err(err).Error("Key from encryption is not in keyfolder - do nothing") return nil } @@ -231,7 +226,7 @@ func createEncryptionKeyCleanPlan(ctx context.Context, return api.Plan{} } -func areEncryptionKeysUpToDate(ctx context.Context, log zerolog.Logger, spec api.DeploymentSpec, +func (r *Reconciler) areEncryptionKeysUpToDate(ctx context.Context, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, folder *core.Secret) (plan api.Plan, failed bool) { status.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error { @@ -240,7 +235,7 @@ func areEncryptionKeysUpToDate(ctx context.Context, log zerolog.Logger, spec api } for _, m := range list { - if updateRequired, failedMember := isEncryptionKeyUpToDate(ctx, log, status, context, group, m, folder); failedMember { + if updateRequired, failedMember := r.isEncryptionKeyUpToDate(ctx, status, context, group, m, folder); failedMember { failed = true continue } else if updateRequired { @@ -255,8 +250,7 @@ func areEncryptionKeysUpToDate(ctx context.Context, log zerolog.Logger, spec api return } -func isEncryptionKeyUpToDate(ctx context.Context, - log zerolog.Logger, status api.DeploymentStatus, +func (r *Reconciler) isEncryptionKeyUpToDate(ctx context.Context, status api.DeploymentStatus, planCtx PlanBuilderContext, group api.ServerGroup, m api.MemberStatus, folder *core.Secret) (updateRequired bool, failed bool) { @@ -268,13 +262,13 @@ func isEncryptionKeyUpToDate(ctx context.Context, return false, false } - mlog := log.With().Str("group", group.AsRole()).Str("member", m.ID).Logger() + log := r.log.Str("group", group.AsRole()).Str("member", m.ID) ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() c, err := planCtx.GetServerClient(ctxChild, group, m.ID) if err != nil { - mlog.Warn().Err(err).Msg("Unable to get client") + log.Err(err).Warn("Unable to get client") return false, true } @@ -284,12 +278,12 @@ func 
isEncryptionKeyUpToDate(ctx context.Context, defer cancel() e, err := client.GetEncryption(ctxChild) if err != nil { - mlog.Error().Err(err).Msgf("Unable to fetch encryption keys") + log.Err(err).Error("Unable to fetch encryption keys") return false, true } if !e.Result.KeysPresent(folder.Data) { - mlog.Info().Msgf("Refresh of encryption keys required") + log.Info("Refresh of encryption keys required") return true, false } diff --git a/pkg/deployment/reconcile/plan_builder_factory.go b/pkg/deployment/reconcile/plan_builder_factory.go index 40dbc4f4b..16541dffe 100644 --- a/pkg/deployment/reconcile/plan_builder_factory.go +++ b/pkg/deployment/reconcile/plan_builder_factory.go @@ -25,31 +25,25 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -type planBuilder func(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +type planBuilder func(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan -type planBuilderCondition func(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +type planBuilderCondition func(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) bool -type planBuilderSubPlan func(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +type planBuilderSubPlan func(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, w WithPlanBuilder, plans ...planBuilder) api.Plan -func NewWithPlanBuilder(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func NewWithPlanBuilder(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) WithPlanBuilder { return &withPlanBuilder{ ctx: ctx, - log: log, apiObject: apiObject, spec: spec, status: status, @@ -65,7 +59,6 @@ type WithPlanBuilder interface { type withPlanBuilder struct { ctx context.Context - log zerolog.Logger apiObject k8sutil.APIObject spec api.DeploymentSpec status api.DeploymentStatus @@ -73,7 +66,7 @@ type withPlanBuilder struct { } func (w withPlanBuilder) ApplyWithCondition(c planBuilderCondition, p planBuilder) api.Plan { - if !c(w.ctx, w.log, w.apiObject, w.spec, w.status, w.context) { + if !c(w.ctx, w.apiObject, w.spec, w.status, w.context) { return api.Plan{} } @@ -81,9 +74,9 @@ func (w withPlanBuilder) ApplyWithCondition(c planBuilderCondition, p planBuilde } func (w withPlanBuilder) ApplySubPlan(p planBuilderSubPlan, plans ...planBuilder) api.Plan { - return p(w.ctx, w.log, w.apiObject, w.spec, w.status, w.context, w, plans...) + return p(w.ctx, w.apiObject, w.spec, w.status, w.context, w, plans...) 
} func (w withPlanBuilder) Apply(p planBuilder) api.Plan { - return p(w.ctx, w.log, w.apiObject, w.spec, w.status, w.context) + return p(w.ctx, w.apiObject, w.spec, w.status, w.context) } diff --git a/pkg/deployment/reconcile/plan_builder_generator.go b/pkg/deployment/reconcile/plan_builder_generator.go index f3d9bee06..21425ddda 100644 --- a/pkg/deployment/reconcile/plan_builder_generator.go +++ b/pkg/deployment/reconcile/plan_builder_generator.go @@ -27,7 +27,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) type planGenerationOutput struct { @@ -37,7 +36,7 @@ type planGenerationOutput struct { planner planner } -type planGeneratorFunc func(ctx context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, +type planGeneratorFunc func(ctx context.Context, apiObject k8sutil.APIObject, currentPlan api.Plan, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (api.Plan, api.BackOff, bool) @@ -51,7 +50,7 @@ func (d *Reconciler) generatePlanFunc(gen planGeneratorFunc, planner planner) pl spec := d.context.GetSpec() status, _ := d.context.GetStatus() builderCtx := newPlanBuilderContext(d.context) - newPlan, backoffs, changed := gen(ctx, d.log, apiObject, planner.Get(&status), spec, status, builderCtx) + newPlan, backoffs, changed := gen(ctx, apiObject, planner.Get(&status), spec, status, builderCtx) return planGenerationOutput{ plan: newPlan, @@ -85,9 +84,9 @@ func (d *Reconciler) generatePlan(ctx context.Context, generators ...planGenerat action := result.plan[id] d.context.CreateEvent(k8sutil.NewPlanAppendEvent(d.context.GetAPIObject(), action.Type.String(), action.Group.AsRole(), action.MemberID, action.Reason)) if r := action.Reason; r != "" { - d.log.Info().Str("Action", action.Type.String()). + d.log.Str("Action", action.Type.String()). Str("Role", action.Group.AsRole()).Str("Member", action.MemberID). - Str("Type", strings.Title(result.planner.Type())).Msgf(r) + Str("Type", strings.Title(result.planner.Type())).Info(r) } } diff --git a/pkg/deployment/reconcile/plan_builder_high.go b/pkg/deployment/reconcile/plan_builder_high.go index e15453234..31eb4f673 100644 --- a/pkg/deployment/reconcile/plan_builder_high.go +++ b/pkg/deployment/reconcile/plan_builder_high.go @@ -29,14 +29,13 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" ) // createHighPlan considers the given specification & status and creates a plan to get the status in line with the specification. // If a plan already exists, the given plan is returned with false. // Otherwise the new plan is returned with a boolean true. -func createHighPlan(ctx context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createHighPlan(ctx context.Context, apiObject k8sutil.APIObject, currentPlan api.Plan, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (api.Plan, api.BackOff, bool) { @@ -45,29 +44,28 @@ func createHighPlan(ctx context.Context, log zerolog.Logger, apiObject k8sutil.A return currentPlan, nil, false } - r := recoverPlanAppender(log, newPlanAppender(NewWithPlanBuilder(ctx, log, apiObject, spec, status, builderCtx), status.BackOff, currentPlan). 
- ApplyIfEmpty(updateMemberPodTemplateSpec). - ApplyIfEmpty(updateMemberPhasePlan). - ApplyIfEmpty(createCleanOutPlan). - ApplyIfEmpty(updateMemberUpdateConditionsPlan). - ApplyIfEmpty(updateMemberRotationConditionsPlan). - ApplyIfEmpty(createMemberRecreationConditionsPlan). - ApplyIfEmpty(createRotateServerStoragePVCPendingResizeConditionPlan). - ApplyIfEmpty(createTopologyMemberUpdatePlan). - ApplyIfEmptyWithBackOff(LicenseCheck, 30*time.Second, updateClusterLicense). - ApplyIfEmpty(createTopologyMemberConditionPlan). - ApplyIfEmpty(createRebalancerCheckPlan). - ApplyWithBackOff(BackOffCheck, time.Minute, emptyPlanBuilder)). - Apply(createBackupInProgressConditionPlan). // Discover backups always - Apply(createMaintenanceConditionPlan). // Discover maintenance always - Apply(cleanupConditions) // Cleanup Conditions + q := recoverPlanAppender(r.log, newPlanAppender(NewWithPlanBuilder(ctx, apiObject, spec, status, builderCtx), status.BackOff, currentPlan). + ApplyIfEmpty(r.updateMemberPodTemplateSpec). + ApplyIfEmpty(r.updateMemberPhasePlan). + ApplyIfEmpty(r.createCleanOutPlan). + ApplyIfEmpty(r.updateMemberUpdateConditionsPlan). + ApplyIfEmpty(r.updateMemberRotationConditionsPlan). + ApplyIfEmpty(r.createMemberRecreationConditionsPlan). + ApplyIfEmpty(r.createRotateServerStoragePVCPendingResizeConditionPlan). + ApplyIfEmpty(r.createTopologyMemberUpdatePlan). + ApplyIfEmptyWithBackOff(LicenseCheck, 30*time.Second, r.updateClusterLicense). + ApplyIfEmpty(r.createTopologyMemberConditionPlan). + ApplyIfEmpty(r.createRebalancerCheckPlan). + ApplyWithBackOff(BackOffCheck, time.Minute, r.emptyPlanBuilder)). + Apply(r.createBackupInProgressConditionPlan). // Discover backups always + Apply(r.createMaintenanceConditionPlan). // Discover maintenance always + Apply(r.cleanupConditions) // Cleanup Conditions - return r.Plan(), r.BackOff(), true + return q.Plan(), q.BackOff(), true } // updateMemberPodTemplateSpec creates plan to update member Spec -func updateMemberPodTemplateSpec(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) updateMemberPodTemplateSpec(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -76,7 +74,7 @@ func updateMemberPodTemplateSpec(ctx context.Context, status.Members.ForeachServerGroup(func(group api.ServerGroup, members api.MemberStatusList) error { for _, m := range members { if m.Phase != api.MemberPhaseNone { - if reason, changed := arangoMemberPodTemplateNeedsUpdate(ctx, log, apiObject, spec, group, status, m, context); changed { + if reason, changed := r.arangoMemberPodTemplateNeedsUpdate(ctx, apiObject, spec, group, status, m, context); changed { plan = append(plan, actions.NewAction(api.ActionTypeArangoMemberUpdatePodSpec, group, m, reason)) } } @@ -89,8 +87,7 @@ func updateMemberPodTemplateSpec(ctx context.Context, } // updateMemberPhasePlan creates plan to update member phase -func updateMemberPhasePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) updateMemberPhasePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -129,8 +126,7 @@ func tlsRotateConditionAction(group api.ServerGroup, memberID string, reason str return actions.NewAction(api.ActionTypeSetMemberCondition, group, withPredefinedMember(memberID), 
reason).AddParam(api.ConditionTypePendingTLSRotation.String(), "T") } -func updateMemberUpdateConditionsPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) updateMemberUpdateConditionsPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -154,15 +150,14 @@ func updateMemberUpdateConditionsPlan(ctx context.Context, return nil }); err != nil { - log.Err(err).Msgf("Error while generating update plan") + r.log.Err(err).Error("Error while generating update plan") return nil } return plan } -func updateMemberRotationConditionsPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) updateMemberRotationConditionsPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -179,7 +174,7 @@ func updateMemberRotationConditionsPlan(ctx context.Context, p = nil } - if p, err := updateMemberRotationConditions(log, apiObject, spec, m, group, p, context); err != nil { + if p, err := r.updateMemberRotationConditions(apiObject, spec, m, group, p, context); err != nil { return err } else if len(p) > 0 { plan = append(plan, p...) @@ -188,14 +183,14 @@ func updateMemberRotationConditionsPlan(ctx context.Context, return nil }); err != nil { - log.Err(err).Msgf("Error while generating rotation plan") + r.log.Err(err).Error("Error while generating rotation plan") return nil } return plan } -func updateMemberRotationConditions(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, member api.MemberStatus, group api.ServerGroup, p *core.Pod, context PlanBuilderContext) (api.Plan, error) { +func (r *Reconciler) updateMemberRotationConditions(apiObject k8sutil.APIObject, spec api.DeploymentSpec, member api.MemberStatus, group api.ServerGroup, p *core.Pod, context PlanBuilderContext) (api.Plan, error) { if member.Conditions.IsTrue(api.ConditionTypeRestart) { return nil, nil } @@ -205,16 +200,16 @@ func updateMemberRotationConditions(log zerolog.Logger, apiObject k8sutil.APIObj return nil, nil } - if m, _, reason, err := rotation.IsRotationRequired(log, context.ACS(), spec, member, group, p, arangoMember.Spec.Template, arangoMember.Status.Template); err != nil { - log.Error().Err(err).Msgf("Error while getting rotation details") + if m, _, reason, err := rotation.IsRotationRequired(context.ACS(), spec, member, group, p, arangoMember.Spec.Template, arangoMember.Status.Template); err != nil { + r.log.Err(err).Error("Error while getting rotation details") return nil, err } else { switch m { case rotation.EnforcedRotation: if reason != "" { - log.Info().Bool("enforced", true).Msgf(reason) + r.log.Bool("enforced", true).Info(reason) } else { - log.Info().Bool("enforced", true).Msgf("Unknown reason") + r.log.Bool("enforced", true).Info("Unknown reason") } // We need to do enforced rotation return api.Plan{restartMemberConditionAction(group, member.ID, reason)}, nil @@ -233,9 +228,9 @@ func updateMemberRotationConditions(log zerolog.Logger, apiObject k8sutil.APIObj return api.Plan{actions.NewAction(api.ActionTypeArangoMemberUpdatePodStatus, group, member, "Propagating status of pod").AddParam(ActionTypeArangoMemberUpdatePodStatusChecksum, arangoMember.Spec.Template.GetChecksum())}, nil case rotation.GracefulRotation: if reason != "" { - log.Info().Bool("enforced", false).Msgf(reason) + 
r.log.Bool("enforced", false).Info(reason) } else { - log.Info().Bool("enforced", false).Msgf("Unknown reason") + r.log.Bool("enforced", false).Info("Unknown reason") } // We need to do graceful rotation if member.Conditions.IsTrue(api.ConditionTypePendingRestart) { diff --git a/pkg/deployment/reconcile/plan_builder_jwt.go b/pkg/deployment/reconcile/plan_builder_jwt.go index 55c14a9b0..06996828b 100644 --- a/pkg/deployment/reconcile/plan_builder_jwt.go +++ b/pkg/deployment/reconcile/plan_builder_jwt.go @@ -31,7 +31,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/client" - "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" @@ -40,11 +39,9 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func createJWTKeyUpdate(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createJWTKeyUpdate(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if folder, err := ensureJWTFolderSupport(spec, status); err != nil || !folder { @@ -53,50 +50,50 @@ func createJWTKeyUpdate(ctx context.Context, folder, ok := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(apiObject.GetName())) if !ok { - log.Error().Msgf("Unable to get JWT folder info") + r.planLogger.Error("Unable to get JWT folder info") return nil } s, ok := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(spec.Authentication.GetJWTSecretName()) if !ok { - log.Info().Msgf("JWT Secret is missing, no rotation will take place") + r.planLogger.Info("JWT Secret is missing, no rotation will take place") return nil } jwt, ok := s.Data[constants.SecretKeyToken] if !ok { - log.Warn().Msgf("JWT Secret is invalid, no rotation will take place") - return addJWTPropagatedPlanAction(status) + r.planLogger.Warn("JWT Secret is invalid, no rotation will take place") + return r.addJWTPropagatedPlanAction(status) } jwtSha := util.SHA256(jwt) if _, ok := folder.Data[jwtSha]; !ok { - return addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTAdd, "Add JWTRotation key").AddParam(checksum, jwtSha)) + return r.addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTAdd, "Add JWTRotation key").AddParam(checksum, jwtSha)) } activeKey, ok := folder.Data[pod.ActiveJWTKey] if !ok { - return addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTSetActive, "Set active key").AddParam(checksum, jwtSha)) + return r.addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTSetActive, "Set active key").AddParam(checksum, jwtSha)) } tokenKey, ok := folder.Data[constants.SecretKeyToken] if !ok || util.SHA256(activeKey) != util.SHA256(tokenKey) { - return addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTSetActive, "Set active key and add token field").AddParam(checksum, jwtSha)) + return r.addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTSetActive, "Set active key and add token field").AddParam(checksum, jwtSha)) } - plan, failed := areJWTTokensUpToDate(ctx, log, status, context, folder) + plan, failed := r.areJWTTokensUpToDate(ctx, status, context, folder) if len(plan) > 0 { return plan } if failed { - log.Info().Msgf("JWT Failed on one pod, no rotation will 
take place") + r.planLogger.Info("JWT Failed on one pod, no rotation will take place") return nil } if util.SHA256(activeKey) != jwtSha { - return addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTSetActive, "Set active key").AddParam(checksum, jwtSha)) + return r.addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTSetActive, "Set active key").AddParam(checksum, jwtSha)) } for key := range folder.Data { @@ -108,32 +105,31 @@ func createJWTKeyUpdate(ctx context.Context, continue } - return addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTClean, "Remove old key").AddParam(checksum, key)) + return r.addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTClean, "Remove old key").AddParam(checksum, key)) } - return addJWTPropagatedPlanAction(status) + return r.addJWTPropagatedPlanAction(status) } -func createJWTStatusUpdate(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createJWTStatusUpdate(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if _, err := ensureJWTFolderSupport(spec, status); err != nil { return nil } - if createJWTStatusUpdateRequired(log, apiObject, spec, status, context) { - return addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTStatusUpdate, "Update status")) + if r.createJWTStatusUpdateRequired(apiObject, spec, status, context) { + return r.addJWTPropagatedPlanAction(status, actions.NewClusterAction(api.ActionTypeJWTStatusUpdate, "Update status")) } return nil } -func createJWTStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, +func (r *Reconciler) createJWTStatusUpdateRequired(apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) bool { folder, err := ensureJWTFolderSupport(spec, status) if err != nil { - log.Error().Err(err).Msgf("Action not supported") + r.planLogger.Err(err).Error("Action not supported") return false } @@ -144,20 +140,20 @@ func createJWTStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje f, ok := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(spec.Authentication.GetJWTSecretName()) if !ok { - log.Error().Msgf("Unable to get JWT secret info") + r.planLogger.Error("Unable to get JWT secret info") return false } key, ok := f.Data[constants.SecretKeyToken] if !ok { - log.Error().Msgf("JWT Token is invalid") + r.planLogger.Error("JWT Token is invalid") return false } keySha := fmt.Sprintf("sha256:%s", util.SHA256(key)) if status.Hashes.JWT.Active != keySha { - log.Error().Msgf("JWT Token is invalid") + r.planLogger.Error("JWT Token is invalid") return true } @@ -166,7 +162,7 @@ func createJWTStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje f, ok := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(pod.JWTSecretFolder(apiObject.GetName())) if !ok { - log.Error().Msgf("Unable to get JWT folder info") + r.planLogger.Error("Unable to get JWT folder info") return false } @@ -203,7 +199,7 @@ func createJWTStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje return !util.CompareStringArray(keys, status.Hashes.JWT.Passive) } -func areJWTTokensUpToDate(ctx context.Context, log zerolog.Logger, status api.DeploymentStatus, +func (r *Reconciler) areJWTTokensUpToDate(ctx context.Context, status api.DeploymentStatus, planCtx 
PlanBuilderContext, folder *core.Secret) (plan api.Plan, failed bool) { gCtx, c := context.WithTimeout(ctx, 2*time.Second) defer c() @@ -212,7 +208,7 @@ func areJWTTokensUpToDate(ctx context.Context, log zerolog.Logger, status api.De for _, m := range list { nCtx, c := context.WithTimeout(gCtx, 500*time.Millisecond) defer c() - if updateRequired, failedMember := isJWTTokenUpToDate(nCtx, log, status, planCtx, group, m, folder); failedMember { + if updateRequired, failedMember := r.isJWTTokenUpToDate(nCtx, status, planCtx, group, m, folder); failedMember { failed = true continue } else if updateRequired { @@ -227,7 +223,7 @@ func areJWTTokensUpToDate(ctx context.Context, log zerolog.Logger, status api.De return } -func isJWTTokenUpToDate(ctx context.Context, log zerolog.Logger, status api.DeploymentStatus, context PlanBuilderContext, +func (r *Reconciler) isJWTTokenUpToDate(ctx context.Context, status api.DeploymentStatus, context PlanBuilderContext, group api.ServerGroup, m api.MemberStatus, folder *core.Secret) (updateRequired bool, failed bool) { if m.Phase != api.MemberPhaseCreated { return false, true @@ -237,16 +233,16 @@ func isJWTTokenUpToDate(ctx context.Context, log zerolog.Logger, status api.Depl return false, false } - mlog := log.With().Str("group", group.AsRole()).Str("member", m.ID).Logger() + log := r.planLogger.Str("group", group.AsRole()).Str("member", m.ID) c, err := context.GetServerClient(ctx, group, m.ID) if err != nil { - mlog.Warn().Err(err).Msg("Unable to get client") + log.Err(err).Warn("Unable to get client") return false, true } if updateRequired, err := isMemberJWTTokenInvalid(ctx, client.NewClient(c.Connection()), folder.Data, false); err != nil { - mlog.Warn().Err(err).Msg("JWT UpToDate Check failed") + log.Err(err).Warn("JWT UpToDate Check failed") return false, true } else if updateRequired { return true, false @@ -255,7 +251,7 @@ func isJWTTokenUpToDate(ctx context.Context, log zerolog.Logger, status api.Depl return false, false } -func addJWTPropagatedPlanAction(s api.DeploymentStatus, acts ...api.Action) api.Plan { +func (r *Reconciler) addJWTPropagatedPlanAction(s api.DeploymentStatus, acts ...api.Action) api.Plan { got := len(acts) != 0 cond := conditionFalse if !got { @@ -288,7 +284,6 @@ func isMemberJWTTokenInvalid(ctx context.Context, c client.Client, data map[stri if jwtActive, ok := data[pod.ActiveJWTKey]; !ok { return false, errors.Newf("Missing Active JWT Token in folder") } else if util.SHA256(jwtActive) != e.Result.Active.GetSHA().Checksum() { - log.Info().Str("active", e.Result.Active.GetSHA().Checksum()).Str("expected", util.SHA256(jwtActive)).Msgf("Active key is invalid") return true, nil } @@ -306,7 +301,6 @@ func compareJWTKeys(e client.Entries, keys map[string][]byte) bool { } if !e.Contains(k) { - log.Info().Msgf("Missing JWT Key") return false } } diff --git a/pkg/deployment/reconcile/plan_builder_license.go b/pkg/deployment/reconcile/plan_builder_license.go index a122b9e40..eba086214 100644 --- a/pkg/deployment/reconcile/plan_builder_license.go +++ b/pkg/deployment/reconcile/plan_builder_license.go @@ -29,11 +29,9 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/arangod" "github.com/arangodb/kube-arangodb/pkg/util/globals" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func updateClusterLicense(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) updateClusterLicense(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status 
api.DeploymentStatus, context PlanBuilderContext) api.Plan { if !spec.License.HasSecretName() { @@ -42,12 +40,12 @@ func updateClusterLicense(ctx context.Context, l, ok := k8sutil.GetLicenseFromSecret(context.ACS().CurrentClusterCache(), spec.License.GetSecretName()) if !ok { - log.Trace().Str("secret", spec.Authentication.GetJWTSecretName()).Msgf("Unable to find license secret key") + r.log.Str("secret", spec.Authentication.GetJWTSecretName()).Trace("Unable to find license secret key") return nil } if !l.V2.IsV2Set() { - log.Trace().Str("secret", spec.Authentication.GetJWTSecretName()).Msgf("V2 License key is not set") + r.log.Str("secret", spec.Authentication.GetJWTSecretName()).Trace("V2 License key is not set") return nil } @@ -62,7 +60,7 @@ func updateClusterLicense(ctx context.Context, if len(members) == 0 { // No member found to take this action - log.Trace().Msgf("No enterprise member in version 3.9.0 or above") + r.log.Trace("No enterprise member in version 3.9.0 or above") return nil } @@ -73,14 +71,14 @@ func updateClusterLicense(ctx context.Context, c, err := context.GetServerClient(ctxChild, member.Group, member.Member.ID) if err != nil { - log.Err(err).Msgf("Unable to get client") + r.log.Err(err).Error("Unable to get client") return nil } internalClient := client.NewClient(c.Connection()) if ok, err := licenseV2Compare(ctxChild, internalClient, l.V2); err != nil { - log.Error().Err(err).Msg("Unable to verify license") + r.log.Err(err).Error("Unable to verify license") return nil } else if ok { if c, _ := status.Conditions.Get(api.ConditionTypeLicenseSet); !c.IsTrue() || c.Hash != l.V2.V2Hash() { diff --git a/pkg/deployment/reconcile/plan_builder_maintenance.go b/pkg/deployment/reconcile/plan_builder_maintenance.go index 193aee27e..347b13571 100644 --- a/pkg/deployment/reconcile/plan_builder_maintenance.go +++ b/pkg/deployment/reconcile/plan_builder_maintenance.go @@ -25,11 +25,9 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func createBackupInProgressConditionPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createBackupInProgressConditionPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { @@ -77,8 +75,7 @@ func createBackupInProgressConditionPlan(ctx context.Context, } } -func createMaintenanceConditionPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createMaintenanceConditionPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { diff --git a/pkg/deployment/reconcile/plan_builder_normal.go b/pkg/deployment/reconcile/plan_builder_normal.go index b355fbba2..68d35b5a9 100644 --- a/pkg/deployment/reconcile/plan_builder_normal.go +++ b/pkg/deployment/reconcile/plan_builder_normal.go @@ -26,13 +26,12 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) // createNormalPlan considers the given specification & status and creates a plan to get the status in line with the specification. // If a plan already exists, the given plan is returned with false. // Otherwise the new plan is returned with a boolean true. 
-func createNormalPlan(ctx context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createNormalPlan(ctx context.Context, apiObject k8sutil.APIObject, currentPlan api.Plan, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (api.Plan, api.BackOff, bool) { @@ -41,53 +40,52 @@ func createNormalPlan(ctx context.Context, log zerolog.Logger, apiObject k8sutil return currentPlan, nil, false } - r := recoverPlanAppender(log, newPlanAppender(NewWithPlanBuilder(ctx, log, apiObject, spec, status, builderCtx), status.BackOff, currentPlan). + q := recoverPlanAppender(r.log, newPlanAppender(NewWithPlanBuilder(ctx, apiObject, spec, status, builderCtx), status.BackOff, currentPlan). // Define topology - ApplyIfEmpty(createTopologyEnablementPlan). + ApplyIfEmpty(r.createTopologyEnablementPlan). // Adjust topology settings - ApplyIfEmpty(createTopologyMemberAdjustmentPlan). - ApplyIfEmpty(createTopologyUpdatePlan). + ApplyIfEmpty(r.createTopologyMemberAdjustmentPlan). + ApplyIfEmpty(r.createTopologyUpdatePlan). // Check for scale up - ApplyIfEmpty(createScaleUPMemberPlan). + ApplyIfEmpty(r.createScaleUPMemberPlan). // Check for failed members - ApplyIfEmpty(createMemberFailedRestorePlan). + ApplyIfEmpty(r.createMemberFailedRestorePlan). // Check for scale up/down - ApplyIfEmpty(createScaleMemberPlan). + ApplyIfEmpty(r.createScaleMemberPlan). // Update status - ApplySubPlanIfEmpty(createEncryptionKeyStatusPropagatedFieldUpdate, createEncryptionKeyStatusUpdate). - ApplyIfEmpty(createTLSStatusUpdate). - ApplyIfEmpty(createJWTStatusUpdate). + ApplySubPlanIfEmpty(r.createEncryptionKeyStatusPropagatedFieldUpdate, r.createEncryptionKeyStatusUpdate). + ApplyIfEmpty(r.createTLSStatusUpdate). + ApplyIfEmpty(r.createJWTStatusUpdate). // Check for cleaned out dbserver in created state - ApplyIfEmpty(createRemoveCleanedDBServersPlan). + ApplyIfEmpty(r.createRemoveCleanedDBServersPlan). // Check for members to be removed - ApplyIfEmpty(createReplaceMemberPlan). + ApplyIfEmpty(r.createReplaceMemberPlan). // Check for the need to rotate one or more members - ApplyIfEmpty(createMarkToRemovePlan). - ApplyIfEmpty(createRotateOrUpgradePlan). + ApplyIfEmpty(r.createMarkToRemovePlan). + ApplyIfEmpty(r.createRotateOrUpgradePlan). // Disable maintenance if upgrade process was done. Upgrade task throw IDLE Action if upgrade is pending - ApplyIfEmpty(createMaintenanceManagementPlan). + ApplyIfEmpty(r.createMaintenanceManagementPlan). // Add keys - ApplySubPlanIfEmpty(createEncryptionKeyStatusPropagatedFieldUpdate, createEncryptionKey). - ApplyIfEmpty(createJWTKeyUpdate). - ApplySubPlanIfEmpty(createTLSStatusPropagatedFieldUpdate, createCARenewalPlan). - ApplySubPlanIfEmpty(createTLSStatusPropagatedFieldUpdate, createCAAppendPlan). - ApplyIfEmpty(createKeyfileRenewalPlan). - ApplyIfEmpty(createRotateServerStorageResizePlan). - ApplySubPlanIfEmpty(createTLSStatusPropagatedFieldUpdate, createRotateTLSServerSNIPlan). - ApplyIfEmpty(createRestorePlan). - ApplySubPlanIfEmpty(createEncryptionKeyStatusPropagatedFieldUpdate, createEncryptionKeyCleanPlan). - ApplySubPlanIfEmpty(createTLSStatusPropagatedFieldUpdate, createCACleanPlan). - ApplyIfEmpty(createClusterOperationPlan). - ApplyIfEmpty(createRebalancerGeneratePlan). + ApplySubPlanIfEmpty(r.createEncryptionKeyStatusPropagatedFieldUpdate, r.createEncryptionKey). + ApplyIfEmpty(r.createJWTKeyUpdate). + ApplySubPlanIfEmpty(r.createTLSStatusPropagatedFieldUpdate, r.createCARenewalPlan). 
+ ApplySubPlanIfEmpty(r.createTLSStatusPropagatedFieldUpdate, r.createCAAppendPlan). + ApplyIfEmpty(r.createKeyfileRenewalPlan). + ApplyIfEmpty(r.createRotateServerStorageResizePlan). + ApplySubPlanIfEmpty(r.createTLSStatusPropagatedFieldUpdate, r.createRotateTLSServerSNIPlan). + ApplyIfEmpty(r.createRestorePlan). + ApplySubPlanIfEmpty(r.createEncryptionKeyStatusPropagatedFieldUpdate, r.createEncryptionKeyCleanPlan). + ApplySubPlanIfEmpty(r.createTLSStatusPropagatedFieldUpdate, r.createCACleanPlan). + ApplyIfEmpty(r.createClusterOperationPlan). + ApplyIfEmpty(r.createRebalancerGeneratePlan). // Final - ApplyIfEmpty(createTLSStatusPropagated). - ApplyIfEmpty(createBootstrapPlan)) + ApplyIfEmpty(r.createTLSStatusPropagated). + ApplyIfEmpty(r.createBootstrapPlan)) - return r.Plan(), r.BackOff(), true + return q.Plan(), q.BackOff(), true } -func createMemberFailedRestorePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createMemberFailedRestorePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -108,12 +106,12 @@ func createMemberFailedRestorePlan(ctx context.Context, continue } - memberLog := log.Info().Str("id", m.ID).Str("role", group.AsRole()) + memberLog := r.log.Str("id", m.ID).Str("role", group.AsRole()) if group == api.ServerGroupDBServers && spec.GetMode() == api.DeploymentModeCluster { // Do pre check for DBServers. If agency is down DBServers should not be touch if !agencyOK { - memberLog.Msg("Agency state is not present") + memberLog.Info("Agency state is not present") continue } @@ -124,7 +122,7 @@ func createMemberFailedRestorePlan(ctx context.Context, if agencyState.Plan.Collections.IsDBServerInDatabases(m.ID) { // DBServer still exists in agency plan! Will not be removed, but needs to be recreated - memberLog.Msg("Recreating DBServer - it cannot be removed gracefully") + memberLog.Info("Recreating DBServer - it cannot be removed gracefully") plan = append(plan, actions.NewAction(api.ActionTypeRecreateMember, group, m)) continue @@ -136,24 +134,24 @@ func createMemberFailedRestorePlan(ctx context.Context, switch group { case api.ServerGroupAgents: // For agents just recreate member do not rotate ID, do not remove PVC or service - memberLog.Msg("Restoring old member. For agency members recreation of PVC is not supported - to prevent DataLoss") + memberLog.Info("Restoring old member. For agency members recreation of PVC is not supported - to prevent DataLoss") plan = append(plan, actions.NewAction(api.ActionTypeRecreateMember, group, m)) case api.ServerGroupSingle: // Do not remove data for singles - memberLog.Msg("Restoring old member. Rotation for single servers is not safe") + memberLog.Info("Restoring old member. Rotation for single servers is not safe") plan = append(plan, actions.NewAction(api.ActionTypeRecreateMember, group, m)) default: if spec.GetAllowMemberRecreation(group) { - memberLog.Msg("Creating member replacement plan because member has failed") + memberLog.Info("Creating member replacement plan because member has failed") plan = append(plan, actions.NewAction(api.ActionTypeRemoveMember, group, m), actions.NewAction(api.ActionTypeAddMember, group, withPredefinedMember("")), actions.NewAction(api.ActionTypeWaitForMemberUp, group, withPredefinedMember(api.MemberIDPreviousAction)), ) } else { - memberLog.Msg("Restoring old member. Recreation is disabled for group") + memberLog.Info("Restoring old member. 
Recreation is disabled for group") plan = append(plan, actions.NewAction(api.ActionTypeRecreateMember, group, m)) } @@ -164,7 +162,7 @@ func createMemberFailedRestorePlan(ctx context.Context, // Ensure that we were able to get agency info if len(plan) == 0 && !agencyOK { - log.Warn().Msgf("unable to build further plan without access to agency") + r.log.Warn("unable to build further plan without access to agency") plan = append(plan, actions.NewClusterAction(api.ActionTypeIdle)) } @@ -172,8 +170,7 @@ func createMemberFailedRestorePlan(ctx context.Context, return plan } -func createRemoveCleanedDBServersPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRemoveCleanedDBServersPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { for _, m := range status.Members.DBServers { @@ -183,10 +180,10 @@ func createRemoveCleanedDBServersPlan(ctx context.Context, } if m.Phase.IsCreatedOrDrain() && m.Conditions.IsTrue(api.ConditionTypeCleanedOut) { - log.Debug(). + r.log. Str("id", m.ID). Str("role", api.ServerGroupDBServers.AsRole()). - Msg("Creating dbserver replacement plan because server is cleanout in created phase") + Debug("Creating dbserver replacement plan because server is cleanout in created phase") return cleanOutMember(api.ServerGroupDBServers, m) } } diff --git a/pkg/deployment/reconcile/plan_builder_rebalancer.community.go b/pkg/deployment/reconcile/plan_builder_rebalancer.community.go index 3c5776f1b..649edcc59 100644 --- a/pkg/deployment/reconcile/plan_builder_rebalancer.community.go +++ b/pkg/deployment/reconcile/plan_builder_rebalancer.community.go @@ -26,18 +26,15 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func createRebalancerGeneratePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRebalancerGeneratePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil } -func createRebalancerCheckPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRebalancerCheckPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil diff --git a/pkg/deployment/reconcile/plan_builder_resources.go b/pkg/deployment/reconcile/plan_builder_resources.go index 8405622bf..3d7042781 100644 --- a/pkg/deployment/reconcile/plan_builder_resources.go +++ b/pkg/deployment/reconcile/plan_builder_resources.go @@ -25,10 +25,9 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func createResourcesPlan(ctx context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createResourcesPlan(ctx context.Context, apiObject k8sutil.APIObject, currentPlan api.Plan, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (api.Plan, api.BackOff, bool) { @@ -37,7 +36,7 @@ func createResourcesPlan(ctx context.Context, log zerolog.Logger, apiObject k8su return currentPlan, nil, false } - r := recoverPlanAppender(log, newPlanAppender(NewWithPlanBuilder(ctx, log, apiObject, spec, status, builderCtx), status.BackOff, 
currentPlan)) + q := recoverPlanAppender(r.planLogger, newPlanAppender(NewWithPlanBuilder(ctx, apiObject, spec, status, builderCtx), status.BackOff, currentPlan)) - return r.Plan(), r.BackOff(), true + return q.Plan(), q.BackOff(), true } diff --git a/pkg/deployment/reconcile/plan_builder_restore.go b/pkg/deployment/reconcile/plan_builder_restore.go index 172dd579c..400d12c45 100644 --- a/pkg/deployment/reconcile/plan_builder_restore.go +++ b/pkg/deployment/reconcile/plan_builder_restore.go @@ -29,13 +29,11 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/deployment/pod" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) const secretActionParam = "secret" -func createRestorePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRestorePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if spec.RestoreFrom == nil && status.Restore != nil { @@ -47,17 +45,17 @@ func createRestorePlan(ctx context.Context, if spec.RestoreFrom != nil && status.Restore == nil { backup, err := context.GetBackup(ctx, spec.GetRestoreFrom()) if err != nil { - log.Warn().Err(err).Msg("Backup not found") + r.planLogger.Err(err).Warn("Backup not found") return nil } if backup.Status.Backup == nil { - log.Warn().Msg("Backup not yet ready") + r.planLogger.Warn("Backup not yet ready") return nil } if spec.RocksDB.IsEncrypted() { - if ok, p := createRestorePlanEncryption(ctx, log, spec, status, context); !ok { + if ok, p := r.createRestorePlanEncryption(ctx, spec, status, context); !ok { return nil } else if !p.IsEmpty() { return p @@ -65,7 +63,7 @@ func createRestorePlan(ctx context.Context, if i := status.CurrentImage; i != nil && features.EncryptionRotation().Supported(i.ArangoDBVersion, i.Enterprise) { if !status.Hashes.Encryption.Propagated { - log.Warn().Msg("Backup not able to be restored in non propagated state") + r.planLogger.Warn("Backup not able to be restored in non propagated state") return nil } } @@ -90,7 +88,7 @@ func restorePlan(spec api.DeploymentSpec) api.Plan { return p } -func createRestorePlanEncryption(ctx context.Context, log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (bool, api.Plan) { +func (r *Reconciler) createRestorePlanEncryption(ctx context.Context, spec api.DeploymentSpec, status api.DeploymentStatus, builderCtx PlanBuilderContext) (bool, api.Plan) { if spec.RestoreEncryptionSecret != nil { if !spec.RocksDB.IsEncrypted() { @@ -110,12 +108,12 @@ func createRestorePlanEncryption(ctx context.Context, log zerolog.Logger, spec a // Additional logic to do restore with encryption key name, _, exists, err := pod.GetEncryptionKey(ctx, builderCtx.ACS().CurrentClusterCache().Secret().V1().Read(), secret) if err != nil { - log.Err(err).Msgf("Unable to fetch encryption key") + r.planLogger.Err(err).Error("Unable to fetch encryption key") return false, nil } if !exists { - log.Error().Msgf("Unable to fetch encryption key - key is empty or missing") + r.planLogger.Error("Unable to fetch encryption key - key is empty or missing") return false, nil } diff --git a/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go b/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go index 29492cc18..722d2ad68 100644 --- a/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go +++ 
b/pkg/deployment/reconcile/plan_builder_rotate_upgrade.go @@ -37,7 +37,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/agency" "github.com/arangodb/kube-arangodb/pkg/deployment/rotation" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" ) @@ -67,13 +66,12 @@ type upgradeDecision struct { } // createRotateOrUpgradePlan goes over all pods to check if an upgrade or rotate is needed. -func createRotateOrUpgradePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRotateOrUpgradePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan - newPlan, idle := createRotateOrUpgradePlanInternal(log, apiObject, spec, status, context) + newPlan, idle := r.createRotateOrUpgradePlanInternal(apiObject, spec, status, context) if idle { plan = append(plan, actions.NewClusterAction(api.ActionTypeIdle)) @@ -83,8 +81,7 @@ func createRotateOrUpgradePlan(ctx context.Context, return plan } -func createMarkToRemovePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createMarkToRemovePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -122,13 +119,13 @@ func createMarkToRemovePlan(ctx context.Context, return plan } -func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) (api.Plan, bool) { - decision := createRotateOrUpgradeDecision(log, spec, status, context) +func (r *Reconciler) createRotateOrUpgradePlanInternal(apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) (api.Plan, bool) { + decision := r.createRotateOrUpgradeDecision(spec, status, context) if decision.IsUpgrade() { - return createUpgradePlanInternalCondition(log, apiObject, spec, status, context, decision) + return r.createUpgradePlanInternalCondition(apiObject, spec, status, context, decision) } else if decision.IsUpdate() { - return createUpdatePlanInternalCondition(log, apiObject, spec, status, decision, context) + return r.createUpdatePlanInternalCondition(apiObject, spec, status, decision, context) } else { upgradeCondition := status.Conditions.IsTrue(api.ConditionTypeUpgradeInProgress) updateCondition := status.Conditions.IsTrue(api.ConditionTypeUpdateInProgress) @@ -151,8 +148,8 @@ func createRotateOrUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.API return nil, false } -func createUpdatePlanInternalCondition(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, decision updateUpgradeDecisionMap, context PlanBuilderContext) (api.Plan, bool) { - plan, idle := createUpdatePlanInternal(log, apiObject, spec, status, decision, context) +func (r *Reconciler) createUpdatePlanInternalCondition(apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, decision updateUpgradeDecisionMap, context PlanBuilderContext) (api.Plan, bool) { + plan, idle := r.createUpdatePlanInternal(apiObject, spec, status, decision, context) if idle || len(plan) > 0 { if !status.Conditions.IsTrue(api.ConditionTypeUpdateInProgress) { @@ -165,7 +162,7 @@ func createUpdatePlanInternalCondition(log zerolog.Logger, apiObject k8sutil.API return 
plan, idle } -func createUpdatePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, decision updateUpgradeDecisionMap, context PlanBuilderContext) (api.Plan, bool) { +func (r *Reconciler) createUpdatePlanInternal(apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, decision updateUpgradeDecisionMap, context PlanBuilderContext) (api.Plan, bool) { // Update phase for _, m := range status.Members.AsList() { d := decision[m.Member.ID] @@ -176,14 +173,14 @@ func createUpdatePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, s if !d.updateAllowed { // Update is not allowed due to constraint if !d.unsafeUpdateAllowed { - log.Info().Str("member", m.Member.ID).Str("Reason", d.updateMessage).Msg("Pod needs restart but cluster is not ready. Either some shards are not in sync or some member is not ready.") + r.planLogger.Str("member", m.Member.ID).Str("Reason", d.updateMessage).Info("Pod needs restart but cluster is not ready. Either some shards are not in sync or some member is not ready.") continue } - log.Info().Str("member", m.Member.ID).Str("Reason", d.updateMessage).Msg("Pod needs restart but cluster is not ready. Either some shards are not in sync or some member is not ready, but unsafe upgrade is allowed") + r.planLogger.Str("member", m.Member.ID).Str("Reason", d.updateMessage).Info("Pod needs restart but cluster is not ready. Either some shards are not in sync or some member is not ready, but unsafe upgrade is allowed") } if m.Member.Conditions.IsTrue(api.ConditionTypeRestart) { - return createRotateMemberPlan(log, m.Member, m.Group, spec, "Restart flag present"), false + return r.createRotateMemberPlan(m.Member, m.Group, spec, "Restart flag present"), false } arangoMember, ok := context.ACS().CurrentClusterCache().ArangoMember().V1().GetSimple(m.Member.ArangoMemberName(apiObject.GetName(), m.Group)) if !ok { @@ -200,8 +197,8 @@ func createUpdatePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, s p = nil } - if mode, p, reason, err := rotation.IsRotationRequired(log, context.ACS(), spec, m.Member, m.Group, p, arangoMember.Spec.Template, arangoMember.Status.Template); err != nil { - log.Err(err).Str("member", m.Member.ID).Msgf("Error while generating update plan") + if mode, p, reason, err := rotation.IsRotationRequired(context.ACS(), spec, m.Member, m.Group, p, arangoMember.Spec.Template, arangoMember.Status.Template); err != nil { + r.planLogger.Err(err).Str("member", m.Member.ID).Error("Error while generating update plan") continue } else if mode != rotation.InPlaceRotation { return api.Plan{actions.NewAction(api.ActionTypeSetMemberCondition, m.Group, m.Member, "Cleaning update"). 
@@ -223,8 +220,8 @@ func createUpdatePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, s return nil, true } -func createUpgradePlanInternalCondition(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, decision updateUpgradeDecisionMap) (api.Plan, bool) { - plan, idle := createUpgradePlanInternal(log, apiObject, spec, status, context, decision) +func (r *Reconciler) createUpgradePlanInternalCondition(apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, decision updateUpgradeDecisionMap) (api.Plan, bool) { + plan, idle := r.createUpgradePlanInternal(apiObject, spec, status, context, decision) if idle || len(plan) > 0 { if !status.Conditions.IsTrue(api.ConditionTypeUpgradeInProgress) { @@ -237,7 +234,7 @@ func createUpgradePlanInternalCondition(log zerolog.Logger, apiObject k8sutil.AP return plan, idle } -func createUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, decision updateUpgradeDecisionMap) (api.Plan, bool) { +func (r *Reconciler) createUpgradePlanInternal(apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, decision updateUpgradeDecisionMap) (api.Plan, bool) { for _, m := range status.Members.AsList() { // Pre-check d := decision[m.Member.ID] @@ -283,24 +280,24 @@ func createUpgradePlanInternal(log zerolog.Logger, apiObject k8sutil.APIObject, if d.updateAllowed { // We are fine, group is alive so we can proceed - log.Info().Str("member", m.Member.ID).Str("Reason", d.updateMessage).Msg("Upgrade allowed") - return createUpgradeMemberPlan(log, m.Member, m.Group, "Version upgrade", spec, status, !d.upgradeDecision.AutoUpgradeNeeded), false + r.planLogger.Str("member", m.Member.ID).Str("Reason", d.updateMessage).Info("Upgrade allowed") + return r.createUpgradeMemberPlan(m.Member, m.Group, "Version upgrade", spec, status, !d.upgradeDecision.AutoUpgradeNeeded), false } else if d.unsafeUpdateAllowed { - log.Info().Str("member", m.Member.ID).Str("Reason", d.updateMessage).Msg("Pod needs upgrade but cluster is not ready. Either some shards are not in sync or some member is not ready, but unsafe upgrade is allowed") - return createUpgradeMemberPlan(log, m.Member, m.Group, "Version upgrade", spec, status, !d.upgradeDecision.AutoUpgradeNeeded), false + r.planLogger.Str("member", m.Member.ID).Str("Reason", d.updateMessage).Info("Pod needs upgrade but cluster is not ready. Either some shards are not in sync or some member is not ready, but unsafe upgrade is allowed") + return r.createUpgradeMemberPlan(m.Member, m.Group, "Version upgrade", spec, status, !d.upgradeDecision.AutoUpgradeNeeded), false } else { - log.Info().Str("member", m.Member.ID).Str("Reason", d.updateMessage).Msg("Pod needs upgrade but cluster is not ready. Either some shards are not in sync or some member is not ready.") + r.planLogger.Str("member", m.Member.ID).Str("Reason", d.updateMessage).Info("Pod needs upgrade but cluster is not ready. 
Either some shards are not in sync or some member is not ready.") return nil, true } } - log.Warn().Msg("Pod upgrade plan has been made, but it has been dropped due to missing flag") + r.planLogger.Warn("Pod upgrade plan has been made, but it has been dropped due to missing flag") return nil, false } // podNeedsUpgrading decides if an upgrade of the pod is needed (to comply with // the given spec) and if that is allowed. -func podNeedsUpgrading(log zerolog.Logger, status api.MemberStatus, spec api.DeploymentSpec, images api.ImageInfoList) upgradeDecision { +func (r *Reconciler) podNeedsUpgrading(status api.MemberStatus, spec api.DeploymentSpec, images api.ImageInfoList) upgradeDecision { currentImage, found := currentImageInfo(spec, images) if !found { // Hold rotation tasks - we do not know image @@ -341,10 +338,10 @@ func podNeedsUpgrading(log zerolog.Logger, status api.MemberStatus, spec api.Dep } if specVersion.Major() != memberVersion.Major() || specVersion.Minor() != memberVersion.Minor() { // Is allowed, with `--database.auto-upgrade` - log.Info().Str("spec-version", string(specVersion)).Str("pod-version", string(memberVersion)). + r.planLogger.Str("spec-version", string(specVersion)).Str("pod-version", string(memberVersion)). Int("spec-version.major", specVersion.Major()).Int("spec-version.minor", specVersion.Minor()). Int("pod-version.major", memberVersion.Major()).Int("pod-version.minor", memberVersion.Minor()). - Msg("Deciding to do a upgrade with --auto-upgrade") + Info("Deciding to do a upgrade with --auto-upgrade") return upgradeDecision{ FromVersion: memberVersion, FromLicense: memberLicense, @@ -394,7 +391,7 @@ func memberImageInfo(spec api.DeploymentSpec, status api.MemberStatus, images ap return api.ImageInfo{}, false } -func getPodDetails(ctx context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, +func (r *Reconciler) getPodDetails(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, group api.ServerGroup, status api.DeploymentStatus, m api.MemberStatus, planCtx PlanBuilderContext) (string, *core.Pod, *api.ArangoMember, bool) { imageInfo, imageFound := planCtx.SelectImageForMember(spec, status, m) @@ -412,13 +409,13 @@ func getPodDetails(ctx context.Context, log zerolog.Logger, apiObject k8sutil.AP renderedPod, err := planCtx.RenderPodForMember(ctx, planCtx.ACS(), spec, status, m.ID, imageInfo) if err != nil { - log.Err(err).Msg("Error while rendering pod") + r.planLogger.Err(err).Error("Error while rendering pod") return "", nil, nil, false } checksum, err := resources.ChecksumArangoPod(groupSpec, renderedPod) if err != nil { - log.Err(err).Msg("Error while getting pod checksum") + r.planLogger.Err(err).Error("Error while getting pod checksum") return "", nil, nil, false } @@ -429,10 +426,10 @@ func getPodDetails(ctx context.Context, log zerolog.Logger, apiObject k8sutil.AP // given pod differs from what it should be according to the // given deployment spec. // When true is returned, a reason for the rotation is already returned. 
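The template-drift check below reduces to hashing the freshly rendered pod spec and comparing the result with the checksum stored on the ArangoMember; when they differ the member is reported as needing an update ("Pod Spec changed"). A standalone illustration of that idea with hypothetical helper names, not the real resources.ChecksumArangoPod or EqualPodSpecChecksum:

package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

// podSpec is a tiny stand-in for the rendered core.PodTemplateSpec.
type podSpec struct {
	Image string   `json:"image"`
	Args  []string `json:"args"`
}

// checksum hashes the canonical JSON form of the spec.
func checksum(s podSpec) (string, error) {
	data, err := json.Marshal(s)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(data)), nil
}

func main() {
	// Checksum recorded when the member template was last accepted.
	stored, _ := checksum(podSpec{Image: "arangodb:3.9.0", Args: []string{"--server.authentication=true"}})

	// Checksum of the freshly rendered spec after the deployment spec changed.
	current, _ := checksum(podSpec{Image: "arangodb:3.9.1", Args: []string{"--server.authentication=true"}})

	if current != stored {
		fmt.Println("Pod Spec changed - member template needs an update")
	}
}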
-func arangoMemberPodTemplateNeedsUpdate(ctx context.Context, log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, +func (r *Reconciler) arangoMemberPodTemplateNeedsUpdate(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, group api.ServerGroup, status api.DeploymentStatus, m api.MemberStatus, planCtx PlanBuilderContext) (string, bool) { - checksum, _, member, valid := getPodDetails(ctx, log, apiObject, spec, group, status, m, planCtx) + checksum, _, member, valid := r.getPodDetails(ctx, apiObject, spec, group, status, m, planCtx) if valid && !member.Spec.Template.EqualPodSpecChecksum(checksum) { return "Pod Spec changed", true } @@ -487,18 +484,18 @@ func groupReadyForRestart(context PlanBuilderContext, status api.DeploymentStatu // createUpgradeMemberPlan creates a plan to upgrade (stop-recreateWithAutoUpgrade-stop-start) an existing // member. -func createUpgradeMemberPlan(log zerolog.Logger, member api.MemberStatus, +func (r *Reconciler) createUpgradeMemberPlan(member api.MemberStatus, group api.ServerGroup, reason string, spec api.DeploymentSpec, status api.DeploymentStatus, rotateStatefull bool) api.Plan { upgradeAction := api.ActionTypeUpgradeMember if rotateStatefull || group.IsStateless() { upgradeAction = api.ActionTypeRotateMember } - log.Debug(). + r.planLogger. Str("id", member.ID). Str("role", group.AsRole()). Str("reason", reason). Str("action", string(upgradeAction)). - Msg("Creating upgrade plan") + Info("Creating upgrade plan") plan := createRotateMemberPlanWithAction(member, group, upgradeAction, spec, reason) diff --git a/pkg/deployment/reconcile/plan_builder_rotate_upgrade_decision.go b/pkg/deployment/reconcile/plan_builder_rotate_upgrade_decision.go index 94eb72ddb..047595c78 100644 --- a/pkg/deployment/reconcile/plan_builder_rotate_upgrade_decision.go +++ b/pkg/deployment/reconcile/plan_builder_rotate_upgrade_decision.go @@ -24,7 +24,6 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/rotation" "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog" ) type updateUpgradeDecisionMap map[string]updateUpgradeDecision @@ -60,23 +59,23 @@ type updateUpgradeDecision struct { restartRequired bool } -func createRotateOrUpgradeDecision(log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) updateUpgradeDecisionMap { +func (r *Reconciler) createRotateOrUpgradeDecision(spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) updateUpgradeDecisionMap { d := updateUpgradeDecisionMap{} // Init phase for _, m := range status.Members.AsList() { - d[m.Member.ID] = createRotateOrUpgradeDecisionMember(log, spec, status, context, m) + d[m.Member.ID] = r.createRotateOrUpgradeDecisionMember(spec, status, context, m) } return d } -func createRotateOrUpgradeDecisionMember(log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, element api.DeploymentStatusMemberElement) (d updateUpgradeDecision) { +func (r *Reconciler) createRotateOrUpgradeDecisionMember(spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, element api.DeploymentStatusMemberElement) (d updateUpgradeDecision) { if element.Member.Phase == api.MemberPhaseCreated && element.Member.PodName != "" { // Only upgrade when phase is created // Got pod, compare it with what it should be - decision := podNeedsUpgrading(log, element.Member, spec, status.Images) + 
decision := r.podNeedsUpgrading(element.Member, spec, status.Images) if decision.UpgradeNeeded || decision.Hold { d.upgrade = true diff --git a/pkg/deployment/reconcile/plan_builder_rotate_upgrade_test.go b/pkg/deployment/reconcile/plan_builder_rotate_upgrade_test.go index e82ac6977..d86361be4 100644 --- a/pkg/deployment/reconcile/plan_builder_rotate_upgrade_test.go +++ b/pkg/deployment/reconcile/plan_builder_rotate_upgrade_test.go @@ -26,7 +26,6 @@ import ( "github.com/arangodb/go-driver" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" ) @@ -100,9 +99,11 @@ func Test_RotateUpgrade_Condition(t *testing.T) { }, } + r := newTestReconciler() + for n, c := range testCases { t.Run(n, func(t *testing.T) { - c.verify(t, podNeedsUpgrading(log.Logger, c.status, c.spec, c.images)) + c.verify(t, r.podNeedsUpgrading(c.status, c.spec, c.images)) }) } } diff --git a/pkg/deployment/reconcile/plan_builder_scale.go b/pkg/deployment/reconcile/plan_builder_scale.go index 4aa046bbe..291c02b10 100644 --- a/pkg/deployment/reconcile/plan_builder_scale.go +++ b/pkg/deployment/reconcile/plan_builder_scale.go @@ -26,18 +26,15 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func createScaleUPMemberPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createScaleUPMemberPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { - return createScaleMemberPlan(ctx, log, apiObject, spec, status, context).Filter(filterScaleUP) + return r.createScaleMemberPlan(ctx, apiObject, spec, status, context).Filter(filterScaleUP) } -func createScaleMemberPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createScaleMemberPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { @@ -46,32 +43,32 @@ func createScaleMemberPlan(ctx context.Context, switch spec.GetMode() { case api.DeploymentModeSingle: // Never scale down - plan = append(plan, createScalePlan(log, status, status.Members.Single, api.ServerGroupSingle, 1).Filter(filterScaleUP)...) + plan = append(plan, r.createScalePlan(status, status.Members.Single, api.ServerGroupSingle, 1).Filter(filterScaleUP)...) case api.DeploymentModeActiveFailover: // Only scale agents & singles if a := status.Agency; a != nil && a.Size != nil { - plan = append(plan, createScalePlan(log, status, status.Members.Agents, api.ServerGroupAgents, int(*a.Size)).Filter(filterScaleUP)...) + plan = append(plan, r.createScalePlan(status, status.Members.Agents, api.ServerGroupAgents, int(*a.Size)).Filter(filterScaleUP)...) } - plan = append(plan, createScalePlan(log, status, status.Members.Single, api.ServerGroupSingle, spec.Single.GetCount())...) + plan = append(plan, r.createScalePlan(status, status.Members.Single, api.ServerGroupSingle, spec.Single.GetCount())...) case api.DeploymentModeCluster: // Scale agents, dbservers, coordinators if a := status.Agency; a != nil && a.Size != nil { - plan = append(plan, createScalePlan(log, status, status.Members.Agents, api.ServerGroupAgents, int(*a.Size)).Filter(filterScaleUP)...) 
+ plan = append(plan, r.createScalePlan(status, status.Members.Agents, api.ServerGroupAgents, int(*a.Size)).Filter(filterScaleUP)...) } - plan = append(plan, createScalePlan(log, status, status.Members.DBServers, api.ServerGroupDBServers, spec.DBServers.GetCount())...) - plan = append(plan, createScalePlan(log, status, status.Members.Coordinators, api.ServerGroupCoordinators, spec.Coordinators.GetCount())...) + plan = append(plan, r.createScalePlan(status, status.Members.DBServers, api.ServerGroupDBServers, spec.DBServers.GetCount())...) + plan = append(plan, r.createScalePlan(status, status.Members.Coordinators, api.ServerGroupCoordinators, spec.Coordinators.GetCount())...) } if spec.GetMode().SupportsSync() { // Scale syncmasters & syncworkers - plan = append(plan, createScalePlan(log, status, status.Members.SyncMasters, api.ServerGroupSyncMasters, spec.SyncMasters.GetCount())...) - plan = append(plan, createScalePlan(log, status, status.Members.SyncWorkers, api.ServerGroupSyncWorkers, spec.SyncWorkers.GetCount())...) + plan = append(plan, r.createScalePlan(status, status.Members.SyncMasters, api.ServerGroupSyncMasters, spec.SyncMasters.GetCount())...) + plan = append(plan, r.createScalePlan(status, status.Members.SyncWorkers, api.ServerGroupSyncWorkers, spec.SyncWorkers.GetCount())...) } return plan } // createScalePlan creates a scaling plan for a single server group -func createScalePlan(log zerolog.Logger, status api.DeploymentStatus, members api.MemberStatusList, group api.ServerGroup, count int) api.Plan { +func (r *Reconciler) createScalePlan(status api.DeploymentStatus, members api.MemberStatusList, group api.ServerGroup, count int) api.Plan { var plan api.Plan if len(members) < count { // Scale up @@ -79,36 +76,35 @@ func createScalePlan(log zerolog.Logger, status api.DeploymentStatus, members ap for i := 0; i < toAdd; i++ { plan = append(plan, actions.NewAction(api.ActionTypeAddMember, group, withPredefinedMember(""))) } - log.Debug(). + r.planLogger. Int("count", count). Int("actual-count", len(members)). Int("delta", toAdd). Str("role", group.AsRole()). - Msg("Creating scale-up plan") + Debug("Creating scale-up plan") } else if len(members) > count { // Note, we scale down 1 member at a time if m, err := members.SelectMemberToRemove(topologyMissingMemberToRemoveSelector(status.Topology), topologyAwarenessMemberToRemoveSelector(group, status.Topology)); err != nil { - log.Warn().Err(err).Str("role", group.AsRole()).Msg("Failed to select member to remove") + r.planLogger.Err(err).Str("role", group.AsRole()).Warn("Failed to select member to remove") } else { - log.Debug(). + r.planLogger. Str("member-id", m.ID). Str("phase", string(m.Phase)). - Msg("Found member to remove") + Debug("Found member to remove") plan = append(plan, cleanOutMember(group, m)...) - log.Debug(). + r.planLogger. Int("count", count). Int("actual-count", len(members)). Str("role", group.AsRole()). Str("member-id", m.ID). - Msg("Creating scale-down plan") + Debug("Creating scale-down plan") } } return plan } -func createReplaceMemberPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createReplaceMemberPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { @@ -126,24 +122,24 @@ func createReplaceMemberPlan(ctx context.Context, plan = append(plan, actions.NewAction(api.ActionTypeAddMember, group, withPredefinedMember("")). 
AddParam(api.ActionTypeWaitForMemberInSync.String(), ""). AddParam(api.ActionTypeWaitForMemberUp.String(), "")) - log.Debug(). + r.planLogger. Str("role", group.AsRole()). - Msg("Creating replacement plan") + Debug("Creating replacement plan") return nil case api.ServerGroupCoordinators: plan = append(plan, actions.NewAction(api.ActionTypeRemoveMember, group, member)) - log.Debug(). + r.planLogger. Str("role", group.AsRole()). - Msg("Creating replacement plan") + Debug("Creating replacement plan") return nil case api.ServerGroupAgents: plan = append(plan, actions.NewAction(api.ActionTypeRemoveMember, group, member), actions.NewAction(api.ActionTypeAddMember, group, withPredefinedMember("")). AddParam(api.ActionTypeWaitForMemberInSync.String(), ""). AddParam(api.ActionTypeWaitForMemberUp.String(), "")) - log.Debug(). + r.planLogger. Str("role", group.AsRole()). - Msg("Creating replacement plan") + Debug("Creating replacement plan") return nil } } diff --git a/pkg/deployment/reconcile/plan_builder_storage.go b/pkg/deployment/reconcile/plan_builder_storage.go index 9471b9a44..944885af9 100644 --- a/pkg/deployment/reconcile/plan_builder_storage.go +++ b/pkg/deployment/reconcile/plan_builder_storage.go @@ -26,13 +26,11 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" ) // createRotateServerStorageResizePlan creates plan to resize storage -func createRotateServerStorageResizePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRotateServerStorageResizePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -62,10 +60,10 @@ func createRotateServerStorageResizePlan(ctx context.Context, // Load PVC pvc, exists := cache.PersistentVolumeClaim().V1().GetSimple(m.PersistentVolumeClaimName) if !exists { - log.Warn(). + r.planLogger. Str("role", group.AsRole()). Str("id", m.ID). - Msg("Failed to get PVC") + Warn("Failed to get PVC") continue } @@ -79,7 +77,7 @@ func createRotateServerStorageResizePlan(ctx context.Context, if volumeSize, ok := pvc.Spec.Resources.Requests[core.ResourceStorage]; ok { cmp := volumeSize.Cmp(requestedSize) if cmp < 0 { - plan = append(plan, pvcResizePlan(log, group, groupSpec, m)...) + plan = append(plan, r.pvcResizePlan(group, groupSpec, m)...) 
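The resize decision above relies on resource.Quantity.Cmp: only when the PVC's current storage request is strictly smaller than the requested size (Cmp returns -1) is a resize plan appended; equal or larger volumes are left alone. A self-contained example of that comparison using the apimachinery resource package:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	current := resource.MustParse("10Gi")   // size currently requested by the PVC
	requested := resource.MustParse("20Gi") // size requested in the group spec

	// Cmp returns -1, 0 or 1; only "smaller than requested" triggers a resize.
	if current.Cmp(requested) < 0 {
		fmt.Println("PVC is smaller than requested - append a resize plan")
	} else {
		fmt.Println("PVC already satisfies the request - nothing to do")
	}
}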
} } } @@ -90,8 +88,7 @@ func createRotateServerStorageResizePlan(ctx context.Context, return plan } -func createRotateServerStoragePVCPendingResizeConditionPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRotateServerStoragePVCPendingResizeConditionPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { var plan api.Plan @@ -120,7 +117,7 @@ func createRotateServerStoragePVCPendingResizeConditionPlan(ctx context.Context, return plan } -func pvcResizePlan(log zerolog.Logger, group api.ServerGroup, groupSpec api.ServerGroupSpec, member api.MemberStatus) api.Plan { +func (r *Reconciler) pvcResizePlan(group api.ServerGroup, groupSpec api.ServerGroupSpec, member api.MemberStatus) api.Plan { mode := groupSpec.VolumeResizeMode.Get() switch mode { case api.PVCResizeModeRuntime: @@ -138,8 +135,8 @@ func pvcResizePlan(log zerolog.Logger, group api.ServerGroup, groupSpec api.Serv actions.NewAction(api.ActionTypeWaitForMemberUp, group, member), } default: - log.Error().Str("server-group", group.AsRole()).Str("mode", mode.String()). - Msg("Requested mode is not supported") + r.planLogger.Str("server-group", group.AsRole()).Str("mode", mode.String()). + Error("Requested mode is not supported") return nil } } diff --git a/pkg/deployment/reconcile/plan_builder_test.go b/pkg/deployment/reconcile/plan_builder_test.go index a3df6d421..7bb94d3cb 100644 --- a/pkg/deployment/reconcile/plan_builder_test.go +++ b/pkg/deployment/reconcile/plan_builder_test.go @@ -51,6 +51,7 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/reconciler" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util" "github.com/arangodb/kube-arangodb/pkg/util/arangod/conn" "github.com/arangodb/kube-arangodb/pkg/util/errors" @@ -429,8 +430,9 @@ func TestCreatePlanSingleScale(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() + r := newTestReconciler() + c := newTC(t) - log := zerolog.Nop() spec := api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeSingle), } @@ -450,7 +452,7 @@ func TestCreatePlanSingleScale(t *testing.T) { status.Hashes.TLS.Propagated = true status.Hashes.Encryption.Propagated = true - newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed := r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) assert.Len(t, newPlan, 1) @@ -461,12 +463,12 @@ func TestCreatePlanSingleScale(t *testing.T) { PodName: "something", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) assert.Len(t, newPlan, 0) // Single mode does not scale spec.Single.Count = util.NewInt(2) - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) assert.Len(t, newPlan, 0) // Single mode does not scale @@ -482,7 +484,7 @@ func TestCreatePlanSingleScale(t *testing.T) { PodName: "something1", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) assert.Len(t, 
newPlan, 0) // Single mode does not scale down } @@ -493,7 +495,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) { defer cancel() c := newTC(t) - log := zerolog.Nop() + r := newTestReconciler() spec := api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeActiveFailover), } @@ -511,7 +513,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) { var status api.DeploymentStatus addAgentsToStatus(t, &status, 3) - newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed := r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) require.Len(t, newPlan, 2) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -524,7 +526,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) { PodName: "something", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) require.Len(t, newPlan, 1) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -549,7 +551,7 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) { PodName: "something4", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) require.Len(t, newPlan, 3) // Note: Downscaling is only down 1 at a time assert.Equal(t, api.ActionTypeKillMemberPod, newPlan[0].Type) @@ -566,7 +568,7 @@ func TestCreatePlanClusterScale(t *testing.T) { defer cancel() c := newTC(t) - log := zerolog.Nop() + r := newTestReconciler() spec := api.DeploymentSpec{ Mode: api.NewMode(api.DeploymentModeCluster), } @@ -583,7 +585,7 @@ func TestCreatePlanClusterScale(t *testing.T) { var status api.DeploymentStatus addAgentsToStatus(t, &status, 3) - newPlan, _, changed := createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed := r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) require.Len(t, newPlan, 6) // Adding 3 dbservers & 3 coordinators (note: agents do not scale now) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -616,7 +618,7 @@ func TestCreatePlanClusterScale(t *testing.T) { PodName: "coordinator1", }, } - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) require.Len(t, newPlan, 3) assert.Equal(t, api.ActionTypeAddMember, newPlan[0].Type) @@ -653,7 +655,7 @@ func TestCreatePlanClusterScale(t *testing.T) { } spec.DBServers.Count = util.NewInt(1) spec.Coordinators.Count = util.NewInt(1) - newPlan, _, changed = createNormalPlan(ctx, log, depl, nil, spec, status, c) + newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c) assert.True(t, changed) require.Len(t, newPlan, 7) // Note: Downscaling is done 1 at a time assert.Equal(t, api.ActionTypeCleanOutMember, newPlan[0].Type) @@ -1182,8 +1184,12 @@ func TestCreatePlan(t *testing.T) { testCase.context.Inspector = i h := &LastLogRecord{t: t} - logger := zerolog.New(ioutil.Discard).Hook(h) - r := NewReconciler(logger, testCase.context) + logger := logging.NewFactory(zerolog.New(ioutil.Discard).Hook(h)).RegisterAndGetLogger("test", logging.Debug) + r := &Reconciler{ + log: logger, + planLogger: logger, + context: testCase.context, + } if testCase.Extender != nil { testCase.Extender(t, r, &testCase) @@ -1194,7 +1200,7 @@ func TestCreatePlan(t *testing.T) { 
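In the test changes above, the Reconciler is built directly with a logger backed by zerolog writing to a discarded writer while carrying a hook (the LastLogRecord helper), so assertions can still inspect what was logged. A minimal, hypothetical hook of that shape, not the actual LastLogRecord implementation:

package main

import (
	"fmt"
	"io"

	"github.com/rs/zerolog"
)

// lastMessageHook remembers the message of the most recent log event.
type lastMessageHook struct{ last string }

// Run implements zerolog.Hook.
func (h *lastMessageHook) Run(_ *zerolog.Event, _ zerolog.Level, msg string) {
	h.last = msg
}

func main() {
	h := &lastMessageHook{}
	logger := zerolog.New(io.Discard).Hook(h) // output is discarded, the hook still fires

	logger.Debug().Str("id", "PRMR-1").Msg("Creating scale-up plan")
	fmt.Println(h.last) // Creating scale-up plan
}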
testCase.Helper(testCase.context.ArangoDeployment) } - err, _ := r.CreatePlan(ctx, i) + err, _ := r.CreatePlan(ctx) // Assert if testCase.ExpectedEvent != nil { diff --git a/pkg/deployment/reconcile/plan_builder_tls.go b/pkg/deployment/reconcile/plan_builder_tls.go index e892b0eb6..05d3c91c9 100644 --- a/pkg/deployment/reconcile/plan_builder_tls.go +++ b/pkg/deployment/reconcile/plan_builder_tls.go @@ -43,14 +43,11 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" memberTls "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/tls" - - "github.com/rs/zerolog" ) const CertificateRenewalMargin = 7 * 24 * time.Hour -func createTLSStatusPropagatedFieldUpdate(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTLSStatusPropagatedFieldUpdate(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, w WithPlanBuilder, builders ...planBuilder) api.Plan { if !spec.TLS.IsSecure() { @@ -83,15 +80,14 @@ func createTLSStatusPropagatedFieldUpdate(ctx context.Context, } // createTLSStatusUpdate creates plan to update ca info -func createTLSStatusUpdate(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTLSStatusUpdate(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { return nil } - if createTLSStatusUpdateRequired(log, apiObject, spec, status, context) { + if r.createTLSStatusUpdateRequired(apiObject, spec, status, context) { return api.Plan{actions.NewClusterAction(api.ActionTypeTLSKeyStatusUpdate, "Update status")} } @@ -99,8 +95,7 @@ func createTLSStatusUpdate(ctx context.Context, } // createTLSStatusUpdate creates plan to update ca info -func createTLSStatusPropagated(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTLSStatusPropagated(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { @@ -116,7 +111,7 @@ func createTLSStatusPropagated(ctx context.Context, return nil } -func createTLSStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObject, spec api.DeploymentSpec, +func (r *Reconciler) createTLSStatusUpdateRequired(apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) bool { if !spec.TLS.IsSecure() { return false @@ -124,7 +119,7 @@ func createTLSStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje trusted, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(resources.GetCASecretName(apiObject)) if !exists { - log.Warn().Str("secret", resources.GetCASecretName(apiObject)).Msg("Folder with secrets does not exist") + r.planLogger.Str("secret", resources.GetCASecretName(apiObject)).Warn("Folder with secrets does not exist") return false } @@ -152,8 +147,7 @@ func createTLSStatusUpdateRequired(log zerolog.Logger, apiObject k8sutil.APIObje } // createCAAppendPlan creates plan to append CA -func createCAAppendPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createCAAppendPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan 
{ if !spec.TLS.IsSecure() { @@ -162,30 +156,30 @@ func createCAAppendPlan(ctx context.Context, caSecret, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { - log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists") + r.planLogger.Str("secret", spec.TLS.GetCASecretName()).Warn("CA Secret does not exists") return nil } - ca, _, err := resources.GetKeyCertFromSecret(log, caSecret, resources.CACertName, resources.CAKeyName) + ca, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName) if err != nil { - log.Warn().Err(err).Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not contains Cert") + r.planLogger.Err(err).Str("secret", spec.TLS.GetCASecretName()).Warn("CA Secret does not contains Cert") return nil } if len(ca) == 0 { - log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA does not contain any certs") + r.planLogger.Str("secret", spec.TLS.GetCASecretName()).Warn("CA does not contain any certs") return nil } trusted, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(resources.GetCASecretName(apiObject)) if !exists { - log.Warn().Str("secret", resources.GetCASecretName(apiObject)).Msg("Folder with secrets does not exist") + r.planLogger.Str("secret", resources.GetCASecretName(apiObject)).Warn("Folder with secrets does not exist") return nil } caData, err := ca.ToPem() if err != nil { - log.Warn().Err(err).Str("secret", spec.TLS.GetCASecretName()).Msg("Unable to parse cert") + r.planLogger.Err(err).Str("secret", spec.TLS.GetCASecretName()).Warn("Unable to parse cert") return nil } @@ -200,8 +194,7 @@ func createCAAppendPlan(ctx context.Context, } // createCARenewalPlan creates plan to renew CA -func createCARenewalPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createCARenewalPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { @@ -210,18 +203,18 @@ func createCARenewalPlan(ctx context.Context, caSecret, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { - log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists") + r.planLogger.Str("secret", spec.TLS.GetCASecretName()).Warn("CA Secret does not exists") return nil } if !k8sutil.IsOwner(apiObject.AsOwner(), caSecret) { - log.Debug().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret is not owned by Operator, we wont do anything") + r.planLogger.Str("secret", spec.TLS.GetCASecretName()).Debug("CA Secret is not owned by Operator, we wont do anything") return nil } - cas, _, err := resources.GetKeyCertFromSecret(log, caSecret, resources.CACertName, resources.CAKeyName) + cas, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName) if err != nil { - log.Warn().Err(err).Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not contains Cert") + r.planLogger.Err(err).Str("secret", spec.TLS.GetCASecretName()).Warn("CA Secret does not contains Cert") return nil } @@ -236,8 +229,7 @@ func createCARenewalPlan(ctx context.Context, } // createCACleanPlan creates plan to remove old CA's -func createCACleanPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createCACleanPlan(ctx context.Context, apiObject k8sutil.APIObject, spec 
api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { @@ -246,30 +238,30 @@ func createCACleanPlan(ctx context.Context, caSecret, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { - log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists") + r.planLogger.Str("secret", spec.TLS.GetCASecretName()).Warn("CA Secret does not exists") return nil } - ca, _, err := resources.GetKeyCertFromSecret(log, caSecret, resources.CACertName, resources.CAKeyName) + ca, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName) if err != nil { - log.Warn().Err(err).Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not contains Cert") + r.planLogger.Err(err).Str("secret", spec.TLS.GetCASecretName()).Warn("CA Secret does not contains Cert") return nil } if len(ca) == 0 { - log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA does not contain any certs") + r.planLogger.Str("secret", spec.TLS.GetCASecretName()).Warn("CA does not contain any certs") return nil } trusted, exists := context.ACS().CurrentClusterCache().Secret().V1().GetSimple(resources.GetCASecretName(apiObject)) if !exists { - log.Warn().Str("secret", resources.GetCASecretName(apiObject)).Msg("Folder with secrets does not exist") + r.planLogger.Str("secret", resources.GetCASecretName(apiObject)).Warn("Folder with secrets does not exist") return nil } caData, err := ca.ToPem() if err != nil { - log.Warn().Err(err).Str("secret", spec.TLS.GetCASecretName()).Msg("Unable to parse cert") + r.planLogger.Err(err).Str("secret", spec.TLS.GetCASecretName()).Warn("Unable to parse cert") return nil } @@ -285,8 +277,7 @@ func createCACleanPlan(ctx context.Context, return nil } -func createKeyfileRenewalPlanSynced(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createKeyfileRenewalPlanSynced(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { @@ -312,8 +303,8 @@ func createKeyfileRenewalPlanSynced(ctx context.Context, lCtx, c := context.WithTimeout(ctx, 500*time.Millisecond) defer c() - if renew, _ := keyfileRenewalRequired(lCtx, log, apiObject, spec.Sync.TLS, spec, cache, planCtx, group, member, api.TLSRotateModeRecreate); renew { - log.Info().Msg("Renewal of keyfile required - Recreate (sync master)") + if renew, _ := r.keyfileRenewalRequired(lCtx, apiObject, spec.Sync.TLS, spec, cache, planCtx, group, member, api.TLSRotateModeRecreate); renew { + r.planLogger.Info("Renewal of keyfile required - Recreate (sync master)") plan = append(plan, tlsRotateConditionAction(group, member.ID, "Restart sync master after keyfile removal")) } } @@ -321,8 +312,7 @@ func createKeyfileRenewalPlanSynced(ctx context.Context, return plan } -func createKeyfileRenewalPlanDefault(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createKeyfileRenewalPlanDefault(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { @@ -349,8 +339,8 @@ func createKeyfileRenewalPlanDefault(ctx context.Context, lCtx, c := context.WithTimeout(ctx, 500*time.Millisecond) defer c() - if renew, _ := keyfileRenewalRequired(lCtx, log, apiObject, spec.TLS, spec, cache, planCtx, group, member, api.TLSRotateModeRecreate); 
renew { - log.Info().Msg("Renewal of keyfile required - Recreate (server)") + if renew, _ := r.keyfileRenewalRequired(lCtx, apiObject, spec.TLS, spec, cache, planCtx, group, member, api.TLSRotateModeRecreate); renew { + r.planLogger.Info("Renewal of keyfile required - Recreate (server)") plan = append(plan, tlsRotateConditionAction(group, member.ID, "Restart server after keyfile removal")) } } @@ -361,8 +351,7 @@ func createKeyfileRenewalPlanDefault(ctx context.Context, return plan } -func createKeyfileRenewalPlanInPlace(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createKeyfileRenewalPlanInPlace(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { @@ -385,8 +374,8 @@ func createKeyfileRenewalPlanInPlace(ctx context.Context, lCtx, c := context.WithTimeout(ctx, 500*time.Millisecond) defer c() - if renew, recreate := keyfileRenewalRequired(lCtx, log, apiObject, spec.TLS, spec, cache, planCtx, group, member, api.TLSRotateModeInPlace); renew { - log.Info().Msg("Renewal of keyfile required - InPlace (server)") + if renew, recreate := r.keyfileRenewalRequired(lCtx, apiObject, spec.TLS, spec, cache, planCtx, group, member, api.TLSRotateModeInPlace); renew { + r.planLogger.Info("Renewal of keyfile required - InPlace (server)") if recreate { plan = append(plan, actions.NewAction(api.ActionTypeCleanTLSKeyfileCertificate, group, member, "Remove server keyfile and enforce renewal")) } @@ -400,8 +389,7 @@ func createKeyfileRenewalPlanInPlace(ctx context.Context, return plan } -func createKeyfileRenewalPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createKeyfileRenewalPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { @@ -411,13 +399,13 @@ func createKeyfileRenewalPlan(ctx context.Context, gCtx, c := context.WithTimeout(ctx, 2*time.Second) defer c() - plan := createKeyfileRenewalPlanSynced(gCtx, log, apiObject, spec, status, planCtx) + plan := r.createKeyfileRenewalPlanSynced(gCtx, apiObject, spec, status, planCtx) switch createKeyfileRenewalPlanMode(spec, status) { case api.TLSRotateModeInPlace: - plan = append(plan, createKeyfileRenewalPlanInPlace(gCtx, log, apiObject, spec, status, planCtx)...) + plan = append(plan, r.createKeyfileRenewalPlanInPlace(gCtx, apiObject, spec, status, planCtx)...) default: - plan = append(plan, createKeyfileRenewalPlanDefault(gCtx, log, apiObject, spec, status, planCtx)...) + plan = append(plan, r.createKeyfileRenewalPlanDefault(gCtx, apiObject, spec, status, planCtx)...) 
} return plan @@ -495,8 +483,7 @@ func checkServerValidCertRequest(ctx context.Context, context PlanBuilderContext } // keyfileRenewalRequired checks if a keyfile renewal is required and if recreation should be made -func keyfileRenewalRequired(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, tls api.TLSSpec, +func (r *Reconciler) keyfileRenewalRequired(ctx context.Context, apiObject k8sutil.APIObject, tls api.TLSSpec, spec api.DeploymentSpec, cachedStatus inspectorInterface.Inspector, context PlanBuilderContext, group api.ServerGroup, member api.MemberStatus, mode api.TLSRotateMode) (bool, bool) { @@ -508,19 +495,19 @@ func keyfileRenewalRequired(ctx context.Context, service, ok := cachedStatus.Service().V1().GetSimple(memberName) if !ok { - log.Warn().Str("service", memberName).Msg("Service does not exists") + r.planLogger.Str("service", memberName).Warn("Service does not exists") return false, false } caSecret, exists := cachedStatus.Secret().V1().GetSimple(tls.GetCASecretName()) if !exists { - log.Warn().Str("secret", tls.GetCASecretName()).Msg("CA Secret does not exists") + r.planLogger.Str("secret", tls.GetCASecretName()).Warn("CA Secret does not exists") return false, false } - ca, _, err := resources.GetKeyCertFromSecret(log, caSecret, resources.CACertName, resources.CAKeyName) + ca, _, err := resources.GetKeyCertFromSecret(caSecret, resources.CACertName, resources.CAKeyName) if err != nil { - log.Warn().Err(err).Str("secret", tls.GetCASecretName()).Msg("CA Secret does not contains Cert") + r.planLogger.Err(err).Str("secret", tls.GetCASecretName()).Warn("CA Secret does not contains Cert") return false, false } @@ -530,13 +517,13 @@ func keyfileRenewalRequired(ctx context.Context, case *url.Error: switch v.Err.(type) { case x509.UnknownAuthorityError, x509.CertificateInvalidError: - log.Debug().Err(v.Err).Str("type", reflect.TypeOf(v.Err).String()).Msgf("Validation of cert for %s failed, renewal is required", memberName) + r.planLogger.Err(v.Err).Str("type", reflect.TypeOf(v.Err).String()).Debug("Validation of cert for %s failed, renewal is required", memberName) return true, true default: - log.Debug().Err(v.Err).Str("type", reflect.TypeOf(v.Err).String()).Msgf("Validation of cert for %s failed, but cert looks fine - continuing", memberName) + r.planLogger.Err(v.Err).Str("type", reflect.TypeOf(v.Err).String()).Debug("Validation of cert for %s failed, but cert looks fine - continuing", memberName) } default: - log.Debug().Err(err).Str("type", reflect.TypeOf(err).String()).Msgf("Validation of cert for %s failed, will try again next time", memberName) + r.planLogger.Err(err).Str("type", reflect.TypeOf(err).String()).Debug("Validation of cert for %s failed, will try again next time", memberName) } return false, false } @@ -552,7 +539,7 @@ func keyfileRenewalRequired(ctx context.Context, } if time.Now().Add(CertificateRenewalMargin).After(cert.NotAfter) { - log.Info().Msg("Renewal margin exceeded") + r.planLogger.Info("Renewal margin exceeded") return true, true } @@ -565,7 +552,7 @@ func keyfileRenewalRequired(ctx context.Context, } if err != nil { - log.Warn().Msg("Unable to render alt names") + r.planLogger.Warn("Unable to render alt names") return false, false } @@ -576,9 +563,9 @@ func keyfileRenewalRequired(ctx context.Context, } if a := util.DiffStrings(altNames.AltNames, dnsNames); len(a) > 0 { - log.Info().Strs("AltNames Current", cert.DNSNames). - Strs("AltNames Expected", altNames.AltNames). 
- Msgf("Alt names are different") + r.planLogger.Strs("AltNames Current", cert.DNSNames...). + Strs("AltNames Expected", altNames.AltNames...). + Info("Alt names are different") return true, true } } @@ -587,33 +574,33 @@ func keyfileRenewalRequired(ctx context.Context, if mode == api.TLSRotateModeInPlace && group.IsArangod() { conn, err := context.GetServerClient(ctx, group, member.ID) if err != nil { - log.Warn().Err(err).Msg("Unable to get client") + r.planLogger.Err(err).Warn("Unable to get client") return false, false } s, exists := cachedStatus.Secret().V1().GetSimple(k8sutil.CreateTLSKeyfileSecretName(apiObject.GetName(), group.AsRole(), member.ID)) if !exists { - log.Warn().Msg("Keyfile secret is missing") + r.planLogger.Warn("Keyfile secret is missing") return false, false } c := client.NewClient(conn.Connection()) tls, err := c.GetTLS(ctx) if err != nil { - log.Warn().Err(err).Msg("Unable to get tls details") + r.planLogger.Err(err).Warn("Unable to get tls details") return false, false } keyfile, ok := s.Data[constants.SecretTLSKeyfile] if !ok { - log.Warn().Msg("Keyfile secret is invalid") + r.planLogger.Warn("Keyfile secret is invalid") return false, false } keyfileSha := util.SHA256(keyfile) if tls.Result.KeyFile.GetSHA().Checksum() != keyfileSha { - log.Debug().Str("current", tls.Result.KeyFile.GetSHA().Checksum()).Str("desired", keyfileSha).Msg("Unable to get tls details") + r.planLogger.Str("current", tls.Result.KeyFile.GetSHA().Checksum()).Str("desired", keyfileSha).Debug("Unable to get tls details") return true, false } } diff --git a/pkg/deployment/reconcile/plan_builder_tls_sni.go b/pkg/deployment/reconcile/plan_builder_tls_sni.go index 78681cc4c..c7c253072 100644 --- a/pkg/deployment/reconcile/plan_builder_tls_sni.go +++ b/pkg/deployment/reconcile/plan_builder_tls_sni.go @@ -32,11 +32,9 @@ import ( "github.com/arangodb/go-driver" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" - "github.com/rs/zerolog" ) -func createRotateTLSServerSNIPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createRotateTLSServerSNIPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, planCtx PlanBuilderContext) api.Plan { if !spec.TLS.IsSecure() { @@ -54,7 +52,7 @@ func createRotateTLSServerSNIPlan(ctx context.Context, fetchedSecrets, err := mapTLSSNIConfig(*sni, planCtx.ACS().CurrentClusterCache()) if err != nil { - log.Warn().Err(err).Msg("Unable to get SNI desired state") + r.planLogger.Err(err).Warn("Unable to get SNI desired state") return nil } @@ -86,7 +84,7 @@ func createRotateTLSServerSNIPlan(ctx context.Context, return err }) if err != nil { - log.Info().Err(err).Msg("Unable to get client") + r.planLogger.Err(err).Info("Unable to get client") continue } @@ -97,7 +95,7 @@ func createRotateTLSServerSNIPlan(ctx context.Context, return err }) if err != nil { - log.Info().Err(err).Msg("SNI compare failed") + r.planLogger.Err(err).Info("SNI compare failed") return nil } else if !ok { @@ -108,7 +106,7 @@ func createRotateTLSServerSNIPlan(ctx context.Context, plan = append(plan, actions.NewAction(api.ActionTypeUpdateTLSSNI, group, m, "SNI Secret needs update")) default: - log.Warn().Msg("SNI mode rotation is unknown") + r.planLogger.Warn("SNI mode rotation is unknown") continue } } diff --git a/pkg/deployment/reconcile/plan_builder_topology.community.go b/pkg/deployment/reconcile/plan_builder_topology.community.go 
index d6b1a154f..c540d26d6 100644 --- a/pkg/deployment/reconcile/plan_builder_topology.community.go +++ b/pkg/deployment/reconcile/plan_builder_topology.community.go @@ -26,38 +26,32 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) -func createTopologyEnablementPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTopologyEnablementPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil } -func createTopologyMemberUpdatePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTopologyMemberUpdatePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil } -func createTopologyMemberConditionPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTopologyMemberConditionPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil } -func createTopologyMemberAdjustmentPlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTopologyMemberAdjustmentPlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil } -func createTopologyUpdatePlan(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) createTopologyUpdatePlan(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil diff --git a/pkg/deployment/reconcile/plan_builder_utils.go b/pkg/deployment/reconcile/plan_builder_utils.go index 581a6f78d..8a29990b2 100644 --- a/pkg/deployment/reconcile/plan_builder_utils.go +++ b/pkg/deployment/reconcile/plan_builder_utils.go @@ -28,18 +28,17 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - "github.com/rs/zerolog" ) // createRotateMemberPlan creates a plan to rotate (stop-recreate-start) an existing // member. -func createRotateMemberPlan(log zerolog.Logger, member api.MemberStatus, +func (r *Reconciler) createRotateMemberPlan(member api.MemberStatus, group api.ServerGroup, spec api.DeploymentSpec, reason string) api.Plan { - log.Debug(). + r.log. Str("id", member.ID). Str("role", group.AsRole()). Str("reason", reason). 
- Msg("Creating rotation plan") + Debug("Creating rotation plan") return createRotateMemberPlanWithAction(member, group, api.ActionTypeRotateMember, spec, reason) } @@ -63,8 +62,7 @@ func createRotateMemberPlanWithAction(member api.MemberStatus, return plan } -func emptyPlanBuilder(ctx context.Context, - log zerolog.Logger, apiObject k8sutil.APIObject, +func (r *Reconciler) emptyPlanBuilder(ctx context.Context, apiObject k8sutil.APIObject, spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext) api.Plan { return nil diff --git a/pkg/deployment/reconcile/plan_executor.go b/pkg/deployment/reconcile/plan_executor.go index a11f8ff2b..7fa652d41 100644 --- a/pkg/deployment/reconcile/plan_executor.go +++ b/pkg/deployment/reconcile/plan_executor.go @@ -25,14 +25,12 @@ import ( "fmt" "time" - "github.com/rs/zerolog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/metrics" "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector" ) var ( @@ -117,23 +115,23 @@ func (p plannerResources) Set(deployment *api.DeploymentStatus, plan api.Plan) b // ExecutePlan tries to execute the plan as far as possible. // Returns true when it has to be called again soon. // False otherwise. -func (d *Reconciler) ExecutePlan(ctx context.Context, cachedStatus inspectorInterface.Inspector) (bool, error) { +func (d *Reconciler) ExecutePlan(ctx context.Context) (bool, error) { var callAgain bool - if again, err := d.executePlanStatus(ctx, d.log, plannerHigh{}); err != nil { + if again, err := d.executePlanStatus(ctx, plannerHigh{}); err != nil { return false, errors.WithStack(err) } else if again { callAgain = true } - if again, err := d.executePlanStatus(ctx, d.log, plannerResources{}); err != nil { - d.log.Error().Err(err).Msg("Execution of plan failed") + if again, err := d.executePlanStatus(ctx, plannerResources{}); err != nil { + d.planLogger.Err(err).Error("Execution of plan failed") return false, nil } else if again { callAgain = true } - if again, err := d.executePlanStatus(ctx, d.log, plannerNormal{}); err != nil { + if again, err := d.executePlanStatus(ctx, plannerNormal{}); err != nil { return false, errors.WithStack(err) } else if again { callAgain = true @@ -142,7 +140,7 @@ func (d *Reconciler) ExecutePlan(ctx context.Context, cachedStatus inspectorInte return callAgain, nil } -func (d *Reconciler) executePlanStatus(ctx context.Context, log zerolog.Logger, pg planner) (bool, error) { +func (d *Reconciler) executePlanStatus(ctx context.Context, pg planner) (bool, error) { loopStatus, _ := d.context.GetStatus() plan := pg.Get(&loopStatus) @@ -151,15 +149,15 @@ func (d *Reconciler) executePlanStatus(ctx context.Context, log zerolog.Logger, return false, nil } - newPlan, callAgain, err := d.executePlan(ctx, log, plan, pg) + newPlan, callAgain, err := d.executePlan(ctx, plan, pg) // Refresh current status loopStatus, lastVersion := d.context.GetStatus() if pg.Set(&loopStatus, newPlan) { - log.Info().Msg("Updating plan") + d.planLogger.Info("Updating plan") if err := d.context.UpdateStatus(ctx, loopStatus, lastVersion, true); err != nil { - log.Debug().Err(err).Msg("Failed to update CR status") + d.planLogger.Err(err).Debug("Failed to update CR status") return false, errors.WithStack(err) } } @@ -171,7 +169,7 @@ func (d *Reconciler) executePlanStatus(ctx 
context.Context, log zerolog.Logger, return callAgain, nil } -func (d *Reconciler) executePlan(ctx context.Context, log zerolog.Logger, statusPlan api.Plan, pg planner) (newPlan api.Plan, callAgain bool, err error) { +func (d *Reconciler) executePlan(ctx context.Context, statusPlan api.Plan, pg planner) (newPlan api.Plan, callAgain bool, err error) { plan := statusPlan.DeepCopy() for { @@ -181,32 +179,10 @@ func (d *Reconciler) executePlan(ctx context.Context, log zerolog.Logger, status // Take first action planAction := plan[0] - logContext := log.With(). - Int("plan-len", len(plan)). - Str("action-id", planAction.ID). - Str("action-type", string(planAction.Type)). - Str("group", planAction.Group.AsRole()). - Str("member-id", planAction.MemberID) - if status, _ := d.context.GetStatus(); status.Members.ContainsID(planAction.MemberID) { - if member, _, ok := status.Members.ElementByID(planAction.MemberID); ok { - logContext = logContext.Str("phase", string(member.Phase)) - } - } + action, actionContext := d.createAction(planAction) - for k, v := range planAction.Params { - logContext = logContext.Str("param."+k, v) - } - - for k, v := range planAction.Locals { - logContext = logContext.Str("local."+k.String(), v) - } - - log := logContext.Logger() - - action, actionContext := d.createAction(log, planAction) - - done, abort, recall, retry, err := d.executeAction(ctx, log, planAction, action) + done, abort, recall, retry, err := d.executeAction(ctx, planAction, action) if err != nil { if retry { return plan, true, nil @@ -253,9 +229,9 @@ func (d *Reconciler) executePlan(ctx context.Context, log zerolog.Logger, status if ok { c.GetThrottles().Invalidate(components...) - log.Info().Msgf("Reloading cached status") + d.planLogger.Info("Reloading cached status") if err := c.Refresh(ctx); err != nil { - log.Warn().Err(err).Msgf("Unable to reload cached status") + d.planLogger.Err(err).Warn("Unable to reload cached status") return plan, recall, nil } } @@ -263,12 +239,12 @@ func (d *Reconciler) executePlan(ctx context.Context, log zerolog.Logger, status if newPlan, changed := getActionPlanAppender(action, plan); changed { // Our actions have been added to the end of plan - log.Info().Msgf("Appending new plan items") + d.planLogger.Info("Appending new plan items") return newPlan, true, nil } if err := getActionPost(action, ctx); err != nil { - log.Err(err).Msgf("Post action failed") + d.planLogger.Err(err).Error("Post action failed") return nil, false, errors.WithStack(err) } } else { @@ -288,23 +264,23 @@ func (d *Reconciler) executePlan(ctx context.Context, log zerolog.Logger, status } } -func (d *Reconciler) executeAction(ctx context.Context, log zerolog.Logger, planAction api.Action, action Action) (done, abort, callAgain, retry bool, err error) { +func (d *Reconciler) executeAction(ctx context.Context, planAction api.Action, action Action) (done, abort, callAgain, retry bool, err error) { if !planAction.IsStarted() { // Not started yet ready, err := action.Start(ctx) if err != nil { - if d := getStartFailureGracePeriod(action); d > 0 && !planAction.CreationTime.IsZero() { - if time.Since(planAction.CreationTime.Time) < d { - log.Error().Err(err).Msg("Failed to start action, but still in grace period") + if g := getStartFailureGracePeriod(action); g > 0 && !planAction.CreationTime.IsZero() { + if time.Since(planAction.CreationTime.Time) < g { + d.planLogger.Err(err).Error("Failed to start action, but still in grace period") return false, false, false, true, errors.WithStack(err) } } - 
log.Error().Err(err).Msg("Failed to start action") + d.planLogger.Err(err).Error("Failed to start action") return false, false, false, false, errors.WithStack(err) } if ready { - log.Debug().Bool("ready", ready).Msg("Action Start completed") + d.planLogger.Bool("ready", ready).Debug("Action Start completed") return true, false, false, false, nil } @@ -313,25 +289,25 @@ func (d *Reconciler) executeAction(ctx context.Context, log zerolog.Logger, plan // First action of plan has been started, check its progress ready, abort, err := action.CheckProgress(ctx) if err != nil { - log.Debug().Err(err).Msg("Failed to check action progress") + d.planLogger.Err(err).Debug("Failed to check action progress") return false, false, false, false, errors.WithStack(err) } - log.Debug(). + d.planLogger. Bool("abort", abort). Bool("ready", ready). - Msg("Action CheckProgress completed") + Debug("Action CheckProgress completed") if ready { return true, false, false, false, nil } if abort { - log.Warn().Msg("Action aborted. Removing the entire plan") + d.planLogger.Warn("Action aborted. Removing the entire plan") d.context.CreateEvent(k8sutil.NewPlanAbortedEvent(d.context.GetAPIObject(), string(planAction.Type), planAction.MemberID, planAction.Group.AsRole())) return false, true, false, false, nil } else if time.Now().After(planAction.CreationTime.Add(GetActionTimeout(d.context.GetSpec(), planAction.Type))) { - log.Warn().Msg("Action not finished in time. Removing the entire plan") + d.planLogger.Warn("Action not finished in time. Removing the entire plan") d.context.CreateEvent(k8sutil.NewPlanTimeoutEvent(d.context.GetAPIObject(), string(planAction.Type), planAction.MemberID, planAction.Group.AsRole())) return false, true, false, false, nil } @@ -341,13 +317,13 @@ func (d *Reconciler) executeAction(ctx context.Context, log zerolog.Logger, plan } // createAction create action object based on action type -func (d *Reconciler) createAction(log zerolog.Logger, action api.Action) (Action, ActionContext) { - actionCtx := newActionContext(log.With().Str("id", action.ID).Str("type", action.Type.String()).Logger(), d.context) +func (d *Reconciler) createAction(action api.Action) (Action, ActionContext) { + actionCtx := newActionContext(d.log, d.context) f, ok := getActionFactory(action.Type) if !ok { panic(fmt.Sprintf("Unknown action type '%s'", action.Type)) } - return f(log, action, actionCtx), actionCtx + return f(action, actionCtx), actionCtx } diff --git a/pkg/deployment/reconcile/reconciler.go b/pkg/deployment/reconcile/reconciler.go index e1e7b8c0a..be3a902e6 100644 --- a/pkg/deployment/reconcile/reconciler.go +++ b/pkg/deployment/reconcile/reconciler.go @@ -27,21 +27,34 @@ import ( meta "k8s.io/apimachinery/pkg/apis/meta/v1" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" ) +var reconcileLogger = logging.Global().RegisterAndGetLogger("deployment-reconcile", logging.Info) + // Reconciler is the service that takes care of bring the a deployment // in line with its (changed) specification. type Reconciler struct { - log zerolog.Logger - context Context + namespace, name string + log logging.Logger + planLogger logging.Logger + context Context } // NewReconciler creates a new reconciler with given context. 
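The reconciler hunks above replace the injected zerolog.Logger with a package-registered logger that each Reconciler re-wraps with its own namespace/name fields, plus a derived "section"="plan" child used by the plan code. A condensed sketch of that wiring, restricted to calls that appear elsewhere in this patch (RegisterAndGetLogger, WrapObj, Str); the logger name "example" and the Demo type are invented for illustration:

package demo

import (
	"github.com/arangodb/kube-arangodb/pkg/logging"
	"github.com/rs/zerolog"
)

var exampleLogger = logging.Global().RegisterAndGetLogger("example", logging.Info)

type Demo struct {
	namespace, name string
	log             logging.Logger
	planLog         logging.Logger
}

// WrapLogger stamps every event with the owning deployment, mirroring Reconciler.WrapLogger.
func (d *Demo) WrapLogger(in *zerolog.Event) *zerolog.Event {
	return in.Str("namespace", d.namespace).Str("name", d.name)
}

func NewDemo(namespace, name string) *Demo {
	d := &Demo{namespace: namespace, name: name}
	d.log = exampleLogger.WrapObj(d)         // per-deployment fields on every message
	d.planLog = d.log.Str("section", "plan") // derived child, like planLogger above
	return d
}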
-func NewReconciler(log zerolog.Logger, context Context) *Reconciler { - return &Reconciler{ - log: log, - context: context, +func NewReconciler(namespace, name string, context Context) *Reconciler { + r := &Reconciler{ + context: context, + namespace: namespace, + name: name, } + r.log = reconcileLogger.WrapObj(r) + r.planLogger = r.log.Str("section", "plan") + return r +} + +func (r *Reconciler) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", r.namespace).Str("name", r.name) } // CheckDeployment checks for obviously broken things and fixes them immediately @@ -52,21 +65,21 @@ func (r *Reconciler) CheckDeployment(ctx context.Context) error { if spec.GetMode().HasCoordinators() { // Check if there are coordinators if status.Members.Coordinators.AllFailed() { - r.log.Error().Msg("All coordinators failed - reset") + r.log.Error("All coordinators failed - reset") for _, m := range status.Members.Coordinators { cache, ok := r.context.ACS().ClusterCache(m.ClusterID) if !ok { - r.log.Warn().Msg("Cluster is not ready") + r.log.Warn("Cluster is not ready") continue } if err := cache.Client().Kubernetes().CoreV1().Secrets(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil { - r.log.Error().Err(err).Msg("Failed to delete pod") + r.log.Err(err).Error("Failed to delete pod") } m.Phase = api.MemberPhaseNone if err := r.context.UpdateMember(ctx, m); err != nil { - r.log.Error().Err(err).Msg("Failed to update member") + r.log.Err(err).Error("Failed to update member") } } } diff --git a/pkg/deployment/reconcile/utils_test.go b/pkg/deployment/reconcile/utils_test.go new file mode 100644 index 000000000..621b4b71c --- /dev/null +++ b/pkg/deployment/reconcile/utils_test.go @@ -0,0 +1,35 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package reconcile + +import ( + "github.com/arangodb/kube-arangodb/pkg/logging" +) + +var testLF = logging.NewDefaultFactory() +var testLogger = testLF.RegisterAndGetLogger("test", logging.Debug) + +func newTestReconciler() *Reconciler { + return &Reconciler{ + log: testLogger, + planLogger: testLogger, + } +} diff --git a/pkg/deployment/resilience/logger.go b/pkg/deployment/resilience/logger.go new file mode 100644 index 000000000..b26d1613e --- /dev/null +++ b/pkg/deployment/resilience/logger.go @@ -0,0 +1,38 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package resilience + +import ( + "github.com/arangodb/kube-arangodb/pkg/logging" + "github.com/rs/zerolog" +) + +var ( + logger = logging.Global().RegisterAndGetLogger("deployment-resilience", logging.Info) +) + +func (d *Resilience) log(section string) logging.Logger { + return logger.Wrap(d.logWrap).Str("section", section) +} + +func (d *Resilience) logWrap(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", d.namespace).Str("name", d.name) +} diff --git a/pkg/deployment/resilience/member_failure.go b/pkg/deployment/resilience/member_failure.go index 66abe4dbe..5974f38b5 100644 --- a/pkg/deployment/resilience/member_failure.go +++ b/pkg/deployment/resilience/member_failure.go @@ -47,10 +47,9 @@ func (r *Resilience) CheckMemberFailure(ctx context.Context) error { updateStatusNeeded := false if err := status.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error { for _, m := range list { - log := r.log.With(). + log := r.log("member-failure"). Str("id", m.ID). - Str("role", group.AsRole()). - Logger() + Str("role", group.AsRole()) // Check if there are Members with Phase Upgrading or Rotation but no plan switch m.Phase { @@ -58,7 +57,7 @@ func (r *Resilience) CheckMemberFailure(ctx context.Context) error { continue case api.MemberPhaseUpgrading, api.MemberPhaseRotating, api.MemberPhaseCleanOut, api.MemberPhaseRotateStart: if len(status.Plan) == 0 { - log.Error().Msgf("No plan but member is in phase %s - marking as failed", m.Phase) + log.Error("No plan but member is in phase %s - marking as failed", m.Phase) m.Phase = api.MemberPhaseFailed status.Members.Update(m, group) updateStatusNeeded = true @@ -78,14 +77,14 @@ func (r *Resilience) CheckMemberFailure(ctx context.Context) error { failureAcceptable, reason, err := r.isMemberFailureAcceptable(ctx, group, m) if err != nil { - log.Warn().Err(err).Msg("Failed to check is member failure is acceptable") + log.Err(err).Warn("Failed to check is member failure is acceptable") } else if failureAcceptable { - log.Info().Msg("Member is not ready for long time, marking is failed") + log.Info("Member is not ready for long time, marking is failed") m.Phase = api.MemberPhaseFailed status.Members.Update(m, group) updateStatusNeeded = true } else { - log.Warn().Msgf("Member is not ready for long time, but it is not safe to mark it a failed because: %s", reason) + log.Warn("Member is not ready for long time, but it is not safe to mark it a failed because: %s", reason) } } } @@ -97,14 +96,14 @@ func (r *Resilience) CheckMemberFailure(ctx context.Context) error { // Member has terminated too often in recent history. 
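A pattern worth noting in the member-failure hunks above: with the new logger, fields are chained first and the level method comes last, taking printf-style arguments, which is why call sites flip from log.Warn().Err(err).Msgf(...) to log.Err(err).Warn(...). A small sketch of that call shape using only methods seen in this patch (Str, Err, Warn); the logger name and the argument values are placeholders:

package demo

import (
	"errors"

	"github.com/arangodb/kube-arangodb/pkg/logging"
)

var memberLogger = logging.Global().RegisterAndGetLogger("example-member", logging.Info)

func reportUnsafeFailure(id, role, reason string, err error) {
	// Fields first, terminal level last with format args.
	memberLogger.Str("id", id).Str("role", role).Err(err).
		Warn("Member is not ready, but it is not safe to mark it as failed because: %s", reason)
}

func example() {
	reportUnsafeFailure("PRMR-1", "dbserver", "not enough in-sync shard replicas", errors.New("health check timed out"))
}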
failureAcceptable, reason, err := r.isMemberFailureAcceptable(ctx, group, m) if err != nil { - log.Warn().Err(err).Msg("Failed to check is member failure is acceptable") + log.Err(err).Warn("Failed to check is member failure is acceptable") } else if failureAcceptable { - log.Info().Msg("Member has terminated too often in recent history, marking is failed") + log.Info("Member has terminated too often in recent history, marking is failed") m.Phase = api.MemberPhaseFailed status.Members.Update(m, group) updateStatusNeeded = true } else { - log.Warn().Msgf("Member has terminated too often in recent history, but it is not safe to mark it a failed because: %s", reason) + log.Warn("Member has terminated too often in recent history, but it is not safe to mark it a failed because: %s", reason) } } } diff --git a/pkg/deployment/resilience/resilience.go b/pkg/deployment/resilience/resilience.go index 3864f9d75..256aab7db 100644 --- a/pkg/deployment/resilience/resilience.go +++ b/pkg/deployment/resilience/resilience.go @@ -20,19 +20,18 @@ package resilience -import "github.com/rs/zerolog" - // Resilience is the service that inspects the overall state of the deployment // to improve resilience. type Resilience struct { - log zerolog.Logger - context Context + namespace, name string + context Context } // NewResilience creates a new resilience with given context. -func NewResilience(log zerolog.Logger, context Context) *Resilience { +func NewResilience(namespace, name string, context Context) *Resilience { return &Resilience{ - log: log, - context: context, + namespace: namespace, + name: name, + context: context, } } diff --git a/pkg/deployment/resources/annotations.go b/pkg/deployment/resources/annotations.go index f1981c6d2..a6acdd8a3 100644 --- a/pkg/deployment/resources/annotations.go +++ b/pkg/deployment/resources/annotations.go @@ -24,7 +24,6 @@ import ( "context" monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1" - "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" policyv1beta1 "k8s.io/api/policy/v1beta1" @@ -43,7 +42,9 @@ import ( type PatchFunc func(name string, d []byte) error func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { - log.Info().Msgf("Ensuring annotations") + log := r.log.Str("section", "annotations") + + log.Info("Ensuring annotations") patchSecret := func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { @@ -53,7 +54,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto }) } - if err := ensureSecretsAnnotations(patchSecret, + if err := r.ensureSecretsAnnotations(patchSecret, cachedStatus, deployment.ArangoDeploymentResourceKind, r.context.GetAPIObject().GetName(), @@ -70,7 +71,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto }) } - if err := ensureServiceAccountsAnnotations(patchServiceAccount, + if err := r.ensureServiceAccountsAnnotations(patchServiceAccount, cachedStatus, deployment.ArangoDeploymentResourceKind, r.context.GetAPIObject().GetName(), @@ -87,7 +88,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto }) } - if err := ensureServicesAnnotations(patchService, + if err := r.ensureServicesAnnotations(patchService, cachedStatus, deployment.ArangoDeploymentResourceKind, r.context.GetAPIObject().GetName(), @@ -111,7 +112,7 @@ func (r *Resources) 
EnsureAnnotations(ctx context.Context, cachedStatus inspecto }) } - if err := ensurePdbsAnnotations(patchPDB, + if err := r.ensurePdbsAnnotations(patchPDB, cachedStatus, deployment.ArangoDeploymentResourceKind, r.context.GetAPIObject().GetName(), @@ -128,7 +129,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto }) } - if err := ensurePvcsAnnotations(patchPVC, + if err := r.ensurePvcsAnnotations(patchPVC, cachedStatus, deployment.ArangoDeploymentResourceKind, r.context.GetAPIObject().GetName(), @@ -145,7 +146,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto }) } - if err := ensurePodsAnnotations(patchPod, + if err := r.ensurePodsAnnotations(patchPod, cachedStatus, deployment.ArangoDeploymentResourceKind, r.context.GetAPIObject().GetName(), @@ -162,7 +163,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto }) } - if err := ensureServiceMonitorsAnnotations(patchServiceMonitor, + if err := r.ensureServiceMonitorsAnnotations(patchServiceMonitor, cachedStatus, deployment.ArangoDeploymentResourceKind, r.context.GetAPIObject().GetName(), @@ -174,9 +175,9 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto return nil } -func ensureSecretsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { +func (r *Resources) ensureSecretsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { if err := cachedStatus.Secret().V1().Iterate(func(secret *core.Secret) error { - ensureAnnotationsMap(secret.Kind, secret, spec, patch) + r.ensureAnnotationsMap(secret.Kind, secret, spec, patch) return nil }, func(secret *core.Secret) bool { return k8sutil.IsChildResource(kind, name, namespace, secret) @@ -187,9 +188,9 @@ func ensureSecretsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.I return nil } -func ensureServiceAccountsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { +func (r *Resources) ensureServiceAccountsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { if err := cachedStatus.ServiceAccount().V1().Iterate(func(serviceAccount *core.ServiceAccount) error { - ensureAnnotationsMap(serviceAccount.Kind, serviceAccount, spec, patch) + r.ensureAnnotationsMap(serviceAccount.Kind, serviceAccount, spec, patch) return nil }, func(serviceAccount *core.ServiceAccount) bool { return k8sutil.IsChildResource(kind, name, namespace, serviceAccount) @@ -200,9 +201,9 @@ func ensureServiceAccountsAnnotations(patch PatchFunc, cachedStatus inspectorInt return nil } -func ensureServicesAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { +func (r *Resources) ensureServicesAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { if err := cachedStatus.Service().V1().Iterate(func(service *core.Service) error { - ensureAnnotationsMap(service.Kind, service, spec, patch) + r.ensureAnnotationsMap(service.Kind, service, spec, patch) return nil }, func(service *core.Service) bool { return k8sutil.IsChildResource(kind, name, namespace, service) @@ -213,11 +214,11 @@ func ensureServicesAnnotations(patch PatchFunc, 
cachedStatus inspectorInterface. return nil } -func ensurePdbsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, +func (r *Resources) ensurePdbsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { if inspector, err := cachedStatus.PodDisruptionBudget().V1(); err == nil { if err := inspector.Iterate(func(podDisruptionBudget *policyv1.PodDisruptionBudget) error { - ensureAnnotationsMap(podDisruptionBudget.Kind, podDisruptionBudget, spec, patch) + r.ensureAnnotationsMap(podDisruptionBudget.Kind, podDisruptionBudget, spec, patch) return nil }, func(podDisruptionBudget *policyv1.PodDisruptionBudget) bool { return k8sutil.IsChildResource(kind, name, namespace, podDisruptionBudget) @@ -233,7 +234,7 @@ func ensurePdbsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Insp return err } if err := inspector.Iterate(func(podDisruptionBudget *policyv1beta1.PodDisruptionBudget) error { - ensureAnnotationsMap(podDisruptionBudget.Kind, podDisruptionBudget, spec, patch) + r.ensureAnnotationsMap(podDisruptionBudget.Kind, podDisruptionBudget, spec, patch) return nil }, func(podDisruptionBudget *policyv1beta1.PodDisruptionBudget) bool { return k8sutil.IsChildResource(kind, name, namespace, podDisruptionBudget) @@ -244,9 +245,9 @@ func ensurePdbsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Insp return nil } -func ensurePvcsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { +func (r *Resources) ensurePvcsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(persistentVolumeClaim *core.PersistentVolumeClaim) error { - ensureGroupAnnotationsMap(persistentVolumeClaim.Kind, persistentVolumeClaim, spec, patch) + r.ensureGroupAnnotationsMap(persistentVolumeClaim.Kind, persistentVolumeClaim, spec, patch) return nil }, func(persistentVolumeClaim *core.PersistentVolumeClaim) bool { return k8sutil.IsChildResource(kind, name, namespace, persistentVolumeClaim) @@ -257,7 +258,7 @@ func ensurePvcsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Insp return nil } -func ensureServiceMonitorsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { +func (r *Resources) ensureServiceMonitorsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { i, err := cachedStatus.ServiceMonitor().V1() if err != nil { if k8sutil.IsForbiddenOrNotFound(err) { @@ -266,7 +267,7 @@ func ensureServiceMonitorsAnnotations(patch PatchFunc, cachedStatus inspectorInt return err } if err := i.Iterate(func(serviceMonitor *monitoring.ServiceMonitor) error { - ensureAnnotationsMap(serviceMonitor.Kind, serviceMonitor, spec, patch) + r.ensureAnnotationsMap(serviceMonitor.Kind, serviceMonitor, spec, patch) return nil }, func(serviceMonitor *monitoring.ServiceMonitor) bool { return k8sutil.IsChildResource(kind, name, namespace, serviceMonitor) @@ -291,10 +292,10 @@ func getObjectGroup(obj meta.Object) api.ServerGroup { return api.ServerGroupFromRole(group) } -func ensurePodsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { +func 
(r *Resources) ensurePodsAnnotations(patch PatchFunc, cachedStatus inspectorInterface.Inspector, kind, name, namespace string, spec api.DeploymentSpec) error { if err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error { - ensureGroupAnnotationsMap(pod.Kind, pod, spec, patch) + r.ensureGroupAnnotationsMap(pod.Kind, pod, spec, patch) return nil }, func(pod *core.Pod) bool { return k8sutil.IsChildResource(kind, name, namespace, pod) @@ -319,7 +320,7 @@ func getDefaultMode(annotations map[string]string) api.LabelsMode { return api.LabelsReplaceMode } -func ensureGroupLabelsMap(kind string, obj meta.Object, spec api.DeploymentSpec, +func (r *Resources) ensureGroupLabelsMap(kind string, obj meta.Object, spec api.DeploymentSpec, patchCmd func(name string, d []byte) error) bool { group := getObjectGroup(obj) groupSpec := spec.GetServerGroupSpec(group) @@ -329,20 +330,20 @@ func ensureGroupLabelsMap(kind string, obj meta.Object, spec api.DeploymentSpec, mode := groupSpec.LabelsMode.Get(spec.LabelsMode.Get(getDefaultMode(expected))) - return ensureObjectMap(kind, obj, mode, expected, obj.GetLabels(), collection.LabelsPatch, patchCmd, ignoredList...) + return r.ensureObjectMap(kind, obj, mode, expected, obj.GetLabels(), collection.LabelsPatch, patchCmd, ignoredList...) } -func ensureLabelsMap(kind string, obj meta.Object, spec api.DeploymentSpec, +func (r *Resources) ensureLabelsMap(kind string, obj meta.Object, spec api.DeploymentSpec, patchCmd func(name string, d []byte) error) bool { expected := spec.Labels ignored := spec.LabelsIgnoreList mode := spec.LabelsMode.Get(getDefaultMode(expected)) - return ensureObjectMap(kind, obj, mode, expected, obj.GetLabels(), collection.LabelsPatch, patchCmd, ignored...) + return r.ensureObjectMap(kind, obj, mode, expected, obj.GetLabels(), collection.LabelsPatch, patchCmd, ignored...) } -func ensureGroupAnnotationsMap(kind string, obj meta.Object, spec api.DeploymentSpec, +func (r *Resources) ensureGroupAnnotationsMap(kind string, obj meta.Object, spec api.DeploymentSpec, patchCmd func(name string, d []byte) error) { group := getObjectGroup(obj) groupSpec := spec.GetServerGroupSpec(group) @@ -352,39 +353,41 @@ func ensureGroupAnnotationsMap(kind string, obj meta.Object, spec api.Deployment mode := groupSpec.AnnotationsMode.Get(spec.AnnotationsMode.Get(getDefaultMode(expected))) - ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignoredList...) + r.ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignoredList...) } -func ensureAnnotationsMap(kind string, obj meta.Object, spec api.DeploymentSpec, patchCmd PatchFunc) { +func (r *Resources) ensureAnnotationsMap(kind string, obj meta.Object, spec api.DeploymentSpec, patchCmd PatchFunc) { expected := spec.Annotations ignored := spec.AnnotationsIgnoreList mode := spec.AnnotationsMode.Get(getDefaultMode(expected)) - ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignored...) + r.ensureObjectMap(kind, obj, mode, expected, obj.GetAnnotations(), collection.AnnotationsPatch, patchCmd, ignored...) 
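The annotations helpers above all funnel their changes through the PatchFunc callback declared earlier in this file, keeping the iteration logic independent of which Kubernetes client actually applies the patch. A toy PatchFunc for illustration only; the real closures wrap the inspector's *ModInterface Patch calls in a global timeout, and the JSON patch payload below is a made-up example:

package demo

import "fmt"

// PatchFunc mirrors the callback type declared in annotations.go.
type PatchFunc func(name string, d []byte) error

// logPatch is a stand-in patcher that only prints what it would do.
func logPatch(kind string) PatchFunc {
	return func(name string, d []byte) error {
		fmt.Printf("would PATCH %s/%s with %d bytes of JSON patch\n", kind, name, len(d))
		return nil
	}
}

func example() error {
	patchSecret := logPatch("Secret")
	return patchSecret("example-ca", []byte(`[{"op":"add","path":"/metadata/annotations/owner","value":"example"}]`))
}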
} -func ensureObjectMap(kind string, obj meta.Object, mode api.LabelsMode, +func (r *Resources) ensureObjectMap(kind string, obj meta.Object, mode api.LabelsMode, expected, actual map[string]string, patchGetter func(mode api.LabelsMode, expected map[string]string, actual map[string]string, ignored ...string) patch.Patch, patchCmd PatchFunc, ignored ...string) bool { p := patchGetter(mode, expected, actual, ignored...) + log := r.log.Str("section", "annotations") + if len(p) == 0 { return false } - log.Info().Msgf("Replacing annotations for %s %s", kind, obj.GetName()) + log.Info("Replacing annotations for %s %s", kind, obj.GetName()) d, err := p.Marshal() if err != nil { - log.Warn().Err(err).Msgf("Unable to marshal kubernetes patch instruction") + log.Err(err).Warn("Unable to marshal kubernetes patch instruction") return false } if err := patchCmd(obj.GetName(), d); err != nil { - log.Warn().Err(err).Msgf("Unable to patch Pod") + log.Err(err).Warn("Unable to patch Pod") return false } diff --git a/pkg/deployment/resources/certicicates.go b/pkg/deployment/resources/certicicates.go index dcffeb141..2309cf8d7 100644 --- a/pkg/deployment/resources/certicicates.go +++ b/pkg/deployment/resources/certicicates.go @@ -31,7 +31,6 @@ import ( "github.com/arangodb-helper/go-certificates" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" ) @@ -83,7 +82,8 @@ func (c Certificates) AsCertPool() *x509.CertPool { return cp } -func GetCertsFromData(log zerolog.Logger, caPem []byte) Certificates { +func (r *Resources) GetCertsFromData(caPem []byte) Certificates { + log := r.log.Str("section", "tls") certs := make([]*x509.Certificate, 0, 2) for { @@ -97,7 +97,7 @@ func GetCertsFromData(log zerolog.Logger, caPem []byte) Certificates { cert, err := x509.ParseCertificate(pem.Bytes) if err != nil { // This error should be ignored - log.Error().Err(err).Msg("Unable to parse certificate") + log.Err(err).Error("Unable to parse certificate") continue } @@ -107,25 +107,25 @@ func GetCertsFromData(log zerolog.Logger, caPem []byte) Certificates { return certs } -func GetCertsFromSecret(log zerolog.Logger, secret *core.Secret) Certificates { +func (r *Resources) GetCertsFromSecret(secret *core.Secret) Certificates { caPem, exists := secret.Data[core.ServiceAccountRootCAKey] if !exists { return nil } - return GetCertsFromData(log, caPem) + return r.GetCertsFromData(caPem) } -func GetKeyCertFromCache(log zerolog.Logger, cachedStatus inspectorInterface.Inspector, spec api.DeploymentSpec, certName, keyName string) (Certificates, interface{}, error) { +func (r *Resources) GetKeyCertFromCache(cachedStatus inspectorInterface.Inspector, spec api.DeploymentSpec, certName, keyName string) (Certificates, interface{}, error) { caSecret, exists := cachedStatus.Secret().V1().GetSimple(spec.TLS.GetCASecretName()) if !exists { return nil, nil, errors.Newf("CA Secret does not exists") } - return GetKeyCertFromSecret(log, caSecret, keyName, certName) + return GetKeyCertFromSecret(caSecret, keyName, certName) } -func GetKeyCertFromSecret(log zerolog.Logger, secret *core.Secret, certName, keyName string) (Certificates, interface{}, error) { +func GetKeyCertFromSecret(secret *core.Secret, certName, keyName string) (Certificates, interface{}, error) { ca, exists := secret.Data[certName] if !exists { return nil, nil, errors.Newf("Key %s missing in secret", certName) diff --git a/pkg/deployment/resources/certificates_client_auth.go 
b/pkg/deployment/resources/certificates_client_auth.go index 153a69639..e5e8c51c3 100644 --- a/pkg/deployment/resources/certificates_client_auth.go +++ b/pkg/deployment/resources/certificates_client_auth.go @@ -25,7 +25,6 @@ import ( "fmt" "time" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" certificates "github.com/arangodb-helper/go-certificates" @@ -41,8 +40,8 @@ const ( // createClientAuthCACertificate creates a client authentication CA certificate and stores it in a secret with name // specified in the given spec. -func createClientAuthCACertificate(ctx context.Context, log zerolog.Logger, secrets secretv1.ModInterface, spec api.SyncAuthenticationSpec, deploymentName string, ownerRef *meta.OwnerReference) error { - log = log.With().Str("secret", spec.GetClientCASecretName()).Logger() +func (r *Resources) createClientAuthCACertificate(ctx context.Context, secrets secretv1.ModInterface, spec api.SyncAuthenticationSpec, deploymentName string, ownerRef *meta.OwnerReference) error { + log := r.log.Str("section", "secrets") options := certificates.CreateCertificateOptions{ CommonName: fmt.Sprintf("%s Client Authentication Root Certificate", deploymentName), ValidFrom: time.Now(), @@ -53,17 +52,17 @@ func createClientAuthCACertificate(ctx context.Context, log zerolog.Logger, secr } cert, priv, err := certificates.CreateCertificate(options, nil) if err != nil { - log.Debug().Err(err).Msg("Failed to create CA certificate") + log.Err(err).Str("name", spec.GetClientCASecretName()).Debug("Failed to create CA certificate") return errors.WithStack(err) } if err := k8sutil.CreateCASecret(ctx, secrets, spec.GetClientCASecretName(), cert, priv, ownerRef); err != nil { if k8sutil.IsAlreadyExists(err) { - log.Debug().Msg("CA Secret already exists") + log.Debug("CA Secret already exists") } else { - log.Debug().Err(err).Msg("Failed to create CA Secret") + log.Err(err).Str("name", spec.GetClientCASecretName()).Debug("Failed to create CA Secret") } return errors.WithStack(err) } - log.Debug().Msg("Created CA Secret") + log.Str("name", spec.GetClientCASecretName()).Debug("Created CA Secret") return nil } diff --git a/pkg/deployment/resources/certificates_tls.go b/pkg/deployment/resources/certificates_tls.go index 202038dee..2ce39b301 100644 --- a/pkg/deployment/resources/certificates_tls.go +++ b/pkg/deployment/resources/certificates_tls.go @@ -35,9 +35,9 @@ import ( certificates "github.com/arangodb-helper/go-certificates" api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" secretv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret/v1" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -48,9 +48,9 @@ const ( // createTLSCACertificate creates a CA certificate and stores it in a secret with name // specified in the given spec. 
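In the certificate hunks above, the secret name moves from a zerolog context (log.With().Str("secret", ...).Logger()) onto the new logger chain, next to a "section" field identifying the subsystem. A small sketch of that logging shape, using only chain methods visible in this patch; the logger name, storeSecret, and its error handling are placeholders for the real k8sutil.CreateCASecret flow:

package demo

import "github.com/arangodb/kube-arangodb/pkg/logging"

var certLogger = logging.Global().RegisterAndGetLogger("example-resources", logging.Info)

// storeSecret is a placeholder for k8sutil.CreateCASecret.
func storeSecret(name string) error { return nil }

func createCASecret(secretName string) error {
	log := certLogger.Str("section", "tls").Str("secret", secretName)

	if err := storeSecret(secretName); err != nil {
		log.Err(err).Debug("Failed to create CA Secret")
		return err
	}
	log.Debug("Created CA Secret")
	return nil
}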
-func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets secretv1.ModInterface, spec api.TLSSpec, +func (r *Resources) createTLSCACertificate(ctx context.Context, secrets secretv1.ModInterface, spec api.TLSSpec, deploymentName string, ownerRef *meta.OwnerReference) error { - log = log.With().Str("secret", spec.GetCASecretName()).Logger() + log := r.log.Str("section", "tls").Str("secret", spec.GetCASecretName()) options := certificates.CreateCertificateOptions{ CommonName: fmt.Sprintf("%s Root Certificate", deploymentName), @@ -61,37 +61,37 @@ func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets sec } cert, priv, err := certificates.CreateCertificate(options, nil) if err != nil { - log.Debug().Err(err).Msg("Failed to create CA certificate") + log.Err(err).Debug("Failed to create CA certificate") return errors.WithStack(err) } if err := k8sutil.CreateCASecret(ctx, secrets, spec.GetCASecretName(), cert, priv, ownerRef); err != nil { if k8sutil.IsAlreadyExists(err) { - log.Debug().Msg("CA Secret already exists") + log.Debug("CA Secret already exists") } else { - log.Debug().Err(err).Msg("Failed to create CA Secret") + log.Err(err).Debug("Failed to create CA Secret") } return errors.WithStack(err) } - log.Debug().Msg("Created CA Secret") + log.Debug("Created CA Secret") return nil } // createTLSServerCertificate creates a TLS certificate for a specific server and stores // it in a secret with the given name. -func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, names tls.KeyfileInput, spec api.TLSSpec, +func createTLSServerCertificate(ctx context.Context, log logging.Logger, cachedStatus inspectorInterface.Inspector, secrets secretv1.ModInterface, names tls.KeyfileInput, spec api.TLSSpec, secretName string, ownerRef *meta.OwnerReference) (bool, error) { - log = log.With().Str("secret", secretName).Logger() + log = log.Str("secret", secretName) // Load CA certificate ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) defer cancel() caCert, caKey, _, err := k8sutil.GetCASecret(ctxChild, cachedStatus.Secret().V1().Read(), spec.GetCASecretName(), nil) if err != nil { - log.Debug().Err(err).Msg("Failed to load CA certificate") + log.Err(err).Debug("Failed to load CA certificate") return false, errors.WithStack(err) } ca, err := certificates.LoadCAFromPEM(caCert, caKey) if err != nil { - log.Debug().Err(err).Msg("Failed to decode CA certificate") + log.Err(err).Debug("Failed to decode CA certificate") return false, errors.WithStack(err) } @@ -106,7 +106,7 @@ func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, cachedS } cert, priv, err := certificates.CreateCertificate(options, &ca) if err != nil { - log.Debug().Err(err).Msg("Failed to create server certificate") + log.Err(err).Debug("Failed to create server certificate") return false, errors.WithStack(err) } keyfile := strings.TrimSpace(cert) + "\n" + @@ -117,12 +117,12 @@ func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, cachedS }) if err != nil { if k8sutil.IsAlreadyExists(err) { - log.Debug().Msg("Server Secret already exists") + log.Debug("Server Secret already exists") } else { - log.Debug().Err(err).Msg("Failed to create server Secret") + log.Err(err).Debug("Failed to create server Secret") } return false, errors.WithStack(err) } - log.Debug().Msg("Created server Secret") + log.Debug("Created server Secret") return true, nil } diff --git 
a/pkg/deployment/resources/inspector/inspector.go b/pkg/deployment/resources/inspector/inspector.go index 8b3eef5a7..babe9e72b 100644 --- a/pkg/deployment/resources/inspector/inspector.go +++ b/pkg/deployment/resources/inspector/inspector.go @@ -47,7 +47,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/throttle" "github.com/arangodb/kube-arangodb/pkg/util/kclient" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime/schema" @@ -57,9 +56,15 @@ const ( DefaultVersion = "" ) +func init() { + logging.Global().RegisterLogger("inspector", logging.Info) +} + var ( inspectorLoadersList inspectorLoaders inspectorLoadersLock sync.Mutex + + logger = logging.Global().Get("inspector") ) func requireRegisterInspectorLoader(i inspectorLoader) { @@ -119,7 +124,6 @@ func NewInspector(throttles throttle.Components, client kclient.Client, namespac deploymentName: deploymentName, client: client, throttles: throttles, - logger: logging.GlobalLogger().MustGetLogger(logging.LoggerNameInspector), } return i @@ -142,8 +146,6 @@ type inspectorState struct { last time.Time - logger zerolog.Logger - pods *podsInspector secrets *secretsInspector persistentVolumeClaims *persistentVolumeClaimsInspector @@ -339,15 +341,17 @@ func (i *inspectorState) refreshInThreads(ctx context.Context, threads int, load n.versionInfo = driver.Version(strings.TrimPrefix(v.GitVersion, "v")) } + logger := logger.Str("namespace", i.namespace).Str("name", i.deploymentName) + start := time.Now() - i.logger.Debug().Msg("Pre-inspector refresh start") + logger.Debug("Pre-inspector refresh start") d, err := i.client.Arango().DatabaseV1().ArangoDeployments(i.namespace).Get(context.Background(), i.deploymentName, meta.GetOptions{}) n.deploymentResult = &inspectorStateDeploymentResult{ depl: d, err: err, } - i.logger.Debug().Msg("Inspector refresh start") + logger.Debug("Inspector refresh start") for id := range loaders { go func(id int) { @@ -358,14 +362,14 @@ func (i *inspectorState) refreshInThreads(ctx context.Context, threads int, load t := n.throttles.Get(c) if !t.Throttle() { - i.logger.Debug().Str("component", string(c)).Msg("Inspector refresh skipped") + logger.Str("component", string(c)).Debug("Inspector refresh skipped") return } - i.logger.Debug().Str("component", string(c)).Msg("Inspector refresh") + logger.Str("component", string(c)).Debug("Inspector refresh") defer func() { - i.logger.Debug().Str("component", string(c)).Str("duration", time.Since(start).String()).Msg("Inspector done") + logger.Str("component", string(c)).SinceStart("duration", start).Debug("Inspector done") t.Delay() }() @@ -380,7 +384,7 @@ func (i *inspectorState) refreshInThreads(ctx context.Context, threads int, load m.Wait() - i.logger.Debug().Str("duration", time.Since(start).String()).Msg("Inspector refresh done") + logger.SinceStart("duration", start).Debug("Inspector refresh done") for id := range loaders { if err := loaders[id].Verify(n); err != nil { @@ -478,6 +482,5 @@ func (i *inspectorState) copyCore() *inspectorState { versionInfo: i.versionInfo, endpoints: i.endpoints, deploymentResult: i.deploymentResult, - logger: i.logger, } } diff --git a/pkg/deployment/resources/labels.go b/pkg/deployment/resources/labels.go index 5e16d9a09..f2d72ead4 100644 --- a/pkg/deployment/resources/labels.go +++ b/pkg/deployment/resources/labels.go @@ -38,7 +38,9 @@ import ( ) func (r *Resources) 
EnsureLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { - r.log.Info().Msgf("Ensuring labels") + log := r.log.Str("section", "labels") + + log.Debug("Ensure labels") if err := r.EnsureSecretLabels(ctx, cachedStatus); err != nil { return err @@ -74,7 +76,7 @@ func (r *Resources) EnsureLabels(ctx context.Context, cachedStatus inspectorInte func (r *Resources) EnsureSecretLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false if err := cachedStatus.Secret().V1().Iterate(func(secret *core.Secret) error { - if ensureLabelsMap(secret.Kind, secret, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureLabelsMap(secret.Kind, secret, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.SecretsModInterface().V1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) @@ -101,7 +103,7 @@ func (r *Resources) EnsureSecretLabels(ctx context.Context, cachedStatus inspect func (r *Resources) EnsureServiceAccountsLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false if err := cachedStatus.ServiceAccount().V1().Iterate(func(serviceAccount *core.ServiceAccount) error { - if ensureLabelsMap(serviceAccount.Kind, serviceAccount, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureLabelsMap(serviceAccount.Kind, serviceAccount, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.ServiceAccountsModInterface().V1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) return err @@ -127,7 +129,7 @@ func (r *Resources) EnsureServiceAccountsLabels(ctx context.Context, cachedStatu func (r *Resources) EnsureServicesLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false if err := cachedStatus.Service().V1().Iterate(func(service *core.Service) error { - if ensureLabelsMap(service.Kind, service, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureLabelsMap(service.Kind, service, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.ServicesModInterface().V1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) return err @@ -160,7 +162,7 @@ func (r *Resources) EnsureServiceMonitorsLabels(ctx context.Context, cachedStatu return err } if err := i.Iterate(func(serviceMonitor *monitoring.ServiceMonitor) error { - if ensureLabelsMap(serviceMonitor.Kind, serviceMonitor, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureLabelsMap(serviceMonitor.Kind, serviceMonitor, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.ServiceMonitorsModInterface().V1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) return err @@ -186,7 +188,7 @@ func (r *Resources) EnsureServiceMonitorsLabels(ctx context.Context, cachedStatu func (r *Resources) EnsurePodsLabels(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { changed := false if err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error { - if 
ensureGroupLabelsMap(pod.Kind, pod, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureGroupLabelsMap(pod.Kind, pod, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.PodsModInterface().V1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) return err @@ -213,7 +215,7 @@ func (r *Resources) EnsurePersistentVolumeClaimsLabels(ctx context.Context, cach changed := false actionFn := func(persistentVolumeClaim *core.PersistentVolumeClaim) error { - if ensureGroupLabelsMap(persistentVolumeClaim.Kind, persistentVolumeClaim, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureGroupLabelsMap(persistentVolumeClaim.Kind, persistentVolumeClaim, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.PersistentVolumeClaimsModInterface().V1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) return err @@ -245,7 +247,7 @@ func (r *Resources) EnsurePodDisruptionBudgetsLabels(ctx context.Context, cached if inspector, err := cachedStatus.PodDisruptionBudget().V1(); err == nil { if err := inspector.Iterate(func(budget *policyv1.PodDisruptionBudget) error { - if ensureLabelsMap(budget.Kind, budget, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureLabelsMap(budget.Kind, budget, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.PodDisruptionBudgetsModInterface().V1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) @@ -267,7 +269,7 @@ func (r *Resources) EnsurePodDisruptionBudgetsLabels(ctx context.Context, cached return err } if err := inspector.Iterate(func(budget *policyv1beta1.PodDisruptionBudget) error { - if ensureLabelsMap(budget.Kind, budget, r.context.GetSpec(), func(name string, d []byte) error { + if r.ensureLabelsMap(budget.Kind, budget, r.context.GetSpec(), func(name string, d []byte) error { return globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { _, err := cachedStatus.PodDisruptionBudgetsModInterface().V1Beta1().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{}) diff --git a/pkg/deployment/resources/logger.go b/pkg/deployment/resources/logger.go new file mode 100644 index 000000000..ae70cea47 --- /dev/null +++ b/pkg/deployment/resources/logger.go @@ -0,0 +1,34 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
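The labels and annotations hunks above share one inspector call shape: Iterate(action, filter), where the filter keeps only resources owned by this deployment (k8sutil.IsChildResource) and the action patches the object and records whether anything changed. A generic stdlib-only sketch of that shape; it walks a slice rather than the operator's cached inspector, and the pod type and owner check are invented for the example:

package demo

// iterate mirrors the Iterate(action, filter) call shape used above.
func iterate[T any](items []T, action func(T) error, filter func(T) bool) error {
	for _, item := range items {
		if !filter(item) {
			continue
		}
		if err := action(item); err != nil {
			return err
		}
	}
	return nil
}

type pod struct{ name, owner string }

func example() (changed bool, err error) {
	pods := []pod{{"example-prmr-1", "example"}, {"other-pod", "someone-else"}}
	err = iterate(pods,
		func(p pod) error { changed = true; return nil }, // would patch labels here
		func(p pod) bool { return p.owner == "example" }, // IsChildResource analogue
	)
	return changed, err
}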
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package resources + +import ( + "github.com/arangodb/kube-arangodb/pkg/logging" + "github.com/rs/zerolog" +) + +var ( + logger = logging.Global().RegisterAndGetLogger("deployment-resources", logging.Info) +) + +func (d *Resources) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", d.namespace).Str("name", d.name) +} diff --git a/pkg/deployment/resources/member_cleanup.go b/pkg/deployment/resources/member_cleanup.go index 919751942..1d7de3c7f 100644 --- a/pkg/deployment/resources/member_cleanup.go +++ b/pkg/deployment/resources/member_cleanup.go @@ -48,8 +48,10 @@ var ( // SyncMembersInCluster sets proper condition for all arangod members that belongs to the deployment. func (r *Resources) SyncMembersInCluster(ctx context.Context, health memberState.Health) error { + log := r.log.Str("section", "members") + if health.Error != nil { - r.log.Info().Err(health.Error).Msg("Health of the cluster is missing") + log.Err(health.Error).Info("Health of the cluster is missing") return nil } @@ -71,8 +73,7 @@ func (r *Resources) SyncMembersInCluster(ctx context.Context, health memberState // syncMembersInCluster sets proper condition for all arangod members that are part of the cluster. func (r *Resources) syncMembersInCluster(ctx context.Context, health memberState.Health) error { - log := r.log - + log := r.log.Str("section", "members") serverFound := func(id string) bool { _, found := health.Members[driver.ServerID(id)] return found @@ -87,35 +88,35 @@ func (r *Resources) syncMembersInCluster(ctx context.Context, health memberState return nil } for _, m := range list { - log := log.With().Str("member", m.ID).Str("role", group.AsRole()).Logger() + log := log.Str("member", m.ID).Str("role", group.AsRole()) if serverFound(m.ID) { // Member is (still) found, skip it if m.Conditions.Update(api.ConditionTypeMemberOfCluster, true, "", "") { if err := status.Members.Update(m, group); err != nil { - log.Warn().Err(err).Msg("Failed to update member") + log.Err(err).Warn("Failed to update member") } updateStatusNeeded = true - log.Debug().Msg("Updating MemberOfCluster condition to true") + log.Debug("Updating MemberOfCluster condition to true") } continue } else if !m.Conditions.IsTrue(api.ConditionTypeMemberOfCluster) { if m.Age() < minMemberAge { - log.Debug().Dur("age", m.Age()).Msg("Member is not yet recorded as member of cluster") + log.Dur("age", m.Age()).Debug("Member is not yet recorded as member of cluster") continue } - log.Warn().Msg("Member can not be found in cluster") + log.Warn("Member can not be found in cluster") } else { - log.Info().Msg("Member is no longer part of the ArangoDB cluster") + log.Info("Member is no longer part of the ArangoDB cluster") } } return nil }) if updateStatusNeeded { - log.Debug().Msg("UpdateStatus needed") + log.Debug("UpdateStatus needed") if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil { - log.Warn().Err(err).Msg("Failed to update deployment status") + log.Err(err).Warn("Failed to update deployment status") return errors.WithStack(err) } } diff --git a/pkg/deployment/resources/pdbs.go b/pkg/deployment/resources/pdbs.go index 6d0ad3b67..c732779e8 100644 --- a/pkg/deployment/resources/pdbs.go +++ b/pkg/deployment/resources/pdbs.go @@ -124,7 +124,7 @@ func newPDBV1(minAvail int, deplname string, group api.ServerGroup, owner meta.O func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup, wantedMinAvail int) error { deplName := 
r.context.GetAPIObject().GetName() pdbName := PDBNameForGroup(deplName, group) - log := r.log.With().Str("group", group.AsRole()).Logger() + log := r.log.Str("section", "pdb").Str("group", group.AsRole()) pdbMod := r.context.ACS().CurrentClusterCache().PodDisruptionBudgetsModInterface() for { @@ -158,7 +158,7 @@ func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup if k8sutil.IsNotFound(err) { if wantedMinAvail != 0 { // No PDB found - create new. - log.Debug().Msg("Creating new PDB") + log.Debug("Creating new PDB") err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { var errInternal error @@ -174,7 +174,7 @@ func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup }) if err != nil { - log.Error().Err(err).Msg("failed to create PDB") + log.Err(err).Error("failed to create PDB") return errors.WithStack(err) } } @@ -191,9 +191,9 @@ func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup } // Update for PDBs is forbidden, thus one has to delete it and then create it again // Otherwise delete it if wantedMinAvail is zero - log.Debug().Int("wanted-min-avail", wantedMinAvail). + log.Int("wanted-min-avail", wantedMinAvail). Int("current-min-avail", minAvailable.IntValue()). - Msg("Recreating PDB") + Debug("Recreating PDB") // Trigger deletion only if not already deleted. if deletionTimestamp == nil { @@ -206,18 +206,18 @@ func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup return pdbMod.V1Beta1().Delete(ctxChild, pdbName, meta.DeleteOptions{}) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Error().Err(err).Msg("PDB deletion failed") + log.Err(err).Error("PDB deletion failed") return errors.WithStack(err) } } else { - log.Debug().Msg("PDB already deleted") + log.Debug("PDB already deleted") } // Exit here if deletion was intended if wantedMinAvail == 0 { return nil } - log.Debug().Msg("Retry loop for PDB") + log.Debug("Retry loop for PDB") select { case <-ctx.Done(): return ctx.Err() diff --git a/pkg/deployment/resources/pod_cleanup.go b/pkg/deployment/resources/pod_cleanup.go index b057415b0..c01f07a2f 100644 --- a/pkg/deployment/resources/pod_cleanup.go +++ b/pkg/deployment/resources/pod_cleanup.go @@ -44,7 +44,7 @@ const ( // CleanupTerminatedPods removes all pods in Terminated state that belong to a member in Created state. // Returns: Interval_till_next_inspection, error func (r *Resources) CleanupTerminatedPods(ctx context.Context) (util.Interval, error) { - log := r.log + log := r.log.Str("section", "pod") nextInterval := maxPodInspectorInterval // Large by default, will be made smaller if needed in the rest of the function // Update member status from all pods found @@ -59,7 +59,7 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context) (util.Interval, e // Find member status memberStatus, group, found := status.Members.MemberStatusByPodName(pod.GetName()) if !found { - log.Debug().Str("pod", pod.GetName()).Msg("no memberstatus found for pod. Performing cleanup") + log.Str("pod", pod.GetName()).Debug("no memberstatus found for pod. 
Performing cleanup") } else { spec := r.context.GetSpec() coreContainers := spec.GetCoreContainers(group) @@ -73,7 +73,7 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context) (util.Interval, e if !memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) { if !group.IsStateless() { // For statefull members, we have to wait for confirmed termination - log.Debug().Str("pod", pod.GetName()).Msg("Cannot cleanup pod yet, waiting for it to reach terminated state") + log.Str("pod", pod.GetName()).Debug("Cannot cleanup pod yet, waiting for it to reach terminated state") nextInterval = nextInterval.ReduceTo(recheckStatefullPodCleanupInterval) return nil } else { @@ -89,7 +89,7 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context) (util.Interval, e } // Ok, we can delete the pod - log.Debug().Str("pod-name", pod.GetName()).Msg("Cleanup terminated pod") + log.Str("pod-name", pod.GetName()).Debug("Cleanup terminated pod") options := meta.NewDeleteOptions(0) options.Preconditions = meta.NewUIDPreconditions(string(pod.GetUID())) @@ -97,7 +97,7 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context) (util.Interval, e return item.Cache().Client().Kubernetes().CoreV1().Pods(item.Cache().Namespace()).Delete(ctxChild, pod.GetName(), *options) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Debug().Err(err).Str("pod", pod.GetName()).Msg("Failed to cleanup pod") + log.Err(err).Str("pod", pod.GetName()).Debug("Failed to cleanup pod") return errors.WithStack(err) } diff --git a/pkg/deployment/resources/pod_creator.go b/pkg/deployment/resources/pod_creator.go index 948e685c2..d77cb6741 100644 --- a/pkg/deployment/resources/pod_creator.go +++ b/pkg/deployment/resources/pod_creator.go @@ -328,7 +328,7 @@ func (r *Resources) RenderPodForMemberFromCurrent(ctx context.Context, acs sutil } func (r *Resources) RenderPodForMember(ctx context.Context, acs sutil.ACS, spec api.DeploymentSpec, status api.DeploymentStatus, memberID string, imageInfo api.ImageInfo) (*core.Pod, error) { - log := r.log + log := r.log.Str("section", "member") apiObject := r.context.GetAPIObject() m, group, found := status.Members.ElementByID(memberID) if !found { @@ -383,7 +383,7 @@ func (r *Resources) RenderPodForMember(ctx context.Context, acs sutil.ACS, spec } else if group.IsArangosync() { // Check image if !imageInfo.Enterprise { - log.Debug().Str("image", spec.GetImage()).Msg("Image is not an enterprise image") + log.Str("image", spec.GetImage()).Debug("Image is not an enterprise image") return nil, errors.WithStack(errors.Newf("Image '%s' does not contain an Enterprise version of ArangoDB", spec.GetImage())) } // Check if the sync image is overwritten by the SyncSpec @@ -448,14 +448,14 @@ func (r *Resources) SelectImageForMember(spec api.DeploymentSpec, status api.Dep // createPodForMember creates all Pods listed in member status func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspectorInterface.Inspector, spec api.DeploymentSpec, arangoMember *api.ArangoMember, memberID string, imageNotFoundOnce *sync.Once) error { - log := r.log + log := r.log.Str("section", "member") status, lastVersion := r.context.GetStatus() // Select image imageInfo, imageFound := r.SelectImage(spec, status) if !imageFound { imageNotFoundOnce.Do(func() { - log.Debug().Str("image", spec.GetImage()).Msg("Image ID is not known yet for image") + log.Str("image", spec.GetImage()).Debug("Image ID is not known yet for image") }) return nil } @@ -517,11 +517,11 @@ func (r *Resources) createPodForMember(ctx 
context.Context, cachedStatus inspect // reset old sidecar values to nil m.SideCarSpecs = nil - log.Debug().Str("pod-name", m.PodName).Msg("Created pod") + log.Str("pod-name", m.PodName).Debug("Created pod") if m.Image == nil { - log.Debug().Str("pod-name", m.PodName).Msg("Created pod with default image") + log.Str("pod-name", m.PodName).Debug("Created pod with default image") } else { - log.Debug().Str("pod-name", m.PodName).Msg("Created pod with predefined image") + log.Str("pod-name", m.PodName).Debug("Created pod with predefined image") } } else if group.IsArangosync() { // Check monitoring token secret @@ -547,7 +547,7 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect if err != nil { return errors.WithStack(err) } - log.Debug().Str("pod-name", m.PodName).Msg("Created pod") + log.Str("pod-name", m.PodName).Debug("Created pod") m.PodName = podName m.PodUID = uid @@ -568,7 +568,7 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect } } - r.log.Info().Str("pod", m.PodName).Msgf("Updating member") + log.Str("pod", m.PodName).Info("Updating member") if err := status.Members.Update(m, group); err != nil { return errors.WithStack(err) } @@ -688,6 +688,8 @@ func (r *Resources) EnsurePods(ctx context.Context, cachedStatus inspectorInterf imageNotFoundOnce := &sync.Once{} changed := false + log := r.log.Str("section", "member") + if err := iterator.ForeachServerGroup(func(group api.ServerGroup, groupSpec api.ServerGroupSpec, status *api.MemberStatusList) error { for _, m := range *status { if m.Phase != api.MemberPhasePending { @@ -701,16 +703,16 @@ func (r *Resources) EnsurePods(ctx context.Context, cachedStatus inspectorInterf } if member.Status.Template == nil { - r.log.Warn().Msgf("Missing Template") + log.Warn("Missing Template") // Template is missing, nothing to do continue } - r.log.Warn().Msgf("Ensuring pod") + log.Warn("Ensuring pod") spec := r.context.GetSpec() if err := r.createPodForMember(ctx, cachedStatus, spec, member, m.ID, imageNotFoundOnce); err != nil { - r.log.Warn().Err(err).Msgf("Ensuring pod failed") + log.Err(err).Warn("Ensuring pod failed") return errors.WithStack(err) } diff --git a/pkg/deployment/resources/pod_finalizers.go b/pkg/deployment/resources/pod_finalizers.go index 182cf4565..c0d6f5f2c 100644 --- a/pkg/deployment/resources/pod_finalizers.go +++ b/pkg/deployment/resources/pod_finalizers.go @@ -28,7 +28,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" v1 "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -47,7 +46,7 @@ const ( // runPodFinalizers goes through the list of pod finalizers to see if they can be removed. 
// Returns: Interval_till_next_inspection, error func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) (util.Interval, error) { - log := r.log.With().Str("pod-name", p.GetName()).Logger() + log := r.log.Str("section", "pod").Str("pod-name", p.GetName()) var removalList []string // When the main container is terminated, then the whole pod should be terminated, @@ -58,38 +57,38 @@ func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatu for _, f := range p.ObjectMeta.GetFinalizers() { switch f { case constants.FinalizerPodAgencyServing: - log.Debug().Msg("Inspecting agency-serving finalizer") + log.Debug("Inspecting agency-serving finalizer") if isServerContainerDead { - log.Debug().Msg("Server Container is dead, removing finalizer") + log.Debug("Server Container is dead, removing finalizer") removalList = append(removalList, f) break } - if err := r.inspectFinalizerPodAgencyServing(ctx, log, p, memberStatus, updateMember); err == nil { + if err := r.inspectFinalizerPodAgencyServing(ctx, p, memberStatus, updateMember); err == nil { removalList = append(removalList, f) } else { - log.Debug().Err(err).Str("finalizer", f).Msg("Cannot remove finalizer yet") + log.Err(err).Str("finalizer", f).Debug("Cannot remove finalizer yet") } case constants.FinalizerPodDrainDBServer: - log.Debug().Msg("Inspecting drain dbserver finalizer") + log.Debug("Inspecting drain dbserver finalizer") if isServerContainerDead { - log.Debug().Msg("Server Container is dead, removing finalizer") + log.Debug("Server Container is dead, removing finalizer") removalList = append(removalList, f) break } - if err := r.inspectFinalizerPodDrainDBServer(ctx, log, p, memberStatus, updateMember); err == nil { + if err := r.inspectFinalizerPodDrainDBServer(ctx, p, memberStatus, updateMember); err == nil { removalList = append(removalList, f) } else { - log.Debug().Err(err).Str("finalizer", f).Msg("Cannot remove Pod finalizer yet") + log.Err(err).Str("finalizer", f).Debug("Cannot remove Pod finalizer yet") } case constants.FinalizerPodGracefulShutdown: // We are in graceful shutdown, only one way to remove it is when container is already dead if isServerContainerDead { - log.Debug().Msg("Server Container is dead, removing finalizer") + log.Debug("Server Container is dead, removing finalizer") removalList = append(removalList, f) } case constants.FinalizerDelayPodTermination: if isServerContainerDead { - log.Debug().Msg("Server Container is dead, removing finalizer") + log.Debug("Server Container is dead, removing finalizer") removalList = append(removalList, f) break } @@ -99,14 +98,14 @@ func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatu if !ok { continue } - log.Error().Str("finalizer", f).Msg("Delay finalizer") + log.Str("finalizer", f).Error("Delay finalizer") groupSpec := r.context.GetSpec().GetServerGroupSpec(group) if t := p.ObjectMeta.DeletionTimestamp; t != nil { d := time.Duration(groupSpec.GetShutdownDelay(group)) * time.Second gr := time.Duration(util.Int64OrDefault(p.ObjectMeta.GetDeletionGracePeriodSeconds(), 0)) * time.Second e := t.Time.Add(-1 * gr).Sub(time.Now().Add(-1 * d)) - log.Error().Str("finalizer", f).Str("left", e.String()).Msg("Delay finalizer status") + log.Str("finalizer", f).Str("left", e.String()).Error("Delay finalizer status") if e < 0 || d == 0 { removalList = append(removalList, f) } @@ -117,11 +116,11 @@ func (r *Resources) runPodFinalizers(ctx 
context.Context, p *v1.Pod, memberStatu } // Remove finalizers (if needed) if len(removalList) > 0 { - if _, err := k8sutil.RemovePodFinalizers(ctx, r.context.ACS().CurrentClusterCache(), log, r.context.ACS().CurrentClusterCache().PodsModInterface().V1(), p, removalList, false); err != nil { - log.Debug().Err(err).Msg("Failed to update pod (to remove finalizers)") + if _, err := k8sutil.RemovePodFinalizers(ctx, r.context.ACS().CurrentClusterCache(), r.context.ACS().CurrentClusterCache().PodsModInterface().V1(), p, removalList, false); err != nil { + log.Err(err).Debug("Failed to update pod (to remove finalizers)") return 0, errors.WithStack(err) } - log.Debug().Strs("finalizers", removalList).Msg("Removed finalizer(s) from Pod") + log.Strs("finalizers", removalList...).Debug("Removed finalizer(s) from Pod") // Let's do the next inspection quickly, since things may have changed now. return podFinalizerRemovedInterval, nil } @@ -131,8 +130,9 @@ func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatu // inspectFinalizerPodAgencyServing checks the finalizer condition for agency-serving. // It returns nil if the finalizer can be removed. -func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, log zerolog.Logger, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { - if err := r.prepareAgencyPodTermination(ctx, log, p, memberStatus, func(update api.MemberStatus) error { +func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { + log := r.log.Str("section", "agency") + if err := r.prepareAgencyPodTermination(ctx, p, memberStatus, func(update api.MemberStatus) error { if err := updateMember(update); err != nil { return errors.WithStack(err) } @@ -150,10 +150,10 @@ func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, log ze return r.context.ACS().CurrentClusterCache().PersistentVolumeClaimsModInterface().V1().Delete(ctxChild, memberStatus.PersistentVolumeClaimName, meta.DeleteOptions{}) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Warn().Err(err).Msg("Failed to delete PVC for member") + log.Err(err).Warn("Failed to delete PVC for member") return errors.WithStack(err) } - log.Debug().Str("pvc-name", memberStatus.PersistentVolumeClaimName).Msg("Removed PVC of member so agency can be completely replaced") + log.Str("pvc-name", memberStatus.PersistentVolumeClaimName).Debug("Removed PVC of member so agency can be completely replaced") } return nil @@ -161,8 +161,9 @@ func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, log ze // inspectFinalizerPodDrainDBServer checks the finalizer condition for drain-dbserver. // It returns nil if the finalizer can be removed. 
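// NOTE (editor): a minimal sketch of the call-style change applied throughout this diff,
// assuming only the logging.Logger API visible in these hunks (Str, Strs, Err and the
// level methods Debug/Info/Warn/Error). The helper name logFinalizerRemovalSketch is
// illustrative and not part of the original change.
//
// Old (zerolog event chain):  log.Debug().Err(err).Str("finalizer", f).Msg("Cannot remove finalizer yet")
// New (logging.Logger chain): log.Err(err).Str("finalizer", f).Debug("Cannot remove finalizer yet")
//
// Fields are attached first and the level method is called last, carrying the message;
// slice fields such as Strs now take variadic values, hence the removalList... spread above.
package resources

func (r *Resources) logFinalizerRemovalSketch(removalList []string, err error) {
	// Derive a section-scoped logger from the deployment-scoped one, as the hunks above do.
	log := r.log.Str("section", "pod")
	if err != nil {
		log.Err(err).Debug("Failed to update pod (to remove finalizers)")
		return
	}
	log.Strs("finalizers", removalList...).Debug("Removed finalizer(s) from Pod")
}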
-func (r *Resources) inspectFinalizerPodDrainDBServer(ctx context.Context, log zerolog.Logger, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { - if err := r.prepareDBServerPodTermination(ctx, log, p, memberStatus, func(update api.MemberStatus) error { +func (r *Resources) inspectFinalizerPodDrainDBServer(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { + log := r.log.Str("section", "pod") + if err := r.prepareDBServerPodTermination(ctx, p, memberStatus, func(update api.MemberStatus) error { if err := updateMember(update); err != nil { return errors.WithStack(err) } @@ -179,10 +180,10 @@ func (r *Resources) inspectFinalizerPodDrainDBServer(ctx context.Context, log ze return r.context.ACS().CurrentClusterCache().PersistentVolumeClaimsModInterface().V1().Delete(ctxChild, memberStatus.PersistentVolumeClaimName, meta.DeleteOptions{}) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Warn().Err(err).Msg("Failed to delete PVC for member") + log.Err(err).Warn("Failed to delete PVC for member") return errors.WithStack(err) } - log.Debug().Str("pvc-name", memberStatus.PersistentVolumeClaimName).Msg("Removed PVC of member") + log.Str("pvc-name", memberStatus.PersistentVolumeClaimName).Debug("Removed PVC of member") } return nil diff --git a/pkg/deployment/resources/pod_inspector.go b/pkg/deployment/resources/pod_inspector.go index 74601a052..3dbdcee55 100644 --- a/pkg/deployment/resources/pod_inspector.go +++ b/pkg/deployment/resources/pod_inspector.go @@ -67,11 +67,11 @@ func (r *Resources) handleRestartedPod(pod *core.Pod, memberStatus *api.MemberSt previousTermination := containerStatus.LastTerminationState.Terminated allowedRestartPeriod := time.Now().Add(terminationRestartPeriod) if previousTermination != nil && !previousTermination.FinishedAt.Time.Before(allowedRestartPeriod) { - r.log.Debug().Str("pod-name", pod.GetName()).Msg("pod is continuously restarting - we will terminate it") + r.log.Str("pod-name", pod.GetName()).Debug("pod is continuously restarting - we will terminate it") *markAsTerminated = true } else { *markAsTerminated = false - r.log.Debug().Str("pod-name", pod.GetName()).Msg("pod is restarting - we are not marking it as terminated yet..") + r.log.Str("pod-name", pod.GetName()).Debug("pod is restarting - we are not marking it as terminated yet..") } } } @@ -80,7 +80,7 @@ func (r *Resources) handleRestartedPod(pod *core.Pod, memberStatus *api.MemberSt // the member status of the deployment accordingly. // Returns: Interval_till_next_inspection, error func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInterface.Inspector) (util.Interval, error) { - log := r.log + log := r.log.Str("section", "pod") start := time.Now() apiObject := r.context.GetAPIObject() deploymentName := apiObject.GetName() @@ -103,14 +103,14 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter memberStatus, group, found := status.Members.MemberStatusByPodName(pod.GetName()) if !found { - log.Warn().Str("pod", pod.GetName()).Strs("existing-pods", status.Members.PodNames()).Msg("no memberstatus found for pod") + log.Str("pod", pod.GetName()).Strs("existing-pods", status.Members.PodNames()...).Warn("no memberstatus found for pod") if k8sutil.IsPodMarkedForDeletion(pod) && len(pod.GetFinalizers()) > 0 { // Strange, pod belongs to us, but we have no member for it. // Remove all finalizers, so it can be removed. 
- log.Warn().Msg("Pod belongs to this deployment, but we don't know the member. Removing all finalizers") - _, err := k8sutil.RemovePodFinalizers(ctx, r.context.ACS().CurrentClusterCache(), log, cachedStatus.PodsModInterface().V1(), pod, pod.GetFinalizers(), false) + log.Warn("Pod belongs to this deployment, but we don't know the member. Removing all finalizers") + _, err := k8sutil.RemovePodFinalizers(ctx, r.context.ACS().CurrentClusterCache(), cachedStatus.PodsModInterface().V1(), pod, pod.GetFinalizers(), false) if err != nil { - log.Debug().Err(err).Msg("Failed to update pod (to remove all finalizers)") + log.Err(err).Debug("Failed to update pod (to remove all finalizers)") return errors.WithStack(err) } } @@ -132,7 +132,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter } if markAsTerminated && memberStatus.Conditions.Update(api.ConditionTypeTerminated, true, "Pod Succeeded", "") { - log.Debug().Str("pod-name", pod.GetName()).Msg("Updating member condition Terminated to true: Pod Succeeded") + log.Str("pod-name", pod.GetName()).Debug("Updating member condition Terminated to true: Pod Succeeded") updateMemberStatusNeeded = true nextInterval = nextInterval.ReduceTo(recheckSoonPodInspectorInterval) @@ -168,7 +168,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter if c, ok := k8sutil.GetAnyContainerStatusByName(pod.Status.InitContainerStatuses, container); ok { if t := c.State.Terminated; t != nil && t.ExitCode != 0 { - log.Warn().Str("member", memberStatus.ID). + log.Str("member", memberStatus.ID). Str("pod", pod.GetName()). Str("container", container). Str("uid", string(pod.GetUID())). @@ -178,7 +178,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter Int32("signal", t.Signal). Time("started", t.StartedAt.Time). Time("finished", t.FinishedAt.Time). - Msgf("Pod failed in unexpected way: Init Container failed") + Warn("Pod failed in unexpected way: Init Container failed") } } } @@ -188,7 +188,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter for _, container := range containers { if c, ok := k8sutil.GetAnyContainerStatusByName(pod.Status.ContainerStatuses, container); ok { if t := c.State.Terminated; t != nil && t.ExitCode != 0 { - log.Warn().Str("member", memberStatus.ID). + log.Str("member", memberStatus.ID). Str("pod", pod.GetName()). Str("container", container). Str("uid", string(pod.GetUID())). @@ -198,13 +198,13 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter Int32("signal", t.Signal). Time("started", t.StartedAt.Time). Time("finished", t.FinishedAt.Time). 
- Msgf("Pod failed in unexpected way: Core Container failed") + Warn("Pod failed in unexpected way: Core Container failed") } } } } - log.Debug().Str("pod-name", pod.GetName()).Msg("Updating member condition Terminated to true: Pod Failed") + log.Str("pod-name", pod.GetName()).Debug("Updating member condition Terminated to true: Pod Failed") updateMemberStatusNeeded = true nextInterval = nextInterval.ReduceTo(recheckSoonPodInspectorInterval) @@ -222,7 +222,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter l := addLabel(pod.Labels, k8sutil.LabelKeyArangoScheduled, "1") if err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), l)); err != nil { - log.Error().Err(err).Msgf("Unable to update scheduled labels") + log.Err(err).Error("Unable to update scheduled labels") } } } @@ -237,7 +237,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter l = addLabel(l, k8sutil.LabelKeyArangoZone, tz) if err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), l)); err != nil { - log.Error().Err(err).Msgf("Unable to update topology labels") + log.Err(err).Error("Unable to update topology labels") } } } else { @@ -246,7 +246,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter l = removeLabel(l, k8sutil.LabelKeyArangoZone) if err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), l)); err != nil { - log.Error().Err(err).Msgf("Unable to remove topology labels") + log.Err(err).Error("Unable to remove topology labels") } } } @@ -272,7 +272,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter if anyOf(memberStatus.Conditions.Update(api.ConditionTypeReady, true, "Pod Ready", ""), memberStatus.Conditions.Update(api.ConditionTypeStarted, true, "Pod Started", ""), memberStatus.Conditions.Update(api.ConditionTypeServing, true, "Pod Serving", "")) { - log.Debug().Str("pod-name", pod.GetName()).Msg("Updating member condition Ready, Started & Serving to true") + log.Str("pod-name", pod.GetName()).Debug("Updating member condition Ready, Started & Serving to true") if status.Topology.IsTopologyOwned(memberStatus.Topology) { nodes, err := cachedStatus.Node().V1() @@ -295,7 +295,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter // Pod is not ready, but core containers are fine if anyOf(memberStatus.Conditions.Update(api.ConditionTypeReady, false, "Pod Not Ready", ""), memberStatus.Conditions.Update(api.ConditionTypeServing, true, "Pod is still serving", "")) { - log.Debug().Str("pod-name", pod.GetName()).Msg("Updating member condition Ready to false, while all core containers are ready") + log.Str("pod-name", pod.GetName()).Debug("Updating member condition Ready to false, while all core containers are ready") updateMemberStatusNeeded = true nextInterval = nextInterval.ReduceTo(recheckSoonPodInspectorInterval) } @@ -303,7 +303,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter // Pod is not ready if anyOf(memberStatus.Conditions.Update(api.ConditionTypeReady, false, "Pod Not Ready", ""), memberStatus.Conditions.Update(api.ConditionTypeServing, false, "Pod Core containers are not ready", strings.Join(coreContainers, ", "))) { - log.Debug().Str("pod-name", pod.GetName()).Msg("Updating member condition Ready & Serving to false") + log.Str("pod-name", pod.GetName()).Debug("Updating member condition 
Ready & Serving to false") updateMemberStatusNeeded = true nextInterval = nextInterval.ReduceTo(recheckSoonPodInspectorInterval) } @@ -311,7 +311,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter if k8sutil.IsPodNotScheduledFor(pod, podScheduleTimeout) { // Pod cannot be scheduled for to long - log.Debug().Str("pod-name", pod.GetName()).Msg("Pod scheduling timeout") + log.Str("pod-name", pod.GetName()).Debug("Pod scheduling timeout") podNamesWithScheduleTimeout = append(podNamesWithScheduleTimeout, pod.GetName()) } else if !k8sutil.IsPodScheduled(pod) { unscheduledPodNames = append(unscheduledPodNames, pod.GetName()) @@ -320,7 +320,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter if k8sutil.IsPodMarkedForDeletion(pod) { if memberStatus.Conditions.Update(api.ConditionTypeTerminating, true, "Pod marked for deletion", "") { updateMemberStatusNeeded = true - log.Debug().Str("pod-name", pod.GetName()).Msg("Pod marked as terminating") + log.Str("pod-name", pod.GetName()).Debug("Pod marked as terminating") } // Process finalizers if x, err := r.runPodFinalizers(ctx, pod, memberStatus, func(m api.MemberStatus) error { @@ -329,7 +329,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter return nil }); err != nil { // Only log here, since we'll be called to try again. - log.Warn().Err(err).Msg("Failed to run pod finalizers") + log.Err(err).Warn("Failed to run pod finalizers") } else { nextInterval = nextInterval.ReduceTo(x) } @@ -352,11 +352,11 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter for _, m := range members { if podName := m.PodName; podName != "" { if _, exists := cachedStatus.Pod().V1().GetSimple(podName); !exists { - log.Debug().Str("pod-name", podName).Msg("Does not exist") + log.Str("pod-name", podName).Debug("Does not exist") switch m.Phase { case api.MemberPhaseNone, api.MemberPhasePending: // Do nothing - log.Debug().Str("pod-name", podName).Msg("PodPhase is None, waiting for the pod to be recreated") + log.Str("pod-name", podName).Debug("PodPhase is None, waiting for the pod to be recreated") case api.MemberPhaseShuttingDown, api.MemberPhaseUpgrading, api.MemberPhaseFailed, api.MemberPhaseRotateStart, api.MemberPhaseRotating: // Shutdown was intended, so not need to do anything here. // Just mark terminated @@ -373,7 +373,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter } } default: - log.Debug().Str("pod-name", podName).Msg("Pod is gone") + log.Str("pod-name", podName).Debug("Pod is gone") m.Phase = api.MemberPhaseNone // This is trigger a recreate of the pod. 
// Create event nextInterval = nextInterval.ReduceTo(recheckSoonPodInspectorInterval) diff --git a/pkg/deployment/resources/pod_leader.go b/pkg/deployment/resources/pod_leader.go index 323e4ac78..53c9a96dc 100644 --- a/pkg/deployment/resources/pod_leader.go +++ b/pkg/deployment/resources/pod_leader.go @@ -49,6 +49,8 @@ func (r *Resources) EnsureLeader(ctx context.Context, cachedStatus inspectorInte return nil } + log := r.log.Str("section", "pod") + cache, ok := r.context.GetAgencyHealth() if !ok { return nil @@ -77,11 +79,11 @@ func (r *Resources) EnsureLeader(ctx context.Context, cachedStatus inspectorInte err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), labels)) if err != nil { - r.log.Error().Err(err).Msgf("Unable to remove leader label") + log.Err(err).Error("Unable to remove leader label") return err } - r.log.Info().Msgf("leader label is removed from \"%s\" member", m.ID) + log.Warn("leader label is removed from \"%s\" member", m.ID) changed = true } @@ -100,10 +102,10 @@ func (r *Resources) EnsureLeader(ctx context.Context, cachedStatus inspectorInte err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), labels)) if err != nil { - r.log.Error().Err(err).Msgf("Unable to update leader label") + log.Err(err).Error("Unable to update leader label") return err } - r.log.Info().Msgf("leader label is set on \"%s\" member", m.ID) + log.Warn("leader label is set on \"%s\" member", m.ID) changed = true } diff --git a/pkg/deployment/resources/pod_termination.go b/pkg/deployment/resources/pod_termination.go index 6825ffccc..148262cf2 100644 --- a/pkg/deployment/resources/pod_termination.go +++ b/pkg/deployment/resources/pod_termination.go @@ -28,7 +28,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" meta "k8s.io/apimachinery/pkg/apis/meta/v1" driver "github.com/arangodb/go-driver" @@ -43,16 +42,18 @@ import ( // prepareAgencyPodTermination checks if the given agency pod is allowed to terminate // and if so, prepares it for termination. // It returns nil if the pod is allowed to terminate, an error otherwise. 
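// NOTE (editor): a rough sketch of the signature change used for the termination and
// finalizer helpers in this diff, assuming every helper can reach the deployment-scoped
// r.log. The helper name checkPodTerminationSketch is hypothetical and only shows the
// shape of the change.
//
// Before: func (r *Resources) helper(ctx context.Context, log zerolog.Logger, p *core.Pod, ...) error
// After:  func (r *Resources) helper(ctx context.Context, p *core.Pod, ...) error
package resources

import (
	"context"

	core "k8s.io/api/core/v1"
)

func (r *Resources) checkPodTerminationSketch(ctx context.Context, p *core.Pod) error {
	// The logger is no longer threaded through parameters; each helper derives its own
	// section-scoped logger from the Resources-level logger instead.
	log := r.log.Str("section", "pod").Str("pod-name", p.GetName())
	log.Debug("Checking whether pod may terminate")
	return nil
}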
-func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog.Logger, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { +func (r *Resources) prepareAgencyPodTermination(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { + log := r.log.Str("section", "pod") + // Inspect member phase if memberStatus.Phase.IsFailed() { - log.Debug().Msg("Pod is already failed, safe to remove agency serving finalizer") + log.Debug("Pod is already failed, safe to remove agency serving finalizer") return nil } // Inspect deployment deletion state apiObject := r.context.GetAPIObject() if apiObject.GetDeletionTimestamp() != nil { - log.Debug().Msg("Entire deployment is being deleted, safe to remove agency serving finalizer") + log.Debug("Entire deployment is being deleted, safe to remove agency serving finalizer") return nil } @@ -62,7 +63,7 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog if !r.context.GetScope().IsNamespaced() && p.Spec.NodeName != "" { node, ok := nodes.GetSimple(p.Spec.NodeName) if !ok { - log.Warn().Msg("Node not found") + log.Warn("Node not found") } else if node.Spec.Unschedulable { agentDataWillBeGone = true } @@ -74,7 +75,7 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog defer cancel() pvc, err := r.context.ACS().CurrentClusterCache().PersistentVolumeClaim().V1().Read().Get(ctxChild, memberStatus.PersistentVolumeClaimName, meta.GetOptions{}) if err != nil { - log.Warn().Err(err).Msg("Failed to get PVC for member") + log.Err(err).Warn("Failed to get PVC for member") return errors.WithStack(err) } if k8sutil.IsPersistentVolumeClaimMarkedForDeletion(pvc) { @@ -83,27 +84,27 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog // Is this a simple pod restart? 
if !agentDataWillBeGone { - log.Debug().Msg("Pod is just being restarted, safe to terminate agency pod") + log.Debug("Pod is just being restarted, safe to terminate agency pod") return nil } // Inspect agency state - log.Debug().Msg("Agent data will be gone, so we will check agency serving status first") + log.Debug("Agent data will be gone, so we will check agency serving status first") ctxChild, cancel = context.WithTimeout(ctx, time.Second*15) defer cancel() ctxLeader := agency.WithAllowNoLeader(ctxChild) // The ID we're checking may be the leader, so ignore situations where all other agents are followers agencyConns, err := r.context.GetAgencyClientsWithPredicate(ctxLeader, func(id string) bool { return id != memberStatus.ID }) if err != nil { - log.Debug().Err(err).Msg("Failed to create member client") + log.Err(err).Debug("Failed to create member client") return errors.WithStack(err) } if len(agencyConns) == 0 { - log.Debug().Err(err).Msg("No more remaining agents, we cannot delete this one") + log.Err(err).Debug("No more remaining agents, we cannot delete this one") return errors.WithStack(errors.Newf("No more remaining agents")) } if err := agency.AreAgentsHealthy(ctxLeader, agencyConns); err != nil { - log.Debug().Err(err).Msg("Remaining agents are not healthy") + log.Err(err).Debug("Remaining agents are not healthy") return errors.WithStack(err) } @@ -113,7 +114,7 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog return errors.WithStack(err) } } - log.Debug().Msg("Agent is ready to be completely recovered.") + log.Debug("Agent is ready to be completely recovered.") return nil } @@ -121,23 +122,25 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog // prepareDBServerPodTermination checks if the given dbserver pod is allowed to terminate // and if so, prepares it for termination. // It returns nil if the pod is allowed to terminate, an error otherwise. 
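// NOTE (editor): a small sketch of the logger-narrowing idiom visible in the member and
// pod loops of this diff: derive a section logger once per entry point, then shadow it
// with extra fields per item so the fields stay scoped to one iteration. The function
// and its arguments are illustrative only.
package resources

func (r *Resources) logMembersSketch(memberIDs []string, role string) {
	log := r.log.Str("section", "members")
	for _, id := range memberIDs {
		// Shadowing keeps the per-member fields out of events logged after the loop.
		log := log.Str("member", id).Str("role", role)
		log.Debug("Inspecting member")
	}
}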
-func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerolog.Logger, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { +func (r *Resources) prepareDBServerPodTermination(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error { + log := r.log.Str("section", "pod") + // Inspect member phase if memberStatus.Phase.IsFailed() { - log.Debug().Msg("Pod is already failed, safe to remove dbserver pod") + log.Debug("Pod is already failed, safe to remove dbserver pod") return nil } // If pod is not member of cluster, do nothing if !memberStatus.Conditions.IsTrue(api.ConditionTypeMemberOfCluster) { - log.Debug().Msg("Pod is not member of cluster") + log.Debug("Pod is not member of cluster") return nil } // Inspect deployment deletion state apiObject := r.context.GetAPIObject() if apiObject.GetDeletionTimestamp() != nil { - log.Debug().Msg("Entire deployment is being deleted, safe to remove dbserver pod") + log.Debug("Entire deployment is being deleted, safe to remove dbserver pod") return nil } @@ -146,7 +149,7 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol if nodes, err := r.context.ACS().CurrentClusterCache().Node().V1(); err == nil { node, ok := nodes.GetSimple(p.Spec.NodeName) if !ok { - log.Warn().Msg("Node not found") + log.Warn("Node not found") } else if node.Spec.Unschedulable { if !r.context.GetSpec().IsNetworkAttachedVolumes() { dbserverDataWillBeGone = true @@ -159,7 +162,7 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol defer cancel() pvc, err := r.context.ACS().CurrentClusterCache().PersistentVolumeClaim().V1().Read().Get(ctxChild, memberStatus.PersistentVolumeClaimName, meta.GetOptions{}) if err != nil { - log.Warn().Err(err).Msg("Failed to get PVC for member") + log.Err(err).Warn("Failed to get PVC for member") return errors.WithStack(err) } if k8sutil.IsPersistentVolumeClaimMarkedForDeletion(pvc) { @@ -176,14 +179,14 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol defer cancel() c, err := r.context.GetDatabaseClient(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create member client") + log.Err(err).Debug("Failed to create member client") return errors.WithStack(err) } ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() cluster, err := c.Cluster(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to access cluster") + log.Err(err).Debug("Failed to access cluster") if r.context.GetSpec().Recovery.Get().GetAutoRecover() { if c, ok := k8sutil.GetContainerStatusByName(p, shared.ServerContainerName); ok { @@ -207,12 +210,12 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol return errors.WithStack(err) } } - log.Debug().Msg("DBServer is cleaned out.") + log.Debug("DBServer is cleaned out.") return nil } // Not cleaned out yet, check member status if memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) { - log.Warn().Msg("Member is already terminated before it could resign or be cleaned out. Not good, but removing dbserver pod because we cannot do anything further") + log.Warn("Member is already terminated before it could resign or be cleaned out. 
Not good, but removing dbserver pod because we cannot do anything further") // At this point we have to set CleanedOut to true, // because we can no longer reason about the state in the agency and // bringing back the dbserver again may result in an cleaned out server without us knowing @@ -240,16 +243,16 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol ctxJobID := driver.WithJobIDResponse(ctxChild, &jobID) // Ensure the cleanout is triggered if dbserverDataWillBeGone { - log.Debug().Msg("Server is not yet cleaned out. Triggering a clean out now") + log.Debug("Server is not yet cleaned out. Triggering a clean out now") if err := cluster.CleanOutServer(ctxJobID, memberStatus.ID); err != nil { - log.Debug().Err(err).Msg("Failed to clean out server") + log.Err(err).Debug("Failed to clean out server") return errors.WithStack(err) } memberStatus.Phase = api.MemberPhaseDrain } else { - log.Debug().Msg("Temporary shutdown, resign leadership") + log.Debug("Temporary shutdown, resign leadership") if err := cluster.ResignServer(ctxJobID, memberStatus.ID); err != nil { - log.Debug().Err(err).Msg("Failed to resign server") + log.Err(err).Debug("Failed to resign server") return errors.WithStack(err) } memberStatus.Phase = api.MemberPhaseResign @@ -266,25 +269,25 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol defer cancel() agency, err := r.context.GetAgency(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create agency client") + log.Err(err).Debug("Failed to create agency client") return errors.WithStack(err) } ctxChild, cancel = globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx) defer cancel() jobStatus, err := arangod.CleanoutServerJobStatus(ctxChild, memberStatus.CleanoutJobID, c, agency) if err != nil { - log.Debug().Err(err).Msg("Failed to fetch job status") + log.Err(err).Debug("Failed to fetch job status") return errors.WithStack(err) } if jobStatus.IsFailed() { - log.Warn().Str("reason", jobStatus.Reason()).Msg("Job failed") + log.Str("reason", jobStatus.Reason()).Warn("Job failed") // Revert cleanout state memberStatus.Phase = api.MemberPhaseCreated memberStatus.CleanoutJobID = "" if err := updateMember(memberStatus); err != nil { return errors.WithStack(err) } - log.Error().Msg("Cleanout/Resign server job failed, continue anyway") + log.Error("Cleanout/Resign server job failed, continue anyway") return nil } if jobStatus.IsFinished() { @@ -297,7 +300,7 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol defer cancel() agency, err := r.context.GetAgency(ctxChild) if err != nil { - log.Debug().Err(err).Msg("Failed to create agency client") + log.Err(err).Debug("Failed to create agency client") return errors.WithStack(err) } @@ -305,22 +308,22 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol defer cancel() jobStatus, err := arangod.CleanoutServerJobStatus(ctxChild, memberStatus.CleanoutJobID, c, agency) if err != nil { - log.Debug().Err(err).Msg("Failed to fetch job status") + log.Err(err).Debug("Failed to fetch job status") return errors.WithStack(err) } if jobStatus.IsFailed() { - log.Warn().Str("reason", jobStatus.Reason()).Msg("Resign Job failed") + log.Str("reason", jobStatus.Reason()).Warn("Resign Job failed") // Revert cleanout state memberStatus.Phase = api.MemberPhaseCreated memberStatus.CleanoutJobID = "" if err := updateMember(memberStatus); err != nil { return errors.WithStack(err) } - log.Error().Msg("Cleanout/Resign server 
job failed, continue anyway") + log.Error("Cleanout/Resign server job failed, continue anyway") return nil } if jobStatus.IsFinished() { - log.Debug().Str("reason", jobStatus.Reason()).Msg("Resign Job finished") + log.Str("reason", jobStatus.Reason()).Debug("Resign Job finished") memberStatus.CleanoutJobID = "" memberStatus.Phase = api.MemberPhaseCreated if err := updateMember(memberStatus); err != nil { diff --git a/pkg/deployment/resources/pvc_finalizers.go b/pkg/deployment/resources/pvc_finalizers.go index 4d6796183..0914c6d47 100644 --- a/pkg/deployment/resources/pvc_finalizers.go +++ b/pkg/deployment/resources/pvc_finalizers.go @@ -28,7 +28,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" v1 "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -45,24 +44,24 @@ const ( // runPVCFinalizers goes through the list of PVC finalizers to see if they can be removed. func (r *Resources) runPVCFinalizers(ctx context.Context, p *v1.PersistentVolumeClaim, group api.ServerGroup, memberStatus api.MemberStatus) (util.Interval, error) { - log := r.log.With().Str("pvc-name", p.GetName()).Logger() + log := r.log.Str("section", "pvc").Str("pvc-name", p.GetName()) var removalList []string for _, f := range p.ObjectMeta.GetFinalizers() { switch f { case constants.FinalizerPVCMemberExists: - log.Debug().Msg("Inspecting member exists finalizer") - if err := r.inspectFinalizerPVCMemberExists(ctx, log, group, memberStatus); err == nil { + log.Debug("Inspecting member exists finalizer") + if err := r.inspectFinalizerPVCMemberExists(ctx, group, memberStatus); err == nil { removalList = append(removalList, f) } else { - log.Debug().Err(err).Str("finalizer", f).Msg("Cannot remove finalizer yet") + log.Err(err).Str("finalizer", f).Debug("Cannot remove finalizer yet") } } } // Remove finalizers (if needed) if len(removalList) > 0 { - _, err := k8sutil.RemovePVCFinalizers(ctx, r.context.ACS().CurrentClusterCache(), log, r.context.ACS().CurrentClusterCache().PersistentVolumeClaimsModInterface().V1(), p, removalList, false) + _, err := k8sutil.RemovePVCFinalizers(ctx, r.context.ACS().CurrentClusterCache(), r.context.ACS().CurrentClusterCache().PersistentVolumeClaimsModInterface().V1(), p, removalList, false) if err != nil { - log.Debug().Err(err).Msg("Failed to update PVC (to remove finalizers)") + log.Err(err).Debug("Failed to update PVC (to remove finalizers)") return 0, errors.WithStack(err) } } else { @@ -74,17 +73,19 @@ func (r *Resources) runPVCFinalizers(ctx context.Context, p *v1.PersistentVolume // inspectFinalizerPVCMemberExists checks the finalizer condition for member-exists. // It returns nil if the finalizer can be removed. 
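// NOTE (editor): a compact sketch of how printf-style messages are written after this
// change, assuming the variadic level methods used elsewhere in the diff (e.g.
// Warn("... %d ...", n)). There is no Msgf counterpart; format arguments move into the
// level call itself. The function name and arguments are illustrative only.
package resources

func (r *Resources) logSecretsChangedSketch(badSecretNames []string, err error) {
	log := r.log.Str("section", "secret-hashes")
	log.Warn("Found %d changed secrets", len(badSecretNames))
	if err != nil {
		log.Err(err).Error("Failed to save SecretsChanged condition")
	}
}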
-func (r *Resources) inspectFinalizerPVCMemberExists(ctx context.Context, log zerolog.Logger, group api.ServerGroup, +func (r *Resources) inspectFinalizerPVCMemberExists(ctx context.Context, group api.ServerGroup, memberStatus api.MemberStatus) error { + log := r.log.Str("section", "pvc") + // Inspect member phase if memberStatus.Phase.IsFailed() { - log.Debug().Msg("Member is already failed, safe to remove member-exists finalizer") + log.Debug("Member is already failed, safe to remove member-exists finalizer") return nil } // Inspect deployment deletion state apiObject := r.context.GetAPIObject() if apiObject.GetDeletionTimestamp() != nil { - log.Debug().Msg("Entire deployment is being deleted, safe to remove member-exists finalizer") + log.Debug("Entire deployment is being deleted, safe to remove member-exists finalizer") return nil } @@ -92,24 +93,24 @@ func (r *Resources) inspectFinalizerPVCMemberExists(ctx context.Context, log zer switch group { case api.ServerGroupAgents: if memberStatus.Conditions.IsTrue(api.ConditionTypeTerminated) { - log.Debug().Msg("Rebuilding terminated agents is allowed, safe to remove member-exists finalizer") + log.Debug("Rebuilding terminated agents is allowed, safe to remove member-exists finalizer") return nil } case api.ServerGroupDBServers: if memberStatus.Conditions.IsTrue(api.ConditionTypeCleanedOut) { - log.Debug().Msg("Removing cleanedout dbservers is allowed, safe to remove member-exists finalizer") + log.Debug("Removing cleanedout dbservers is allowed, safe to remove member-exists finalizer") return nil } } // Member still exists, let's trigger a delete of it if memberStatus.PodName != "" { - log.Info().Msg("Removing Pod of member, because PVC is being removed") + log.Info("Removing Pod of member, because PVC is being removed") err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { return r.context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, memberStatus.PodName, meta.DeleteOptions{}) }) if err != nil && !k8sutil.IsNotFound(err) { - log.Debug().Err(err).Msg("Failed to delete pod") + log.Err(err).Debug("Failed to delete pod") return errors.WithStack(err) } } diff --git a/pkg/deployment/resources/pvc_inspector.go b/pkg/deployment/resources/pvc_inspector.go index de90fbaa6..0fc8d608c 100644 --- a/pkg/deployment/resources/pvc_inspector.go +++ b/pkg/deployment/resources/pvc_inspector.go @@ -50,7 +50,7 @@ const ( // InspectPVCs lists all PVCs that belong to the given deployment and updates // the member status of the deployment accordingly. func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInterface.Inspector) (util.Interval, error) { - log := r.log + log := r.log.Str("section", "pvc") start := time.Now() nextInterval := maxPVCInspectorInterval deploymentName := r.context.GetAPIObject().GetName() @@ -65,14 +65,14 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter // Find member status memberStatus, group, found := status.Members.MemberStatusByPVCName(pvc.GetName()) if !found { - log.Debug().Str("pvc", pvc.GetName()).Msg("no memberstatus found for PVC") + log.Str("pvc", pvc.GetName()).Debug("no memberstatus found for PVC") if k8sutil.IsPersistentVolumeClaimMarkedForDeletion(pvc) && len(pvc.GetFinalizers()) > 0 { // Strange, pvc belongs to us, but we have no member for it. // Remove all finalizers, so it can be removed. - log.Warn().Msg("PVC belongs to this deployment, but we don't know the member. 
Removing all finalizers") - _, err := k8sutil.RemovePVCFinalizers(ctx, r.context.ACS().CurrentClusterCache(), log, cachedStatus.PersistentVolumeClaimsModInterface().V1(), pvc, pvc.GetFinalizers(), false) + log.Warn("PVC belongs to this deployment, but we don't know the member. Removing all finalizers") + _, err := k8sutil.RemovePVCFinalizers(ctx, r.context.ACS().CurrentClusterCache(), cachedStatus.PersistentVolumeClaimsModInterface().V1(), pvc, pvc.GetFinalizers(), false) if err != nil { - log.Debug().Err(err).Msg("Failed to update PVC (to remove all finalizers)") + log.Err(err).Debug("Failed to update PVC (to remove all finalizers)") return errors.WithStack(err) } } @@ -84,7 +84,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter q := patch.NewPatch(patch.ItemReplace(patch.NewPath("metadata", "ownerReferences"), pvc.ObjectMeta.OwnerReferences)) d, err := q.Marshal() if err != nil { - log.Debug().Err(err).Msg("Failed to prepare PVC patch (ownerReferences)") + log.Err(err).Debug("Failed to prepare PVC patch (ownerReferences)") return errors.WithStack(err) } @@ -94,7 +94,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter }) if err != nil { - log.Debug().Err(err).Msg("Failed to update PVC (ownerReferences)") + log.Err(err).Debug("Failed to update PVC (ownerReferences)") return errors.WithStack(err) } } @@ -103,7 +103,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter // Process finalizers if x, err := r.runPVCFinalizers(ctx, pvc, group, memberStatus); err != nil { // Only log here, since we'll be called to try again. - log.Warn().Err(err).Msg("Failed to run PVC finalizers") + log.Err(err).Warn("Failed to run PVC finalizers") } else { nextInterval = nextInterval.ReduceTo(x) } diff --git a/pkg/deployment/resources/resources.go b/pkg/deployment/resources/resources.go index c70dbc656..471d5ee8f 100644 --- a/pkg/deployment/resources/resources.go +++ b/pkg/deployment/resources/resources.go @@ -20,22 +20,26 @@ package resources -import ( - "github.com/rs/zerolog" -) +import "github.com/arangodb/kube-arangodb/pkg/logging" // Resources is a service that creates low level resources for members // and inspects low level resources, put the inspection result in members. type Resources struct { - log zerolog.Logger - context Context + log logging.Logger + namespace, name string + context Context } // NewResources creates a new Resources service, used to // create and inspect low level resources such as pods and services. -func NewResources(log zerolog.Logger, context Context) *Resources { - return &Resources{ - log: log, - context: context, +func NewResources(namespace, name string, context Context) *Resources { + r := &Resources{ + namespace: namespace, + name: name, + context: context, } + + r.log = logger.WrapObj(r) + + return r } diff --git a/pkg/deployment/resources/secret_hashes.go b/pkg/deployment/resources/secret_hashes.go index 4f53b3609..75248361f 100644 --- a/pkg/deployment/resources/secret_hashes.go +++ b/pkg/deployment/resources/secret_hashes.go @@ -50,12 +50,14 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe // validate performs a secret hash comparison for a single secret. // Return true if all is good, false when the SecretChanged condition // must be set. 
+ log := r.log.Str("section", "secret-hashes") + validate := func(secretName string, getExpectedHash func() string, setExpectedHash func(string) error, actionHashChanged func(Context, *core.Secret) error) (bool, error) { - log := r.log.With().Str("secret-name", secretName).Logger() + log := log.Str("secret-name", secretName) expectedHash := getExpectedHash() secret, hash, exists := r.getSecretHash(cachedStatus, secretName) if expectedHash == "" { @@ -66,7 +68,7 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe } // Hash fetched successfully, store it if err := setExpectedHash(hash); err != nil { - log.Debug().Msg("Failed to save secret hash") + log.Debug("Failed to save secret hash") return true, errors.WithStack(err) } return true, nil @@ -74,24 +76,23 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe // Hash is set, it must match the current hash if !exists { // Fetching error failed for other reason. - log.Debug().Msg("Secret does not exist") + log.Debug("Secret does not exist") // This is not good, return false so SecretsChanged condition will be set. return false, nil } if hash != expectedHash { // Oops, hash has changed - log.Debug(). - Str("expected-hash", expectedHash). + log.Str("expected-hash", expectedHash). Str("new-hash", hash). - Msg("Secret has changed.") + Debug("Secret has changed.") if actionHashChanged != nil { if err := actionHashChanged(r.context, secret); err != nil { - log.Debug().Msgf("failed to change secret. hash-changed-action returned error: %v", err) + log.Debug("failed to change secret. hash-changed-action returned error: %v", err) return true, nil } if err := setExpectedHash(hash); err != nil { - log.Debug().Msg("Failed to change secret hash") + log.Debug("Failed to change secret hash") return true, errors.WithStack(err) } return true, nil @@ -105,7 +106,6 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe spec := r.context.GetSpec() deploymentName := r.context.GetAPIObject().GetName() - log := r.log var badSecretNames []string status, lastVersion := r.context.GetStatus() image := status.CurrentImage @@ -205,9 +205,9 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe // We have invalid hashes, set the SecretsChanged condition if status.Conditions.Update(api.ConditionTypeSecretsChanged, true, "Secrets have changed", fmt.Sprintf("Found %d changed secrets", len(badSecretNames))) {
- log.Warn().Msgf("Found %d changed secrets. Settings SecretsChanged condition", len(badSecretNames)) + log.Warn("Found %d changed secrets. Setting SecretsChanged condition", len(badSecretNames)) if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil { - log.Error().Err(err).Msg("Failed to save SecretsChanged condition") + log.Err(err).Error("Failed to save SecretsChanged condition") return errors.WithStack(err) } // Add an event about this @@ -216,9 +216,9 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe } else { // All good, we can remove the SecretsChanged condition if status.Conditions.Remove(api.ConditionTypeSecretsChanged) { - log.Info().Msg("Resetting SecretsChanged condition") + log.Info("Resetting SecretsChanged condition") if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil { - log.Error().Err(err).Msg("Failed to save SecretsChanged condition") + log.Err(err).Error("Failed to save SecretsChanged condition") return errors.WithStack(err) } // Add an event about this diff --git a/pkg/deployment/resources/secrets.go b/pkg/deployment/resources/secrets.go index 9a010812b..109f02e09 100644 --- a/pkg/deployment/resources/secrets.go +++ b/pkg/deployment/resources/secrets.go @@ -42,8 +42,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util" - "github.com/rs/zerolog" - operatorErrors "github.com/arangodb/kube-arangodb/pkg/util/errors" core "k8s.io/api/core/v1" @@ -76,7 +74,7 @@ func GetCASecretName(apiObject k8sutil.APIObject) string { } // EnsureSecrets creates all secrets needed to run the given deployment -func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cachedStatus inspectorInterface.Inspector) error { +func (r *Resources) EnsureSecrets(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { start := time.Now() spec := r.context.GetSpec() secrets := cachedStatus.SecretsModInterface().V1() @@ -88,6 +86,8 @@ func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cache defer metrics.SetDuration(inspectSecretsDurationGauges.WithLabelValues(deploymentName), start) counterMetric := inspectedSecretsCounters.WithLabelValues(deploymentName) + log := r.log.Str("section", "secret") + members := status.Members.AsList() reconcileRequired := k8sutil.NewReconcile(cachedStatus) @@ -509,7 +509,7 @@ func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStat owner := apiObject.AsOwner() deploymentName := apiObject.GetName() err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return createTLSCACertificate(ctxChild, r.log, secrets, spec, deploymentName, &owner) + return r.createTLSCACertificate(ctxChild, secrets, spec, deploymentName, &owner) }) if k8sutil.IsAlreadyExists(err) { // Secret added while we tried it also @@ -533,7 +533,7 @@ func (r *Resources) ensureClientAuthCACertificateSecret(ctx context.Context, cac owner := apiObject.AsOwner() deploymentName := apiObject.GetName() err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { - return createClientAuthCACertificate(ctxChild, r.log, secrets, spec, deploymentName, &owner) + return r.createClientAuthCACertificate(ctxChild, secrets, spec, deploymentName, &owner) }) if k8sutil.IsAlreadyExists(err) { // Secret added while we tried it also @@ -556,7 +556,7 @@ func (r *Resources) getJWTSecret(spec api.DeploymentSpec) (string, error) { secretName := spec.Authentication.GetJWTSecretName() s, err := k8sutil.GetTokenSecret(context.Background(), r.context.ACS().CurrentClusterCache().Secret().V1().Read(), secretName) if err != nil { -
r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get JWT secret") + r.log.Str("section", "jwt").Err(err).Str("secret-name", secretName).Debug("Failed to get JWT secret") return "", errors.WithStack(err) } return s, nil @@ -567,7 +567,7 @@ func (r *Resources) getSyncJWTSecret(spec api.DeploymentSpec) (string, error) { secretName := spec.Sync.Authentication.GetJWTSecretName() s, err := k8sutil.GetTokenSecret(context.Background(), r.context.ACS().CurrentClusterCache().Secret().V1().Read(), secretName) if err != nil { - r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync JWT secret") + r.log.Str("section", "jwt").Err(err).Str("secret-name", secretName).Debug("Failed to get sync JWT secret") return "", errors.WithStack(err) } return s, nil @@ -578,7 +578,7 @@ func (r *Resources) getSyncMonitoringToken(spec api.DeploymentSpec) (string, err secretName := spec.Sync.Monitoring.GetTokenSecretName() s, err := k8sutil.GetTokenSecret(context.Background(), r.context.ACS().CurrentClusterCache().Secret().V1().Read(), secretName) if err != nil { - r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync monitoring secret") + r.log.Str("section", "jwt").Err(err).Str("secret-name", secretName).Debug("Failed to get sync monitoring secret") return "", errors.WithStack(err) } return s, nil diff --git a/pkg/deployment/resources/servicemonitor.go b/pkg/deployment/resources/servicemonitor.go index 572dcf87f..b08556571 100644 --- a/pkg/deployment/resources/servicemonitor.go +++ b/pkg/deployment/resources/servicemonitor.go @@ -123,7 +123,7 @@ func (r *Resources) serviceMonitorSpec() (coreosv1.ServiceMonitorSpec, error) { // EnsureServiceMonitor creates or updates a ServiceMonitor. func (r *Resources) EnsureServiceMonitor(ctx context.Context) error { // Some preparations: - log := r.log + log := r.log.Str("section", "service-monitor") apiObject := r.context.GetAPIObject() deploymentName := apiObject.GetName() ns := apiObject.GetNamespace() @@ -139,7 +139,7 @@ func (r *Resources) EnsureServiceMonitor(ctx context.Context) error { client, ok := kclient.GetDefaultFactory().Client() if !ok { - log.Error().Msgf("Cannot get a monitoring client.") + log.Error("Cannot get a monitoring client.") return errors.Newf("Client not initialised") } @@ -176,13 +176,13 @@ func (r *Resources) EnsureServiceMonitor(ctx context.Context) error { return err }) if err != nil { - log.Error().Err(err).Msgf("Failed to create ServiceMonitor %s", serviceMonitorName) + log.Err(err).Error("Failed to create ServiceMonitor %s", serviceMonitorName) return errors.WithStack(err) } - log.Debug().Msgf("ServiceMonitor %s successfully created.", serviceMonitorName) + log.Debug("ServiceMonitor %s successfully created.", serviceMonitorName) return nil } else { - log.Error().Err(err).Msgf("Failed to get ServiceMonitor %s", serviceMonitorName) + log.Err(err).Error("Failed to get ServiceMonitor %s", serviceMonitorName) return errors.WithStack(err) } } @@ -196,11 +196,11 @@ func (r *Resources) EnsureServiceMonitor(ctx context.Context) error { } } if !found { - log.Debug().Msgf("Found unneeded ServiceMonitor %s, but not owned by us, will not touch it", serviceMonitorName) + log.Debug("Found unneeded ServiceMonitor %s, but not owned by us, will not touch it", serviceMonitorName) return nil } if wantMetrics { - log.Debug().Msgf("ServiceMonitor %s already found, ensuring it is fine.", + log.Debug("ServiceMonitor %s already found, ensuring it is fine.", serviceMonitorName) spec, err := 
r.serviceMonitorSpec() @@ -209,7 +209,7 @@ func (r *Resources) EnsureServiceMonitor(ctx context.Context) error { } if equality.Semantic.DeepDerivative(spec, servMon.Spec) { - log.Debug().Msgf("ServiceMonitor %s already found and up to date.", + log.Debug("ServiceMonitor %s already found and up to date.", serviceMonitorName) return nil } @@ -231,9 +231,9 @@ func (r *Resources) EnsureServiceMonitor(ctx context.Context) error { return serviceMonitors.Delete(ctxChild, serviceMonitorName, meta.DeleteOptions{}) }) if err == nil { - log.Debug().Msgf("Deleted ServiceMonitor %s", serviceMonitorName) + log.Debug("Deleted ServiceMonitor %s", serviceMonitorName) return nil } - log.Error().Err(err).Msgf("Could not delete ServiceMonitor %s.", serviceMonitorName) + log.Err(err).Error("Could not delete ServiceMonitor %s.", serviceMonitorName) return errors.WithStack(err) } diff --git a/pkg/deployment/resources/services.go b/pkg/deployment/resources/services.go index 3a399d2e2..425f33eef 100644 --- a/pkg/deployment/resources/services.go +++ b/pkg/deployment/resources/services.go @@ -37,8 +37,6 @@ import ( core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "github.com/rs/zerolog" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/apis/shared" "github.com/arangodb/kube-arangodb/pkg/metrics" @@ -118,7 +116,7 @@ func (r *Resources) adjustService(ctx context.Context, s *core.Service, targetPo // EnsureServices creates all services needed to service the deployment func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorInterface.Inspector) error { - log := r.log + log := r.log.Str("section", "service") start := time.Now() apiObject := r.context.GetAPIObject() status, _ := r.context.GetStatus() @@ -192,11 +190,11 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn defer cancel() svcName, newlyCreated, err := k8sutil.CreateHeadlessService(ctxChild, svcs, apiObject, owner) if err != nil { - log.Debug().Err(err).Msg("Failed to create headless service") + log.Err(err).Debug("Failed to create headless service") return errors.WithStack(err) } if newlyCreated { - log.Debug().Str("service", svcName).Msg("Created headless service") + log.Str("service", svcName).Debug("Created headless service") } } @@ -213,11 +211,11 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn defer cancel() svcName, newlyCreated, err := k8sutil.CreateDatabaseClientService(ctxChild, svcs, apiObject, single, withLeader, owner) if err != nil { - log.Debug().Err(err).Msg("Failed to create database client service") + log.Err(err).Debug("Failed to create database client service") return errors.WithStack(err) } if newlyCreated { - log.Debug().Str("service", svcName).Msg("Created database client service") + log.Str("service", svcName).Debug("Created database client service") } { status, lastVersion := r.context.GetStatus() @@ -237,7 +235,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn role = "single" } if err := r.ensureExternalAccessServices(ctx, cachedStatus, svcs, eaServiceName, role, "database", - shared.ArangoPort, false, withLeader, spec.ExternalAccess, apiObject, log); err != nil { + shared.ArangoPort, false, withLeader, spec.ExternalAccess, apiObject); err != nil { return errors.WithStack(err) } @@ -247,7 +245,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn eaServiceName := 
k8sutil.CreateSyncMasterClientServiceName(deploymentName) role := "syncmaster" if err := r.ensureExternalAccessServices(ctx, cachedStatus, svcs, eaServiceName, role, "sync", - shared.ArangoSyncMasterPort, true, false, spec.Sync.ExternalAccess.ExternalAccessSpec, apiObject, log); err != nil { + shared.ArangoSyncMasterPort, true, false, spec.Sync.ExternalAccess.ExternalAccessSpec, apiObject); err != nil { return errors.WithStack(err) } status, lastVersion := r.context.GetStatus() @@ -264,7 +262,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn defer cancel() name, _, err := k8sutil.CreateExporterService(ctxChild, cachedStatus, svcs, apiObject, apiObject.AsOwner()) if err != nil { - log.Debug().Err(err).Msgf("Failed to create %s exporter service", name) + log.Err(err).Debug("Failed to create %s exporter service", name) return errors.WithStack(err) } status, lastVersion := r.context.GetStatus() @@ -282,7 +280,9 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn // EnsureServices creates all services needed to service the deployment func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStatus inspectorInterface.Inspector, svcs servicev1.ModInterface, eaServiceName, svcRole, title string, port int, noneIsClusterIP bool, withLeader bool, - spec api.ExternalAccessSpec, apiObject k8sutil.APIObject, log zerolog.Logger) error { + spec api.ExternalAccessSpec, apiObject k8sutil.APIObject) error { + log := r.log.Str("section", "service-ea") + // Database external access service createExternalAccessService := false deleteExternalAccessService := false @@ -310,7 +310,7 @@ func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStat // See if LoadBalancer has been configured & the service is "old enough" oldEnoughTimestamp := time.Now().Add(-1 * time.Minute) // How long does the load-balancer provisioner have to act. 
if len(existing.Status.LoadBalancer.Ingress) == 0 && existing.GetObjectMeta().GetCreationTimestamp().Time.Before(oldEnoughTimestamp) { - log.Info().Str("service", eaServiceName).Msgf("LoadBalancerIP of %s external access service is not set, switching to NodePort", title) + log.Str("service", eaServiceName).Info("LoadBalancerIP of %s external access service is not set, switching to NodePort", title) createExternalAccessService = true eaServiceType = core.ServiceTypeNodePort deleteExternalAccessService = true // Remove the LoadBalancer ex service, then add the NodePort one @@ -343,7 +343,7 @@ func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStat return err }) if err != nil { - log.Debug().Err(err).Msgf("Failed to update %s external access service", title) + log.Err(err).Debug("Failed to update %s external access service", title) return errors.WithStack(err) } } @@ -355,12 +355,12 @@ func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStat } if deleteExternalAccessService { - log.Info().Str("service", eaServiceName).Msgf("Removing obsolete %s external access service", title) + log.Str("service", eaServiceName).Info("Removing obsolete %s external access service", title) err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error { return svcs.Delete(ctxChild, eaServiceName, meta.DeleteOptions{}) }) if err != nil { - log.Debug().Err(err).Msgf("Failed to remove %s external access service", title) + log.Err(err).Debug("Failed to remove %s external access service", title) return errors.WithStack(err) } } @@ -374,11 +374,11 @@ func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStat _, newlyCreated, err := k8sutil.CreateExternalAccessService(ctxChild, svcs, eaServiceName, svcRole, apiObject, eaServiceType, port, nodePort, loadBalancerIP, loadBalancerSourceRanges, apiObject.AsOwner(), withLeader) if err != nil { - log.Debug().Err(err).Msgf("Failed to create %s external access service", title) + log.Err(err).Debug("Failed to create %s external access service", title) return errors.WithStack(err) } if newlyCreated { - log.Debug().Str("service", eaServiceName).Msgf("Created %s external access service", title) + log.Str("service", eaServiceName).Debug("Created %s external access service", title) } } return nil diff --git a/pkg/deployment/rotation/check.go b/pkg/deployment/rotation/check.go index 015f89bd2..85895ea81 100644 --- a/pkg/deployment/rotation/check.go +++ b/pkg/deployment/rotation/check.go @@ -28,7 +28,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/deployment/acs/sutil" "github.com/arangodb/kube-arangodb/pkg/handlers/utils" "github.com/arangodb/kube-arangodb/pkg/util/constants" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" ) @@ -56,7 +55,7 @@ func CheckPossible(member api.MemberStatus) bool { return !member.Conditions.IsTrue(api.ConditionTypeTerminated) } -func IsRotationRequired(log zerolog.Logger, acs sutil.ACS, spec api.DeploymentSpec, member api.MemberStatus, group api.ServerGroup, pod *core.Pod, specTemplate, statusTemplate *api.ArangoMemberPodTemplate) (mode Mode, plan api.Plan, reason string, err error) { +func IsRotationRequired(acs sutil.ACS, spec api.DeploymentSpec, member api.MemberStatus, group api.ServerGroup, pod *core.Pod, specTemplate, statusTemplate *api.ArangoMemberPodTemplate) (mode Mode, plan api.Plan, reason string, err error) { // Determine if rotation is required based on plan and actions // Set default mode for return value @@ -124,7 +123,7 
@@ func IsRotationRequired(log zerolog.Logger, acs sutil.ACS, spec api.DeploymentSp return } - if mode, plan, err := compare(log, spec, member, group, specTemplate, statusTemplate); err != nil { + if mode, plan, err := compare(spec, member, group, specTemplate, statusTemplate); err != nil { return SkippedRotation, nil, "", err } else if mode == SkippedRotation { return mode, plan, "No rotation needed", nil diff --git a/pkg/deployment/rotation/compare.go b/pkg/deployment/rotation/compare.go index 548f902a2..35b59f807 100644 --- a/pkg/deployment/rotation/compare.go +++ b/pkg/deployment/rotation/compare.go @@ -26,7 +26,7 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" "github.com/arangodb/kube-arangodb/pkg/deployment/actions" "github.com/arangodb/kube-arangodb/pkg/deployment/resources" - "github.com/rs/zerolog" + "github.com/rs/zerolog/log" core "k8s.io/api/core/v1" ) @@ -52,7 +52,7 @@ func compareFuncs(builder api.ActionBuilder, f ...compareFunc) (mode Mode, plan return } -func compare(log zerolog.Logger, deploymentSpec api.DeploymentSpec, member api.MemberStatus, group api.ServerGroup, +func compare(deploymentSpec api.DeploymentSpec, member api.MemberStatus, group api.ServerGroup, spec, status *api.ArangoMemberPodTemplate) (mode Mode, plan api.Plan, err error) { if spec.Checksum == status.Checksum { diff --git a/pkg/deployment/rotation/utils_test.go b/pkg/deployment/rotation/utils_test.go index b3b91d2e5..d8c6b1574 100644 --- a/pkg/deployment/rotation/utils_test.go +++ b/pkg/deployment/rotation/utils_test.go @@ -23,10 +23,8 @@ package rotation import ( "testing" - "github.com/arangodb/kube-arangodb/pkg/deployment/resources" - "github.com/rs/zerolog/log" - api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" + "github.com/arangodb/kube-arangodb/pkg/deployment/resources" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" ) @@ -49,7 +47,7 @@ func runTestCases(t *testing.T) func(tcs ...TestCase) { pspec := newTemplateFromSpec(t, tc.spec, api.ServerGroupAgents, tc.deploymentSpec) pstatus := newTemplateFromSpec(t, tc.status, api.ServerGroupAgents, tc.deploymentSpec) - mode, plan, err := compare(log.Logger, tc.deploymentSpec, api.MemberStatus{ID: "id"}, api.ServerGroupAgents, pspec, pstatus) + mode, plan, err := compare(tc.deploymentSpec, api.MemberStatus{ID: "id"}, api.ServerGroupAgents, pspec, pstatus) if tc.expectedErr != "" { require.Error(t, err) diff --git a/pkg/exporter/monitor.go b/pkg/exporter/monitor.go index 1f73335c5..39fbdac00 100644 --- a/pkg/exporter/monitor.go +++ b/pkg/exporter/monitor.go @@ -35,7 +35,7 @@ import ( "github.com/arangodb/go-driver" "github.com/arangodb/kube-arangodb/pkg/apis/shared" - "github.com/rs/zerolog/log" + "github.com/arangodb/kube-arangodb/pkg/logging" ) const ( @@ -44,12 +44,14 @@ const ( failRefreshInterval = time.Second * 15 ) +var logger = logging.Global().RegisterAndGetLogger("monitor", logging.Info) + var currentMembersStatus atomic.Value func NewMonitor(arangodbEndpoint string, auth Authentication, sslVerify bool, timeout time.Duration) *monitor { uri, err := setPath(arangodbEndpoint, shared.ArangoExporterClusterHealthEndpoint) if err != nil { - log.Error().Err(err).Msgf("Fatal") + logger.Err(err).Error("Fatal") os.Exit(1) } @@ -71,14 +73,14 @@ func (m monitor) UpdateMonitorStatus(ctx context.Context) { health, err := m.GetClusterHealth() if err != nil { - log.Error().Err(err).Msg("GetClusterHealth error") + logger.Err(err).Error("GetClusterHealth error") sleep = failRefreshInterval } else { var 
output strings.Builder for key, value := range health.Health { entry, err := m.GetMemberStatus(key, value) if err != nil { - log.Error().Err(err).Msg("GetMemberStatus error") + logger.Err(err).Error("GetMemberStatus error") sleep = failRefreshInterval } output.WriteString(entry) diff --git a/pkg/handlers/backup/backup_suite_test.go b/pkg/handlers/backup/backup_suite_test.go index 216cf1484..95daf6d19 100644 --- a/pkg/handlers/backup/backup_suite_test.go +++ b/pkg/handlers/backup/backup_suite_test.go @@ -25,8 +25,6 @@ import ( "fmt" "testing" - "github.com/rs/zerolog/log" - "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/go-driver" @@ -59,7 +57,7 @@ func newFakeHandler() *handler { kubeClient: k, arangoClientTimeout: defaultArangoClientTimeout, - eventRecorder: newEventInstance(event.NewEventRecorder(log.Logger, "mock", k)), + eventRecorder: newEventInstance(event.NewEventRecorder("mock", k)), } } diff --git a/pkg/handlers/backup/errors.go b/pkg/handlers/backup/errors.go index d236f759d..5824528c6 100644 --- a/pkg/handlers/backup/errors.go +++ b/pkg/handlers/backup/errors.go @@ -94,7 +94,7 @@ func isTemporaryError(err error) bool { } } - if v, ok := err.(utils.Causer); ok { + if v, ok := err.(errors.Causer); ok { return isTemporaryError(v.Cause()) } diff --git a/pkg/handlers/backup/finalizer.go b/pkg/handlers/backup/finalizer.go index 6b75d4ea2..6359960a0 100644 --- a/pkg/handlers/backup/finalizer.go +++ b/pkg/handlers/backup/finalizer.go @@ -26,7 +26,7 @@ import ( "github.com/arangodb/go-driver" backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1" "github.com/arangodb/kube-arangodb/pkg/handlers/utils" - "github.com/rs/zerolog/log" + "github.com/arangodb/kube-arangodb/pkg/util/errors" apiErrors "k8s.io/apimachinery/pkg/api/errors" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -54,7 +54,7 @@ func (h *handler) finalize(backup *backupApi.ArangoBackup) error { backup.Finalizers = finalizers.Remove(finalizersToRemove...) 
if i := len(backup.Finalizers); i > 0 { - log.Warn().Msgf("After finalizing on object %s %s/%s finalizers left: %d", + logger.Warn("After finalizing on object %s %s/%s finalizers left: %d", backup.GroupVersionKind().String(), backup.Namespace, backup.Name, @@ -85,7 +85,7 @@ func (h *handler) finalizeBackup(backup *backupApi.ArangoBackup) error { return nil } - if c, ok := err.(utils.Causer); ok { + if c, ok := err.(errors.Causer); ok { if apiErrors.IsNotFound(c.Cause()) { return nil } @@ -120,7 +120,7 @@ func (h *handler) finalizeBackup(backup *backupApi.ArangoBackup) error { } if err = h.finalizeBackupAction(backup, client); err != nil { - log.Warn().Err(err).Msgf("Operation abort failed for %s %s/%s", + logger.Err(err).Warn("Operation abort failed for %s %s/%s", backup.GroupVersionKind().String(), backup.Namespace, backup.Name) diff --git a/pkg/handlers/backup/handler.go b/pkg/handlers/backup/handler.go index 3bc89661c..ef09be954 100644 --- a/pkg/handlers/backup/handler.go +++ b/pkg/handlers/backup/handler.go @@ -48,9 +48,12 @@ import ( backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1" database "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" arangoClientSet "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" + "github.com/arangodb/kube-arangodb/pkg/logging" meta "k8s.io/apimachinery/pkg/apis/meta/v1" ) +var logger = logging.Global().RegisterAndGetLogger("backup-operator", logging.Info) + const ( defaultArangoClientTimeout = 30 * time.Second retryCount = 25 @@ -91,11 +94,11 @@ func (h *handler) start(stopCh <-chan struct{}) { case <-stopCh: return case <-t.C: - log.Debug().Msgf("Refreshing database objects") + logger.Debug("Refreshing database objects") if err := h.refresh(); err != nil { log.Error().Err(err).Msgf("Unable to refresh database objects") } - log.Debug().Msgf("Database objects refreshed") + logger.Debug("Database objects refreshed") } } } @@ -242,7 +245,7 @@ func (h *handler) Handle(item operation.Item) error { // Check if we should start finalizer if b.DeletionTimestamp != nil { - log.Debug().Msgf("Finalizing %s %s/%s", + logger.Debug("Finalizing %s %s/%s", item.Kind, item.Namespace, item.Name) @@ -350,7 +353,7 @@ func (h *handler) Handle(item operation.Item) error { b.Status = *status - log.Debug().Msgf("Updating %s %s/%s", + logger.Debug("Updating %s %s/%s", item.Kind, item.Namespace, item.Name) diff --git a/pkg/handlers/clustersync/handler.go b/pkg/handlers/clustersync/handler.go deleted file mode 100644 index b359ffe1a..000000000 --- a/pkg/handlers/clustersync/handler.go +++ /dev/null @@ -1,79 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package clustersync - -import ( - "context" - - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - arangoClientSet "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - operator "github.com/arangodb/kube-arangodb/pkg/operatorV2" - "github.com/arangodb/kube-arangodb/pkg/operatorV2/event" - "github.com/arangodb/kube-arangodb/pkg/operatorV2/operation" - "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" - - deploymentApi "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/client-go/kubernetes" -) - -type handler struct { - client arangoClientSet.Interface - kubeClient kubernetes.Interface - eventRecorder event.RecorderInstance - - operator operator.Operator -} - -func (*handler) Name() string { - return deployment.ArangoClusterSynchronizationResourceKind -} - -func (h *handler) Handle(item operation.Item) error { - // Do not act on delete event - if item.Operation == operation.Delete { - return nil - } - - // Get ClusterSynchronizations object. It also covers NotFound case - clusterSync, err := h.client.DatabaseV1().ArangoClusterSynchronizations(item.Namespace).Get(context.Background(), item.Name, meta.GetOptions{}) - if err != nil { - if k8sutil.IsNotFound(err) { - return nil - } - h.operator.GetLogger().Error().Msgf("ListSimple fetch error %v", err) - return err - } - - // Update status on object - if _, err = h.client.DatabaseV1().ArangoClusterSynchronizations(item.Namespace).UpdateStatus(context.Background(), clusterSync, meta.UpdateOptions{}); err != nil { - h.operator.GetLogger().Error().Msgf("ListSimple status update error %v", err) - return err - } - - return nil -} - -func (*handler) CanBeHandled(item operation.Item) bool { - return item.Group == deploymentApi.SchemeGroupVersion.Group && - item.Version == deploymentApi.SchemeGroupVersion.Version && - item.Kind == deployment.ArangoClusterSynchronizationResourceKind -} diff --git a/pkg/handlers/clustersync/lifecycle.go b/pkg/handlers/clustersync/lifecycle.go deleted file mode 100644 index 8597c42bf..000000000 --- a/pkg/handlers/clustersync/lifecycle.go +++ /dev/null @@ -1,57 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package clustersync - -import ( - "context" - "time" - - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - operator "github.com/arangodb/kube-arangodb/pkg/operatorV2" - - "github.com/rs/zerolog/log" - meta "k8s.io/apimachinery/pkg/apis/meta/v1" -) - -var _ operator.LifecyclePreStart = &handler{} - -// LifecyclePreStart is executed before operator starts to work, additional checks can be placed here -// Wait for CR to be present -func (h *handler) LifecyclePreStart() error { - log.Info().Msgf("Starting Lifecycle PreStart for %s", h.Name()) - - defer func() { - log.Info().Msgf("Lifecycle PreStart for %s completed", h.Name()) - }() - - for { - _, err := h.client.DatabaseV1().ArangoClusterSynchronizations(h.operator.Namespace()).List(context.Background(), meta.ListOptions{}) - - if err != nil { - log.Warn().Err(err).Msgf("CR for %s not found", deployment.ArangoClusterSynchronizationResourceKind) - - time.Sleep(250 * time.Millisecond) - continue - } - - return nil - } -} diff --git a/pkg/handlers/clustersync/register.go b/pkg/handlers/clustersync/register.go deleted file mode 100644 index 6e3a441b2..000000000 --- a/pkg/handlers/clustersync/register.go +++ /dev/null @@ -1,62 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package clustersync - -import ( - "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - v1 "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1" - arangoClientSet "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" - arangoInformer "github.com/arangodb/kube-arangodb/pkg/generated/informers/externalversions" - operator "github.com/arangodb/kube-arangodb/pkg/operatorV2" - "github.com/arangodb/kube-arangodb/pkg/operatorV2/event" - - "k8s.io/client-go/kubernetes" -) - -func newEventInstance(eventRecorder event.Recorder) event.RecorderInstance { - return eventRecorder.NewInstance(v1.SchemeGroupVersion.Group, - v1.SchemeGroupVersion.Version, - deployment.ArangoClusterSynchronizationResourceKind) -} - -// RegisterInformer into operator -func RegisterInformer(operator operator.Operator, recorder event.Recorder, client arangoClientSet.Interface, kubeClient kubernetes.Interface, informer arangoInformer.SharedInformerFactory) error { - if err := operator.RegisterInformer(informer.Database().V1().ArangoClusterSynchronizations().Informer(), - v1.SchemeGroupVersion.Group, - v1.SchemeGroupVersion.Version, - deployment.ArangoClusterSynchronizationResourceKind); err != nil { - return err - } - - h := &handler{ - client: client, - kubeClient: kubeClient, - eventRecorder: newEventInstance(recorder), - - operator: operator, - } - - if err := operator.RegisterHandler(h); err != nil { - return err - } - - return nil -} diff --git a/pkg/handlers/job/handler.go b/pkg/handlers/job/handler.go index 4962c2a9f..fc4106d62 100644 --- a/pkg/handlers/job/handler.go +++ b/pkg/handlers/job/handler.go @@ -73,7 +73,7 @@ func (h *handler) Handle(item operation.Item) error { if k8sutil.IsNotFound(err) { return nil } - h.operator.GetLogger().Error().Msgf("ArangoJob fetch error %v", err) + logger.Error("ArangoJob fetch error %v", err) return err } @@ -86,7 +86,7 @@ func (h *handler) Handle(item operation.Item) error { // Update status on object if _, err = h.client.AppsV1().ArangoJobs(item.Namespace).UpdateStatus(context.Background(), job, meta.UpdateOptions{}); err != nil { - h.operator.GetLogger().Error().Msgf("ArangoJob status update error %v", err) + logger.Error("ArangoJob status update error %v", err) return err } @@ -138,7 +138,7 @@ func (h *handler) prepareK8sJob(job *appsApi.ArangoJob) (*batchv1.Job, error) { deployment, err := h.client.DatabaseV1().ArangoDeployments(job.Namespace).Get(context.Background(), job.Spec.ArangoDeploymentName, meta.GetOptions{}) if err != nil { - h.operator.GetLogger().Error().Msgf("ArangoDeployment fetch error %v", err) + logger.Error("ArangoDeployment fetch error %v", err) return &k8sJob, err } @@ -157,7 +157,7 @@ func (h *handler) prepareK8sJob(job *appsApi.ArangoJob) (*batchv1.Job, error) { executable, err := os.Executable() if err != nil { - h.operator.GetLogger().Error().Msgf("reading Operator executable name error %v", err) + logger.Error("reading Operator executable name error %v", err) return &k8sJob, err } diff --git a/pkg/handlers/job/job_suite_test.go b/pkg/handlers/job/job_suite_test.go index 17b3782ae..5ccbf9753 100644 --- a/pkg/handlers/job/job_suite_test.go +++ b/pkg/handlers/job/job_suite_test.go @@ -33,7 +33,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/operatorV2/event" "github.com/arangodb/kube-arangodb/pkg/operatorV2/operation" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" @@ -49,8 +48,8 @@ func 
newFakeHandler() *handler { h := &handler{ client: f, kubeClient: k, - eventRecorder: newEventInstance(event.NewEventRecorder(log.Logger, "mock", k)), - operator: operator.NewOperator(log.Logger, "mock", "mock", "mock"), + eventRecorder: newEventInstance(event.NewEventRecorder("mock", k)), + operator: operator.NewOperator("mock", "mock", "mock"), } return h diff --git a/pkg/handlers/job/logger.go b/pkg/handlers/job/logger.go new file mode 100644 index 000000000..113b67390 --- /dev/null +++ b/pkg/handlers/job/logger.go @@ -0,0 +1,29 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package job + +import ( + "github.com/arangodb/kube-arangodb/pkg/logging" +) + +var ( + logger = logging.Global().RegisterAndGetLogger("operator-arangojob-handler", logging.Info) +) diff --git a/pkg/handlers/policy/handler_suite_test.go b/pkg/handlers/policy/handler_suite_test.go index ef231298e..9509f6851 100644 --- a/pkg/handlers/policy/handler_suite_test.go +++ b/pkg/handlers/policy/handler_suite_test.go @@ -25,8 +25,6 @@ import ( "fmt" "testing" - "github.com/rs/zerolog/log" - "github.com/arangodb/kube-arangodb/pkg/apis/backup" "github.com/arangodb/kube-arangodb/pkg/apis/deployment" @@ -50,7 +48,7 @@ func newFakeHandler() *handler { h := &handler{ client: f, kubeClient: k, - eventRecorder: newEventInstance(event.NewEventRecorder(log.Logger, "mock", k)), + eventRecorder: newEventInstance(event.NewEventRecorder("mock", k)), } return h diff --git a/pkg/handlers/utils/errors.go b/pkg/handlers/utils/errors.go index 0b82356e1..57b43dd07 100644 --- a/pkg/handlers/utils/errors.go +++ b/pkg/handlers/utils/errors.go @@ -20,11 +20,6 @@ package utils -// Causer define if error has another reason error -type Causer interface { - Cause() error -} - // Temporary define if error implements temporary interface type Temporary interface { Temporary() bool diff --git a/pkg/logging/const.go b/pkg/logging/const.go deleted file mode 100644 index 92046f593..000000000 --- a/pkg/logging/const.go +++ /dev/null @@ -1,55 +0,0 @@ -// -// DISCLAIMER -// -// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
-// -// Copyright holder is ArangoDB GmbH, Cologne, Germany -// - -package logging - -import "github.com/rs/zerolog" - -const ( - LoggerNameOperator = "operator" - LoggerNameDeployment = "deployment" - LoggerNameInspector = "inspector" - LoggerNameKLog = "klog" - LoggerNameServer = "server" - LoggerNameDeploymentReplication = "deployment-replication" - LoggerNameStorage = "storage" - LoggerNameProvisioner = "provisioner" - LoggerNameReconciliation = "reconciliation" - LoggerNameEventRecorder = "event-recorder" -) - -var defaultLogLevels = map[string]zerolog.Level{ - LoggerNameInspector: zerolog.WarnLevel, -} - -func LoggerNames() []string { - return []string{ - LoggerNameOperator, - LoggerNameDeployment, - LoggerNameInspector, - LoggerNameKLog, - LoggerNameServer, - LoggerNameDeploymentReplication, - LoggerNameStorage, - LoggerNameProvisioner, - LoggerNameReconciliation, - LoggerNameEventRecorder, - } -} diff --git a/pkg/logging/global.go b/pkg/logging/global.go new file mode 100644 index 000000000..2b3d42b32 --- /dev/null +++ b/pkg/logging/global.go @@ -0,0 +1,29 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package logging + +import "github.com/rs/zerolog/log" + +var global = NewFactory(log.Logger) + +func Global() Factory { + return global +} diff --git a/pkg/logging/level.go b/pkg/logging/level.go new file mode 100644 index 000000000..4d483edc5 --- /dev/null +++ b/pkg/logging/level.go @@ -0,0 +1,34 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package logging + +import "github.com/rs/zerolog" + +type Level zerolog.Level + +const ( + Trace = Level(zerolog.TraceLevel) + Debug = Level(zerolog.DebugLevel) + Info = Level(zerolog.InfoLevel) + Warn = Level(zerolog.WarnLevel) + Error = Level(zerolog.ErrorLevel) + Fatal = Level(zerolog.FatalLevel) +) diff --git a/pkg/logging/logger.go b/pkg/logging/logger.go index 3f40f7673..b399c35de 100644 --- a/pkg/logging/logger.go +++ b/pkg/logging/logger.go @@ -21,170 +21,416 @@ package logging import ( - "os" - "strings" + "io" + "sort" "sync" "time" - "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" + "github.com/rs/zerolog/log" ) -var ( - globalLoggerLock sync.Mutex - globalLogger Service -) +const AllLevels = "all" -func GlobalLogger() Service { - globalLoggerLock.Lock() - defer globalLoggerLock.Unlock() +type Factory interface { + Get(name string) Logger - if globalLogger == nil { - if err := initGlobalLogger("info", nil); err != nil { - panic(err) + LogLevels() map[string]Level + ApplyLogLevels(in map[string]Level) + SetRoot(log zerolog.Logger) + + RegisterLogger(name string, level Level) bool + RegisterAndGetLogger(name string, level Level) Logger + + RegisterWrappers(w ...Wrap) + + Names() []string +} + +func NewDefaultFactory() Factory { + return NewFactory(log.Logger) +} + +func NewFactory(root zerolog.Logger) Factory { + return &factory{ + root: root, + loggers: map[string]*zerolog.Logger{}, + } +} + +type factory struct { + lock sync.Mutex + + root zerolog.Logger + + wrappers []Wrap + + loggers map[string]*zerolog.Logger +} + +func (f *factory) Names() []string { + z := f.LogLevels() + + r := make([]string, 0, len(z)) + + for k := range z { + r = append(r, k) + } + + sort.Strings(r) + + return r +} + +func (f *factory) RegisterWrappers(w ...Wrap) { + f.lock.Lock() + defer f.lock.Unlock() + + f.wrappers = append(f.wrappers, w...) 
+} + +func (f *factory) RegisterAndGetLogger(name string, level Level) Logger { + f.RegisterLogger(name, level) + return f.Get(name) +} + +func (f *factory) SetRoot(log zerolog.Logger) { + f.lock.Lock() + defer f.lock.Unlock() + + f.root = log + + for k := range f.loggers { + l := log.Level(f.loggers[k].GetLevel()) + f.loggers[k] = &l + } +} + +func (f *factory) ApplyLogLevels(in map[string]Level) { + f.lock.Lock() + defer f.lock.Unlock() + + if def, ok := in[AllLevels]; ok { + // Apply with default log level + + for k := range f.loggers { + if ov, ok := in[k]; ok { + // Override in place + l := f.root.Level(zerolog.Level(ov)) + f.loggers[k] = &l + } else { + // Override in place + l := f.root.Level(zerolog.Level(def)) + f.loggers[k] = &l + } + } + } else { + for k := range f.loggers { + if ov, ok := in[k]; ok { + // Override in place + l := f.root.Level(zerolog.Level(ov)) + f.loggers[k] = &l + } } } - - return globalLogger } -func InitGlobalLogger(defaultLevel string, overrides []string) error { - globalLoggerLock.Lock() - defer globalLoggerLock.Unlock() +func (f *factory) RegisterLogger(name string, level Level) bool { + f.lock.Lock() + defer f.lock.Unlock() - return initGlobalLogger(defaultLevel, overrides) + if _, ok := f.loggers[name]; ok { + return false + } + + l := f.root.Level(zerolog.Level(level)) + f.loggers[name] = &l + + return true } -func initGlobalLogger(defaultLevel string, overrides []string) error { - if globalLogger != nil { - return errors.Newf("GlobalLogger already created") +func (f *factory) LogLevels() map[string]Level { + f.lock.Lock() + defer f.lock.Unlock() + + q := make(map[string]Level, len(f.loggers)) + + for k, v := range f.loggers { + q[k] = Level(v.GetLevel()) } - s, err := newService(defaultLevel, overrides) - if err != nil { - return err + return q +} + +func (f *factory) getLogger(name string) *zerolog.Logger { + f.lock.Lock() + defer f.lock.Unlock() + + l, ok := f.loggers[name] + if ok { + return l } - globalLogger = s return nil } -// Service exposes the interfaces for a logger service -// that supports different loggers with different levels. -type Service interface { - // MustGetLogger creates a logger with given name. - MustGetLogger(name string) zerolog.Logger - // MustSetLevel sets the log level for the component with given name to given level. - MustSetLevel(name, level string) - // ConfigureRootLogger calls the given callback to modify the root logger. - ConfigureRootLogger(cb func(rootLog zerolog.Logger) zerolog.Logger) +func (f *factory) Get(name string) Logger { + return &chain{ + logger: &logger{ + factory: f, + name: name, + }, + } } -// loggingService implements Service -type loggingService struct { - mutex sync.Mutex - rootLog zerolog.Logger - defaultLevel zerolog.Level - levels map[string]zerolog.Level +type LoggerIO interface { + io.Writer } -// NewRootLogger creates a new zerolog logger with default settings. -func NewRootLogger() zerolog.Logger { - return zerolog.New(zerolog.ConsoleWriter{ - Out: os.Stdout, - TimeFormat: time.RFC3339Nano, - NoColor: true, - }).With().Timestamp().Logger() +type loggerIO struct { + parent *chain + + caller func(l Logger, msg string) } -// newService creates a new Service. 
-func newService(defaultLevel string, overrides []string) (Service, error) { - l, err := stringToLevel(defaultLevel) - if err != nil { - return nil, errors.WithStack(err) - } - rootLog := NewRootLogger() - s := &loggingService{ - rootLog: rootLog, - defaultLevel: l, - levels: make(map[string]zerolog.Level), +func (l loggerIO) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + // Trim CR added by stdlog. + p = p[0 : n-1] } + l.caller(l.parent, string(p)) + return +} - for k, v := range defaultLogLevels { - s.levels[k] = v - } +type Logger interface { + Wrap(w Wrap) Logger + WrapObj(w WrapObj) Logger - for _, override := range overrides { - levelParts := strings.Split(override, "=") - switch size := len(levelParts); size { - case 1: - level, err := stringToLevel(levelParts[0]) - if err != nil { - return nil, errors.WithStack(err) + Bool(key string, i bool) Logger + Str(key, value string) Logger + Strs(key string, values ...string) Logger + SinceStart(key string, start time.Time) Logger + Err(err error) Logger + Int(key string, i int) Logger + Int32(key string, i int32) Logger + Int64(key string, i int64) Logger + Interface(key string, i interface{}) Logger + Dur(key string, dur time.Duration) Logger + Time(key string, time time.Time) Logger + + Trace(msg string, args ...interface{}) + Debug(msg string, args ...interface{}) + Info(msg string, args ...interface{}) + Warn(msg string, args ...interface{}) + Error(msg string, args ...interface{}) + Fatal(msg string, args ...interface{}) + + TraceIO() LoggerIO + DebugIO() LoggerIO + InfoIO() LoggerIO + WarnIO() LoggerIO + ErrorIO() LoggerIO + FatalIO() LoggerIO +} + +type logger struct { + factory *factory + + name string +} + +type chain struct { + *logger + + parent *chain + + wrap Wrap +} + +func (c *chain) TraceIO() LoggerIO { + return loggerIO{ + parent: c, + caller: func(l Logger, msg string) { + l.Trace(msg) + }, + } +} + +func (c *chain) DebugIO() LoggerIO { + return loggerIO{ + parent: c, + caller: func(l Logger, msg string) { + l.Debug(msg) + }, + } +} + +func (c *chain) InfoIO() LoggerIO { + return loggerIO{ + parent: c, + caller: func(l Logger, msg string) { + l.Info(msg) + }, + } +} + +func (c *chain) WarnIO() LoggerIO { + return loggerIO{ + parent: c, + caller: func(l Logger, msg string) { + l.Warn(msg) + }, + } +} + +func (c *chain) ErrorIO() LoggerIO { + return loggerIO{ + parent: c, + caller: func(l Logger, msg string) { + l.Error(msg) + }, + } +} + +func (c *chain) FatalIO() LoggerIO { + return loggerIO{ + parent: c, + caller: func(l Logger, msg string) { + l.Fatal(msg) + }, + } +} + +func (c *chain) Int64(key string, i int64) Logger { + return c.Wrap(Int64(key, i)) +} + +func (c *chain) WrapObj(w WrapObj) Logger { + return c.Wrap(w.WrapLogger) +} + +func (c *chain) Bool(key string, i bool) Logger { + return c.Wrap(Bool(key, i)) +} + +func (c *chain) Int32(key string, i int32) Logger { + return c.Wrap(Int32(key, i)) +} + +func (c *chain) Time(key string, time time.Time) Logger { + return c.Wrap(Time(key, time)) +} + +func (c *chain) Strs(key string, values ...string) Logger { + return c.Wrap(Strs(key, values...)) +} + +func (c *chain) Dur(key string, dur time.Duration) Logger { + return c.Wrap(Dur(key, dur)) +} + +func (c *chain) Int(key string, i int) Logger { + return c.Wrap(Int(key, i)) +} + +func (c *chain) Interface(key string, i interface{}) Logger { + return c.Wrap(Interface(key, i)) +} + +func (c *chain) Err(err error) Logger { + return c.Wrap(Err(err)) +} + +func (c *chain) SinceStart(key 
string, start time.Time) Logger { + return c.Wrap(SinceStart(key, start)) +} + +func (c *chain) Str(key, value string) Logger { + return c.Wrap(Str(key, value)) +} + +func (c *chain) apply(in *zerolog.Event) *zerolog.Event { + if p := c.parent; c.parent != nil { + in = p.apply(in) + } else { + // We are on root, check factory + if w := c.factory.wrappers; len(w) > 0 { + for id := range w { + if w[id] == nil { + continue + } + + in = w[id](in) } - s.defaultLevel = level - case 2: - level, err := stringToLevel(levelParts[1]) - if err != nil { - return nil, errors.WithStack(err) - } - s.levels[levelParts[0]] = level - default: - return nil, errors.Newf("invalid log definition %s: Length %d is not equal 1 or 2", override, size) } } - return s, nil -} -// ConfigureRootLogger calls the given callback to modify the root logger. -func (s *loggingService) ConfigureRootLogger(cb func(rootLog zerolog.Logger) zerolog.Logger) { - s.mutex.Lock() - defer s.mutex.Unlock() - - s.rootLog = cb(s.rootLog) -} - -// MustGetLogger creates a logger with given name -func (s *loggingService) MustGetLogger(name string) zerolog.Logger { - s.mutex.Lock() - defer s.mutex.Unlock() - - level, found := s.levels[name] - if !found { - level = s.defaultLevel + if c.wrap != nil { + return c.wrap(in) } - return s.rootLog.With().Str("component", name).Logger().Level(level) + + return in } -// MustSetLevel sets the log level for the component with given name to given level. -func (s *loggingService) MustSetLevel(name, level string) { - l, err := stringToLevel(level) - if err != nil { - panic(err) +func (c *chain) Trace(msg string, args ...interface{}) { + l := c.factory.getLogger(c.name) + if l == nil { + return } - s.mutex.Lock() - defer s.mutex.Unlock() - s.levels[name] = l + + c.apply(l.Trace()).Msgf(msg, args...) } -// stringToLevel converts a level string to a zerolog level -func stringToLevel(l string) (zerolog.Level, error) { - switch strings.ToLower(l) { - case "trace": - return zerolog.TraceLevel, nil - case "debug": - return zerolog.DebugLevel, nil - case "info": - return zerolog.InfoLevel, nil - case "warn", "warning": - return zerolog.WarnLevel, nil - case "error": - return zerolog.ErrorLevel, nil - case "fatal": - return zerolog.FatalLevel, nil - case "panic": - return zerolog.PanicLevel, nil +func (c *chain) Debug(msg string, args ...interface{}) { + l := c.factory.getLogger(c.name) + if l == nil { + return + } + + c.apply(l.Debug()).Msgf(msg, args...) +} + +func (c *chain) Info(msg string, args ...interface{}) { + l := c.factory.getLogger(c.name) + if l == nil { + return + } + + c.apply(l.Info()).Msgf(msg, args...) +} + +func (c *chain) Warn(msg string, args ...interface{}) { + l := c.factory.getLogger(c.name) + if l == nil { + return + } + + c.apply(l.Warn()).Msgf(msg, args...) +} + +func (c *chain) Error(msg string, args ...interface{}) { + l := c.factory.getLogger(c.name) + if l == nil { + return + } + + c.apply(l.Error()).Msgf(msg, args...) +} + +func (c *chain) Fatal(msg string, args ...interface{}) { + l := c.factory.getLogger(c.name) + if l == nil { + return + } + + c.apply(l.Fatal()).Msgf(msg, args...) 
+} + +func (c *chain) Wrap(w Wrap) Logger { + return &chain{ + logger: c.logger, + parent: c, + wrap: w, } - return zerolog.InfoLevel, errors.Newf("Unknown log level '%s'", l) } diff --git a/pkg/logging/logger_test.go b/pkg/logging/logger_test.go new file mode 100644 index 000000000..d40c6cd57 --- /dev/null +++ b/pkg/logging/logger_test.go @@ -0,0 +1,175 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package logging + +import ( + "bytes" + "encoding/json" + "testing" + "time" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/require" +) + +func logScanner() (Factory, <-chan string, func()) { + b := bytes.NewBuffer(nil) + l := zerolog.New(b) + f := NewFactory(l) + + out := make(chan string) + + closer := make(chan struct{}) + + go func() { + defer close(out) + t := time.NewTicker(time.Millisecond) + defer t.Stop() + + z := "" + + for { + for b.Len() > 0 { + q, _, _ := b.ReadRune() + if q == '\n' { + out <- z + z = "" + } else { + z = z + string(q) + } + } + + select { + case <-closer: + return + case <-t.C: + } + } + }() + + return f, out, func() { + close(closer) + } +} + +func readData(in <-chan string) (string, bool) { + t := time.NewTimer(100 * time.Millisecond) + defer t.Stop() + + select { + case text := <-in: + return text, true + case <-t.C: + return "", false + } +} + +func expectTimeout(t *testing.T, in <-chan string) { + _, ok := readData(in) + require.False(t, ok, "Data should be not present") +} + +func expectData(t *testing.T, in <-chan string) { + s, ok := readData(in) + require.True(t, ok, "Data should be present") + + var q map[string]string + + require.NoError(t, json.Unmarshal([]byte(s), &q)) +} + +func Test_Logger(t *testing.T) { + f, data, c := logScanner() + defer c() + + q := f.Get("foo") + + t.Run("Run on unregistered logger", func(t *testing.T) { + q.Info("Data") + + expectTimeout(t, data) + }) + + t.Run("Register logger", func(t *testing.T) { + f.RegisterLogger("foo", Info) + }) + + t.Run("Run on registered logger", func(t *testing.T) { + q.Info("Data") + + expectData(t, data) + }) + + t.Run("Run on too low log level logger", func(t *testing.T) { + q.Debug("Data") + + expectTimeout(t, data) + }) + + t.Run("Change log level", func(t *testing.T) { + f.ApplyLogLevels(map[string]Level{ + "foo": Debug, + }) + + q.Debug("Data") + + expectData(t, data) + + require.Equal(t, Debug, f.LogLevels()["foo"]) + }) + + t.Run("Change all log levels", func(t *testing.T) { + f.ApplyLogLevels(map[string]Level{ + "all": Info, + }) + + q.Debug("Data") + + expectTimeout(t, data) + + require.Equal(t, Info, f.LogLevels()["foo"]) + }) + + t.Run("Change invalid level", func(t *testing.T) { + f.ApplyLogLevels(map[string]Level{ + "invalid": Info, + }) + + q.Debug("Data") + + expectTimeout(t, data) + + require.Equal(t, Info, f.LogLevels()["foo"]) + }) + + t.Run("Change all log levels with override", func(t *testing.T) { + 
f.ApplyLogLevels(map[string]Level{ + "all": Debug, + "foo": Info, + }) + + q.Debug("Data") + + expectTimeout(t, data) + + require.Equal(t, Info, f.LogLevels()["foo"]) + }) +} diff --git a/pkg/util/profiler/profiler.go b/pkg/logging/utils.go similarity index 53% rename from pkg/util/profiler/profiler.go rename to pkg/logging/utils.go index b41f6b17f..bcfe3a7e2 100644 --- a/pkg/util/profiler/profiler.go +++ b/pkg/logging/utils.go @@ -18,32 +18,37 @@ // Copyright holder is ArangoDB GmbH, Cologne, Germany // -package profiler +package logging import ( - "time" + "strings" "github.com/rs/zerolog" ) -// Session is a single timed action -type Session time.Time +func ParseLogLevelsFromArgs(in []string) (map[string]Level, error) { + r := make(map[string]Level) -// Start a profiling session -func Start() Session { - return Session(time.Now()) -} + for _, level := range in { + z := strings.SplitN(level, "=", 2) -// Done with a profiling session, log when time is "long" -func (t Session) Done(log zerolog.Logger, msg string) { - t.LogIf(log, time.Second/4, msg) -} + switch len(z) { + case 1: + l, err := zerolog.ParseLevel(strings.ToLower(z[0])) + if err != nil { + return nil, err + } -// LogIf logs the time taken since the start of the session, if that is longer -// than the given minimum duration. -func (t Session) LogIf(log zerolog.Logger, minLen time.Duration, msg string) { - interval := time.Since(time.Time(t)) - if interval > minLen { - log.Debug().Str("time-taken", interval.String()).Msg("profiler: " + msg) + r[AllLevels] = Level(l) + case 2: + l, err := zerolog.ParseLevel(strings.ToLower(z[1])) + if err != nil { + return nil, err + } + + r[z[0]] = Level(l) + } } + + return r, nil } diff --git a/pkg/logging/utils_test.go b/pkg/logging/utils_test.go new file mode 100644 index 000000000..48ae4632c --- /dev/null +++ b/pkg/logging/utils_test.go @@ -0,0 +1,104 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package logging + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func Test_ParseLogLevelsFromArgs(t *testing.T) { + type testCase struct { + name string + in []string + + expectedErr string + expected map[string]Level + } + + testCases := []testCase{ + { + name: "empty", + + expected: map[string]Level{}, + }, + { + name: "default level", + + in: []string{ + "info", + }, + + expected: map[string]Level{ + AllLevels: Info, + }, + }, + { + name: "parse error level", + + in: []string{ + "infxx", + }, + + expectedErr: "Unknown Level String: 'infxx', defaulting to NoLevel", + }, + { + name: "default level - camel", + + in: []string{ + "iNfO", + }, + + expected: map[string]Level{ + AllLevels: Info, + }, + }, + { + name: "default level + specific", + + in: []string{ + "iNfO", + "other=debug", + }, + + expected: map[string]Level{ + AllLevels: Info, + "other": Debug, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + r, err := ParseLogLevelsFromArgs(tc.in) + + if tc.expectedErr != "" { + require.EqualError(t, err, tc.expectedErr) + require.Nil(t, r) + } else { + require.NoError(t, err) + + require.Equal(t, tc.expected, r) + } + }) + } +} diff --git a/pkg/logging/wrap.go b/pkg/logging/wrap.go new file mode 100644 index 000000000..ad63a3655 --- /dev/null +++ b/pkg/logging/wrap.go @@ -0,0 +1,99 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package logging + +import ( + "time" + + "github.com/rs/zerolog" +) + +type WrapObj interface { + WrapLogger(in *zerolog.Event) *zerolog.Event +} + +type Wrap func(in *zerolog.Event) *zerolog.Event + +func Str(key, value string) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Str(key, value) + } +} + +func Strs(key string, values ...string) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Strs(key, values) + } +} + +func Bool(key string, i bool) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Bool(key, i) + } +} + +func Int32(key string, i int32) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Int32(key, i) + } +} + +func Int64(key string, i int64) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Int64(key, i) + } +} + +func Time(key string, time time.Time) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Time(key, time) + } +} + +func SinceStart(key string, start time.Time) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Str(key, time.Since(start).String()) + } +} + +func Int(key string, i int) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Int(key, i) + } +} + +func Interface(key string, i interface{}) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Interface(key, i) + } +} + +func Err(err error) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Err(err) + } +} + +func Dur(key string, dur time.Duration) Wrap { + return func(in *zerolog.Event) *zerolog.Event { + return in.Dur(key, dur) + } +} diff --git a/pkg/operator/crd.go b/pkg/operator/crd.go index c26e363a4..440a10113 100644 --- a/pkg/operator/crd.go +++ b/pkg/operator/crd.go @@ -28,8 +28,7 @@ import ( // waitForCRD waits for the CustomResourceDefinition (created externally) to be ready. 
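// Illustrative sketch, not part of this change set: the Wrap helpers introduced
// in pkg/logging/wrap.go above are small adapters over *zerolog.Event, so a call
// site can pre-compose extra fields through Logger.Wrap (the chain type shown
// earlier in this diff). Package, logger topic and function names below are
// hypothetical.

package example

import (
	"time"

	"github.com/arangodb/kube-arangodb/pkg/logging"
)

var exampleLog = logging.Global().RegisterAndGetLogger("example", logging.Info)

// logRetry emits one warning carrying the error, attempt counter and duration as fields.
func logRetry(err error, attempt int, took time.Duration) {
	exampleLog.Wrap(logging.Err(err)).
		Wrap(logging.Int("attempt", attempt)).
		Wrap(logging.Dur("took", took)).
		Warn("operation failed, retrying")
}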
func (o *Operator) waitForCRD(crdName string, checkFn func() error) { - log := o.log - log.Debug().Msgf("Waiting for %s CRD to be ready - ", crdName) + o.log.Debug("Waiting for %s CRD to be ready - ", crdName) for { var err error = nil @@ -44,11 +43,11 @@ func (o *Operator) waitForCRD(crdName string, checkFn func() error) { if err == nil { break } else { - log.Error().Err(err).Msg("Resource initialization failed") - log.Info().Msgf("Retrying in %s...", initRetryWaitTime) + o.log.Err(err).Error("Resource initialization failed") + o.log.Info("Retrying in %s...", initRetryWaitTime) time.Sleep(initRetryWaitTime) } } - log.Debug().Msg("CRDs ready") + o.log.Debug("CRDs ready") } diff --git a/pkg/operator/operator.go b/pkg/operator/operator.go index 8cc46e583..f901fa1ed 100644 --- a/pkg/operator/operator.go +++ b/pkg/operator/operator.go @@ -45,7 +45,6 @@ import ( arangoClientSet "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned" arangoInformer "github.com/arangodb/kube-arangodb/pkg/generated/informers/externalversions" "github.com/arangodb/kube-arangodb/pkg/handlers/backup" - "github.com/arangodb/kube-arangodb/pkg/handlers/clustersync" "github.com/arangodb/kube-arangodb/pkg/handlers/job" "github.com/arangodb/kube-arangodb/pkg/handlers/policy" "github.com/arangodb/kube-arangodb/pkg/logging" @@ -67,9 +66,8 @@ const ( type operatorV2type string const ( - backupOperator operatorV2type = "backup" - appsOperator operatorV2type = "apps" - k2KClusterSyncOperator operatorV2type = "k2kclustersync" + backupOperator operatorV2type = "backup" + appsOperator operatorV2type = "apps" ) type Event struct { @@ -83,7 +81,7 @@ type Operator struct { Config Dependencies - log zerolog.Logger + log logging.Logger deployments map[string]*deployment.Deployment deploymentReplications map[string]*replication.DeploymentReplication localStorages map[string]*storage.LocalStorage @@ -111,7 +109,6 @@ type Config struct { } type Dependencies struct { - LogService logging.Service Client kclient.Client EventRecorder record.EventRecorder LivenessProbe *probe.LivenessProbe @@ -128,7 +125,6 @@ func NewOperator(config Config, deps Dependencies) (*Operator, error) { o := &Operator{ Config: config, Dependencies: deps, - log: deps.LogService.MustGetLogger(logging.LoggerNameOperator), deployments: make(map[string]*deployment.Deployment), deploymentReplications: make(map[string]*replication.DeploymentReplication), localStorages: make(map[string]*storage.LocalStorage), @@ -174,22 +170,19 @@ func (o *Operator) Run() { } } if o.Config.EnableK2KClusterSync { - if !o.Config.SingleMode { - go o.runLeaderElection("arango-k2k-cluster-sync-operator", constants.ClusterSyncLabelRole, o.onStartK2KClusterSync, o.Dependencies.K2KClusterSyncProbe) - } else { - go o.runWithoutLeaderElection("arango-k2k-cluster-sync-operator", constants.ClusterSyncLabelRole, o.onStartK2KClusterSync, o.Dependencies.K2KClusterSyncProbe) - } + // Nothing to do + o.log.Warn("K2K Cluster sync is permanently disabled") } ctx := util.CreateSignalContext(context.Background()) <-ctx.Done() - o.log.Info().Msgf("Got interrupt signal, running shutdown handler in %s...", o.Config.ShutdownDelay) + o.log.Info("Got interrupt signal, running shutdown handler in %s...", o.Config.ShutdownDelay) time.Sleep(o.Config.ShutdownDelay) o.handleShutdown() } func (o *Operator) handleShutdown() { - o.log.Info().Msg("Waiting for deployments termination...") + o.log.Info("Waiting for deployments termination...") shutdownCh := make(chan struct{}) go func() { for { @@ -202,10 +195,10 
@@ func (o *Operator) handleShutdown() { }() select { case <-shutdownCh: - o.log.Info().Msg("All deployments terminated, exiting.") + o.log.Info("All deployments terminated, exiting.") return case <-time.After(o.Config.ShutdownTimeout): - o.log.Info().Msg("Timeout reached before all deployments terminated, exiting.") + o.log.Info("Timeout reached before all deployments terminated, exiting.") return } } @@ -246,15 +239,10 @@ func (o *Operator) onStartApps(stop <-chan struct{}) { o.onStartOperatorV2(appsOperator, stop) } -// onStartK2KClusterSync starts the operator and run till given channel is closed. -func (o *Operator) onStartK2KClusterSync(stop <-chan struct{}) { - o.onStartOperatorV2(k2KClusterSyncOperator, stop) -} - // onStartOperatorV2 run the operatorV2 type func (o *Operator) onStartOperatorV2(operatorType operatorV2type, stop <-chan struct{}) { operatorName := fmt.Sprintf("arangodb-%s-operator", operatorType) - operator := operatorV2.NewOperator(o.Dependencies.LogService.MustGetLogger(logging.LoggerNameReconciliation), operatorName, o.Namespace, o.OperatorImage) + operator := operatorV2.NewOperator(operatorName, o.Namespace, o.OperatorImage) rand.Seed(time.Now().Unix()) @@ -275,7 +263,7 @@ func (o *Operator) onStartOperatorV2(operatorType operatorV2type, stop <-chan st panic(err) } - eventRecorder := event.NewEventRecorder(o.Dependencies.LogService.MustGetLogger(logging.LoggerNameEventRecorder), operatorName, kubeClientSet) + eventRecorder := event.NewEventRecorder(operatorName, kubeClientSet) arangoInformer := arangoInformer.NewSharedInformerFactoryWithOptions(arangoClientSet, 10*time.Second, arangoInformer.WithNamespace(o.Namespace)) @@ -310,16 +298,6 @@ func (o *Operator) onStartOperatorV2(operatorType operatorV2type, stop <-chan st if err = policy.RegisterInformer(operator, eventRecorder, arangoClientSet, kubeClientSet, arangoInformer); err != nil { panic(err) } - case k2KClusterSyncOperator: - checkFn := func() error { - _, err := o.Client.Arango().DatabaseV1().ArangoClusterSynchronizations(o.Namespace).List(context.Background(), meta.ListOptions{}) - return err - } - o.waitForCRD(depldef.ArangoClusterSynchronizationCRDName, checkFn) - - if err = clustersync.RegisterInformer(operator, eventRecorder, arangoClientSet, kubeClientSet, arangoInformer); err != nil { - panic(err) - } } if err = operator.RegisterStarter(arangoInformer); err != nil { diff --git a/pkg/operator/operator_deployment.go b/pkg/operator/operator_deployment.go index da61af658..e07a77df4 100644 --- a/pkg/operator/operator_deployment.go +++ b/pkg/operator/operator_deployment.go @@ -22,7 +22,6 @@ package operator import ( deploymentType "github.com/arangodb/kube-arangodb/pkg/apis/deployment" - "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/errors" kwatch "k8s.io/apimachinery/pkg/watch" @@ -46,7 +45,6 @@ var ( // This registers a listener and waits until the process stops. func (o *Operator) runDeployments(stop <-chan struct{}) { rw := k8sutil.NewResourceWatcher( - o.log, o.Client.Arango().DatabaseV1().RESTClient(), deploymentType.ArangoDeploymentResourcePlural, o.Config.Namespace, @@ -67,9 +65,9 @@ func (o *Operator) onAddArangoDeployment(obj interface{}) { defer o.Dependencies.LivenessProbe.Unlock() apiObject := obj.(*api.ArangoDeployment) - o.log.Debug(). + o.log. Str("name", apiObject.GetObjectMeta().GetName()). 
- Msg("ArangoDeployment added") + Debug("ArangoDeployment added") o.syncArangoDeployment(apiObject) } @@ -79,9 +77,9 @@ func (o *Operator) onUpdateArangoDeployment(oldObj, newObj interface{}) { defer o.Dependencies.LivenessProbe.Unlock() apiObject := newObj.(*api.ArangoDeployment) - o.log.Debug(). + o.log. Str("name", apiObject.GetObjectMeta().GetName()). - Msg("ArangoDeployment updated") + Debug("ArangoDeployment updated") o.syncArangoDeployment(apiObject) } @@ -95,18 +93,18 @@ func (o *Operator) onDeleteArangoDeployment(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - log.Error().Interface("event-object", obj).Msg("unknown object from ArangoDeployment delete event") + log.Interface("event-object", obj).Error("unknown object from ArangoDeployment delete event") return } apiObject, ok = tombstone.Obj.(*api.ArangoDeployment) if !ok { - log.Error().Interface("event-object", obj).Msg("Tombstone contained object that is not an ArangoDeployment") + log.Interface("event-object", obj).Error("Tombstone contained object that is not an ArangoDeployment") return } } - log.Debug(). + log. Str("name", apiObject.GetObjectMeta().GetName()). - Msg("ArangoDeployment deleted") + Debug("ArangoDeployment deleted") ev := &Event{ Type: kwatch.Deleted, Deployment: apiObject, @@ -115,7 +113,7 @@ func (o *Operator) onDeleteArangoDeployment(obj interface{}) { // pt.start() err := o.handleDeploymentEvent(ev) if err != nil { - log.Warn().Err(err).Msg("Failed to handle event") + log.Err(err).Warn("Failed to handle event") } //pt.stop() } @@ -136,7 +134,7 @@ func (o *Operator) syncArangoDeployment(apiObject *api.ArangoDeployment) { //pt.start() err := o.handleDeploymentEvent(ev) if err != nil { - o.log.Warn().Err(err).Msg("Failed to handle event") + o.log.Err(err).Warn("Failed to handle event") } //pt.stop() } @@ -167,7 +165,7 @@ func (o *Operator) handleDeploymentEvent(event *Event) error { return errors.WithStack(errors.Wrapf(err, "invalid deployment spec. please fix the following problem with the deployment spec: %v", err)) } - cfg, deps := o.makeDeploymentConfigAndDeps(apiObject) + cfg, deps := o.makeDeploymentConfigAndDeps() nc, err := deployment.New(cfg, deps, apiObject) if err != nil { return errors.WithStack(errors.Newf("failed to create deployment: %s", err)) @@ -199,7 +197,7 @@ func (o *Operator) handleDeploymentEvent(event *Event) error { } // makeDeploymentConfigAndDeps creates a Config & Dependencies object for a new Deployment. -func (o *Operator) makeDeploymentConfigAndDeps(apiObject *api.ArangoDeployment) (deployment.Config, deployment.Dependencies) { +func (o *Operator) makeDeploymentConfigAndDeps() (deployment.Config, deployment.Dependencies) { cfg := deployment.Config{ ServiceAccount: o.Config.ServiceAccount, OperatorImage: o.Config.OperatorImage, @@ -209,9 +207,6 @@ func (o *Operator) makeDeploymentConfigAndDeps(apiObject *api.ArangoDeployment) Scope: o.Scope, } deps := deployment.Dependencies{ - Log: o.Dependencies.LogService.MustGetLogger(logging.LoggerNameDeployment).With(). - Str("deployment", apiObject.GetName()). 
- Logger(), Client: o.Client, EventRecorder: o.EventRecorder, } diff --git a/pkg/operator/operator_deployment_relication.go b/pkg/operator/operator_deployment_relication.go index 7266a2ee1..35b870eaf 100644 --- a/pkg/operator/operator_deployment_relication.go +++ b/pkg/operator/operator_deployment_relication.go @@ -22,7 +22,6 @@ package operator import ( replication2 "github.com/arangodb/kube-arangodb/pkg/apis/replication" - "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/errors" kwatch "k8s.io/apimachinery/pkg/watch" @@ -46,7 +45,6 @@ var ( // This registers a listener and waits until the process stops. func (o *Operator) runDeploymentReplications(stop <-chan struct{}) { rw := k8sutil.NewResourceWatcher( - o.log, o.Dependencies.Client.Arango().ReplicationV1().RESTClient(), replication2.ArangoDeploymentReplicationResourcePlural, o.Config.Namespace, @@ -67,9 +65,9 @@ func (o *Operator) onAddArangoDeploymentReplication(obj interface{}) { defer o.Dependencies.LivenessProbe.Unlock() apiObject := obj.(*api.ArangoDeploymentReplication) - o.log.Debug(). + o.log. Str("name", apiObject.GetObjectMeta().GetName()). - Msg("ArangoDeploymentReplication added") + Debug("ArangoDeploymentReplication added") o.syncArangoDeploymentReplication(apiObject) } @@ -79,9 +77,9 @@ func (o *Operator) onUpdateArangoDeploymentReplication(oldObj, newObj interface{ defer o.Dependencies.LivenessProbe.Unlock() apiObject := newObj.(*api.ArangoDeploymentReplication) - o.log.Debug(). + o.log. Str("name", apiObject.GetObjectMeta().GetName()). - Msg("ArangoDeploymentReplication updated") + Debug("ArangoDeploymentReplication updated") o.syncArangoDeploymentReplication(apiObject) } @@ -95,18 +93,18 @@ func (o *Operator) onDeleteArangoDeploymentReplication(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - log.Error().Interface("event-object", obj).Msg("unknown object from ArangoDeploymentReplication delete event") + log.Interface("event-object", obj).Error("unknown object from ArangoDeploymentReplication delete event") return } apiObject, ok = tombstone.Obj.(*api.ArangoDeploymentReplication) if !ok { - log.Error().Interface("event-object", obj).Msg("Tombstone contained object that is not an ArangoDeploymentReplication") + log.Interface("event-object", obj).Error("Tombstone contained object that is not an ArangoDeploymentReplication") return } } - log.Debug(). + log. Str("name", apiObject.GetObjectMeta().GetName()). - Msg("ArangoDeploymentReplication deleted") + Debug("ArangoDeploymentReplication deleted") ev := &Event{ Type: kwatch.Deleted, DeploymentReplication: apiObject, @@ -115,7 +113,7 @@ func (o *Operator) onDeleteArangoDeploymentReplication(obj interface{}) { // pt.start() err := o.handleDeploymentReplicationEvent(ev) if err != nil { - log.Warn().Err(err).Msg("Failed to handle event") + log.Err(err).Warn("Failed to handle event") } //pt.stop() } @@ -136,7 +134,7 @@ func (o *Operator) syncArangoDeploymentReplication(apiObject *api.ArangoDeployme //pt.start() err := o.handleDeploymentReplicationEvent(ev) if err != nil { - o.log.Warn().Err(err).Msg("Failed to handle event") + o.log.Err(err).Warn("Failed to handle event") } //pt.stop() } @@ -167,7 +165,7 @@ func (o *Operator) handleDeploymentReplicationEvent(event *Event) error { return errors.WithStack(errors.Wrapf(err, "invalid deployment replication spec. 
please fix the following problem with the deployment replication spec: %v", err)) } - cfg, deps := o.makeDeploymentReplicationConfigAndDeps(apiObject) + cfg, deps := o.makeDeploymentReplicationConfigAndDeps() nc, err := replication.New(cfg, deps, apiObject) if err != nil { return errors.WithStack(errors.Newf("failed to create deployment: %s", err)) @@ -199,16 +197,14 @@ func (o *Operator) handleDeploymentReplicationEvent(event *Event) error { } // makeDeploymentReplicationConfigAndDeps creates a Config & Dependencies object for a new DeploymentReplication. -func (o *Operator) makeDeploymentReplicationConfigAndDeps(apiObject *api.ArangoDeploymentReplication) (replication.Config, replication.Dependencies) { +func (o *Operator) makeDeploymentReplicationConfigAndDeps() (replication.Config, replication.Dependencies) { cfg := replication.Config{ Namespace: o.Config.Namespace, } deps := replication.Dependencies{ - Log: o.Dependencies.LogService.MustGetLogger(logging.LoggerNameDeploymentReplication).With(). - Str("deployment-replication", apiObject.GetName()). - Logger(), Client: o.Client, EventRecorder: o.Dependencies.EventRecorder, } + return cfg, deps } diff --git a/pkg/operator/operator_leader.go b/pkg/operator/operator_leader.go index 36827acdd..4fed9944e 100644 --- a/pkg/operator/operator_leader.go +++ b/pkg/operator/operator_leader.go @@ -28,13 +28,13 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/leaderelection" "k8s.io/client-go/tools/leaderelection/resourcelock" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/constants" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" "github.com/arangodb/kube-arangodb/pkg/util/probe" @@ -50,7 +50,7 @@ import ( func (o *Operator) runLeaderElection(lockName, label string, onStart func(stop <-chan struct{}), readyProbe *probe.ReadyProbe) { namespace := o.Config.Namespace kubecli := o.Dependencies.Client.Kubernetes() - log := o.log.With().Str("lock-name", lockName).Logger() + log := o.log.Str("lock-name", lockName) eventTarget := o.getLeaderElectionEventTarget(log) recordEvent := func(reason, message string) { if eventTarget != nil { @@ -67,7 +67,7 @@ func (o *Operator) runLeaderElection(lockName, label string, onStart func(stop < EventRecorder: o.Dependencies.EventRecorder, }) if err != nil { - log.Fatal().Err(err).Msg("Failed to create resource lock") + log.Err(err).Fatal("Failed to create resource lock") } ctx := context.Background() @@ -81,18 +81,18 @@ func (o *Operator) runLeaderElection(lockName, label string, onStart func(stop < recordEvent("Leader Election Won", fmt.Sprintf("Pod %s is running as leader", o.Config.PodName)) readyProbe.SetReady() if err := o.setRoleLabel(log, label, constants.LabelRoleLeader); err != nil { - log.Error().Msg("Cannot set leader role on Pod. Terminating process") + log.Error("Cannot set leader role on Pod. Terminating process") os.Exit(2) } onStart(ctx.Done()) }, OnStoppedLeading: func() { recordEvent("Stop Leading", fmt.Sprintf("Pod %s is stopping to run as leader", o.Config.PodName)) - log.Info().Msg("Stop leading. Terminating process") + log.Info("Stop leading. 
Terminating process") os.Exit(1) }, OnNewLeader: func(identity string) { - log.Info().Str("identity", identity).Msg("New leader detected") + log.Str("identity", identity).Info("New leader detected") readyProbe.SetReady() }, }, @@ -100,7 +100,7 @@ func (o *Operator) runLeaderElection(lockName, label string, onStart func(stop < } func (o *Operator) runWithoutLeaderElection(lockName, label string, onStart func(stop <-chan struct{}), readyProbe *probe.ReadyProbe) { - log := o.log.With().Str("lock-name", lockName).Logger() + log := o.log.Str("lock-name", lockName) eventTarget := o.getLeaderElectionEventTarget(log) recordEvent := func(reason, message string) { if eventTarget != nil { @@ -112,7 +112,7 @@ func (o *Operator) runWithoutLeaderElection(lockName, label string, onStart func recordEvent("Leader Election Skipped", fmt.Sprintf("Pod %s is running as leader", o.Config.PodName)) readyProbe.SetReady() if err := o.setRoleLabel(log, label, constants.LabelRoleLeader); err != nil { - log.Error().Msg("Cannot set leader role on Pod. Terminating process") + log.Error("Cannot set leader role on Pod. Terminating process") os.Exit(2) } onStart(ctx.Done()) @@ -120,48 +120,48 @@ func (o *Operator) runWithoutLeaderElection(lockName, label string, onStart func // getLeaderElectionEventTarget returns the object that leader election related // events will be added to. -func (o *Operator) getLeaderElectionEventTarget(log zerolog.Logger) runtime.Object { +func (o *Operator) getLeaderElectionEventTarget(log logging.Logger) runtime.Object { ns := o.Config.Namespace kubecli := o.Dependencies.Client.Kubernetes() pods := kubecli.CoreV1().Pods(ns) - log = log.With().Str("pod-name", o.Config.PodName).Logger() + log = log.Str("pod-name", o.Config.PodName) pod, err := pods.Get(context.Background(), o.Config.PodName, metav1.GetOptions{}) if err != nil { - log.Error().Err(err).Msg("Cannot find Pod containing this operator") + log.Err(err).Error("Cannot find Pod containing this operator") return nil } rSet, err := k8sutil.GetPodOwner(kubecli, pod, ns) if err != nil { - log.Error().Err(err).Msg("Cannot find ReplicaSet owning the Pod containing this operator") + log.Err(err).Error("Cannot find ReplicaSet owning the Pod containing this operator") return pod } if rSet == nil { - log.Error().Msg("Pod containing this operator has no ReplicaSet owner") + log.Error("Pod containing this operator has no ReplicaSet owner") return pod } - log = log.With().Str("replicaSet-name", rSet.Name).Logger() + log = log.Str("replicaSet-name", rSet.Name) depl, err := k8sutil.GetReplicaSetOwner(kubecli, rSet, ns) if err != nil { - log.Error().Err(err).Msg("Cannot find Deployment owning the ReplicataSet that owns the Pod containing this operator") + log.Err(err).Error("Cannot find Deployment owning the ReplicataSet that owns the Pod containing this operator") return rSet } if rSet == nil { - log.Error().Msg("ReplicaSet that owns the Pod containing this operator has no Deployment owner") + log.Error("ReplicaSet that owns the Pod containing this operator has no Deployment owner") return rSet } return depl } // setRoleLabel sets a label with key `role` and given value in the pod metadata. 
-func (o *Operator) setRoleLabel(log zerolog.Logger, label, role string) error { +func (o *Operator) setRoleLabel(log logging.Logger, label, role string) error { ns := o.Config.Namespace kubecli := o.Dependencies.Client.Kubernetes() pods := kubecli.CoreV1().Pods(ns) - log = log.With().Str("pod-name", o.Config.PodName).Logger() + log = log.Str("pod-name", o.Config.PodName) op := func() error { pod, err := pods.Get(context.Background(), o.Config.PodName, metav1.GetOptions{}) if k8sutil.IsNotFound(err) { - log.Error().Err(err).Msg("Pod not found, so we cannot set its role label") + log.Err(err).Error("Pod not found, so we cannot set its role label") return retry.Permanent(errors.WithStack(err)) } else if err != nil { return errors.WithStack(err) @@ -176,7 +176,7 @@ func (o *Operator) setRoleLabel(log zerolog.Logger, label, role string) error { // Retry it return errors.WithStack(err) } else if err != nil { - log.Error().Err(err).Msg("Failed to update Pod wrt 'role' label") + log.Err(err).Error("Failed to update Pod wrt 'role' label") return retry.Permanent(errors.WithStack(err)) } return nil diff --git a/pkg/operator/operator_local_storage.go b/pkg/operator/operator_local_storage.go index aee6c8479..519e4d97b 100644 --- a/pkg/operator/operator_local_storage.go +++ b/pkg/operator/operator_local_storage.go @@ -21,7 +21,6 @@ package operator import ( - "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/errors" kwatch "k8s.io/apimachinery/pkg/watch" "k8s.io/client-go/tools/cache" @@ -44,7 +43,6 @@ var ( // This registers a listener and waits until the process stops. func (o *Operator) runLocalStorages(stop <-chan struct{}) { rw := k8sutil.NewResourceWatcher( - o.log, o.Dependencies.Client.Arango().StorageV1alpha().RESTClient(), api.ArangoLocalStorageResourcePlural, "", //o.Config.Namespace, @@ -65,9 +63,9 @@ func (o *Operator) onAddArangoLocalStorage(obj interface{}) { defer o.Dependencies.LivenessProbe.Unlock() apiObject := obj.(*api.ArangoLocalStorage) - o.log.Debug(). + o.log. Str("name", apiObject.GetObjectMeta().GetName()). - Msg("ArangoLocalStorage added") + Debug("ArangoLocalStorage added") o.syncArangoLocalStorage(apiObject) } @@ -77,9 +75,9 @@ func (o *Operator) onUpdateArangoLocalStorage(oldObj, newObj interface{}) { defer o.Dependencies.LivenessProbe.Unlock() apiObject := newObj.(*api.ArangoLocalStorage) - o.log.Debug(). + o.log. Str("name", apiObject.GetObjectMeta().GetName()). - Msg("ArangoLocalStorage updated") + Debug("ArangoLocalStorage updated") o.syncArangoLocalStorage(apiObject) } @@ -93,18 +91,18 @@ func (o *Operator) onDeleteArangoLocalStorage(obj interface{}) { if !ok { tombstone, ok := obj.(cache.DeletedFinalStateUnknown) if !ok { - log.Error().Interface("event-object", obj).Msg("unknown object from ArangoLocalStorage delete event") + log.Interface("event-object", obj).Error("unknown object from ArangoLocalStorage delete event") return } apiObject, ok = tombstone.Obj.(*api.ArangoLocalStorage) if !ok { - log.Error().Interface("event-object", obj).Msg("Tombstone contained object that is not an ArangoLocalStorage") + log.Interface("event-object", obj).Error("Tombstone contained object that is not an ArangoLocalStorage") return } } - log.Debug(). + log. Str("name", apiObject.GetObjectMeta().GetName()). 
- Msg("ArangoLocalStorage deleted") + Debug("ArangoLocalStorage deleted") ev := &Event{ Type: kwatch.Deleted, LocalStorage: apiObject, @@ -113,7 +111,7 @@ func (o *Operator) onDeleteArangoLocalStorage(obj interface{}) { // pt.start() err := o.handleLocalStorageEvent(ev) if err != nil { - log.Warn().Err(err).Msg("Failed to handle event") + log.Err(err).Warn("Failed to handle event") } //pt.stop() } @@ -134,7 +132,7 @@ func (o *Operator) syncArangoLocalStorage(apiObject *api.ArangoLocalStorage) { //pt.start() err := o.handleLocalStorageEvent(ev) if err != nil { - o.log.Warn().Err(err).Msg("Failed to handle event") + o.log.Err(err).Warn("Failed to handle event") } //pt.stop() } @@ -165,7 +163,7 @@ func (o *Operator) handleLocalStorageEvent(event *Event) error { return errors.WithStack(errors.Newf("unsafe state. local storage (%s) was created before but we received event (%s)", apiObject.Name, event.Type)) } - cfg, deps := o.makeLocalStorageConfigAndDeps(apiObject) + cfg, deps := o.makeLocalStorageConfigAndDeps() stg, err := storage.New(cfg, deps, apiObject) if err != nil { return errors.WithStack(errors.Newf("failed to create local storage: %s", err)) @@ -197,16 +195,13 @@ func (o *Operator) handleLocalStorageEvent(event *Event) error { } // makeLocalStorageConfigAndDeps creates a Config & Dependencies object for a new LocalStorage. -func (o *Operator) makeLocalStorageConfigAndDeps(apiObject *api.ArangoLocalStorage) (storage.Config, storage.Dependencies) { +func (o *Operator) makeLocalStorageConfigAndDeps() (storage.Config, storage.Dependencies) { cfg := storage.Config{ Namespace: o.Config.Namespace, PodName: o.Config.PodName, ServiceAccount: o.Config.ServiceAccount, } deps := storage.Dependencies{ - Log: o.Dependencies.LogService.MustGetLogger(logging.LoggerNameStorage).With(). - Str("localStorage", apiObject.GetName()). - Logger(), Client: o.Client, EventRecorder: o.Dependencies.EventRecorder, } diff --git a/pkg/operator/server_discovery_api.go b/pkg/operator/server_discovery_api.go index cac775833..0145fcfdb 100644 --- a/pkg/operator/server_discovery_api.go +++ b/pkg/operator/server_discovery_api.go @@ -26,10 +26,10 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/server" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" ) @@ -54,15 +54,15 @@ func (o *Operator) FindOtherOperators() []server.OperatorReference { var result []server.OperatorReference namespaces, err := o.Dependencies.Client.Kubernetes().CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{}) if err != nil { - log.Warn().Err(err).Msg("Failed to list namespaces") + log.Err(err).Warn("Failed to list namespaces") } else { for _, ns := range namespaces.Items { if ns.Name != o.Config.Namespace { - log.Debug().Str("namespace", ns.Name).Msg("inspecting namespace for operators") + log.Str("namespace", ns.Name).Debug("inspecting namespace for operators") refs := o.findOtherOperatorsInNamespace(log, ns.Name, func(server.OperatorType) bool { return true }) result = append(result, refs...) 
} else { - log.Debug().Str("namespace", ns.Name).Msg("skip inspecting my own namespace for operators") + log.Str("namespace", ns.Name).Debug("skip inspecting my own namespace for operators") } } } @@ -91,12 +91,12 @@ func (o *Operator) FindOtherOperators() []server.OperatorReference { } // findOtherOperatorsInNamespace looks up references to other operators in the given namespace. -func (o *Operator) findOtherOperatorsInNamespace(log zerolog.Logger, namespace string, typePred func(server.OperatorType) bool) []server.OperatorReference { - log = log.With().Str("namespace", namespace).Logger() +func (o *Operator) findOtherOperatorsInNamespace(log logging.Logger, namespace string, typePred func(server.OperatorType) bool) []server.OperatorReference { + log = log.Str("namespace", namespace) var result []server.OperatorReference services, err := o.Dependencies.Client.Kubernetes().CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{}) if err != nil { - log.Debug().Err(err).Msg("Failed to list services") + log.Err(err).Debug("Failed to list services") return nil } nodeFetcher := func() ([]*core.Node, error) { @@ -120,7 +120,7 @@ func (o *Operator) findOtherOperatorsInNamespace(log zerolog.Logger, namespace s // Filter out unwanted services selector := svc.Spec.Selector if selector[roleKey] != roleLeader { - log.Debug().Str("service", svc.Name).Msg("Service has no leader role selector") + log.Str("service", svc.Name).Debug("Service has no leader role selector") continue } var oType server.OperatorType @@ -132,7 +132,7 @@ func (o *Operator) findOtherOperatorsInNamespace(log zerolog.Logger, namespace s case appStorageOperator: oType = server.OperatorTypeStorage default: - log.Debug().Str("service", svc.Name).Msg("Service has no or invalid app selector") + log.Str("service", svc.Name).Debug("Service has no or invalid app selector") continue } if !typePred(oType) { @@ -144,7 +144,7 @@ func (o *Operator) findOtherOperatorsInNamespace(log zerolog.Logger, namespace s if x, err := k8sutil.CreateServiceURL(svc, "https", nil, nodeFetcher); err == nil { url = x } else { - log.Warn().Err(err).Str("service", svc.Name).Msg("Failed to create URL for service") + log.Err(err).Str("service", svc.Name).Warn("Failed to create URL for service") } default: // No suitable service type @@ -157,6 +157,6 @@ func (o *Operator) findOtherOperatorsInNamespace(log zerolog.Logger, namespace s }) } - log.Debug().Msgf("Found %d operator services", len(result)) + log.Debug("Found %d operator services", len(result)) return result } diff --git a/pkg/operatorV2/event/event.go b/pkg/operatorV2/event/event.go index 025a20b9c..743126dbe 100644 --- a/pkg/operatorV2/event/event.go +++ b/pkg/operatorV2/event/event.go @@ -24,8 +24,6 @@ import ( "context" "fmt" - "github.com/rs/zerolog" - core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/util/uuid" @@ -33,11 +31,10 @@ import ( ) // NewEventRecorder creates new event recorder -func NewEventRecorder(logger zerolog.Logger, name string, kubeClientSet kubernetes.Interface) Recorder { +func NewEventRecorder(name string, kubeClientSet kubernetes.Interface) Recorder { return &eventRecorder{ kubeClientSet: kubeClientSet, name: name, - logger: logger, } } @@ -51,7 +48,6 @@ type Recorder interface { type eventRecorder struct { name string kubeClientSet kubernetes.Interface - logger zerolog.Logger } func (e *eventRecorder) newEvent(group, version, kind string, object meta.Object, eventType, reason, message string) *core.Event { @@ -91,19 
+87,19 @@ func (e *eventRecorder) newObjectReference(group, version, kind string, object m func (e *eventRecorder) event(group, version, kind string, object meta.Object, eventType, reason, message string) { _, err := e.kubeClientSet.CoreV1().Events(object.GetNamespace()).Create(context.Background(), e.newEvent(group, version, kind, object, eventType, reason, message), meta.CreateOptions{}) if err != nil { - e.logger.Warn().Err(err). + logger.Err(err). Str("APIVersion", fmt.Sprintf("%s/%s", group, version)). Str("Kind", kind). Str("Object", fmt.Sprintf("%s/%s", object.GetNamespace(), object.GetName())). - Msgf("Unable to send event") + Warn("Unable to send event") return } - e.logger.Info(). + logger. Str("APIVersion", fmt.Sprintf("%s/%s", group, version)). Str("Kind", kind). Str("Object", fmt.Sprintf("%s/%s", object.GetNamespace(), object.GetName())). - Msgf("Event send %s - %s - %s", eventType, reason, message) + Info("Event send %s - %s - %s", eventType, reason, message) } func (e *eventRecorder) NewInstance(group, version, kind string) RecorderInstance { diff --git a/pkg/operatorV2/event/event_test.go b/pkg/operatorV2/event/event_test.go index 3cc6f04c3..c7f54be5a 100644 --- a/pkg/operatorV2/event/event_test.go +++ b/pkg/operatorV2/event/event_test.go @@ -25,8 +25,6 @@ import ( "fmt" "testing" - "github.com/rs/zerolog/log" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" core "k8s.io/api/core/v1" @@ -39,7 +37,7 @@ func Test_Event_Handler(t *testing.T) { // Arrange c := fake.NewSimpleClientset() - recorder := NewEventRecorder(log.Logger, "mock", c) + recorder := NewEventRecorder("mock", c) group := string(uuid.NewUUID()) version := "v1" diff --git a/pkg/operatorV2/event/logger.go b/pkg/operatorV2/event/logger.go new file mode 100644 index 000000000..3b8c8c30d --- /dev/null +++ b/pkg/operatorV2/event/logger.go @@ -0,0 +1,29 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package event + +import ( + "github.com/arangodb/kube-arangodb/pkg/logging" +) + +var ( + logger = logging.Global().RegisterAndGetLogger("operator-v2-event", logging.Info) +) diff --git a/pkg/operatorV2/logger.go b/pkg/operatorV2/logger.go new file mode 100644 index 000000000..c8e82b4b4 --- /dev/null +++ b/pkg/operatorV2/logger.go @@ -0,0 +1,30 @@ +// +// DISCLAIMER +// +// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. +// +// Copyright holder is ArangoDB GmbH, Cologne, Germany +// + +package operator + +import ( + "github.com/arangodb/kube-arangodb/pkg/logging" +) + +var ( + logger = logging.Global().RegisterAndGetLogger("operator-v2", logging.Info) + loggerWorker = logging.Global().RegisterAndGetLogger("operator-v2-worker", logging.Warn) +) diff --git a/pkg/operatorV2/operator.go b/pkg/operatorV2/operator.go index ae1455c3d..ad597fe7e 100644 --- a/pkg/operatorV2/operator.go +++ b/pkg/operatorV2/operator.go @@ -24,8 +24,6 @@ import ( "sync" "time" - "github.com/rs/zerolog" - "github.com/arangodb/kube-arangodb/pkg/util/errors" "github.com/arangodb/kube-arangodb/pkg/operatorV2/operation" @@ -59,17 +57,14 @@ type Operator interface { EnqueueItem(item operation.Item) ProcessItem(item operation.Item) error - - GetLogger() *zerolog.Logger } // NewOperator creates new operator -func NewOperator(logger zerolog.Logger, name, namespace, image string) Operator { +func NewOperator(name, namespace, image string) Operator { o := &operator{ name: name, namespace: namespace, image: image, - logger: logger, workqueue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name), } @@ -84,8 +79,6 @@ type operator struct { started bool - logger zerolog.Logger - name string namespace string image string @@ -188,10 +181,6 @@ func (o *operator) RegisterInformer(informer cache.SharedIndexInformer, group, v return nil } -func (o *operator) GetLogger() *zerolog.Logger { - return &o.logger -} - func (o *operator) Start(threadiness int, stopCh <-chan struct{}) error { o.lock.Lock() defer o.lock.Unlock() @@ -207,14 +196,14 @@ func (o *operator) Start(threadiness int, stopCh <-chan struct{}) error { func (o *operator) start(threadiness int, stopCh <-chan struct{}) error { // Execute pre checks - o.logger.Info().Msgf("Executing Lifecycle PreStart") + logger.Info("Executing Lifecycle PreStart") for _, handler := range o.handlers { if err := ExecLifecyclePreStart(handler); err != nil { return err } } - o.logger.Info().Msgf("Starting informers") + logger.Info("Starting informers") for _, starter := range o.starters { starter.Start(stopCh) } @@ -223,12 +212,12 @@ func (o *operator) start(threadiness int, stopCh <-chan struct{}) error { return err } - o.logger.Info().Msgf("Starting workers") + logger.Info("Starting workers") for i := 0; i < threadiness; i++ { go wait.Until(o.worker, time.Second, stopCh) } - o.logger.Info().Msgf("Operator started") + logger.Info("Operator started") return nil } diff --git a/pkg/operatorV2/operator_test.go b/pkg/operatorV2/operator_test.go index 203831e44..989c05324 100644 --- a/pkg/operatorV2/operator_test.go +++ b/pkg/operatorV2/operator_test.go @@ -24,8 +24,6 @@ import ( "context" "time" - "github.com/rs/zerolog/log" - "github.com/arangodb/kube-arangodb/pkg/operatorV2/operation" "github.com/stretchr/testify/assert" @@ -43,7 +41,7 @@ import ( func Test_Operator_InformerProcessing(t *testing.T) { // Arrange name := string(uuid.NewUUID()) - o := NewOperator(log.Logger, name, name, name) + o := NewOperator(name, name, name) size := 64 objects := make([]string, size) @@ -92,7 +90,7 @@ func Test_Operator_InformerProcessing(t *testing.T) { func Test_Operator_MultipleInformers(t *testing.T) { // Arrange name := string(uuid.NewUUID()) - o := NewOperator(log.Logger, name, name, name) + o := NewOperator(name, name, name) size := 16 objects := make([]string, size) @@ -153,7 +151,7 @@ 
func Test_Operator_MultipleInformers(t *testing.T) {
 func Test_Operator_MultipleInformers_IgnoredTypes(t *testing.T) {
 // Arrange
 name := string(uuid.NewUUID())
- o := NewOperator(log.Logger, name, name, name)
+ o := NewOperator(name, name, name)

 size := 16
 objects := make([]string, size)
@@ -213,7 +211,7 @@ func Test_Operator_MultipleInformers_IgnoredTypes(t *testing.T) {
 func Test_Operator_MultipleInformers_MultipleHandlers(t *testing.T) {
 // Arrange
 name := string(uuid.NewUUID())
- o := NewOperator(log.Logger, name, name, name)
+ o := NewOperator(name, name, name)

 size := 16
 objects := make([]string, size)
@@ -321,7 +319,7 @@ func Test_Operator_MultipleInformers_MultipleHandlers(t *testing.T) {
 func Test_Operator_InformerProcessing_Namespaced(t *testing.T) {
 // Arrange
 name := string(uuid.NewUUID())
- o := NewOperator(log.Logger, name, name, name)
+ o := NewOperator(name, name, name)

 size := 16
 objects := make([]string, size)
diff --git a/pkg/operatorV2/operator_worker.go b/pkg/operatorV2/operator_worker.go
index b11c5f242..c1d44ad12 100644
--- a/pkg/operatorV2/operator_worker.go
+++ b/pkg/operatorV2/operator_worker.go
@@ -35,11 +35,11 @@ func (o *operator) processNextItem() bool {
 defer func() {
 // Recover from panic to not shutdown whole operator
 if err := recover(); err != nil {
- e := o.logger.Error()
+ e := loggerWorker.Str("type", "worker")

 switch obj := err.(type) {
 case error:
- e = e.AnErr("err", obj)
+ e = e.Err(obj)
 case string:
 e = e.Str("err", obj)
 case int:
@@ -48,7 +48,7 @@ func (o *operator) processNextItem() bool {
 e.Interface("err", obj)
 }

- e.Msgf("Recovered from panic")
+ e.Error("Recovered from panic")
 }
 }()
@@ -61,7 +61,7 @@ func (o *operator) processNextItem() bool {

 err := o.processObject(obj)
 if err != nil {
- o.logger.Error().Err(err).Interface("object", obj).Msgf("Error during object handling")
+ loggerWorker.Err(err).Interface("object", obj).Error("Error during object handling")
 return true
 }
@@ -94,7 +94,7 @@ func (o *operator) processObject(obj interface{}) error {

 o.objectProcessed.Inc()

- o.logger.Trace().Msgf("Received Item Action: %s, Type: %s/%s/%s, Namespace: %s, Name: %s",
+ loggerWorker.Trace("Received Item Action: %s, Type: %s/%s/%s, Namespace: %s, Name: %s",
 item.Operation,
 item.Group,
 item.Version,
@@ -107,7 +107,7 @@ func (o *operator) processObject(obj interface{}) error {
 return errors.Newf("error syncing '%s': %s, requeuing", key, err.Error())
 }

- o.logger.Trace().Msgf("Processed Item Action: %s, Type: %s/%s/%s, Namespace: %s, Name: %s",
+ loggerWorker.Trace("Processed Item Action: %s, Type: %s/%s/%s, Namespace: %s, Name: %s",
 item.Operation,
 item.Group,
 item.Version,
diff --git a/pkg/operatorV2/operator_worker_test.go b/pkg/operatorV2/operator_worker_test.go
index 0a1dc937c..6f970ed8c 100644
--- a/pkg/operatorV2/operator_worker_test.go
+++ b/pkg/operatorV2/operator_worker_test.go
@@ -23,8 +23,6 @@ package operator
 import (
 "testing"

- "github.com/rs/zerolog/log"
-
 "github.com/stretchr/testify/assert"
 "github.com/stretchr/testify/require"
 "k8s.io/apimachinery/pkg/util/uuid"
@@ -33,7 +31,7 @@ import (
 func Test_Worker_Empty(t *testing.T) {
 // Arrange
 name := string(uuid.NewUUID())
- o := NewOperator(log.Logger, name, name, name)
+ o := NewOperator(name, name, name)

 stopCh := make(chan struct{})
@@ -53,7 +51,7 @@ func Test_Worker_Empty(t *testing.T) {
 func Test_Worker_CatchAll(t *testing.T) {
 // Arrange
 name := string(uuid.NewUUID())
- o := NewOperator(log.Logger, name, name, name)
+ o := NewOperator(name, name, name)

 stopCh := make(chan struct{})
@@
-82,7 +80,7 @@ func Test_Worker_CatchAll(t *testing.T) { func Test_Worker_EnsureFirstProcessStopLoop(t *testing.T) { // Arrange name := string(uuid.NewUUID()) - o := NewOperator(log.Logger, name, name, name) + o := NewOperator(name, name, name) stopCh := make(chan struct{}) @@ -116,7 +114,7 @@ func Test_Worker_EnsureFirstProcessStopLoop(t *testing.T) { func Test_Worker_EnsureObjectIsProcessedBySecondHandler(t *testing.T) { // Arrange name := string(uuid.NewUUID()) - o := NewOperator(log.Logger, name, name, name) + o := NewOperator(name, name, name) stopCh := make(chan struct{}) diff --git a/pkg/replication/deployment_replication.go b/pkg/replication/deployment_replication.go index f255a7486..db0016741 100644 --- a/pkg/replication/deployment_replication.go +++ b/pkg/replication/deployment_replication.go @@ -28,18 +28,21 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" "github.com/arangodb/arangosync-client/client" api "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" "github.com/arangodb/kube-arangodb/pkg/util/kclient" "github.com/arangodb/kube-arangodb/pkg/util/retry" "github.com/arangodb/kube-arangodb/pkg/util/trigger" + "github.com/rs/zerolog" ) +var logger = logging.Global().RegisterAndGetLogger("deployment-replication", logging.Info) + // Config holds configuration settings for a DeploymentReplication type Config struct { Namespace string @@ -47,7 +50,6 @@ type Config struct { // Dependencies holds dependent services for a DeploymentReplication type Dependencies struct { - Log zerolog.Logger Client kclient.Client EventRecorder record.EventRecorder } @@ -73,6 +75,7 @@ const ( // DeploymentReplication is the in process state of an ArangoDeploymentReplication. type DeploymentReplication struct { + log logging.Logger apiObject *api.ArangoDeploymentReplication // API object status api.DeploymentReplicationStatus // Internal status of the CR config Config @@ -101,6 +104,8 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeploymentReplic stopCh: make(chan struct{}), } + dr.log = logger.WrapObj(dr) + go dr.run() return dr, nil @@ -118,7 +123,7 @@ func (dr *DeploymentReplication) Update(apiObject *api.ArangoDeploymentReplicati // Delete the deployment replication. // Called when the local storage was deleted by the user. func (dr *DeploymentReplication) Delete() { - dr.deps.Log.Info().Msg("deployment replication is deleted by user") + dr.log.Info("deployment replication is deleted by user") if atomic.CompareAndSwapInt32(&dr.stopped, 0, 1) { close(dr.stopCh) } @@ -130,10 +135,10 @@ func (dr *DeploymentReplication) send(ev *deploymentReplicationEvent) { case dr.eventCh <- ev: l, ecap := len(dr.eventCh), cap(dr.eventCh) if l > int(float64(ecap)*0.8) { - dr.deps.Log.Warn(). + dr.log. Int("used", l). Int("capacity", ecap). - Msg("event queue buffer is almost full") + Warn("event queue buffer is almost full") } case <-dr.stopCh: } @@ -143,11 +148,11 @@ func (dr *DeploymentReplication) send(ev *deploymentReplicationEvent) { // It processes the event queue and polls the state of generated // resource on a regular basis. 
func (dr *DeploymentReplication) run() { - log := dr.deps.Log + log := dr.log // Add finalizers if err := dr.addFinalizers(); err != nil { - log.Warn().Err(err).Msg("Failed to add finalizers") + log.Err(err).Warn("Failed to add finalizers") } inspectionInterval := maxInspectionInterval @@ -187,13 +192,13 @@ func (dr *DeploymentReplication) run() { // handleArangoDeploymentReplicationUpdatedEvent is called when the deployment replication is updated by the user. func (dr *DeploymentReplication) handleArangoDeploymentReplicationUpdatedEvent(event *deploymentReplicationEvent) error { - log := dr.deps.Log.With().Str("deployoment-replication", event.DeploymentReplication.GetName()).Logger() + log := dr.log.Str("deployoment-replication", event.DeploymentReplication.GetName()) repls := dr.deps.Client.Arango().ReplicationV1().ArangoDeploymentReplications(dr.apiObject.GetNamespace()) // Get the most recent version of the deployment replication from the API server current, err := repls.Get(context.Background(), dr.apiObject.GetName(), metav1.GetOptions{}) if err != nil { - log.Debug().Err(err).Msg("Failed to get current version of deployment replication from API server") + log.Err(err).Debug("Failed to get current version of deployment replication from API server") if k8sutil.IsNotFound(err) { return nil } @@ -205,20 +210,20 @@ func (dr *DeploymentReplication) handleArangoDeploymentReplicationUpdatedEvent(e newAPIObject.Status = dr.status resetFields := dr.apiObject.Spec.ResetImmutableFields(&newAPIObject.Spec) if len(resetFields) > 0 { - log.Debug().Strs("fields", resetFields).Msg("Found modified immutable fields") + log.Strs("fields", resetFields...).Debug("Found modified immutable fields") } if err := newAPIObject.Spec.Validate(); err != nil { dr.createEvent(k8sutil.NewErrorEvent("Validation failed", err, dr.apiObject)) // Try to reset object if err := dr.updateCRSpec(dr.apiObject.Spec); err != nil { - log.Error().Err(err).Msg("Restore original spec failed") + log.Err(err).Error("Restore original spec failed") dr.createEvent(k8sutil.NewErrorEvent("Restore original failed", err, dr.apiObject)) } return nil } if len(resetFields) > 0 { for _, fieldName := range resetFields { - log.Debug().Str("field", fieldName).Msg("Reset immutable field") + log.Str("field", fieldName).Debug("Reset immutable field") dr.createEvent(k8sutil.NewImmutableFieldEvent(fieldName, dr.apiObject)) } } @@ -248,7 +253,7 @@ func (dr *DeploymentReplication) updateCRStatus() error { } // Send update to API server - log := dr.deps.Log + log := dr.log repls := dr.deps.Client.Arango().ReplicationV1().ArangoDeploymentReplications(dr.apiObject.GetNamespace()) update := dr.apiObject.DeepCopy() attempt := 0 @@ -272,7 +277,7 @@ func (dr *DeploymentReplication) updateCRStatus() error { } } if err != nil { - log.Debug().Err(err).Msg("failed to patch ArangoDeploymentReplication status") + log.Err(err).Debug("failed to patch ArangoDeploymentReplication status") return errors.WithStack(errors.Newf("failed to patch ArangoDeploymentReplication status: %v", err)) } } @@ -282,7 +287,7 @@ func (dr *DeploymentReplication) updateCRStatus() error { // to the given object, while preserving the status. // On success, d.apiObject is updated. 
func (dr *DeploymentReplication) updateCRSpec(newSpec api.DeploymentReplicationSpec) error { - log := dr.deps.Log + log := dr.log repls := dr.deps.Client.Arango().ReplicationV1().ArangoDeploymentReplications(dr.apiObject.GetNamespace()) // Send update to API server @@ -309,7 +314,7 @@ func (dr *DeploymentReplication) updateCRSpec(newSpec api.DeploymentReplicationS } } if err != nil { - log.Debug().Err(err).Msg("failed to patch ArangoDeploymentReplication spec") + log.Err(err).Debug("failed to patch ArangoDeploymentReplication spec") return errors.WithStack(errors.Newf("failed to patch ArangoDeploymentReplication spec: %v", err)) } } @@ -317,8 +322,7 @@ func (dr *DeploymentReplication) updateCRSpec(newSpec api.DeploymentReplicationS // failOnError reports the given error and sets the deployment replication status to failed. func (dr *DeploymentReplication) failOnError(err error, msg string) { - log := dr.deps.Log - log.Error().Err(err).Msg(msg) + dr.log.Err(err).Error(msg) dr.status.Reason = err.Error() dr.reportFailedStatus() } @@ -326,8 +330,7 @@ func (dr *DeploymentReplication) failOnError(err error, msg string) { // reportFailedStatus sets the status of the deployment replication to Failed and keeps trying to forward // that to the API server. func (dr *DeploymentReplication) reportFailedStatus() { - log := dr.deps.Log - log.Info().Msg("local storage failed. Reporting failed reason...") + dr.log.Info("local storage failed. Reporting failed reason...") repls := dr.deps.Client.Arango().ReplicationV1().ArangoDeploymentReplications(dr.apiObject.GetNamespace()) op := func() error { @@ -339,7 +342,7 @@ func (dr *DeploymentReplication) reportFailedStatus() { } if !k8sutil.IsConflict(err) { - log.Warn().Err(err).Msg("retry report status: fail to update") + dr.log.Err(err).Warn("retry report status: fail to update") return errors.WithStack(err) } @@ -351,7 +354,7 @@ func (dr *DeploymentReplication) reportFailedStatus() { if k8sutil.IsNotFound(err) { return nil } - log.Warn().Err(err).Msg("retry report status: fail to get latest version") + dr.log.Err(err).Warn("retry report status: fail to get latest version") return errors.WithStack(err) } dr.apiObject = depl @@ -360,3 +363,7 @@ func (dr *DeploymentReplication) reportFailedStatus() { retry.Retry(op, time.Hour*24*365) } + +func (dr *DeploymentReplication) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", dr.apiObject.GetNamespace()).Str("name", dr.apiObject.GetName()) +} diff --git a/pkg/replication/finalizers.go b/pkg/replication/finalizers.go index d4e586948..1edb83799 100644 --- a/pkg/replication/finalizers.go +++ b/pkg/replication/finalizers.go @@ -26,7 +26,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "github.com/arangodb/arangosync-client/client" @@ -62,24 +61,24 @@ func (dr *DeploymentReplication) addFinalizers() error { // runFinalizers goes through the list of ArangoDeploymentReplication finalizers to see if they can be removed. 
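// Illustrative sketch, not part of this change set: the WrapLogger method added
// above lets a resource attach its identity to every log line via Logger.WrapObj,
// as New() does for DeploymentReplication. The watcher type below is hypothetical.

package example

import (
	"github.com/rs/zerolog"

	"github.com/arangodb/kube-arangodb/pkg/logging"
)

var watcherLog = logging.Global().RegisterAndGetLogger("example-watcher", logging.Info)

type watcher struct {
	namespace, name string
	log             logging.Logger
}

// WrapLogger satisfies logging.WrapObj; its fields are appended to every event.
func (w *watcher) WrapLogger(in *zerolog.Event) *zerolog.Event {
	return in.Str("namespace", w.namespace).Str("name", w.name)
}

func newWatcher(namespace, name string) *watcher {
	w := &watcher{namespace: namespace, name: name}
	w.log = watcherLog.WrapObj(w) // every w.log call now carries namespace/name
	return w
}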
func (dr *DeploymentReplication) runFinalizers(ctx context.Context, p *api.ArangoDeploymentReplication) error { - log := dr.deps.Log.With().Str("replication-name", p.GetName()).Logger() + log := dr.log.Str("replication-name", p.GetName()) var removalList []string for _, f := range p.ObjectMeta.GetFinalizers() { switch f { case constants.FinalizerDeplReplStopSync: - log.Debug().Msg("Inspecting stop-sync finalizer") - if err := dr.inspectFinalizerDeplReplStopSync(ctx, log, p); err == nil { + log.Debug("Inspecting stop-sync finalizer") + if err := dr.inspectFinalizerDeplReplStopSync(ctx, p); err == nil { removalList = append(removalList, f) } else { - log.Debug().Err(err).Str("finalizer", f).Msg("Cannot remove finalizer yet") + log.Err(err).Str("finalizer", f).Debug("Cannot remove finalizer yet") } } } // Remove finalizers (if needed) if len(removalList) > 0 { ignoreNotFound := false - if err := removeDeploymentReplicationFinalizers(log, dr.deps.Client.Arango(), p, removalList, ignoreNotFound); err != nil { - log.Debug().Err(err).Msg("Failed to update deployment replication (to remove finalizers)") + if err := removeDeploymentReplicationFinalizers(dr.deps.Client.Arango(), p, removalList, ignoreNotFound); err != nil { + log.Err(err).Debug("Failed to update deployment replication (to remove finalizers)") return errors.WithStack(err) } } @@ -88,10 +87,10 @@ func (dr *DeploymentReplication) runFinalizers(ctx context.Context, p *api.Arang // inspectFinalizerDeplReplStopSync checks the finalizer condition for stop-sync. // It returns nil if the finalizer can be removed. -func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Context, log zerolog.Logger, p *api.ArangoDeploymentReplication) error { +func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Context, p *api.ArangoDeploymentReplication) error { // Inspect phase if p.Status.Phase.IsFailed() { - log.Debug().Msg("Deployment replication is already failed, safe to remove stop-sync finalizer") + dr.log.Debug("Deployment replication is already failed, safe to remove stop-sync finalizer") return nil } @@ -101,13 +100,13 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co if name := p.Spec.Source.GetDeploymentName(); name != "" { depl, err := depls.Get(context.Background(), name, metav1.GetOptions{}) if k8sutil.IsNotFound(err) { - log.Debug().Msg("Source deployment is gone. Abort enabled") + dr.log.Debug("Source deployment is gone. Abort enabled") abort = true } else if err != nil { - log.Warn().Err(err).Msg("Failed to get source deployment") + dr.log.Err(err).Warn("Failed to get source deployment") return errors.WithStack(err) } else if depl.GetDeletionTimestamp() != nil { - log.Debug().Msg("Source deployment is being deleted. Abort enabled") + dr.log.Debug("Source deployment is being deleted. Abort enabled") abort = true } } @@ -117,13 +116,13 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co if name := p.Spec.Destination.GetDeploymentName(); name != "" { depl, err := depls.Get(context.Background(), name, metav1.GetOptions{}) if k8sutil.IsNotFound(err) { - log.Debug().Msg("Destination deployment is gone. Source cleanup enabled") + dr.log.Debug("Destination deployment is gone. 
Source cleanup enabled") cleanupSource = true } else if err != nil { - log.Warn().Err(err).Msg("Failed to get destinaton deployment") + dr.log.Err(err).Warn("Failed to get destinaton deployment") return errors.WithStack(err) } else if depl.GetDeletionTimestamp() != nil { - log.Debug().Msg("Destination deployment is being deleted. Source cleanup enabled") + dr.log.Debug("Destination deployment is being deleted. Source cleanup enabled") cleanupSource = true } } @@ -142,7 +141,7 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co // Destination still exists, stop/abort sync destClient, err := dr.createSyncMasterClient(p.Spec.Destination) if err != nil { - log.Warn().Err(err).Msg("Failed to create destination client") + dr.log.Err(err).Warn("Failed to create destination client") return errors.WithStack(err) } req := client.CancelSynchronizationRequest{ @@ -150,13 +149,13 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co Force: abort, ForceTimeout: time.Minute * 2, } - log.Debug().Bool("abort", abort).Msg("Stopping synchronization...") + dr.log.Bool("abort", abort).Debug("Stopping synchronization...") _, err = destClient.Master().CancelSynchronization(ctx, req) if err != nil && !client.IsPreconditionFailed(err) { - log.Warn().Err(err).Bool("abort", abort).Msg("Failed to stop synchronization") + dr.log.Err(err).Bool("abort", abort).Warn("Failed to stop synchronization") dr.status.CancelFailures++ if err := dr.updateCRStatus(); err != nil { - log.Warn().Err(err).Msg("Failed to update status to reflect cancel-failures increment") + dr.log.Err(err).Warn("Failed to update status to reflect cancel-failures increment") } return errors.WithStack(err) } @@ -165,7 +164,7 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co } // removeDeploymentReplicationFinalizers removes the given finalizers from the given DeploymentReplication. -func removeDeploymentReplicationFinalizers(log zerolog.Logger, crcli versioned.Interface, p *api.ArangoDeploymentReplication, finalizers []string, ignoreNotFound bool) error { +func removeDeploymentReplicationFinalizers(crcli versioned.Interface, p *api.ArangoDeploymentReplication, finalizers []string, ignoreNotFound bool) error { repls := crcli.ReplicationV1().ArangoDeploymentReplications(p.GetNamespace()) getFunc := func() (metav1.Object, error) { result, err := repls.Get(context.Background(), p.GetName(), metav1.GetOptions{}) @@ -183,7 +182,7 @@ func removeDeploymentReplicationFinalizers(log zerolog.Logger, crcli versioned.I *p = *result return nil } - if _, err := k8sutil.RemoveFinalizers(log, finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { + if _, err := k8sutil.RemoveFinalizers(finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { return errors.WithStack(err) } return nil diff --git a/pkg/replication/sync_client.go b/pkg/replication/sync_client.go index f9e0ee4f5..2163d876c 100644 --- a/pkg/replication/sync_client.go +++ b/pkg/replication/sync_client.go @@ -35,12 +35,11 @@ import ( api "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1" "github.com/arangodb/kube-arangodb/pkg/apis/shared" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" + "github.com/rs/zerolog/log" ) // createSyncMasterClient creates an arangosync client for the given endpoint. 
func (dr *DeploymentReplication) createSyncMasterClient(epSpec api.EndpointSpec) (client.API, error) { - log := dr.deps.Log - // Endpoint source, err := dr.createArangoSyncEndpoint(epSpec) if err != nil { @@ -96,7 +95,8 @@ func (dr *DeploymentReplication) createSyncMasterClient(epSpec api.EndpointSpec) auth.Password = password // Create client - c, err := dr.clientCache.GetClient(log, source, auth, insecureSkipVerify) + // TODO: Change logger in clientset + c, err := dr.clientCache.GetClient(log.Logger, source, auth, insecureSkipVerify) if err != nil { return nil, errors.WithStack(err) } @@ -110,7 +110,7 @@ func (dr *DeploymentReplication) createArangoSyncEndpoint(epSpec api.EndpointSpe depls := dr.deps.Client.Arango().DatabaseV1().ArangoDeployments(dr.apiObject.GetNamespace()) depl, err := depls.Get(context.Background(), deploymentName, metav1.GetOptions{}) if err != nil { - dr.deps.Log.Debug().Err(err).Str("deployment", deploymentName).Msg("Failed to get deployment") + dr.log.Err(err).Str("deployment", deploymentName).Debug("Failed to get deployment") return nil, errors.WithStack(err) } dnsName := k8sutil.CreateSyncMasterClientServiceDNSNameWithDomain(depl, depl.Spec.ClusterDomain) @@ -169,7 +169,7 @@ func (dr *DeploymentReplication) getEndpointSecretNames(epSpec api.EndpointSpec) depls := dr.deps.Client.Arango().DatabaseV1().ArangoDeployments(dr.apiObject.GetNamespace()) depl, err := depls.Get(context.Background(), deploymentName, metav1.GetOptions{}) if err != nil { - dr.deps.Log.Debug().Err(err).Str("deployment", deploymentName).Msg("Failed to get deployment") + dr.log.Err(err).Str("deployment", deploymentName).Debug("Failed to get deployment") return "", "", "", "", errors.WithStack(err) } return clientAuthCertKeyfileSecretName, userSecretName, depl.Spec.Sync.Authentication.GetJWTSecretName(), depl.Spec.Sync.TLS.GetCASecretName(), nil diff --git a/pkg/replication/sync_inspector.go b/pkg/replication/sync_inspector.go index 331828a3f..21c600d22 100644 --- a/pkg/replication/sync_inspector.go +++ b/pkg/replication/sync_inspector.go @@ -39,8 +39,6 @@ import ( // - once in a while // Returns the delay until this function should be called again. func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time.Duration) time.Duration { - log := dr.deps.Log - spec := dr.apiObject.Spec nextInterval := lastInterval hasError := false @@ -48,12 +46,12 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. // Add finalizers if err := dr.addFinalizers(); err != nil { - log.Warn().Err(err).Msg("Failed to add finalizers") + dr.log.Err(err).Warn("Failed to add finalizers") } // Is the deployment in failed state, if so, give up. if dr.status.Phase == api.DeploymentReplicationPhaseFailed { - log.Debug().Msg("Deployment replication is in Failed state.") + dr.log.Debug("Deployment replication is in Failed state.") return nextInterval } @@ -61,14 +59,14 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. if dr.apiObject.GetDeletionTimestamp() != nil { // Deployment replication is triggered for deletion. 
if err := dr.runFinalizers(ctx, dr.apiObject); err != nil { - log.Warn().Err(err).Msg("Failed to run finalizers") + dr.log.Err(err).Warn("Failed to run finalizers") hasError = true } } else { // Inspect configuration status destClient, err := dr.createSyncMasterClient(spec.Destination) if err != nil { - log.Warn().Err(err).Msg("Failed to create destination syncmaster client") + dr.log.Err(err).Warn("Failed to create destination syncmaster client") } else { // Fetch status of destination updateStatusNeeded := false @@ -76,17 +74,17 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. cancelSyncNeeded := false destEndpoint, err := destClient.Master().GetEndpoints(ctx) if err != nil { - log.Warn().Err(err).Msg("Failed to fetch endpoints from destination syncmaster") + dr.log.Err(err).Warn("Failed to fetch endpoints from destination syncmaster") } destStatus, err := destClient.Master().Status(ctx) if err != nil { - log.Warn().Err(err).Msg("Failed to fetch status from destination syncmaster") + dr.log.Err(err).Warn("Failed to fetch status from destination syncmaster") } else { // Inspect destination status if destStatus.Status.IsActive() { isIncomingEndpoint, err := dr.isIncomingEndpoint(destStatus, spec.Source) if err != nil { - log.Warn().Err(err).Msg("Failed to check is-incoming-endpoint") + dr.log.Err(err).Warn("Failed to check is-incoming-endpoint") } else { if isIncomingEndpoint { // Destination is correctly configured @@ -96,7 +94,7 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. updateStatusNeeded = true } else { // Sync is active, but from different source - log.Warn().Msg("Destination syncmaster is configured for different source") + dr.log.Warn("Destination syncmaster is configured for different source") cancelSyncNeeded = true if dr.status.Conditions.Update(api.ConditionTypeConfigured, false, "Invalid", "Destination syncmaster is configured for different source") { updateStatusNeeded = true @@ -115,17 +113,17 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. // Inspect source sourceClient, err := dr.createSyncMasterClient(spec.Source) if err != nil { - log.Warn().Err(err).Msg("Failed to create source syncmaster client") + dr.log.Err(err).Warn("Failed to create source syncmaster client") } else { sourceStatus, err := sourceClient.Master().Status(ctx) if err != nil { - log.Warn().Err(err).Msg("Failed to fetch status from source syncmaster") + dr.log.Err(err).Warn("Failed to fetch status from source syncmaster") } //if sourceStatus.Status.IsActive() { outgoingID, hasOutgoingEndpoint, err := dr.hasOutgoingEndpoint(sourceStatus, spec.Destination, destEndpoint) if err != nil { - log.Warn().Err(err).Msg("Failed to check has-outgoing-endpoint") + dr.log.Err(err).Warn("Failed to check has-outgoing-endpoint") } else if hasOutgoingEndpoint { // Destination is know in source // Fetch shard status @@ -133,14 +131,14 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. 
updateStatusNeeded = true } else { // We cannot find the destination in the source status - log.Info().Err(err).Msg("Destination not yet known in source syncmasters") + dr.log.Err(err).Info("Destination not yet known in source syncmasters") } } // Update status if needed if updateStatusNeeded { if err := dr.updateCRStatus(); err != nil { - log.Warn().Err(err).Msg("Failed to update status") + dr.log.Err(err).Warn("Failed to update status") hasError = true } } @@ -148,12 +146,12 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. // Cancel sync if needed if cancelSyncNeeded { req := client.CancelSynchronizationRequest{} - log.Info().Msg("Canceling synchronization") + dr.log.Info("Canceling synchronization") if _, err := destClient.Master().CancelSynchronization(ctx, req); err != nil { - log.Warn().Err(err).Msg("Failed to cancel synchronization") + dr.log.Err(err).Warn("Failed to cancel synchronization") hasError = true } else { - log.Info().Msg("Canceled synchronization") + dr.log.Info("Canceled synchronization") nextInterval = time.Second * 10 } } @@ -162,24 +160,24 @@ func (dr *DeploymentReplication) inspectDeploymentReplication(lastInterval time. if configureSyncNeeded { source, err := dr.createArangoSyncEndpoint(spec.Source) if err != nil { - log.Warn().Err(err).Msg("Failed to create syncmaster endpoint") + dr.log.Err(err).Warn("Failed to create syncmaster endpoint") hasError = true } else { auth, err := dr.createArangoSyncTLSAuthentication(spec) if err != nil { - log.Warn().Err(err).Msg("Failed to configure synchronization authentication") + dr.log.Err(err).Warn("Failed to configure synchronization authentication") hasError = true } else { req := client.SynchronizationRequest{ Source: source, Authentication: auth, } - log.Info().Msg("Configuring synchronization") + dr.log.Info("Configuring synchronization") if err := destClient.Master().Synchronize(ctx, req); err != nil { - log.Warn().Err(err).Msg("Failed to configure synchronization") + dr.log.Err(err).Warn("Failed to configure synchronization") hasError = true } else { - log.Info().Msg("Configured synchronization") + dr.log.Info("Configured synchronization") nextInterval = time.Second * 10 } } diff --git a/pkg/server/auth.go b/pkg/server/auth.go index fea10be1f..677fa78f7 100644 --- a/pkg/server/auth.go +++ b/pkg/server/auth.go @@ -32,7 +32,7 @@ import ( "github.com/dchest/uniuri" "github.com/gin-gonic/gin" - "github.com/rs/zerolog" + "github.com/arangodb/kube-arangodb/pkg/logging" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" typedv1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -43,8 +43,9 @@ const ( bearerPrefix = "bearer " ) +var authLogger = logging.Global().RegisterAndGetLogger("server-authentication", logging.Info) + type serverAuthentication struct { - log zerolog.Logger secrets typedv1.SecretInterface admin struct { mutex sync.Mutex @@ -81,9 +82,8 @@ type loginResponse struct { // newServerAuthentication creates a new server authentication service // for the given arguments. 
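The serverAuthentication changes above replace an injected zerolog.Logger with a package-level authLogger obtained from a global registry at init time. Below is a rough sketch of that registration idea; the registry type is illustrative and assumes nothing about the operator's actual logging.Global() implementation beyond the name-plus-level call shape.

package main

import (
	"os"
	"sync"

	"github.com/rs/zerolog"
)

// registry hands out named, levelled loggers; each package resolves its logger once.
type registry struct {
	mu      sync.Mutex
	loggers map[string]zerolog.Logger
}

var global = &registry{loggers: map[string]zerolog.Logger{}}

// registerAndGet returns the logger registered under name, creating it at the
// given level on first use.
func (r *registry) registerAndGet(name string, level zerolog.Level) zerolog.Logger {
	r.mu.Lock()
	defer r.mu.Unlock()
	if l, ok := r.loggers[name]; ok {
		return l
	}
	l := zerolog.New(os.Stdout).Level(level).With().Str("logger", name).Logger()
	r.loggers[name] = l
	return l
}

// Package-level logger, analogous to authLogger above.
var authLogger = global.registerAndGet("server-authentication", zerolog.InfoLevel)

func main() {
	authLogger.Debug().Msg("suppressed: below the registered level")
	authLogger.Info().Str("token", "example-token").Msg("Invalid token")
}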
-func newServerAuthentication(log zerolog.Logger, secrets typedv1.SecretInterface, adminSecretName string, allowAnonymous bool) *serverAuthentication { +func newServerAuthentication(secrets typedv1.SecretInterface, adminSecretName string, allowAnonymous bool) *serverAuthentication { auth := &serverAuthentication{ - log: log, secrets: secrets, adminSecretName: adminSecretName, allowAnonymous: allowAnonymous, @@ -131,7 +131,7 @@ func (s *serverAuthentication) checkLogin(username, password string) error { if expectedUsername == "" { var err error if expectedUsername, expectedPassword, err = s.fetchAdminSecret(); err != nil { - s.log.Error().Err(err).Msg("Failed to fetch secret") + authLogger.Err(err).Error("Failed to fetch secret") return errors.WithStack(errors.Wrap(UnauthorizedError, "admin secret cannot be loaded")) } } @@ -160,12 +160,12 @@ func (s *serverAuthentication) checkAuthentication(c *gin.Context) { s.tokens.mutex.Lock() defer s.tokens.mutex.Unlock() if entry, found := s.tokens.tokens[token]; !found { - s.log.Debug().Str("token", token).Msg("Invalid token") + authLogger.Str("token", token).Debug("Invalid token") sendError(c, errors.WithStack(errors.Wrap(UnauthorizedError, "invalid credentials"))) c.Abort() return } else if entry.IsExpired() { - s.log.Debug().Str("token", token).Msg("Token expired") + authLogger.Str("token", token).Debug("Token expired") sendError(c, errors.WithStack(errors.Wrap(UnauthorizedError, "credentials expired"))) c.Abort() return diff --git a/pkg/server/handlers.go b/pkg/server/handlers.go index 4f1fd134c..b8d8ebeed 100644 --- a/pkg/server/handlers.go +++ b/pkg/server/handlers.go @@ -23,9 +23,12 @@ package server import ( "net/http" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/gin-gonic/gin" ) +var serverLogger = logging.Global().RegisterAndGetLogger("server", logging.Info) + type operatorsResponse struct { PodName string `json:"pod"` Namespace string `json:"namespace"` @@ -60,6 +63,6 @@ func (s *Server) handleGetOperators(c *gin.Context) { Storage: s.deps.Storage.Probe.IsReady(), Other: s.deps.Operators.FindOtherOperators(), } - s.deps.Log.Info().Interface("result", result).Msg("handleGetOperators") + serverLogger.Interface("result", result).Info("handleGetOperators") c.JSON(http.StatusOK, result) } diff --git a/pkg/server/server.go b/pkg/server/server.go index 88ff71eb2..28f7e2769 100644 --- a/pkg/server/server.go +++ b/pkg/server/server.go @@ -36,7 +36,6 @@ import ( "github.com/gin-gonic/gin" "github.com/jessevdk/go-assets" prometheus "github.com/prometheus/client_golang/prometheus/promhttp" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" @@ -64,7 +63,6 @@ type OperatorDependency struct { // Dependencies of the Server type Dependencies struct { - Log zerolog.Logger LivenessProbe *probe.LivenessProbe Deployment OperatorDependency DeploymentReplication OperatorDependency @@ -151,7 +149,7 @@ func NewServer(cli corev1.CoreV1Interface, cfg Config, deps Dependencies) (*Serv cfg: cfg, deps: deps, httpServer: httpServer, - auth: newServerAuthentication(deps.Log, deps.Secrets, cfg.AdminSecretName, cfg.AllowAnonymous), + auth: newServerAuthentication(deps.Secrets, cfg.AdminSecretName, cfg.AllowAnonymous), } // Build router @@ -220,7 +218,7 @@ func createAssetFileHandler(file *assets.File) func(c *gin.Context) { // Run the server until the program stops. 
func (s *Server) Run() error { - s.deps.Log.Info().Msgf("Serving on %s", s.httpServer.Addr) + serverLogger.Info("Serving on %s", s.httpServer.Addr) if err := s.httpServer.ListenAndServeTLS("", ""); err != nil && err != http.ErrServerClosed { return errors.WithStack(err) } diff --git a/pkg/storage/daemon_set.go b/pkg/storage/daemon_set.go index 37e0a37b5..abf46a55d 100644 --- a/pkg/storage/daemon_set.go +++ b/pkg/storage/daemon_set.go @@ -46,7 +46,6 @@ const ( // ensureDaemonSet ensures that a daemonset is created for the given local storage. // If it already exists, it is updated. func (ls *LocalStorage) ensureDaemonSet(apiObject *api.ArangoLocalStorage) error { - log := ls.deps.Log ns := ls.config.Namespace c := core.Container{ Name: "provisioner", @@ -137,7 +136,7 @@ func (ls *LocalStorage) ensureDaemonSet(apiObject *api.ArangoLocalStorage) error } } else { // We're done - log.Debug().Msg("Created DaemonSet") + ls.log.Debug("Created DaemonSet") return nil } @@ -158,11 +157,11 @@ func (ls *LocalStorage) ensureDaemonSet(apiObject *api.ArangoLocalStorage) error // Failed to update, try again continue } else if err != nil { - ls.deps.Log.Debug().Err(err).Msg("failed to patch DaemonSet spec") + ls.log.Err(err).Debug("failed to patch DaemonSet spec") return errors.WithStack(errors.Newf("failed to patch DaemonSet spec: %v", err)) } else { // Update was a success - log.Debug().Msg("Updated DaemonSet") + ls.log.Debug("Updated DaemonSet") return nil } } diff --git a/pkg/storage/image.go b/pkg/storage/image.go index bef446055..4550e46c0 100644 --- a/pkg/storage/image.go +++ b/pkg/storage/image.go @@ -24,7 +24,6 @@ import ( "context" "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" meta "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" @@ -32,13 +31,13 @@ import ( // getMyImage fetched the docker image from my own pod func (l *LocalStorage) getMyImage() (string, core.PullPolicy, []core.LocalObjectReference, error) { - return getImage(l.deps.Log, l.config.Namespace, l.config.PodName, l.deps.Client.Kubernetes()) + return l.getImage(l.config.Namespace, l.config.PodName, l.deps.Client.Kubernetes()) } -func getImage(log zerolog.Logger, ns, name string, client kubernetes.Interface) (string, core.PullPolicy, []core.LocalObjectReference, error) { +func (l *LocalStorage) getImage(ns, name string, client kubernetes.Interface) (string, core.PullPolicy, []core.LocalObjectReference, error) { p, err := client.CoreV1().Pods(ns).Get(context.Background(), name, meta.GetOptions{}) if err != nil { - log.Debug().Err(err).Str("pod-name", name).Msg("Failed to get my own pod") + l.log.Err(err).Str("pod-name", name).Debug("Failed to get my own pod") return "", "", nil, errors.WithStack(err) } diff --git a/pkg/storage/local_storage.go b/pkg/storage/local_storage.go index 877e0a17e..f565989c6 100644 --- a/pkg/storage/local_storage.go +++ b/pkg/storage/local_storage.go @@ -28,19 +28,21 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" - "github.com/rs/zerolog" - "github.com/rs/zerolog/log" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/tools/record" api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" "github.com/arangodb/kube-arangodb/pkg/util/kclient" "github.com/arangodb/kube-arangodb/pkg/util/retry" "github.com/arangodb/kube-arangodb/pkg/util/trigger" + "github.com/rs/zerolog" ) 
+var logger = logging.Global().RegisterAndGetLogger("deployment-storage", logging.Info) + // Config holds configuration settings for a LocalStorage type Config struct { Namespace string @@ -50,7 +52,6 @@ type Config struct { // Dependencies holds dependent services for a LocalStorage type Dependencies struct { - Log zerolog.Logger Client kclient.Client EventRecorder record.EventRecorder } @@ -81,6 +82,8 @@ const ( // LocalStorage is the in process state of an ArangoLocalStorage. type LocalStorage struct { + log logging.Logger + apiObject *api.ArangoLocalStorage // API object status api.LocalStorageStatus // Internal status of the CR config Config @@ -112,7 +115,9 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoLocalStorage) (* stopCh: make(chan struct{}), } - ls.pvCleaner = newPVCleaner(deps.Log, deps.Client.Kubernetes(), ls.GetClientByNodeName) + ls.log = logger.WrapObj(ls) + + ls.pvCleaner = newPVCleaner(deps.Client.Kubernetes(), ls.GetClientByNodeName) go ls.run() go ls.listenForPvcEvents() @@ -134,7 +139,7 @@ func (ls *LocalStorage) Update(apiObject *api.ArangoLocalStorage) { // Delete the local storage. // Called when the local storage was deleted by the user. func (ls *LocalStorage) Delete() { - ls.deps.Log.Info().Msg("local storage is deleted by user") + ls.log.Info("local storage is deleted by user") if atomic.CompareAndSwapInt32(&ls.stopped, 0, 1) { close(ls.stopCh) } @@ -146,10 +151,10 @@ func (ls *LocalStorage) send(ev *localStorageEvent) { case ls.eventCh <- ev: l, ecap := len(ls.eventCh), cap(ls.eventCh) if l > int(float64(ecap)*0.8) { - ls.deps.Log.Warn(). + ls.log. Int("used", l). Int("capacity", ecap). - Msg("event queue buffer is almost full") + Warn("event queue buffer is almost full") } case <-ls.stopCh: } @@ -159,7 +164,7 @@ func (ls *LocalStorage) send(ev *localStorageEvent) { // It processes the event queue and polls the state of generated // resource on a regular basis. func (ls *LocalStorage) run() { - //log := ls.deps.Log + //log := ls.log // Find out my image image, pullPolicy, pullSecrets, err := ls.getMyImage() @@ -289,12 +294,12 @@ func (ls *LocalStorage) run() { // handleArangoLocalStorageUpdatedEvent is called when the local storage is updated by the user. 
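LocalStorage.send, touched above, warns once the buffered event channel passes 80% of its capacity and gives up when the stop channel closes. A small standalone sketch of that send pattern follows; the types are placeholders, not the operator's event types.

package main

import "fmt"

type localStorageEvent struct{ name string }

// worker is a stand-in for LocalStorage: a buffered event queue plus a stop channel.
type worker struct {
	eventCh chan *localStorageEvent
	stopCh  chan struct{}
}

// send enqueues the event, warns when the buffer is more than 80% full,
// and drops the event if the worker is shutting down.
func (w *worker) send(ev *localStorageEvent) {
	select {
	case w.eventCh <- ev:
		used, capacity := len(w.eventCh), cap(w.eventCh)
		if used > int(float64(capacity)*0.8) {
			fmt.Printf("event queue buffer is almost full: used=%d capacity=%d\n", used, capacity)
		}
	case <-w.stopCh:
		// Shutting down; the event is dropped.
	}
}

func main() {
	w := &worker{eventCh: make(chan *localStorageEvent, 5), stopCh: make(chan struct{})}
	for i := 0; i < 5; i++ {
		w.send(&localStorageEvent{name: fmt.Sprintf("update-%d", i)})
	}
}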
func (ls *LocalStorage) handleArangoLocalStorageUpdatedEvent(event *localStorageEvent) error { - log := ls.deps.Log.With().Str("localStorage", event.LocalStorage.GetName()).Logger() + log := ls.log.Str("localStorage", event.LocalStorage.GetName()) // Get the most recent version of the local storage from the API server current, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), ls.apiObject.GetName(), metav1.GetOptions{}) if err != nil { - log.Debug().Err(err).Msg("Failed to get current version of local storage from API server") + log.Err(err).Debug("Failed to get current version of local storage from API server") if k8sutil.IsNotFound(err) { return nil } @@ -306,20 +311,20 @@ func (ls *LocalStorage) handleArangoLocalStorageUpdatedEvent(event *localStorage newAPIObject.Status = ls.status resetFields := ls.apiObject.Spec.ResetImmutableFields(&newAPIObject.Spec) if len(resetFields) > 0 { - log.Debug().Strs("fields", resetFields).Msg("Found modified immutable fields") + log.Strs("fields", resetFields...).Debug("Found modified immutable fields") } if err := newAPIObject.Spec.Validate(); err != nil { ls.createEvent(k8sutil.NewErrorEvent("Validation failed", err, ls.apiObject)) // Try to reset object if err := ls.updateCRSpec(ls.apiObject.Spec); err != nil { - log.Error().Err(err).Msg("Restore original spec failed") + log.Err(err).Error("Restore original spec failed") ls.createEvent(k8sutil.NewErrorEvent("Restore original failed", err, ls.apiObject)) } return nil } if len(resetFields) > 0 { for _, fieldName := range resetFields { - log.Debug().Str("field", fieldName).Msg("Reset immutable field") + log.Str("field", fieldName).Debug("Reset immutable field") ls.createEvent(k8sutil.NewImmutableFieldEvent(fieldName, ls.apiObject)) } } @@ -371,7 +376,7 @@ func (ls *LocalStorage) updateCRStatus() error { } } if err != nil { - ls.deps.Log.Debug().Err(err).Msg("failed to patch ArangoLocalStorage status") + ls.log.Err(err).Debug("failed to patch ArangoLocalStorage status") return errors.WithStack(errors.Newf("failed to patch ArangoLocalStorage status: %v", err)) } } @@ -405,7 +410,7 @@ func (ls *LocalStorage) updateCRSpec(newSpec api.LocalStorageSpec) error { } } if err != nil { - ls.deps.Log.Debug().Err(err).Msg("failed to patch ArangoLocalStorage spec") + ls.log.Err(err).Debug("failed to patch ArangoLocalStorage spec") return errors.WithStack(errors.Newf("failed to patch ArangoLocalStorage spec: %v", err)) } } @@ -413,7 +418,7 @@ func (ls *LocalStorage) updateCRSpec(newSpec api.LocalStorageSpec) error { // failOnError reports the given error and sets the local storage status to failed. func (ls *LocalStorage) failOnError(err error, msg string) { - log.Error().Err(err).Msg(msg) + ls.log.Err(err).Error(msg) ls.status.Reason = err.Error() ls.reportFailedStatus() } @@ -421,8 +426,8 @@ func (ls *LocalStorage) failOnError(err error, msg string) { // reportFailedStatus sets the status of the local storage to Failed and keeps trying to forward // that to the API server. func (ls *LocalStorage) reportFailedStatus() { - log := ls.deps.Log - log.Info().Msg("local storage failed. Reporting failed reason...") + log := ls.log + log.Info("local storage failed. 
Reporting failed reason...") op := func() error { ls.status.State = api.LocalStorageStateFailed @@ -433,7 +438,7 @@ func (ls *LocalStorage) reportFailedStatus() { } if !k8sutil.IsConflict(err) { - log.Warn().Err(err).Msg("retry report status: fail to update") + log.Err(err).Warn("retry report status: fail to update") return errors.WithStack(err) } @@ -445,7 +450,7 @@ func (ls *LocalStorage) reportFailedStatus() { if k8sutil.IsNotFound(err) { return nil } - log.Warn().Err(err).Msg("retry report status: fail to get latest version") + log.Err(err).Warn("retry report status: fail to get latest version") return errors.WithStack(err) } ls.apiObject = depl @@ -463,3 +468,7 @@ func (ls *LocalStorage) isOwnerOf(obj metav1.Object) bool { } return ownerRefs[0].UID == ls.apiObject.UID } + +func (ls *LocalStorage) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in.Str("namespace", ls.apiObject.GetNamespace()).Str("name", ls.apiObject.GetName()) +} diff --git a/pkg/storage/provisioner/service/provisioner.go b/pkg/storage/provisioner/service/provisioner.go index bc6afabba..486867e95 100644 --- a/pkg/storage/provisioner/service/provisioner.go +++ b/pkg/storage/provisioner/service/provisioner.go @@ -29,32 +29,37 @@ import ( "github.com/rs/zerolog" "golang.org/x/sys/unix" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/storage/provisioner" ) +var logger = logging.Global().RegisterAndGetLogger("deployment-storage-service", logging.Info) + // Config for the storage provisioner type Config struct { Address string // Server address to listen on NodeName string // Name of the run I'm running now } -// Dependencies for the storage provisioner -type Dependencies struct { - Log zerolog.Logger -} - // Provisioner implements a Local storage provisioner type Provisioner struct { + Log logging.Logger Config - Dependencies } // New creates a new local storage provisioner -func New(config Config, deps Dependencies) (*Provisioner, error) { - return &Provisioner{ - Config: config, - Dependencies: deps, - }, nil +func New(config Config) (*Provisioner, error) { + p := &Provisioner{ + Config: config, + } + + p.Log = logger.WrapObj(p) + + return p, nil +} + +func (p *Provisioner) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in } // Run the provisioner until the given context is canceled. @@ -72,12 +77,12 @@ func (p *Provisioner) GetNodeInfo(ctx context.Context) (provisioner.NodeInfo, er // GetInfo fetches information from the filesystem containing // the given local path. func (p *Provisioner) GetInfo(ctx context.Context, localPath string) (provisioner.Info, error) { - log := p.Log.With().Str("local-path", localPath).Logger() + log := p.Log.Str("local-path", localPath) - log.Debug().Msg("gettting info for local path") + log.Debug("getting info for local path") statfs := &unix.Statfs_t{} if err := unix.Statfs(localPath, statfs); err != nil { - log.Error().Err(err).Msg("Statfs failed") + log.Err(err).Error("Statfs failed") return provisioner.Info{}, errors.WithStack(err) } @@ -87,11 +92,11 @@ func (p *Provisioner) GetInfo(ctx context.Context, localPath string) (provisione // Capacity is total block count * fragment size capacity := int64(statfs.Blocks) * statfs.Bsize // nolint:typecheck - log.Debug(). + log. Str("node-name", p.NodeName). Int64("capacity", capacity). Int64("available", available).
- Msg("Returning info for local path") + Debug("Returning info for local path") return provisioner.Info{ NodeInfo: provisioner.NodeInfo{ NodeName: p.NodeName, @@ -103,22 +108,22 @@ func (p *Provisioner) GetInfo(ctx context.Context, localPath string) (provisione // Prepare a volume at the given local path func (p *Provisioner) Prepare(ctx context.Context, localPath string) error { - log := p.Log.With().Str("local-path", localPath).Logger() - log.Debug().Msg("preparing local path") + log := p.Log.Str("local-path", localPath) + log.Debug("preparing local path") // Make sure directory is empty if err := os.RemoveAll(localPath); err != nil && !os.IsNotExist(err) { - log.Error().Err(err).Msg("Failed to clean existing directory") + log.Err(err).Error("Failed to clean existing directory") return errors.WithStack(err) } // Make sure directory exists if err := os.MkdirAll(localPath, 0755); err != nil { - log.Error().Err(err).Msg("Failed to make directory") + log.Err(err).Error("Failed to make directory") return errors.WithStack(err) } // Set access rights if err := os.Chmod(localPath, 0777); err != nil { - log.Error().Err(err).Msg("Failed to set directory access") + log.Err(err).Error("Failed to set directory access") return errors.WithStack(err) } return nil @@ -126,12 +131,12 @@ func (p *Provisioner) Prepare(ctx context.Context, localPath string) error { // Remove a volume with the given local path func (p *Provisioner) Remove(ctx context.Context, localPath string) error { - log := p.Log.With().Str("local-path", localPath).Logger() - log.Debug().Msg("cleanup local path") + log := p.Log.Str("local-path", localPath) + log.Debug("cleanup local path") // Make sure directory is empty if err := os.RemoveAll(localPath); err != nil && !os.IsNotExist(err) { - log.Error().Err(err).Msg("Failed to clean directory") + log.Err(err).Error("Failed to clean directory") return errors.WithStack(err) } return nil diff --git a/pkg/storage/provisioner/service/server.go b/pkg/storage/provisioner/service/server.go index 1cca88739..2a2b348b9 100644 --- a/pkg/storage/provisioner/service/server.go +++ b/pkg/storage/provisioner/service/server.go @@ -30,8 +30,7 @@ import ( "github.com/julienschmidt/httprouter" - "github.com/rs/zerolog" - + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/storage/provisioner" ) @@ -40,7 +39,7 @@ const ( ) // runServer runs a HTTP server serving the given API -func runServer(ctx context.Context, log zerolog.Logger, addr string, api provisioner.API) error { +func runServer(ctx context.Context, log logging.Logger, addr string, api provisioner.API) error { mux := httprouter.New() mux.GET("/nodeinfo", getNodeInfoHandler(api)) mux.POST("/info", getInfoHandler(api)) @@ -55,7 +54,7 @@ func runServer(ctx context.Context, log zerolog.Logger, addr string, api provisi serverErrors := make(chan error) go func() { defer close(serverErrors) - log.Info().Msgf("Listening on %s", addr) + log.Info("Listening on %s", addr) if err := httpServer.ListenAndServe(); err != nil && err != http.ErrServerClosed { serverErrors <- errors.WithStack(err) } @@ -66,7 +65,7 @@ func runServer(ctx context.Context, log zerolog.Logger, addr string, api provisi return errors.WithStack(err) case <-ctx.Done(): // Close server - log.Debug().Msg("Closing server...") + log.Debug("Closing server...") httpServer.Close() return nil } diff --git a/pkg/storage/pv_cleanup.go b/pkg/storage/pv_cleanup.go index 1dcea4909..2e3fb7d5f 100644 --- a/pkg/storage/pv_cleanup.go +++ b/pkg/storage/pv_cleanup.go @@ 
-32,14 +32,17 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/storage/provisioner" "github.com/arangodb/kube-arangodb/pkg/util/k8sutil" "github.com/arangodb/kube-arangodb/pkg/util/trigger" ) +var pcLogger = logging.Global().RegisterAndGetLogger("deployment-storage-pc", logging.Info) + type pvCleaner struct { mutex sync.Mutex - log zerolog.Logger + log logging.Logger cli kubernetes.Interface items []v1.PersistentVolume trigger trigger.Trigger @@ -47,12 +50,15 @@ type pvCleaner struct { } // newPVCleaner creates a new cleaner of persistent volumes. -func newPVCleaner(log zerolog.Logger, cli kubernetes.Interface, clientGetter func(nodeName string) (provisioner.API, error)) *pvCleaner { - return &pvCleaner{ - log: log, +func newPVCleaner(cli kubernetes.Interface, clientGetter func(nodeName string) (provisioner.API, error)) *pvCleaner { + c := &pvCleaner{ cli: cli, clientGetter: clientGetter, } + + c.log = pcLogger.WrapObj(c) + + return c } // Run continues cleaning PV's until the given channel is closed. @@ -61,7 +67,7 @@ func (c *pvCleaner) Run(stopCh <-chan struct{}) { delay := time.Hour hasMore, err := c.cleanFirst() if err != nil { - c.log.Error().Err(err).Msg("Failed to clean PersistentVolume") + c.log.Err(err).Error("Failed to clean PersistentVolume") } if hasMore { delay = time.Millisecond * 5 @@ -127,8 +133,8 @@ func (c *pvCleaner) cleanFirst() (bool, error) { // clean tries to clean the given PV. func (c *pvCleaner) clean(pv v1.PersistentVolume) error { - log := c.log.With().Str("name", pv.GetName()).Logger() - log.Debug().Msg("Cleaning PersistentVolume") + log := c.log.Str("name", pv.GetName()) + log.Debug("Cleaning PersistentVolume") // Find local path localSource := pv.Spec.PersistentVolumeSource.Local @@ -144,27 +150,31 @@ func (c *pvCleaner) clean(pv v1.PersistentVolume) error { } client, err := c.clientGetter(nodeName) if err != nil { - log.Debug().Err(err).Str("node", nodeName).Msg("Failed to get client for node") + log.Err(err).Str("node", nodeName).Debug("Failed to get client for node") return errors.WithStack(err) } // Clean volume through client ctx := context.Background() if err := client.Remove(ctx, localPath); err != nil { - log.Debug().Err(err). + log.Err(err). Str("node", nodeName). Str("local-path", localPath). - Msg("Failed to remove local path") + Debug("Failed to remove local path") return errors.WithStack(err) } // Remove persistent volume if err := c.cli.CoreV1().PersistentVolumes().Delete(context.Background(), pv.GetName(), metav1.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) { - log.Debug().Err(err). + log.Err(err). Str("name", pv.GetName()). - Msg("Failed to remove PersistentVolume") + Debug("Failed to remove PersistentVolume") return errors.WithStack(err) } return nil } + +func (c *pvCleaner) WrapLogger(in *zerolog.Event) *zerolog.Event { + return in +} diff --git a/pkg/storage/pv_creator.go b/pkg/storage/pv_creator.go index 238c7e2c6..f80251e5d 100644 --- a/pkg/storage/pv_creator.go +++ b/pkg/storage/pv_creator.go @@ -60,7 +60,6 @@ var ( // createPVs creates a given number of PersistentVolume's. 
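pvCleaner.Run, shown above, re-polls after five milliseconds while volumes remain queued and backs off for an hour otherwise. A simplified, self-contained sketch of that loop shape follows; the queue and item types are placeholders, and the trigger-based wakeup the real cleaner also uses is omitted.

package main

import (
	"fmt"
	"time"
)

type cleaner struct {
	items []string
}

// cleanFirst removes one queued item and reports whether more remain.
func (c *cleaner) cleanFirst() (bool, error) {
	if len(c.items) == 0 {
		return false, nil
	}
	fmt.Println("cleaned", c.items[0])
	c.items = c.items[1:]
	return len(c.items) > 0, nil
}

// run drains one item per iteration, polling again quickly while work remains,
// backing off when the queue is empty, and exiting when stopCh closes.
func (c *cleaner) run(stopCh <-chan struct{}) {
	for {
		delay := time.Hour // nothing queued: wait a long time
		if hasMore, err := c.cleanFirst(); err != nil {
			fmt.Println("failed to clean:", err)
		} else if hasMore {
			delay = 5 * time.Millisecond // more work queued: come back quickly
		}
		select {
		case <-stopCh:
			return
		case <-time.After(delay):
		}
	}
}

func main() {
	stop := make(chan struct{})
	c := &cleaner{items: []string{"pv-a", "pv-b"}}
	go c.run(stop)
	time.Sleep(50 * time.Millisecond)
	close(stop)
}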
func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLocalStorage, unboundClaims []v1.PersistentVolumeClaim) error { - log := ls.deps.Log // Find provisioner clients clients, err := ls.createProvisionerClients() if err != nil { @@ -88,7 +87,7 @@ func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLoca var err error allowedClients, err = ls.filterAllowedNodes(nodeClientMap, deplName, role) if err != nil { - log.Warn().Err(err).Msg("Failed to filter allowed nodes") + ls.log.Err(err).Warn("Failed to filter allowed nodes") continue // We'll try this claim again later } if !enforceAniAffinity && len(allowedClients) == 0 { @@ -107,7 +106,7 @@ func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLoca } // Create PV if err := ls.createPV(ctx, apiObject, allowedClients, i, volSize, claim, deplName, role); err != nil { - log.Error().Err(err).Msg("Failed to create PersistentVolume") + ls.log.Err(err).Error("Failed to create PersistentVolume") } } @@ -116,29 +115,28 @@ func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLoca // createPV creates a PersistentVolume. func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocalStorage, clients []provisioner.API, clientsOffset int, volSize int64, claim v1.PersistentVolumeClaim, deploymentName, role string) error { - log := ls.deps.Log // Try clients for clientIdx := 0; clientIdx < len(clients); clientIdx++ { client := clients[(clientsOffset+clientIdx)%len(clients)] // Try local path within client for _, localPathRoot := range apiObject.Spec.LocalPath { - log := log.With().Str("local-path-root", localPathRoot).Logger() + log := ls.log.Str("local-path-root", localPathRoot) info, err := client.GetInfo(ctx, localPathRoot) if err != nil { - log.Error().Err(err).Msg("Failed to get client info") + log.Err(err).Error("Failed to get client info") continue } if info.Available < volSize { - log.Debug().Msg("Not enough available size") + ls.log.Debug("Not enough available size") continue } // Ok, prepare a directory name := strings.ToLower(uniuri.New()) localPath := filepath.Join(localPathRoot, name) - log = log.With().Str("local-path", localPath).Logger() + log = ls.log.Str("local-path", localPath) if err := client.Prepare(ctx, localPath); err != nil { - log.Error().Err(err).Msg("Failed to prepare local path") + log.Err(err).Error("Failed to prepare local path") continue } // Create a volume @@ -187,19 +185,19 @@ func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocal // Attach PV to ArangoLocalStorage pv.SetOwnerReferences(append(pv.GetOwnerReferences(), apiObject.AsOwner())) if _, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().Create(context.Background(), pv, metav1.CreateOptions{}); err != nil { - log.Error().Err(err).Msg("Failed to create PersistentVolume") + log.Err(err).Error("Failed to create PersistentVolume") continue } - log.Debug(). + log. Str("name", pvName). Str("node-name", info.NodeName). 
- Msg("Created PersistentVolume") + Debug("Created PersistentVolume") // Bind claim to volume if err := ls.bindClaimToVolume(claim, pv.GetName()); err != nil { // Try to delete the PV now if err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().Delete(context.Background(), pv.GetName(), metav1.DeleteOptions{}); err != nil { - log.Error().Err(err).Msg("Failed to delete PV after binding PVC failed") + log.Err(err).Error("Failed to delete PV after binding PVC failed") } return errors.WithStack(err) } @@ -293,7 +291,7 @@ func (ls *LocalStorage) filterAllowedNodes(clients map[string]provisioner.API, d // bindClaimToVolume tries to bind the given claim to the volume with given name. // If the claim has been updated, the function retries several times. func (ls *LocalStorage) bindClaimToVolume(claim v1.PersistentVolumeClaim, volumeName string) error { - log := ls.deps.Log.With().Str("pvc-name", claim.GetName()).Str("volume-name", volumeName).Logger() + log := ls.log.Str("pvc-name", claim.GetName()).Str("volume-name", volumeName) pvcs := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumeClaims(claim.GetNamespace()) for attempt := 0; attempt < 10; attempt++ { @@ -305,14 +303,14 @@ func (ls *LocalStorage) bindClaimToVolume(claim v1.PersistentVolumeClaim, volume if k8sutil.IsNotFound(err) { return errors.WithStack(err) } else if err != nil { - log.Warn().Err(err).Msg("Failed to load updated PersistentVolumeClaim") + log.Err(err).Warn("Failed to load updated PersistentVolumeClaim") continue } // Check claim. If already bound, bail out if !pvcNeedsVolume(*updated) { if updated.Spec.VolumeName == volumeName { - log.Info().Msg("PersistentVolumeClaim already bound to PersistentVolume") + log.Info("PersistentVolumeClaim already bound to PersistentVolume") return nil } return errors.WithStack(errors.Newf("PersistentVolumeClaim '%s' no longer needs a volume", claim.GetName())) @@ -322,15 +320,15 @@ func (ls *LocalStorage) bindClaimToVolume(claim v1.PersistentVolumeClaim, volume updated.Spec.VolumeName = volumeName if _, err := pvcs.Update(context.Background(), updated, metav1.UpdateOptions{}); k8sutil.IsConflict(err) { // Claim modified already, retry - log.Debug().Err(err).Msg("PersistentVolumeClaim has been modified. Retrying.") + log.Err(err).Debug("PersistentVolumeClaim has been modified. Retrying.") } else if err != nil { - log.Error().Err(err).Msg("Failed to bind PVC to volume") + log.Err(err).Error("Failed to bind PVC to volume") return errors.WithStack(err) } - log.Debug().Msg("Bound volume to PersistentVolumeClaim") + ls.log.Debug("Bound volume to PersistentVolumeClaim") return nil } - log.Error().Msg("All attempts to bind PVC to volume failed") + log.Error("All attempts to bind PVC to volume failed") return errors.WithStack(errors.Newf("All attempts to bind PVC to volume failed")) } diff --git a/pkg/storage/pv_informer.go b/pkg/storage/pv_informer.go index df3ee6d78..0713ed144 100644 --- a/pkg/storage/pv_informer.go +++ b/pkg/storage/pv_informer.go @@ -43,7 +43,6 @@ func (ls *LocalStorage) listenForPvEvents() { } rw := k8sutil.NewResourceWatcher( - ls.deps.Log, ls.deps.Client.Kubernetes().CoreV1().RESTClient(), "persistentvolumes", "", //ls.apiObject.GetNamespace(), diff --git a/pkg/storage/pv_inspector.go b/pkg/storage/pv_inspector.go index f4018d66b..3ecf5ceff 100644 --- a/pkg/storage/pv_inspector.go +++ b/pkg/storage/pv_inspector.go @@ -34,7 +34,6 @@ import ( // released volumes. // Returns the number of available PV's. 
func (ls *LocalStorage) inspectPVs() (int, error) { - log := ls.deps.Log list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), metav1.ListOptions{}) if err != nil { return 0, errors.WithStack(err) @@ -54,10 +53,10 @@ func (ls *LocalStorage) inspectPVs() (int, error) { // Let's clean it up if ls.isOwnerOf(&pv) { // Cleanup this volume - log.Debug().Str("name", pv.GetName()).Msg("Added PersistentVolume to cleaner") + ls.log.Str("name", pv.GetName()).Debug("Added PersistentVolume to cleaner") ls.pvCleaner.Add(pv) } else { - log.Debug().Str("name", pv.GetName()).Msg("PersistentVolume is not owned by us") + ls.log.Str("name", pv.GetName()).Debug("PersistentVolume is not owned by us") availableVolumes++ } } else { @@ -66,10 +65,10 @@ func (ls *LocalStorage) inspectPVs() (int, error) { case v1.VolumeReleased: if ls.isOwnerOf(&pv) { // Cleanup this volume - log.Debug().Str("name", pv.GetName()).Msg("Added PersistentVolume to cleaner") + ls.log.Str("name", pv.GetName()).Debug("Added PersistentVolume to cleaner") ls.pvCleaner.Add(pv) } else { - log.Debug().Str("name", pv.GetName()).Msg("PersistentVolume is not owned by us") + ls.log.Str("name", pv.GetName()).Debug("PersistentVolume is not owned by us") } } } diff --git a/pkg/storage/pvc_informer.go b/pkg/storage/pvc_informer.go index 1daedcabf..69c1ca94f 100644 --- a/pkg/storage/pvc_informer.go +++ b/pkg/storage/pvc_informer.go @@ -43,7 +43,6 @@ func (ls *LocalStorage) listenForPvcEvents() { } rw := k8sutil.NewResourceWatcher( - ls.deps.Log, ls.deps.Client.Kubernetes().CoreV1().RESTClient(), "persistentvolumeclaims", "", //ls.apiObject.GetNamespace(), diff --git a/pkg/storage/server_api.go b/pkg/storage/server_api.go index 6a7cb5396..e09ccdaf3 100644 --- a/pkg/storage/server_api.go +++ b/pkg/storage/server_api.go @@ -65,7 +65,7 @@ func (ls *LocalStorage) StorageClassIsDefault() bool { func (ls *LocalStorage) Volumes() []server.Volume { list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), metav1.ListOptions{}) if err != nil { - ls.deps.Log.Error().Err(err).Msg("Failed to list persistent volumes") + ls.log.Err(err).Error("Failed to list persistent volumes") return nil } result := make([]server.Volume, 0, len(list.Items)) diff --git a/pkg/storage/storage_class.go b/pkg/storage/storage_class.go index 4fe48b488..717c466cc 100644 --- a/pkg/storage/storage_class.go +++ b/pkg/storage/storage_class.go @@ -39,7 +39,6 @@ var ( // ensureStorageClass creates a storage class for the given local storage. // If such a class already exists, the create is ignored. func (l *LocalStorage) ensureStorageClass(apiObject *api.ArangoLocalStorage) error { - log := l.deps.Log spec := apiObject.Spec.StorageClass bindingMode := v1.VolumeBindingWaitForFirstConsumer reclaimPolicy := corev1.PersistentVolumeReclaimRetain @@ -55,25 +54,25 @@ func (l *LocalStorage) ensureStorageClass(apiObject *api.ArangoLocalStorage) err // ArangoLocalStorage resource may use the same StorageClass. cli := l.deps.Client.Kubernetes().StorageV1() if _, err := cli.StorageClasses().Create(context.Background(), sc, metav1.CreateOptions{}); k8sutil.IsAlreadyExists(err) { - log.Debug(). + l.log. Str("storageclass", sc.GetName()). - Msg("StorageClass already exists") + Debug("StorageClass already exists") } else if err != nil { - log.Debug().Err(err). + l.log.Err(err). Str("storageclass", sc.GetName()). 
- Msg("Failed to create StorageClass") + Debug("Failed to create StorageClass") return errors.WithStack(err) } else { - log.Debug(). + l.log. Str("storageclass", sc.GetName()). - Msg("StorageClass created") + Debug("StorageClass created") } if apiObject.Spec.StorageClass.IsDefault { // UnMark current default (if any) list, err := cli.StorageClasses().List(context.Background(), metav1.ListOptions{}) if err != nil { - log.Debug().Err(err).Msg("Listing StorageClasses failed") + l.log.Err(err).Debug("Listing StorageClasses failed") return errors.WithStack(err) } for _, scX := range list.Items { @@ -82,28 +81,28 @@ func (l *LocalStorage) ensureStorageClass(apiObject *api.ArangoLocalStorage) err } // Mark storage class as non-default if err := k8sutil.PatchStorageClassIsDefault(cli, scX.GetName(), false); err != nil { - log.Debug(). + l.log. Err(err). Str("storageclass", scX.GetName()). - Msg("Failed to mark StorageClass as not-default") + Debug("Failed to mark StorageClass as not-default") return errors.WithStack(err) } - log.Debug(). + l.log. Str("storageclass", scX.GetName()). - Msg("Marked StorageClass as not-default") + Debug("Marked StorageClass as not-default") } // Mark StorageClass default if err := k8sutil.PatchStorageClassIsDefault(cli, sc.GetName(), true); err != nil { - log.Debug(). + l.log. Err(err). Str("storageclass", sc.GetName()). - Msg("Failed to mark StorageClass as default") + Debug("Failed to mark StorageClass as default") return errors.WithStack(err) } - log.Debug(). + l.log. Str("storageclass", sc.GetName()). - Msg("Marked StorageClass as default") + Debug("Marked StorageClass as default") } return nil diff --git a/pkg/storage/utils_test.go b/pkg/storage/utils_test.go index 175505b9b..7bc357e8b 100644 --- a/pkg/storage/utils_test.go +++ b/pkg/storage/utils_test.go @@ -27,9 +27,9 @@ import ( "testing" api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha" + "github.com/arangodb/kube-arangodb/pkg/logging" "github.com/arangodb/kube-arangodb/pkg/util/kclient" "github.com/dchest/uniuri" - "github.com/rs/zerolog/log" "github.com/stretchr/testify/require" apps "k8s.io/api/apps/v1" core "k8s.io/api/core/v1" @@ -51,14 +51,10 @@ func generateDaemonSet(t *testing.T, podSpec core.PodSpec, lsSpec api.LocalStora Spec: podSpec, } - if _, err := client.Kubernetes().CoreV1().Pods(ns).Create(context.Background(), &pod, meta.CreateOptions{}); err != nil { - require.NoError(t, err) - } - - image, pullPolicy, pullSecrets, err := getImage(log.Logger, ns, name, client.Kubernetes()) - require.NoError(t, err) + lg := logging.NewDefaultFactory() ls := &LocalStorage{ + log: lg.RegisterAndGetLogger("test", logging.Info), apiObject: &api.ArangoLocalStorage{ ObjectMeta: meta.ObjectMeta{ Name: nameLS, @@ -73,11 +69,19 @@ func generateDaemonSet(t *testing.T, podSpec core.PodSpec, lsSpec api.LocalStora Namespace: ns, PodName: name, }, - image: image, - imagePullSecrets: pullSecrets, - imagePullPolicy: pullPolicy, } + if _, err := client.Kubernetes().CoreV1().Pods(ns).Create(context.Background(), &pod, meta.CreateOptions{}); err != nil { + require.NoError(t, err) + } + + image, pullPolicy, pullSecrets, err := ls.getImage(ns, name, client.Kubernetes()) + require.NoError(t, err) + + ls.image = image + ls.imagePullPolicy = pullPolicy + ls.imagePullSecrets = pullSecrets + err = ls.ensureDaemonSet(ls.apiObject) require.NoError(t, err) diff --git a/pkg/util/errors/errors.go b/pkg/util/errors/errors.go index b0902dd22..387bb74a2 100644 --- a/pkg/util/errors/errors.go +++ b/pkg/util/errors/errors.go @@ 
-31,9 +31,8 @@ import ( errs "github.com/pkg/errors" - "github.com/rs/zerolog" - driver "github.com/arangodb/go-driver" + "github.com/arangodb/kube-arangodb/pkg/logging" ) var ( @@ -42,6 +41,7 @@ var ( WithStack = errs.WithStack Wrap = errs.Wrap Wrapf = errs.Wrapf + WithMessage = errs.WithMessage WithMessagef = errs.WithMessagef ) @@ -49,41 +49,6 @@ func Newf(format string, args ...interface{}) error { return New(fmt.Sprintf(format, args...)) } -// WithMessage annotates err with a new message. -// The messages of given error is hidden. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg } -func (w *withMessage) Cause() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - type timeout interface { Timeout() bool } @@ -208,13 +173,13 @@ func libCause(err error) (bool, error) { } } -func LogError(logger zerolog.Logger, msg string, f func() error) { +func LogError(logger logging.Logger, msg string, f func() error) { if err := f(); err != nil { - logger.Error().Err(err).Msg(msg) + logger.Err(err).Error(msg) } } -type causer interface { +type Causer interface { Cause() error } @@ -227,7 +192,7 @@ func IsReconcile(err error) bool { return true } - if c, ok := err.(causer); ok { + if c, ok := err.(Causer); ok { return IsReconcile(c.Cause()) } diff --git a/pkg/util/k8sutil/finalizers.go b/pkg/util/k8sutil/finalizers.go index ce89f8ccb..a93d85e20 100644 --- a/pkg/util/k8sutil/finalizers.go +++ b/pkg/util/k8sutil/finalizers.go @@ -31,7 +31,6 @@ import ( "github.com/arangodb/kube-arangodb/pkg/util/errors" persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1" podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1" - "github.com/rs/zerolog" core "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) @@ -41,7 +40,7 @@ const ( ) // RemovePodFinalizers removes the given finalizers from the given pod. -func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, log zerolog.Logger, c podv1.ModInterface, p *core.Pod, +func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, c podv1.ModInterface, p *core.Pod, finalizers []string, ignoreNotFound bool) (int, error) { getFunc := func() (metav1.Object, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) @@ -65,7 +64,7 @@ func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, log ze *p = *result return nil } - if count, err := RemoveFinalizers(log, finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { + if count, err := RemoveFinalizers(finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { return 0, errors.WithStack(err) } else { return count, nil @@ -73,7 +72,7 @@ func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, log ze } // RemovePVCFinalizers removes the given finalizers from the given PVC. 
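The exported errors.Causer interface above is what IsReconcile, and later in this diff retry.isPermanent, rely on to walk wrapped errors. A minimal illustration with github.com/pkg/errors; reconcileError is a hypothetical stand-in for the sentinel being searched for.

package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Causer mirrors the interface exported from pkg/util/errors in this change.
type Causer interface {
	Cause() error
}

type reconcileError struct{ msg string }

func (e reconcileError) Error() string { return e.msg }

// isReconcile walks the Cause() chain looking for a reconcileError.
func isReconcile(err error) bool {
	for err != nil {
		if _, ok := err.(reconcileError); ok {
			return true
		}
		c, ok := err.(Causer)
		if !ok {
			return false
		}
		err = c.Cause()
	}
	return false
}

func main() {
	err := errors.Wrap(reconcileError{msg: "plan changed"}, "reconciliation required")
	fmt.Println(isReconcile(err)) // true: Wrap preserves the cause chain
}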
-func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim.Inspector, log zerolog.Logger, c persistentvolumeclaimv1.ModInterface, +func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim.Inspector, c persistentvolumeclaimv1.ModInterface, p *core.PersistentVolumeClaim, finalizers []string, ignoreNotFound bool) (int, error) { getFunc := func() (metav1.Object, error) { ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx) @@ -97,7 +96,7 @@ func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim *p = *result return nil } - if count, err := RemoveFinalizers(log, finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { + if count, err := RemoveFinalizers(finalizers, getFunc, updateFunc, ignoreNotFound); err != nil { return 0, errors.WithStack(err) } else { return count, nil @@ -108,7 +107,7 @@ func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim // The functions tries to get the object using the provided get function, // then remove the given finalizers and update the update using the given update function. // In case of an update conflict, the functions tries again. -func RemoveFinalizers(log zerolog.Logger, finalizers []string, getFunc func() (metav1.Object, error), updateFunc func(metav1.Object) error, ignoreNotFound bool) (int, error) { +func RemoveFinalizers(finalizers []string, getFunc func() (metav1.Object, error), updateFunc func(metav1.Object) error, ignoreNotFound bool) (int, error) { attempts := 0 for { attempts++ @@ -118,7 +117,6 @@ func RemoveFinalizers(log zerolog.Logger, finalizers []string, getFunc func() (m // Object no longer found and we're allowed to ignore that. return 0, nil } - log.Warn().Err(err).Msg("Failed to get resource") return 0, errors.WithStack(err) } original := obj.GetFinalizers() @@ -144,7 +142,6 @@ func RemoveFinalizers(log zerolog.Logger, finalizers []string, getFunc func() (m obj.SetFinalizers(newList) if err := updateFunc(obj); IsConflict(err) { if attempts > maxRemoveFinalizersAttempts { - log.Warn().Err(err).Msg("Failed to update resource with fewer finalizers after many attempts") return 0, errors.WithStack(err) } else { // Try again @@ -154,12 +151,10 @@ func RemoveFinalizers(log zerolog.Logger, finalizers []string, getFunc func() (m // Object no longer found and we're allowed to ignore that. return 0, nil } else if err != nil { - log.Warn().Err(err).Msg("Failed to update resource with fewer finalizers") return 0, errors.WithStack(err) } return z, nil } else { - log.Debug().Msg("No finalizers needed removal. Resource unchanged") return 0, nil } } diff --git a/pkg/util/k8sutil/informer.go b/pkg/util/k8sutil/informer.go index e0fa9b7d9..9b67a151e 100644 --- a/pkg/util/k8sutil/informer.go +++ b/pkg/util/k8sutil/informer.go @@ -21,12 +21,16 @@ package k8sutil import ( - "github.com/rs/zerolog" + "github.com/arangodb/kube-arangodb/pkg/logging" "k8s.io/apimachinery/pkg/fields" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/cache" ) +var ( + informerLogger = logging.Global().Get("kubernetes-informer") +) + // ResourceWatcher is a helper to watch for events in a specific type // of resource. The handler functions are protected from panics. type ResourceWatcher struct { @@ -35,7 +39,7 @@ type ResourceWatcher struct { // NewResourceWatcher creates a helper that watches for changes in a resource of a specific type. // If wraps the given handler functions, such that panics are caught and logged. 
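RemoveFinalizers, now free of the logger parameter, keeps the same get, strip, update, retry-on-conflict flow described in its comment. Below is a simplified sketch of that flow with the Kubernetes plumbing reduced to plain functions; the finalizer strings and the errConflict sentinel are placeholders.

package main

import (
	"errors"
	"fmt"
)

var errConflict = errors.New("conflict")

// removeFinalizers fetches the current finalizer list, strips the requested
// entries, writes the result back, and retries on update conflicts.
func removeFinalizers(remove []string, get func() ([]string, error), update func([]string) error, maxAttempts int) (int, error) {
	for attempt := 0; attempt < maxAttempts; attempt++ {
		current, err := get()
		if err != nil {
			return 0, err
		}
		kept := make([]string, 0, len(current))
		removed := 0
		for _, f := range current {
			drop := false
			for _, r := range remove {
				if f == r {
					drop = true
					break
				}
			}
			if drop {
				removed++
			} else {
				kept = append(kept, f)
			}
		}
		if removed == 0 {
			return 0, nil // nothing to remove, resource unchanged
		}
		if err := update(kept); errors.Is(err, errConflict) {
			continue // someone else updated the object: re-read and retry
		} else if err != nil {
			return 0, err
		}
		return removed, nil
	}
	return 0, errConflict
}

func main() {
	state := []string{"example.com/stop-sync", "example.com/other"}
	n, err := removeFinalizers(
		[]string{"example.com/stop-sync"},
		func() ([]string, error) { return state, nil },
		func(newList []string) error { state = newList; return nil },
		5,
	)
	fmt.Println(n, err, state) // 1 <nil> [example.com/other]
}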
-func NewResourceWatcher(log zerolog.Logger, getter cache.Getter, resource, namespace string, +func NewResourceWatcher(getter cache.Getter, resource, namespace string, objType runtime.Object, h cache.ResourceEventHandlerFuncs) *ResourceWatcher { source := cache.NewListWatchFromClient( getter, @@ -47,7 +51,7 @@ func NewResourceWatcher(log zerolog.Logger, getter cache.Getter, resource, names AddFunc: func(obj interface{}) { defer func() { if err := recover(); err != nil { - log.Error().Interface("error", err).Msg("Recovered from panic") + informerLogger.Interface("error", err).Error("Recovered from panic") } }() if h.AddFunc != nil { @@ -57,7 +61,7 @@ func NewResourceWatcher(log zerolog.Logger, getter cache.Getter, resource, names UpdateFunc: func(oldObj, newObj interface{}) { defer func() { if err := recover(); err != nil { - log.Error().Interface("error", err).Msg("Recovered from panic") + informerLogger.Interface("error", err).Error("Recovered from panic") } }() if h.UpdateFunc != nil { @@ -67,7 +71,7 @@ func NewResourceWatcher(log zerolog.Logger, getter cache.Getter, resource, names DeleteFunc: func(obj interface{}) { defer func() { if err := recover(); err != nil { - log.Error().Interface("error", err).Msg("Recovered from panic") + informerLogger.Interface("error", err).Error("Recovered from panic") } }() if h.DeleteFunc != nil { diff --git a/pkg/util/retry/retry.go b/pkg/util/retry/retry.go index 7bbf4fbc6..73fd4c642 100644 --- a/pkg/util/retry/retry.go +++ b/pkg/util/retry/retry.go @@ -48,15 +48,11 @@ func Permanent(err error) error { } func isPermanent(err error) (*permanentError, bool) { - type causer interface { - Cause() error - } - for err != nil { if pe, ok := err.(*permanentError); ok { return pe, true } - cause, ok := err.(causer) + cause, ok := err.(errors.Causer) if !ok { break }