mirror of
https://github.com/arangodb/kube-arangodb.git
synced 2024-12-14 11:57:37 +00:00
[Feature] Extract pod details (#1080)
This commit is contained in:
parent
46260630dc
commit
d95a1b1f1a
45 changed files with 481 additions and 636 deletions
|
@ -14,6 +14,7 @@
|
|||
- (Feature) (DBServer Maintenance) Agency adjustments
|
||||
- (Logging) Internal client trace
|
||||
- (QA) Member maintenance feature
|
||||
- (Feature) Extract Pod Details
|
||||
|
||||
## [1.2.15](https://github.com/arangodb/kube-arangodb/tree/1.2.15) (2022-07-20)
|
||||
- (Bugfix) Ensure pod names not too long
|
||||
|
|
|
@ -89,6 +89,7 @@ func init() {
|
|||
cmdLifecycle.AddCommand(cmdLifecycleCopy)
|
||||
cmdLifecycle.AddCommand(cmdLifecycleProbe)
|
||||
cmdLifecycle.AddCommand(cmdLifecycleWait)
|
||||
cmdLifecycle.AddCommand(cmdLifecycleStartup)
|
||||
|
||||
cmdLifecycleCopy.Flags().StringVar(&lifecycleCopyOptions.TargetDir, "target", "", "Target directory to copy the executable to")
|
||||
}
|
||||
|
|
75
cmd/lifecycle_startup.go
Normal file
75
cmd/lifecycle_startup.go
Normal file
|
@ -0,0 +1,75 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"time"
|
||||
|
||||
"github.com/spf13/cobra"
|
||||
|
||||
"github.com/arangodb/kube-arangodb/pkg/apis/shared"
|
||||
)
|
||||
|
||||
// cmdLifecycleStartup is the hidden "startup" sub-command. It runs an HTTP
// server that blocks until /stop is requested (see cmdLifecycleStartupFunc),
// allowing a container to hold pod startup until released.
var cmdLifecycleStartup = &cobra.Command{
	Use: "startup",
	RunE: cmdLifecycleStartupFunc,
	Hidden: true,
}
|
||||
|
||||
func cmdLifecycleStartupFunc(cmd *cobra.Command, args []string) error {
|
||||
var close bool
|
||||
|
||||
server := &http.Server{
|
||||
Addr: fmt.Sprintf(":%d", shared.ArangoPort),
|
||||
}
|
||||
|
||||
handlers := http.NewServeMux()
|
||||
|
||||
handlers.HandleFunc("/stop", func(writer http.ResponseWriter, request *http.Request) {
|
||||
writer.WriteHeader(http.StatusOK)
|
||||
close = true
|
||||
})
|
||||
|
||||
server.Handler = handlers
|
||||
|
||||
go func() {
|
||||
for {
|
||||
if close {
|
||||
break
|
||||
}
|
||||
time.Sleep(time.Millisecond)
|
||||
}
|
||||
server.Close()
|
||||
}()
|
||||
|
||||
if err := server.ListenAndServe(); err != nil {
|
||||
if errors.Is(err, http.ErrServerClosed) {
|
||||
return nil
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
486
cmd/reboot.go
486
cmd/reboot.go
|
@ -1,486 +0,0 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package cmd
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"path"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
core "k8s.io/api/core/v1"
|
||||
"k8s.io/apimachinery/pkg/api/resource"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/fields"
|
||||
"k8s.io/apimachinery/pkg/util/intstr"
|
||||
"k8s.io/client-go/kubernetes"
|
||||
|
||||
deplv1 "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
|
||||
acli "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/constants"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
|
||||
"github.com/arangodb/kube-arangodb/pkg/util/kclient"
|
||||
)
|
||||
|
||||
var (
	// cmdReboot creates a fresh ArangoDeployment that adopts a set of
	// existing (released) PersistentVolumes passed as arguments.
	cmdReboot = &cobra.Command{
		Use: "reboot",
		Run: cmdRebootRun,
		Hidden: false,
	}

	// rebootOptions holds the flag values of the "reboot" command.
	rebootOptions struct {
		// DeploymentName is the name of the ArangoDeployment to create.
		DeploymentName string
		// ImageName is the ArangoDB image used for the new deployment.
		ImageName string
		// LicenseSecretName optionally names a secret holding the license key.
		LicenseSecretName string
		// Coordinators is the initial number of coordinators.
		Coordinators int
	}

	// cmdRebootInspect runs inside an inspector pod and serves the UUID
	// found in the mounted database directory over HTTP (hidden).
	cmdRebootInspect = &cobra.Command{
		Use: "inspect",
		Run: cmdRebootInspectRun,
		Hidden: true,
	}

	// rebootInspectOptions holds the flag values of "reboot inspect".
	rebootInspectOptions struct {
		// TargetDir is the mounted database directory to inspect.
		TargetDir string
	}
)
|
||||
|
||||
// init wires the reboot commands into the command tree and registers their
// command-line flags.
func init() {
	cmdMain.AddCommand(cmdReboot)
	cmdReboot.AddCommand(cmdRebootInspect)
	cmdReboot.Flags().StringVar(&rebootOptions.DeploymentName, "deployment-name", "rebooted-deployment", "Name of the deployment")
	cmdReboot.Flags().StringVar(&rebootOptions.ImageName, "image-name", "arangodb/arangodb:latest", "Image used for the deployment")
	cmdReboot.Flags().StringVar(&rebootOptions.LicenseSecretName, "license-secret-name", "", "Name of secret for license key")
	cmdReboot.Flags().IntVar(&rebootOptions.Coordinators, "coordinators", 1, "Initial number of coordinators")

	cmdRebootInspect.Flags().StringVar(&rebootInspectOptions.TargetDir, "target-dir", "/data", "Path to mounted database directory")
}
|
||||
|
||||
// inspectResult is the payload produced by a successful volume inspection.
type inspectResult struct {
	// UUID is the ArangoDB server UUID read from the database directory.
	UUID string `json:"uuid,omitempty"`
}

// inspectResponse is the JSON document served by the inspector pod on /info.
type inspectResponse struct {
	// Error holds the inspection error message, if any.
	Error *string `json:"error,omitempty"`
	// Result holds the inspection result on success.
	Result *inspectResult `json:"result,omitempty"`
}

// VolumeInspectResult is the per-volume outcome sent back over the result
// channel by doVolumeInspection.
type VolumeInspectResult struct {
	// UUID of the ArangoDB server found on the volume.
	UUID string
	// Claim is the name of the PVC created for the volume.
	Claim string
	// Error is set when the inspection failed.
	Error error
}
|
||||
|
||||
func runVolumeInspector(ctx context.Context, kube kubernetes.Interface, ns, name, image, storageClassName string) (string, string, error) {
|
||||
|
||||
deletePVC := true
|
||||
claimname := "arangodb-reboot-pvc-" + name
|
||||
pvcspec := core.PersistentVolumeClaim{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: claimname,
|
||||
Labels: map[string]string{
|
||||
"app": "arangodb",
|
||||
"rebooted": "yes",
|
||||
},
|
||||
},
|
||||
Spec: core.PersistentVolumeClaimSpec{
|
||||
AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
|
||||
VolumeName: name,
|
||||
Resources: core.ResourceRequirements{
|
||||
Requests: core.ResourceList{
|
||||
core.ResourceStorage: *resource.NewQuantity(1024*1024*1024, resource.DecimalSI),
|
||||
},
|
||||
},
|
||||
StorageClassName: util.NewString(storageClassName),
|
||||
},
|
||||
}
|
||||
|
||||
_, err := kube.CoreV1().PersistentVolumeClaims(ns).Create(context.Background(), &pvcspec, meta.CreateOptions{})
|
||||
if err != nil {
|
||||
return "", "", errors.Wrap(err, "failed to create pvc")
|
||||
}
|
||||
defer func() {
|
||||
if deletePVC {
|
||||
logger.Str("pvc-name", claimname).Debug("deleting pvc")
|
||||
kube.CoreV1().PersistentVolumeClaims(ns).Delete(context.Background(), claimname, meta.DeleteOptions{})
|
||||
}
|
||||
}()
|
||||
|
||||
podname := "arangodb-reboot-pod-" + name
|
||||
podspec := core.Pod{
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: podname,
|
||||
},
|
||||
Spec: core.PodSpec{
|
||||
RestartPolicy: core.RestartPolicyNever,
|
||||
Containers: []core.Container{
|
||||
core.Container{
|
||||
Name: "inspector",
|
||||
Image: image,
|
||||
ImagePullPolicy: core.PullAlways,
|
||||
Command: []string{"arangodb_operator"},
|
||||
Args: []string{"reboot", "inspect"},
|
||||
Env: []core.EnvVar{
|
||||
core.EnvVar{
|
||||
Name: constants.EnvOperatorPodNamespace,
|
||||
Value: ns,
|
||||
},
|
||||
},
|
||||
VolumeMounts: []core.VolumeMount{
|
||||
core.VolumeMount{
|
||||
MountPath: "/data",
|
||||
Name: "data",
|
||||
},
|
||||
},
|
||||
Ports: []core.ContainerPort{
|
||||
core.ContainerPort{
|
||||
ContainerPort: 8080,
|
||||
},
|
||||
},
|
||||
ReadinessProbe: &core.Probe{
|
||||
Handler: core.Handler{
|
||||
HTTPGet: &core.HTTPGetAction{
|
||||
Path: "/info",
|
||||
Port: intstr.FromInt(8080),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
Volumes: []core.Volume{
|
||||
k8sutil.CreateVolumeWithPersitantVolumeClaim("data", claimname),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
_, err = kube.CoreV1().Pods(ns).Create(context.Background(), &podspec, meta.CreateOptions{})
|
||||
if err != nil {
|
||||
return "", "", errors.Wrap(err, "failed to create pod")
|
||||
}
|
||||
defer kube.CoreV1().Pods(ns).Delete(context.Background(), podname, meta.DeleteOptions{})
|
||||
|
||||
podwatch, err := kube.CoreV1().Pods(ns).Watch(context.Background(), meta.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", podname).String()})
|
||||
if err != nil {
|
||||
return "", "", errors.Wrap(err, "failed to watch for pod")
|
||||
}
|
||||
defer podwatch.Stop()
|
||||
|
||||
// wait until pod is terminated
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return "", "", ctx.Err()
|
||||
case ev, ok := <-podwatch.ResultChan():
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("result channel bad")
|
||||
}
|
||||
|
||||
// get the pod
|
||||
pod, ok := ev.Object.(*core.Pod)
|
||||
if !ok {
|
||||
return "", "", fmt.Errorf("failed to get pod")
|
||||
}
|
||||
|
||||
switch pod.Status.Phase {
|
||||
case core.PodFailed:
|
||||
return "", "", fmt.Errorf("pod failed: %s", pod.Status.Reason)
|
||||
case core.PodRunning:
|
||||
podReady := false
|
||||
for _, c := range pod.Status.Conditions {
|
||||
if c.Type == core.PodReady && c.Status == core.ConditionTrue {
|
||||
podReady = true
|
||||
}
|
||||
}
|
||||
|
||||
if !podReady {
|
||||
continue
|
||||
}
|
||||
|
||||
resp, err := http.Get("http://" + pod.Status.PodIP + ":8080/info")
|
||||
if err != nil {
|
||||
return "", "", errors.Wrap(err, "Failed to get info")
|
||||
}
|
||||
|
||||
body, err := ioutil.ReadAll(resp.Body)
|
||||
if err != nil {
|
||||
return "", "", errors.Wrap(err, "failed to read body")
|
||||
}
|
||||
|
||||
var info inspectResponse
|
||||
if err := json.Unmarshal(body, &info); err != nil {
|
||||
return "", "", errors.Wrap(err, "failed to unmarshal response")
|
||||
}
|
||||
|
||||
if info.Error != nil {
|
||||
return "", "", fmt.Errorf("pod returned error: %s", *info.Error)
|
||||
}
|
||||
deletePVC = false
|
||||
|
||||
return info.Result.UUID, claimname, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func doVolumeInspection(ctx context.Context, kube kubernetes.Interface, ns, name, storageClassName string, resultChan chan<- VolumeInspectResult, image string) {
|
||||
// Create Volume Claim
|
||||
// Create Pod mounting this volume
|
||||
// Wait for pod to be completed
|
||||
// Read logs - parse json
|
||||
// Delete pod
|
||||
uuid, claim, err := runVolumeInspector(ctx, kube, ns, name, image, storageClassName)
|
||||
if err != nil {
|
||||
resultChan <- VolumeInspectResult{Error: err}
|
||||
}
|
||||
resultChan <- VolumeInspectResult{UUID: uuid, Claim: claim}
|
||||
}
|
||||
|
||||
func checkVolumeAvailable(kube kubernetes.Interface, vname string) (VolumeInfo, error) {
|
||||
volume, err := kube.CoreV1().PersistentVolumes().Get(context.Background(), vname, meta.GetOptions{})
|
||||
if err != nil {
|
||||
return VolumeInfo{}, errors.Wrapf(err, "failed to GET volume %s", vname)
|
||||
}
|
||||
|
||||
switch volume.Status.Phase {
|
||||
case core.VolumeAvailable:
|
||||
break
|
||||
case core.VolumeReleased:
|
||||
// we have to remove the claim reference
|
||||
volume.Spec.ClaimRef = nil
|
||||
if _, err := kube.CoreV1().PersistentVolumes().Update(context.Background(), volume, meta.UpdateOptions{}); err != nil {
|
||||
return VolumeInfo{}, errors.Wrapf(err, "failed to remove claim reference")
|
||||
}
|
||||
default:
|
||||
return VolumeInfo{}, fmt.Errorf("Volume %s phase is %s, expected %s", vname, volume.Status.Phase, core.VolumeAvailable)
|
||||
}
|
||||
|
||||
return VolumeInfo{StorageClassName: volume.Spec.StorageClassName}, nil
|
||||
}
|
||||
|
||||
// VolumeInfo captures the per-volume information collected during preflight.
type VolumeInfo struct {
	// StorageClassName of the inspected PersistentVolume.
	StorageClassName string
}

// VolumeListInfo maps a volume name to its collected information.
type VolumeListInfo map[string]VolumeInfo
|
||||
|
||||
func preflightChecks(kube kubernetes.Interface, volumes []string) (VolumeListInfo, error) {
|
||||
info := make(VolumeListInfo)
|
||||
// Check if all values are released
|
||||
for _, vname := range volumes {
|
||||
vi, err := checkVolumeAvailable(kube, vname)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "preflight checks failed")
|
||||
}
|
||||
info[vname] = vi
|
||||
}
|
||||
|
||||
return info, nil
|
||||
}
|
||||
|
||||
func getMyImage(kube kubernetes.Interface, ns, name string) (string, error) {
|
||||
pod, err := kube.CoreV1().Pods(ns).Get(context.Background(), name, meta.GetOptions{})
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return pod.Spec.Containers[0].Image, nil
|
||||
}
|
||||
|
||||
// createArangoDeployment builds and creates an ArangoDeployment that adopts
// the inspected volumes: results whose UUID starts with "PRMR" become
// DBServers, "AGNT" become Agents; any other prefix is an error. Member
// statuses are pre-populated with the existing claim names so the operator
// reuses those volumes instead of provisioning new ones.
func createArangoDeployment(cli acli.Interface, ns, deplname, arangoimage string, results map[string]VolumeInspectResult) error {

	// Partition the inspection results by server type (UUID prefix).
	prmr := make(map[string]VolumeInspectResult)
	agnt := make(map[string]VolumeInspectResult)

	for vname, info := range results {
		if strings.HasPrefix(info.UUID, "PRMR") {
			prmr[vname] = info
		} else if strings.HasPrefix(info.UUID, "AGNT") {
			agnt[vname] = info
		} else {
			return fmt.Errorf("unknown server type by uuid: %s", info.UUID)
		}
	}

	// Group counts are derived from how many volumes of each type were
	// found; coordinators come from the command-line flag.
	depl := deplv1.ArangoDeployment{
		ObjectMeta: meta.ObjectMeta{
			Name: deplname,
		},
		Spec: deplv1.DeploymentSpec{
			Image: util.NewString(arangoimage),
			Coordinators: deplv1.ServerGroupSpec{
				Count: util.NewInt(rebootOptions.Coordinators),
			},
			Agents: deplv1.ServerGroupSpec{
				Count: util.NewInt(len(agnt)),
			},
			DBServers: deplv1.ServerGroupSpec{
				Count: util.NewInt(len(prmr)),
			},
		},
	}

	if rebootOptions.LicenseSecretName != "" {
		depl.Spec.License.SecretName = util.NewString(rebootOptions.LicenseSecretName)
	}

	// Pre-create member statuses pointing at the existing claims so the
	// members come up with their old data.
	for _, info := range agnt {
		depl.Status.Members.Agents = append(depl.Status.Members.Agents, deplv1.MemberStatus{
			ID: info.UUID,
			PersistentVolumeClaimName: info.Claim,
			PodName: k8sutil.CreatePodName(deplname, deplv1.ServerGroupAgents.AsRole(), info.UUID, "-rbt"),
		})
	}

	for _, info := range prmr {
		depl.Status.Members.DBServers = append(depl.Status.Members.DBServers, deplv1.MemberStatus{
			ID: info.UUID,
			PersistentVolumeClaimName: info.Claim,
			PodName: k8sutil.CreatePodName(deplname, deplv1.ServerGroupDBServers.AsRole(), info.UUID, "-rbt"),
		})
	}

	if _, err := cli.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), &depl, meta.CreateOptions{}); err != nil {
		return errors.Wrap(err, "failed to create ArangoDeployment")
	}

	return nil
}
|
||||
|
||||
func cmdRebootRun(cmd *cobra.Command, args []string) {
|
||||
|
||||
volumes := args
|
||||
namespace := os.Getenv(constants.EnvOperatorPodNamespace)
|
||||
podname := os.Getenv(constants.EnvOperatorPodName)
|
||||
|
||||
// Create kubernetes client
|
||||
client, ok := kclient.GetDefaultFactory().Client()
|
||||
if !ok {
|
||||
logger.Fatal("Failed to get client")
|
||||
}
|
||||
|
||||
kubecli := client.Kubernetes()
|
||||
|
||||
extcli := client.Arango()
|
||||
|
||||
image, err := getMyImage(kubecli, namespace, podname)
|
||||
if err != nil {
|
||||
logger.Err(err).Fatal("failed to get my image")
|
||||
}
|
||||
|
||||
vinfo, err := preflightChecks(kubecli, volumes)
|
||||
if err != nil {
|
||||
logger.Err(err).Fatal("preflight checks failed")
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
ctx := context.Background()
|
||||
resultChan := make(chan VolumeInspectResult)
|
||||
received := 0
|
||||
|
||||
for _, volumeName := range volumes {
|
||||
logger.Str("volume", volumeName).Debug("Starting inspection")
|
||||
wg.Add(1)
|
||||
go func(vn string) {
|
||||
defer wg.Done()
|
||||
doVolumeInspection(ctx, kubecli, namespace, vn, vinfo[vn].StorageClassName, resultChan, image)
|
||||
}(volumeName)
|
||||
}
|
||||
|
||||
members := make(map[string]VolumeInspectResult)
|
||||
|
||||
for {
|
||||
if received == len(volumes) {
|
||||
break
|
||||
}
|
||||
|
||||
select {
|
||||
case res := <-resultChan:
|
||||
if res.Error != nil {
|
||||
logger.Err(res.Error).Error("Inspection failed")
|
||||
} else {
|
||||
logger.Str("claim", res.Claim).Str("uuid", res.UUID).Info("Inspection completed")
|
||||
}
|
||||
members[res.UUID] = res
|
||||
received++
|
||||
case <-ctx.Done():
|
||||
panic(ctx.Err())
|
||||
}
|
||||
}
|
||||
|
||||
logger.Debug("results complete - generating ArangoDeployment resource")
|
||||
|
||||
if err := createArangoDeployment(extcli, namespace, rebootOptions.DeploymentName, rebootOptions.ImageName, members); err != nil {
|
||||
logger.Err(err).Error("failed to create deployment")
|
||||
}
|
||||
|
||||
logger.Info("ArangoDeployment created.")
|
||||
|
||||
// Wait for everyone to be completed
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// inspectDatabaseDirectory inspects the given directory and returns the inspection result or an error
|
||||
func inspectDatabaseDirectory(dirname string) (*inspectResult, error) {
|
||||
// Access the database directory and look for the following files
|
||||
// UUID
|
||||
|
||||
uuidfile := path.Join(dirname, "UUID")
|
||||
uuid, err := ioutil.ReadFile(path.Clean(uuidfile))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &inspectResult{UUID: strings.TrimSpace(string(uuid))}, nil
|
||||
}
|
||||
|
||||
func cmdRebootInspectRun(cmd *cobra.Command, args []string) {
|
||||
|
||||
var response inspectResponse
|
||||
result, err := inspectDatabaseDirectory(rebootInspectOptions.TargetDir)
|
||||
if err != nil {
|
||||
response.Error = util.NewString(err.Error())
|
||||
}
|
||||
|
||||
response.Result = result
|
||||
|
||||
json, err := json.Marshal(&response)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
http.HandleFunc("/info", func(w http.ResponseWriter, req *http.Request) {
|
||||
w.Write(json)
|
||||
})
|
||||
|
||||
if http.ListenAndServe(":8080", nil); err != nil {
|
||||
logger.Err(err).Fatal("Failed to listen and serve")
|
||||
}
|
||||
}
|
|
@ -298,8 +298,8 @@ func (ds DeploymentStatusMembers) PodNames() []string {
|
|||
var n []string
|
||||
|
||||
for _, m := range ds.AsList() {
|
||||
if m.Member.PodName != "" {
|
||||
n = append(n, m.Member.PodName)
|
||||
if name := m.Member.Pod.GetName(); name != "" {
|
||||
n = append(n, name)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
73
pkg/apis/deployment/v1/member_pod_status.go
Normal file
73
pkg/apis/deployment/v1/member_pod_status.go
Normal file
|
@ -0,0 +1,73 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package v1
|
||||
|
||||
import "k8s.io/apimachinery/pkg/types"
|
||||
|
||||
type MemberPodStatus struct {
|
||||
Name string `json:"name"`
|
||||
UID types.UID `json:"uid"`
|
||||
SpecVersion string `json:"specVersion,omitempty"`
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) Equal(other *MemberPodStatus) bool {
|
||||
if m == nil && other == nil {
|
||||
return true
|
||||
}
|
||||
if m == nil || other == nil {
|
||||
return false
|
||||
}
|
||||
return m.Name == other.Name &&
|
||||
m.UID == other.UID &&
|
||||
m.SpecVersion == other.SpecVersion
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) GetName() string {
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return m.Name
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) GetUID() types.UID {
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return m.UID
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) Propagate(s *MemberStatus) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
s.PodName = ""
|
||||
s.PodUID = ""
|
||||
s.PodSpecVersion = ""
|
||||
} else {
|
||||
s.PodName = m.Name
|
||||
s.PodUID = m.UID
|
||||
s.PodSpecVersion = m.SpecVersion
|
||||
}
|
||||
}
|
|
@ -52,12 +52,6 @@ type MemberStatus struct {
|
|||
CreatedAt meta.Time `json:"created-at"`
|
||||
// PersistentVolumeClaimName holds the name of the persistent volume claim used for this member (if any).
|
||||
PersistentVolumeClaimName string `json:"persistentVolumeClaimName,omitempty"`
|
||||
// PodName holds the name of the Pod that currently runs this member
|
||||
PodName string `json:"podName,omitempty"`
|
||||
// PodUID holds the UID of the Pod that currently runs this member
|
||||
PodUID types.UID `json:"podUID,omitempty"`
|
||||
// PodSpecVersion holds the checksum of Pod spec that currently runs this member. Used to rotate pods
|
||||
PodSpecVersion string `json:"podSpecVersion,omitempty"`
|
||||
// Conditions specific to this member
|
||||
Conditions ConditionList `json:"conditions,omitempty"`
|
||||
// RecentTerminatons holds the times when this member was recently terminated.
|
||||
|
@ -84,10 +78,20 @@ type MemberStatus struct {
|
|||
Endpoint *string `json:"endpoint,omitempty"`
|
||||
// Topology define topology member status assignment
|
||||
Topology *TopologyMemberStatus `json:"topology,omitempty"`
|
||||
Pod *MemberPodStatus `json:"pod,omitempty"`
|
||||
|
||||
// deprecated
|
||||
// SideCarSpecs contains list of specifications specified for side cars
|
||||
SideCarSpecs map[string]core.Container `json:"sidecars-specs,omitempty"`
|
||||
// deprecated
|
||||
// PodName holds the name of the Pod that currently runs this member
|
||||
PodName string `json:"podName,omitempty"`
|
||||
// deprecated
|
||||
// PodUID holds the UID of the Pod that currently runs this member
|
||||
PodUID types.UID `json:"podUID,omitempty"`
|
||||
// deprecated
|
||||
// PodSpecVersion holds the checksum of Pod spec that currently runs this member. Used to rotate pods
|
||||
PodSpecVersion string `json:"podSpecVersion,omitempty"`
|
||||
}
|
||||
|
||||
// Equal checks for equality
|
||||
|
@ -99,7 +103,7 @@ func (s MemberStatus) Equal(other MemberStatus) bool {
|
|||
s.Phase == other.Phase &&
|
||||
util.TimeCompareEqual(s.CreatedAt, other.CreatedAt) &&
|
||||
s.PersistentVolumeClaimName == other.PersistentVolumeClaimName &&
|
||||
s.PodName == other.PodName &&
|
||||
s.Pod.Equal(other.Pod) &&
|
||||
s.Conditions.Equal(other.Conditions) &&
|
||||
s.IsInitialized == other.IsInitialized &&
|
||||
s.CleanoutJobID == other.CleanoutJobID &&
|
||||
|
|
|
@ -78,7 +78,7 @@ func (l MemberStatusList) ElementByID(id string) (MemberStatus, bool) {
|
|||
// If no such element exists, an empty element and false is returned.
|
||||
func (l MemberStatusList) ElementByPodName(podName string) (MemberStatus, bool) {
|
||||
for i, x := range l {
|
||||
if x.PodName == podName {
|
||||
if x.Pod.GetName() == podName {
|
||||
return l[i], true
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,13 +50,13 @@ func TestMemberStatusList(t *testing.T) {
|
|||
assert.Equal(t, m1.ID, (*list)[0].ID)
|
||||
assert.Equal(t, m2.ID, (*list)[1].ID)
|
||||
|
||||
m2.PodName = "foo"
|
||||
m2.Pod = &MemberPodStatus{Name: "foo"}
|
||||
assert.NoError(t, list.update(m2))
|
||||
assert.Equal(t, 2, len(*list))
|
||||
assert.True(t, list.ContainsID(m2.ID))
|
||||
x, found := list.ElementByPodName("foo")
|
||||
assert.True(t, found)
|
||||
assert.Equal(t, "foo", x.PodName)
|
||||
assert.Equal(t, "foo", x.Pod.GetName())
|
||||
assert.Equal(t, m2.ID, x.ID)
|
||||
|
||||
assert.NoError(t, list.add(m3))
|
||||
|
|
|
@ -32,13 +32,14 @@ const (
|
|||
ServerGroupReservedInitContainerNameLifecycle = "init-lifecycle"
|
||||
ServerGroupReservedInitContainerNameUUID = "uuid"
|
||||
ServerGroupReservedInitContainerNameWait = "wait"
|
||||
ServerGroupReservedInitContainerNameStartup = "arango-init-startup"
|
||||
ServerGroupReservedInitContainerNameUpgrade = "upgrade"
|
||||
ServerGroupReservedInitContainerNameVersionCheck = "version-check"
|
||||
)
|
||||
|
||||
func IsReservedServerGroupInitContainerName(name string) bool {
|
||||
switch name {
|
||||
case ServerGroupReservedInitContainerNameLifecycle, ServerGroupReservedInitContainerNameUUID, ServerGroupReservedInitContainerNameUpgrade, ServerGroupReservedInitContainerNameVersionCheck:
|
||||
case ServerGroupReservedInitContainerNameLifecycle, ServerGroupReservedInitContainerNameUUID, ServerGroupReservedInitContainerNameUpgrade, ServerGroupReservedInitContainerNameVersionCheck, ServerGroupReservedInitContainerNameStartup:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
|
|
21
pkg/apis/deployment/v1/zz_generated.deepcopy.go
generated
21
pkg/apis/deployment/v1/zz_generated.deepcopy.go
generated
|
@ -1659,6 +1659,22 @@ func (in List) DeepCopy() List {
|
|||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MemberPodStatus) DeepCopyInto(out *MemberPodStatus) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberPodStatus.
|
||||
func (in *MemberPodStatus) DeepCopy() *MemberPodStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MemberPodStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MemberStatus) DeepCopyInto(out *MemberStatus) {
|
||||
*out = *in
|
||||
|
@ -1702,6 +1718,11 @@ func (in *MemberStatus) DeepCopyInto(out *MemberStatus) {
|
|||
*out = new(TopologyMemberStatus)
|
||||
**out = **in
|
||||
}
|
||||
if in.Pod != nil {
|
||||
in, out := &in.Pod, &out.Pod
|
||||
*out = new(MemberPodStatus)
|
||||
**out = **in
|
||||
}
|
||||
if in.SideCarSpecs != nil {
|
||||
in, out := &in.SideCarSpecs, &out.SideCarSpecs
|
||||
*out = make(map[string]corev1.Container, len(*in))
|
||||
|
|
|
@ -298,8 +298,8 @@ func (ds DeploymentStatusMembers) PodNames() []string {
|
|||
var n []string
|
||||
|
||||
for _, m := range ds.AsList() {
|
||||
if m.Member.PodName != "" {
|
||||
n = append(n, m.Member.PodName)
|
||||
if name := m.Member.Pod.GetName(); name != "" {
|
||||
n = append(n, name)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
73
pkg/apis/deployment/v2alpha1/member_pod_status.go
Normal file
73
pkg/apis/deployment/v2alpha1/member_pod_status.go
Normal file
|
@ -0,0 +1,73 @@
|
|||
//
|
||||
// DISCLAIMER
|
||||
//
|
||||
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
//
|
||||
// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
//
|
||||
|
||||
package v2alpha1
|
||||
|
||||
import "k8s.io/apimachinery/pkg/types"
|
||||
|
||||
type MemberPodStatus struct {
|
||||
Name string `json:"name"`
|
||||
UID types.UID `json:"uid"`
|
||||
SpecVersion string `json:"specVersion,omitempty"`
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) Equal(other *MemberPodStatus) bool {
|
||||
if m == nil && other == nil {
|
||||
return true
|
||||
}
|
||||
if m == nil || other == nil {
|
||||
return false
|
||||
}
|
||||
return m.Name == other.Name &&
|
||||
m.UID == other.UID &&
|
||||
m.SpecVersion == other.SpecVersion
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) GetName() string {
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return m.Name
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) GetUID() types.UID {
|
||||
if m == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
return m.UID
|
||||
}
|
||||
|
||||
func (m *MemberPodStatus) Propagate(s *MemberStatus) {
|
||||
if s == nil {
|
||||
return
|
||||
}
|
||||
|
||||
if m == nil {
|
||||
s.PodName = ""
|
||||
s.PodUID = ""
|
||||
s.PodSpecVersion = ""
|
||||
} else {
|
||||
s.PodName = m.Name
|
||||
s.PodUID = m.UID
|
||||
s.PodSpecVersion = m.SpecVersion
|
||||
}
|
||||
}
|
|
@ -52,12 +52,6 @@ type MemberStatus struct {
|
|||
CreatedAt meta.Time `json:"created-at"`
|
||||
// PersistentVolumeClaimName holds the name of the persistent volume claim used for this member (if any).
|
||||
PersistentVolumeClaimName string `json:"persistentVolumeClaimName,omitempty"`
|
||||
// PodName holds the name of the Pod that currently runs this member
|
||||
PodName string `json:"podName,omitempty"`
|
||||
// PodUID holds the UID of the Pod that currently runs this member
|
||||
PodUID types.UID `json:"podUID,omitempty"`
|
||||
// PodSpecVersion holds the checksum of Pod spec that currently runs this member. Used to rotate pods
|
||||
PodSpecVersion string `json:"podSpecVersion,omitempty"`
|
||||
// Conditions specific to this member
|
||||
Conditions ConditionList `json:"conditions,omitempty"`
|
||||
// RecentTerminatons holds the times when this member was recently terminated.
|
||||
|
@ -84,10 +78,20 @@ type MemberStatus struct {
|
|||
Endpoint *string `json:"endpoint,omitempty"`
|
||||
// Topology define topology member status assignment
|
||||
Topology *TopologyMemberStatus `json:"topology,omitempty"`
|
||||
Pod *MemberPodStatus `json:"pod,omitempty"`
|
||||
|
||||
// deprecated
|
||||
// SideCarSpecs contains list of specifications specified for side cars
|
||||
SideCarSpecs map[string]core.Container `json:"sidecars-specs,omitempty"`
|
||||
// deprecated
|
||||
// PodName holds the name of the Pod that currently runs this member
|
||||
PodName string `json:"podName,omitempty"`
|
||||
// deprecated
|
||||
// PodUID holds the UID of the Pod that currently runs this member
|
||||
PodUID types.UID `json:"podUID,omitempty"`
|
||||
// deprecated
|
||||
// PodSpecVersion holds the checksum of Pod spec that currently runs this member. Used to rotate pods
|
||||
PodSpecVersion string `json:"podSpecVersion,omitempty"`
|
||||
}
|
||||
|
||||
// Equal checks for equality
|
||||
|
@ -99,7 +103,7 @@ func (s MemberStatus) Equal(other MemberStatus) bool {
|
|||
s.Phase == other.Phase &&
|
||||
util.TimeCompareEqual(s.CreatedAt, other.CreatedAt) &&
|
||||
s.PersistentVolumeClaimName == other.PersistentVolumeClaimName &&
|
||||
s.PodName == other.PodName &&
|
||||
s.Pod.Equal(other.Pod) &&
|
||||
s.Conditions.Equal(other.Conditions) &&
|
||||
s.IsInitialized == other.IsInitialized &&
|
||||
s.CleanoutJobID == other.CleanoutJobID &&
|
||||
|
|
|
@ -78,7 +78,7 @@ func (l MemberStatusList) ElementByID(id string) (MemberStatus, bool) {
|
|||
// If no such element exists, an empty element and false is returned.
|
||||
func (l MemberStatusList) ElementByPodName(podName string) (MemberStatus, bool) {
|
||||
for i, x := range l {
|
||||
if x.PodName == podName {
|
||||
if x.Pod.GetName() == podName {
|
||||
return l[i], true
|
||||
}
|
||||
}
|
||||
|
|
|
@ -50,13 +50,13 @@ func TestMemberStatusList(t *testing.T) {
|
|||
assert.Equal(t, m1.ID, (*list)[0].ID)
|
||||
assert.Equal(t, m2.ID, (*list)[1].ID)
|
||||
|
||||
m2.PodName = "foo"
|
||||
m2.Pod = &MemberPodStatus{Name: "foo"}
|
||||
assert.NoError(t, list.update(m2))
|
||||
assert.Equal(t, 2, len(*list))
|
||||
assert.True(t, list.ContainsID(m2.ID))
|
||||
x, found := list.ElementByPodName("foo")
|
||||
assert.True(t, found)
|
||||
assert.Equal(t, "foo", x.PodName)
|
||||
assert.Equal(t, "foo", x.Pod.GetName())
|
||||
assert.Equal(t, m2.ID, x.ID)
|
||||
|
||||
assert.NoError(t, list.add(m3))
|
||||
|
|
|
@ -32,13 +32,14 @@ const (
|
|||
ServerGroupReservedInitContainerNameLifecycle = "init-lifecycle"
|
||||
ServerGroupReservedInitContainerNameUUID = "uuid"
|
||||
ServerGroupReservedInitContainerNameWait = "wait"
|
||||
ServerGroupReservedInitContainerNameStartup = "arango-init-startup"
|
||||
ServerGroupReservedInitContainerNameUpgrade = "upgrade"
|
||||
ServerGroupReservedInitContainerNameVersionCheck = "version-check"
|
||||
)
|
||||
|
||||
func IsReservedServerGroupInitContainerName(name string) bool {
|
||||
switch name {
|
||||
case ServerGroupReservedInitContainerNameLifecycle, ServerGroupReservedInitContainerNameUUID, ServerGroupReservedInitContainerNameUpgrade, ServerGroupReservedInitContainerNameVersionCheck:
|
||||
case ServerGroupReservedInitContainerNameLifecycle, ServerGroupReservedInitContainerNameUUID, ServerGroupReservedInitContainerNameUpgrade, ServerGroupReservedInitContainerNameVersionCheck, ServerGroupReservedInitContainerNameStartup:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
|
|
|
@ -1659,6 +1659,22 @@ func (in List) DeepCopy() List {
|
|||
return *out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MemberPodStatus) DeepCopyInto(out *MemberPodStatus) {
|
||||
*out = *in
|
||||
return
|
||||
}
|
||||
|
||||
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MemberPodStatus.
|
||||
func (in *MemberPodStatus) DeepCopy() *MemberPodStatus {
|
||||
if in == nil {
|
||||
return nil
|
||||
}
|
||||
out := new(MemberPodStatus)
|
||||
in.DeepCopyInto(out)
|
||||
return out
|
||||
}
|
||||
|
||||
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
|
||||
func (in *MemberStatus) DeepCopyInto(out *MemberStatus) {
|
||||
*out = *in
|
||||
|
@ -1702,6 +1718,11 @@ func (in *MemberStatus) DeepCopyInto(out *MemberStatus) {
|
|||
*out = new(TopologyMemberStatus)
|
||||
**out = **in
|
||||
}
|
||||
if in.Pod != nil {
|
||||
in, out := &in.Pod, &out.Pod
|
||||
*out = new(MemberPodStatus)
|
||||
**out = **in
|
||||
}
|
||||
if in.SideCarSpecs != nil {
|
||||
in, out := &in.SideCarSpecs, &out.SideCarSpecs
|
||||
*out = make(map[string]v1.Container, len(*in))
|
||||
|
|
|
@ -58,7 +58,7 @@ func actionWrap(a api.Action, member *api.MemberStatus, wrap ...actionWrapper) a
|
|||
func actionWrapMemberUID(a api.Action, member *api.MemberStatus) api.Action {
|
||||
switch a.Type {
|
||||
case api.ActionTypeShutdownMember, api.ActionTypeKillMemberPod, api.ActionTypeRotateStartMember, api.ActionTypeUpgradeMember:
|
||||
if q := member.PodUID; q != "" {
|
||||
if q := member.Pod.GetUID(); q != "" {
|
||||
return a.AddParam(api.ParamPodUID, string(q))
|
||||
}
|
||||
return a
|
||||
|
|
|
@ -52,7 +52,10 @@ var phase = phaseMap{
|
|||
m.RID = uuid.NewUUID()
|
||||
|
||||
// Clean Pod details
|
||||
m.PodUID = ""
|
||||
if m.Pod != nil {
|
||||
m.Pod.UID = ""
|
||||
}
|
||||
m.Pod.Propagate(m)
|
||||
|
||||
// Add ClusterID
|
||||
if m.ClusterID == "" {
|
||||
|
|
|
@ -138,7 +138,6 @@ func (d *Deployment) renderMember(spec api.DeploymentSpec, status *api.Deploymen
|
|||
CreatedAt: meta.Now(),
|
||||
Phase: api.MemberPhaseNone,
|
||||
PersistentVolumeClaimName: shared.CreatePersistentVolumeClaimName(deploymentName, role, id),
|
||||
PodName: "",
|
||||
Image: apiObject.Status.CurrentImage,
|
||||
Architecture: &arch,
|
||||
}, nil
|
||||
|
@ -150,7 +149,6 @@ func (d *Deployment) renderMember(spec api.DeploymentSpec, status *api.Deploymen
|
|||
CreatedAt: meta.Now(),
|
||||
Phase: api.MemberPhaseNone,
|
||||
PersistentVolumeClaimName: shared.CreatePersistentVolumeClaimName(deploymentName, role, id),
|
||||
PodName: "",
|
||||
Image: apiObject.Status.CurrentImage,
|
||||
Architecture: &arch,
|
||||
}, nil
|
||||
|
@ -162,7 +160,6 @@ func (d *Deployment) renderMember(spec api.DeploymentSpec, status *api.Deploymen
|
|||
CreatedAt: meta.Now(),
|
||||
Phase: api.MemberPhaseNone,
|
||||
PersistentVolumeClaimName: shared.CreatePersistentVolumeClaimName(deploymentName, role, id),
|
||||
PodName: "",
|
||||
Image: apiObject.Status.CurrentImage,
|
||||
Architecture: &arch,
|
||||
}, nil
|
||||
|
@ -174,7 +171,6 @@ func (d *Deployment) renderMember(spec api.DeploymentSpec, status *api.Deploymen
|
|||
CreatedAt: meta.Now(),
|
||||
Phase: api.MemberPhaseNone,
|
||||
PersistentVolumeClaimName: "",
|
||||
PodName: "",
|
||||
Image: apiObject.Status.CurrentImage,
|
||||
Architecture: &arch,
|
||||
}, nil
|
||||
|
@ -186,7 +182,6 @@ func (d *Deployment) renderMember(spec api.DeploymentSpec, status *api.Deploymen
|
|||
CreatedAt: meta.Now(),
|
||||
Phase: api.MemberPhaseNone,
|
||||
PersistentVolumeClaimName: "",
|
||||
PodName: "",
|
||||
Image: apiObject.Status.CurrentImage,
|
||||
Architecture: &arch,
|
||||
}, nil
|
||||
|
@ -198,7 +193,6 @@ func (d *Deployment) renderMember(spec api.DeploymentSpec, status *api.Deploymen
|
|||
CreatedAt: meta.Now(),
|
||||
Phase: api.MemberPhaseNone,
|
||||
PersistentVolumeClaimName: "",
|
||||
PodName: "",
|
||||
Image: apiObject.Status.CurrentImage,
|
||||
Architecture: &arch,
|
||||
}, nil
|
||||
|
|
|
@ -137,7 +137,7 @@ func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, erro
|
|||
}
|
||||
|
||||
if err := c.UpdateStatus(ctx, func(member *api.ArangoMember, status *api.ArangoMemberStatus) bool {
|
||||
if (status.Template == nil || status.Template.PodSpec == nil) && (m.PodSpecVersion == "" || m.PodSpecVersion == template.PodSpecChecksum) {
|
||||
if (status.Template == nil || status.Template.PodSpec == nil) && (m.Pod == nil || m.Pod.SpecVersion == "" || m.Pod.SpecVersion == template.PodSpecChecksum) {
|
||||
status.Template = template.DeepCopy()
|
||||
}
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ func (a *actionKillMemberPod) Start(ctx context.Context) (bool, error) {
|
|||
return true, nil
|
||||
}
|
||||
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil {
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.Pod.GetName(), meta.DeleteOptions{}); err != nil {
|
||||
a.log.Err(err).Error("Unable to kill pod")
|
||||
return true, nil
|
||||
}
|
||||
|
@ -101,7 +101,7 @@ func (a *actionKillMemberPod) CheckProgress(ctx context.Context) (bool, bool, er
|
|||
return false, false, errors.Newf("Client is not ready")
|
||||
}
|
||||
|
||||
p, ok := cache.Pod().V1().GetSimple(m.PodName)
|
||||
p, ok := cache.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !ok {
|
||||
a.log.Error("No such member")
|
||||
return true, false, nil
|
||||
|
|
|
@ -108,9 +108,9 @@ func (a *actionRemoveMember) Start(ctx context.Context) (bool, error) {
|
|||
}
|
||||
}
|
||||
}
|
||||
if m.PodName != "" {
|
||||
if p := m.Pod.GetName(); p != "" {
|
||||
// Remove the pod (if any)
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil {
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, p, meta.DeleteOptions{}); err != nil {
|
||||
if !apiErrors.IsNotFound(err) {
|
||||
return false, errors.WithStack(err)
|
||||
}
|
||||
|
|
|
@ -101,7 +101,7 @@ func (a *actionRotateMember) CheckProgress(ctx context.Context) (bool, bool, err
|
|||
defer cancel()
|
||||
|
||||
// Pod is terminated, we can now remove it
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctxChild, m.PodName, meta.DeleteOptions{}); err != nil {
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctxChild, m.Pod.GetName(), meta.DeleteOptions{}); err != nil {
|
||||
if !k8sutil.IsNotFound(err) {
|
||||
a.log.Err(err).Error("Unable to delete pod")
|
||||
return false, false, nil
|
||||
|
|
|
@ -97,7 +97,7 @@ func (a *actionRotateStartMember) CheckProgress(ctx context.Context) (bool, bool
|
|||
}
|
||||
|
||||
// Pod is terminated, we can now remove it
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil {
|
||||
if err := cache.Client().Kubernetes().CoreV1().Pods(cache.Namespace()).Delete(ctx, m.Pod.GetName(), meta.DeleteOptions{}); err != nil {
|
||||
if !k8sutil.IsNotFound(err) {
|
||||
a.log.Err(err).Error("Unable to delete pod")
|
||||
return false, false, nil
|
||||
|
|
|
@ -153,9 +153,9 @@ func (a actionRuntimeContainerArgsUpdate) Start(ctx context.Context) (bool, erro
|
|||
return false, errors.Errorf("ArangoMember %s not found", memberName)
|
||||
}
|
||||
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.PodName)
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !ok {
|
||||
a.log.Str("podName", m.PodName).Info("pod is not present")
|
||||
a.log.Str("podName", m.Pod.GetName()).Info("pod is not present")
|
||||
return true, nil
|
||||
}
|
||||
|
||||
|
|
|
@ -153,7 +153,7 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err
|
|||
return false, err
|
||||
}
|
||||
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.PodName)
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !ok {
|
||||
a.log.Info("pod is not present")
|
||||
return true, nil
|
||||
|
@ -221,7 +221,7 @@ func (a actionRuntimeContainerImageUpdate) CheckProgress(ctx context.Context) (b
|
|||
return false, false, nil
|
||||
}
|
||||
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.PodName)
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !ok {
|
||||
a.log.Info("pod is not present")
|
||||
return true, false, nil
|
||||
|
|
|
@ -144,16 +144,16 @@ func (r *Reconciler) isStorageClassChanged(_ context.Context, apiObject k8sutil.
|
|||
|
||||
// From here on it is known that the member requires replacement, so `true` must be returned.
|
||||
// If pod does not exist then it will try next time.
|
||||
if pod, ok := cache.Pod().V1().GetSimple(member.PodName); ok {
|
||||
if pod, ok := cache.Pod().V1().GetSimple(member.Pod.GetName()); ok {
|
||||
if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok {
|
||||
r.log.
|
||||
Str("pod-name", member.PodName).
|
||||
Str("pod-name", member.Pod.GetName()).
|
||||
Str("server-group", group.AsRole()).
|
||||
Warn("try changing a storage class name, but %s", getRequiredReplaceMessage(member.PodName))
|
||||
Warn("try changing a storage class name, but %s", getRequiredReplaceMessage(member.Pod.GetName()))
|
||||
// No return here.
|
||||
}
|
||||
} else {
|
||||
return false, "", fmt.Errorf("failed to get pod %s", member.PodName)
|
||||
return false, "", fmt.Errorf("failed to get pod %s", member.Pod.GetName())
|
||||
}
|
||||
|
||||
return true, "Storage class has changed", nil
|
||||
|
@ -210,14 +210,14 @@ func (r *Reconciler) isVolumeSizeChanged(_ context.Context, _ k8sutil.APIObject,
|
|||
|
||||
// From here on it is known that the member requires replacement, so `true` must be returned.
|
||||
// If pod does not exist then it will try next time.
|
||||
if pod, ok := cache.Pod().V1().GetSimple(member.PodName); ok {
|
||||
if pod, ok := cache.Pod().V1().GetSimple(member.Pod.GetName()); ok {
|
||||
if _, ok := pod.GetAnnotations()[deployment.ArangoDeploymentPodReplaceAnnotation]; !ok {
|
||||
r.log.Str("pod-name", member.PodName).
|
||||
Warn("try shrinking volume size, but %s", getRequiredReplaceMessage(member.PodName))
|
||||
r.log.Str("pod-name", member.Pod.GetName()).
|
||||
Warn("try shrinking volume size, but %s", getRequiredReplaceMessage(member.Pod.GetName()))
|
||||
// No return here.
|
||||
}
|
||||
} else {
|
||||
return false, "", fmt.Errorf("failed to get pod %s", member.PodName)
|
||||
return false, "", fmt.Errorf("failed to get pod %s", member.Pod.GetName())
|
||||
}
|
||||
|
||||
return true, "Volume is shrunk", nil
|
||||
|
|
|
@ -49,14 +49,14 @@ const (
|
|||
func getShutdownHelper(a actionImpl) (ActionCore, api.MemberStatus, bool) {
|
||||
m, ok := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
|
||||
if !ok {
|
||||
a.log.Str("pod-name", m.PodName).Warn("member is already gone")
|
||||
a.log.Str("pod-name", m.Pod.GetName()).Warn("member is already gone")
|
||||
|
||||
return nil, api.MemberStatus{}, false
|
||||
}
|
||||
|
||||
cache, ok := a.actionCtx.ACS().ClusterCache(m.ClusterID)
|
||||
if !ok {
|
||||
a.log.Str("pod-name", m.PodName).Warn("Cluster is not ready")
|
||||
a.log.Str("pod-name", m.Pod.GetName()).Warn("Cluster is not ready")
|
||||
|
||||
return nil, api.MemberStatus{}, false
|
||||
}
|
||||
|
@ -66,9 +66,9 @@ func getShutdownHelper(a actionImpl) (ActionCore, api.MemberStatus, bool) {
|
|||
return NewActionSuccess(), m, true
|
||||
}
|
||||
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.PodName)
|
||||
pod, ok := cache.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !ok {
|
||||
a.log.Str("pod-name", m.PodName).Warn("pod is already gone")
|
||||
a.log.Str("pod-name", m.Pod.GetName()).Warn("pod is already gone")
|
||||
// Pod does not exist, so create success action to finish it immediately.
|
||||
return NewActionSuccess(), m, true
|
||||
}
|
||||
|
@ -101,7 +101,7 @@ func (s shutdownHelperAPI) Start(ctx context.Context) (bool, error) {
|
|||
s.log.Info("Using API to shutdown member")
|
||||
|
||||
group := s.action.Group
|
||||
podName := s.memberStatus.PodName
|
||||
podName := s.memberStatus.Pod.GetName()
|
||||
if podName == "" {
|
||||
s.log.Warn("Pod is empty")
|
||||
return true, nil
|
||||
|
@ -190,7 +190,7 @@ type shutdownHelperDelete struct {
|
|||
func (s shutdownHelperDelete) Start(ctx context.Context) (bool, error) {
|
||||
s.log.Info("Using Pod Delete to shutdown member")
|
||||
|
||||
podName := s.memberStatus.PodName
|
||||
podName := s.memberStatus.Pod.GetName()
|
||||
if podName == "" {
|
||||
s.log.Warn("Pod is empty")
|
||||
return true, nil
|
||||
|
@ -225,7 +225,7 @@ func (s shutdownHelperDelete) CheckProgress(ctx context.Context) (bool, bool, er
|
|||
return false, false, nil
|
||||
}
|
||||
|
||||
podName := s.memberStatus.PodName
|
||||
podName := s.memberStatus.Pod.GetName()
|
||||
if podName != "" {
|
||||
if _, ok := cache.Pod().V1().GetSimple(podName); ok {
|
||||
s.log.Warn("Pod still exists")
|
||||
|
@ -252,7 +252,7 @@ func (s shutdownNow) Start(ctx context.Context) (bool, error) {
|
|||
|
||||
// CheckProgress starts removing pod forcefully and checks if has it been removed.
|
||||
func (s shutdownNow) CheckProgress(ctx context.Context) (bool, bool, error) {
|
||||
podName := s.memberStatus.PodName
|
||||
podName := s.memberStatus.Pod.GetName()
|
||||
|
||||
cache, ok := s.actionCtx.ACS().ClusterCache(s.memberStatus.ClusterID)
|
||||
if !ok {
|
||||
|
@ -266,7 +266,7 @@ func (s shutdownNow) CheckProgress(ctx context.Context) (bool, bool, error) {
|
|||
return true, false, nil
|
||||
}
|
||||
|
||||
if s.memberStatus.PodUID != pod.GetUID() {
|
||||
if s.memberStatus.Pod.GetUID() != pod.GetUID() {
|
||||
s.log.Info("Using shutdown now method completed because it is already rotated")
|
||||
// The new pod has been started already.
|
||||
return true, false, nil
|
||||
|
|
|
@ -153,7 +153,7 @@ func (r *Reconciler) updateMemberRotationConditionsPlan(ctx context.Context, api
|
|||
continue
|
||||
}
|
||||
|
||||
p, ok := cache.Pod().V1().GetSimple(e.Member.PodName)
|
||||
p, ok := cache.Pod().V1().GetSimple(e.Member.Pod.GetName())
|
||||
if !ok {
|
||||
p = nil
|
||||
}
|
||||
|
|
|
@ -86,7 +86,7 @@ func (r *Reconciler) createMarkToRemovePlan(ctx context.Context, apiObject k8sut
|
|||
for _, e := range status.Members.AsListInGroups(rotationByAnnotationOrder...) {
|
||||
m := e.Member
|
||||
group := e.Group
|
||||
if m.Phase != api.MemberPhaseCreated || m.PodName == "" {
|
||||
if m.Phase != api.MemberPhaseCreated || m.Pod.GetName() == "" {
|
||||
// Only rotate when phase is created
|
||||
continue
|
||||
}
|
||||
|
@ -96,7 +96,7 @@ func (r *Reconciler) createMarkToRemovePlan(ctx context.Context, apiObject k8sut
|
|||
continue
|
||||
}
|
||||
|
||||
pod, found := cache.Pod().V1().GetSimple(m.PodName)
|
||||
pod, found := cache.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !found {
|
||||
continue
|
||||
}
|
||||
|
@ -186,7 +186,7 @@ func (r *Reconciler) createUpdatePlanInternal(apiObject k8sutil.APIObject, spec
|
|||
continue
|
||||
}
|
||||
|
||||
p, ok := cache.Pod().V1().GetSimple(m.Member.PodName)
|
||||
p, ok := cache.Pod().V1().GetSimple(m.Member.Pod.GetName())
|
||||
if !ok {
|
||||
p = nil
|
||||
}
|
||||
|
|
|
@ -71,7 +71,7 @@ func (r *Reconciler) createRotateOrUpgradeDecision(spec api.DeploymentSpec, stat
|
|||
}
|
||||
|
||||
func (r *Reconciler) createRotateOrUpgradeDecisionMember(spec api.DeploymentSpec, status api.DeploymentStatus, context PlanBuilderContext, element api.DeploymentStatusMemberElement) (d updateUpgradeDecision) {
|
||||
if element.Member.Phase == api.MemberPhaseCreated && element.Member.PodName != "" {
|
||||
if element.Member.Phase == api.MemberPhaseCreated && element.Member.Pod.GetName() != "" {
|
||||
// Only upgrade when phase is created
|
||||
|
||||
// Got pod, compare it with what it should be
|
||||
|
|
|
@ -374,8 +374,10 @@ func addAgentsToStatus(t *testing.T, status *api.DeploymentStatus, count int) {
|
|||
for i := 0; i < count; i++ {
|
||||
require.NoError(t, status.Members.Add(api.MemberStatus{
|
||||
ID: fmt.Sprintf("AGNT-%d", i),
|
||||
PodName: fmt.Sprintf("agnt-depl-xxx-%d", i),
|
||||
PodSpecVersion: "random",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: fmt.Sprintf("agnt-depl-xxx-%d", i),
|
||||
SpecVersion: "random",
|
||||
},
|
||||
Phase: api.MemberPhaseCreated,
|
||||
Conditions: []api.Condition{
|
||||
{
|
||||
|
@ -428,7 +430,9 @@ func TestCreatePlanSingleScale(t *testing.T) {
|
|||
status.Members.Single = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "id",
|
||||
PodName: "something",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something",
|
||||
},
|
||||
},
|
||||
}
|
||||
newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c)
|
||||
|
@ -445,11 +449,15 @@ func TestCreatePlanSingleScale(t *testing.T) {
|
|||
status.Members.Single = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "id1",
|
||||
PodName: "something1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something1",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "id1",
|
||||
PodName: "something1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something1",
|
||||
},
|
||||
},
|
||||
}
|
||||
newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c)
|
||||
|
@ -491,7 +499,9 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) {
|
|||
status.Members.Single = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "id",
|
||||
PodName: "something",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something",
|
||||
},
|
||||
},
|
||||
}
|
||||
newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c)
|
||||
|
@ -504,19 +514,27 @@ func TestCreatePlanActiveFailoverScale(t *testing.T) {
|
|||
status.Members.Single = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "id1",
|
||||
PodName: "something1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something1",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "id2",
|
||||
PodName: "something2",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something2",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "id3",
|
||||
PodName: "something3",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something3",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "id4",
|
||||
PodName: "something4",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something4",
|
||||
},
|
||||
},
|
||||
}
|
||||
newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c)
|
||||
|
@ -573,17 +591,23 @@ func TestCreatePlanClusterScale(t *testing.T) {
|
|||
status.Members.DBServers = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "db1",
|
||||
PodName: "something1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something1",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "db2",
|
||||
PodName: "something2",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something2",
|
||||
},
|
||||
},
|
||||
}
|
||||
status.Members.Coordinators = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "cr1",
|
||||
PodName: "coordinator1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "coordinator1",
|
||||
},
|
||||
},
|
||||
}
|
||||
newPlan, _, changed = r.createNormalPlan(ctx, depl, nil, spec, status, c)
|
||||
|
@ -600,25 +624,35 @@ func TestCreatePlanClusterScale(t *testing.T) {
|
|||
status.Members.DBServers = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "db1",
|
||||
PodName: "something1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something1",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "db2",
|
||||
PodName: "something2",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something2",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "db3",
|
||||
PodName: "something3",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "something3",
|
||||
},
|
||||
},
|
||||
}
|
||||
status.Members.Coordinators = api.MemberStatusList{
|
||||
api.MemberStatus{
|
||||
ID: "cr1",
|
||||
PodName: "coordinator1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "coordinator1",
|
||||
},
|
||||
},
|
||||
api.MemberStatus{
|
||||
ID: "cr2",
|
||||
PodName: "coordinator2",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "coordinator2",
|
||||
},
|
||||
},
|
||||
}
|
||||
spec.DBServers.Count = util.NewInt(1)
|
||||
|
@ -676,29 +710,41 @@ func TestCreatePlan(t *testing.T) {
|
|||
threeCoordinators := api.MemberStatusList{
|
||||
{
|
||||
ID: "1",
|
||||
PodName: "coordinator1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "coordinator1",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "2",
|
||||
PodName: "coordinator2",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "coordinator2",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "3",
|
||||
PodName: "coordinator3",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "coordinator3",
|
||||
},
|
||||
},
|
||||
}
|
||||
threeDBServers := api.MemberStatusList{
|
||||
{
|
||||
ID: "1",
|
||||
PodName: "dbserver1",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "dbserver1",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "2",
|
||||
PodName: "dbserver2",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "dbserver2",
|
||||
},
|
||||
},
|
||||
{
|
||||
ID: "3",
|
||||
PodName: "dbserver3",
|
||||
Pod: &api.MemberPodStatus{
|
||||
Name: "dbserver3",
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
|
@ -959,9 +1005,9 @@ func TestCreatePlan(t *testing.T) {
|
|||
}
|
||||
|
||||
c.Pods = map[string]*core.Pod{
|
||||
c.context.ArangoDeployment.Status.Members.Agents[0].PodName: {
|
||||
c.context.ArangoDeployment.Status.Members.Agents[0].Pod.GetName(): {
|
||||
ObjectMeta: meta.ObjectMeta{
|
||||
Name: c.context.ArangoDeployment.Status.Members.Agents[0].PodName,
|
||||
Name: c.context.ArangoDeployment.Status.Members.Agents[0].Pod.GetName(),
|
||||
},
|
||||
},
|
||||
}
|
||||
|
|
|
@ -75,7 +75,7 @@ func (r *Reconciler) CheckDeployment(ctx context.Context) error {
|
|||
continue
|
||||
}
|
||||
|
||||
if err := cache.Client().Kubernetes().CoreV1().Secrets(cache.Namespace()).Delete(ctx, m.PodName, meta.DeleteOptions{}); err != nil {
|
||||
if err := cache.Client().Kubernetes().CoreV1().Secrets(cache.Namespace()).Delete(ctx, m.Pod.GetName(), meta.DeleteOptions{}); err != nil {
|
||||
r.log.Err(err).Error("Failed to delete secret")
|
||||
}
|
||||
m.Phase = api.MemberPhaseNone
|
||||
|
|
|
@ -77,11 +77,11 @@ func ifPodUIDMismatch(m api.MemberStatus, a api.Action, i pod.Inspector) bool {
|
|||
|
||||
u := types.UID(ut)
|
||||
|
||||
if m.PodName == "" {
|
||||
if m.Pod.GetName() == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
p, ok := i.Pod().V1().GetSimple(m.PodName)
|
||||
p, ok := i.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !ok {
|
||||
return true
|
||||
}
|
||||
|
|
|
@ -322,17 +322,16 @@ func (r *Resources) RenderPodForMember(ctx context.Context, acs sutil.ACS, spec
|
|||
role := group.AsRole()
|
||||
roleAbbr := group.AsRoleAbbreviated()
|
||||
|
||||
newMember := m.DeepCopy()
|
||||
|
||||
newMember.PodName = k8sutil.CreatePodName(apiObject.GetName(), roleAbbr, newMember.ID, CreatePodSuffix(spec))
|
||||
podName := k8sutil.CreatePodName(apiObject.GetName(), roleAbbr, m.ID, CreatePodSuffix(spec))
|
||||
|
||||
var podCreator interfaces.PodCreator
|
||||
if group.IsArangod() {
|
||||
// Prepare arguments
|
||||
autoUpgrade := newMember.Conditions.IsTrue(api.ConditionTypeAutoUpgrade) || spec.Upgrade.Get().AutoUpgrade
|
||||
autoUpgrade := m.Conditions.IsTrue(api.ConditionTypeAutoUpgrade) || spec.Upgrade.Get().AutoUpgrade
|
||||
|
||||
podCreator = &MemberArangoDPod{
|
||||
status: *newMember,
|
||||
podName: podName,
|
||||
status: m,
|
||||
groupSpec: groupSpec,
|
||||
spec: spec,
|
||||
group: group,
|
||||
|
@ -357,6 +356,7 @@ func (r *Resources) RenderPodForMember(ctx context.Context, acs sutil.ACS, spec
|
|||
}
|
||||
|
||||
podCreator = &MemberSyncPod{
|
||||
podName: podName,
|
||||
groupSpec: groupSpec,
|
||||
spec: spec,
|
||||
group: group,
|
||||
|
@ -364,13 +364,13 @@ func (r *Resources) RenderPodForMember(ctx context.Context, acs sutil.ACS, spec
|
|||
imageInfo: imageInfo,
|
||||
arangoMember: *member,
|
||||
apiObject: apiObject,
|
||||
memberStatus: *newMember,
|
||||
memberStatus: m,
|
||||
}
|
||||
} else {
|
||||
return nil, errors.Newf("unable to render Pod")
|
||||
}
|
||||
|
||||
pod, err := RenderArangoPod(ctx, cache, apiObject, role, newMember.ID, newMember.PodName, podCreator)
|
||||
pod, err := RenderArangoPod(ctx, cache, apiObject, role, m.ID, podName, podCreator)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@ -455,7 +455,6 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
|
|||
// Update pod name
|
||||
role := group.AsRole()
|
||||
|
||||
m.PodName = template.PodSpec.GetName()
|
||||
newPhase := api.MemberPhaseCreated
|
||||
// Create pod
|
||||
if group.IsArangod() {
|
||||
|
@ -472,20 +471,26 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
|
|||
return errors.WithStack(err)
|
||||
}
|
||||
|
||||
m.PodName = podName
|
||||
m.PodUID = uid
|
||||
m.PodSpecVersion = template.PodSpecChecksum
|
||||
var pod api.MemberPodStatus
|
||||
|
||||
pod.Name = podName
|
||||
pod.UID = uid
|
||||
pod.SpecVersion = template.PodSpecChecksum
|
||||
|
||||
m.Pod = &pod
|
||||
m.Pod.Propagate(&m)
|
||||
|
||||
m.ArangoVersion = m.Image.ArangoDBVersion
|
||||
m.ImageID = m.Image.ImageID
|
||||
|
||||
// reset old sidecar values to nil
|
||||
m.SideCarSpecs = nil
|
||||
|
||||
log.Str("pod-name", m.PodName).Debug("Created pod")
|
||||
log.Str("pod-name", pod.Name).Debug("Created pod")
|
||||
if m.Image == nil {
|
||||
log.Str("pod-name", m.PodName).Debug("Created pod with default image")
|
||||
log.Str("pod-name", pod.Name).Debug("Created pod with default image")
|
||||
} else {
|
||||
log.Str("pod-name", m.PodName).Debug("Created pod with predefined image")
|
||||
log.Str("pod-name", pod.Name).Debug("Created pod with predefined image")
|
||||
}
|
||||
} else if group.IsArangosync() {
|
||||
// Check monitoring token secret
|
||||
|
@ -511,11 +516,17 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
|
|||
if err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
log.Str("pod-name", m.PodName).Debug("Created pod")
|
||||
|
||||
m.PodName = podName
|
||||
m.PodUID = uid
|
||||
m.PodSpecVersion = template.PodSpecChecksum
|
||||
var pod api.MemberPodStatus
|
||||
|
||||
pod.Name = podName
|
||||
pod.UID = uid
|
||||
pod.SpecVersion = template.PodSpecChecksum
|
||||
|
||||
m.Pod = &pod
|
||||
m.Pod.Propagate(&m)
|
||||
|
||||
log.Str("pod-name", pod.Name).Debug("Created pod")
|
||||
}
|
||||
|
||||
member.GetPhaseExecutor().Execute(r.context.GetAPIObject(), spec, group, &m, api.Action{}, newPhase)
|
||||
|
@ -532,7 +543,7 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
|
|||
}
|
||||
}
|
||||
|
||||
log.Str("pod", m.PodName).Info("Updating member")
|
||||
log.Str("pod", m.Pod.GetName()).Info("Updating member")
|
||||
if err := status.Members.Update(m, group); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
@ -540,7 +551,7 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
|
|||
return errors.WithStack(err)
|
||||
}
|
||||
// Create event
|
||||
r.context.CreateEvent(k8sutil.NewPodCreatedEvent(m.PodName, role, apiObject))
|
||||
r.context.CreateEvent(k8sutil.NewPodCreatedEvent(m.Pod.GetName(), role, apiObject))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -50,6 +50,7 @@ var _ interfaces.PodCreator = &MemberArangoDPod{}
|
|||
var _ interfaces.ContainerCreator = &ArangoDContainer{}
|
||||
|
||||
type MemberArangoDPod struct {
|
||||
podName string
|
||||
status api.MemberStatus
|
||||
groupSpec api.ServerGroupSpec
|
||||
spec api.DeploymentSpec
|
||||
|
|
|
@ -61,6 +61,7 @@ var _ interfaces.PodCreator = &MemberSyncPod{}
|
|||
var _ interfaces.ContainerCreator = &ArangoSyncContainer{}
|
||||
|
||||
type MemberSyncPod struct {
|
||||
podName string
|
||||
tlsKeyfileSecretName string
|
||||
clientAuthCASecretName string
|
||||
masterJWTSecretName string
|
||||
|
|
|
@ -372,7 +372,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
|
|||
for _, e := range status.Members.AsList() {
|
||||
m := e.Member
|
||||
group := e.Group
|
||||
if podName := m.PodName; podName != "" {
|
||||
if podName := m.Pod.GetName(); podName != "" {
|
||||
if _, exists := cachedStatus.Pod().V1().GetSimple(podName); !exists {
|
||||
log.Str("pod-name", podName).Debug("Does not exist")
|
||||
switch m.Phase {
|
||||
|
|
|
@ -60,7 +60,7 @@ func (r *Resources) EnsureLeader(ctx context.Context, cachedStatus inspectorInte
|
|||
changed := false
|
||||
group := api.ServerGroupAgents
|
||||
for _, e := range status.Members.AsListInGroup(group) {
|
||||
pod, exist := cachedStatus.Pod().V1().GetSimple(e.Member.PodName)
|
||||
pod, exist := cachedStatus.Pod().V1().GetSimple(e.Member.Pod.GetName())
|
||||
if !exist {
|
||||
continue
|
||||
}
|
||||
|
@ -217,7 +217,7 @@ func (r *Resources) ensureSingleServerLeader(ctx context.Context, cachedStatus i
|
|||
|
||||
status, _ := r.context.GetStatus()
|
||||
for _, m := range status.Members.Single {
|
||||
pod, exist := cachedStatus.Pod().V1().GetSimple(m.PodName)
|
||||
pod, exist := cachedStatus.Pod().V1().GetSimple(m.Pod.GetName())
|
||||
if !exist {
|
||||
continue
|
||||
}
|
||||
|
@ -241,7 +241,7 @@ func (r *Resources) ensureSingleServerLeader(ctx context.Context, cachedStatus i
|
|||
|
||||
err := r.context.ApplyPatchOnPod(ctx, pod, patch.ItemReplace(patch.NewPath("metadata", "labels"), labels))
|
||||
if err != nil {
|
||||
return errors.WithMessagef(err, "unable to change leader label for pod %s", m.PodName)
|
||||
return errors.WithMessagef(err, "unable to change leader label for pod %s", m.Pod.GetName())
|
||||
}
|
||||
changed = true
|
||||
}
|
||||
|
|
|
@ -102,10 +102,10 @@ func (r *Resources) inspectFinalizerPVCMemberExists(ctx context.Context, group a
|
|||
}
|
||||
|
||||
// Member still exists, let's trigger a delete of it
|
||||
if memberStatus.PodName != "" {
|
||||
if memberStatus.Pod.GetName() != "" {
|
||||
log.Info("Removing Pod of member, because PVC is being removed")
|
||||
err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
|
||||
return r.context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, memberStatus.PodName, meta.DeleteOptions{})
|
||||
return r.context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, memberStatus.Pod.GetName(), meta.DeleteOptions{})
|
||||
})
|
||||
if err != nil && !k8sutil.IsNotFound(err) {
|
||||
log.Err(err).Debug("Failed to delete pod")
|
||||
|
|
|
@ -87,7 +87,7 @@ func IsRotationRequired(acs sutil.ACS, spec api.DeploymentSpec, member api.Membe
|
|||
|
||||
// Check if pod details are propagated
|
||||
if pod != nil {
|
||||
if member.PodUID != pod.UID {
|
||||
if member.Pod.GetUID() != pod.UID {
|
||||
reason = "Pod UID does not match, this pod is not managed by Operator. Recreating"
|
||||
mode = EnforcedRotation
|
||||
return
|
||||
|
@ -100,7 +100,7 @@ func IsRotationRequired(acs sutil.ACS, spec api.DeploymentSpec, member api.Membe
|
|||
}
|
||||
}
|
||||
|
||||
if member.PodSpecVersion == "" {
|
||||
if p := member.Pod; p != nil && p.SpecVersion == "" {
|
||||
reason = "Pod Spec Version is nil - recreating pod"
|
||||
mode = EnforcedRotation
|
||||
return
|
||||
|
|
|
@ -100,7 +100,7 @@ func (d *Deployment) ReadyPodCount() int {
|
|||
count := 0
|
||||
status, _ := d.GetStatus()
|
||||
for _, e := range status.Members.AsList() {
|
||||
if e.Member.PodName == "" {
|
||||
if e.Member.Pod.GetName() == "" {
|
||||
continue
|
||||
}
|
||||
if e.Member.Conditions.IsTrue(api.ConditionTypeReady) {
|
||||
|
|
|
@ -47,7 +47,7 @@ func (m member) ID() string {
|
|||
|
||||
func (m member) PodName() string {
|
||||
if status, found := m.status(); found {
|
||||
return status.PodName
|
||||
return status.Pod.GetName()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
|
Loading…
Reference in a new issue