mirror of https://github.com/arangodb/kube-arangodb.git (synced 2024-12-14 11:57:37 +00:00)
[Linter] Unify imports (#1042)

parent fb8d214e4a, commit bca84f3331
103 changed files with 778 additions and 749 deletions
.golangci.yaml (new file, +34 lines)

@@ -0,0 +1,34 @@
+---
+
+run:
+  issues-exit-code: 3
+  timeout: 30m
+  skip-dirs:
+    - vendor
+    - .gobuild
+    - deps
+    - tools
+
+linters:
+  disable-all: true
+  enable:
+    - deadcode
+    - gosimple
+    - govet
+    - ineffassign
+    - staticcheck
+    - structcheck
+    - typecheck
+    - unconvert
+    - unparam
+    - unused
+    - varcheck
+    - importas
+
+linters-settings:
+  importas:
+    alias:
+      - pkg: k8s.io/api/core/v1
+        alias: core
+      - pkg: k8s.io/apimachinery/pkg/apis/meta/v1
+        alias: meta
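A minimal sketch of the import style this importas configuration enforces (the file and function names below are illustrative, not from the commit): k8s.io/api/core/v1 must be aliased core, and k8s.io/apimachinery/pkg/apis/meta/v1 must be aliased meta. With the previous mixed aliases the same code would have read v1.Pod / metav1.ObjectMeta.

package example

import (
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// emptyPod builds a Pod using the unified aliases the linter now requires.
func emptyPod(name, namespace string) core.Pod {
	return core.Pod{
		ObjectMeta: meta.ObjectMeta{Name: name, Namespace: namespace},
	}
}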
Makefile (14 lines changed)

@@ -182,10 +182,6 @@ allall: all
 # Tip: Run `eval $(minikube docker-env)` before calling make if you're developing on minikube.
 #
 
-GOLANGCI_ENABLED=deadcode gosimple govet ineffassign staticcheck structcheck typecheck unconvert unparam unused varcheck
-#GOLANGCI_ENABLED=gocyclo goconst golint maligned errcheck interfacer megacheck
-#GOLANGCI_ENABLED+=dupl - disable dupl check
-
 .PHONY: license-verify
 license-verify:
 	@echo ">> Verify license of files"

@@ -208,11 +204,11 @@ fmt-verify: license-verify
 
 .PHONY: linter
 linter:
-	$(GOPATH)/bin/golangci-lint run --build-tags "$(RELEASE_MODE)" --no-config --issues-exit-code=1 --deadline=30m --exclude-use-default=false \
-		--disable-all $(foreach EXCLUDE_DIR,$(EXCLUDE_DIRS),--skip-dirs $(EXCLUDE_DIR)) \
-		$(foreach MODE,$(GOLANGCI_ENABLED),--enable $(MODE)) \
-		$(foreach LINT_EXCLUDE,$(LINT_EXCLUDES),--exclude '$(LINT_EXCLUDE)') \
-		./...
+	$(GOPATH)/bin/golangci-lint run --build-tags "$(RELEASE_MODE)" $(foreach LINT_EXCLUDE,$(LINT_EXCLUDES),--exclude '$(LINT_EXCLUDE)') ./...
+
+.PHONY: linter-fix
+linter-fix:
+	$(GOPATH)/bin/golangci-lint run --fix --build-tags "$(RELEASE_MODE)" $(foreach LINT_EXCLUDE,$(LINT_EXCLUDES),--exclude '$(LINT_EXCLUDE)') ./...
 
 .PHONY: build
 build: docker manifests
cmd/admin.go (12 lines changed)

@@ -36,8 +36,8 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/globals"
 
 	"github.com/spf13/cobra"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/arangodb-helper/go-certificates"
 	"github.com/arangodb/go-driver/jwt"

@@ -314,16 +314,16 @@ func getCACertificate(ctx context.Context, secrets secretv1.ReadInterface, name
 	ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
 	defer cancel()
 
-	s, err := secrets.Get(ctxChild, name, metav1.GetOptions{})
+	s, err := secrets.Get(ctxChild, name, meta.GetOptions{})
 	if err != nil {
 		return nil, errors.WithMessage(err, fmt.Sprintf("failed to get secret \"%s\"", name))
 	}
 
-	if data, ok := s.Data[v1.ServiceAccountRootCAKey]; ok {
+	if data, ok := s.Data[core.ServiceAccountRootCAKey]; ok {
 		return certificates.LoadCertPool(string(data))
 	}
 
-	return nil, errors.New(fmt.Sprintf("the \"%s\" does not exist in the secret \"%s\"", v1.ServiceAccountRootCAKey,
+	return nil, errors.New(fmt.Sprintf("the \"%s\" does not exist in the secret \"%s\"", core.ServiceAccountRootCAKey,
 		name))
 }
 

@@ -341,7 +341,7 @@ func getDeployment(ctx context.Context, namespace, deplName string) (api.ArangoD
 	ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
 	defer cancel()
 
-	deployments, err := extCli.DatabaseV1().ArangoDeployments(namespace).List(ctxChild, metav1.ListOptions{})
+	deployments, err := extCli.DatabaseV1().ArangoDeployments(namespace).List(ctxChild, meta.ListOptions{})
 	if err != nil {
 		if api.IsNotFound(err) {
 			return api.ArangoDeployment{}, errors.WithMessage(err, "there are no deployments")
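For orientation, a compact sketch (a hypothetical helper, not code from this commit) of the rename pattern applied throughout: metav1 becomes meta and v1 becomes core, so option structs and core constants read uniformly at call sites.

package example

import (
	"context"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// rootCAFromSecret mirrors the getCACertificate pattern above: fetch a
// secret with meta.GetOptions and look up the well-known core constant.
func rootCAFromSecret(ctx context.Context, cli kubernetes.Interface, ns, name string) ([]byte, bool) {
	s, err := cli.CoreV1().Secrets(ns).Get(ctx, name, meta.GetOptions{})
	if err != nil {
		return nil, false
	}
	data, ok := s.Data[core.ServiceAccountRootCAKey]
	return data, ok
}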
cmd/cmd.go (10 lines changed)

@@ -61,8 +61,8 @@ import (
 	"github.com/spf13/cobra"
 	flag "github.com/spf13/pflag"
 	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/client-go/kubernetes"
 	v1core "k8s.io/client-go/kubernetes/typed/core/v1"

@@ -448,7 +448,7 @@ func newOperatorConfigAndDeps(id, namespace, name string) (operator.Config, oper
 func getMyPodInfo(kubecli kubernetes.Interface, namespace, name string) (string, string, error) {
 	var image, sa string
 	op := func() error {
-		pod, err := kubecli.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		pod, err := kubecli.CoreV1().Pods(namespace).Get(context.Background(), name, meta.GetOptions{})
 		if err != nil {
 			logger.
 				Err(err).

@@ -480,7 +480,7 @@ func createRecorder(kubecli kubernetes.Interface, name, namespace string) record
 	eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: v1core.New(kubecli.CoreV1().RESTClient()).Events(namespace)})
 	combinedScheme := runtime.NewScheme()
 	scheme.AddToScheme(combinedScheme)
-	v1.AddToScheme(combinedScheme)
+	core.AddToScheme(combinedScheme)
 	appsv1.AddToScheme(combinedScheme)
-	return eventBroadcaster.NewRecorder(combinedScheme, v1.EventSource{Component: name})
+	return eventBroadcaster.NewRecorder(combinedScheme, core.EventSource{Component: name})
 }
 
@@ -36,7 +36,7 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/version"
 
 	"github.com/spf13/cobra"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
 	"github.com/arangodb/kube-arangodb/pkg/util/constants"

@@ -119,7 +119,7 @@ func cmdLifecyclePreStopRunFinalizer(cmd *cobra.Command, args []string) {
 	pods := client.Kubernetes().CoreV1().Pods(namespace)
 	recentErrors := 0
 	for {
-		p, err := pods.Get(context.Background(), name, metav1.GetOptions{})
+		p, err := pods.Get(context.Background(), name, meta.GetOptions{})
 		if k8sutil.IsNotFound(err) {
 			logger.Warn("Pod not found")
 			return

@@ -218,7 +218,7 @@ func (c *cmdLifecyclePreStopRunPort) run(cmd *cobra.Command, args []string) erro
 
 		conn.Close()
 
-		p, err := pods.Get(context.Background(), name, metav1.GetOptions{})
+		p, err := pods.Get(context.Background(), name, meta.GetOptions{})
 		if k8sutil.IsNotFound(err) {
 			logger.Warn("Pod not found")
 			return nil
@@ -41,9 +41,9 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/kclient"
 	"github.com/pkg/errors"
 	"github.com/spf13/cobra"
-	corev1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	"k8s.io/client-go/kubernetes"
 )

@@ -103,71 +103,71 @@ func runVolumeInspector(ctx context.Context, kube kubernetes.Interface, ns, name
 
 	deletePVC := true
 	claimname := "arangodb-reboot-pvc-" + name
-	pvcspec := corev1.PersistentVolumeClaim{
-		ObjectMeta: metav1.ObjectMeta{
+	pvcspec := core.PersistentVolumeClaim{
+		ObjectMeta: meta.ObjectMeta{
 			Name: claimname,
 			Labels: map[string]string{
 				"app":      "arangodb",
 				"rebooted": "yes",
 			},
 		},
-		Spec: corev1.PersistentVolumeClaimSpec{
-			AccessModes: []corev1.PersistentVolumeAccessMode{corev1.ReadWriteOnce},
+		Spec: core.PersistentVolumeClaimSpec{
+			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
 			VolumeName:  name,
-			Resources: corev1.ResourceRequirements{
-				Requests: corev1.ResourceList{
-					corev1.ResourceStorage: *resource.NewQuantity(1024*1024*1024, resource.DecimalSI),
+			Resources: core.ResourceRequirements{
+				Requests: core.ResourceList{
+					core.ResourceStorage: *resource.NewQuantity(1024*1024*1024, resource.DecimalSI),
 				},
 			},
 			StorageClassName: util.NewString(storageClassName),
 		},
 	}
 
-	_, err := kube.CoreV1().PersistentVolumeClaims(ns).Create(context.Background(), &pvcspec, metav1.CreateOptions{})
+	_, err := kube.CoreV1().PersistentVolumeClaims(ns).Create(context.Background(), &pvcspec, meta.CreateOptions{})
 	if err != nil {
 		return "", "", errors.Wrap(err, "failed to create pvc")
 	}
 	defer func() {
 		if deletePVC {
 			logger.Str("pvc-name", claimname).Debug("deleting pvc")
-			kube.CoreV1().PersistentVolumeClaims(ns).Delete(context.Background(), claimname, metav1.DeleteOptions{})
+			kube.CoreV1().PersistentVolumeClaims(ns).Delete(context.Background(), claimname, meta.DeleteOptions{})
 		}
 	}()
 
 	podname := "arangodb-reboot-pod-" + name
-	podspec := corev1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
+	podspec := core.Pod{
+		ObjectMeta: meta.ObjectMeta{
 			Name: podname,
 		},
-		Spec: corev1.PodSpec{
-			RestartPolicy: corev1.RestartPolicyNever,
-			Containers: []corev1.Container{
-				corev1.Container{
+		Spec: core.PodSpec{
+			RestartPolicy: core.RestartPolicyNever,
+			Containers: []core.Container{
+				core.Container{
 					Name:            "inspector",
 					Image:           image,
-					ImagePullPolicy: corev1.PullAlways,
+					ImagePullPolicy: core.PullAlways,
 					Command:         []string{"arangodb_operator"},
 					Args:            []string{"reboot", "inspect"},
-					Env: []corev1.EnvVar{
-						corev1.EnvVar{
+					Env: []core.EnvVar{
+						core.EnvVar{
 							Name:  constants.EnvOperatorPodNamespace,
 							Value: ns,
 						},
 					},
-					VolumeMounts: []corev1.VolumeMount{
-						corev1.VolumeMount{
+					VolumeMounts: []core.VolumeMount{
+						core.VolumeMount{
 							MountPath: "/data",
 							Name:      "data",
 						},
 					},
-					Ports: []corev1.ContainerPort{
-						corev1.ContainerPort{
+					Ports: []core.ContainerPort{
+						core.ContainerPort{
 							ContainerPort: 8080,
 						},
 					},
-					ReadinessProbe: &corev1.Probe{
-						Handler: corev1.Handler{
-							HTTPGet: &corev1.HTTPGetAction{
+					ReadinessProbe: &core.Probe{
+						Handler: core.Handler{
+							HTTPGet: &core.HTTPGetAction{
 								Path: "/info",
 								Port: intstr.FromInt(8080),
 							},

@@ -175,19 +175,19 @@ func runVolumeInspector(ctx context.Context, kube kubernetes.Interface, ns, name
 					},
 				},
 			},
-			Volumes: []corev1.Volume{
+			Volumes: []core.Volume{
 				k8sutil.CreateVolumeWithPersitantVolumeClaim("data", claimname),
 			},
 		},
 	}
 
-	_, err = kube.CoreV1().Pods(ns).Create(context.Background(), &podspec, metav1.CreateOptions{})
+	_, err = kube.CoreV1().Pods(ns).Create(context.Background(), &podspec, meta.CreateOptions{})
 	if err != nil {
 		return "", "", errors.Wrap(err, "failed to create pod")
 	}
-	defer kube.CoreV1().Pods(ns).Delete(context.Background(), podname, metav1.DeleteOptions{})
+	defer kube.CoreV1().Pods(ns).Delete(context.Background(), podname, meta.DeleteOptions{})
 
-	podwatch, err := kube.CoreV1().Pods(ns).Watch(context.Background(), metav1.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", podname).String()})
+	podwatch, err := kube.CoreV1().Pods(ns).Watch(context.Background(), meta.ListOptions{FieldSelector: fields.OneTermEqualSelector("metadata.name", podname).String()})
 	if err != nil {
 		return "", "", errors.Wrap(err, "failed to watch for pod")
 	}

@@ -204,18 +204,18 @@ func runVolumeInspector(ctx context.Context, kube kubernetes.Interface, ns, name
 	}
 
 	// get the pod
-	pod, ok := ev.Object.(*corev1.Pod)
+	pod, ok := ev.Object.(*core.Pod)
 	if !ok {
 		return "", "", fmt.Errorf("failed to get pod")
 	}
 
 	switch pod.Status.Phase {
-	case corev1.PodFailed:
+	case core.PodFailed:
 		return "", "", fmt.Errorf("pod failed: %s", pod.Status.Reason)
-	case corev1.PodRunning:
+	case core.PodRunning:
 		podReady := false
 		for _, c := range pod.Status.Conditions {
-			if c.Type == corev1.PodReady && c.Status == corev1.ConditionTrue {
+			if c.Type == core.PodReady && c.Status == core.ConditionTrue {
 				podReady = true
 			}
 		}

@@ -264,22 +264,22 @@ func doVolumeInspection(ctx context.Context, kube kubernetes.Interface, ns, name
 }
 
 func checkVolumeAvailable(kube kubernetes.Interface, vname string) (VolumeInfo, error) {
-	volume, err := kube.CoreV1().PersistentVolumes().Get(context.Background(), vname, metav1.GetOptions{})
+	volume, err := kube.CoreV1().PersistentVolumes().Get(context.Background(), vname, meta.GetOptions{})
 	if err != nil {
 		return VolumeInfo{}, errors.Wrapf(err, "failed to GET volume %s", vname)
 	}
 
 	switch volume.Status.Phase {
-	case corev1.VolumeAvailable:
+	case core.VolumeAvailable:
 		break
-	case corev1.VolumeReleased:
+	case core.VolumeReleased:
 		// we have to remove the claim reference
 		volume.Spec.ClaimRef = nil
-		if _, err := kube.CoreV1().PersistentVolumes().Update(context.Background(), volume, metav1.UpdateOptions{}); err != nil {
+		if _, err := kube.CoreV1().PersistentVolumes().Update(context.Background(), volume, meta.UpdateOptions{}); err != nil {
 			return VolumeInfo{}, errors.Wrapf(err, "failed to remove claim reference")
 		}
 	default:
-		return VolumeInfo{}, fmt.Errorf("Volume %s phase is %s, expected %s", vname, volume.Status.Phase, corev1.VolumeAvailable)
+		return VolumeInfo{}, fmt.Errorf("Volume %s phase is %s, expected %s", vname, volume.Status.Phase, core.VolumeAvailable)
 	}
 
 	return VolumeInfo{StorageClassName: volume.Spec.StorageClassName}, nil

@@ -306,7 +306,7 @@ func preflightChecks(kube kubernetes.Interface, volumes []string) (VolumeListInf
 }
 
 func getMyImage(kube kubernetes.Interface, ns, name string) (string, error) {
-	pod, err := kube.CoreV1().Pods(ns).Get(context.Background(), name, metav1.GetOptions{})
+	pod, err := kube.CoreV1().Pods(ns).Get(context.Background(), name, meta.GetOptions{})
 	if err != nil {
 		return "", err
 	}

@@ -330,7 +330,7 @@ func createArangoDeployment(cli acli.Interface, ns, deplname, arangoimage string
 	}
 
 	depl := deplv1.ArangoDeployment{
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name: deplname,
 		},
 		Spec: deplv1.DeploymentSpec{

@@ -367,7 +367,7 @@ func createArangoDeployment(cli acli.Interface, ns, deplname, arangoimage string
 		})
 	}
 
-	if _, err := cli.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), &depl, metav1.CreateOptions{}); err != nil {
+	if _, err := cli.DatabaseV1().ArangoDeployments(ns).Create(context.Background(), &depl, meta.CreateOptions{}); err != nil {
 		return errors.Wrap(err, "failed to create ArangoDeployment")
 	}
 
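The inspector flow above (create a PVC, create a pod, watch until ready) is a common client-go pattern. A condensed, hypothetical sketch of the watch loop with the unified aliases (the helper name waitPodReady is illustrative, not from the repository):

package example

import (
	"context"
	"fmt"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/fields"
	"k8s.io/client-go/kubernetes"
)

// waitPodReady blocks until the named pod reports the Ready condition,
// consuming a field-selector watch much like runVolumeInspector does.
func waitPodReady(ctx context.Context, kube kubernetes.Interface, ns, podname string) error {
	w, err := kube.CoreV1().Pods(ns).Watch(ctx, meta.ListOptions{
		FieldSelector: fields.OneTermEqualSelector("metadata.name", podname).String(),
	})
	if err != nil {
		return err
	}
	defer w.Stop()

	for ev := range w.ResultChan() {
		pod, ok := ev.Object.(*core.Pod)
		if !ok {
			continue // skip bookmark/status events
		}
		switch pod.Status.Phase {
		case core.PodFailed:
			return fmt.Errorf("pod failed: %s", pod.Status.Reason)
		case core.PodRunning:
			for _, c := range pod.Status.Conditions {
				if c.Type == core.PodReady && c.Status == core.ConditionTrue {
					return nil
				}
			}
		}
	}
	return fmt.Errorf("watch closed before pod %s became ready", podname)
}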
@@ -24,15 +24,15 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/apps"
 
 	batchv1 "k8s.io/api/batch/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // ArangoJobList is a list of ArangoDB jobs.
 type ArangoJobList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
+	meta.TypeMeta `json:",inline"`
+	meta.ListMeta `json:"metadata,omitempty"`
 
 	Items []ArangoJob `json:"items"`
 }

@@ -42,16 +42,16 @@ type ArangoJobList struct {
 
 // ArangoJob contains definition and status of the ArangoDB type Job.
 type ArangoJob struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-	Spec              ArangoJobSpec     `json:"spec,omitempty"`
-	Status            batchv1.JobStatus `json:"status,omitempty"`
+	meta.TypeMeta   `json:",inline"`
+	meta.ObjectMeta `json:"metadata,omitempty"`
+	Spec            ArangoJobSpec     `json:"spec,omitempty"`
+	Status          batchv1.JobStatus `json:"status,omitempty"`
 }
 
 // AsOwner creates an OwnerReference for the given job
-func (a *ArangoJob) AsOwner() metav1.OwnerReference {
+func (a *ArangoJob) AsOwner() meta.OwnerReference {
 	trueVar := true
-	return metav1.OwnerReference{
+	return meta.OwnerReference{
 		APIVersion: SchemeGroupVersion.String(),
 		Kind:       apps.ArangoJobResourceKind,
 		Name:       a.Name,
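AsOwner here (and in the replication and storage types later in this diff) feeds Kubernetes garbage collection. A hypothetical sketch, assuming an owner reference produced by such a method, of how it is attached to a child resource so the child is cleaned up with its owner:

package example

import (
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// childPodFor returns a pod carrying the given owner reference; when the
// owning resource is deleted, the garbage collector removes the pod too.
func childPodFor(owner meta.OwnerReference, name, namespace string) core.Pod {
	return core.Pod{
		ObjectMeta: meta.ObjectMeta{
			Name:            name,
			Namespace:       namespace,
			OwnerReferences: []meta.OwnerReference{owner},
		},
	}
}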
@@ -23,7 +23,7 @@ package v1
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/apps"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )

@@ -50,6 +50,6 @@ func addKnownTypes(s *runtime.Scheme) error {
 		&ArangoJob{},
 		&ArangoJobList{},
 	)
-	metav1.AddToGroupVersion(s, SchemeGroupVersion)
+	meta.AddToGroupVersion(s, SchemeGroupVersion)
 	return nil
 }
@@ -22,7 +22,7 @@ package v1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/backup"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 const (

@@ -39,8 +39,8 @@ var (
 
 // ArangoBackupList is a list of ArangoDB backups.
 type ArangoBackupList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
+	meta.TypeMeta `json:",inline"`
+	meta.ListMeta `json:"metadata,omitempty"`
 
 	Items []ArangoBackup `json:"items"`
 }

@@ -50,8 +50,8 @@ type ArangoBackupList struct {
 
 // ArangoBackup contains definition and status of the ArangoDB Backup.
 type ArangoBackup struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
+	meta.TypeMeta   `json:",inline"`
+	meta.ObjectMeta `json:"metadata,omitempty"`
 
 	Spec   ArangoBackupSpec   `json:"spec"`
 	Status ArangoBackupStatus `json:"status"`
@@ -26,15 +26,15 @@ import (
 	deployment "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/handlers/utils"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // ArangoBackupPolicyList is a list of ArangoDB backup policy.
 type ArangoBackupPolicyList struct {
-	metav1.TypeMeta `json:",inline"`
-	metav1.ListMeta `json:"metadata,omitempty"`
+	meta.TypeMeta `json:",inline"`
+	meta.ListMeta `json:"metadata,omitempty"`
 
 	Items []ArangoBackupPolicy `json:"items"`
 }

@@ -44,8 +44,8 @@ type ArangoBackupPolicyList struct {
 
 // ArangoBackupPolicy contains definition and status of the ArangoDB Backup Policy.
 type ArangoBackupPolicy struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
+	meta.TypeMeta   `json:",inline"`
+	meta.ObjectMeta `json:"metadata,omitempty"`
 
 	Spec   ArangoBackupPolicySpec   `json:"spec"`
 	Status ArangoBackupPolicyStatus `json:"status"`

@@ -64,7 +64,7 @@ func (a *ArangoBackupPolicy) NewBackup(d *deployment.ArangoDeployment) *ArangoBa
 	}
 
 	return &ArangoBackup{
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name:      fmt.Sprintf("%s-%s", d.Name, utils.RandomString(8)),
 			Namespace: a.Namespace,
@@ -22,7 +22,7 @@ package v1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/backup"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )

@@ -51,6 +51,6 @@ func addKnownTypes(s *runtime.Scheme) error {
 		&ArangoBackupPolicy{},
 		&ArangoBackupPolicyList{},
 	)
-	metav1.AddToGroupVersion(s, SchemeGroupVersion)
+	meta.AddToGroupVersion(s, SchemeGroupVersion)
 	return nil
 }
@@ -23,7 +23,7 @@ package v1
 import (
 	"github.com/arangodb/kube-arangodb/pkg/util"
 	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // ConditionType is a strongly typed condition name

@@ -117,9 +117,9 @@ type Condition struct {
 	// Status of the condition, one of True, False, Unknown.
 	Status core.ConditionStatus `json:"status"`
 	// The last time this condition was updated.
-	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+	LastUpdateTime meta.Time `json:"lastUpdateTime,omitempty"`
 	// Last time the condition transitioned from one status to another.
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	LastTransitionTime meta.Time `json:"lastTransitionTime,omitempty"`
 	// The reason for the condition's last transition.
 	Reason string `json:"reason,omitempty"`
 	// A human readable message indicating details about the transition.

@@ -205,7 +205,7 @@ func (list *ConditionList) Touch(conditionType ConditionType) bool {
 	src := *list
 	for i, x := range src {
 		if x.Type == conditionType {
-			src[i].LastUpdateTime = metav1.Now()
+			src[i].LastUpdateTime = meta.Now()
 			return true
 		}
 	}

@@ -234,7 +234,7 @@ func (list *ConditionList) update(conditionType ConditionType, status bool, reas
 
 	if index == -1 {
 		// Not found
-		now := metav1.Now()
+		now := meta.Now()
 		*list = append(src, Condition{
 			Type:           conditionType,
 			LastUpdateTime: now,

@@ -250,14 +250,14 @@ func (list *ConditionList) update(conditionType ConditionType, status bool, reas
 	if src[index].Status != statusX {
 		// Transition to another status
 		src[index].Status = statusX
-		now := metav1.Now()
+		now := meta.Now()
 		src[index].LastTransitionTime = now
 		src[index].LastUpdateTime = now
 		src[index].Reason = reason
 		src[index].Message = message
 		src[index].Hash = hash
 	} else if src[index].Reason != reason || src[index].Message != message || src[index].Hash != hash {
-		src[index].LastUpdateTime = metav1.Now()
+		src[index].LastUpdateTime = meta.Now()
 		src[index].Reason = reason
 		src[index].Message = message
 		src[index].Hash = hash
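The update logic above distinguishes the two timestamps: LastTransitionTime moves only when Status flips, LastUpdateTime on any change. A self-contained sketch of that rule with simplified types (this is an illustration of the semantics, not the operator's code):

package example

import (
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// condition mirrors the fields the diff touches.
type condition struct {
	Status             core.ConditionStatus
	Reason, Message    string
	LastUpdateTime     meta.Time
	LastTransitionTime meta.Time
}

// apply stamps timestamps the same way ConditionList.update does: a status
// flip moves both timestamps, any other change moves only the update time.
func (c *condition) apply(status core.ConditionStatus, reason, message string) {
	now := meta.Now()
	if c.Status != status {
		c.Status = status
		c.LastTransitionTime = now
		c.LastUpdateTime = now
	} else if c.Reason != reason || c.Message != message {
		c.LastUpdateTime = now
	}
	c.Reason = reason
	c.Message = message
}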
@@ -24,7 +24,7 @@ package v1
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
 	"github.com/arangodb/kube-arangodb/pkg/util"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 // MetricsAuthenticationSpec contains spec for authentication with arangodb

@@ -74,7 +74,7 @@ type MetricsSpec struct {
 	// deprecated
 	Image          *string                   `json:"image,omitempty"`
 	Authentication MetricsAuthenticationSpec `json:"authentication,omitempty"`
-	Resources      v1.ResourceRequirements   `json:"resources,omitempty"`
+	Resources      core.ResourceRequirements `json:"resources,omitempty"`
 	// deprecated
 	Mode *MetricsMode `json:"mode,omitempty"`
 	TLS  *bool        `json:"tls,omitempty"`
@@ -24,7 +24,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/handlers/utils"
 	"github.com/arangodb/kube-arangodb/pkg/util"

@@ -67,16 +67,16 @@ func TestDeploymentSpecResetImmutableFields(t *testing.T) {
 			nil,
 		},
 		{
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullAlways)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullAlways)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
 			false,
 			nil,
 		},
 		{
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullAlways)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullAlways)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
 			true,
 			nil,
 		},
@@ -23,7 +23,7 @@ package v1
 import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 // ExternalAccessType specifies the type of external access provides for the deployment

@@ -47,12 +47,12 @@ func (t ExternalAccessType) IsNodePort() bool { return t == ExternalAccessTy
 
 // AsServiceType returns the k8s ServiceType for this ExternalAccessType.
 // If type is "Auto", ServiceTypeLoadBalancer is returned.
-func (t ExternalAccessType) AsServiceType() v1.ServiceType {
+func (t ExternalAccessType) AsServiceType() core.ServiceType {
 	switch t {
 	case ExternalAccessTypeLoadBalancer, ExternalAccessTypeAuto:
-		return v1.ServiceTypeLoadBalancer
+		return core.ServiceTypeLoadBalancer
 	case ExternalAccessTypeNodePort:
-		return v1.ServiceTypeNodePort
+		return core.ServiceTypeNodePort
 	default:
 		return ""
 	}
@@ -21,11 +21,11 @@
 package v1
 
 import (
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 type LifecycleSpec struct {
-	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+	Resources core.ResourceRequirements `json:"resources,omitempty"`
 }
 
 // SetDefaultsFrom fills unspecified fields with a value from given source spec.
@@ -28,8 +28,8 @@ import (
 	driver "github.com/arangodb/go-driver"
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
 	"github.com/arangodb/kube-arangodb/pkg/util"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // MemberStatus holds the current status of a single member (server)

@@ -48,7 +48,7 @@ type MemberStatus struct {
 	// Phase holds the current lifetime phase of this member
 	Phase MemberPhase `json:"phase"`
 	// CreatedAt holds the creation timestamp of this member.
-	CreatedAt metav1.Time `json:"created-at"`
+	CreatedAt meta.Time `json:"created-at"`
 	// PersistentVolumeClaimName holds the name of the persistent volume claim used for this member (if any).
 	PersistentVolumeClaimName string `json:"persistentVolumeClaimName,omitempty"`
 	// PodName holds the name of the Pod that currently runs this member

@@ -61,7 +61,7 @@ type MemberStatus struct {
 	Conditions ConditionList `json:"conditions,omitempty"`
 	// RecentTerminatons holds the times when this member was recently terminated.
 	// First entry is the oldest. (do not add omitempty, since we want to be able to switch from a list to an empty list)
-	RecentTerminations []metav1.Time `json:"recent-terminations"`
+	RecentTerminations []meta.Time `json:"recent-terminations"`
 	// IsInitialized is set after the very first time a pod was created for this member.
 	// After that, DBServers must have a UUID field or fail.
 	IsInitialized bool `json:"initialized"`

@@ -86,7 +86,7 @@ type MemberStatus struct {
 
 	// deprecated
 	// SideCarSpecs contains list of specifications specified for side cars
-	SideCarSpecs map[string]v1.Container `json:"sidecars-specs,omitempty"`
+	SideCarSpecs map[string]core.Container `json:"sidecars-specs,omitempty"`
 }
 
 // Equal checks for equality

@@ -165,7 +165,7 @@ func (s MemberStatus) IsNotReadySince(timestamp time.Time) bool {
 	cond, found := s.Conditions.Get(ConditionTypeReady)
 	if found {
 		// B
-		return cond.Status != v1.ConditionTrue && cond.LastTransitionTime.Time.Before(timestamp)
+		return cond.Status != core.ConditionTrue && cond.LastTransitionTime.Time.Before(timestamp)
 	}
 	// A
 	return s.CreatedAt.Time.Before(timestamp)
@@ -27,7 +27,7 @@ import (
 
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 // MemberStatusList is a list of MemberStatus entries

@@ -227,7 +227,7 @@ func (l MemberStatusList) AllMembersReady() bool {
 }
 
 // AllConditionTrueSince returns true if all members satisfy the condition since the given period
-func (l MemberStatusList) AllConditionTrueSince(cond ConditionType, status v1.ConditionStatus, period time.Duration) bool {
+func (l MemberStatusList) AllConditionTrueSince(cond ConditionType, status core.ConditionStatus, period time.Duration) bool {
 	for _, x := range l {
 		if c, ok := x.Conditions.Get(cond); ok {
 			if c.Status == status && c.LastTransitionTime.Time.Add(period).Before(time.Now()) {
@@ -25,25 +25,25 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/assert"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // TestMemberStatusRecentTerminations tests the functions related to MemberStatus.RecentTerminations.
 func TestMemberStatusRecentTerminations(t *testing.T) {
-	relTime := func(delta time.Duration) metav1.Time {
-		return metav1.Time{Time: time.Now().Add(delta)}
+	relTime := func(delta time.Duration) meta.Time {
+		return meta.Time{Time: time.Now().Add(delta)}
 	}
 
 	s := MemberStatus{}
 	assert.Equal(t, 0, s.RecentTerminationsSince(time.Now().Add(-time.Hour)))
 	assert.Equal(t, 0, s.RemoveTerminationsBefore(time.Now()))
 
-	s.RecentTerminations = []metav1.Time{metav1.Now()}
+	s.RecentTerminations = []meta.Time{meta.Now()}
 	assert.Equal(t, 1, s.RecentTerminationsSince(time.Now().Add(-time.Minute)))
 	assert.Equal(t, 0, s.RecentTerminationsSince(time.Now().Add(time.Minute)))
 	assert.Equal(t, 0, s.RemoveTerminationsBefore(time.Now().Add(-time.Hour)))
 
-	s.RecentTerminations = []metav1.Time{relTime(-time.Hour), relTime(-time.Minute), relTime(time.Minute)}
+	s.RecentTerminations = []meta.Time{relTime(-time.Hour), relTime(-time.Minute), relTime(time.Minute)}
 	assert.Equal(t, 3, s.RecentTerminationsSince(time.Now().Add(-time.Hour*2)))
 	assert.Equal(t, 2, s.RecentTerminationsSince(time.Now().Add(-time.Minute*2)))
 	assert.Equal(t, 2, s.RemoveTerminationsBefore(time.Now()))

@@ -53,7 +53,7 @@ func TestMemberStatusRecentTerminations(t *testing.T) {
 // TestMemberStatusIsNotReadySince tests the functions related to MemberStatus.IsNotReadySince.
 func TestMemberStatusIsNotReadySince(t *testing.T) {
 	s := MemberStatus{
-		CreatedAt: metav1.Now(),
+		CreatedAt: meta.Now(),
 	}
 	assert.False(t, s.IsNotReadySince(time.Now().Add(-time.Hour)))

@@ -61,7 +61,7 @@ func TestMemberStatusIsNotReadySince(t *testing.T) {
 	assert.False(t, s.IsNotReadySince(time.Now().Add(-2*time.Hour)))
 	assert.True(t, s.IsNotReadySince(time.Now().Add(-(time.Hour - time.Minute))))
 
-	s.CreatedAt = metav1.Now()
+	s.CreatedAt = meta.Now()
 	s.Conditions.Update(ConditionTypeReady, true, "", "")
 	assert.False(t, s.IsNotReadySince(time.Now().Add(-time.Minute)))
 	assert.False(t, s.IsNotReadySince(time.Now().Add(time.Minute)))
@@ -22,7 +22,7 @@ package v1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/deployment"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )

@@ -55,6 +55,6 @@ func addKnownTypes(s *runtime.Scheme) error {
 		&ArangoTask{},
 		&ArangoTaskList{},
 	)
-	metav1.AddToGroupVersion(s, SchemeGroupVersion)
+	meta.AddToGroupVersion(s, SchemeGroupVersion)
 	return nil
 }
@@ -23,7 +23,7 @@ package v2alpha1
 import (
 	"github.com/arangodb/kube-arangodb/pkg/util"
 	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // ConditionType is a strongly typed condition name

@@ -117,9 +117,9 @@ type Condition struct {
 	// Status of the condition, one of True, False, Unknown.
 	Status core.ConditionStatus `json:"status"`
 	// The last time this condition was updated.
-	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+	LastUpdateTime meta.Time `json:"lastUpdateTime,omitempty"`
 	// Last time the condition transitioned from one status to another.
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	LastTransitionTime meta.Time `json:"lastTransitionTime,omitempty"`
 	// The reason for the condition's last transition.
 	Reason string `json:"reason,omitempty"`
 	// A human readable message indicating details about the transition.

@@ -205,7 +205,7 @@ func (list *ConditionList) Touch(conditionType ConditionType) bool {
 	src := *list
 	for i, x := range src {
 		if x.Type == conditionType {
-			src[i].LastUpdateTime = metav1.Now()
+			src[i].LastUpdateTime = meta.Now()
 			return true
 		}
 	}

@@ -234,7 +234,7 @@ func (list *ConditionList) update(conditionType ConditionType, status bool, reas
 
 	if index == -1 {
 		// Not found
-		now := metav1.Now()
+		now := meta.Now()
 		*list = append(src, Condition{
 			Type:           conditionType,
 			LastUpdateTime: now,

@@ -250,14 +250,14 @@ func (list *ConditionList) update(conditionType ConditionType, status bool, reas
 	if src[index].Status != statusX {
 		// Transition to another status
 		src[index].Status = statusX
-		now := metav1.Now()
+		now := meta.Now()
 		src[index].LastTransitionTime = now
 		src[index].LastUpdateTime = now
 		src[index].Reason = reason
 		src[index].Message = message
 		src[index].Hash = hash
 	} else if src[index].Reason != reason || src[index].Message != message || src[index].Hash != hash {
-		src[index].LastUpdateTime = metav1.Now()
+		src[index].LastUpdateTime = meta.Now()
 		src[index].Reason = reason
 		src[index].Message = message
 		src[index].Hash = hash
@@ -24,7 +24,7 @@ package v2alpha1
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
 	"github.com/arangodb/kube-arangodb/pkg/util"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 // MetricsAuthenticationSpec contains spec for authentication with arangodb

@@ -74,7 +74,7 @@ type MetricsSpec struct {
 	// deprecated
 	Image          *string                   `json:"image,omitempty"`
 	Authentication MetricsAuthenticationSpec `json:"authentication,omitempty"`
-	Resources      v1.ResourceRequirements   `json:"resources,omitempty"`
+	Resources      core.ResourceRequirements `json:"resources,omitempty"`
 	// deprecated
 	Mode *MetricsMode `json:"mode,omitempty"`
 	TLS  *bool        `json:"tls,omitempty"`
@@ -24,7 +24,7 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/handlers/utils"
 	"github.com/arangodb/kube-arangodb/pkg/util"

@@ -67,16 +67,16 @@ func TestDeploymentSpecResetImmutableFields(t *testing.T) {
 			nil,
 		},
 		{
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullAlways)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullAlways)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
 			false,
 			nil,
 		},
 		{
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullAlways)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
-			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(v1.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullAlways)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
+			DeploymentSpec{ImagePullPolicy: util.NewPullPolicy(core.PullNever)},
 			true,
 			nil,
 		},
@@ -23,7 +23,7 @@ package v2alpha1
 import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 // ExternalAccessType specifies the type of external access provides for the deployment

@@ -47,12 +47,12 @@ func (t ExternalAccessType) IsNodePort() bool { return t == ExternalAccessTy
 
 // AsServiceType returns the k8s ServiceType for this ExternalAccessType.
 // If type is "Auto", ServiceTypeLoadBalancer is returned.
-func (t ExternalAccessType) AsServiceType() v1.ServiceType {
+func (t ExternalAccessType) AsServiceType() core.ServiceType {
 	switch t {
 	case ExternalAccessTypeLoadBalancer, ExternalAccessTypeAuto:
-		return v1.ServiceTypeLoadBalancer
+		return core.ServiceTypeLoadBalancer
 	case ExternalAccessTypeNodePort:
-		return v1.ServiceTypeNodePort
+		return core.ServiceTypeNodePort
 	default:
 		return ""
 	}
@@ -21,11 +21,11 @@
 package v2alpha1
 
 import (
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 type LifecycleSpec struct {
-	Resources v1.ResourceRequirements `json:"resources,omitempty"`
+	Resources core.ResourceRequirements `json:"resources,omitempty"`
 }
 
 // SetDefaultsFrom fills unspecified fields with a value from given source spec.
@@ -28,8 +28,8 @@ import (
 	driver "github.com/arangodb/go-driver"
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
 	"github.com/arangodb/kube-arangodb/pkg/util"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // MemberStatus holds the current status of a single member (server)

@@ -48,7 +48,7 @@ type MemberStatus struct {
 	// Phase holds the current lifetime phase of this member
 	Phase MemberPhase `json:"phase"`
 	// CreatedAt holds the creation timestamp of this member.
-	CreatedAt metav1.Time `json:"created-at"`
+	CreatedAt meta.Time `json:"created-at"`
 	// PersistentVolumeClaimName holds the name of the persistent volume claim used for this member (if any).
 	PersistentVolumeClaimName string `json:"persistentVolumeClaimName,omitempty"`
 	// PodName holds the name of the Pod that currently runs this member

@@ -61,7 +61,7 @@ type MemberStatus struct {
 	Conditions ConditionList `json:"conditions,omitempty"`
 	// RecentTerminatons holds the times when this member was recently terminated.
 	// First entry is the oldest. (do not add omitempty, since we want to be able to switch from a list to an empty list)
-	RecentTerminations []metav1.Time `json:"recent-terminations"`
+	RecentTerminations []meta.Time `json:"recent-terminations"`
 	// IsInitialized is set after the very first time a pod was created for this member.
 	// After that, DBServers must have a UUID field or fail.
 	IsInitialized bool `json:"initialized"`

@@ -86,7 +86,7 @@ type MemberStatus struct {
 
 	// deprecated
 	// SideCarSpecs contains list of specifications specified for side cars
-	SideCarSpecs map[string]v1.Container `json:"sidecars-specs,omitempty"`
+	SideCarSpecs map[string]core.Container `json:"sidecars-specs,omitempty"`
 }
 
 // Equal checks for equality

@@ -165,7 +165,7 @@ func (s MemberStatus) IsNotReadySince(timestamp time.Time) bool {
 	cond, found := s.Conditions.Get(ConditionTypeReady)
 	if found {
 		// B
-		return cond.Status != v1.ConditionTrue && cond.LastTransitionTime.Time.Before(timestamp)
+		return cond.Status != core.ConditionTrue && cond.LastTransitionTime.Time.Before(timestamp)
 	}
 	// A
 	return s.CreatedAt.Time.Before(timestamp)
@@ -27,7 +27,7 @@ import (
 
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
 // MemberStatusList is a list of MemberStatus entries

@@ -227,7 +227,7 @@ func (l MemberStatusList) AllMembersReady() bool {
 }
 
 // AllConditionTrueSince returns true if all members satisfy the condition since the given period
-func (l MemberStatusList) AllConditionTrueSince(cond ConditionType, status v1.ConditionStatus, period time.Duration) bool {
+func (l MemberStatusList) AllConditionTrueSince(cond ConditionType, status core.ConditionStatus, period time.Duration) bool {
 	for _, x := range l {
 		if c, ok := x.Conditions.Get(cond); ok {
 			if c.Status == status && c.LastTransitionTime.Time.Add(period).Before(time.Now()) {
@@ -25,25 +25,25 @@ import (
 	"time"
 
 	"github.com/stretchr/testify/assert"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // TestMemberStatusRecentTerminations tests the functions related to MemberStatus.RecentTerminations.
 func TestMemberStatusRecentTerminations(t *testing.T) {
-	relTime := func(delta time.Duration) metav1.Time {
-		return metav1.Time{Time: time.Now().Add(delta)}
+	relTime := func(delta time.Duration) meta.Time {
+		return meta.Time{Time: time.Now().Add(delta)}
 	}
 
 	s := MemberStatus{}
 	assert.Equal(t, 0, s.RecentTerminationsSince(time.Now().Add(-time.Hour)))
 	assert.Equal(t, 0, s.RemoveTerminationsBefore(time.Now()))
 
-	s.RecentTerminations = []metav1.Time{metav1.Now()}
+	s.RecentTerminations = []meta.Time{meta.Now()}
 	assert.Equal(t, 1, s.RecentTerminationsSince(time.Now().Add(-time.Minute)))
 	assert.Equal(t, 0, s.RecentTerminationsSince(time.Now().Add(time.Minute)))
 	assert.Equal(t, 0, s.RemoveTerminationsBefore(time.Now().Add(-time.Hour)))
 
-	s.RecentTerminations = []metav1.Time{relTime(-time.Hour), relTime(-time.Minute), relTime(time.Minute)}
+	s.RecentTerminations = []meta.Time{relTime(-time.Hour), relTime(-time.Minute), relTime(time.Minute)}
 	assert.Equal(t, 3, s.RecentTerminationsSince(time.Now().Add(-time.Hour*2)))
 	assert.Equal(t, 2, s.RecentTerminationsSince(time.Now().Add(-time.Minute*2)))
 	assert.Equal(t, 2, s.RemoveTerminationsBefore(time.Now()))

@@ -53,7 +53,7 @@ func TestMemberStatusRecentTerminations(t *testing.T) {
 // TestMemberStatusIsNotReadySince tests the functions related to MemberStatus.IsNotReadySince.
 func TestMemberStatusIsNotReadySince(t *testing.T) {
 	s := MemberStatus{
-		CreatedAt: metav1.Now(),
+		CreatedAt: meta.Now(),
 	}
 	assert.False(t, s.IsNotReadySince(time.Now().Add(-time.Hour)))

@@ -61,7 +61,7 @@ func TestMemberStatusIsNotReadySince(t *testing.T) {
 	assert.False(t, s.IsNotReadySince(time.Now().Add(-2*time.Hour)))
 	assert.True(t, s.IsNotReadySince(time.Now().Add(-(time.Hour - time.Minute))))
 
-	s.CreatedAt = metav1.Now()
+	s.CreatedAt = meta.Now()
 	s.Conditions.Update(ConditionTypeReady, true, "", "")
 	assert.False(t, s.IsNotReadySince(time.Now().Add(-time.Minute)))
 	assert.False(t, s.IsNotReadySince(time.Now().Add(time.Minute)))
@@ -22,7 +22,7 @@ package v2alpha1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/deployment"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )

@@ -55,6 +55,6 @@ func addKnownTypes(s *runtime.Scheme) error {
 		&ArangoTask{},
 		&ArangoTaskList{},
 	)
-	metav1.AddToGroupVersion(s, SchemeGroupVersion)
+	meta.AddToGroupVersion(s, SchemeGroupVersion)
 	return nil
 }
@@ -21,8 +21,8 @@
 package v1
 
 import (
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // ConditionType is a strongly typed condition name

@@ -40,11 +40,11 @@ type Condition struct {
 	// Type of condition.
 	Type ConditionType `json:"type"`
 	// Status of the condition, one of True, False, Unknown.
-	Status v1.ConditionStatus `json:"status"`
+	Status core.ConditionStatus `json:"status"`
 	// The last time this condition was updated.
-	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+	LastUpdateTime meta.Time `json:"lastUpdateTime,omitempty"`
 	// Last time the condition transitioned from one status to another.
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	LastTransitionTime meta.Time `json:"lastTransitionTime,omitempty"`
 	// The reason for the condition's last transition.
 	Reason string `json:"reason,omitempty"`
 	// A human readable message indicating details about the transition.

@@ -58,7 +58,7 @@ type ConditionList []Condition
 // IsTrue return true when a condition with given type exists and its status is `True`.
 func (list ConditionList) IsTrue(conditionType ConditionType) bool {
 	c, found := list.Get(conditionType)
-	return found && c.Status == v1.ConditionTrue
+	return found && c.Status == core.ConditionTrue
 }
 
 // Get a condition by type.

@@ -77,22 +77,22 @@ func (list ConditionList) Get(conditionType ConditionType) (Condition, bool) {
 // Returns true when changes were made, false otherwise.
 func (list *ConditionList) Update(conditionType ConditionType, status bool, reason, message string) bool {
 	src := *list
-	statusX := v1.ConditionFalse
+	statusX := core.ConditionFalse
 	if status {
-		statusX = v1.ConditionTrue
+		statusX = core.ConditionTrue
 	}
 	for i, x := range src {
 		if x.Type == conditionType {
 			if x.Status != statusX {
 				// Transition to another status
 				src[i].Status = statusX
-				now := metav1.Now()
+				now := meta.Now()
 				src[i].LastTransitionTime = now
 				src[i].LastUpdateTime = now
 				src[i].Reason = reason
 				src[i].Message = message
 			} else if x.Reason != reason || x.Message != message {
-				src[i].LastUpdateTime = metav1.Now()
+				src[i].LastUpdateTime = meta.Now()
 				src[i].Reason = reason
 				src[i].Message = message
 			} else {

@@ -102,7 +102,7 @@ func (list *ConditionList) Update(conditionType ConditionType, status bool, reas
 		}
 	}
 	// Not found
-	now := metav1.Now()
+	now := meta.Now()
 	*list = append(src, Condition{
 		Type:           conditionType,
 		LastUpdateTime: now,
@@ -22,7 +22,7 @@ package v1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/replication"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )

@@ -50,6 +50,6 @@ func addKnownTypes(s *runtime.Scheme) error {
 		&ArangoDeploymentReplication{},
 		&ArangoDeploymentReplicationList{},
 	)
-	metav1.AddToGroupVersion(s, SchemeGroupVersion)
+	meta.AddToGroupVersion(s, SchemeGroupVersion)
 	return nil
 }
@@ -22,18 +22,18 @@ package v1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/replication"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // ArangoDeploymentReplicationList is a list of ArangoDB deployment replications.
 type ArangoDeploymentReplicationList struct {
-	metav1.TypeMeta `json:",inline"`
+	meta.TypeMeta `json:",inline"`
 	// Standard list metadata
 	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []ArangoDeploymentReplication `json:"items"`
+	meta.ListMeta `json:"metadata,omitempty"`
+	Items         []ArangoDeploymentReplication `json:"items"`
 }
 
 // +genclient

@@ -42,16 +42,16 @@ type ArangoDeploymentReplicationList struct {
 // ArangoDeploymentReplication contains the entire Kubernetes info for an ArangoDB
 // local storage provider.
 type ArangoDeploymentReplication struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-	Spec              DeploymentReplicationSpec   `json:"spec"`
-	Status            DeploymentReplicationStatus `json:"status"`
+	meta.TypeMeta   `json:",inline"`
+	meta.ObjectMeta `json:"metadata,omitempty"`
+	Spec            DeploymentReplicationSpec   `json:"spec"`
+	Status          DeploymentReplicationStatus `json:"status"`
 }
 
 // AsOwner creates an OwnerReference for the given replication
-func (d *ArangoDeploymentReplication) AsOwner() metav1.OwnerReference {
+func (d *ArangoDeploymentReplication) AsOwner() meta.OwnerReference {
 	trueVar := true
-	return metav1.OwnerReference{
+	return meta.OwnerReference{
 		APIVersion: SchemeGroupVersion.String(),
 		Kind:       replication.ArangoDeploymentReplicationResourceKind,
 		Name:       d.Name,
@@ -21,8 +21,8 @@
 package v2alpha1
 
 import (
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // ConditionType is a strongly typed condition name

@@ -40,11 +40,11 @@ type Condition struct {
 	// Type of condition.
 	Type ConditionType `json:"type"`
 	// Status of the condition, one of True, False, Unknown.
-	Status v1.ConditionStatus `json:"status"`
+	Status core.ConditionStatus `json:"status"`
 	// The last time this condition was updated.
-	LastUpdateTime metav1.Time `json:"lastUpdateTime,omitempty"`
+	LastUpdateTime meta.Time `json:"lastUpdateTime,omitempty"`
 	// Last time the condition transitioned from one status to another.
-	LastTransitionTime metav1.Time `json:"lastTransitionTime,omitempty"`
+	LastTransitionTime meta.Time `json:"lastTransitionTime,omitempty"`
 	// The reason for the condition's last transition.
 	Reason string `json:"reason,omitempty"`
 	// A human readable message indicating details about the transition.

@@ -58,7 +58,7 @@ type ConditionList []Condition
 // IsTrue return true when a condition with given type exists and its status is `True`.
 func (list ConditionList) IsTrue(conditionType ConditionType) bool {
 	c, found := list.Get(conditionType)
-	return found && c.Status == v1.ConditionTrue
+	return found && c.Status == core.ConditionTrue
 }
 
 // Get a condition by type.

@@ -77,22 +77,22 @@ func (list ConditionList) Get(conditionType ConditionType) (Condition, bool) {
 // Returns true when changes were made, false otherwise.
 func (list *ConditionList) Update(conditionType ConditionType, status bool, reason, message string) bool {
 	src := *list
-	statusX := v1.ConditionFalse
+	statusX := core.ConditionFalse
 	if status {
-		statusX = v1.ConditionTrue
+		statusX = core.ConditionTrue
 	}
 	for i, x := range src {
 		if x.Type == conditionType {
 			if x.Status != statusX {
 				// Transition to another status
 				src[i].Status = statusX
-				now := metav1.Now()
+				now := meta.Now()
 				src[i].LastTransitionTime = now
 				src[i].LastUpdateTime = now
 				src[i].Reason = reason
 				src[i].Message = message
 			} else if x.Reason != reason || x.Message != message {
-				src[i].LastUpdateTime = metav1.Now()
+				src[i].LastUpdateTime = meta.Now()
 				src[i].Reason = reason
 				src[i].Message = message
 			} else {

@@ -102,7 +102,7 @@ func (list *ConditionList) Update(conditionType ConditionType, status bool, reas
 		}
 	}
 	// Not found
-	now := metav1.Now()
+	now := meta.Now()
 	*list = append(src, Condition{
 		Type:           conditionType,
 		LastUpdateTime: now,
@@ -22,7 +22,7 @@ package v2alpha1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/replication"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 )

@@ -50,6 +50,6 @@ func addKnownTypes(s *runtime.Scheme) error {
 		&ArangoDeploymentReplication{},
 		&ArangoDeploymentReplicationList{},
 	)
-	metav1.AddToGroupVersion(s, SchemeGroupVersion)
+	meta.AddToGroupVersion(s, SchemeGroupVersion)
 	return nil
 }
@@ -22,18 +22,18 @@ package v2alpha1
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/replication"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
 
 // ArangoDeploymentReplicationList is a list of ArangoDB deployment replications.
 type ArangoDeploymentReplicationList struct {
-	metav1.TypeMeta `json:",inline"`
+	meta.TypeMeta `json:",inline"`
 	// Standard list metadata
 	// More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
-	metav1.ListMeta `json:"metadata,omitempty"`
-	Items           []ArangoDeploymentReplication `json:"items"`
+	meta.ListMeta `json:"metadata,omitempty"`
+	Items         []ArangoDeploymentReplication `json:"items"`
 }
 
 // +genclient

@@ -42,16 +42,16 @@ type ArangoDeploymentReplicationList struct {
 // ArangoDeploymentReplication contains the entire Kubernetes info for an ArangoDB
 // local storage provider.
 type ArangoDeploymentReplication struct {
-	metav1.TypeMeta   `json:",inline"`
-	metav1.ObjectMeta `json:"metadata,omitempty"`
-	Spec              DeploymentReplicationSpec   `json:"spec"`
-	Status            DeploymentReplicationStatus `json:"status"`
+	meta.TypeMeta   `json:",inline"`
+	meta.ObjectMeta `json:"metadata,omitempty"`
+	Spec            DeploymentReplicationSpec   `json:"spec"`
+	Status          DeploymentReplicationStatus `json:"status"`
 }
 
 // AsOwner creates an OwnerReference for the given replication
-func (d *ArangoDeploymentReplication) AsOwner() metav1.OwnerReference {
+func (d *ArangoDeploymentReplication) AsOwner() meta.OwnerReference {
 	trueVar := true
-	return metav1.OwnerReference{
+	return meta.OwnerReference{
 		APIVersion: SchemeGroupVersion.String(),
 		Kind:       replication.ArangoDeploymentReplicationResourceKind,
 		Name:       d.Name,
@@ -21,18 +21,18 @@
 package v1alpha

 import (
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

 // ArangoLocalStorageList is a list of ArangoDB local storage providers.
 type ArangoLocalStorageList struct {
-    metav1.TypeMeta `json:",inline"`
+    meta.TypeMeta `json:",inline"`
     // Standard list metadata
     // More info: http://releases.k8s.io/HEAD/docs/devel/api-conventions.md#metadata
-    metav1.ListMeta `json:"metadata,omitempty"`
-    Items           []ArangoLocalStorage `json:"items"`
+    meta.ListMeta `json:"metadata,omitempty"`
+    Items         []ArangoLocalStorage `json:"items"`
 }

 // +genclient
@@ -42,15 +42,15 @@ type ArangoLocalStorageList struct {
 // ArangoLocalStorage contains the entire Kubernetes info for an ArangoDB
 // local storage provider.
 type ArangoLocalStorage struct {
-    metav1.TypeMeta   `json:",inline"`
-    metav1.ObjectMeta `json:"metadata,omitempty"`
-    Spec              LocalStorageSpec   `json:"spec"`
-    Status            LocalStorageStatus `json:"status"`
+    meta.TypeMeta   `json:",inline"`
+    meta.ObjectMeta `json:"metadata,omitempty"`
+    Spec            LocalStorageSpec   `json:"spec"`
+    Status          LocalStorageStatus `json:"status"`
 }

 // AsOwner creates an OwnerReference for the given storage
-func (d *ArangoLocalStorage) AsOwner() metav1.OwnerReference {
-    return metav1.OwnerReference{
+func (d *ArangoLocalStorage) AsOwner() meta.OwnerReference {
+    return meta.OwnerReference{
         APIVersion: SchemeGroupVersion.String(),
         Kind:       ArangoLocalStorageResourceKind,
         Name:       d.Name,
@@ -21,7 +21,7 @@
 package v1alpha

 import (
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/runtime/schema"
 )
@@ -53,6 +53,6 @@ func addKnownTypes(s *runtime.Scheme) error {
         &ArangoLocalStorage{},
         &ArangoLocalStorageList{},
     )
-    metav1.AddToGroupVersion(s, SchemeGroupVersion)
+    meta.AddToGroupVersion(s, SchemeGroupVersion)
     return nil
 }
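Both register files above follow the same scheme-registration pattern: add the custom types to a runtime.Scheme, then register the shared meta types for the group/version. A self-contained sketch of that pattern under the unified alias — the group name and the Example types are placeholders, not the repository's actual values, and the shallow DeepCopyObject stands in for the generated deep-copy code:

package example

import (
    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime"
    "k8s.io/apimachinery/pkg/runtime/schema"
)

// SchemeGroupVersion names the API group/version being registered
// (placeholder group, not the repository's real one).
var SchemeGroupVersion = schema.GroupVersion{Group: "example.arangodb.com", Version: "v1alpha"}

// Example is a minimal custom resource; embedding meta.TypeMeta provides
// GetObjectKind, so only DeepCopyObject is needed to satisfy runtime.Object.
type Example struct {
    meta.TypeMeta   `json:",inline"`
    meta.ObjectMeta `json:"metadata,omitempty"`
}

func (in *Example) DeepCopyObject() runtime.Object { out := *in; return &out }

// ExampleList is the corresponding list type.
type ExampleList struct {
    meta.TypeMeta `json:",inline"`
    meta.ListMeta `json:"metadata,omitempty"`
    Items         []Example `json:"items"`
}

func (in *ExampleList) DeepCopyObject() runtime.Object { out := *in; return &out }

// addKnownTypes registers both types plus the shared meta types
// (GetOptions, ListOptions, ...) for this group/version.
func addKnownTypes(s *runtime.Scheme) error {
    s.AddKnownTypes(SchemeGroupVersion, &Example{}, &ExampleList{})
    meta.AddToGroupVersion(s, SchemeGroupVersion)
    return nil
}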
@@ -31,8 +31,8 @@ import (

     certificates "github.com/arangodb-helper/go-certificates"

-    v1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    core "k8s.io/api/core/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/arangodb/kube-arangodb/pkg/util/constants"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
@@ -74,8 +74,8 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error {
         if _, wanted := apNameMap[secret.GetName()]; !wanted {
             // We found an obsolete access package secret. Remove it.
             err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
-                return d.SecretsModInterface().Delete(ctxChild, secret.GetName(), metav1.DeleteOptions{
-                    Preconditions: &metav1.Preconditions{UID: &secret.UID},
+                return d.SecretsModInterface().Delete(ctxChild, secret.GetName(), meta.DeleteOptions{
+                    Preconditions: &meta.Preconditions{UID: &secret.UID},
                 })
             })
             if err != nil && !k8sutil.IsNotFound(err) {
@@ -101,7 +101,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
     log := d.sectionLogger("access-package")
     spec := d.apiObject.Spec

-    _, err := d.acs.CurrentClusterCache().Secret().V1().Read().Get(ctx, apSecretName, metav1.GetOptions{})
+    _, err := d.acs.CurrentClusterCache().Secret().V1().Read().Get(ctx, apSecretName, meta.GetOptions{})
     if err == nil {
         // Secret already exists
         return nil
@@ -147,12 +147,12 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
     keyfile := strings.TrimSpace(cert) + "\n" + strings.TrimSpace(key)

     // Create secrets (in memory)
-    keyfileSecret := v1.Secret{
-        TypeMeta: metav1.TypeMeta{
+    keyfileSecret := core.Secret{
+        TypeMeta: meta.TypeMeta{
             APIVersion: "v1",
             Kind:       "Secret",
         },
-        ObjectMeta: metav1.ObjectMeta{
+        ObjectMeta: meta.ObjectMeta{
             Name: apSecretName + "-auth",
             Labels: map[string]string{
                 labelKeyOriginalDeployment: d.apiObject.GetName(),
@@ -163,12 +163,12 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
         },
         Type: "Opaque",
     }
-    tlsCASecret := v1.Secret{
-        TypeMeta: metav1.TypeMeta{
+    tlsCASecret := core.Secret{
+        TypeMeta: meta.TypeMeta{
             APIVersion: "v1",
             Kind:       "Secret",
         },
-        ObjectMeta: metav1.ObjectMeta{
+        ObjectMeta: meta.ObjectMeta{
             Name: apSecretName + "-ca",
             Labels: map[string]string{
                 labelKeyOriginalDeployment: d.apiObject.GetName(),
@@ -194,8 +194,8 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
     allYaml := strings.TrimSpace(string(keyfileYaml)) + "\n---\n" + strings.TrimSpace(string(tlsCAYaml))

     // Create secret containing access package
-    secret := &v1.Secret{
-        ObjectMeta: metav1.ObjectMeta{
+    secret := &core.Secret{
+        ObjectMeta: meta.ObjectMeta{
             Name: apSecretName,
         },
         Data: map[string][]byte{
@@ -207,7 +207,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
     // Attach secret to owner
     secret.SetOwnerReferences(append(secret.GetOwnerReferences(), d.apiObject.AsOwner()))
     err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
-        _, err := d.SecretsModInterface().Create(ctxChild, secret, metav1.CreateOptions{})
+        _, err := d.SecretsModInterface().Create(ctxChild, secret, meta.CreateOptions{})
         return err
     })
     if err != nil {
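The delete call in createAccessPackages guards against a race: the Preconditions field makes the API server refuse the delete if the secret's UID no longer matches, so a secret that was deleted and recreated in the meantime is not silently removed. A standalone sketch of the same guard — client construction and names are illustrative:

package example

import (
    "context"

    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/kubernetes"
)

// deleteSecretByUID deletes the named secret only while it still carries the
// given UID; if the object was replaced (new UID), the API server rejects the
// delete with a conflict instead of removing the new object.
func deleteSecretByUID(ctx context.Context, cli kubernetes.Interface, ns, name string, uid types.UID) error {
    return cli.CoreV1().Secrets(ns).Delete(ctx, name, meta.DeleteOptions{
        Preconditions: &meta.Preconditions{UID: &uid},
    })
}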
@@ -24,7 +24,7 @@ import (
     "context"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

@@ -36,5 +36,5 @@ type Context interface {
     // of the deployment. If the pod does not exist, the error is ignored.
     DeletePod(ctx context.Context, podName string, options meta.DeleteOptions) error
     // GetOwnedPods returns a list of all pods owned by the deployment.
-    GetOwnedPods(ctx context.Context) ([]v1.Pod, error)
+    GetOwnedPods(ctx context.Context) ([]core.Pod, error)
 }
@@ -30,7 +30,7 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/util/errors"

     "github.com/rs/zerolog"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/logging"
@@ -212,7 +212,7 @@ func (ci *clusterScalingIntegration) inspectCluster(ctx context.Context, expectS
     apiObject := ci.depl.apiObject
     ctxChild, cancel = globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
     defer cancel()
-    current, err := ci.depl.deps.Client.Arango().DatabaseV1().ArangoDeployments(apiObject.Namespace).Get(ctxChild, apiObject.Name, metav1.GetOptions{})
+    current, err := ci.depl.deps.Client.Arango().DatabaseV1().ArangoDeployments(apiObject.Namespace).Get(ctxChild, apiObject.Name, meta.GetOptions{})
     if err != nil {
         return errors.WithStack(err)
     }
@@ -29,7 +29,7 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
@@ -58,7 +58,7 @@ func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus i
     depls := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.GetNamespace())
     ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
     defer cancel()
-    updated, err := depls.Get(ctxChild, d.apiObject.GetName(), metav1.GetOptions{})
+    updated, err := depls.Get(ctxChild, d.apiObject.GetName(), meta.GetOptions{})
     if err != nil {
         return errors.WithStack(err)
     }
@@ -108,22 +108,22 @@ func (d *Deployment) inspectRemoveChildFinalizers(ctx context.Context, _ *api.Ar
 func removeDeploymentFinalizers(ctx context.Context, cli versioned.Interface,
     depl *api.ArangoDeployment, finalizers []string) error {
     depls := cli.DatabaseV1().ArangoDeployments(depl.GetNamespace())
-    getFunc := func() (metav1.Object, error) {
+    getFunc := func() (meta.Object, error) {
         ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
         defer cancel()

-        result, err := depls.Get(ctxChild, depl.GetName(), metav1.GetOptions{})
+        result, err := depls.Get(ctxChild, depl.GetName(), meta.GetOptions{})
         if err != nil {
             return nil, errors.WithStack(err)
         }
         return result, nil
     }
-    updateFunc := func(updated metav1.Object) error {
+    updateFunc := func(updated meta.Object) error {
         updatedDepl := updated.(*api.ArangoDeployment)
         ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
         defer cancel()

-        result, err := depls.Update(ctxChild, updatedDepl, metav1.UpdateOptions{})
+        result, err := depls.Update(ctxChild, updatedDepl, meta.UpdateOptions{})
         if err != nil {
             return errors.WithStack(err)
         }
@@ -34,7 +34,7 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/apis/shared"
     "github.com/stretchr/testify/require"
     core "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func TestEnsurePod_Sync_Error(t *testing.T) {
@@ -99,7 +99,7 @@ func TestEnsurePod_Sync_Error(t *testing.T) {
             }

             secretName := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
-            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
+            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, meta.DeleteOptions{})
             require.NoError(t, err)
         },
         ExpectedError: errors.New("Monitoring token secret validation failed: secrets \"" +
@@ -133,7 +133,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
             }

             secretName := testCase.ArangoDeployment.Spec.Sync.TLS.GetCASecretName()
-            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
+            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, meta.DeleteOptions{})
             require.NoError(t, err)
         },
         ExpectedError: errors.New("Failed to create TLS keyfile secret: secrets \"" +
@@ -161,7 +161,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
             }

             secretName := testCase.ArangoDeployment.Spec.Authentication.GetJWTSecretName()
-            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
+            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, meta.DeleteOptions{})
             require.NoError(t, err)
         },
         ExpectedError: errors.New("Cluster JWT secret validation failed: secrets \"" +
@@ -189,7 +189,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
             }

             secretName := testCase.ArangoDeployment.Spec.Sync.Authentication.GetClientCASecretName()
-            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
+            err := deployment.SecretsModInterface().Delete(context.Background(), secretName, meta.DeleteOptions{})
             require.NoError(t, err)
         },
         ExpectedError: errors.New("Client authentication CA certificate secret validation failed: " +
@@ -38,7 +38,7 @@ import (
     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/stretchr/testify/assert"
     "github.com/stretchr/testify/require"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func runTestCases(t *testing.T, testCases ...testCaseStruct) {
@@ -104,7 +104,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
             }

             // Create custom resource in the fake kubernetes API
-            _, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(testNamespace).Create(context.Background(), d.apiObject, metav1.CreateOptions{})
+            _, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(testNamespace).Create(context.Background(), d.apiObject, meta.CreateOptions{})
             require.NoError(t, err)

             if testCase.Resources != nil {
@@ -143,7 +143,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
                 for _, m := range list {

                     member := api.ArangoMember{
-                        ObjectMeta: metav1.ObjectMeta{
+                        ObjectMeta: meta.ObjectMeta{
                             Namespace: d.GetNamespace(),
                             Name:      m.ArangoMemberName(d.GetName(), group),
                         },
@@ -160,13 +160,13 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
                     }

                     s := core.Service{
-                        ObjectMeta: metav1.ObjectMeta{
+                        ObjectMeta: meta.ObjectMeta{
                             Name:      member.GetName(),
                             Namespace: member.GetNamespace(),
                         },
                     }

-                    if _, err := d.ServicesModInterface().Create(context.Background(), &s, metav1.CreateOptions{}); err != nil {
+                    if _, err := d.ServicesModInterface().Create(context.Background(), &s, meta.CreateOptions{}); err != nil {
                         return err
                     }

@@ -228,7 +228,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
             }

             require.NoError(t, err)
-            pods, err := d.deps.Client.Kubernetes().CoreV1().Pods(testNamespace).List(context.Background(), metav1.ListOptions{})
+            pods, err := d.deps.Client.Kubernetes().CoreV1().Pods(testNamespace).List(context.Background(), meta.ListOptions{})
             require.NoError(t, err)
             require.Len(t, pods.Items, 1)
             if util.BoolOrDefault(testCase.CompareChecksum, true) {
@@ -35,7 +35,7 @@ import (
     monitoringFakeClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
     "github.com/stretchr/testify/require"
     core "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/kubernetes/fake"
     recordfake "k8s.io/client-go/tools/record"

@@ -456,7 +456,7 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara
     monitoringClientSet := monitoringFakeClient.NewSimpleClientset()
     arangoClientSet := arangofake.NewSimpleClientset()

-    arangoDeployment.ObjectMeta = metav1.ObjectMeta{
+    arangoDeployment.ObjectMeta = meta.ObjectMeta{
         Name:      testDeploymentName,
         Namespace: testNamespace,
     }
@@ -562,11 +562,11 @@ func (testCase *testCaseStruct) createTestPodData(deployment *Deployment, group
     podName := k8sutil.CreatePodName(testDeploymentName, group.AsRoleAbbreviated(), memberStatus.ID,
         resources.CreatePodSuffix(testCase.ArangoDeployment.Spec))

-    testCase.ExpectedPod.ObjectMeta = metav1.ObjectMeta{
+    testCase.ExpectedPod.ObjectMeta = meta.ObjectMeta{
         Name:      podName,
         Namespace: testNamespace,
         Labels:    k8sutil.LabelsForMember(testDeploymentName, group.AsRole(), memberStatus.ID),
-        OwnerReferences: []metav1.OwnerReference{
+        OwnerReferences: []meta.OwnerReference{
             testCase.ArangoDeployment.AsOwner(),
         },
         Finalizers: finalizers(group),
@@ -28,7 +28,7 @@ import (
     "time"

     core "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"

@@ -140,14 +140,14 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
     // Check if pod exists
     ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
     defer cancel()
-    pod, err := ib.Context.ACS().CurrentClusterCache().Pod().V1().Read().Get(ctxChild, podName, metav1.GetOptions{})
+    pod, err := ib.Context.ACS().CurrentClusterCache().Pod().V1().Read().Get(ctxChild, podName, meta.GetOptions{})
     if err == nil {
         // Pod found
         if k8sutil.IsPodFailed(pod, utils.StringList{shared.ServerContainerName}) {
             // Wait some time before deleting the pod
             if time.Now().After(pod.GetCreationTimestamp().Add(30 * time.Second)) {
                 err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
-                    return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, metav1.DeleteOptions{})
+                    return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, meta.DeleteOptions{})
                 })
                 if err != nil && !k8sutil.IsNotFound(err) {
                     log.Err(err).Warn("Failed to delete Image ID Pod")
@@ -189,7 +189,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac

     // We have all the info we need now, kill the pod and store the image info.
     err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
-        return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, metav1.DeleteOptions{})
+        return ib.Context.ACS().CurrentClusterCache().PodsModInterface().V1().Delete(ctxChild, podName, meta.DeleteOptions{})
     })
     if err != nil && !k8sutil.IsNotFound(err) {
         log.Err(err).Warn("Failed to delete Image ID Pod")
@@ -21,7 +21,7 @@
 package deployment

 import (
-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
     crdv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
     "k8s.io/client-go/tools/cache"

@@ -30,14 +30,14 @@ import (

 // listenForPodEvents keep listening for changes in pod until the given channel is closed.
 func (d *Deployment) listenForPodEvents(stopCh <-chan struct{}) {
-    getPod := func(obj interface{}) (*v1.Pod, bool) {
-        pod, ok := obj.(*v1.Pod)
+    getPod := func(obj interface{}) (*core.Pod, bool) {
+        pod, ok := obj.(*core.Pod)
         if !ok {
             tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
             if !ok {
                 return nil, false
             }
-            pod, ok = tombstone.Obj.(*v1.Pod)
+            pod, ok = tombstone.Obj.(*core.Pod)
             return pod, ok
         }
         return pod, true
@@ -47,7 +47,7 @@ func (d *Deployment) listenForPodEvents(stopCh <-chan struct{}) {
         d.deps.Client.Kubernetes().CoreV1().RESTClient(),
         "pods",
         d.apiObject.GetNamespace(),
-        &v1.Pod{},
+        &core.Pod{},
         cache.ResourceEventHandlerFuncs{
             AddFunc: func(obj interface{}) {
                 d.acs.CurrentClusterCache().GetThrottles().Pod().Invalidate()
@@ -74,14 +74,14 @@ func (d *Deployment) listenForPodEvents(stopCh <-chan struct{}) {

 // listenForPVCEvents keep listening for changes in PVC's until the given channel is closed.
 func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) {
-    getPVC := func(obj interface{}) (*v1.PersistentVolumeClaim, bool) {
-        pvc, ok := obj.(*v1.PersistentVolumeClaim)
+    getPVC := func(obj interface{}) (*core.PersistentVolumeClaim, bool) {
+        pvc, ok := obj.(*core.PersistentVolumeClaim)
         if !ok {
             tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
             if !ok {
                 return nil, false
             }
-            pvc, ok = tombstone.Obj.(*v1.PersistentVolumeClaim)
+            pvc, ok = tombstone.Obj.(*core.PersistentVolumeClaim)
             return pvc, ok
         }
         return pvc, true
@@ -91,7 +91,7 @@ func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) {
         d.deps.Client.Kubernetes().CoreV1().RESTClient(),
         "persistentvolumeclaims",
         d.apiObject.GetNamespace(),
-        &v1.PersistentVolumeClaim{},
+        &core.PersistentVolumeClaim{},
         cache.ResourceEventHandlerFuncs{
             AddFunc: func(obj interface{}) {
                 d.acs.CurrentClusterCache().GetThrottles().PersistentVolumeClaim().Invalidate()
@@ -119,13 +119,13 @@ func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) {
 // listenForSecretEvents keep listening for changes in Secrets's until the given channel is closed.
 func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) {
     getSecret := func(obj interface{}) bool {
-        _, ok := obj.(*v1.Secret)
+        _, ok := obj.(*core.Secret)
         if !ok {
             tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
             if !ok {
                 return false
             }
-            _, ok = tombstone.Obj.(*v1.Secret)
+            _, ok = tombstone.Obj.(*core.Secret)
             return ok
         }
         return true
@@ -135,7 +135,7 @@ func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) {
         d.deps.Client.Kubernetes().CoreV1().RESTClient(),
         "secrets",
         d.apiObject.GetNamespace(),
-        &v1.Secret{},
+        &core.Secret{},
         cache.ResourceEventHandlerFuncs{
             // Note: For secrets we look at all of them because they do not have to be owned by this deployment.
             AddFunc: func(obj interface{}) {
@@ -163,14 +163,14 @@ func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) {

 // listenForServiceEvents keep listening for changes in Service's until the given channel is closed.
 func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) {
-    getService := func(obj interface{}) (*v1.Service, bool) {
-        service, ok := obj.(*v1.Service)
+    getService := func(obj interface{}) (*core.Service, bool) {
+        service, ok := obj.(*core.Service)
         if !ok {
             tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
             if !ok {
                 return nil, false
             }
-            service, ok = tombstone.Obj.(*v1.Service)
+            service, ok = tombstone.Obj.(*core.Service)
             return service, ok
         }
         return service, true
@@ -180,7 +180,7 @@ func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) {
         d.deps.Client.Kubernetes().CoreV1().RESTClient(),
         "services",
         d.apiObject.GetNamespace(),
-        &v1.Service{},
+        &core.Service{},
         cache.ResourceEventHandlerFuncs{
             AddFunc: func(obj interface{}) {
                 d.acs.CurrentClusterCache().GetThrottles().Service().Invalidate()
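All four listeners above repeat the same tombstone-unwrapping step: when the informer's watch misses a delete event, the handler receives a cache.DeletedFinalStateUnknown wrapper instead of the object itself. A standalone sketch of that pattern for pods (function name is illustrative):

package example

import (
    core "k8s.io/api/core/v1"
    "k8s.io/client-go/tools/cache"
)

// podFromEvent extracts a *core.Pod from an informer event object. On a
// missed delete, the informer delivers a DeletedFinalStateUnknown tombstone
// whose Obj field holds the last known state of the pod.
func podFromEvent(obj interface{}) (*core.Pod, bool) {
    if pod, ok := obj.(*core.Pod); ok {
        return pod, true
    }
    tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
    if !ok {
        return nil, false
    }
    pod, ok := tombstone.Obj.(*core.Pod)
    return pod, ok
}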
@@ -31,7 +31,7 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/apis/shared"
@@ -139,7 +139,7 @@ func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.Server
     return &api.MemberStatus{
         ID:                        id,
         UID:                       uuid.NewUUID(),
-        CreatedAt:                 metav1.Now(),
+        CreatedAt:                 meta.Now(),
         Phase:                     api.MemberPhaseNone,
         PersistentVolumeClaimName: shared.CreatePersistentVolumeClaimName(deploymentName, role, id),
         PodName:                   "",
@@ -151,7 +151,7 @@ func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.Server
     return &api.MemberStatus{
         ID:                        id,
         UID:                       uuid.NewUUID(),
-        CreatedAt:                 metav1.Now(),
+        CreatedAt:                 meta.Now(),
         Phase:                     api.MemberPhaseNone,
         PersistentVolumeClaimName: shared.CreatePersistentVolumeClaimName(deploymentName, role, id),
         PodName:                   "",
@@ -163,7 +163,7 @@ func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.Server
     return &api.MemberStatus{
         ID:                        id,
         UID:                       uuid.NewUUID(),
-        CreatedAt:                 metav1.Now(),
+        CreatedAt:                 meta.Now(),
         Phase:                     api.MemberPhaseNone,
         PersistentVolumeClaimName: shared.CreatePersistentVolumeClaimName(deploymentName, role, id),
         PodName:                   "",
@@ -175,7 +175,7 @@ func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.Server
     return &api.MemberStatus{
         ID:                        id,
         UID:                       uuid.NewUUID(),
-        CreatedAt:                 metav1.Now(),
+        CreatedAt:                 meta.Now(),
         Phase:                     api.MemberPhaseNone,
         PersistentVolumeClaimName: "",
         PodName:                   "",
@@ -187,7 +187,7 @@ func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.Server
     return &api.MemberStatus{
         ID:                        id,
         UID:                       uuid.NewUUID(),
-        CreatedAt:                 metav1.Now(),
+        CreatedAt:                 meta.Now(),
         Phase:                     api.MemberPhaseNone,
         PersistentVolumeClaimName: "",
         PodName:                   "",
@@ -199,7 +199,7 @@ func (d *Deployment) renderMember(status *api.DeploymentStatus, group api.Server
     return &api.MemberStatus{
         ID:                        id,
         UID:                       uuid.NewUUID(),
-        CreatedAt:                 metav1.Now(),
+        CreatedAt:                 meta.Now(),
         Phase:                     api.MemberPhaseNone,
         PersistentVolumeClaimName: "",
         PodName:                   "",
@@ -30,7 +30,7 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"

     core "k8s.io/api/core/v1"
-    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 func init() {
@@ -193,7 +193,7 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err
     // Update pod image
     pod.Spec.Containers[id].Image = image

-    if _, err := a.actionCtx.ACS().CurrentClusterCache().PodsModInterface().V1().Update(ctx, pod, v1.UpdateOptions{}); err != nil {
+    if _, err := a.actionCtx.ACS().CurrentClusterCache().PodsModInterface().V1().Update(ctx, pod, meta.UpdateOptions{}); err != nil {
         return true, err
     }

@@ -25,7 +25,7 @@ import (
     "fmt"
     "time"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/metrics"
@@ -252,7 +252,7 @@ func (d *Reconciler) executePlan(ctx context.Context, statusPlan api.Plan, pg pl
         actionsCurrentPlan.WithLabelValues(d.context.GetName(), planAction.Group.AsRole(), planAction.MemberID,
             planAction.Type.String(), pg.Type()).Inc()

-        now := metav1.Now()
+        now := meta.Now()
         plan[0].StartTime = &now
     }

@@ -29,27 +29,27 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/apis/shared"
     "github.com/arangodb/kube-arangodb/pkg/util/constants"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
 )

 // ArangodbExporterContainer creates metrics container
 func ArangodbExporterContainer(image string, args []string, livenessProbe *probes.HTTPProbeConfig,
-    resources v1.ResourceRequirements, securityContext *v1.SecurityContext,
-    spec api.DeploymentSpec) v1.Container {
+    resources core.ResourceRequirements, securityContext *core.SecurityContext,
+    spec api.DeploymentSpec) core.Container {

-    c := v1.Container{
+    c := core.Container{
         Name:    shared.ExporterContainerName,
         Image:   image,
         Command: append([]string{"/app/arangodb-exporter"}, args...),
-        Ports: []v1.ContainerPort{
+        Ports: []core.ContainerPort{
             {
                 Name:          "exporter",
                 ContainerPort: int32(spec.Metrics.GetPort()),
-                Protocol:      v1.ProtocolTCP,
+                Protocol:      core.ProtocolTCP,
             },
         },
         Resources:       k8sutil.ExtractPodResourceRequirement(resources),
-        ImagePullPolicy: v1.PullIfNotPresent,
+        ImagePullPolicy: core.PullIfNotPresent,
         SecurityContext: securityContext,
     }

@@ -28,35 +28,35 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/apis/shared"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
 )

 // ArangodbInternalExporterContainer creates metrics container based on internal exporter
 func ArangodbInternalExporterContainer(image string, args []string, livenessProbe *probes.HTTPProbeConfig,
-    resources v1.ResourceRequirements, securityContext *v1.SecurityContext,
-    spec api.DeploymentSpec) (v1.Container, error) {
+    resources core.ResourceRequirements, securityContext *core.SecurityContext,
+    spec api.DeploymentSpec) (core.Container, error) {

     binaryPath, err := os.Executable()
     if err != nil {
-        return v1.Container{}, errors.WithStack(err)
+        return core.Container{}, errors.WithStack(err)
     }
     exePath := filepath.Join(k8sutil.LifecycleVolumeMountDir, filepath.Base(binaryPath))

-    c := v1.Container{
+    c := core.Container{
         Name:    shared.ExporterContainerName,
         Image:   image,
         Command: append([]string{exePath, "exporter"}, args...),
-        Ports: []v1.ContainerPort{
+        Ports: []core.ContainerPort{
             {
                 Name:          "exporter",
                 ContainerPort: int32(spec.Metrics.GetPort()),
-                Protocol:      v1.ProtocolTCP,
+                Protocol:      core.ProtocolTCP,
             },
         },
         Resources:       k8sutil.ExtractPodResourceRequirement(resources),
-        ImagePullPolicy: v1.PullIfNotPresent,
+        ImagePullPolicy: core.PullIfNotPresent,
         SecurityContext: securityContext,
-        VolumeMounts:    []v1.VolumeMount{k8sutil.LifecycleVolumeMount()},
+        VolumeMounts:    []core.VolumeMount{k8sutil.LifecycleVolumeMount()},
     }

     if livenessProbe != nil {
@@ -28,7 +28,7 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -45,7 +45,7 @@ const (

 // runPodFinalizers goes through the list of pod finalizers to see if they can be removed.
 // Returns: Interval_till_next_inspection, error
-func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) (util.Interval, error) {
+func (r *Resources) runPodFinalizers(ctx context.Context, p *core.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) (util.Interval, error) {
     log := r.log.Str("section", "pod").Str("pod-name", p.GetName())
     var removalList []string

@@ -130,7 +130,7 @@ func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatu

 // inspectFinalizerPodAgencyServing checks the finalizer condition for agency-serving.
 // It returns nil if the finalizer can be removed.
-func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
+func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, p *core.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
     log := r.log.Str("section", "agency")
     if err := r.prepareAgencyPodTermination(p, memberStatus, func(update api.MemberStatus) error {
         if err := updateMember(update); err != nil {
@@ -161,7 +161,7 @@ func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, p *v1.

 // inspectFinalizerPodDrainDBServer checks the finalizer condition for drain-dbserver.
 // It returns nil if the finalizer can be removed.
-func (r *Resources) inspectFinalizerPodDrainDBServer(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
+func (r *Resources) inspectFinalizerPodDrainDBServer(ctx context.Context, p *core.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
     log := r.log.Str("section", "pod")
     if err := r.prepareDBServerPodTermination(ctx, p, memberStatus, func(update api.MemberStatus) error {
         if err := updateMember(update); err != nil {
@@ -27,7 +27,6 @@ import (
     "time"

     core "k8s.io/api/core/v1"
-    v1 "k8s.io/api/core/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -92,7 +91,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
     var podNamesWithScheduleTimeout []string
     var unscheduledPodNames []string

-    err := cachedStatus.Pod().V1().Iterate(func(pod *v1.Pod) error {
+    err := cachedStatus.Pod().V1().Iterate(func(pod *core.Pod) error {
         if k8sutil.IsArangoDBImageIDAndVersionPod(pod) {
             // Image ID pods are not relevant to inspect here
             return nil
@@ -34,13 +34,13 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/apis/shared"
     "github.com/arangodb/kube-arangodb/pkg/deployment/agency"
     "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
 )

 // prepareAgencyPodTermination checks if the given agency pod is allowed to terminate
 // and if so, prepares it for termination.
 // It returns nil if the pod is allowed to terminate, an error otherwise.
-func (r *Resources) prepareAgencyPodTermination(p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
+func (r *Resources) prepareAgencyPodTermination(p *core.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
     log := r.log.Str("section", "pod")

     // Inspect member phase
@@ -110,7 +110,7 @@ func (r *Resources) prepareAgencyPodTermination(p *v1.Pod, memberStatus api.Memb
 // prepareDBServerPodTermination checks if the given dbserver pod is allowed to terminate
 // and if so, prepares it for termination.
 // It returns nil if the pod is allowed to terminate, an error otherwise.
-func (r *Resources) prepareDBServerPodTermination(ctx context.Context, p *v1.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
+func (r *Resources) prepareDBServerPodTermination(ctx context.Context, p *core.Pod, memberStatus api.MemberStatus, updateMember func(api.MemberStatus) error) error {
     log := r.log.Str("section", "pod")

     // Inspect member phase
@@ -28,7 +28,7 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@@ -42,7 +42,7 @@ const (
 )

 // runPVCFinalizers goes through the list of PVC finalizers to see if they can be removed.
-func (r *Resources) runPVCFinalizers(ctx context.Context, p *v1.PersistentVolumeClaim, group api.ServerGroup,
+func (r *Resources) runPVCFinalizers(ctx context.Context, p *core.PersistentVolumeClaim, group api.ServerGroup,
     memberStatus api.MemberStatus) (util.Interval, error) {
     log := r.log.Str("section", "pvc").Str("pvc-name", p.GetName())
     var removalList []string
@@ -24,7 +24,7 @@ import (
     "context"
     "time"

-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/types"

@@ -58,7 +58,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter

     // Update member status from all pods found
     status, _ := r.context.GetStatus()
-    if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(pvc *v1.PersistentVolumeClaim) error {
+    if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(pvc *core.PersistentVolumeClaim) error {
         // PVC belongs to this deployment, update metric
         inspectedPVCsCounters.WithLabelValues(deploymentName).Inc()

@@ -25,7 +25,7 @@ import (
     "sort"

     core "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/apis/shared"
@@ -190,7 +190,7 @@ func (d *Deployment) StorageClasses() []string {
 // Empty string means that the database is not reachable outside the Kubernetes cluster.
 func (d *Deployment) DatabaseURL() string {
     eaSvcName := k8sutil.CreateDatabaseExternalAccessServiceName(d.Name())
-    svc, err := d.acs.CurrentClusterCache().Service().V1().Read().Get(context.Background(), eaSvcName, metav1.GetOptions{})
+    svc, err := d.acs.CurrentClusterCache().Service().V1().Read().Get(context.Background(), eaSvcName, meta.GetOptions{})
     if err != nil {
         return ""
     }
@@ -23,7 +23,7 @@ package deployment
 import (
     "context"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
     "github.com/arangodb/kube-arangodb/pkg/server"
@@ -62,7 +62,7 @@ func (m member) PVCName() string {
 func (m member) PVName() string {
     if status, found := m.status(); found && status.PersistentVolumeClaimName != "" {
         pvcs := m.d.deps.Client.Kubernetes().CoreV1().PersistentVolumeClaims(m.d.Namespace())
-        if pvc, err := pvcs.Get(context.Background(), status.PersistentVolumeClaimName, metav1.GetOptions{}); err == nil {
+        if pvc, err := pvcs.Get(context.Background(), status.PersistentVolumeClaimName, meta.GetOptions{}); err == nil {
             return pvc.Spec.VolumeName
         }
     }
@@ -30,7 +30,7 @@ import (
     backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1"
     "github.com/arangodb/kube-arangodb/pkg/handlers/backup/state"
     "github.com/arangodb/kube-arangodb/pkg/util"
-    v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 type updateStatusFunc func(status *backupApi.ArangoBackupStatus)
@@ -52,7 +52,7 @@ func updateStatus(backup *backupApi.ArangoBackup, update ...updateStatusFunc) *b
 func updateStatusState(state state.State, template string, a ...interface{}) updateStatusFunc {
     return func(status *backupApi.ArangoBackupStatus) {
         if status.State != state {
-            status.Time = v1.Now()
+            status.Time = meta.Now()
         }
         status.State = state
         status.Message = fmt.Sprintf(template, a...)
@@ -144,7 +144,7 @@ func createBackupFromMeta(backupMeta driver.BackupMeta, old *backupApi.ArangoBac
     obj.Keys = keysToHashList(backupMeta.Keys)
     obj.PotentiallyInconsistent = util.NewBool(backupMeta.PotentiallyInconsistent)
     obj.SizeInBytes = backupMeta.SizeInBytes
-    obj.CreationTimestamp = v1.Time{
+    obj.CreationTimestamp = meta.Time{
         Time: backupMeta.DateTime,
     }
     obj.NumberOfDBServers = backupMeta.NumberOfDBServers
@@ -35,7 +35,7 @@ import (

     "github.com/stretchr/testify/require"
     batchv1 "k8s.io/api/batch/v1"
-    v1 "k8s.io/api/core/v1"
+    core "k8s.io/api/core/v1"
     meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/util/uuid"
     "k8s.io/client-go/kubernetes/fake"
@@ -114,16 +114,16 @@ func newArangoJob(name, namespace, deployment string) *appsApi.ArangoJob {
         Spec: appsApi.ArangoJobSpec{
             ArangoDeploymentName: deployment,
             JobTemplate: &batchv1.JobSpec{
-                Template: v1.PodTemplateSpec{
-                    Spec: v1.PodSpec{
-                        Containers: []v1.Container{
+                Template: core.PodTemplateSpec{
+                    Spec: core.PodSpec{
+                        Containers: []core.Container{
                             {
                                 Image: "perl",
                                 Name:  "pi",
                                 Args:  []string{"perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"},
                             },
                         },
-                        RestartPolicy: v1.RestartPolicyNever,
+                        RestartPolicy: core.RestartPolicyNever,
                     },
                 },
             },
@@ -28,8 +28,8 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    v1 "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    core "k8s.io/api/core/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/client-go/tools/leaderelection"
     "k8s.io/client-go/tools/leaderelection/resourcelock"
@@ -54,7 +54,7 @@ func (o *Operator) runLeaderElection(lockName, label string, onStart func(stop <
     eventTarget := o.getLeaderElectionEventTarget(log)
     recordEvent := func(reason, message string) {
         if eventTarget != nil {
-            o.Dependencies.EventRecorder.Event(eventTarget, v1.EventTypeNormal, reason, message)
+            o.Dependencies.EventRecorder.Event(eventTarget, core.EventTypeNormal, reason, message)
         }
     }
     rl, err := resourcelock.New(resourcelock.EndpointsResourceLock,
@@ -104,7 +104,7 @@ func (o *Operator) runWithoutLeaderElection(lockName, label string, onStart func
     eventTarget := o.getLeaderElectionEventTarget(log)
     recordEvent := func(reason, message string) {
         if eventTarget != nil {
-            o.Dependencies.EventRecorder.Event(eventTarget, v1.EventTypeNormal, reason, message)
+            o.Dependencies.EventRecorder.Event(eventTarget, core.EventTypeNormal, reason, message)
         }
     }
     ctx := context.Background()
@@ -125,7 +125,7 @@ func (o *Operator) getLeaderElectionEventTarget(log logging.Logger) runtime.Obje
     kubecli := o.Dependencies.Client.Kubernetes()
     pods := kubecli.CoreV1().Pods(ns)
     log = log.Str("pod-name", o.Config.PodName)
-    pod, err := pods.Get(context.Background(), o.Config.PodName, metav1.GetOptions{})
+    pod, err := pods.Get(context.Background(), o.Config.PodName, meta.GetOptions{})
     if err != nil {
         log.Err(err).Error("Cannot find Pod containing this operator")
         return nil
@@ -159,7 +159,7 @@ func (o *Operator) setRoleLabel(log logging.Logger, label, role string) error {
     pods := kubecli.CoreV1().Pods(ns)
     log = log.Str("pod-name", o.Config.PodName)
     op := func() error {
-        pod, err := pods.Get(context.Background(), o.Config.PodName, metav1.GetOptions{})
+        pod, err := pods.Get(context.Background(), o.Config.PodName, meta.GetOptions{})
         if k8sutil.IsNotFound(err) {
             log.Err(err).Error("Pod not found, so we cannot set its role label")
             return retry.Permanent(errors.WithStack(err))
@@ -172,7 +172,7 @@ func (o *Operator) setRoleLabel(log logging.Logger, label, role string) error {
         }
         labels[label] = role
         pod.ObjectMeta.SetLabels(labels)
-        if _, err := pods.Update(context.Background(), pod, metav1.UpdateOptions{}); k8sutil.IsConflict(err) {
+        if _, err := pods.Update(context.Background(), pod, meta.UpdateOptions{}); k8sutil.IsConflict(err) {
             // Retry it
             return errors.WithStack(err)
         } else if err != nil {
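setRoleLabel retries the get-mutate-update loop by hand whenever the Update fails with a conflict. For comparison, client-go ships a ready-made helper for exactly this loop; a hedged sketch using retry.RetryOnConflict, not the operator's actual code — pod and label names are placeholders:

package example

import (
    "context"

    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/client-go/kubernetes"
    "k8s.io/client-go/util/retry"
)

// setPodLabel sets one label on a pod, re-reading the object and retrying
// whenever a concurrent writer makes the Update fail with a conflict.
func setPodLabel(ctx context.Context, cli kubernetes.Interface, ns, podName, label, role string) error {
    return retry.RetryOnConflict(retry.DefaultRetry, func() error {
        pod, err := cli.CoreV1().Pods(ns).Get(ctx, podName, meta.GetOptions{})
        if err != nil {
            return err
        }
        if pod.Labels == nil {
            pod.Labels = map[string]string{}
        }
        pod.Labels[label] = role
        _, err = cli.CoreV1().Pods(ns).Update(ctx, pod, meta.UpdateOptions{})
        return err
    })
}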
@@ -27,7 +27,7 @@ import (
     "github.com/arangodb/kube-arangodb/pkg/util/errors"

     core "k8s.io/api/core/v1"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/arangodb/kube-arangodb/pkg/logging"
     "github.com/arangodb/kube-arangodb/pkg/server"
@@ -52,7 +52,7 @@ func (o *Operator) FindOtherOperators() []server.OperatorReference {

     log := o.log
     var result []server.OperatorReference
-    namespaces, err := o.Dependencies.Client.Kubernetes().CoreV1().Namespaces().List(context.Background(), metav1.ListOptions{})
+    namespaces, err := o.Dependencies.Client.Kubernetes().CoreV1().Namespaces().List(context.Background(), meta.ListOptions{})
     if err != nil {
         log.Err(err).Warn("Failed to list namespaces")
     } else {
@@ -94,7 +94,7 @@ func (o *Operator) FindOtherOperators() []server.OperatorReference {
 func (o *Operator) findOtherOperatorsInNamespace(log logging.Logger, namespace string, typePred func(server.OperatorType) bool) []server.OperatorReference {
     log = log.Str("namespace", namespace)
     var result []server.OperatorReference
-    services, err := o.Dependencies.Client.Kubernetes().CoreV1().Services(namespace).List(context.Background(), metav1.ListOptions{})
+    services, err := o.Dependencies.Client.Kubernetes().CoreV1().Services(namespace).List(context.Background(), meta.ListOptions{})
     if err != nil {
         log.Err(err).Debug("Failed to list services")
         return nil
@@ -103,7 +103,7 @@ func (o *Operator) findOtherOperatorsInNamespace(log logging.Logger, namespace s
         if o.Scope.IsNamespaced() {
             return nil, nil
         }
-        result, err := o.Dependencies.Client.Kubernetes().CoreV1().Nodes().List(context.Background(), metav1.ListOptions{})
+        result, err := o.Dependencies.Client.Kubernetes().CoreV1().Nodes().List(context.Background(), meta.ListOptions{})
         if err != nil {
             return nil, errors.WithStack(err)
         }
@@ -28,7 +28,7 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"
     "k8s.io/client-go/tools/record"

     "github.com/arangodb/arangosync-client/client"
@@ -196,7 +196,7 @@ func (dr *DeploymentReplication) handleArangoDeploymentReplicationUpdatedEvent(e
     repls := dr.deps.Client.Arango().ReplicationV1().ArangoDeploymentReplications(dr.apiObject.GetNamespace())

     // Get the most recent version of the deployment replication from the API server
-    current, err := repls.Get(context.Background(), dr.apiObject.GetName(), metav1.GetOptions{})
+    current, err := repls.Get(context.Background(), dr.apiObject.GetName(), meta.GetOptions{})
     if err != nil {
         log.Err(err).Debug("Failed to get current version of deployment replication from API server")
         if k8sutil.IsNotFound(err) {
@@ -260,7 +260,7 @@ func (dr *DeploymentReplication) updateCRStatus() error {
     for {
         attempt++
         update.Status = dr.status
-        newAPIObject, err := repls.Update(context.Background(), update, metav1.UpdateOptions{})
+        newAPIObject, err := repls.Update(context.Background(), update, meta.UpdateOptions{})
         if err == nil {
             // Update internal object
             dr.apiObject = newAPIObject
@@ -270,7 +270,7 @@ func (dr *DeploymentReplication) updateCRStatus() error {
             // API object may have been changed already,
             // Reload api object and try again
             var current *api.ArangoDeploymentReplication
-            current, err = repls.Get(context.Background(), update.GetName(), metav1.GetOptions{})
+            current, err = repls.Get(context.Background(), update.GetName(), meta.GetOptions{})
             if err == nil {
                 update = current.DeepCopy()
                 continue
@@ -297,7 +297,7 @@ func (dr *DeploymentReplication) updateCRSpec(newSpec api.DeploymentReplicationS
         attempt++
         update.Spec = newSpec
         update.Status = dr.status
-        newAPIObject, err := repls.Update(context.Background(), update, metav1.UpdateOptions{})
+        newAPIObject, err := repls.Update(context.Background(), update, meta.UpdateOptions{})
         if err == nil {
             // Update internal object
             dr.apiObject = newAPIObject
@@ -307,7 +307,7 @@ func (dr *DeploymentReplication) updateCRSpec(newSpec api.DeploymentReplicationS
             // API object may have been changed already,
             // Reload api object and try again
             var current *api.ArangoDeploymentReplication
-            current, err = repls.Get(context.Background(), update.GetName(), metav1.GetOptions{})
+            current, err = repls.Get(context.Background(), update.GetName(), meta.GetOptions{})
             if err == nil {
                 update = current.DeepCopy()
                 continue
@@ -346,7 +346,7 @@ func (dr *DeploymentReplication) reportFailedStatus() {
             return errors.WithStack(err)
         }

-        depl, err := repls.Get(context.Background(), dr.apiObject.Name, metav1.GetOptions{})
+        depl, err := repls.Get(context.Background(), dr.apiObject.Name, meta.GetOptions{})
         if err != nil {
             // Update (PUT) will return conflict even if object is deleted since we have UID set in object.
             // Because it will check UID first and return something like:
@@ -26,7 +26,7 @@ import (

     "github.com/arangodb/kube-arangodb/pkg/util/errors"

-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     "github.com/arangodb/arangosync-client/client"
     api "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1"
@@ -98,7 +98,7 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co
     abort := dr.status.CancelFailures > maxCancelFailures
     depls := dr.deps.Client.Arango().DatabaseV1().ArangoDeployments(p.GetNamespace())
     if name := p.Spec.Source.GetDeploymentName(); name != "" {
-        depl, err := depls.Get(context.Background(), name, metav1.GetOptions{})
+        depl, err := depls.Get(context.Background(), name, meta.GetOptions{})
         if k8sutil.IsNotFound(err) {
             dr.log.Debug("Source deployment is gone. Abort enabled")
             abort = true
@@ -114,7 +114,7 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co
     // Inspect deployment deletion state in destination
     cleanupSource := false
     if name := p.Spec.Destination.GetDeploymentName(); name != "" {
-        depl, err := depls.Get(context.Background(), name, metav1.GetOptions{})
+        depl, err := depls.Get(context.Background(), name, meta.GetOptions{})
         if k8sutil.IsNotFound(err) {
             dr.log.Debug("Destination deployment is gone. Source cleanup enabled")
             cleanupSource = true
@@ -166,16 +166,16 @@ func (dr *DeploymentReplication) inspectFinalizerDeplReplStopSync(ctx context.Co
 // removeDeploymentReplicationFinalizers removes the given finalizers from the given DeploymentReplication.
 func removeDeploymentReplicationFinalizers(crcli versioned.Interface, p *api.ArangoDeploymentReplication, finalizers []string, ignoreNotFound bool) error {
     repls := crcli.ReplicationV1().ArangoDeploymentReplications(p.GetNamespace())
-    getFunc := func() (metav1.Object, error) {
-        result, err := repls.Get(context.Background(), p.GetName(), metav1.GetOptions{})
+    getFunc := func() (meta.Object, error) {
+        result, err := repls.Get(context.Background(), p.GetName(), meta.GetOptions{})
         if err != nil {
             return nil, errors.WithStack(err)
         }
         return result, nil
     }
-    updateFunc := func(updated metav1.Object) error {
+    updateFunc := func(updated meta.Object) error {
         updatedRepl := updated.(*api.ArangoDeploymentReplication)
-        result, err := repls.Update(context.Background(), updatedRepl, metav1.UpdateOptions{})
+        result, err := repls.Update(context.Background(), updatedRepl, meta.UpdateOptions{})
         if err != nil {
             return errors.WithStack(err)
         }
@@ -30,7 +30,7 @@ import (
     certificates "github.com/arangodb-helper/go-certificates"
     "github.com/arangodb/arangosync-client/client"
    "github.com/arangodb/arangosync-client/tasks"
-    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    meta "k8s.io/apimachinery/pkg/apis/meta/v1"

     api "github.com/arangodb/kube-arangodb/pkg/apis/replication/v1"
     "github.com/arangodb/kube-arangodb/pkg/apis/shared"
@@ -108,7 +108,7 @@ func (dr *DeploymentReplication) createArangoSyncEndpoint(epSpec api.EndpointSpe
     if epSpec.HasDeploymentName() {
         deploymentName := epSpec.GetDeploymentName()
         depls := dr.deps.Client.Arango().DatabaseV1().ArangoDeployments(dr.apiObject.GetNamespace())
-        depl, err := depls.Get(context.Background(), deploymentName, metav1.GetOptions{})
+        depl, err := depls.Get(context.Background(), deploymentName, meta.GetOptions{})
         if err != nil {
             dr.log.Err(err).Str("deployment", deploymentName).Debug("Failed to get deployment")
             return nil, errors.WithStack(err)
@@ -167,7 +167,7 @@ func (dr *DeploymentReplication) getEndpointSecretNames(epSpec api.EndpointSpec)
     if epSpec.HasDeploymentName() {
         deploymentName := epSpec.GetDeploymentName()
         depls := dr.deps.Client.Arango().DatabaseV1().ArangoDeployments(dr.apiObject.GetNamespace())
-        depl, err := depls.Get(context.Background(), deploymentName, metav1.GetOptions{})
+        depl, err := depls.Get(context.Background(), deploymentName, meta.GetOptions{})
         if err != nil {
             dr.log.Err(err).Str("deployment", deploymentName).Debug("Failed to get deployment")
             return "", "", "", "", errors.WithStack(err)
@@ -33,8 +33,8 @@ import (
 	"github.com/gin-gonic/gin"
 
 	"github.com/arangodb/kube-arangodb/pkg/logging"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	typedv1 "k8s.io/client-go/kubernetes/typed/core/v1"
 )
 
@@ -98,18 +98,18 @@ func (s *serverAuthentication) fetchAdminSecret() (string, string, error) {
 	if s.adminSecretName == "" {
 		return "", "", errors.WithStack(errors.Newf("No admin secret name specified"))
 	}
-	secret, err := s.secrets.Get(context.Background(), s.adminSecretName, metav1.GetOptions{})
+	secret, err := s.secrets.Get(context.Background(), s.adminSecretName, meta.GetOptions{})
 	if err != nil {
 		return "", "", errors.WithStack(err)
 	}
 	var username, password string
-	if raw, found := secret.Data[v1.BasicAuthUsernameKey]; !found {
-		return "", "", errors.WithStack(errors.Newf("Secret '%s' contains no '%s' field", s.adminSecretName, v1.BasicAuthUsernameKey))
+	if raw, found := secret.Data[core.BasicAuthUsernameKey]; !found {
+		return "", "", errors.WithStack(errors.Newf("Secret '%s' contains no '%s' field", s.adminSecretName, core.BasicAuthUsernameKey))
 	} else {
 		username = string(raw)
 	}
-	if raw, found := secret.Data[v1.BasicAuthPasswordKey]; !found {
-		return "", "", errors.WithStack(errors.Newf("Secret '%s' contains no '%s' field", s.adminSecretName, v1.BasicAuthPasswordKey))
+	if raw, found := secret.Data[core.BasicAuthPasswordKey]; !found {
+		return "", "", errors.WithStack(errors.Newf("Secret '%s' contains no '%s' field", s.adminSecretName, core.BasicAuthPasswordKey))
 	} else {
 		password = string(raw)
 	}

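The fetchAdminSecret hunk reads a kubernetes.io/basic-auth style Secret through the well-known core/v1 key constants (BasicAuthUsernameKey is "username", BasicAuthPasswordKey is "password"). A compact sketch of the same lookup without the if/else chains; the function name is illustrative:

package example

import (
	"fmt"

	core "k8s.io/api/core/v1"
)

// basicAuthFromSecret extracts the username/password pair from a
// kubernetes.io/basic-auth style Secret, using the same core API key
// constants as the hunk above.
func basicAuthFromSecret(secret *core.Secret) (string, string, error) {
	user, ok := secret.Data[core.BasicAuthUsernameKey]
	if !ok {
		return "", "", fmt.Errorf("secret %q contains no %q field", secret.Name, core.BasicAuthUsernameKey)
	}
	pass, ok := secret.Data[core.BasicAuthPasswordKey]
	if !ok {
		return "", "", fmt.Errorf("secret %q contains no %q field", secret.Name, core.BasicAuthPasswordKey)
	}
	return string(user), string(pass), nil
}
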
@@ -37,7 +37,7 @@ import (
 	"github.com/jessevdk/go-assets"
 	prometheus "github.com/prometheus/client_golang/prometheus/promhttp"
 	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	corev1 "k8s.io/client-go/kubernetes/typed/core/v1"
 
 	"github.com/arangodb/kube-arangodb/dashboard"
@@ -107,7 +107,7 @@ func NewServer(cli corev1.CoreV1Interface, cfg Config, deps Dependencies) (*Serv
 	var cert, key string
 	if cfg.TLSSecretName != "" && cfg.TLSSecretNamespace != "" {
 		// Load TLS certificate from secret
-		s, err := cli.Secrets(cfg.TLSSecretNamespace).Get(context.Background(), cfg.TLSSecretName, metav1.GetOptions{})
+		s, err := cli.Secrets(cfg.TLSSecretNamespace).Get(context.Background(), cfg.TLSSecretName, meta.GetOptions{})
 		if err != nil {
 			return nil, errors.WithStack(err)
 		}

@@ -27,8 +27,8 @@ import (
 	"github.com/stretchr/testify/require"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/kclient"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // TestGetMyImage tests getMyImage() method
@@ -36,23 +36,23 @@ func TestGetMyImage(t *testing.T) {
 	testNamespace := "testNs"
 	testPodName := "testPodname"
 	testImage := "test-image"
-	testPullSecrets := []v1.LocalObjectReference{
+	testPullSecrets := []core.LocalObjectReference{
 		{
 			Name: "custom-docker",
 		},
 	}
 
-	pod := v1.Pod{
-		ObjectMeta: metav1.ObjectMeta{
+	pod := core.Pod{
+		ObjectMeta: meta.ObjectMeta{
 			Name:      testPodName,
 			Namespace: testNamespace,
 		},
-		Spec: v1.PodSpec{
-			Containers: []v1.Container{
+		Spec: core.PodSpec{
+			Containers: []core.Container{
 				{
 					Name:            "test",
 					Image:           testImage,
-					ImagePullPolicy: v1.PullAlways,
+					ImagePullPolicy: core.PullAlways,
 				},
 			},
 			ImagePullSecrets: testPullSecrets,
@@ -70,13 +70,13 @@ func TestGetMyImage(t *testing.T) {
 	}
 
 	// prepare mock
-	if _, err := ls.deps.Client.Kubernetes().CoreV1().Pods(testNamespace).Create(context.Background(), &pod, metav1.CreateOptions{}); err != nil {
+	if _, err := ls.deps.Client.Kubernetes().CoreV1().Pods(testNamespace).Create(context.Background(), &pod, meta.CreateOptions{}); err != nil {
 		require.NoError(t, err)
 	}
 
 	image, pullPolicy, pullSecrets, err := ls.getMyImage()
 	require.NoError(t, err)
 	require.Equal(t, image, testImage)
-	require.Equal(t, pullPolicy, v1.PullAlways)
+	require.Equal(t, pullPolicy, core.PullAlways)
 	require.Equal(t, pullSecrets, testPullSecrets)
 }

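The test above seeds a Pod through the typed client before exercising getMyImage, which keeps the test hermetic. The same arrange step works directly against client-go's fake clientset; a minimal sketch (the names mirror the test, and the fake clientset is an assumption about the test harness, not necessarily what kclient wires up):

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes/fake"
)

// TestSeedPod shows the arrange step used by TestGetMyImage above:
// create the Pod through the (fake) typed client, then read it back.
func TestSeedPod(t *testing.T) {
	cli := fake.NewSimpleClientset()
	pod := core.Pod{
		ObjectMeta: meta.ObjectMeta{Name: "testPodname", Namespace: "testNs"},
		Spec: core.PodSpec{
			Containers: []core.Container{
				{Name: "test", Image: "test-image", ImagePullPolicy: core.PullAlways},
			},
		},
	}
	_, err := cli.CoreV1().Pods("testNs").Create(context.Background(), &pod, meta.CreateOptions{})
	require.NoError(t, err)

	got, err := cli.CoreV1().Pods("testNs").Get(context.Background(), "testPodname", meta.GetOptions{})
	require.NoError(t, err)
	require.Equal(t, core.PullAlways, got.Spec.Containers[0].ImagePullPolicy)
}
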
@@ -28,8 +28,8 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/tools/record"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha"
@@ -70,8 +70,8 @@ const (
 type localStorageEvent struct {
 	Type                  localStorageEventType
 	LocalStorage          *api.ArangoLocalStorage
-	PersistentVolume      *v1.PersistentVolume
-	PersistentVolumeClaim *v1.PersistentVolumeClaim
+	PersistentVolume      *core.PersistentVolume
+	PersistentVolumeClaim *core.PersistentVolumeClaim
 }
 
 const (
@@ -94,8 +94,8 @@ type LocalStorage struct {
 	stopped int32
 
 	image            string
-	imagePullPolicy  v1.PullPolicy
-	imagePullSecrets []v1.LocalObjectReference
+	imagePullPolicy  core.PullPolicy
+	imagePullSecrets []core.LocalObjectReference
 
 	inspectTrigger trigger.Trigger
 	pvCleaner      *pvCleaner
@@ -297,7 +297,7 @@ func (ls *LocalStorage) handleArangoLocalStorageUpdatedEvent(event *localStorage
 	log := ls.log.Str("localStorage", event.LocalStorage.GetName())
 
 	// Get the most recent version of the local storage from the API server
-	current, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), ls.apiObject.GetName(), metav1.GetOptions{})
+	current, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), ls.apiObject.GetName(), meta.GetOptions{})
 	if err != nil {
 		log.Err(err).Debug("Failed to get current version of local storage from API server")
 		if k8sutil.IsNotFound(err) {
@@ -359,7 +359,7 @@ func (ls *LocalStorage) updateCRStatus() error {
 	for {
 		attempt++
 		update.Status = ls.status
-		newAPIObject, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Update(context.Background(), update, metav1.UpdateOptions{})
+		newAPIObject, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Update(context.Background(), update, meta.UpdateOptions{})
 		if err == nil {
 			// Update internal object
 			ls.apiObject = newAPIObject
@@ -369,7 +369,7 @@ func (ls *LocalStorage) updateCRStatus() error {
 		// API object may have been changed already,
 		// Reload api object and try again
 		var current *api.ArangoLocalStorage
-		current, err = ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), update.GetName(), metav1.GetOptions{})
+		current, err = ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), update.GetName(), meta.GetOptions{})
 		if err == nil {
 			update = current.DeepCopy()
 			continue
@@ -393,7 +393,7 @@ func (ls *LocalStorage) updateCRSpec(newSpec api.LocalStorageSpec) error {
 		attempt++
 		update.Spec = newSpec
 		update.Status = ls.status
-		newAPIObject, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Update(context.Background(), update, metav1.UpdateOptions{})
+		newAPIObject, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Update(context.Background(), update, meta.UpdateOptions{})
 		if err == nil {
 			// Update internal object
 			ls.apiObject = newAPIObject
@@ -403,7 +403,7 @@ func (ls *LocalStorage) updateCRSpec(newSpec api.LocalStorageSpec) error {
 		// API object may have been changed already,
 		// Reload api object and try again
 		var current *api.ArangoLocalStorage
-		current, err = ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), update.GetName(), metav1.GetOptions{})
+		current, err = ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), update.GetName(), meta.GetOptions{})
 		if err == nil {
 			update = current.DeepCopy()
 			continue
@@ -442,7 +442,7 @@ func (ls *LocalStorage) reportFailedStatus() {
 			return errors.WithStack(err)
 		}
 
-		depl, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), ls.apiObject.Name, metav1.GetOptions{})
+		depl, err := ls.deps.Client.Arango().StorageV1alpha().ArangoLocalStorages().Get(context.Background(), ls.apiObject.Name, meta.GetOptions{})
 		if err != nil {
 			// Update (PUT) will return conflict even if object is deleted since we have UID set in object.
 			// Because it will check UID first and return something like:
@@ -461,7 +461,7 @@ func (ls *LocalStorage) reportFailedStatus() {
 	}
 
 // isOwnerOf returns true if the given object belongs to this local storage.
-func (ls *LocalStorage) isOwnerOf(obj metav1.Object) bool {
+func (ls *LocalStorage) isOwnerOf(obj meta.Object) bool {
 	ownerRefs := obj.GetOwnerReferences()
 	if len(ownerRefs) < 1 {
 		return false

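updateCRStatus and updateCRSpec above share one loop: attempt the Update, and when the API server answers with a conflict, re-Get the object, DeepCopy it, and try again a bounded number of times. A minimal generic sketch of that loop shape (the helper name and signature are illustrative):

package example

import (
	"k8s.io/apimachinery/pkg/api/errors"
)

// updateWithReload captures the retry loop used by updateCRStatus and
// updateCRSpec above: try the write, and on a conflict reload the latest
// copy and retry a bounded number of times. The closures wrap the typed
// client calls.
func updateWithReload(
	get func() (interface{}, error),
	update func(obj interface{}) error,
	reload func() (interface{}, error),
	maxAttempts int,
) error {
	obj, err := get()
	if err != nil {
		return err
	}
	for attempt := 0; attempt < maxAttempts; attempt++ {
		if err = update(obj); err == nil {
			return nil
		}
		if !errors.IsConflict(err) {
			return err
		}
		// The API object changed underneath us; reload and try again.
		if obj, err = reload(); err != nil {
			return err
		}
	}
	return err
}
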
@@ -28,8 +28,8 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
 	"github.com/rs/zerolog"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 
 	"github.com/arangodb/kube-arangodb/pkg/logging"
@@ -44,7 +44,7 @@ type pvCleaner struct {
 	mutex        sync.Mutex
 	log          logging.Logger
 	cli          kubernetes.Interface
-	items        []v1.PersistentVolume
+	items        []core.PersistentVolume
 	trigger      trigger.Trigger
 	clientGetter func(nodeName string) (provisioner.API, error)
 }
@@ -86,7 +86,7 @@ func (c *pvCleaner) Run(stopCh <-chan struct{}) {
 }
 
 // Add the given volume to the list of items to clean.
-func (c *pvCleaner) Add(pv v1.PersistentVolume) {
+func (c *pvCleaner) Add(pv core.PersistentVolume) {
 	c.mutex.Lock()
 	defer c.mutex.Unlock()
 
@@ -105,7 +105,7 @@ func (c *pvCleaner) Add(pv v1.PersistentVolume) {
 // cleanFirst tries to clean the first PV in the list.
 // Returns (hasMore, error)
 func (c *pvCleaner) cleanFirst() (bool, error) {
-	var first *v1.PersistentVolume
+	var first *core.PersistentVolume
 	c.mutex.Lock()
 	if len(c.items) > 0 {
 		first = &c.items[0]
@@ -132,7 +132,7 @@ func (c *pvCleaner) cleanFirst() (bool, error) {
 }
 
 // clean tries to clean the given PV.
-func (c *pvCleaner) clean(pv v1.PersistentVolume) error {
+func (c *pvCleaner) clean(pv core.PersistentVolume) error {
 	log := c.log.Str("name", pv.GetName())
 	log.Debug("Cleaning PersistentVolume")
 
@@ -165,7 +165,7 @@ func (c *pvCleaner) clean(pv v1.PersistentVolume) error {
 	}
 
 	// Remove persistent volume
-	if err := c.cli.CoreV1().PersistentVolumes().Delete(context.Background(), pv.GetName(), metav1.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) {
+	if err := c.cli.CoreV1().PersistentVolumes().Delete(context.Background(), pv.GetName(), meta.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) {
 		log.Err(err).
 			Str("name", pv.GetName()).
 			Debug("Failed to remove PersistentVolume")

@@ -37,8 +37,8 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 
 	"github.com/dchest/uniuri"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha"
 	"github.com/arangodb/kube-arangodb/pkg/storage/provisioner"
@@ -59,7 +59,7 @@ var (
 )
 
 // createPVs creates a given number of PersistentVolume's.
-func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLocalStorage, unboundClaims []v1.PersistentVolumeClaim) error {
+func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLocalStorage, unboundClaims []core.PersistentVolumeClaim) error {
 	// Find provisioner clients
 	clients, err := ls.createProvisionerClients()
 	if err != nil {
@@ -99,7 +99,7 @@ func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLoca
 
 		// Find size of PVC
 		volSize := defaultVolumeSize
-		if reqStorage, ok := claim.Spec.Resources.Requests[v1.ResourceStorage]; ok {
+		if reqStorage, ok := claim.Spec.Resources.Requests[core.ResourceStorage]; ok {
 			if v, ok := reqStorage.AsInt64(); ok && v > 0 {
 				volSize = v
 			}
@@ -114,7 +114,7 @@ func (ls *LocalStorage) createPVs(ctx context.Context, apiObject *api.ArangoLoca
 }
 
 // createPV creates a PersistentVolume.
-func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocalStorage, clients []provisioner.API, clientsOffset int, volSize int64, claim v1.PersistentVolumeClaim, deploymentName, role string) error {
+func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocalStorage, clients []provisioner.API, clientsOffset int, volSize int64, claim core.PersistentVolumeClaim, deploymentName, role string) error {
 	// Try clients
 	for clientIdx := 0; clientIdx < len(clients); clientIdx++ {
 		client := clients[(clientsOffset+clientIdx)%len(clients)]
@@ -141,10 +141,10 @@ func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocal
 			}
 			// Create a volume
 			pvName := strings.ToLower(apiObject.GetName() + "-" + shortHash(info.NodeName) + "-" + name)
-			volumeMode := v1.PersistentVolumeFilesystem
+			volumeMode := core.PersistentVolumeFilesystem
 			nodeSel := createNodeSelector(info.NodeName)
-			pv := &v1.PersistentVolume{
-				ObjectMeta: metav1.ObjectMeta{
+			pv := &core.PersistentVolume{
+				ObjectMeta: meta.ObjectMeta{
 					Name: pvName,
 					Annotations: map[string]string{
 						AnnProvisionedBy: storageClassProvisioner,
@@ -155,36 +155,36 @@ func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocal
 						k8sutil.LabelKeyRole: role,
 					},
 				},
-				Spec: v1.PersistentVolumeSpec{
-					Capacity: v1.ResourceList{
-						v1.ResourceStorage: *resource.NewQuantity(volSize, resource.BinarySI),
+				Spec: core.PersistentVolumeSpec{
+					Capacity: core.ResourceList{
+						core.ResourceStorage: *resource.NewQuantity(volSize, resource.BinarySI),
 					},
-					PersistentVolumeReclaimPolicy: v1.PersistentVolumeReclaimRetain,
-					PersistentVolumeSource: v1.PersistentVolumeSource{
-						Local: &v1.LocalVolumeSource{
+					PersistentVolumeReclaimPolicy: core.PersistentVolumeReclaimRetain,
+					PersistentVolumeSource: core.PersistentVolumeSource{
+						Local: &core.LocalVolumeSource{
 							Path: localPath,
 						},
 					},
-					AccessModes: []v1.PersistentVolumeAccessMode{
-						v1.ReadWriteOnce,
+					AccessModes: []core.PersistentVolumeAccessMode{
+						core.ReadWriteOnce,
 					},
 					StorageClassName: apiObject.Spec.StorageClass.Name,
 					VolumeMode:       &volumeMode,
-					ClaimRef: &v1.ObjectReference{
+					ClaimRef: &core.ObjectReference{
 						Kind:       "PersistentVolumeClaim",
 						APIVersion: "",
 						Name:       claim.GetName(),
 						Namespace:  claim.GetNamespace(),
 						UID:        claim.GetUID(),
 					},
-					NodeAffinity: &v1.VolumeNodeAffinity{
+					NodeAffinity: &core.VolumeNodeAffinity{
 						Required: nodeSel,
 					},
 				},
 			}
 			// Attach PV to ArangoLocalStorage
 			pv.SetOwnerReferences(append(pv.GetOwnerReferences(), apiObject.AsOwner()))
-			if _, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().Create(context.Background(), pv, metav1.CreateOptions{}); err != nil {
+			if _, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().Create(context.Background(), pv, meta.CreateOptions{}); err != nil {
 				log.Err(err).Error("Failed to create PersistentVolume")
 				continue
 			}
@@ -196,7 +196,7 @@ func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocal
 	// Bind claim to volume
 	if err := ls.bindClaimToVolume(claim, pv.GetName()); err != nil {
 		// Try to delete the PV now
-		if err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().Delete(context.Background(), pv.GetName(), metav1.DeleteOptions{}); err != nil {
+		if err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().Delete(context.Background(), pv.GetName(), meta.DeleteOptions{}); err != nil {
 			log.Err(err).Error("Failed to delete PV after binding PVC failed")
 		}
 		return errors.WithStack(err)
@@ -210,7 +210,7 @@ func (ls *LocalStorage) createPV(ctx context.Context, apiObject *api.ArangoLocal
 
 // createValidEndpointList converts the given endpoints list into
 // valid addresses.
-func createValidEndpointList(list *v1.EndpointsList) []string {
+func createValidEndpointList(list *core.EndpointsList) []string {
 	result := make([]string, 0, len(list.Items))
 	for _, ep := range list.Items {
 		for _, subset := range ep.Subsets {
@@ -225,14 +225,14 @@ func createValidEndpointList(list *v1.EndpointsList) []string {
 }
 
 // createNodeSelector creates a node selector for the given node name.
-func createNodeSelector(nodeName string) *v1.NodeSelector {
-	return &v1.NodeSelector{
-		NodeSelectorTerms: []v1.NodeSelectorTerm{
-			v1.NodeSelectorTerm{
-				MatchExpressions: []v1.NodeSelectorRequirement{
-					v1.NodeSelectorRequirement{
+func createNodeSelector(nodeName string) *core.NodeSelector {
+	return &core.NodeSelector{
+		NodeSelectorTerms: []core.NodeSelectorTerm{
+			core.NodeSelectorTerm{
+				MatchExpressions: []core.NodeSelectorRequirement{
+					core.NodeSelectorRequirement{
 						Key:      "kubernetes.io/hostname",
-						Operator: v1.NodeSelectorOpIn,
+						Operator: core.NodeSelectorOpIn,
 						Values:   []string{nodeName},
 					},
 				},
@@ -258,7 +258,7 @@ func createNodeClientMap(ctx context.Context, clients []provisioner.API) map[str
 // the role of the server that the claim is used for and the value for `enforceAntiAffinity`.
 // If not found, empty strings are returned.
 // Returns deploymentName, role, enforceAntiAffinity.
-func getDeploymentInfo(pvc v1.PersistentVolumeClaim) (string, string, bool) {
+func getDeploymentInfo(pvc core.PersistentVolumeClaim) (string, string, bool) {
 	deploymentName := pvc.GetLabels()[k8sutil.LabelKeyArangoDeployment]
 	role := pvc.GetLabels()[k8sutil.LabelKeyRole]
 	enforceAntiAffinity, _ := strconv.ParseBool(pvc.GetAnnotations()[constants.AnnotationEnforceAntiAffinity]) // If annotation empty, this will yield false.
@@ -268,7 +268,7 @@ func getDeploymentInfo(pvc v1.PersistentVolumeClaim) (string, string, bool) {
 // filterAllowedNodes returns those clients that do not yet have a volume for the given deployment name & role.
 func (ls *LocalStorage) filterAllowedNodes(clients map[string]provisioner.API, deploymentName, role string) ([]provisioner.API, error) {
 	// Find all PVs for given deployment & role
-	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), metav1.ListOptions{
+	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), meta.ListOptions{
 		LabelSelector: fmt.Sprintf("%s=%s,%s=%s", k8sutil.LabelKeyArangoDeployment, deploymentName, k8sutil.LabelKeyRole, role),
 	})
 	if err != nil {
@@ -290,7 +290,7 @@ func (ls *LocalStorage) filterAllowedNodes(clients map[string]provisioner.API, d
 
 // bindClaimToVolume tries to bind the given claim to the volume with given name.
 // If the claim has been updated, the function retries several times.
-func (ls *LocalStorage) bindClaimToVolume(claim v1.PersistentVolumeClaim, volumeName string) error {
+func (ls *LocalStorage) bindClaimToVolume(claim core.PersistentVolumeClaim, volumeName string) error {
 	log := ls.log.Str("pvc-name", claim.GetName()).Str("volume-name", volumeName)
 	pvcs := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumeClaims(claim.GetNamespace())
 
@@ -299,7 +299,7 @@ func (ls *LocalStorage) bindClaimToVolume(claim v1.PersistentVolumeClaim, volume
 		time.Sleep(time.Millisecond * time.Duration(10*attempt))
 
 		// Fetch latest version of claim
-		updated, err := pvcs.Get(context.Background(), claim.GetName(), metav1.GetOptions{})
+		updated, err := pvcs.Get(context.Background(), claim.GetName(), meta.GetOptions{})
 		if k8sutil.IsNotFound(err) {
 			return errors.WithStack(err)
 		} else if err != nil {
@@ -318,7 +318,7 @@ func (ls *LocalStorage) bindClaimToVolume(claim v1.PersistentVolumeClaim, volume
 
 		// Try to bind
 		updated.Spec.VolumeName = volumeName
-		if _, err := pvcs.Update(context.Background(), updated, metav1.UpdateOptions{}); k8sutil.IsConflict(err) {
+		if _, err := pvcs.Update(context.Background(), updated, meta.UpdateOptions{}); k8sutil.IsConflict(err) {
 			// Claim modified already, retry
 			log.Err(err).Debug("PersistentVolumeClaim has been modified. Retrying.")
 		} else if err != nil {

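The createPV hunk is the core of the provisioner: a local PersistentVolume that is pre-bound to one claim via ClaimRef (so the scheduler cannot hand it to any other claim) and pinned to one node via VolumeNodeAffinity. A stripped-down sketch of that object, with illustrative naming and sizes:

package example

import (
	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// localPVForClaim shows the essence of the createPV hunk above: a local
// PersistentVolume pre-bound to one PVC and pinned to one node.
func localPVForClaim(claim *core.PersistentVolumeClaim, nodeName, path string, sizeBytes int64) *core.PersistentVolume {
	mode := core.PersistentVolumeFilesystem
	return &core.PersistentVolume{
		ObjectMeta: meta.ObjectMeta{Name: claim.Name + "-pv"},
		Spec: core.PersistentVolumeSpec{
			Capacity: core.ResourceList{
				core.ResourceStorage: *resource.NewQuantity(sizeBytes, resource.BinarySI),
			},
			PersistentVolumeReclaimPolicy: core.PersistentVolumeReclaimRetain,
			PersistentVolumeSource: core.PersistentVolumeSource{
				Local: &core.LocalVolumeSource{Path: path},
			},
			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			VolumeMode:  &mode,
			// Pre-bind: only this claim (matched by name, namespace and UID)
			// may ever bind to the volume.
			ClaimRef: &core.ObjectReference{
				Kind:      "PersistentVolumeClaim",
				Name:      claim.Name,
				Namespace: claim.Namespace,
				UID:       claim.UID,
			},
			// Pin the volume to the node that actually hosts the directory.
			NodeAffinity: &core.VolumeNodeAffinity{
				Required: &core.NodeSelector{
					NodeSelectorTerms: []core.NodeSelectorTerm{{
						MatchExpressions: []core.NodeSelectorRequirement{{
							Key:      "kubernetes.io/hostname",
							Operator: core.NodeSelectorOpIn,
							Values:   []string{nodeName},
						}},
					}},
				},
			},
		},
	}
}
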
@@ -26,8 +26,8 @@ import (
 	"testing"
 
 	"github.com/stretchr/testify/assert"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/storage/provisioner"
 	"github.com/arangodb/kube-arangodb/pkg/storage/provisioner/mocks"
@@ -36,31 +36,31 @@ import (
 // TestCreateValidEndpointList tests createValidEndpointList.
 func TestCreateValidEndpointList(t *testing.T) {
 	tests := []struct {
-		Input    *v1.EndpointsList
+		Input    *core.EndpointsList
 		Expected []string
 	}{
 		{
-			Input:    &v1.EndpointsList{},
+			Input:    &core.EndpointsList{},
 			Expected: []string{},
 		},
 		{
-			Input: &v1.EndpointsList{
-				Items: []v1.Endpoints{
-					v1.Endpoints{
-						Subsets: []v1.EndpointSubset{
-							v1.EndpointSubset{
-								Addresses: []v1.EndpointAddress{
-									v1.EndpointAddress{
+			Input: &core.EndpointsList{
+				Items: []core.Endpoints{
+					core.Endpoints{
+						Subsets: []core.EndpointSubset{
+							core.EndpointSubset{
+								Addresses: []core.EndpointAddress{
+									core.EndpointAddress{
 										IP: "1.2.3.4",
 									},
 								},
 							},
-							v1.EndpointSubset{
-								Addresses: []v1.EndpointAddress{
-									v1.EndpointAddress{
+							core.EndpointSubset{
+								Addresses: []core.EndpointAddress{
+									core.EndpointAddress{
 										IP: "5.6.7.8",
 									},
-									v1.EndpointAddress{
+									core.EndpointAddress{
 										IP: "9.10.11.12",
 									},
 								},
@@ -127,20 +127,20 @@ func TestCreateNodeClientMap(t *testing.T) {
 // TestGetDeploymentInfo tests getDeploymentInfo.
 func TestGetDeploymentInfo(t *testing.T) {
 	tests := []struct {
-		Input                       v1.PersistentVolumeClaim
+		Input                       core.PersistentVolumeClaim
 		ExpectedDeploymentName      string
 		ExpectedRole                string
 		ExpectedEnforceAntiAffinity bool
 	}{
 		{
-			Input:                       v1.PersistentVolumeClaim{},
+			Input:                       core.PersistentVolumeClaim{},
 			ExpectedDeploymentName:      "",
 			ExpectedRole:                "",
 			ExpectedEnforceAntiAffinity: false,
 		},
 		{
-			Input: v1.PersistentVolumeClaim{
-				ObjectMeta: metav1.ObjectMeta{
+			Input: core.PersistentVolumeClaim{
+				ObjectMeta: meta.ObjectMeta{
 					Annotations: map[string]string{
 						"database.arangodb.com/enforce-anti-affinity": "true",
 					},
@@ -155,8 +155,8 @@ func TestGetDeploymentInfo(t *testing.T) {
 			ExpectedEnforceAntiAffinity: true,
 		},
 		{
-			Input: v1.PersistentVolumeClaim{
-				ObjectMeta: metav1.ObjectMeta{
+			Input: core.PersistentVolumeClaim{
+				ObjectMeta: meta.ObjectMeta{
 					Annotations: map[string]string{
 						"database.arangodb.com/enforce-anti-affinity": "false",
 					},
@@ -171,8 +171,8 @@ func TestGetDeploymentInfo(t *testing.T) {
 			ExpectedEnforceAntiAffinity: false,
 		},
 		{
-			Input: v1.PersistentVolumeClaim{
-				ObjectMeta: metav1.ObjectMeta{
+			Input: core.PersistentVolumeClaim{
+				ObjectMeta: meta.ObjectMeta{
 					Annotations: map[string]string{
 						"database.arangodb.com/enforce-anti-affinity": "wrong",
 					},

@@ -21,7 +21,7 @@
 package storage
 
 import (
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
@@ -29,14 +29,14 @@ import (
 
 // listenForPvEvents keeps listening for changes in PV's until the given channel is closed.
 func (ls *LocalStorage) listenForPvEvents() {
-	getPv := func(obj interface{}) (*v1.PersistentVolume, bool) {
-		pv, ok := obj.(*v1.PersistentVolume)
+	getPv := func(obj interface{}) (*core.PersistentVolume, bool) {
+		pv, ok := obj.(*core.PersistentVolume)
 		if !ok {
 			tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 			if !ok {
 				return nil, false
 			}
-			pv, ok = tombstone.Obj.(*v1.PersistentVolume)
+			pv, ok = tombstone.Obj.(*core.PersistentVolume)
 			return pv, ok
 		}
 		return pv, true
@@ -46,7 +46,7 @@ func (ls *LocalStorage) listenForPvEvents() {
 		ls.deps.Client.Kubernetes().CoreV1().RESTClient(),
 		"persistentvolumes",
 		"", //ls.apiObject.GetNamespace(),
-		&v1.PersistentVolume{},
+		&core.PersistentVolume{},
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				// Ignore

@@ -26,15 +26,15 @@ import (
 
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // inspectPVs queries all PersistentVolume's and triggers a cleanup for
 // released volumes.
 // Returns the number of available PV's.
 func (ls *LocalStorage) inspectPVs() (int, error) {
-	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), metav1.ListOptions{})
+	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), meta.ListOptions{})
 	if err != nil {
 		return 0, errors.WithStack(err)
 	}
@@ -47,7 +47,7 @@ func (ls *LocalStorage) inspectPVs() (int, error) {
 			continue
 		}
 		switch pv.Status.Phase {
-		case v1.VolumeAvailable:
+		case core.VolumeAvailable:
 			// Is this an old volume?
 			if pv.GetObjectMeta().GetCreationTimestamp().Time.Before(cleanupBeforeTimestamp) {
 				// Let's clean it up
@@ -62,7 +62,7 @@ func (ls *LocalStorage) inspectPVs() (int, error) {
 			} else {
 				availableVolumes++
 			}
-		case v1.VolumeReleased:
+		case core.VolumeReleased:
 			if ls.isOwnerOf(&pv) {
 				// Cleanup this volume
 				ls.log.Str("name", pv.GetName()).Debug("Added PersistentVolume to cleaner")

@@ -21,7 +21,7 @@
 package storage
 
 import (
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 	"k8s.io/client-go/tools/cache"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
@@ -29,14 +29,14 @@ import (
 
 // listenForPvcEvents keeps listening for changes in PVC's until the given channel is closed.
 func (ls *LocalStorage) listenForPvcEvents() {
-	getPvc := func(obj interface{}) (*v1.PersistentVolumeClaim, bool) {
-		pvc, ok := obj.(*v1.PersistentVolumeClaim)
+	getPvc := func(obj interface{}) (*core.PersistentVolumeClaim, bool) {
+		pvc, ok := obj.(*core.PersistentVolumeClaim)
 		if !ok {
 			tombstone, ok := obj.(cache.DeletedFinalStateUnknown)
 			if !ok {
 				return nil, false
 			}
-			pvc, ok = tombstone.Obj.(*v1.PersistentVolumeClaim)
+			pvc, ok = tombstone.Obj.(*core.PersistentVolumeClaim)
 			return pvc, ok
 		}
 		return pvc, true
@@ -46,7 +46,7 @@ func (ls *LocalStorage) listenForPvcEvents() {
 		ls.deps.Client.Kubernetes().CoreV1().RESTClient(),
 		"persistentvolumeclaims",
 		"", //ls.apiObject.GetNamespace(),
-		&v1.PersistentVolumeClaim{},
+		&core.PersistentVolumeClaim{},
 		cache.ResourceEventHandlerFuncs{
 			AddFunc: func(obj interface{}) {
 				if pvc, ok := getPvc(obj); ok {

@@ -24,21 +24,21 @@ import (
 	"context"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // inspectPVCs queries all PVC's and checks if there is a need to
 // build new persistent volumes.
 // Returns the PVC's that need a volume.
-func (ls *LocalStorage) inspectPVCs() ([]v1.PersistentVolumeClaim, error) {
+func (ls *LocalStorage) inspectPVCs() ([]core.PersistentVolumeClaim, error) {
 	ns := ls.apiObject.GetNamespace()
-	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumeClaims(ns).List(context.Background(), metav1.ListOptions{})
+	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumeClaims(ns).List(context.Background(), meta.ListOptions{})
 	if err != nil {
 		return nil, errors.WithStack(err)
 	}
 	spec := ls.apiObject.Spec
-	var result []v1.PersistentVolumeClaim
+	var result []core.PersistentVolumeClaim
 	for _, pvc := range list.Items {
 		if !pvcMatchesStorageClass(pvc, spec.StorageClass.Name, spec.StorageClass.IsDefault) {
 			continue
@@ -53,7 +53,7 @@ func (ls *LocalStorage) inspectPVCs() ([]v1.PersistentVolumeClaim, error) {
 
 // pvcMatchesStorageClass checks if the given pvc requests a volume
 // of the given storage class.
-func pvcMatchesStorageClass(pvc v1.PersistentVolumeClaim, storageClassName string, isDefault bool) bool {
+func pvcMatchesStorageClass(pvc core.PersistentVolumeClaim, storageClassName string, isDefault bool) bool {
 	scn := pvc.Spec.StorageClassName
 	if scn == nil {
 		// No storage class specified, default is used
@@ -63,6 +63,6 @@ func pvcMatchesStorageClass(pvc v1.PersistentVolumeClaim, storageClassName strin
 }
 
 // pvcNeedsVolume checks if the given pvc is in need of a persistent volume.
-func pvcNeedsVolume(pvc v1.PersistentVolumeClaim) bool {
-	return pvc.Status.Phase == v1.ClaimPending
+func pvcNeedsVolume(pvc core.PersistentVolumeClaim) bool {
+	return pvc.Status.Phase == core.ClaimPending
 }

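The two predicates above carry the selection logic for the whole inspection pass. The subtle case is a nil spec.storageClassName, which means "use the cluster default": such a claim only matches when our StorageClass is the default one. Restated as a standalone sketch with illustrative names:

package example

import (
	core "k8s.io/api/core/v1"
)

// claimWantsClass mirrors pvcMatchesStorageClass above: a nil
// spec.storageClassName means the cluster default class is used, so the
// claim only matches when our class is the default.
func claimWantsClass(pvc core.PersistentVolumeClaim, className string, classIsDefault bool) bool {
	if pvc.Spec.StorageClassName == nil {
		return classIsDefault
	}
	return *pvc.Spec.StorageClassName == className
}

// claimNeedsVolume mirrors pvcNeedsVolume: only Pending claims are still
// waiting for a volume to be provisioned and bound.
func claimNeedsVolume(pvc core.PersistentVolumeClaim) bool {
	return pvc.Status.Phase == core.ClaimPending
}
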
@@ -26,7 +26,7 @@ import (
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha"
 	"github.com/arangodb/kube-arangodb/pkg/server"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // Name returns the name of the local storage resource
@@ -63,7 +63,7 @@ func (ls *LocalStorage) StorageClassIsDefault() bool {
 
 // Volumes returns all volumes created by the local storage resource
 func (ls *LocalStorage) Volumes() []server.Volume {
-	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), metav1.ListOptions{})
+	list, err := ls.deps.Client.Kubernetes().CoreV1().PersistentVolumes().List(context.Background(), meta.ListOptions{})
 	if err != nil {
 		ls.log.Err(err).Error("Failed to list persistent volumes")
 		return nil

@@ -22,10 +22,10 @@ package storage
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/server"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )
 
-type serverVolume v1.PersistentVolume
+type serverVolume core.PersistentVolume
 
 // Name returns the name of the volume
 func (v serverVolume) Name() string {
@@ -37,9 +37,9 @@ func (v serverVolume) StateColor() server.StateColor {
 	switch v.Status.Phase {
 	default:
 		return server.StateYellow
-	case v1.VolumeBound:
+	case core.VolumeBound:
 		return server.StateGreen
-	case v1.VolumeFailed:
+	case core.VolumeFailed:
 		return server.StateRed
 	}
 }
@@ -51,7 +51,7 @@ func (v serverVolume) NodeName() string {
 
 // Capacity returns the capacity of the volume in human readable form
 func (v serverVolume) Capacity() string {
-	c, found := v.Spec.Capacity[v1.ResourceStorage]
+	c, found := v.Spec.Capacity[core.ResourceStorage]
 	if found {
 		return c.String()
 	}

@@ -24,8 +24,8 @@ import (
 	"context"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha"
 	"github.com/arangodb/kube-arangodb/pkg/storage/provisioner"
@@ -36,16 +36,16 @@ import (
 // provisioners.
 func (ls *LocalStorage) ensureProvisionerService(apiObject *api.ArangoLocalStorage) error {
 	labels := k8sutil.LabelsForLocalStorage(apiObject.GetName(), roleProvisioner)
-	svc := &v1.Service{
-		ObjectMeta: metav1.ObjectMeta{
+	svc := &core.Service{
+		ObjectMeta: meta.ObjectMeta{
 			Name:   apiObject.GetName(),
 			Labels: labels,
 		},
-		Spec: v1.ServiceSpec{
-			Ports: []v1.ServicePort{
-				v1.ServicePort{
+		Spec: core.ServiceSpec{
+			Ports: []core.ServicePort{
+				core.ServicePort{
 					Name:     "provisioner",
-					Protocol: v1.ProtocolTCP,
+					Protocol: core.ProtocolTCP,
 					Port:     provisioner.DefaultPort,
 				},
 			},
@@ -54,7 +54,7 @@ func (ls *LocalStorage) ensureProvisionerService(apiObject *api.ArangoLocalStora
 	}
 	svc.SetOwnerReferences(append(svc.GetOwnerReferences(), apiObject.AsOwner()))
 	ns := ls.config.Namespace
-	if _, err := ls.deps.Client.Kubernetes().CoreV1().Services(ns).Create(context.Background(), svc, metav1.CreateOptions{}); err != nil && !k8sutil.IsAlreadyExists(err) {
+	if _, err := ls.deps.Client.Kubernetes().CoreV1().Services(ns).Create(context.Background(), svc, meta.CreateOptions{}); err != nil && !k8sutil.IsAlreadyExists(err) {
 		return errors.WithStack(err)
 	}
 	return nil

@@ -24,9 +24,9 @@ import (
 	"context"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
-	corev1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 	v1 "k8s.io/api/storage/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	api "github.com/arangodb/kube-arangodb/pkg/apis/storage/v1alpha"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
@@ -41,9 +41,9 @@ var (
 func (l *LocalStorage) ensureStorageClass(apiObject *api.ArangoLocalStorage) error {
 	spec := apiObject.Spec.StorageClass
 	bindingMode := v1.VolumeBindingWaitForFirstConsumer
-	reclaimPolicy := corev1.PersistentVolumeReclaimRetain
+	reclaimPolicy := core.PersistentVolumeReclaimRetain
 	sc := &v1.StorageClass{
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name: spec.Name,
 		},
 		ReclaimPolicy: &reclaimPolicy,
@@ -53,7 +53,7 @@ func (l *LocalStorage) ensureStorageClass(apiObject *api.ArangoLocalStorage) err
 	// Note: We do not attach the StorageClass to the apiObject (OwnerRef) because many
 	// ArangoLocalStorage resources may use the same StorageClass.
 	cli := l.deps.Client.Kubernetes().StorageV1()
-	if _, err := cli.StorageClasses().Create(context.Background(), sc, metav1.CreateOptions{}); k8sutil.IsAlreadyExists(err) {
+	if _, err := cli.StorageClasses().Create(context.Background(), sc, meta.CreateOptions{}); k8sutil.IsAlreadyExists(err) {
 		l.log.
 			Str("storageclass", sc.GetName()).
 			Debug("StorageClass already exists")
@@ -70,7 +70,7 @@ func (l *LocalStorage) ensureStorageClass(apiObject *api.ArangoLocalStorage) err
 
 	if apiObject.Spec.StorageClass.IsDefault {
 		// UnMark current default (if any)
-		list, err := cli.StorageClasses().List(context.Background(), metav1.ListOptions{})
+		list, err := cli.StorageClasses().List(context.Background(), meta.ListOptions{})
 		if err != nil {
 			l.log.Err(err).Debug("Listing StorageClasses failed")
 			return errors.WithStack(err)

@@ -28,7 +28,7 @@ import (
 
 	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
 	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/util/retry"
 )
@@ -44,7 +44,7 @@ func WaitReady(check func() error) error {
 // WaitCRDReady waits for a custom resource definition with given name to be ready.
 func WaitCRDReady(clientset apiextensionsclient.Interface, crdName string) error {
 	op := func() error {
-		crd, err := clientset.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), crdName, metav1.GetOptions{})
+		crd, err := clientset.ApiextensionsV1().CustomResourceDefinitions().Get(context.Background(), crdName, meta.GetOptions{})
 		if err != nil {
 			return errors.WithStack(err)
 		}

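WaitCRDReady above wraps a readiness check in the repo's own retry helper. The check itself conventionally looks for the Established condition on the CRD; a sketch of the same idea using apimachinery's poll helper instead of the repo's retry package (the interval and timeout values are illustrative):

package example

import (
	"context"
	"time"

	apiextensionsv1 "k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1"
	apiextensionsclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
)

// waitCRDEstablished polls until the named CRD reports the Established
// condition, the usual readiness signal behind a WaitCRDReady-style helper.
func waitCRDEstablished(ctx context.Context, cli apiextensionsclient.Interface, name string) error {
	return wait.PollImmediate(time.Second, time.Minute, func() (bool, error) {
		crd, err := cli.ApiextensionsV1().CustomResourceDefinitions().Get(ctx, name, meta.GetOptions{})
		if err != nil {
			return false, nil // treat errors as transient and keep polling
		}
		for _, cond := range crd.Status.Conditions {
			if cond.Type == apiextensionsv1.Established && cond.Status == apiextensionsv1.ConditionTrue {
				return true, nil
			}
		}
		return false, nil
	})
}
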
@@ -22,20 +22,20 @@ package k8sutil
 
 import (
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // CreateAffinity creates pod anti-affinity for the given role.
 // role contains the name of the role to configure anti-affinity with.
 // affinityWithRole contains the role to configure affinity with.
-func CreateAffinity(deploymentName, role string, required bool, affinityWithRole string) *v1.Affinity {
-	a := &v1.Affinity{
-		NodeAffinity: &v1.NodeAffinity{
-			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-				NodeSelectorTerms: []v1.NodeSelectorTerm{
+func CreateAffinity(deploymentName, role string, required bool, affinityWithRole string) *core.Affinity {
+	a := &core.Affinity{
+		NodeAffinity: &core.NodeAffinity{
+			RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
+				NodeSelectorTerms: []core.NodeSelectorTerm{
 					{
-						MatchExpressions: []v1.NodeSelectorRequirement{
+						MatchExpressions: []core.NodeSelectorRequirement{
 							{
 								Key:      shared.NodeArchAffinityLabel,
 								Operator: "In",
@@ -46,40 +46,40 @@ func CreateAffinity(deploymentName, role string, required bool, affinityWithRole
 				},
 			},
 		},
-		PodAntiAffinity: &v1.PodAntiAffinity{},
+		PodAntiAffinity: &core.PodAntiAffinity{},
 	}
 	labels := LabelsForDeployment(deploymentName, role)
-	labelSelector := &metav1.LabelSelector{
+	labelSelector := &meta.LabelSelector{
 		MatchLabels: labels,
 	}
 	if required {
-		a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, v1.PodAffinityTerm{
+		a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution, core.PodAffinityTerm{
 			LabelSelector: labelSelector,
 			TopologyKey:   shared.TopologyKeyHostname,
 		})
 	} else {
-		a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, v1.WeightedPodAffinityTerm{
+		a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution, core.WeightedPodAffinityTerm{
 			Weight: 1,
-			PodAffinityTerm: v1.PodAffinityTerm{
+			PodAffinityTerm: core.PodAffinityTerm{
 				LabelSelector: labelSelector,
 				TopologyKey:   shared.TopologyKeyHostname,
 			},
 		})
 	}
 	if affinityWithRole != "" {
-		a.PodAffinity = &v1.PodAffinity{}
-		labelSelector := &metav1.LabelSelector{
+		a.PodAffinity = &core.PodAffinity{}
+		labelSelector := &meta.LabelSelector{
 			MatchLabels: LabelsForDeployment(deploymentName, affinityWithRole),
 		}
 		if required {
-			a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, v1.PodAffinityTerm{
+			a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.RequiredDuringSchedulingIgnoredDuringExecution, core.PodAffinityTerm{
 				LabelSelector: labelSelector,
 				TopologyKey:   shared.TopologyKeyHostname,
 			})
 		} else {
-			a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, v1.WeightedPodAffinityTerm{
+			a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution = append(a.PodAffinity.PreferredDuringSchedulingIgnoredDuringExecution, core.WeightedPodAffinityTerm{
 				Weight: 1,
-				PodAffinityTerm: v1.PodAffinityTerm{
+				PodAffinityTerm: core.PodAffinityTerm{
 					LabelSelector: labelSelector,
 					TopologyKey:   shared.TopologyKeyHostname,
 				},

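CreateAffinity above combines a required node-architecture term with pod (anti-)affinity built from the deployment labels. The essential anti-affinity payload, reduced to a standalone sketch (the repo uses shared.TopologyKeyHostname; the literal key below is the standard hostname label, and the function names are illustrative):

package example

import (
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// antiAffinityFor builds the core of what CreateAffinity produces for
// required=true: never co-schedule two pods carrying these labels on the
// same host.
func antiAffinityFor(labels map[string]string) *core.Affinity {
	return &core.Affinity{
		PodAntiAffinity: &core.PodAntiAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: []core.PodAffinityTerm{{
				LabelSelector: &meta.LabelSelector{MatchLabels: labels},
				TopologyKey:   "kubernetes.io/hostname",
			}},
		},
	}
}

// withAffinity shows the usage: attach the affinity to a pod spec before
// creating the pod.
func withAffinity(pod *core.Pod, labels map[string]string) {
	pod.Spec.Affinity = antiAffinityFor(labels)
}
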
@@ -23,7 +23,7 @@ package k8sutil
 import (
 	"testing"
 
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
 	"github.com/stretchr/testify/assert"
@@ -31,11 +31,11 @@ import (
 )
 
 func TestCreateAffinity(t *testing.T) {
-	expectedNodeAffinity := &v1.NodeAffinity{
-		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
-			NodeSelectorTerms: []v1.NodeSelectorTerm{
+	expectedNodeAffinity := &core.NodeAffinity{
+		RequiredDuringSchedulingIgnoredDuringExecution: &core.NodeSelector{
+			NodeSelectorTerms: []core.NodeSelectorTerm{
 				{
-					MatchExpressions: []v1.NodeSelectorRequirement{
+					MatchExpressions: []core.NodeSelectorRequirement{
 						{
 							Key:      shared.NodeArchAffinityLabel,
 							Operator: "In",

@@ -26,7 +26,7 @@ import (
 	core "k8s.io/api/core/v1"
 
 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 func appendDeploymentClusterDomain(dns string, domain *string) string {
@@ -39,13 +39,13 @@ func appendDeploymentClusterDomain(dns string, domain *string) string {
 
 // CreatePodDNSName returns the DNS of a pod with a given role & id in
 // a given deployment.
-func CreatePodDNSName(deployment metav1.Object, role, id string) string {
+func CreatePodDNSName(deployment meta.Object, role, id string) string {
 	return fmt.Sprintf("%s.%s.%s.svc", shared.CreatePodHostName(deployment.GetName(), role, id), CreateHeadlessServiceName(deployment.GetName()), deployment.GetNamespace())
 }
 
 // CreatePodDNSNameWithDomain returns the DNS of a pod with a given role & id in
 // a given deployment, including the optional cluster domain.
-func CreatePodDNSNameWithDomain(deployment metav1.Object, domain *string, role, id string) string {
+func CreatePodDNSNameWithDomain(deployment meta.Object, domain *string, role, id string) string {
 	return appendDeploymentClusterDomain(CreatePodDNSName(deployment, role, id), domain)
 }
 
@@ -60,21 +60,21 @@ func CreateServiceDNSNameWithDomain(svc *core.Service, domain *string) string {
 }
 
 // CreateDatabaseClientServiceDNSNameWithDomain returns the DNS of the database client service.
-func CreateDatabaseClientServiceDNSNameWithDomain(deployment metav1.Object, domain *string) string {
+func CreateDatabaseClientServiceDNSNameWithDomain(deployment meta.Object, domain *string) string {
 	return appendDeploymentClusterDomain(CreateDatabaseClientServiceDNSName(deployment), domain)
 }
 
 // CreateDatabaseClientServiceDNSName returns the DNS of the database client service.
-func CreateDatabaseClientServiceDNSName(deployment metav1.Object) string {
+func CreateDatabaseClientServiceDNSName(deployment meta.Object) string {
 	return fmt.Sprintf("%s.%s.svc", CreateDatabaseClientServiceName(deployment.GetName()), deployment.GetNamespace())
 }
 
 // CreateSyncMasterClientServiceDNSNameWithDomain returns the DNS of the syncmaster client service.
-func CreateSyncMasterClientServiceDNSNameWithDomain(deployment metav1.Object, domain *string) string {
+func CreateSyncMasterClientServiceDNSNameWithDomain(deployment meta.Object, domain *string) string {
 	return appendDeploymentClusterDomain(CreateSyncMasterClientServiceDNSName(deployment), domain)
 }
 
 // CreateSyncMasterClientServiceDNSName returns the DNS of the syncmaster client service.
-func CreateSyncMasterClientServiceDNSName(deployment metav1.Object) string {
+func CreateSyncMasterClientServiceDNSName(deployment meta.Object) string {
 	return fmt.Sprintf("%s.%s.svc", CreateSyncMasterClientServiceName(deployment.GetName()), deployment.GetNamespace())
 }

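All of the helpers above compose the same shape: <host>.<service>.<namespace>.svc plus an optional cluster domain. A sketch that makes the shape explicit; the host and headless-service naming below is illustrative (the real names come from shared.CreatePodHostName and CreateHeadlessServiceName):

package example

import (
	"fmt"

	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podDNSName reproduces the shape built by CreatePodDNSName above:
// <pod-host-name>.<headless-service>.<namespace>.svc, optionally suffixed
// with the cluster domain.
func podDNSName(depl meta.Object, role, id string, domain *string) string {
	host := fmt.Sprintf("%s-%s-%s", depl.GetName(), role, id) // illustrative host naming
	svc := fmt.Sprintf("%s-int", depl.GetName())              // illustrative headless service name
	dns := fmt.Sprintf("%s.%s.%s.svc", host, svc, depl.GetNamespace())
	if domain != nil {
		dns += "." + *domain // e.g. cluster.local
	}
	return dns
}
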
@@ -27,12 +27,12 @@ import (
 
 	"github.com/stretchr/testify/assert"
 
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
 // TestCreatePodDNSName tests CreatePodDNSName.
 func TestCreatePodDNSName(t *testing.T) {
-	depl := &metav1.ObjectMeta{
+	depl := &meta.ObjectMeta{
 		Name:      "test",
 		Namespace: "ns",
 	}
@@ -42,7 +42,7 @@ func TestCreatePodDNSName(t *testing.T) {
 
 // TestCreateDatabaseClientServiceDNSName tests CreateDatabaseClientServiceDNSName.
 func TestCreateDatabaseClientServiceDNSName(t *testing.T) {
-	depl := &metav1.ObjectMeta{
+	depl := &meta.ObjectMeta{
 		Name:      "test",
 		Namespace: "ns",
 	}
@@ -51,7 +51,7 @@ func TestCreateDatabaseClientServiceDNSName(t *testing.T) {
 }
 
 func TestCreatePodDNSNameWithDomain(t *testing.T) {
-	depl := &metav1.ObjectMeta{
+	depl := &meta.ObjectMeta{
 		Name:      "test",
 		Namespace: "ns",
 	}

@ -26,8 +26,8 @@ import (
|
|||
|
||||
driver "github.com/arangodb/go-driver"
|
||||
upgraderules "github.com/arangodb/go-upgrade-rules"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
core "k8s.io/api/core/v1"
|
||||
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/runtime"
|
||||
)
|
||||
|
||||
|
@ -42,15 +42,15 @@ type Event struct {
|
|||
// APIObject helps to abstract an object from our custom API.
|
||||
type APIObject interface {
|
||||
runtime.Object
|
||||
metav1.Object
|
||||
meta.Object
|
||||
// AsOwner creates an OwnerReference for the given deployment
|
||||
AsOwner() metav1.OwnerReference
|
||||
AsOwner() meta.OwnerReference
|
||||
}
|
||||
|
||||
// NewMemberAddEvent creates an event indicating that a member was added.
|
||||
func NewMemberAddEvent(memberName, role string, apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = fmt.Sprintf("New %s Added", strings.Title(role))
|
||||
event.Message = fmt.Sprintf("New %s %s added to deployment", role, memberName)
|
||||
return event
|
||||
|
@ -59,7 +59,7 @@ func NewMemberAddEvent(memberName, role string, apiObject APIObject) *Event {
|
|||
// NewMemberRemoveEvent creates an event indicating that an existing member was removed.
|
||||
func NewMemberRemoveEvent(memberName, role string, apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = fmt.Sprintf("%s Removed", strings.Title(role))
|
||||
event.Message = fmt.Sprintf("Existing %s %s removed from the deployment", role, memberName)
|
||||
return event
|
||||
|
@ -68,7 +68,7 @@ func NewMemberRemoveEvent(memberName, role string, apiObject APIObject) *Event {
|
|||
// NewPodCreatedEvent creates an event indicating that a pod has been created
|
||||
func NewPodCreatedEvent(podName, role string, apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = fmt.Sprintf("Pod Of %s Created", strings.Title(role))
|
||||
event.Message = fmt.Sprintf("Pod %s of member %s is created", podName, role)
|
||||
return event
|
||||
|
@ -77,7 +77,7 @@ func NewPodCreatedEvent(podName, role string, apiObject APIObject) *Event {
|
|||
// NewPodGoneEvent creates an event indicating that a pod is missing
|
||||
func NewPodGoneEvent(podName, role string, apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = fmt.Sprintf("Pod Of %s Gone", strings.Title(role))
|
||||
event.Message = fmt.Sprintf("Pod %s of member %s is gone", podName, role)
|
||||
return event
|
||||
|
@ -87,7 +87,7 @@ func NewPodGoneEvent(podName, role string, apiObject APIObject) *Event {
|
|||
// that is immutable.
|
||||
func NewImmutableFieldEvent(fieldName string, apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = "Immutable Field Change"
|
||||
event.Message = fmt.Sprintf("Changing field %s is not possible. It has been reset to its original value.", fieldName)
|
||||
return event
|
||||
|
@ -96,7 +96,7 @@ func NewImmutableFieldEvent(fieldName string, apiObject APIObject) *Event {
|
|||
// NewPodsSchedulingFailureEvent creates an event indicating that one of more cannot be scheduled.
|
||||
func NewPodsSchedulingFailureEvent(unscheduledPodNames []string, apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = "Pods Scheduling Failure"
|
||||
event.Message = fmt.Sprintf("One or more pods are not scheduled in time. Pods: %v", unscheduledPodNames)
|
||||
return event
|
||||
|
@ -106,7 +106,7 @@ func NewPodsSchedulingFailureEvent(unscheduledPodNames []string, apiObject APIOb
|
|||
// pod scheduling has been resolved.
|
||||
func NewPodsSchedulingResolvedEvent(apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = "Pods Scheduling Resolved"
|
||||
event.Message = "All pods have been scheduled"
|
||||
return event
|
||||
|
@ -115,7 +115,7 @@ func NewPodsSchedulingResolvedEvent(apiObject APIObject) *Event {
|
|||
// NewSecretsChangedEvent creates an event indicating that one of more secrets have changed.
|
||||
func NewSecretsChangedEvent(changedSecretNames []string, apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = "Secrets changed"
|
||||
event.Message = fmt.Sprintf("Found %d changed secrets. You must revert them before the operator can continue. Secrets: %v", len(changedSecretNames), changedSecretNames)
|
||||
return event
|
||||
|
@ -125,7 +125,7 @@ func NewSecretsChangedEvent(changedSecretNames []string, apiObject APIObject) *E
|
|||
// to their original values.
|
||||
func NewSecretsRestoredEvent(apiObject APIObject) *Event {
|
||||
event := newDeploymentEvent(apiObject)
|
||||
event.Type = v1.EventTypeNormal
|
||||
event.Type = core.EventTypeNormal
|
||||
event.Reason = "Secrets restored"
|
||||
event.Message = "All secrets have been restored to their original value"
|
||||
return event
|
||||
|
@@ -135,7 +135,7 @@ func NewSecretsRestoredEvent(apiObject APIObject) *Event {
 // has been created.
 func NewAccessPackageCreatedEvent(apiObject APIObject, apSecretName string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "Access package created"
 	event.Message = fmt.Sprintf("An access package named %s has been created", apSecretName)
 	return event
@@ -145,7 +145,7 @@ func NewAccessPackageCreatedEvent(apiObject APIObject, apSecretName string) *Event {
 // has been deleted.
 func NewAccessPackageDeletedEvent(apiObject APIObject, apSecretName string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "Access package deleted"
 	event.Message = fmt.Sprintf("An access package named %s has been deleted", apSecretName)
 	return event
@@ -154,7 +154,7 @@ func NewAccessPackageDeletedEvent(apiObject APIObject, apSecretName string) *Event {
 // NewPlanAppendEvent creates an event indicating that an item on a reconciliation plan has been added
 func NewPlanAppendEvent(apiObject APIObject, itemType, memberID, role, reason string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "Plan Action added"
 	msg := fmt.Sprintf("A plan item of type %s", itemType)
 	if role != "" {
@@ -172,7 +172,7 @@ func NewPlanAppendEvent(apiObject APIObject, itemType, memberID, role, reason string) *Event {
 // finish before its deadline.
 func NewPlanTimeoutEvent(apiObject APIObject, itemType, memberID, role string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "Reconciliation Plan Timeout"
 	event.Message = fmt.Sprintf("A plan item of type %s or member %s with role %s did not finish in time", itemType, memberID, role)
 	return event
@@ -182,7 +182,7 @@ func NewPlanTimeoutEvent(apiObject APIObject, itemType, memberID, role string) *Event {
 // the entire plan.
 func NewPlanAbortedEvent(apiObject APIObject, itemType, memberID, role string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "Reconciliation Plan Aborted"
 	event.Message = fmt.Sprintf("A plan item of type %s or member %s with role %s wants to abort the plan", itemType, memberID, role)
 	return event
@@ -192,7 +192,7 @@ func NewPlanAbortedEvent(apiObject APIObject, itemType, memberID, role string) *Event {
 // but this is not possible for the given reason.
 func NewCannotChangeStorageClassEvent(apiObject APIObject, memberID, role, subReason string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = fmt.Sprintf("%s Member StorageClass Cannot Change", strings.Title(role))
 	event.Message = fmt.Sprintf("Member %s with role %s should use a different StorageClass, but it cannot because: %s", memberID, role, subReason)
 	return event
@@ -202,7 +202,7 @@ func NewCannotChangeStorageClassEvent(apiObject APIObject, memberID, role, subReason string) *Event {
 // is currently not allowed.
 func NewDowntimeNotAllowedEvent(apiObject APIObject, operation string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "Downtime Operation Postponed"
 	event.Message = fmt.Sprintf("The '%s' operation is postponed because downtime is not allowed. Set `spec.downtimeAllowed` to true to execute this operation", operation)
 	return event
@@ -211,7 +211,7 @@ func NewDowntimeNotAllowedEvent(apiObject APIObject, operation string) *Event {
 // NewPVCResizedEvent creates an event indicating that a PVC has been resized
 func NewPVCResizedEvent(apiObject APIObject, pvcname string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "PVC Resized"
 	event.Message = fmt.Sprintf("The persistent volume claim %s has been resized", pvcname)
 	return event
@@ -220,7 +220,7 @@ func NewPVCResizedEvent(apiObject APIObject, pvcname string) *Event {
 // NewCannotShrinkVolumeEvent creates an event indicating that the user tried to shrink a PVC
 func NewCannotShrinkVolumeEvent(apiObject APIObject, pvcname string) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	event.Reason = "PVC Shrinked"
 	event.Message = fmt.Sprintf("The persistent volume claim %s cannot be shrunk", pvcname)
 	return event
@@ -231,7 +231,7 @@ func NewUpgradeNotAllowedEvent(apiObject APIObject,
 	fromVersion, toVersion driver.Version,
 	fromLicense, toLicense upgraderules.License) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeNormal
+	event.Type = core.EventTypeNormal
 	formatLicense := func(l upgraderules.License) string {
 		if l == upgraderules.LicenseCommunity {
 			return "Community Edition"
@@ -259,7 +259,7 @@ func NewUpgradeNotAllowedEvent(apiObject APIObject,
 // NewErrorEvent creates an event of type error.
 func NewErrorEvent(reason string, err error, apiObject APIObject) *Event {
 	event := newDeploymentEvent(apiObject)
-	event.Type = v1.EventTypeWarning
+	event.Type = core.EventTypeWarning
 	event.Reason = strings.Title(reason)
 	event.Message = err.Error()
 	return event

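Each constructor above has the same shape: allocate a deployment-scoped event, then fill Type, Reason and Message. The repo's Event and newDeploymentEvent types are not shown in this diff, so the sketch below uses a plain core.Event with the same three fields; the helper name and GenerateName value are illustrative.

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// newExampleEvent mirrors the Type/Reason/Message pattern of the
// constructors above on a plain core.Event.
func newExampleEvent(deploymentName, apSecretName string) *core.Event {
	return &core.Event{
		ObjectMeta: meta.ObjectMeta{GenerateName: deploymentName + "-"},
		Type:       core.EventTypeNormal, // core.EventTypeWarning for NewErrorEvent-style events
		Reason:     "Access package created",
		Message:    fmt.Sprintf("An access package named %s has been created", apSecretName),
	}
}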
@@ -32,7 +32,7 @@ import (
 	persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1"
 	podv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1"
 	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )

 const (
@@ -42,22 +42,22 @@ const (
 // RemovePodFinalizers removes the given finalizers from the given pod.
 func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, c podv1.ModInterface, p *core.Pod,
 	finalizers []string, ignoreNotFound bool) (int, error) {
-	getFunc := func() (metav1.Object, error) {
+	getFunc := func() (meta.Object, error) {
 		ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
 		defer cancel()

-		result, err := cachedStatus.Pod().V1().Read().Get(ctxChild, p.GetName(), metav1.GetOptions{})
+		result, err := cachedStatus.Pod().V1().Read().Get(ctxChild, p.GetName(), meta.GetOptions{})
 		if err != nil {
 			return nil, errors.WithStack(err)
 		}
 		return result, nil
 	}
-	updateFunc := func(updated metav1.Object) error {
+	updateFunc := func(updated meta.Object) error {
 		updatedPod := updated.(*core.Pod)
 		ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
 		defer cancel()

-		result, err := c.Update(ctxChild, updatedPod, metav1.UpdateOptions{})
+		result, err := c.Update(ctxChild, updatedPod, meta.UpdateOptions{})
 		if err != nil {
 			return errors.WithStack(err)
 		}
@@ -74,22 +74,22 @@ func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, c podv1.ModInterface, p *core.Pod,
 // RemovePVCFinalizers removes the given finalizers from the given PVC.
 func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim.Inspector, c persistentvolumeclaimv1.ModInterface,
 	p *core.PersistentVolumeClaim, finalizers []string, ignoreNotFound bool) (int, error) {
-	getFunc := func() (metav1.Object, error) {
+	getFunc := func() (meta.Object, error) {
 		ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
 		defer cancel()

-		result, err := cachedStatus.PersistentVolumeClaim().V1().Read().Get(ctxChild, p.GetName(), metav1.GetOptions{})
+		result, err := cachedStatus.PersistentVolumeClaim().V1().Read().Get(ctxChild, p.GetName(), meta.GetOptions{})
 		if err != nil {
 			return nil, errors.WithStack(err)
 		}
 		return result, nil
 	}
-	updateFunc := func(updated metav1.Object) error {
+	updateFunc := func(updated meta.Object) error {
 		updatedPVC := updated.(*core.PersistentVolumeClaim)
 		ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
 		defer cancel()

-		result, err := c.Update(ctxChild, updatedPVC, metav1.UpdateOptions{})
+		result, err := c.Update(ctxChild, updatedPVC, meta.UpdateOptions{})
 		if err != nil {
 			return errors.WithStack(err)
 		}
@@ -107,7 +107,7 @@ func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim.Inspector, c persistentvolumeclaimv1.ModInterface,
 // The function tries to get the object using the provided get function,
 // then removes the given finalizers and updates the object using the given update function.
 // In case of an update conflict, the function tries again.
-func RemoveFinalizers(finalizers []string, getFunc func() (metav1.Object, error), updateFunc func(metav1.Object) error, ignoreNotFound bool) (int, error) {
+func RemoveFinalizers(finalizers []string, getFunc func() (meta.Object, error), updateFunc func(meta.Object) error, ignoreNotFound bool) (int, error) {
 	attempts := 0
 	for {
 		attempts++

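RemoveFinalizers is the generic get/modify/update loop behind both wrappers above: on an update conflict it re-reads the object and tries again. A minimal standalone sketch of that loop against a plain client-go clientset (the helper name is illustrative):

package main

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// removeFinalizer re-reads the pod and retries whenever the update is
// rejected with a conflict, the same loop RemoveFinalizers implements
// generically via getFunc/updateFunc.
func removeFinalizer(ctx context.Context, cli kubernetes.Interface, ns, name, finalizer string) error {
	for {
		pod, err := cli.CoreV1().Pods(ns).Get(ctx, name, meta.GetOptions{})
		if err != nil {
			return err
		}
		kept := make([]string, 0, len(pod.Finalizers))
		for _, f := range pod.Finalizers {
			if f != finalizer {
				kept = append(kept, f)
			}
		}
		pod.Finalizers = kept
		_, err = cli.CoreV1().Pods(ns).Update(ctx, pod, meta.UpdateOptions{})
		if apierrors.IsConflict(err) {
			continue // a concurrent writer won; fetch the fresh object and try again
		}
		return err
	}
}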
@@ -23,7 +23,7 @@ package k8sutil
 import (
 	"strings"

-	corev1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"

 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 )
@@ -42,7 +42,7 @@ func ConvertImageID2Image(imageID string) string {
 }

 // GetArangoDBImageIDFromPod returns the ArangoDB specific image from a pod
-func GetArangoDBImageIDFromPod(pod *corev1.Pod) (string, error) {
+func GetArangoDBImageIDFromPod(pod *core.Pod) (string, error) {
 	if pod == nil {
 		return "", errors.New("failed to get container statuses from nil pod")
 	}

@@ -26,12 +26,12 @@ import (
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
-	corev1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )

 func TestGetArangoDBImageIDFromPod(t *testing.T) {
 	type args struct {
-		pod *corev1.Pod
+		pod *core.Pod
 	}
 	tests := map[string]struct {
 		args args
@@ -43,15 +43,15 @@ func TestGetArangoDBImageIDFromPod(t *testing.T) {
 		},
 		"container statuses list is empty": {
 			args: args{
-				pod: &corev1.Pod{},
+				pod: &core.Pod{},
 			},
 			wantErr: errors.New("empty list of ContainerStatuses"),
 		},
 		"image ID from the only container": {
 			args: args{
-				pod: &corev1.Pod{
-					Status: corev1.PodStatus{
-						ContainerStatuses: []corev1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								ImageID: dockerPullableImageIDPrefix + "test",
 							},
@@ -63,9 +63,9 @@ func TestGetArangoDBImageIDFromPod(t *testing.T) {
 		},
 		"image ID from two containers": {
 			args: args{
-				pod: &corev1.Pod{
-					Status: corev1.PodStatus{
-						ContainerStatuses: []corev1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								ImageID: dockerPullableImageIDPrefix + "test_arango",
 							},

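These cases feed image IDs through ConvertImageID2Image, whose body is not part of this diff. Assuming dockerPullableImageIDPrefix holds the docker-pullable:// transport prefix the kubelet reports, the conversion amounts to stripping that prefix:

package main

import "strings"

// Assumed value; the real constant is defined elsewhere in this package.
const dockerPullableImageIDPrefix = "docker-pullable://"

// convertImageID2Image turns "docker-pullable://repo@sha256:x" into
// "repo@sha256:x"; IDs without the prefix pass through unchanged.
func convertImageID2Image(imageID string) string {
	return strings.TrimPrefix(imageID, dockerPullableImageIDPrefix)
}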
@@ -25,19 +25,19 @@ import (

 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
 	appsv1 "k8s.io/api/apps/v1"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"

-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes"
 )

 // GetPodOwner returns the ReplicaSet that owns the given Pod.
 // If the Pod has no owner or the owner is not a ReplicaSet, nil is returned.
-func GetPodOwner(kubecli kubernetes.Interface, pod *v1.Pod, ns string) (*appsv1.ReplicaSet, error) {
+func GetPodOwner(kubecli kubernetes.Interface, pod *core.Pod, ns string) (*appsv1.ReplicaSet, error) {
 	for _, ref := range pod.GetOwnerReferences() {
 		if ref.Kind == "ReplicaSet" {
 			rSets := kubecli.AppsV1().ReplicaSets(pod.GetNamespace())
-			rSet, err := rSets.Get(context.Background(), ref.Name, metav1.GetOptions{})
+			rSet, err := rSets.Get(context.Background(), ref.Name, meta.GetOptions{})
 			if err != nil {
 				return nil, errors.WithStack(err)
 			}
@@ -53,7 +53,7 @@ func GetReplicaSetOwner(kubecli kubernetes.Interface, rSet *appsv1.ReplicaSet, ns string) (*appsv1.Deployment, error) {
 	for _, ref := range rSet.GetOwnerReferences() {
 		if ref.Kind == "Deployment" {
 			depls := kubecli.AppsV1().Deployments(rSet.GetNamespace())
-			depl, err := depls.Get(context.Background(), ref.Name, metav1.GetOptions{})
+			depl, err := depls.Get(context.Background(), ref.Name, meta.GetOptions{})
 			if err != nil {
 				return nil, errors.WithStack(err)
 			}

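GetPodOwner and GetReplicaSetOwner are naturally chained to resolve the Deployment that ultimately owns a pod. A sketch of that chain against a plain clientset (the helper name is illustrative):

package main

import (
	"context"

	apps "k8s.io/api/apps/v1"
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// deploymentForPod chains the two lookups above: Pod -> ReplicaSet -> Deployment.
// It returns (nil, nil) when any link of the ownership chain is missing.
func deploymentForPod(ctx context.Context, cli kubernetes.Interface, pod *core.Pod) (*apps.Deployment, error) {
	for _, ref := range pod.GetOwnerReferences() {
		if ref.Kind != "ReplicaSet" {
			continue
		}
		rs, err := cli.AppsV1().ReplicaSets(pod.GetNamespace()).Get(ctx, ref.Name, meta.GetOptions{})
		if err != nil {
			return nil, err
		}
		for _, rsRef := range rs.GetOwnerReferences() {
			if rsRef.Kind == "Deployment" {
				return cli.AppsV1().Deployments(rs.GetNamespace()).Get(ctx, rsRef.Name, meta.GetOptions{})
			}
		}
	}
	return nil, nil
}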
@@ -30,7 +30,7 @@ import (

 	core "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/json"

@@ -518,7 +518,7 @@ func NewPod(deploymentName, role, id, podName string, podCreator interfaces.PodCreator) core.Pod {

 	hostname := shared.CreatePodHostName(deploymentName, role, id)
 	p := core.Pod{
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name:       podName,
 			Labels:     LabelsForMember(deploymentName, role, id),
 			Finalizers: podCreator.GetFinalizers(),
@@ -562,10 +562,10 @@ func GetPodSpecChecksum(podSpec core.PodSpec) (string, error) {
 // If the pod already exists, nil is returned.
 // If another error occurs, that error is returned.
 func CreatePod(ctx context.Context, c podv1.ModInterface, pod *core.Pod, ns string,
-	owner metav1.OwnerReference) (string, types.UID, error) {
+	owner meta.OwnerReference) (string, types.UID, error) {
 	AddOwnerRefToObject(pod.GetObjectMeta(), &owner)

-	if createdPod, err := c.Create(ctx, pod, metav1.CreateOptions{}); err != nil {
+	if createdPod, err := c.Create(ctx, pod, meta.CreateOptions{}); err != nil {
 		if IsAlreadyExists(err) {
 			return pod.GetName(), "", nil // If pod exists do not return any error but do not record UID (enforced rotation)
 		}

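The AlreadyExists branch makes CreatePod idempotent: a second create of the same pod is treated as success, only the fresh UID is lost. The same pattern with a plain clientset (helper name illustrative):

package main

import (
	"context"

	core "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// createPodIdempotent treats "already exists" as success, the way CreatePod
// above swallows the AlreadyExists error (at the cost of not learning the UID).
func createPodIdempotent(ctx context.Context, cli kubernetes.Interface, ns string, pod *core.Pod) (string, error) {
	created, err := cli.CoreV1().Pods(ns).Create(ctx, pod, meta.CreateOptions{})
	if apierrors.IsAlreadyExists(err) {
		return pod.GetName(), nil // the pod is already there: not an error
	}
	if err != nil {
		return "", err
	}
	return created.GetName(), nil
}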
@@ -24,7 +24,7 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"

 	"github.com/arangodb/kube-arangodb/pkg/handlers/utils"
 	"github.com/stretchr/testify/require"
@@ -32,23 +32,23 @@ import (

 // TestIsPodReady tests IsPodReady.
 func TestIsPodReady(t *testing.T) {
-	assert.False(t, IsPodReady(&v1.Pod{}))
-	assert.False(t, IsPodReady(&v1.Pod{
-		Status: v1.PodStatus{
-			Conditions: []v1.PodCondition{
-				v1.PodCondition{
-					Type:   v1.PodReady,
-					Status: v1.ConditionFalse,
+	assert.False(t, IsPodReady(&core.Pod{}))
+	assert.False(t, IsPodReady(&core.Pod{
+		Status: core.PodStatus{
+			Conditions: []core.PodCondition{
+				core.PodCondition{
+					Type:   core.PodReady,
+					Status: core.ConditionFalse,
 				},
 			},
 		},
 	}))
-	assert.True(t, IsPodReady(&v1.Pod{
-		Status: v1.PodStatus{
-			Conditions: []v1.PodCondition{
-				v1.PodCondition{
-					Type:   v1.PodReady,
-					Status: v1.ConditionTrue,
+	assert.True(t, IsPodReady(&core.Pod{
+		Status: core.PodStatus{
+			Conditions: []core.PodCondition{
+				core.PodCondition{
+					Type:   core.PodReady,
+					Status: core.ConditionTrue,
 				},
 			},
 		},
@@ -58,7 +58,7 @@ func TestIsPodReady(t *testing.T) {
 // TestIsPodFailed tests IsPodFailed.
 func TestIsPodFailed(t *testing.T) {
 	type args struct {
-		pod            *v1.Pod
+		pod            *core.Pod
 		coreContainers utils.StringList
 	}
 	tests := map[string]struct {
@@ -67,23 +67,23 @@ func TestIsPodFailed(t *testing.T) {
 	}{
 		"empty pod": {
 			args: args{
-				pod: &v1.Pod{},
+				pod: &core.Pod{},
 			},
 		},
 		"pod is running": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						Phase: v1.PodRunning,
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						Phase: core.PodRunning,
 					},
 				},
 			},
 		},
 		"pod is failed": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						Phase: v1.PodFailed,
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						Phase: core.PodFailed,
 					},
 				},
 			},
@@ -91,13 +91,13 @@ func TestIsPodFailed(t *testing.T) {
 		},
 		"one core container failed": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "core_container",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 1,
 									},
 								},
@@ -111,13 +111,13 @@ func TestIsPodFailed(t *testing.T) {
 		},
 		"one non-core container failed": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "non_core_container",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 1,
 									},
 								},
@@ -130,13 +130,13 @@ func TestIsPodFailed(t *testing.T) {
 		},
 		"one core container succeeded": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "core_container",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},
@@ -149,19 +149,19 @@ func TestIsPodFailed(t *testing.T) {
 		},
 		"first core container succeeded and second is still running": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "core_container1",
-								State: v1.ContainerState{
-									Running: &v1.ContainerStateRunning{},
+								State: core.ContainerState{
+									Running: &core.ContainerStateRunning{},
 								},
 							},
 							{
 								Name: "core_container2",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},
@@ -175,21 +175,21 @@ func TestIsPodFailed(t *testing.T) {
 		},
 		"all containers succeeded": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "core_container1",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},
 							},
 							{
 								Name: "core_container2",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},
@@ -212,7 +212,7 @@ func TestIsPodFailed(t *testing.T) {

 func TestIsPodSucceeded(t *testing.T) {
 	type args struct {
-		pod            *v1.Pod
+		pod            *core.Pod
 		coreContainers utils.StringList
 	}
 	tests := map[string]struct {
@@ -221,14 +221,14 @@ func TestIsPodSucceeded(t *testing.T) {
 	}{
 		"empty pod": {
 			args: args{
-				pod: &v1.Pod{},
+				pod: &core.Pod{},
 			},
 		},
 		"pod is succeeded": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						Phase: v1.PodSucceeded,
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						Phase: core.PodSucceeded,
 					},
 				},
 			},
@@ -236,21 +236,21 @@ func TestIsPodSucceeded(t *testing.T) {
 		},
 		"all core containers succeeded": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "core_container1",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},
 							},
 							{
 								Name: "core_container2",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},
@@ -267,16 +267,16 @@ func TestIsPodSucceeded(t *testing.T) {
 		},
 		"non-core container succeeded": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "core_container1",
 							},
 							{
 								Name: "non-core_container",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},
@@ -289,13 +289,13 @@ func TestIsPodSucceeded(t *testing.T) {
 		},
 		"the only one core container succeeded": {
 			args: args{
-				pod: &v1.Pod{
-					Status: v1.PodStatus{
-						ContainerStatuses: []v1.ContainerStatus{
+				pod: &core.Pod{
+					Status: core.PodStatus{
+						ContainerStatuses: []core.ContainerStatus{
 							{
 								Name: "core_container1",
-								State: v1.ContainerState{
-									Terminated: &v1.ContainerStateTerminated{
+								State: core.ContainerState{
+									Terminated: &core.ContainerStateTerminated{
 										ExitCode: 0,
 									},
 								},

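IsPodReady's implementation is not part of this diff, but the first group of cases pins down its core rule: a pod is ready only when its PodReady condition is ConditionTrue. A condition scan consistent with those cases; IsPodFailed and IsPodSucceeded additionally weigh per-container termination state, as the core/non-core cases show:

package main

import core "k8s.io/api/core/v1"

// isPodReady is the condition scan the tests above imply: ready means the
// PodReady condition reports ConditionTrue; anything else is not ready.
func isPodReady(pod *core.Pod) bool {
	for _, c := range pod.Status.Conditions {
		if c.Type == core.PodReady {
			return c.Status == core.ConditionTrue
		}
	}
	return false
}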
@@ -24,7 +24,7 @@ import (
 	"testing"

 	"github.com/stretchr/testify/assert"
-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )

 func TestCreate(t *testing.T) {
@@ -45,13 +45,13 @@ func TestCreate(t *testing.T) {
 	assert.Equal(t, probe.Handler.HTTPGet.HTTPHeaders[0].Name, "Authorization")
 	assert.Equal(t, probe.Handler.HTTPGet.HTTPHeaders[0].Value, secret)
 	assert.Equal(t, probe.Handler.HTTPGet.Port.IntValue(), 8529)
-	assert.Equal(t, probe.Handler.HTTPGet.Scheme, v1.URISchemeHTTP)
+	assert.Equal(t, probe.Handler.HTTPGet.Scheme, core.URISchemeHTTP)

 	// https
 	config = HTTPProbeConfig{path, true, secret, 0, 0, 0, 0, 0, 0}
 	probe = config.Create()

-	assert.Equal(t, probe.Handler.HTTPGet.Scheme, v1.URISchemeHTTPS)
+	assert.Equal(t, probe.Handler.HTTPGet.Scheme, core.URISchemeHTTPS)

 	// http, custom timing
 	config = HTTPProbeConfig{path, false, secret, 0, 1, 2, 3, 4, 5}

@@ -26,22 +26,22 @@ import (

 	"github.com/arangodb/kube-arangodb/pkg/util/errors"

-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/arangodb/kube-arangodb/pkg/util/constants"
 	persistentvolumeclaimv1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim/v1"
 )

 // IsPersistentVolumeClaimMarkedForDeletion returns true if the pvc has been marked for deletion.
-func IsPersistentVolumeClaimMarkedForDeletion(pvc *v1.PersistentVolumeClaim) bool {
+func IsPersistentVolumeClaimMarkedForDeletion(pvc *core.PersistentVolumeClaim) bool {
 	return pvc.DeletionTimestamp != nil
 }

 // IsPersistentVolumeClaimFileSystemResizePending returns true if the pvc has FileSystemResizePending set to true
-func IsPersistentVolumeClaimFileSystemResizePending(pvc *v1.PersistentVolumeClaim) bool {
+func IsPersistentVolumeClaimFileSystemResizePending(pvc *core.PersistentVolumeClaim) bool {
 	for _, c := range pvc.Status.Conditions {
-		if c.Type == v1.PersistentVolumeClaimFileSystemResizePending && c.Status == v1.ConditionTrue {
+		if c.Type == core.PersistentVolumeClaimFileSystemResizePending && c.Status == core.ConditionTrue {
 			return true
 		}
 	}
@@ -49,12 +49,12 @@ func IsPersistentVolumeClaimFileSystemResizePending(pvc *v1.PersistentVolumeClaim) bool {
 }

 // ExtractStorageResourceRequirement filters resource requirements for Pods.
-func ExtractStorageResourceRequirement(resources v1.ResourceRequirements) v1.ResourceRequirements {
+func ExtractStorageResourceRequirement(resources core.ResourceRequirements) core.ResourceRequirements {

-	filterStorage := func(list v1.ResourceList) v1.ResourceList {
-		newlist := make(v1.ResourceList)
+	filterStorage := func(list core.ResourceList) core.ResourceList {
+		newlist := make(core.ResourceList)
 		for k, v := range list {
-			if k != v1.ResourceStorage && k != "iops" {
+			if k != core.ResourceStorage && k != "iops" {
 				continue
 			}
 			newlist[k] = v
@@ -62,7 +62,7 @@ func ExtractStorageResourceRequirement(resources v1.ResourceRequirements) v1.ResourceRequirements {
 		return newlist
 	}

-	return v1.ResourceRequirements{
+	return core.ResourceRequirements{
 		Limits:   filterStorage(resources.Limits),
 		Requests: filterStorage(resources.Requests),
 	}
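The filter keeps only the keys a PVC can carry: the standard storage resource plus the custom "iops" key. The same logic written in positive form, with an example input (names are illustrative):

package main

import (
	"fmt"

	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// storageOnly is the positive form of the filter above: keep "storage"
// and "iops", drop everything else (cpu, memory, ...).
func storageOnly(list core.ResourceList) core.ResourceList {
	out := make(core.ResourceList)
	for k, v := range list {
		if k == core.ResourceStorage || k == "iops" {
			out[k] = v
		}
	}
	return out
}

func main() {
	full := core.ResourceList{
		core.ResourceCPU:     resource.MustParse("500m"),
		core.ResourceMemory:  resource.MustParse("1Gi"),
		core.ResourceStorage: resource.MustParse("8Gi"),
	}
	fmt.Println(storageOnly(full)) // only the 8Gi storage entry survives
}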
@@ -72,12 +72,12 @@ func ExtractStorageResourceRequirement(resources v1.ResourceRequirements) v1.ResourceRequirements {
 // If the pvc already exists, nil is returned.
 // If another error occurs, that error is returned.
 func CreatePersistentVolumeClaim(ctx context.Context, pvcs persistentvolumeclaimv1.ModInterface, pvcName, deploymentName,
-	storageClassName, role string, enforceAntiAffinity bool, resources v1.ResourceRequirements,
-	vct *v1.PersistentVolumeClaim, finalizers []string, owner metav1.OwnerReference) error {
+	storageClassName, role string, enforceAntiAffinity bool, resources core.ResourceRequirements,
+	vct *core.PersistentVolumeClaim, finalizers []string, owner meta.OwnerReference) error {
 	labels := LabelsForDeployment(deploymentName, role)
-	volumeMode := v1.PersistentVolumeFilesystem
-	pvc := &v1.PersistentVolumeClaim{
-		ObjectMeta: metav1.ObjectMeta{
+	volumeMode := core.PersistentVolumeFilesystem
+	pvc := &core.PersistentVolumeClaim{
+		ObjectMeta: meta.ObjectMeta{
 			Name:       pvcName,
 			Labels:     labels,
 			Finalizers: finalizers,
@@ -87,9 +87,9 @@ func CreatePersistentVolumeClaim(ctx context.Context, pvcs persistentvolumeclaimv1.ModInterface, pvcName, deploymentName,
 		},
 	}
 	if vct == nil {
-		pvc.Spec = v1.PersistentVolumeClaimSpec{
-			AccessModes: []v1.PersistentVolumeAccessMode{
-				v1.ReadWriteOnce,
+		pvc.Spec = core.PersistentVolumeClaimSpec{
+			AccessModes: []core.PersistentVolumeAccessMode{
+				core.ReadWriteOnce,
 			},
 			VolumeMode: &volumeMode,
 			Resources:  ExtractStorageResourceRequirement(resources),
@@ -102,7 +102,7 @@ func CreatePersistentVolumeClaim(ctx context.Context, pvcs persistentvolumeclaimv1.ModInterface, pvcName, deploymentName,
 		pvc.Spec.StorageClassName = &storageClassName
 	}
 	AddOwnerRefToObject(pvc.GetObjectMeta(), &owner)
-	if _, err := pvcs.Create(ctx, pvc, metav1.CreateOptions{}); err != nil && !IsAlreadyExists(err) {
+	if _, err := pvcs.Create(ctx, pvc, meta.CreateOptions{}); err != nil && !IsAlreadyExists(err) {
 		return errors.WithStack(err)
 	}
 	return nil

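When no volume claim template is supplied, the function falls back to the defaults visible above: ReadWriteOnce access, a Filesystem volume mode and the filtered storage request. A standalone sketch of the object it builds (name and size are illustrative):

package main

import (
	core "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// examplePVC builds the default claim shape shown above: ReadWriteOnce access,
// Filesystem volume mode and a storage-only resource request.
func examplePVC(storageClass string) *core.PersistentVolumeClaim {
	volumeMode := core.PersistentVolumeFilesystem
	return &core.PersistentVolumeClaim{
		ObjectMeta: meta.ObjectMeta{Name: "example-pvc"},
		Spec: core.PersistentVolumeClaimSpec{
			AccessModes: []core.PersistentVolumeAccessMode{core.ReadWriteOnce},
			VolumeMode:  &volumeMode,
			Resources: core.ResourceRequirements{
				Requests: core.ResourceList{
					core.ResourceStorage: resource.MustParse("8Gi"),
				},
			},
			StorageClassName: &storageClass,
		},
	}
}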
@@ -28,7 +28,7 @@ import (
 	"strings"

 	core "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

 	"github.com/arangodb/kube-arangodb/pkg/apis/shared"
 	"github.com/arangodb/kube-arangodb/pkg/util/errors"
@@ -73,7 +73,7 @@ func CreateAgentLeaderServiceName(deploymentName string) string {

 // CreateExporterService
 func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs servicev1.ModInterface,
-	deployment metav1.Object, owner metav1.OwnerReference) (string, bool, error) {
+	deployment meta.Object, owner meta.OwnerReference) (string, bool, error) {
 	deploymentName := deployment.GetName()
 	svcName := CreateExporterClientServiceName(deploymentName)

@@ -84,7 +84,7 @@ func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs servicev1.ModInterface,
 	}

 	svc := &core.Service{
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name:   svcName,
 			Labels: LabelsForExporterService(deploymentName),
 		},
@@ -101,7 +101,7 @@ func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs servicev1.ModInterface,
 		},
 	}
 	AddOwnerRefToObject(svc.GetObjectMeta(), &owner)
-	if _, err := svcs.Create(ctx, svc, metav1.CreateOptions{}); IsAlreadyExists(err) {
+	if _, err := svcs.Create(ctx, svc, meta.CreateOptions{}); IsAlreadyExists(err) {
 		return svcName, false, nil
 	} else if err != nil {
 		return svcName, false, errors.WithStack(err)
@@ -114,8 +114,8 @@ func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs servicev1.ModInterface,
 // If the service already exists, nil is returned.
 // If another error occurs, that error is returned.
 // The returned bool is true if the service is created, or false when the service already existed.
-func CreateHeadlessService(ctx context.Context, svcs servicev1.ModInterface, deployment metav1.Object,
-	owner metav1.OwnerReference) (string, bool, error) {
+func CreateHeadlessService(ctx context.Context, svcs servicev1.ModInterface, deployment meta.Object,
+	owner meta.OwnerReference) (string, bool, error) {
 	deploymentName := deployment.GetName()
 	svcName := CreateHeadlessServiceName(deploymentName)
 	ports := []core.ServicePort{
@@ -139,8 +139,8 @@ func CreateHeadlessService(ctx context.Context, svcs servicev1.ModInterface, deployment metav1.Object,
 // If the service already exists, nil is returned.
 // If another error occurs, that error is returned.
 // The returned bool is true if the service is created, or false when the service already existed.
-func CreateDatabaseClientService(ctx context.Context, svcs servicev1.ModInterface, deployment metav1.Object,
-	single, withLeader bool, owner metav1.OwnerReference) (string, bool, error) {
+func CreateDatabaseClientService(ctx context.Context, svcs servicev1.ModInterface, deployment meta.Object,
+	single, withLeader bool, owner meta.OwnerReference) (string, bool, error) {
 	deploymentName := deployment.GetName()
 	svcName := CreateDatabaseClientServiceName(deploymentName)
 	ports := []core.ServicePort{
@@ -171,8 +171,8 @@ func CreateDatabaseClientService(ctx context.Context, svcs servicev1.ModInterface, deployment metav1.Object,
 // If another error occurs, that error is returned.
 // The returned bool is true if the service is created, or false when the service already existed.
 func CreateExternalAccessService(ctx context.Context, svcs servicev1.ModInterface, svcName, role string,
-	deployment metav1.Object, serviceType core.ServiceType, port, nodePort int, loadBalancerIP string,
-	loadBalancerSourceRanges []string, owner metav1.OwnerReference, withLeader bool) (string, bool, error) {
+	deployment meta.Object, serviceType core.ServiceType, port, nodePort int, loadBalancerIP string,
+	loadBalancerSourceRanges []string, owner meta.OwnerReference, withLeader bool) (string, bool, error) {
 	deploymentName := deployment.GetName()
 	ports := []core.ServicePort{
 		{
@@ -197,14 +197,14 @@ func CreateExternalAccessService(ctx context.Context, svcs servicev1.ModInterface, svcName, role string,
 // The returned bool is true if the service is created, or false when the service already existed.
 func createService(ctx context.Context, svcs servicev1.ModInterface, svcName, deploymentName, clusterIP, role string,
 	serviceType core.ServiceType, ports []core.ServicePort, loadBalancerIP string, loadBalancerSourceRanges []string,
-	publishNotReadyAddresses, withLeader bool, owner metav1.OwnerReference) (bool, error) {
+	publishNotReadyAddresses, withLeader bool, owner meta.OwnerReference) (bool, error) {
 	labels := LabelsForDeployment(deploymentName, role)
 	if withLeader {
 		labels[LabelKeyArangoLeader] = "true"
 	}

 	svc := &core.Service{
-		ObjectMeta: metav1.ObjectMeta{
+		ObjectMeta: meta.ObjectMeta{
 			Name:        svcName,
 			Labels:      labels,
 			Annotations: map[string]string{},
@@ -220,7 +220,7 @@ func createService(ctx context.Context, svcs servicev1.ModInterface, svcName, deploymentName, clusterIP, role string,
 		},
 	}
 	AddOwnerRefToObject(svc.GetObjectMeta(), &owner)
-	if _, err := svcs.Create(ctx, svc, metav1.CreateOptions{}); IsAlreadyExists(err) {
+	if _, err := svcs.Create(ctx, svc, meta.CreateOptions{}); IsAlreadyExists(err) {
 		return false, nil
 	} else if err != nil {
 		return false, errors.WithStack(err)

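All of the exported helpers above funnel into createService; the headless variant, for instance, amounts to a ClusterIP "None" service selecting the deployment's pods. A sketch of that shape (label key, name suffix and port are illustrative; LabelsForDeployment defines the real label set):

package main

import (
	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// exampleHeadlessService sketches the object CreateHeadlessService asks
// createService to build.
func exampleHeadlessService(deploymentName string) *core.Service {
	labels := map[string]string{"arango_deployment": deploymentName}
	return &core.Service{
		ObjectMeta: meta.ObjectMeta{
			Name:   deploymentName + "-int",
			Labels: labels,
		},
		Spec: core.ServiceSpec{
			ClusterIP: core.ClusterIPNone, // "None" makes the service headless
			Selector:  labels,
			Ports: []core.ServicePort{
				{Name: "server", Port: 8529},
			},
			// createService takes publishNotReadyAddresses as a parameter;
			// true is an assumption for the headless case.
			PublishNotReadyAddresses: true,
		},
	}
}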
@@ -29,7 +29,7 @@ import (

 	"github.com/arangodb/kube-arangodb/pkg/util/retry"
 	v1 "k8s.io/api/storage/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1"
 )

@@ -53,7 +53,7 @@ func PatchStorageClassIsDefault(cli storagev1.StorageV1Interface, name string, isDefault bool) error {
 	stcs := cli.StorageClasses()
 	op := func() error {
 		// Fetch current version of StorageClass
-		current, err := stcs.Get(context.Background(), name, metav1.GetOptions{})
+		current, err := stcs.Get(context.Background(), name, meta.GetOptions{})
 		if IsNotFound(err) {
 			return retry.Permanent(errors.WithStack(err))
 		} else if err != nil {
@@ -68,7 +68,7 @@ func PatchStorageClassIsDefault(cli storagev1.StorageV1Interface, name string, isDefault bool) error {
 	current.SetAnnotations(ann)

 	// Save StorageClass
-	if _, err := stcs.Update(context.Background(), current, metav1.UpdateOptions{}); IsConflict(err) {
+	if _, err := stcs.Update(context.Background(), current, meta.UpdateOptions{}); IsConflict(err) {
 		// StorageClass has been modified since we read it
 		return errors.WithStack(err)
 	} else if err != nil {

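The function wraps this op in the retry package because a conflict means another writer changed the StorageClass between the Get and the Update. A bare-loop sketch of the same read-modify-write, using the upstream default-class annotation key (the retry/backoff handling of the real code is simplified away):

package main

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	storagev1 "k8s.io/client-go/kubernetes/typed/storage/v1"
)

// The upstream default-class annotation key; annStorageClassIsDefault in this
// package presumably carries the same value.
const isDefaultAnnotation = "storageclass.kubernetes.io/is-default-class"

// markDefault repeats the read-modify-write until no conflict occurs.
func markDefault(ctx context.Context, cli storagev1.StorageV1Interface, name string, isDefault bool) error {
	for {
		sc, err := cli.StorageClasses().Get(ctx, name, meta.GetOptions{})
		if err != nil {
			return err
		}
		ann := sc.GetAnnotations()
		if ann == nil {
			ann = map[string]string{}
		}
		if isDefault {
			ann[isDefaultAnnotation] = "true"
		} else {
			delete(ann, isDefaultAnnotation)
		}
		sc.SetAnnotations(ann)
		_, err = cli.StorageClasses().Update(ctx, sc, meta.UpdateOptions{})
		if apierrors.IsConflict(err) {
			continue // modified since we read it; retry
		}
		return err
	}
}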
@@ -33,7 +33,7 @@ import (
 	"github.com/stretchr/testify/assert"
 	v1 "k8s.io/api/storage/v1"
 	er "k8s.io/apimachinery/pkg/api/errors"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/client-go/kubernetes/fake"
 	k8stesting "k8s.io/client-go/testing"
 )
@@ -47,14 +47,14 @@ func TestStorageClassIsDefault(t *testing.T) {
 		{
 			Name: "Storage class without annotations",
 			StorageClass: v1.StorageClass{
-				ObjectMeta: metav1.ObjectMeta{},
+				ObjectMeta: meta.ObjectMeta{},
 			},
 			IsDefault: false,
 		},
 		{
 			Name: "Storage class with empty annotations",
 			StorageClass: v1.StorageClass{
-				ObjectMeta: metav1.ObjectMeta{
+				ObjectMeta: meta.ObjectMeta{
 					Annotations: map[string]string{},
 				},
 			},
@@ -63,7 +63,7 @@ func TestStorageClassIsDefault(t *testing.T) {
 		{
 			Name: "Storage class without default",
 			StorageClass: v1.StorageClass{
-				ObjectMeta: metav1.ObjectMeta{
+				ObjectMeta: meta.ObjectMeta{
 					Annotations: map[string]string{
 						annStorageClassIsDefault: "false",
 					},
@@ -74,7 +74,7 @@ func TestStorageClassIsDefault(t *testing.T) {
 		{
 			Name: "Storage class with invalid value in annotation",
 			StorageClass: v1.StorageClass{
-				ObjectMeta: metav1.ObjectMeta{
+				ObjectMeta: meta.ObjectMeta{
 					Annotations: map[string]string{
 						annStorageClassIsDefault: "foo",
 					},
@@ -85,7 +85,7 @@ func TestStorageClassIsDefault(t *testing.T) {
 		{
 			Name: "Default storage class exists",
 			StorageClass: v1.StorageClass{
-				ObjectMeta: metav1.ObjectMeta{
+				ObjectMeta: meta.ObjectMeta{
 					Annotations: map[string]string{
 						annStorageClassIsDefault: "true",
 					},
@@ -162,11 +162,11 @@ func TestPatchStorageClassIsDefault(t *testing.T) {
 			clientSet := fake.NewSimpleClientset()
 			storageSet := clientSet.StorageV1()
 			_, err = storageSet.StorageClasses().Create(context.Background(), &v1.StorageClass{
-				TypeMeta: metav1.TypeMeta{},
-				ObjectMeta: metav1.ObjectMeta{
+				TypeMeta: meta.TypeMeta{},
+				ObjectMeta: meta.ObjectMeta{
 					Name: "test",
 				},
-			}, metav1.CreateOptions{})
+			}, meta.CreateOptions{})
 			require.NoError(t, err)

 			if testCase.Reactor != nil {

@@ -27,15 +27,15 @@ import (

 	"github.com/stretchr/testify/assert"

-	v1 "k8s.io/api/core/v1"
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	core "k8s.io/api/core/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

 	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
 	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
 )

 var apiObjectForTest = api.ArangoDeployment{
-	ObjectMeta: metav1.ObjectMeta{
+	ObjectMeta: meta.ObjectMeta{
 		Name:      "Willy",
 		Namespace: "Wonka",
 	},
@@ -48,32 +48,32 @@ func TestMemberAddEvent(t *testing.T) {
 	event := k8sutil.NewMemberAddEvent("name", "role", &apiObjectForTest)
 	assert.Equal(t, event.Reason, "New Role Added")
 	assert.Equal(t, event.Message, "New role name added to deployment")
-	assert.Equal(t, event.Type, v1.EventTypeNormal)
+	assert.Equal(t, event.Type, core.EventTypeNormal)
 }

 func TestMemberRemoveEvent(t *testing.T) {
 	event := k8sutil.NewMemberRemoveEvent("name", "role", &apiObjectForTest)
 	assert.Equal(t, event.Reason, "Role Removed")
 	assert.Equal(t, event.Message, "Existing role name removed from the deployment")
-	assert.Equal(t, event.Type, v1.EventTypeNormal)
+	assert.Equal(t, event.Type, core.EventTypeNormal)
 }

 func TestPodGoneEvent(t *testing.T) {
 	event := k8sutil.NewPodGoneEvent("name", "role", &apiObjectForTest)
 	assert.Equal(t, event.Reason, "Pod Of Role Gone")
 	assert.Equal(t, event.Message, "Pod name of member role is gone")
-	assert.Equal(t, event.Type, v1.EventTypeNormal)
+	assert.Equal(t, event.Type, core.EventTypeNormal)
 }

 func TestImmutableFieldEvent(t *testing.T) {
 	event := k8sutil.NewImmutableFieldEvent("name", &apiObjectForTest)
 	assert.Equal(t, event.Reason, "Immutable Field Change")
 	assert.Equal(t, event.Message, "Changing field name is not possible. It has been reset to its original value.")
-	assert.Equal(t, event.Type, v1.EventTypeNormal)
+	assert.Equal(t, event.Type, core.EventTypeNormal)
 }

 func TestErrorEvent(t *testing.T) {
 	event := k8sutil.NewErrorEvent("reason", errors.New("something"), &apiObjectForTest)
 	assert.Equal(t, event.Reason, "Reason")
-	assert.Equal(t, event.Type, v1.EventTypeWarning)
+	assert.Equal(t, event.Type, core.EventTypeWarning)
 }

@@ -23,7 +23,7 @@ package k8sutil
 import (
 	"time"

-	v1 "k8s.io/api/core/v1"
+	core "k8s.io/api/core/v1"
 )

 const (
@@ -40,8 +40,8 @@ type TolerationDuration struct {

 // NewNoExecuteToleration is a helper to create a Toleration with
 // Key=key, Operator='Exists', Effect='NoExecute', TolerationSeconds=tolerationDuration.Seconds().
-func NewNoExecuteToleration(key string, duration TolerationDuration) v1.Toleration {
-	t := v1.Toleration{
+func NewNoExecuteToleration(key string, duration TolerationDuration) core.Toleration {
+	t := core.Toleration{
 		Key:      key,
 		Operator: "Exists",
 		Effect:   "NoExecute",
@@ -54,9 +54,9 @@ func NewNoExecuteToleration(key string, duration TolerationDuration) v1.Toleration {
 }

 // AddTolerationIfNotFound adds the given toleration, if no such toleration has been set in the given source.
-func AddTolerationIfNotFound(source []v1.Toleration, toAdd v1.Toleration) []v1.Toleration {
+func AddTolerationIfNotFound(source []core.Toleration, toAdd core.Toleration) []core.Toleration {
 	if len(source) == 0 {
-		return []v1.Toleration{
+		return []core.Toleration{
 			toAdd,
 		}
 	}

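For instance, tolerating a node's not-ready taint for five minutes before eviction would, under NewNoExecuteToleration's convention, produce a toleration like this sketch built directly on the core types:

package main

import (
	"time"

	core "k8s.io/api/core/v1"
)

// exampleNotReadyToleration mirrors what NewNoExecuteToleration builds:
// tolerate the node not-ready taint for five minutes before eviction.
func exampleNotReadyToleration() core.Toleration {
	seconds := int64((5 * time.Minute).Seconds())
	return core.Toleration{
		Key:               "node.kubernetes.io/not-ready",
		Operator:          core.TolerationOpExists,
		Effect:            core.TaintEffectNoExecute,
		TolerationSeconds: &seconds,
	}
}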
@@ -21,7 +21,7 @@
 package k8sutil

 import (
-	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/labels"
 )

@@ -51,14 +51,14 @@ const (
 )

 // AddOwnerRefToObject adds given owner reference to given object
-func AddOwnerRefToObject(obj metav1.Object, ownerRef *metav1.OwnerReference) {
+func AddOwnerRefToObject(obj meta.Object, ownerRef *meta.OwnerReference) {
 	if ownerRef != nil {
 		obj.SetOwnerReferences(append(obj.GetOwnerReferences(), *ownerRef))
 	}
 }

 // UpdateOwnerRefToObjectIfNeeded adds the given owner reference to the given object if it does not exist yet
-func UpdateOwnerRefToObjectIfNeeded(obj metav1.Object, ownerRef *metav1.OwnerReference) bool {
+func UpdateOwnerRefToObjectIfNeeded(obj meta.Object, ownerRef *meta.OwnerReference) bool {
 	if ownerRef != nil {
 		for _, existingOwnerRef := range obj.GetOwnerReferences() {
 			if existingOwnerRef.UID == ownerRef.UID {
@@ -132,15 +132,15 @@ func LabelsForLocalStorage(localStorageName, role string) map[string]string {
 }

 // DeploymentListOpt creates a ListOptions matching all labels for the given deployment name.
-func DeploymentListOpt(deploymentName string) metav1.ListOptions {
-	return metav1.ListOptions{
+func DeploymentListOpt(deploymentName string) meta.ListOptions {
+	return meta.ListOptions{
 		LabelSelector: labels.SelectorFromSet(LabelsForDeployment(deploymentName, "")).String(),
 	}
 }

 // LocalStorageListOpt creates a ListOptions matching all labels for the given local storage name.
-func LocalStorageListOpt(localStorageName, role string) metav1.ListOptions {
-	return metav1.ListOptions{
+func LocalStorageListOpt(localStorageName, role string) meta.ListOptions {
+	return meta.ListOptions{
 		LabelSelector: labels.SelectorFromSet(LabelsForLocalStorage(localStorageName, role)).String(),
 	}
 }

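Both ListOpt helpers turn a label set into a selector string for a List call. Used against a client, that looks like the following sketch; the label key is an assumption, since LabelsForDeployment's definition is not part of this diff:

package main

import (
	"context"
	"fmt"

	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/kubernetes"
)

// listDeploymentPods builds the same kind of selector DeploymentListOpt
// produces and lists the matching pods.
func listDeploymentPods(ctx context.Context, cli kubernetes.Interface, ns, deploymentName string) error {
	selector := labels.SelectorFromSet(labels.Set{"arango_deployment": deploymentName}).String()
	pods, err := cli.CoreV1().Pods(ns).List(ctx, meta.ListOptions{LabelSelector: selector})
	if err != nil {
		return err
	}
	for _, p := range pods.Items {
		fmt.Println(p.Name)
	}
	return nil
}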
Some files were not shown because too many files have changed in this diff.