
[Feature] TLS Rotation on ALT Names change (#810)

Author: Adam Janikowski, 2021-10-08 17:11:18 +02:00 (committed by GitHub)
parent b0d58c7d34
commit 371035278e
92 changed files with 2060 additions and 697 deletions
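
The user-visible effect of this commit: a member's TLS keyfile secret is rotated when the ALT names configured for it change, presumably by comparing the DNS SANs baked into the existing certificate against the names the operator currently expects. A minimal, self-contained sketch of that comparison in Go — altNamesChanged and the package name are illustrative, not the operator's actual helpers:

package example

import (
	"crypto/x509"
	"sort"
)

// altNamesChanged reports whether the certificate's DNS SANs differ from the
// ALT names we currently want for the member, i.e. whether rotation is needed.
func altNamesChanged(cert *x509.Certificate, wantDNSNames []string) bool {
	have := append([]string(nil), cert.DNSNames...)
	want := append([]string(nil), wantDNSNames...)
	sort.Strings(have)
	sort.Strings(want)
	if len(have) != len(want) {
		return true
	}
	for i := range have {
		if have[i] != want[i] {
			return true
		}
	}
	return false
}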

View file

@ -8,6 +8,7 @@
- Add Topology support
- Add ARANGODB_ZONE env to Topology Managed pods
- Add "Random pod names" feature
- Rotate TLS Secrets on ALT Names change
## [1.2.3](https://github.com/arangodb/kube-arangodb/tree/1.2.3) (2021-09-24)
- Update UBI Image to 8.4

View file

@ -0,0 +1,57 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package v1
type DeploymentStatusMemberElements []DeploymentStatusMemberElement
// DeploymentStatusMemberElement holds one specific element with group and member status
type DeploymentStatusMemberElement struct {
Group ServerGroup `json:"group,omitempty"`
Member MemberStatus `json:"member,omitempty"`
}
func (ds DeploymentStatusMembers) AsList() DeploymentStatusMemberElements {
return ds.AsListInGroups(AllServerGroups...)
}
func (ds DeploymentStatusMembers) AsListInGroups(groups ...ServerGroup) DeploymentStatusMemberElements {
var elements []DeploymentStatusMemberElement
// AsListInGroup never returns an error, so no error handling is needed here
for _, g := range groups {
elements = append(elements, ds.AsListInGroup(g)...)
}
return elements
}
func (ds DeploymentStatusMembers) AsListInGroup(group ServerGroup) DeploymentStatusMemberElements {
var r DeploymentStatusMemberElements
for _, m := range ds.MembersOfGroup(group) {
r = append(r, DeploymentStatusMemberElement{
Group: group,
Member: m,
})
}
return r
}
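
A short usage sketch of the new flattened listing — assuming the fmt package is imported and printMembers is an illustrative caller in this api package, not part of the commit:

// printMembers walks every member of every server group as one flat slice,
// instead of nesting ForeachServerGroup callbacks.
func printMembers(status DeploymentStatus) {
	for _, el := range status.Members.AsList() {
		fmt.Printf("group=%s member=%s\n", el.Group.AsRole(), el.Member.ID)
	}
}

// AsListInGroups narrows the same flattened view to selected groups, e.g.:
//   status.Members.AsListInGroups(ServerGroupAgents, ServerGroupDBServers)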

View file

@ -840,6 +840,45 @@ func (in *DeploymentStatusHashesTLS) DeepCopy() *DeploymentStatusHashesTLS {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatusMemberElement) DeepCopyInto(out *DeploymentStatusMemberElement) {
*out = *in
in.Member.DeepCopyInto(&out.Member)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatusMemberElement.
func (in *DeploymentStatusMemberElement) DeepCopy() *DeploymentStatusMemberElement {
if in == nil {
return nil
}
out := new(DeploymentStatusMemberElement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in DeploymentStatusMemberElements) DeepCopyInto(out *DeploymentStatusMemberElements) {
{
in := &in
*out = make(DeploymentStatusMemberElements, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatusMemberElements.
func (in DeploymentStatusMemberElements) DeepCopy() DeploymentStatusMemberElements {
if in == nil {
return nil
}
out := new(DeploymentStatusMemberElements)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatusMembers) DeepCopyInto(out *DeploymentStatusMembers) {
*out = *in

View file

@ -0,0 +1,57 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package v2alpha1
type DeploymentStatusMemberElements []DeploymentStatusMemberElement
// DeploymentStatusMemberElement holds one specific element with group and member status
type DeploymentStatusMemberElement struct {
Group ServerGroup `json:"group,omitempty"`
Member MemberStatus `json:"member,omitempty"`
}
func (ds DeploymentStatusMembers) AsList() DeploymentStatusMemberElements {
return ds.AsListInGroups(AllServerGroups...)
}
func (ds DeploymentStatusMembers) AsListInGroups(groups ...ServerGroup) DeploymentStatusMemberElements {
var elements []DeploymentStatusMemberElement
// AsListInGroup never returns an error, so no error handling is needed here
for _, g := range groups {
elements = append(elements, ds.AsListInGroup(g)...)
}
return elements
}
func (ds DeploymentStatusMembers) AsListInGroup(group ServerGroup) DeploymentStatusMemberElements {
var r DeploymentStatusMemberElements
for _, m := range ds.MembersOfGroup(group) {
r = append(r, DeploymentStatusMemberElement{
Group: group,
Member: m,
})
}
return r
}

View file

@ -840,6 +840,45 @@ func (in *DeploymentStatusHashesTLS) DeepCopy() *DeploymentStatusHashesTLS {
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatusMemberElement) DeepCopyInto(out *DeploymentStatusMemberElement) {
*out = *in
in.Member.DeepCopyInto(&out.Member)
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatusMemberElement.
func (in *DeploymentStatusMemberElement) DeepCopy() *DeploymentStatusMemberElement {
if in == nil {
return nil
}
out := new(DeploymentStatusMemberElement)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in DeploymentStatusMemberElements) DeepCopyInto(out *DeploymentStatusMemberElements) {
{
in := &in
*out = make(DeploymentStatusMemberElements, len(*in))
for i := range *in {
(*in)[i].DeepCopyInto(&(*out)[i])
}
return
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new DeploymentStatusMemberElements.
func (in DeploymentStatusMemberElements) DeepCopy() DeploymentStatusMemberElements {
if in == nil {
return nil
}
out := new(DeploymentStatusMemberElements)
in.DeepCopyInto(out)
return *out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatusMembers) DeepCopyInto(out *DeploymentStatusMembers) {
*out = *in

View file

@ -38,10 +38,9 @@ import (
// removePodFinalizers removes all finalizers from all pods owned by us.
func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
log := d.deps.Log
kubecli := d.GetKubeCli()
if err := cachedStatus.IteratePods(func(pod *core.Pod) error {
if err := k8sutil.RemovePodFinalizers(ctx, log, kubecli, pod, pod.GetFinalizers(), true); err != nil {
if err := k8sutil.RemovePodFinalizers(ctx, cachedStatus, log, d.PodsModInterface(), pod, pod.GetFinalizers(), true); err != nil {
log.Warn().Err(err).Msg("Failed to remove pod finalizers")
return err
}
@ -49,7 +48,7 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
if err := kubecli.CoreV1().Pods(pod.GetNamespace()).Delete(ctxChild, pod.GetName(), meta.DeleteOptions{
if err := d.PodsModInterface().Delete(ctxChild, pod.GetName(), meta.DeleteOptions{
GracePeriodSeconds: util.NewInt64(1),
}); err != nil {
if !k8sutil.IsNotFound(err) {
@ -68,10 +67,9 @@ func (d *Deployment) removePodFinalizers(ctx context.Context, cachedStatus inspe
// removePVCFinalizers removes all finalizers from all PVCs owned by us.
func (d *Deployment) removePVCFinalizers(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
log := d.deps.Log
kubecli := d.GetKubeCli()
if err := cachedStatus.IteratePersistentVolumeClaims(func(pvc *core.PersistentVolumeClaim) error {
if err := k8sutil.RemovePVCFinalizers(ctx, log, kubecli, pvc, pvc.GetFinalizers(), true); err != nil {
if err := k8sutil.RemovePVCFinalizers(ctx, cachedStatus, log, d.PersistentVolumeClaimsModInterface(), pvc, pvc.GetFinalizers(), true); err != nil {
log.Warn().Err(err).Msg("Failed to remove PVC finalizers")
return err
}

View file

@ -31,6 +31,16 @@ import (
"strconv"
"time"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim"
podMod "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor"
"github.com/arangodb/kube-arangodb/pkg/deployment/reconcile"
"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
@ -89,16 +99,15 @@ func (d *Deployment) GetServerGroupIterator() resources.ServerGroupIterator {
return d.apiObject
}
// GetKubeCli returns the kubernetes client
func (d *Deployment) GetKubeCli() kubernetes.Interface {
func (d *Deployment) getKubeCli() kubernetes.Interface {
return d.deps.KubeCli
}
func (d *Deployment) GetMonitoringV1Cli() monitoringClient.MonitoringV1Interface {
func (d *Deployment) getMonitoringV1Cli() monitoringClient.MonitoringV1Interface {
return d.deps.KubeMonitoringCli
}
func (d *Deployment) GetArangoCli() versioned.Interface {
func (d *Deployment) getArangoCli() versioned.Interface {
return d.deps.DatabaseCRCli
}
@ -277,14 +286,9 @@ func (d *Deployment) getAuth() (driver.Authentication, error) {
return nil, nil
}
var secrets secret.ReadInterface = d.GetKubeCli().CoreV1().Secrets(d.GetNamespace())
if currentState := d.currentState; currentState != nil {
secrets = currentState.SecretReadInterface()
}
var secret string
if i := d.apiObject.Status.CurrentImage; i == nil || !features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) {
s, err := secrets.Get(context.Background(), d.apiObject.Spec.Authentication.GetJWTSecretName(), meta.GetOptions{})
s, err := d.GetCachedStatus().SecretReadInterface().Get(context.Background(), d.apiObject.Spec.Authentication.GetJWTSecretName(), meta.GetOptions{})
if err != nil {
return nil, errors.Newf("JWT Secret is missing")
}
@ -296,7 +300,7 @@ func (d *Deployment) getAuth() (driver.Authentication, error) {
secret = string(jwt)
} else {
s, err := secrets.Get(context.Background(), pod.JWTSecretFolder(d.GetName()), meta.GetOptions{})
s, err := d.GetCachedStatus().SecretReadInterface().Get(context.Background(), pod.JWTSecretFolder(d.GetName()), meta.GetOptions{})
if err != nil {
d.deps.Log.Error().Err(err).Msgf("Unable to get secret")
return nil, errors.Newf("JWT Folder Secret is missing")
@ -328,11 +332,8 @@ func (d *Deployment) getAuth() (driver.Authentication, error) {
func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGroup, id string) (client.API, error) {
// Fetch monitoring token
log := d.deps.Log
kubecli := d.deps.KubeCli
ns := d.GetNamespace()
secrets := kubecli.CoreV1().Secrets(ns)
secretName := d.apiObject.Spec.Sync.Monitoring.GetTokenSecretName()
monitoringToken, err := k8sutil.GetTokenSecret(ctx, secrets, secretName)
monitoringToken, err := k8sutil.GetTokenSecret(ctx, d.GetCachedStatus().SecretReadInterface(), secretName)
if err != nil {
log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync monitoring secret")
return nil, errors.WithStack(err)
@ -387,19 +388,15 @@ func (d *Deployment) CreateMember(ctx context.Context, group api.ServerGroup, id
// GetPod returns pod.
func (d *Deployment) GetPod(ctx context.Context, podName string) (*core.Pod, error) {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
return d.deps.KubeCli.CoreV1().Pods(d.GetNamespace()).Get(ctxChild, podName, meta.GetOptions{})
return d.GetCachedStatus().PodReadInterface().Get(ctx, podName, meta.GetOptions{})
}
// DeletePod deletes a pod with given name in the namespace
// of the deployment. If the pod does not exist, the error is ignored.
func (d *Deployment) DeletePod(ctx context.Context, podName string) error {
log := d.deps.Log
ns := d.GetNamespace()
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return d.deps.KubeCli.CoreV1().Pods(ns).Delete(ctxChild, podName, meta.DeleteOptions{})
return d.PodsModInterface().Delete(ctxChild, podName, meta.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Debug().Err(err).Str("pod", podName).Msg("Failed to remove pod")
@ -413,11 +410,10 @@ func (d *Deployment) DeletePod(ctx context.Context, podName string) error {
func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error {
log := d.deps.Log
podName := p.GetName()
ns := p.GetNamespace()
options := meta.NewDeleteOptions(0)
options.Preconditions = meta.NewUIDPreconditions(string(p.GetUID()))
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return d.deps.KubeCli.CoreV1().Pods(ns).Delete(ctxChild, podName, *options)
return d.PodsModInterface().Delete(ctxChild, podName, *options)
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Debug().Err(err).Str("pod", podName).Msg("Failed to cleanup pod")
@ -430,12 +426,10 @@ func (d *Deployment) CleanupPod(ctx context.Context, p *core.Pod) error {
// of the deployment. If the pod does not exist, the error is ignored.
func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) error {
log := d.deps.Log
ns := d.GetNamespace()
kubecli := d.deps.KubeCli
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
p, err := kubecli.CoreV1().Pods(ns).Get(ctxChild, podName, meta.GetOptions{})
p, err := d.GetCachedStatus().PodReadInterface().Get(ctxChild, podName, meta.GetOptions{})
if err != nil {
if k8sutil.IsNotFound(err) {
return nil
@ -443,7 +437,7 @@ func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) er
return errors.WithStack(err)
}
err = k8sutil.RemovePodFinalizers(ctx, log, d.deps.KubeCli, p, p.GetFinalizers(), true)
err = k8sutil.RemovePodFinalizers(ctx, d.GetCachedStatus(), log, d.PodsModInterface(), p, p.GetFinalizers(), true)
if err != nil {
return errors.WithStack(err)
}
@ -454,9 +448,8 @@ func (d *Deployment) RemovePodFinalizers(ctx context.Context, podName string) er
// of the deployment. If the pvc does not exist, the error is ignored.
func (d *Deployment) DeletePvc(ctx context.Context, pvcName string) error {
log := d.deps.Log
ns := d.GetNamespace()
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return d.deps.KubeCli.CoreV1().PersistentVolumeClaims(ns).Delete(ctxChild, pvcName, meta.DeleteOptions{})
return d.PersistentVolumeClaimsModInterface().Delete(ctxChild, pvcName, meta.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Debug().Err(err).Str("pvc", pvcName).Msg("Failed to remove pvc")
@ -469,7 +462,7 @@ func (d *Deployment) DeletePvc(ctx context.Context, pvcName string) error {
// of the deployment. If the pvc does not exist, the error is ignored.
func (d *Deployment) UpdatePvc(ctx context.Context, pvc *core.PersistentVolumeClaim) error {
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := d.GetKubeCli().CoreV1().PersistentVolumeClaims(d.GetNamespace()).Update(ctxChild, pvc, meta.UpdateOptions{})
_, err := d.PersistentVolumeClaimsModInterface().Update(ctxChild, pvc, meta.UpdateOptions{})
return err
})
if err == nil {
@ -486,16 +479,11 @@ func (d *Deployment) UpdatePvc(ctx context.Context, pvc *core.PersistentVolumeCl
// GetOwnedPVCs returns a list of all PVCs owned by the deployment.
func (d *Deployment) GetOwnedPVCs() ([]core.PersistentVolumeClaim, error) {
// Get all current PVCs
log := d.deps.Log
pvcs, err := d.deps.KubeCli.CoreV1().PersistentVolumeClaims(d.GetNamespace()).List(context.Background(), k8sutil.DeploymentListOpt(d.GetName()))
if err != nil {
log.Debug().Err(err).Msg("Failed to list PVCs")
return nil, errors.WithStack(err)
}
myPVCs := make([]core.PersistentVolumeClaim, 0, len(pvcs.Items))
for _, p := range pvcs.Items {
if d.isOwnerOf(&p) {
myPVCs = append(myPVCs, p)
pvcs := d.GetCachedStatus().PersistentVolumeClaims()
myPVCs := make([]core.PersistentVolumeClaim, 0, len(pvcs))
for _, p := range pvcs {
if d.isOwnerOf(p) {
myPVCs = append(myPVCs, *p)
}
}
return myPVCs, nil
@ -506,7 +494,7 @@ func (d *Deployment) GetPvc(ctx context.Context, pvcName string) (*core.Persiste
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
pvc, err := d.deps.KubeCli.CoreV1().PersistentVolumeClaims(d.GetNamespace()).Get(ctxChild, pvcName, meta.GetOptions{})
pvc, err := d.GetCachedStatus().PersistentVolumeClaimReadInterface().Get(ctxChild, pvcName, meta.GetOptions{})
if err != nil {
log.Debug().Err(err).Str("pvc-name", pvcName).Msg("Failed to get PVC")
return nil, errors.WithStack(err)
@ -518,8 +506,7 @@ func (d *Deployment) GetPvc(ctx context.Context, pvcName string) (*core.Persiste
// the given member.
func (d *Deployment) GetTLSKeyfile(group api.ServerGroup, member api.MemberStatus) (string, error) {
secretName := k8sutil.CreateTLSKeyfileSecretName(d.GetName(), group.AsRole(), member.ID)
secrets := d.deps.KubeCli.CoreV1().Secrets(d.GetNamespace())
result, err := k8sutil.GetTLSKeyfileSecret(secrets, secretName)
result, err := k8sutil.GetTLSKeyfileSecret(d.GetCachedStatus().SecretReadInterface(), secretName)
if err != nil {
return "", errors.WithStack(err)
}
@ -530,9 +517,8 @@ func (d *Deployment) GetTLSKeyfile(group api.ServerGroup, member api.MemberStatu
// If the secret does not exist, the error is ignored.
func (d *Deployment) DeleteTLSKeyfile(ctx context.Context, group api.ServerGroup, member api.MemberStatus) error {
secretName := k8sutil.CreateTLSKeyfileSecretName(d.GetName(), group.AsRole(), member.ID)
ns := d.GetNamespace()
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return d.deps.KubeCli.CoreV1().Secrets(ns).Delete(ctxChild, secretName, meta.DeleteOptions{})
return d.SecretsModInterface().Delete(ctxChild, secretName, meta.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
return errors.WithStack(err)
@ -543,8 +529,7 @@ func (d *Deployment) DeleteTLSKeyfile(ctx context.Context, group api.ServerGroup
// DeleteSecret removes the Secret with given name.
// If the secret does not exist, the error is ignored.
func (d *Deployment) DeleteSecret(secretName string) error {
ns := d.GetNamespace()
if err := d.deps.KubeCli.CoreV1().Secrets(ns).Delete(context.Background(), secretName, meta.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) {
if err := d.SecretsModInterface().Delete(context.Background(), secretName, meta.DeleteOptions{}); err != nil && !k8sutil.IsNotFound(err) {
return errors.WithStack(err)
}
return nil
@ -639,8 +624,36 @@ func (d *Deployment) WithStatusUpdate(ctx context.Context, action resources.Depl
}, force...)
}
func (d *Deployment) SecretsInterface() k8sutil.SecretInterface {
return d.GetKubeCli().CoreV1().Secrets(d.GetNamespace())
func (d *Deployment) SecretsModInterface() secret.ModInterface {
return d.getKubeCli().CoreV1().Secrets(d.GetNamespace())
}
func (d *Deployment) PodsModInterface() podMod.ModInterface {
return d.getKubeCli().CoreV1().Pods(d.GetNamespace())
}
func (d *Deployment) ServiceAccountsModInterface() serviceaccount.ModInterface {
return d.getKubeCli().CoreV1().ServiceAccounts(d.GetNamespace())
}
func (d *Deployment) ServicesModInterface() service.ModInterface {
return d.getKubeCli().CoreV1().Services(d.GetNamespace())
}
func (d *Deployment) PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface {
return d.getKubeCli().CoreV1().PersistentVolumeClaims(d.GetNamespace())
}
func (d *Deployment) PodDisruptionBudgetsModInterface() poddisruptionbudget.ModInterface {
return d.getKubeCli().PolicyV1beta1().PodDisruptionBudgets(d.GetNamespace())
}
func (d *Deployment) ServiceMonitorsModInterface() servicemonitor.ModInterface {
return d.getMonitoringV1Cli().ServiceMonitors(d.GetNamespace())
}
func (d *Deployment) ArangoMembersModInterface() arangomember.ModInterface {
return d.getArangoCli().DatabaseV1().ArangoMembers(d.GetNamespace())
}
func (d *Deployment) GetName() string {
@ -648,18 +661,12 @@ func (d *Deployment) GetName() string {
}
func (d *Deployment) GetOwnedPods(ctx context.Context) ([]core.Pod, error) {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
pods := d.GetCachedStatus().Pods()
pods, err := d.GetKubeCli().CoreV1().Pods(d.GetNamespace()).List(ctxChild, meta.ListOptions{})
if err != nil {
return nil, err
}
podList := make([]core.Pod, 0, len(pods))
podList := make([]core.Pod, 0, len(pods.Items))
for _, p := range pods.Items {
if !d.isOwnerOf(&p) {
for _, p := range pods {
if !d.isOwnerOf(p) {
continue
}
c := p.DeepCopy()
@ -670,7 +677,11 @@ func (d *Deployment) GetOwnedPods(ctx context.Context) ([]core.Pod, error) {
}
func (d *Deployment) GetCachedStatus() inspectorInterface.Inspector {
return d.currentState
if c := d.currentState; c != nil {
return c
}
return inspector.NewEmptyInspector()
}
func (d *Deployment) SetCachedStatus(i inspectorInterface.Inspector) {
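
The *ModInterface accessors above replace the removed public GetKubeCli()/GetMonitoringV1Cli()/GetArangoCli() getters: callers now get a narrow, per-resource mutation surface, while reads go through the cached inspector (GetCachedStatus). A sketch of what the pod variant plausibly looks like — an assumed shape covering only the mutating subset of client-go's PodInterface; the real definition lives under pkg/util/k8sutil/inspector/pod and may differ:

// Assumed shape of podMod.ModInterface: only the write verbs the operator uses.
// Imports assumed: context, core "k8s.io/api/core/v1",
// meta "k8s.io/apimachinery/pkg/apis/meta/v1", "k8s.io/apimachinery/pkg/types".
type ModInterface interface {
	Create(ctx context.Context, pod *core.Pod, opts meta.CreateOptions) (*core.Pod, error)
	Update(ctx context.Context, pod *core.Pod, opts meta.UpdateOptions) (*core.Pod, error)
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (*core.Pod, error)
	Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}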

View file

@ -298,7 +298,7 @@ func (d *Deployment) run() {
for {
select {
case <-d.stopCh:
cachedStatus, err := inspector.NewInspector(context.Background(), d.GetKubeCli(), d.GetMonitoringV1Cli(), d.GetArangoCli(), d.GetNamespace())
cachedStatus, err := inspector.NewInspector(context.Background(), d.getKubeCli(), d.getMonitoringV1Cli(), d.getArangoCli(), d.GetNamespace())
if err != nil {
log.Error().Err(err).Msg("Unable to get resources")
}

View file

@ -856,9 +856,8 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupAgents, firstAgentStatus)
secrets := deployment.GetKubeCli().CoreV1().Secrets(testNamespace)
key := make([]byte, 32)
k8sutil.CreateEncryptionKeySecret(secrets, testRocksDBEncryptionKey, key)
k8sutil.CreateEncryptionKeySecret(deployment.SecretsModInterface(), testRocksDBEncryptionKey, key)
},
ExpectedEvent: "member agent is created",
ExpectedPod: core.Pod{
@ -1237,9 +1236,8 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
testCase.ExpectedPod.ObjectMeta.Labels[k8sutil.LabelKeyArangoExporter] = testYes
secrets := deployment.GetKubeCli().CoreV1().Secrets(testNamespace)
key := make([]byte, 32)
k8sutil.CreateEncryptionKeySecret(secrets, testRocksDBEncryptionKey, key)
k8sutil.CreateEncryptionKeySecret(deployment.SecretsModInterface(), testRocksDBEncryptionKey, key)
authorization, err := createTestToken(deployment, testCase, []string{"/_api/version"})
require.NoError(t, err)

View file

@ -60,9 +60,8 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupAgents, firstAgentStatus)
secrets := deployment.GetKubeCli().CoreV1().Secrets(testNamespace)
key := make([]byte, 32)
k8sutil.CreateEncryptionKeySecret(secrets, testRocksDBEncryptionKey, key)
k8sutil.CreateEncryptionKeySecret(deployment.SecretsModInterface(), testRocksDBEncryptionKey, key)
},
ExpectedEvent: "member agent is created",
ExpectedPod: core.Pod{
@ -127,9 +126,8 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
testCase.ExpectedPod.ObjectMeta.Labels[k8sutil.LabelKeyArangoExporter] = testYes
secrets := deployment.GetKubeCli().CoreV1().Secrets(testNamespace)
key := make([]byte, 32)
k8sutil.CreateEncryptionKeySecret(secrets, testRocksDBEncryptionKey, key)
k8sutil.CreateEncryptionKeySecret(deployment.SecretsModInterface(), testRocksDBEncryptionKey, key)
authorization, err := createTestToken(deployment, testCase, []string{"/_api/version"})
require.NoError(t, err)
@ -218,9 +216,8 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupAgents, firstAgentStatus)
secrets := deployment.GetKubeCli().CoreV1().Secrets(testNamespace)
key := make([]byte, 32)
k8sutil.CreateEncryptionKeySecret(secrets, testRocksDBEncryptionKey, key)
k8sutil.CreateEncryptionKeySecret(deployment.SecretsModInterface(), testRocksDBEncryptionKey, key)
},
ExpectedEvent: "member agent is created",
ExpectedPod: core.Pod{
@ -282,9 +279,8 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupAgents, firstAgentStatus)
secrets := deployment.GetKubeCli().CoreV1().Secrets(testNamespace)
key := make([]byte, 32)
k8sutil.CreateEncryptionKeySecret(secrets, testRocksDBEncryptionKey, key)
k8sutil.CreateEncryptionKeySecret(deployment.SecretsModInterface(), testRocksDBEncryptionKey, key)
},
ExpectedEvent: "member agent is created",
ExpectedPod: core.Pod{

View file

@ -58,21 +58,15 @@ var (
// getReconciliationTimeout gets timeout for the reconciliation loop.
// The whole reconciliation loop timeout depends on the number of nodes, but is never less than one minute.
func (d *Deployment) getReconciliationTimeout() (time.Duration, error) {
ctx, cancel := context.WithTimeout(context.TODO(), k8sutil.GetRequestTimeout())
defer cancel()
nodes, err := d.GetKubeCli().CoreV1().Nodes().List(ctx, metav1.ListOptions{})
if err != nil {
return 0, errors.Wrapf(err, "Unable to get nodes")
func (d *Deployment) getReconciliationTimeout() time.Duration {
if nodes, ok := d.GetCachedStatus().GetNodes(); ok {
if timeout := timeoutReconciliationPerNode * time.Duration(len(nodes.Nodes())); timeout > time.Minute {
return timeout
}
if timeout := timeoutReconciliationPerNode * time.Duration(len(nodes.Items)); timeout > time.Minute {
return timeout, nil
}
// The minimum timeout for the reconciliation loop.
return time.Minute, nil
return time.Minute
}
// inspectDeployment inspects the entire deployment, creates
@ -86,11 +80,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
log := d.deps.Log
start := time.Now()
timeout, err := d.getReconciliationTimeout()
if err != nil {
log.Error().Err(err).Msg("Unable to get nodes")
return minInspectionInterval // Retry ASAP
}
timeout := d.getReconciliationTimeout()
ctxReconciliation, cancelReconciliation := context.WithTimeout(context.Background(), timeout)
defer cancelReconciliation()
@ -104,7 +94,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
deploymentName := d.GetName()
defer metrics.SetDuration(inspectDeploymentDurationGauges.WithLabelValues(deploymentName), start)
cachedStatus, err := inspector.NewInspector(context.Background(), d.GetKubeCli(), d.GetMonitoringV1Cli(), d.GetArangoCli(), d.GetNamespace())
cachedStatus, err := inspector.NewInspector(context.Background(), d.getKubeCli(), d.getMonitoringV1Cli(), d.getArangoCli(), d.GetNamespace())
if err != nil {
log.Error().Err(err).Msg("Unable to get resources")
return minInspectionInterval // Retry ASAP
@ -175,7 +165,6 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
t := time.Now()
d.SetCachedStatus(cachedStatus)
defer d.SetCachedStatus(nil)
defer func() {
d.deps.Log.Info().Msgf("Reconciliation loop took %s", time.Since(t))
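
Net effect of the getReconciliationTimeout change above: the timeout is now derived from the cached node inventory instead of a live Nodes().List call that could fail and abort the inspection, and it is still clamped to at least one minute. A standalone sketch of the same arithmetic (the per-node constant and the time import are assumptions; the operator defines its own value):

// timeoutReconciliationPerNode is an assumed illustrative value.
const timeoutReconciliationPerNode = 10 * time.Second

// reconciliationTimeout scales with the node count but never drops below one minute.
func reconciliationTimeout(nodeCount int) time.Duration {
	if t := timeoutReconciliationPerNode * time.Duration(nodeCount); t > time.Minute {
		return t
	}
	return time.Minute
}

// With 10s per node: 3 nodes -> 1m (floor applies), 12 nodes -> 2m.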

View file

@ -100,7 +100,7 @@ func TestEnsurePod_Sync_Error(t *testing.T) {
}
secretName := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
require.NoError(t, err)
},
ExpectedError: errors.New("Monitoring token secret validation failed: secrets \"" +
@ -134,7 +134,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
}
secretName := testCase.ArangoDeployment.Spec.Sync.TLS.GetCASecretName()
err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
require.NoError(t, err)
},
ExpectedError: errors.New("Failed to create TLS keyfile secret: secrets \"" +
@ -162,7 +162,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
}
secretName := testCase.ArangoDeployment.Spec.Authentication.GetJWTSecretName()
err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
require.NoError(t, err)
},
ExpectedError: errors.New("Cluster JWT secret validation failed: secrets \"" +
@ -190,7 +190,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
}
secretName := testCase.ArangoDeployment.Spec.Sync.Authentication.GetClientCASecretName()
err := deployment.GetKubeCli().CoreV1().Secrets(testNamespace).Delete(context.Background(), secretName, metav1.DeleteOptions{})
err := deployment.SecretsModInterface().Delete(context.Background(), secretName, metav1.DeleteOptions{})
require.NoError(t, err)
},
ExpectedError: errors.New("Client authentication CA certificate secret validation failed: " +
@ -227,7 +227,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupSyncMasters, firstSyncMaster)
name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), name)
require.NoError(t, err)
testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe(
@ -306,7 +306,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupSyncMasters, firstSyncMaster)
name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), name)
require.NoError(t, err)
testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe(
@ -408,7 +408,7 @@ func TestEnsurePod_Sync_Worker(t *testing.T) {
testCase.createTestPodData(deployment, api.ServerGroupSyncWorkers, firstSyncWorker)
name := testCase.ArangoDeployment.Spec.Sync.Monitoring.GetTokenSecretName()
auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
auth, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), name)
require.NoError(t, err)
testCase.ExpectedPod.Spec.Containers[0].LivenessProbe = createTestLivenessProbe(

View file

@ -27,18 +27,18 @@ import (
"fmt"
"testing"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/stretchr/testify/require"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/stretchr/testify/require"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func createTLSSNISecret(t *testing.T, client kubernetes.Interface, name, namespace string) {
func createTLSSNISecret(t *testing.T, client secret.ModInterface, name, namespace string) {
secret := core.Secret{
ObjectMeta: meta.ObjectMeta{
Name: name,
@ -49,7 +49,7 @@ func createTLSSNISecret(t *testing.T, client kubernetes.Interface, name, namespa
}
secret.Data[constants.SecretTLSKeyfile] = []byte("")
_, err := client.CoreV1().Secrets(namespace).Create(context.Background(), &secret, meta.CreateOptions{})
_, err := client.Create(context.Background(), &secret, meta.CreateOptions{})
require.NoError(t, err)
}
@ -85,8 +85,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@ -160,8 +160,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@ -235,8 +235,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@ -310,8 +310,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
@ -418,8 +418,8 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
TLSSNI: true,
},
Resources: func(t *testing.T, deployment *Deployment) {
createTLSSNISecret(t, deployment.GetKubeCli(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.GetKubeCli(), "sni2", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni1", deployment.Namespace())
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{

View file

@ -62,13 +62,12 @@ func runTestCases(t *testing.T, testCases ...testCaseStruct) {
func runTestCase(t *testing.T, testCase testCaseStruct) {
t.Run(testCase.Name, func(t *testing.T) {
// Arrange
d, eventRecorder := createTestDeployment(testCase.config, testCase.ArangoDeployment)
d, eventRecorder := createTestDeployment(t, testCase.config, testCase.ArangoDeployment)
errs := 0
for {
cache, err := inspector.NewInspector(context.Background(), d.GetKubeCli(), d.GetMonitoringV1Cli(), d.GetArangoCli(), d.GetNamespace())
require.NoError(t, err)
err = d.resources.EnsureSecrets(context.Background(), log.Logger, cache)
require.NoError(t, d.currentState.Refresh(context.Background()))
err := d.resources.EnsureSecrets(context.Background(), log.Logger, d.GetCachedStatus())
if err == nil {
break
}
@ -123,8 +122,6 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
// Set members
if err := d.status.last.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error {
for _, m := range list {
c := d.GetArangoCli()
k := d.GetKubeCli()
member := api.ArangoMember{
ObjectMeta: metav1.ObjectMeta{
@ -137,7 +134,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
},
}
if _, err := c.DatabaseV1().ArangoMembers(member.GetNamespace()).Create(context.Background(), &member, metav1.CreateOptions{}); err != nil {
if _, err := d.ArangoMembersModInterface().Create(context.Background(), &member, metav1.CreateOptions{}); err != nil {
return err
}
@ -148,11 +145,11 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
},
}
if _, err := k.CoreV1().Services(member.GetNamespace()).Create(context.Background(), &s, metav1.CreateOptions{}); err != nil {
if _, err := d.ServicesModInterface().Create(context.Background(), &s, metav1.CreateOptions{}); err != nil {
return err
}
cache, err := inspector.NewInspector(context.Background(), d.GetKubeCli(), d.GetMonitoringV1Cli(), d.GetArangoCli(), d.GetNamespace())
cache, err := inspector.NewInspector(context.Background(), d.getKubeCli(), d.getMonitoringV1Cli(), d.getArangoCli(), d.GetNamespace())
require.NoError(t, err)
groupSpec := d.apiObject.Spec.GetServerGroupSpec(group)
@ -174,11 +171,11 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
member.Status.Template = podTemplate
member.Spec.Template = podTemplate
if _, err := c.DatabaseV1().ArangoMembers(member.GetNamespace()).Update(context.Background(), &member, metav1.UpdateOptions{}); err != nil {
if _, err := d.ArangoMembersModInterface().Update(context.Background(), &member, metav1.UpdateOptions{}); err != nil {
return err
}
if _, err := c.DatabaseV1().ArangoMembers(member.GetNamespace()).UpdateStatus(context.Background(), &member, metav1.UpdateOptions{}); err != nil {
if _, err := d.ArangoMembersModInterface().UpdateStatus(context.Background(), &member, metav1.UpdateOptions{}); err != nil {
return err
}
}
@ -192,9 +189,8 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
}
// Act
cache, err := inspector.NewInspector(context.Background(), d.GetKubeCli(), d.GetMonitoringV1Cli(), d.GetArangoCli(), d.GetNamespace())
require.NoError(t, err)
err = d.resources.EnsurePods(context.Background(), cache)
require.NoError(t, d.currentState.Refresh(context.Background()))
err = d.resources.EnsurePods(context.Background(), d.GetCachedStatus())
// Assert
if testCase.ExpectedError != nil {

View file

@ -31,6 +31,9 @@ import (
"path/filepath"
"testing"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector"
"github.com/stretchr/testify/require"
"github.com/arangodb/kube-arangodb/pkg/deployment/client"
monitoringFakeClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/fake"
@ -103,7 +106,7 @@ func createTestLifecycle() *core.Lifecycle {
func createTestToken(deployment *Deployment, testCase *testCaseStruct, paths []string) (string, error) {
name := testCase.ArangoDeployment.Spec.Authentication.GetJWTSecretName()
s, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetKubeCli().CoreV1().Secrets(testNamespace), name)
s, err := k8sutil.GetTokenSecret(context.Background(), deployment.GetCachedStatus().SecretReadInterface(), name)
if err != nil {
return "", err
}
@ -443,7 +446,7 @@ func createTestCommandForSyncWorker(name string, tls, monitoring bool) []string
return command
}
func createTestDeployment(config Config, arangoDeployment *api.ArangoDeployment) (*Deployment, *recordfake.FakeRecorder) {
func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.ArangoDeployment) (*Deployment, *recordfake.FakeRecorder) {
eventRecorder := recordfake.NewFakeRecorder(10)
kubernetesClientSet := fake.NewSimpleClientset()
@ -484,6 +487,10 @@ func createTestDeployment(config Config, arangoDeployment *api.ArangoDeployment)
}
d.clientCache = client.NewClientCache(d.getArangoDeployment, conn.NewFactory(d.getAuth, d.getConnConfig))
cachedStatus, err := inspector.NewInspector(context.Background(), d.getKubeCli(), d.getMonitoringV1Cli(), d.getArangoCli(), d.GetNamespace())
require.NoError(t, err)
d.SetCachedStatus(cachedStatus)
arangoDeployment.Spec.SetDefaults(arangoDeployment.GetName())
d.resources = resources.NewResources(deps.Log, d)

View file

@ -41,7 +41,6 @@ import (
"github.com/rs/zerolog"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
@ -73,11 +72,11 @@ type ArangoDImageUpdateContainer struct {
}
type imagesBuilder struct {
Context resources.Context
APIObject k8sutil.APIObject
Spec api.DeploymentSpec
Status api.DeploymentStatus
Log zerolog.Logger
KubeCli kubernetes.Interface
UpdateCRStatus func(status api.DeploymentStatus) error
}
@ -86,11 +85,11 @@ type imagesBuilder struct {
func (d *Deployment) ensureImages(ctx context.Context, apiObject *api.ArangoDeployment, cachedStatus inspectorInterface.Inspector) (bool, bool, error) {
status, lastVersion := d.GetStatus()
ib := imagesBuilder{
Context: d,
APIObject: apiObject,
Spec: apiObject.Spec,
Status: status,
Log: d.deps.Log,
KubeCli: d.deps.KubeCli,
UpdateCRStatus: func(status api.DeploymentStatus) error {
if err := d.UpdateStatus(ctx, status, lastVersion); err != nil {
return errors.WithStack(err)
@ -129,7 +128,6 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
role := k8sutil.ImageIDAndVersionRole
id := fmt.Sprintf("%0x", sha1.Sum([]byte(image)))[:6]
podName := k8sutil.CreatePodName(ib.APIObject.GetName(), role, id, "")
ns := ib.APIObject.GetNamespace()
log := ib.Log.With().
Str("pod", podName).
Str("image", image).
@ -138,14 +136,14 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
// Check if pod exists
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
pod, err := ib.KubeCli.CoreV1().Pods(ns).Get(ctxChild, podName, metav1.GetOptions{})
pod, err := ib.Context.GetCachedStatus().PodReadInterface().Get(ctxChild, podName, metav1.GetOptions{})
if err == nil {
// Pod found
if k8sutil.IsPodFailed(pod) {
// Wait some time before deleting the pod
if time.Now().After(pod.GetCreationTimestamp().Add(30 * time.Second)) {
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return ib.KubeCli.CoreV1().Pods(ns).Delete(ctxChild, podName, metav1.DeleteOptions{})
return ib.Context.PodsModInterface().Delete(ctxChild, podName, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Warn().Err(err).Msg("Failed to delete Image ID Pod")
@ -187,7 +185,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
// We have all the info we need now, kill the pod and store the image info.
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return ib.KubeCli.CoreV1().Pods(ns).Delete(ctxChild, podName, metav1.DeleteOptions{})
return ib.Context.PodsModInterface().Delete(ctxChild, podName, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Warn().Err(err).Msg("Failed to delete Image ID Pod")
@ -233,7 +231,7 @@ func (ib *imagesBuilder) fetchArangoDBImageIDAndVersion(ctx context.Context, cac
}
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, _, err := resources.CreateArangoPod(ctxChild, ib.KubeCli, ib.APIObject, ib.Spec, api.ServerGroupImageDiscovery, pod)
_, _, err := resources.CreateArangoPod(ctxChild, ib.Context.PodsModInterface(), ib.APIObject, ib.Spec, api.ServerGroupImageDiscovery, pod)
return err
})
if err != nil {

View file

@ -29,10 +29,10 @@ import (
"testing"
"time"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources/inspector"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
@ -183,13 +183,12 @@ func TestEnsureImages(t *testing.T) {
},
}
_, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(context.Background(), &pod, metav1.CreateOptions{})
_, err := deployment.PodsModInterface().Create(context.Background(), &pod, metav1.CreateOptions{})
require.NoError(t, err)
},
After: func(t *testing.T, deployment *Deployment) {
pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, pods.Items, 1)
pods := deployment.GetCachedStatus().Pods()
require.Len(t, pods, 1)
},
},
{
@ -208,13 +207,12 @@ func TestEnsureImages(t *testing.T) {
Phase: v1.PodFailed,
},
}
_, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(context.Background(), &pod, metav1.CreateOptions{})
_, err := deployment.PodsModInterface().Create(context.Background(), &pod, metav1.CreateOptions{})
require.NoError(t, err)
},
After: func(t *testing.T, deployment *Deployment) {
pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, pods.Items, 0)
pods := deployment.GetCachedStatus().Pods()
require.Len(t, pods, 0)
},
},
{
@ -238,13 +236,12 @@ func TestEnsureImages(t *testing.T) {
},
},
}
_, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(context.Background(), &pod, metav1.CreateOptions{})
_, err := deployment.PodsModInterface().Create(context.Background(), &pod, metav1.CreateOptions{})
require.NoError(t, err)
},
After: func(t *testing.T, deployment *Deployment) {
pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, pods.Items, 1)
pods := deployment.GetCachedStatus().Pods()
require.Len(t, pods, 1)
},
},
{
@ -269,13 +266,12 @@ func TestEnsureImages(t *testing.T) {
},
},
}
_, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(context.Background(), &pod, metav1.CreateOptions{})
_, err := deployment.PodsModInterface().Create(context.Background(), &pod, metav1.CreateOptions{})
require.NoError(t, err)
},
After: func(t *testing.T, deployment *Deployment) {
pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, pods.Items, 1)
pods := deployment.GetCachedStatus().Pods()
require.Len(t, pods, 1)
},
},
{
@ -303,13 +299,12 @@ func TestEnsureImages(t *testing.T) {
},
},
}
_, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).Create(context.Background(), &pod, metav1.CreateOptions{})
_, err := deployment.PodsModInterface().Create(context.Background(), &pod, metav1.CreateOptions{})
require.NoError(t, err)
},
After: func(t *testing.T, deployment *Deployment) {
pods, err := deployment.GetKubeCli().CoreV1().Pods(testNamespace).List(context.Background(), metav1.ListOptions{})
require.NoError(t, err)
require.Len(t, pods.Items, 1)
pods := deployment.GetCachedStatus().Pods()
require.Len(t, pods, 1)
},
},
}
@ -318,7 +313,7 @@ func TestEnsureImages(t *testing.T) {
//nolint:scopelint
t.Run(testCase.Name, func(t *testing.T) {
// Arrange
d, _ := createTestDeployment(Config{}, testCase.ArangoDeployment)
d, _ := createTestDeployment(t, Config{}, testCase.ArangoDeployment)
d.status.last = api.DeploymentStatus{
Images: createTestImages(false),
@ -326,6 +321,7 @@ func TestEnsureImages(t *testing.T) {
if testCase.Before != nil {
testCase.Before(t, d)
require.NoError(t, d.GetCachedStatus().Refresh(context.Background()))
}
// Create custom resource in the fake kubernetes API
@ -355,6 +351,8 @@ func TestEnsureImages(t *testing.T) {
require.Equal(t, ownerRef[0], testCase.ArangoDeployment.AsOwner())
}
require.NoError(t, d.GetCachedStatus().Refresh(context.Background()))
if testCase.After != nil {
testCase.After(t, d)
}

View file

@ -29,6 +29,8 @@ import (
"fmt"
"path/filepath"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/interfaces"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
@ -68,7 +70,7 @@ func GroupEncryptionSupported(mode api.DeploymentMode, group api.ServerGroup) bo
}
}
func GetEncryptionKey(ctx context.Context, secrets k8sutil.SecretInterface, name string) (string, []byte, bool, error) {
func GetEncryptionKey(ctx context.Context, secrets secret.ReadInterface, name string) (string, []byte, bool, error) {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()

View file

@ -152,7 +152,7 @@ func (a actionBootstrapSetPassword) ensureUserPasswordSecret(ctx context.Context
token := hex.EncodeToString(tokenData)
owner := a.actionCtx.GetAPIObject().AsOwner()
err := k8sutil.CreateBasicAuthSecret(ctx, a.actionCtx.SecretsInterface(), secret, user, token, &owner)
err := k8sutil.CreateBasicAuthSecret(ctx, a.actionCtx.SecretsModInterface(), secret, user, token, &owner)
if err != nil {
return "", err
}

View file

@ -26,9 +26,14 @@ package reconcile
import (
"context"
"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1"
"k8s.io/client-go/kubernetes"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor"
"github.com/arangodb/kube-arangodb/pkg/deployment/resources"
@ -57,7 +62,8 @@ type ActionContext interface {
resources.DeploymentAgencyMaintenance
resources.ArangoMemberContext
resources.DeploymentPodRenderer
resources.DeploymentCLIGetter
resources.DeploymentModInterfaces
resources.DeploymentCachedStatus
// GetAPIObject returns the deployment as k8s object.
GetAPIObject() k8sutil.APIObject
@ -141,13 +147,10 @@ type ActionContext interface {
EnableScalingCluster(ctx context.Context) error
// WithStatusUpdate update status of ArangoDeployment with defined modifier. If action returns True action is taken
UpdateClusterCondition(ctx context.Context, conditionType api.ConditionType, status bool, reason, message string) error
SecretsInterface() k8sutil.SecretInterface
// GetBackup receives information about a backup resource
GetBackup(ctx context.Context, backup string) (*backupApi.ArangoBackup, error)
// GetName receives information about a deployment name
GetName() string
// GetCachedStatus current cached state of deployment
GetCachedStatus() inspectorInterface.Inspector
// SelectImage select currently used image by pod
SelectImage(spec api.DeploymentSpec, status api.DeploymentStatus) (api.ImageInfo, bool)
}
@ -168,18 +171,6 @@ type actionContext struct {
cachedStatus inspectorInterface.Inspector
}
func (ac *actionContext) GetKubeCli() kubernetes.Interface {
return ac.context.GetKubeCli()
}
func (ac *actionContext) GetMonitoringV1Cli() monitoringClient.MonitoringV1Interface {
return ac.context.GetMonitoringV1Cli()
}
func (ac *actionContext) GetArangoCli() versioned.Interface {
return ac.context.GetArangoCli()
}
func (ac *actionContext) RenderPodForMemberFromCurrent(ctx context.Context, cachedStatus inspectorInterface.Inspector, memberID string) (*core.Pod, error) {
return ac.context.RenderPodForMemberFromCurrent(ctx, cachedStatus, memberID)
}
@ -244,8 +235,36 @@ func (ac *actionContext) WithStatusUpdate(ctx context.Context, action resources.
return ac.context.WithStatusUpdate(ctx, action, force...)
}
func (ac *actionContext) SecretsInterface() k8sutil.SecretInterface {
return ac.context.SecretsInterface()
func (ac *actionContext) SecretsModInterface() secret.ModInterface {
return ac.context.SecretsModInterface()
}
func (ac *actionContext) PodsModInterface() pod.ModInterface {
return ac.context.PodsModInterface()
}
func (ac *actionContext) ServiceAccountsModInterface() serviceaccount.ModInterface {
return ac.context.ServiceAccountsModInterface()
}
func (ac *actionContext) ServicesModInterface() service.ModInterface {
return ac.context.ServicesModInterface()
}
func (ac *actionContext) PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface {
return ac.context.PersistentVolumeClaimsModInterface()
}
func (ac *actionContext) PodDisruptionBudgetsModInterface() poddisruptionbudget.ModInterface {
return ac.context.PodDisruptionBudgetsModInterface()
}
func (ac *actionContext) ServiceMonitorsModInterface() servicemonitor.ModInterface {
return ac.context.ServiceMonitorsModInterface()
}
func (ac *actionContext) ArangoMembersModInterface() arangomember.ModInterface {
return ac.context.ArangoMembersModInterface()
}
func (ac *actionContext) GetShardSyncStatus() bool {

View file

@ -88,7 +88,7 @@ func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) {
secret = s
}
sha, d, exists, err := pod.GetEncryptionKey(ctx, a.actionCtx.SecretsInterface(), secret)
sha, d, exists, err := pod.GetEncryptionKey(ctx, a.actionCtx.GetCachedStatus().SecretReadInterface(), secret)
if err != nil {
a.log.Error().Err(err).Msgf("Unable to fetch current encryption key")
return true, nil
@ -108,7 +108,7 @@ func (a *encryptionKeyAddAction) Start(ctx context.Context) (bool, error) {
}
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := a.actionCtx.SecretsInterface().Patch(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
_, err := a.actionCtx.SecretsModInterface().Patch(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
return err
})
if err != nil {

View file

@ -60,7 +60,7 @@ func (a *encryptionKeyRefreshAction) Start(ctx context.Context) (bool, error) {
func (a *encryptionKeyRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error) {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
keyfolder, err := a.actionCtx.SecretsInterface().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetName()), meta.GetOptions{})
keyfolder, err := a.actionCtx.GetCachedStatus().SecretReadInterface().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetName()), meta.GetOptions{})
if err != nil {
a.log.Err(err).Msgf("Unable to fetch encryption folder")
return true, false, nil

View file

@ -83,7 +83,7 @@ func (a *encryptionKeyRemoveAction) Start(ctx context.Context) (bool, error) {
}
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := a.actionCtx.SecretsInterface().Patch(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
_, err := a.actionCtx.SecretsModInterface().Patch(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
return err
})
if err != nil {

View file

@ -63,7 +63,7 @@ func (a *encryptionKeyStatusUpdateAction) Start(ctx context.Context) (bool, erro
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
f, err := a.actionCtx.SecretsInterface().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), meta.GetOptions{})
f, err := a.actionCtx.GetCachedStatus().SecretReadInterface().Get(ctxChild, pod.GetEncryptionFolderSecretName(a.actionCtx.GetAPIObject().GetName()), meta.GetOptions{})
if err != nil {
a.log.Error().Err(err).Msgf("Unable to get folder info")
return true, nil

View file

@ -118,7 +118,7 @@ func (a *jwtAddAction) Start(ctx context.Context) (bool, error) {
}
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := a.actionCtx.SecretsInterface().Patch(ctxChild, pod.JWTSecretFolder(a.actionCtx.GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
_, err := a.actionCtx.SecretsModInterface().Patch(ctxChild, pod.JWTSecretFolder(a.actionCtx.GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
return err
})
if err != nil {

View file

@ -109,7 +109,7 @@ func (a *jwtCleanAction) Start(ctx context.Context) (bool, error) {
}
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := a.actionCtx.SecretsInterface().Patch(ctxChild, pod.JWTSecretFolder(a.actionCtx.GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
_, err := a.actionCtx.SecretsModInterface().Patch(ctxChild, pod.JWTSecretFolder(a.actionCtx.GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
return err
})
if err != nil {

View file

@ -120,7 +120,7 @@ func (a *jwtSetActiveAction) Start(ctx context.Context) (bool, error) {
}
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := a.actionCtx.SecretsInterface().Patch(ctxChild, pod.JWTSecretFolder(a.actionCtx.GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
_, err := a.actionCtx.SecretsModInterface().Patch(ctxChild, pod.JWTSecretFolder(a.actionCtx.GetName()), types.JSONPatchType, patch, meta.PatchOptions{})
return err
})
if err != nil {

View file

@ -197,7 +197,7 @@ func (a actionRuntimeContainerImageUpdate) Start(ctx context.Context) (bool, err
// Update pod image
pod.Spec.Containers[id].Image = image
if _, err := a.actionCtx.GetKubeCli().CoreV1().Pods(pod.GetNamespace()).Update(ctx, pod, v1.UpdateOptions{}); err != nil {
if _, err := a.actionCtx.PodsModInterface().Update(ctx, pod, v1.UpdateOptions{}); err != nil {
return true, err
}

View file

@ -117,7 +117,7 @@ func (a *appendTLSCACertificateAction) Start(ctx context.Context) (bool, error)
}
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := a.actionCtx.SecretsInterface().Patch(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), types.JSONPatchType, patch, meta.PatchOptions{})
_, err := a.actionCtx.SecretsModInterface().Patch(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), types.JSONPatchType, patch, meta.PatchOptions{})
return err
})
if err != nil {

View file

@ -120,7 +120,7 @@ func (a *cleanTLSCACertificateAction) Start(ctx context.Context) (bool, error) {
a.log.Info().Msgf("Removing key %s from truststore", certChecksum)
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := a.actionCtx.SecretsInterface().Patch(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), types.JSONPatchType, patch, meta.PatchOptions{})
_, err := a.actionCtx.SecretsModInterface().Patch(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), types.JSONPatchType, patch, meta.PatchOptions{})
return err
})
if err != nil {

View file

@ -56,8 +56,7 @@ func (a *renewTLSCACertificateAction) Start(ctx context.Context) (bool, error) {
}
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
s := a.actionCtx.SecretsInterface()
return s.Delete(ctxChild, a.actionCtx.GetSpec().TLS.GetCASecretName(), meta.DeleteOptions{})
return a.actionCtx.SecretsModInterface().Delete(ctxChild, a.actionCtx.GetSpec().TLS.GetCASecretName(), meta.DeleteOptions{})
})
if err != nil {
if !k8sutil.IsNotFound(err) {

View file

@ -60,7 +60,7 @@ func (a *tlsKeyStatusUpdateAction) Start(ctx context.Context) (bool, error) {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
f, err := a.actionCtx.SecretsInterface().Get(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), meta.GetOptions{})
f, err := a.actionCtx.GetCachedStatus().SecretReadInterface().Get(ctxChild, resources.GetCASecretName(a.actionCtx.GetAPIObject()), meta.GetOptions{})
if err != nil {
a.log.Error().Err(err).Msgf("Unable to get folder info")
return true, nil

View file

@ -48,7 +48,8 @@ type Context interface {
resources.ArangoMemberContext
resources.DeploymentPodRenderer
resources.DeploymentImageManager
resources.DeploymentCLIGetter
resources.DeploymentModInterfaces
resources.DeploymentCachedStatus
// GetAPIObject returns the deployment as k8s object.
GetAPIObject() k8sutil.APIObject
@ -117,8 +118,6 @@ type Context interface {
EnableScalingCluster(ctx context.Context) error
// GetAgencyData object for key path
GetAgencyData(ctx context.Context, i interface{}, keyParts ...string) error
// SecretsInterface return secret interface
SecretsInterface() k8sutil.SecretInterface
// GetBackup receives information about a backup resource
GetBackup(ctx context.Context, backup string) (*backupApi.ArangoBackup, error)
// GetName receives deployment name

View file

@ -46,6 +46,8 @@ type PlanBuilderContext interface {
resources.ArangoMemberContext
resources.DeploymentPodRenderer
resources.DeploymentImageManager
resources.DeploymentModInterfaces
resources.DeploymentCachedStatus
// GetTLSKeyfile returns the keyfile encoded TLS certificate+key for
// the given member.
@ -72,8 +74,6 @@ type PlanBuilderContext interface {
GetServerClient(ctx context.Context, group api.ServerGroup, id string) (driver.Client, error)
// GetAuthentication return authentication for members
GetAuthentication() conn.Auth
// SecretsInterface return secret interface
SecretsInterface() k8sutil.SecretInterface
// GetBackup receives information about a backup resource
GetBackup(ctx context.Context, backup string) (*backupApi.ArangoBackup, error)
// GetName receives deployment name

View file

@ -110,7 +110,7 @@ func createRestorePlanEncryption(ctx context.Context, log zerolog.Logger, spec a
secret := *spec.RestoreEncryptionSecret
// Additional logic to do restore with encryption key
name, _, exists, err := pod.GetEncryptionKey(ctx, builderCtx.SecretsInterface(), secret)
name, _, exists, err := pod.GetEncryptionKey(ctx, builderCtx.GetCachedStatus().SecretReadInterface(), secret)
if err != nil {
log.Err(err).Msgf("Unable to fetch encryption key")
return false, nil

View file

@ -29,6 +29,16 @@ import (
"io/ioutil"
"testing"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1"
"k8s.io/client-go/kubernetes"
@ -79,6 +89,42 @@ type testContext struct {
RecordedEvent *k8sutil.Event
}
func (c *testContext) SecretsModInterface() secret.ModInterface {
panic("implement me")
}
func (c *testContext) PodsModInterface() pod.ModInterface {
panic("implement me")
}
func (c *testContext) ServiceAccountsModInterface() serviceaccount.ModInterface {
panic("implement me")
}
func (c *testContext) ServicesModInterface() service.ModInterface {
panic("implement me")
}
func (c *testContext) PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface {
panic("implement me")
}
func (c *testContext) PodDisruptionBudgetsModInterface() poddisruptionbudget.ModInterface {
panic("implement me")
}
func (c *testContext) ServiceMonitorsModInterface() servicemonitor.ModInterface {
panic("implement me")
}
func (c *testContext) ArangoMembersModInterface() arangomember.ModInterface {
panic("implement me")
}
func (c *testContext) GetCachedStatus() inspectorInterface.Inspector {
panic("implement me")
}
func (c *testContext) WithStatusUpdateErr(ctx context.Context, action resources.DeploymentStatusUpdateErrFunc, force ...bool) error {
panic("implement me")
}
@ -163,7 +209,7 @@ func (c *testContext) GetBackup(_ context.Context, backup string) (*backupApi.Ar
panic("implement me")
}
func (c *testContext) SecretsInterface() k8sutil.SecretInterface {
func (c *testContext) SecretsInterface() secret.Interface {
panic("implement me")
}
@ -602,12 +648,13 @@ type testCase struct {
PDBS map[string]*policy.PodDisruptionBudget
ServiceMonitors map[string]*monitoring.ServiceMonitor
ArangoMembers map[string]*api.ArangoMember
Nodes map[string]*core.Node
Extender func(t *testing.T, r *Reconciler, c *testCase)
}
func (t testCase) Inspector() inspectorInterface.Inspector {
return inspector.NewInspectorFromData(t.Pods, t.Secrets, t.PVCS, t.Services, t.ServiceAccounts, t.PDBS, t.ServiceMonitors, t.ArangoMembers)
return inspector.NewInspectorFromData(t.Pods, t.Secrets, t.PVCS, t.Services, t.ServiceAccounts, t.PDBS, t.ServiceMonitors, t.ArangoMembers, t.Nodes)
}
func TestCreatePlan(t *testing.T) {

View file

@ -32,6 +32,8 @@ import (
"reflect"
"time"
memberTls "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/tls"
"github.com/arangodb/go-driver"
"github.com/arangodb/kube-arangodb/pkg/deployment/features"
@ -450,6 +452,14 @@ func keyfileRenewalRequired(ctx context.Context,
return false, false
}
memberName := member.ArangoMemberName(apiObject.GetName(), group)
service, ok := cachedStatus.Service(memberName)
if !ok {
log.Warn().Str("service", memberName).Msg("Service does not exists")
return false, false
}
caSecret, exists := cachedStatus.Secret(spec.TLS.GetCASecretName())
if !exists {
log.Warn().Str("secret", spec.TLS.GetCASecretName()).Msg("CA Secret does not exists")
@ -493,6 +503,26 @@ func keyfileRenewalRequired(ctx context.Context,
log.Info().Msg("Renewal margin exceeded")
return true, true
}
// Verify AltNames
altNames, err := memberTls.GetServerAltNames(apiObject, spec, spec.TLS, service, group, member)
if err != nil {
log.Warn().Msg("Unable to render alt names")
return false, false
}
var dnsNames = cert.DNSNames
for _, ip := range cert.IPAddresses {
dnsNames = append(dnsNames, ip.String())
}
if a := util.DiffStrings(altNames.AltNames, dnsNames); len(a) > 0 {
log.Info().Strs("AltNames Current", cert.DNSNames).
Strs("AltNames Expected", altNames.AltNames).
Msgf("Alt names are different")
return true, true
}
}
// Ensure secret is propagated only on 3.7.0+ enterprise and inplace mode
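The renewal check added above reduces to a set difference: the DNS names and IP addresses baked into the live keyfile certificate are compared against the alt names the operator would render for the member today, and any missing entry triggers rotation. A minimal, self-contained sketch of that comparison using only the Go standard library follows; expectedAltNames and diffStrings are illustrative stand-ins for the rendered alt names and for util.DiffStrings, not the operator's actual code.

package main

import (
	"crypto/x509"
	"fmt"
)

// diffStrings returns the entries of expected that are missing from actual
// (an illustrative stand-in for the operator's util.DiffStrings helper).
func diffStrings(expected, actual []string) []string {
	seen := map[string]struct{}{}
	for _, a := range actual {
		seen[a] = struct{}{}
	}
	var missing []string
	for _, e := range expected {
		if _, ok := seen[e]; !ok {
			missing = append(missing, e)
		}
	}
	return missing
}

// renewalRequiredForAltNames reports whether the certificate no longer covers
// every expected alt name; DNS names and IP addresses are compared together.
func renewalRequiredForAltNames(cert *x509.Certificate, expectedAltNames []string) bool {
	current := append([]string{}, cert.DNSNames...)
	for _, ip := range cert.IPAddresses {
		current = append(current, ip.String())
	}
	return len(diffStrings(expectedAltNames, current)) > 0
}

func main() {
	// A certificate that only covers the internal service name.
	cert := &x509.Certificate{DNSNames: []string{"deployment-member.ns.svc"}}
	// An extra alt name was added to the spec, so rotation is required: prints true.
	fmt.Println(renewalRequiredForAltNames(cert, []string{"deployment-member.ns.svc", "10.0.0.12"}))
}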

View file

@ -44,14 +44,11 @@ import (
type PatchFunc func(name string, d []byte) error
func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
kubecli := r.context.GetKubeCli()
monitoringcli := r.context.GetMonitoringV1Cli()
log.Info().Msgf("Ensuring annotations")
patchSecret := func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := kubecli.CoreV1().Secrets(r.context.GetNamespace()).Patch(ctxChild, name, types.JSONPatchType, d,
_, err := r.context.SecretsModInterface().Patch(ctxChild, name, types.JSONPatchType, d,
meta.PatchOptions{})
return err
})
@ -68,7 +65,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto
patchServiceAccount := func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := kubecli.CoreV1().ServiceAccounts(r.context.GetNamespace()).Patch(ctxChild, name,
_, err := r.context.ServiceAccountsModInterface().Patch(ctxChild, name,
types.JSONPatchType, d, meta.PatchOptions{})
return err
})
@ -85,7 +82,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto
patchService := func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := kubecli.CoreV1().Services(r.context.GetNamespace()).Patch(ctxChild, name, types.JSONPatchType, d,
_, err := r.context.ServicesModInterface().Patch(ctxChild, name, types.JSONPatchType, d,
meta.PatchOptions{})
return err
})
@ -102,7 +99,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto
patchPDB := func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := kubecli.PolicyV1beta1().PodDisruptionBudgets(r.context.GetNamespace()).Patch(ctxChild, name,
_, err := r.context.PodDisruptionBudgetsModInterface().Patch(ctxChild, name,
types.JSONPatchType, d, meta.PatchOptions{})
return err
})
@ -119,7 +116,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto
patchPVC := func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := kubecli.CoreV1().PersistentVolumeClaims(r.context.GetNamespace()).Patch(ctxChild, name,
_, err := r.context.PersistentVolumeClaimsModInterface().Patch(ctxChild, name,
types.JSONPatchType, d, meta.PatchOptions{})
return err
})
@ -136,7 +133,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto
patchPod := func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := kubecli.CoreV1().Pods(r.context.GetNamespace()).Patch(ctxChild, name, types.JSONPatchType, d,
_, err := r.context.PodsModInterface().Patch(ctxChild, name, types.JSONPatchType, d,
meta.PatchOptions{})
return err
})
@ -153,7 +150,7 @@ func (r *Resources) EnsureAnnotations(ctx context.Context, cachedStatus inspecto
patchServiceMonitor := func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := monitoringcli.ServiceMonitors(r.context.GetNamespace()).Patch(ctxChild, name, types.JSONPatchType, d,
_, err := r.context.ServiceMonitorsModInterface().Patch(ctxChild, name, types.JSONPatchType, d,
meta.PatchOptions{})
return err
})
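Every closure in EnsureAnnotations now follows the same shape: build a JSON patch and hand it to the matching mod interface rather than reaching for the raw kubernetes client. A rough sketch of that shape against a plain client-go secrets client is given below; the annotation payload, namespace handling, and function name are illustrative assumptions, not taken from the operator.

package example

import (
	"context"
	"encoding/json"

	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
)

// patchSecretAnnotations sets the given annotations on a secret through a JSON patch,
// mirroring the shape of the patchSecret closure above. "add" is used instead of
// "replace" so the patch also succeeds when the object has no annotations yet.
func patchSecretAnnotations(ctx context.Context, client kubernetes.Interface, namespace, name string, annotations map[string]string) error {
	patch, err := json.Marshal([]map[string]interface{}{
		{"op": "add", "path": "/metadata/annotations", "value": annotations},
	})
	if err != nil {
		return err
	}
	_, err = client.CoreV1().Secrets(namespace).Patch(ctx, name, types.JSONPatchType, patch, meta.PatchOptions{})
	return err
}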

View file

@ -28,6 +28,8 @@ import (
"fmt"
"time"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
"github.com/rs/zerolog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -43,7 +45,7 @@ const (
// createClientAuthCACertificate creates a client authentication CA certificate and stores it in a secret with name
// specified in the given spec.
func createClientAuthCACertificate(ctx context.Context, log zerolog.Logger, secrets k8sutil.SecretInterface, spec api.SyncAuthenticationSpec, deploymentName string, ownerRef *metav1.OwnerReference) error {
func createClientAuthCACertificate(ctx context.Context, log zerolog.Logger, secrets secret.ModInterface, spec api.SyncAuthenticationSpec, deploymentName string, ownerRef *metav1.OwnerReference) error {
log = log.With().Str("secret", spec.GetClientCASecretName()).Logger()
options := certificates.CreateCertificateOptions{
CommonName: fmt.Sprintf("%s Client Authentication Root Certificate", deploymentName),

View file

@ -29,15 +29,18 @@ import (
"strings"
"time"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/tls"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
certificates "github.com/arangodb-helper/go-certificates"
"github.com/rs/zerolog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
v1 "k8s.io/client-go/kubernetes/typed/core/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/rs/zerolog"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
@ -47,7 +50,7 @@ const (
// createTLSCACertificate creates a CA certificate and stores it in a secret with name
// specified in the given spec.
func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets k8sutil.SecretInterface, spec api.TLSSpec,
func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets secret.ModInterface, spec api.TLSSpec,
deploymentName string, ownerRef *metav1.OwnerReference) error {
log = log.With().Str("secret", spec.GetCASecretName()).Logger()
@ -77,21 +80,14 @@ func createTLSCACertificate(ctx context.Context, log zerolog.Logger, secrets k8s
// createTLSServerCertificate creates a TLS certificate for a specific server and stores
// it in a secret with the given name.
func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, secrets v1.SecretInterface, serverNames []string, spec api.TLSSpec,
func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, names tls.KeyfileInput, spec api.TLSSpec,
secretName string, ownerRef *metav1.OwnerReference) (bool, error) {
log = log.With().Str("secret", secretName).Logger()
// Load alt names
dnsNames, ipAddresses, emailAddress, err := spec.GetParsedAltNames()
if err != nil {
log.Debug().Err(err).Msg("Failed to get alternate names")
return false, errors.WithStack(err)
}
// Load CA certificate
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
caCert, caKey, _, err := k8sutil.GetCASecret(ctxChild, secrets, spec.GetCASecretName(), nil)
caCert, caKey, _, err := k8sutil.GetCASecret(ctxChild, cachedStatus.SecretReadInterface(), spec.GetCASecretName(), nil)
if err != nil {
log.Debug().Err(err).Msg("Failed to load CA certificate")
return false, errors.WithStack(err)
@ -103,9 +99,9 @@ func createTLSServerCertificate(ctx context.Context, log zerolog.Logger, secrets
}
options := certificates.CreateCertificateOptions{
CommonName: serverNames[0],
Hosts: append(append(serverNames, dnsNames...), ipAddresses...),
EmailAddresses: emailAddress,
CommonName: names.AltNames[0],
Hosts: names.AltNames,
EmailAddresses: names.Email,
ValidFrom: time.Now(),
ValidFor: spec.GetTTL().AsDuration(),
IsCA: false,

View file

@ -26,9 +26,14 @@ package resources
import (
"context"
"github.com/arangodb/kube-arangodb/pkg/generated/clientset/versioned"
monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1"
"k8s.io/client-go/kubernetes"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/poddisruptionbudget"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor"
"github.com/arangodb/kube-arangodb/pkg/operator/scope"
@ -87,13 +92,30 @@ type DeploymentImageManager interface {
SelectImageForMember(spec api.DeploymentSpec, status api.DeploymentStatus, member api.MemberStatus) (api.ImageInfo, bool)
}
type DeploymentCLIGetter interface {
// GetKubeCli returns the kubernetes client
GetKubeCli() kubernetes.Interface
// GetMonitoringV1Cli returns monitoring client
GetMonitoringV1Cli() monitoringClient.MonitoringV1Interface
// GetArangoCli returns the Arango CRD client
GetArangoCli() versioned.Interface
type DeploymentModInterfaces interface {
// SecretsModInterface defines the secrets modification interface
SecretsModInterface() secret.ModInterface
// PodsModInterface defines the pods modification interface
PodsModInterface() pod.ModInterface
// ServiceAccountsModInterface defines the serviceaccounts modification interface
ServiceAccountsModInterface() serviceaccount.ModInterface
// ServicesModInterface defines the services modification interface
ServicesModInterface() service.ModInterface
// PersistentVolumeClaimsModInterface defines the persistentvolumeclaims modification interface
PersistentVolumeClaimsModInterface() persistentvolumeclaim.ModInterface
// PodDisruptionBudgetsModInterface defines the poddisruptionbudgets modification interface
PodDisruptionBudgetsModInterface() poddisruptionbudget.ModInterface
// ServiceMonitorsModInterface defines the servicemonitors modification interface
ServiceMonitorsModInterface() servicemonitor.ModInterface
// ArangoMembersModInterface defines the arangomembers modification interface
ArangoMembersModInterface() arangomember.ModInterface
}
type DeploymentCachedStatus interface {
// GetCachedStatus returns the current cached state of the deployment
GetCachedStatus() inspectorInterface.Inspector
}
type ArangoMemberUpdateFunc func(obj *api.ArangoMember) bool
@ -113,7 +135,8 @@ type Context interface {
DeploymentAgencyMaintenance
ArangoMemberContext
DeploymentImageManager
DeploymentCLIGetter
DeploymentModInterfaces
DeploymentCachedStatus
// GetAPIObject returns the deployment as k8s object.
GetAPIObject() k8sutil.APIObject
@ -163,6 +186,5 @@ type Context interface {
GetBackup(ctx context.Context, backup string) (*backupApi.ArangoBackup, error)
GetScope() scope.Scope
GetCachedStatus() inspectorInterface.Inspector
SetCachedStatus(i inspectorInterface.Inspector)
}
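With DeploymentCLIGetter and SecretsInterface gone, callers of the context now state whether they need read access (served by the cached inspector) or write access (the typed mod interfaces). The sketch below illustrates that read/mod split with hypothetical minimal interfaces; SecretReader, SecretModifier, and rotateIfPresent are names assumed for illustration, not the operator's real types.

package example

import (
	"context"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// SecretReader is a hypothetical read-only view, typically backed by a cache.
type SecretReader interface {
	Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Secret, error)
}

// SecretModifier is a hypothetical write-side interface backed by the API server.
type SecretModifier interface {
	Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (*core.Secret, error)
}

// rotateIfPresent reads through the cache and writes through the API server,
// which is the pattern the reconciliation actions follow after this change.
func rotateIfPresent(ctx context.Context, read SecretReader, mod SecretModifier, name string, patch []byte) error {
	if _, err := read.Get(ctx, name, meta.GetOptions{}); err != nil {
		return err
	}
	_, err := mod.Patch(ctx, name, types.JSONPatchType, patch, meta.PatchOptions{})
	return err
}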

View file

@ -27,6 +27,8 @@ import (
"context"
"sync"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -63,8 +65,33 @@ func NewInspector(ctx context.Context, k kubernetes.Interface, m monitoringClien
return i, nil
}
func newInspector(ctx context.Context, k kubernetes.Interface, m monitoringClient.MonitoringV1Interface, c versioned.Interface, namespace string) (*inspector, error) {
var i inspector
i.namespace = namespace
i.k = k
i.m = m
i.c = c
if err := util.RunParallel(15,
podsToMap(ctx, &i, k, namespace),
secretsToMap(ctx, &i, k, namespace),
pvcsToMap(ctx, &i, k, namespace),
servicesToMap(ctx, &i, k, namespace),
serviceAccountsToMap(ctx, &i, k, namespace),
podDisruptionBudgetsToMap(ctx, &i, k, namespace),
serviceMonitorsToMap(ctx, &i, m, namespace),
arangoMembersToMap(ctx, &i, c, namespace),
nodesToMap(ctx, &i, k),
); err != nil {
return nil, err
}
return &i, nil
}
func NewEmptyInspector() inspectorInterface.Inspector {
return NewInspectorFromData(nil, nil, nil, nil, nil, nil, nil, nil)
return NewInspectorFromData(nil, nil, nil, nil, nil, nil, nil, nil, nil)
}
func NewInspectorFromData(pods map[string]*core.Pod,
@ -74,8 +101,9 @@ func NewInspectorFromData(pods map[string]*core.Pod,
serviceAccounts map[string]*core.ServiceAccount,
podDisruptionBudgets map[string]*policy.PodDisruptionBudget,
serviceMonitors map[string]*monitoring.ServiceMonitor,
arangoMembers map[string]*api.ArangoMember) inspectorInterface.Inspector {
return &inspector{
arangoMembers map[string]*api.ArangoMember,
nodes map[string]*core.Node) inspectorInterface.Inspector {
i := &inspector{
pods: pods,
secrets: secrets,
pvcs: pvcs,
@ -85,6 +113,20 @@ func NewInspectorFromData(pods map[string]*core.Pod,
serviceMonitors: serviceMonitors,
arangoMembers: arangoMembers,
}
if nodes == nil {
i.nodes = &nodeLoader{
authenticated: false,
nodes: nil,
}
} else {
i.nodes = &nodeLoader{
authenticated: true,
nodes: nodes,
}
}
return i
}
type inspector struct {
@ -104,6 +146,11 @@ type inspector struct {
podDisruptionBudgets map[string]*policy.PodDisruptionBudget
serviceMonitors map[string]*monitoring.ServiceMonitor
arangoMembers map[string]*api.ArangoMember
nodes *nodeLoader
}
func (i *inspector) IsStatic() bool {
return i.namespace == ""
}
func (i *inspector) Refresh(ctx context.Context) error {
@ -111,57 +158,23 @@ func (i *inspector) Refresh(ctx context.Context) error {
defer i.lock.Unlock()
if i.namespace == "" {
return errors.New("Inspector created fro mstatic data")
return errors.New("Inspector created from static data")
}
pods, err := podsToMap(ctx, i.k, i.namespace)
new, err := newInspector(ctx, i.k, i.m, i.c, i.namespace)
if err != nil {
return err
}
secrets, err := secretsToMap(ctx, i.k, i.namespace)
if err != nil {
return err
}
pvcs, err := pvcsToMap(ctx, i.k, i.namespace)
if err != nil {
return err
}
services, err := servicesToMap(ctx, i.k, i.namespace)
if err != nil {
return err
}
serviceAccounts, err := serviceAccountsToMap(ctx, i.k, i.namespace)
if err != nil {
return err
}
podDisruptionBudgets, err := podDisruptionBudgetsToMap(ctx, i.k, i.namespace)
if err != nil {
return err
}
serviceMonitors, err := serviceMonitorsToMap(ctx, i.m, i.namespace)
if err != nil {
return err
}
arangoMembers, err := arangoMembersToMap(ctx, i.c, i.namespace)
if err != nil {
return err
}
i.pods = pods
i.secrets = secrets
i.pvcs = pvcs
i.services = services
i.serviceAccounts = serviceAccounts
i.podDisruptionBudgets = podDisruptionBudgets
i.serviceMonitors = serviceMonitors
i.arangoMembers = arangoMembers
i.pods = new.pods
i.secrets = new.secrets
i.pvcs = new.pvcs
i.services = new.services
i.serviceAccounts = new.serviceAccounts
i.podDisruptionBudgets = new.podDisruptionBudgets
i.serviceMonitors = new.serviceMonitors
i.arangoMembers = new.arangoMembers
i.nodes = new.nodes
return nil
}
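Refresh now simply builds a fresh inspector and swaps its maps in under the lock, while newInspector loads every resource kind concurrently through util.RunParallel. A rough equivalent of that fan-out using golang.org/x/sync/errgroup is sketched below as an assumption about the shape of the helper, not its actual implementation; the real loaders also capture ctx and a client, as the closures above do, and the real helper caps concurrency at 15.

package example

import (
	"golang.org/x/sync/errgroup"
)

// loadAll runs every loader concurrently and returns the first error, roughly
// what util.RunParallel does for the inspector's per-resource loaders.
func loadAll(loaders ...func() error) error {
	var g errgroup.Group
	for _, l := range loaders {
		l := l // capture the loop variable for the goroutine
		g.Go(l)
	}
	return g.Wait()
}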

View file

@ -26,6 +26,10 @@ package inspector
import (
"context"
"github.com/arangodb/kube-arangodb/pkg/apis/deployment"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
@ -35,7 +39,7 @@ import (
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (i *inspector) IterateArangoMembers(action arangomember.ArangoMemberAction, filters ...arangomember.ArangoMemberFilter) error {
func (i *inspector) IterateArangoMembers(action arangomember.Action, filters ...arangomember.Filter) error {
for _, arangoMember := range i.ArangoMembers() {
if err := i.iterateArangoMembers(arangoMember, action, filters...); err != nil {
return err
@ -44,7 +48,7 @@ func (i *inspector) IterateArangoMembers(action arangomember.ArangoMemberAction,
return nil
}
func (i *inspector) iterateArangoMembers(arangoMember *api.ArangoMember, action arangomember.ArangoMemberAction, filters ...arangomember.ArangoMemberFilter) error {
func (i *inspector) iterateArangoMembers(arangoMember *api.ArangoMember, action arangomember.Action, filters ...arangomember.Filter) error {
for _, filter := range filters {
if !filter(arangoMember) {
return nil
@ -78,10 +82,30 @@ func (i *inspector) ArangoMember(name string) (*api.ArangoMember, bool) {
return arangoMember, true
}
func arangoMembersToMap(ctx context.Context, k versioned.Interface, namespace string) (map[string]*api.ArangoMember, error) {
func (i *inspector) ArangoMemberReadInterface() arangomember.ReadInterface {
return &arangoMemberReadInterface{i: i}
}
type arangoMemberReadInterface struct {
i *inspector
}
func (s arangoMemberReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoMember, error) {
if s, ok := s.i.ArangoMember(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: deployment.ArangoDeploymentGroupName,
Resource: "arangomembers",
}, name)
} else {
return s, nil
}
}
func arangoMembersToMap(ctx context.Context, inspector *inspector, k versioned.Interface, namespace string) func() error {
return func() error {
arangoMembers, err := getArangoMembers(ctx, k, namespace, "")
if err != nil {
return nil, err
return err
}
arangoMemberMap := map[string]*api.ArangoMember{}
@ -89,13 +113,16 @@ func arangoMembersToMap(ctx context.Context, k versioned.Interface, namespace st
for _, arangoMember := range arangoMembers {
_, exists := arangoMemberMap[arangoMember.GetName()]
if exists {
return nil, errors.Newf("ArangoMember %s already exists in map, error received", arangoMember.GetName())
return errors.Newf("ArangoMember %s already exists in map, error received", arangoMember.GetName())
}
arangoMemberMap[arangoMember.GetName()] = arangoMemberPointer(arangoMember)
}
return arangoMemberMap, nil
inspector.arangoMembers = arangoMemberMap
return nil
}
}
func arangoMemberPointer(pod api.ArangoMember) *api.ArangoMember {

View file

@ -0,0 +1,169 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package inspector
import (
"context"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/node"
core "k8s.io/api/core/v1"
policy "k8s.io/api/policy/v1beta1"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/kubernetes"
)
func (i *inspector) GetNodes() (node.Inspector, bool) {
i.lock.Lock()
defer i.lock.Unlock()
if i.nodes == nil {
return nil, false
}
return i.nodes, i.nodes.authenticated
}
type nodeLoader struct {
authenticated bool
nodes map[string]*core.Node
}
func (n *nodeLoader) Node(name string) (*core.Node, bool) {
node, ok := n.nodes[name]
if !ok {
return nil, false
}
return node, true
}
func (n *nodeLoader) Nodes() []*core.Node {
var r []*core.Node
for _, node := range n.nodes {
r = append(r, node)
}
return r
}
func (n *nodeLoader) IterateNodes(action node.Action, filters ...node.Filter) error {
for _, node := range n.Nodes() {
if err := n.iterateNode(node, action, filters...); err != nil {
return err
}
}
return nil
}
func (n *nodeLoader) iterateNode(node *core.Node, action node.Action, filters ...node.Filter) error {
for _, filter := range filters {
if !filter(node) {
return nil
}
}
return action(node)
}
func (n *nodeLoader) NodeReadInterface() node.ReadInterface {
return &nodeReadInterface{i: n}
}
type nodeReadInterface struct {
i *nodeLoader
}
func (s nodeReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Node, error) {
if s, ok := s.i.Node(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: core.GroupName,
Resource: "nodes",
}, name)
} else {
return s, nil
}
}
func nodePointer(pod core.Node) *core.Node {
return &pod
}
func nodesToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface) func() error {
return func() error {
nodes, err := getNodes(ctx, k, "")
if err != nil {
if apiErrors.IsUnauthorized(err) {
inspector.nodes = &nodeLoader{
authenticated: false,
}
return nil
}
return err
}
nodesMap := map[string]*core.Node{}
for _, node := range nodes {
_, exists := nodesMap[node.GetName()]
if exists {
return errors.Newf("ArangoMember %s already exists in map, error received", node.GetName())
}
nodesMap[node.GetName()] = nodePointer(node)
}
inspector.nodes = &nodeLoader{
authenticated: true,
nodes: nodesMap,
}
return nil
}
}
func getNodes(ctx context.Context, k kubernetes.Interface, cont string) ([]core.Node, error) {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
nodes, err := k.CoreV1().Nodes().List(ctxChild, meta.ListOptions{
Limit: 128,
Continue: cont,
})
if err != nil {
return nil, err
}
if nodes.Continue != "" {
nextNodeLayer, err := getNodes(ctx, k, nodes.Continue)
if err != nil {
return nil, err
}
return append(nodes.Items, nextNodeLayer...), nil
}
return nodes.Items, nil
}
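getNodes pages through the cluster's nodes recursively by following the Continue token. The same pattern can be written iteratively, which avoids deepening the call stack on very large clusters; a sketch against plain client-go, keeping the 128-item page size, is shown below.

package example

import (
	"context"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// listAllNodes collects every node by following the Continue token page by page.
func listAllNodes(ctx context.Context, k kubernetes.Interface) ([]core.Node, error) {
	var all []core.Node
	cont := ""
	for {
		nodes, err := k.CoreV1().Nodes().List(ctx, meta.ListOptions{Limit: 128, Continue: cont})
		if err != nil {
			return nil, err
		}
		all = append(all, nodes.Items...)
		if nodes.Continue == "" {
			return all, nil
		}
		cont = nodes.Continue
	}
}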

View file

@ -26,6 +26,9 @@ package inspector
import (
"context"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
@ -35,7 +38,7 @@ import (
"k8s.io/client-go/kubernetes"
)
func (i *inspector) IteratePodDisruptionBudgets(action poddisruptionbudget.PodDisruptionBudgetAction, filters ...poddisruptionbudget.PodDisruptionBudgetFilter) error {
func (i *inspector) IteratePodDisruptionBudgets(action poddisruptionbudget.Action, filters ...poddisruptionbudget.Filter) error {
for _, podDisruptionBudget := range i.PodDisruptionBudgets() {
if err := i.iteratePodDisruptionBudget(podDisruptionBudget, action, filters...); err != nil {
return err
@ -44,7 +47,7 @@ func (i *inspector) IteratePodDisruptionBudgets(action poddisruptionbudget.PodDi
return nil
}
func (i *inspector) iteratePodDisruptionBudget(podDisruptionBudget *policy.PodDisruptionBudget, action poddisruptionbudget.PodDisruptionBudgetAction, filters ...poddisruptionbudget.PodDisruptionBudgetFilter) error {
func (i *inspector) iteratePodDisruptionBudget(podDisruptionBudget *policy.PodDisruptionBudget, action poddisruptionbudget.Action, filters ...poddisruptionbudget.Filter) error {
for _, filter := range filters {
if !filter(podDisruptionBudget) {
return nil
@ -78,10 +81,30 @@ func (i *inspector) PodDisruptionBudget(name string) (*policy.PodDisruptionBudge
return podDisruptionBudget, true
}
func podDisruptionBudgetsToMap(ctx context.Context, k kubernetes.Interface, namespace string) (map[string]*policy.PodDisruptionBudget, error) {
func (i *inspector) PodDisruptionBudgetReadInterface() poddisruptionbudget.ReadInterface {
return &podDisruptionBudgetReadInterface{i: i}
}
type podDisruptionBudgetReadInterface struct {
i *inspector
}
func (s podDisruptionBudgetReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*policy.PodDisruptionBudget, error) {
if s, ok := s.i.PodDisruptionBudget(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: policy.GroupName,
Resource: "poddisruptionbudgets",
}, name)
} else {
return s, nil
}
}
func podDisruptionBudgetsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error {
return func() error {
podDisruptionBudgets, err := getPodDisruptionBudgets(ctx, k, namespace, "")
if err != nil {
return nil, err
return err
}
podDisruptionBudgetMap := map[string]*policy.PodDisruptionBudget{}
@ -89,13 +112,16 @@ func podDisruptionBudgetsToMap(ctx context.Context, k kubernetes.Interface, name
for _, podDisruptionBudget := range podDisruptionBudgets {
_, exists := podDisruptionBudgetMap[podDisruptionBudget.GetName()]
if exists {
return nil, errors.Newf("PodDisruptionBudget %s already exists in map, error received", podDisruptionBudget.GetName())
return errors.Newf("PodDisruptionBudget %s already exists in map, error received", podDisruptionBudget.GetName())
}
podDisruptionBudgetMap[podDisruptionBudget.GetName()] = podDisruptionBudgetPointer(podDisruptionBudget)
}
return podDisruptionBudgetMap, nil
inspector.podDisruptionBudgets = podDisruptionBudgetMap
return nil
}
}
func podDisruptionBudgetPointer(podDisruptionBudget policy.PodDisruptionBudget) *policy.PodDisruptionBudget {
@ -126,7 +152,7 @@ func getPodDisruptionBudgets(ctx context.Context, k kubernetes.Interface, namesp
return podDisruptionBudgets.Items, nil
}
func FilterPodDisruptionBudgetsByLabels(labels map[string]string) poddisruptionbudget.PodDisruptionBudgetFilter {
func FilterPodDisruptionBudgetsByLabels(labels map[string]string) poddisruptionbudget.Filter {
return func(podDisruptionBudget *policy.PodDisruptionBudget) bool {
for key, value := range labels {
v, ok := podDisruptionBudget.Labels[key]

View file

@ -26,6 +26,9 @@ package inspector
import (
"context"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
@ -77,10 +80,30 @@ func (i *inspector) Pod(name string) (*core.Pod, bool) {
return pod, true
}
func podsToMap(ctx context.Context, k kubernetes.Interface, namespace string) (map[string]*core.Pod, error) {
func (i *inspector) PodReadInterface() pod.ReadInterface {
return &podReadInterface{i: i}
}
type podReadInterface struct {
i *inspector
}
func (s podReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Pod, error) {
if s, ok := s.i.Pod(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: core.GroupName,
Resource: "pods",
}, name)
} else {
return s, nil
}
}
func podsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error {
return func() error {
pods, err := getPods(ctx, k, namespace, "")
if err != nil {
return nil, err
return err
}
podMap := map[string]*core.Pod{}
@ -88,13 +111,16 @@ func podsToMap(ctx context.Context, k kubernetes.Interface, namespace string) (m
for _, pod := range pods {
_, exists := podMap[pod.GetName()]
if exists {
return nil, errors.Newf("Pod %s already exists in map, error received", pod.GetName())
return errors.Newf("Pod %s already exists in map, error received", pod.GetName())
}
podMap[pod.GetName()] = podPointer(pod)
}
return podMap, nil
inspector.pods = podMap
return nil
}
}
func podPointer(pod core.Pod) *core.Pod {

View file

@ -26,6 +26,9 @@ package inspector
import (
"context"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
@ -35,7 +38,7 @@ import (
"k8s.io/client-go/kubernetes"
)
func (i *inspector) IteratePersistentVolumeClaims(action persistentvolumeclaim.PersistentVolumeClaimAction, filters ...persistentvolumeclaim.PersistentVolumeClaimFilter) error {
func (i *inspector) IteratePersistentVolumeClaims(action persistentvolumeclaim.Action, filters ...persistentvolumeclaim.Filter) error {
for _, pvc := range i.PersistentVolumeClaims() {
if err := i.iteratePersistentVolumeClaim(pvc, action, filters...); err != nil {
return err
@ -44,7 +47,7 @@ func (i *inspector) IteratePersistentVolumeClaims(action persistentvolumeclaim.P
return nil
}
func (i *inspector) iteratePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, action persistentvolumeclaim.PersistentVolumeClaimAction, filters ...persistentvolumeclaim.PersistentVolumeClaimFilter) error {
func (i *inspector) iteratePersistentVolumeClaim(pvc *core.PersistentVolumeClaim, action persistentvolumeclaim.Action, filters ...persistentvolumeclaim.Filter) error {
for _, filter := range filters {
if !filter(pvc) {
return nil
@ -78,10 +81,30 @@ func (i *inspector) PersistentVolumeClaim(name string) (*core.PersistentVolumeCl
return pvc, true
}
func pvcsToMap(ctx context.Context, k kubernetes.Interface, namespace string) (map[string]*core.PersistentVolumeClaim, error) {
func (i *inspector) PersistentVolumeClaimReadInterface() persistentvolumeclaim.ReadInterface {
return &persistentVolumeClaimReadInterface{i: i}
}
type persistentVolumeClaimReadInterface struct {
i *inspector
}
func (s persistentVolumeClaimReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.PersistentVolumeClaim, error) {
if s, ok := s.i.PersistentVolumeClaim(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: core.GroupName,
Resource: "persistentvolumeclaims",
}, name)
} else {
return s, nil
}
}
func pvcsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error {
return func() error {
pvcs, err := getPersistentVolumeClaims(ctx, k, namespace, "")
if err != nil {
return nil, err
return err
}
pvcMap := map[string]*core.PersistentVolumeClaim{}
@ -89,13 +112,16 @@ func pvcsToMap(ctx context.Context, k kubernetes.Interface, namespace string) (m
for _, pvc := range pvcs {
_, exists := pvcMap[pvc.GetName()]
if exists {
return nil, errors.Newf("PersistentVolumeClaim %s already exists in map, error received", pvc.GetName())
return errors.Newf("PersistentVolumeClaim %s already exists in map, error received", pvc.GetName())
}
pvcMap[pvc.GetName()] = pvcPointer(pvc)
}
return pvcMap, nil
inspector.pvcs = pvcMap
return nil
}
}
func pvcPointer(pvc core.PersistentVolumeClaim) *core.PersistentVolumeClaim {
@ -126,7 +152,7 @@ func getPersistentVolumeClaims(ctx context.Context, k kubernetes.Interface, name
return pvcs.Items, nil
}
func FilterPersistentVolumeClaimsByLabels(labels map[string]string) persistentvolumeclaim.PersistentVolumeClaimFilter {
func FilterPersistentVolumeClaimsByLabels(labels map[string]string) persistentvolumeclaim.Filter {
return func(pvc *core.PersistentVolumeClaim) bool {
for key, value := range labels {
v, ok := pvc.Labels[key]

View file

@ -26,6 +26,9 @@ package inspector
import (
"context"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
@ -78,10 +81,30 @@ func (i *inspector) ServiceAccount(name string) (*core.ServiceAccount, bool) {
return serviceAccount, true
}
func serviceAccountsToMap(ctx context.Context, k kubernetes.Interface, namespace string) (map[string]*core.ServiceAccount, error) {
func (i *inspector) ServiceAccountReadInterface() serviceaccount.ReadInterface {
return &serviceAccountReadInterface{i: i}
}
type serviceAccountReadInterface struct {
i *inspector
}
func (s serviceAccountReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.ServiceAccount, error) {
if s, ok := s.i.ServiceAccount(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: core.GroupName,
Resource: "serviceaccounts",
}, name)
} else {
return s, nil
}
}
func serviceAccountsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error {
return func() error {
serviceAccounts, err := getServiceAccounts(ctx, k, namespace, "")
if err != nil {
return nil, err
return err
}
serviceAccountMap := map[string]*core.ServiceAccount{}
@ -89,13 +112,16 @@ func serviceAccountsToMap(ctx context.Context, k kubernetes.Interface, namespace
for _, serviceAccount := range serviceAccounts {
_, exists := serviceAccountMap[serviceAccount.GetName()]
if exists {
return nil, errors.Newf("ServiceAccount %s already exists in map, error received", serviceAccount.GetName())
return errors.Newf("ServiceAccount %s already exists in map, error received", serviceAccount.GetName())
}
serviceAccountMap[serviceAccount.GetName()] = serviceAccountPointer(serviceAccount)
}
return serviceAccountMap, nil
inspector.serviceAccounts = serviceAccountMap
return nil
}
}
func serviceAccountPointer(serviceAccount core.ServiceAccount) *core.ServiceAccount {

View file

@ -92,17 +92,18 @@ func (s secretReadInterface) Get(ctx context.Context, name string, opts meta.Get
if s, ok := s.i.Secret(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: core.GroupName,
Resource: "Secret",
Resource: "secrets",
}, name)
} else {
return s, nil
}
}
func secretsToMap(ctx context.Context, k kubernetes.Interface, namespace string) (map[string]*core.Secret, error) {
func secretsToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error {
return func() error {
secrets, err := getSecrets(ctx, k, namespace, "")
if err != nil {
return nil, err
return err
}
secretMap := map[string]*core.Secret{}
@ -110,13 +111,16 @@ func secretsToMap(ctx context.Context, k kubernetes.Interface, namespace string)
for _, secret := range secrets {
_, exists := secretMap[secret.GetName()]
if exists {
return nil, errors.Newf("Secret %s already exists in map, error received", secret.GetName())
return errors.Newf("Secret %s already exists in map, error received", secret.GetName())
}
secretMap[secret.GetName()] = secretPointer(secret)
}
return secretMap, nil
inspector.secrets = secretMap
return nil
}
}
func secretPointer(pod core.Secret) *core.Secret {

View file

@ -26,6 +26,9 @@ package inspector
import (
"context"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
@ -35,7 +38,7 @@ import (
"k8s.io/client-go/kubernetes"
)
func (i *inspector) IterateServices(action service.ServiceAction, filters ...service.ServiceFilter) error {
func (i *inspector) IterateServices(action service.Action, filters ...service.Filter) error {
for _, service := range i.Services() {
if err := i.iterateServices(service, action, filters...); err != nil {
return err
@ -44,7 +47,7 @@ func (i *inspector) IterateServices(action service.ServiceAction, filters ...ser
return nil
}
func (i *inspector) iterateServices(service *core.Service, action service.ServiceAction, filters ...service.ServiceFilter) error {
func (i *inspector) iterateServices(service *core.Service, action service.Action, filters ...service.Filter) error {
for _, filter := range filters {
if !filter(service) {
return nil
@ -66,6 +69,25 @@ func (i *inspector) Services() []*core.Service {
return r
}
func (i *inspector) ServiceReadInterface() service.ReadInterface {
return &serviceReadInterface{i: i}
}
type serviceReadInterface struct {
i *inspector
}
func (s serviceReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Service, error) {
if s, ok := s.i.Service(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: core.GroupName,
Resource: "services",
}, name)
} else {
return s, nil
}
}
func (i *inspector) Service(name string) (*core.Service, bool) {
i.lock.Lock()
defer i.lock.Unlock()
@ -78,10 +100,11 @@ func (i *inspector) Service(name string) (*core.Service, bool) {
return service, true
}
func servicesToMap(ctx context.Context, k kubernetes.Interface, namespace string) (map[string]*core.Service, error) {
func servicesToMap(ctx context.Context, inspector *inspector, k kubernetes.Interface, namespace string) func() error {
return func() error {
services, err := getServices(ctx, k, namespace, "")
if err != nil {
return nil, err
return err
}
serviceMap := map[string]*core.Service{}
@ -89,13 +112,16 @@ func servicesToMap(ctx context.Context, k kubernetes.Interface, namespace string
for _, service := range services {
_, exists := serviceMap[service.GetName()]
if exists {
return nil, errors.Newf("Service %s already exists in map, error received", service.GetName())
return errors.Newf("Service %s already exists in map, error received", service.GetName())
}
serviceMap[service.GetName()] = servicePointer(service)
}
return serviceMap, nil
inspector.services = serviceMap
return nil
}
}
func servicePointer(pod core.Service) *core.Service {

View file

@ -26,16 +26,20 @@ package inspector
import (
"context"
apiErrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/runtime/schema"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor"
monitoringGroup "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring"
monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
monitoringClient "github.com/prometheus-operator/prometheus-operator/pkg/client/versioned/typed/monitoring/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (i *inspector) IterateServiceMonitors(action servicemonitor.ServiceMonitorAction, filters ...servicemonitor.ServiceMonitorFilter) error {
func (i *inspector) IterateServiceMonitors(action servicemonitor.Action, filters ...servicemonitor.Filter) error {
for _, serviceMonitor := range i.ServiceMonitors() {
if err := i.iterateServiceMonitor(serviceMonitor, action, filters...); err != nil {
return err
@ -44,7 +48,7 @@ func (i *inspector) IterateServiceMonitors(action servicemonitor.ServiceMonitorA
return nil
}
func (i *inspector) iterateServiceMonitor(serviceMonitor *monitoring.ServiceMonitor, action servicemonitor.ServiceMonitorAction, filters ...servicemonitor.ServiceMonitorFilter) error {
func (i *inspector) iterateServiceMonitor(serviceMonitor *monitoring.ServiceMonitor, action servicemonitor.Action, filters ...servicemonitor.Filter) error {
for _, filter := range filters {
if !filter(serviceMonitor) {
return nil
@ -78,7 +82,27 @@ func (i *inspector) ServiceMonitor(name string) (*monitoring.ServiceMonitor, boo
return serviceMonitor, true
}
func serviceMonitorsToMap(ctx context.Context, m monitoringClient.MonitoringV1Interface, namespace string) (map[string]*monitoring.ServiceMonitor, error) {
func (i *inspector) ServiceMonitorReadInterface() servicemonitor.ReadInterface {
return &serviceMonitorReadInterface{i: i}
}
type serviceMonitorReadInterface struct {
i *inspector
}
func (s serviceMonitorReadInterface) Get(ctx context.Context, name string, opts meta.GetOptions) (*monitoring.ServiceMonitor, error) {
if s, ok := s.i.ServiceMonitor(name); !ok {
return nil, apiErrors.NewNotFound(schema.GroupResource{
Group: monitoringGroup.GroupName,
Resource: "servicemonitors",
}, name)
} else {
return s, nil
}
}
func serviceMonitorsToMap(ctx context.Context, inspector *inspector, m monitoringClient.MonitoringV1Interface, namespace string) func() error {
return func() error {
serviceMonitors := getServiceMonitors(ctx, m, namespace, "")
serviceMonitorMap := map[string]*monitoring.ServiceMonitor{}
@ -86,13 +110,16 @@ func serviceMonitorsToMap(ctx context.Context, m monitoringClient.MonitoringV1In
for _, serviceMonitor := range serviceMonitors {
_, exists := serviceMonitorMap[serviceMonitor.GetName()]
if exists {
return nil, errors.Newf("ServiceMonitor %s already exists in map, error received", serviceMonitor.GetName())
return errors.Newf("ServiceMonitor %s already exists in map, error received", serviceMonitor.GetName())
}
serviceMonitorMap[serviceMonitor.GetName()] = serviceMonitor
}
return serviceMonitorMap, nil
inspector.serviceMonitors = serviceMonitorMap
return nil
}
}
func getServiceMonitors(ctx context.Context, m monitoringClient.MonitoringV1Interface, namespace, cont string) []*monitoring.ServiceMonitor {
@ -110,7 +137,7 @@ func getServiceMonitors(ctx context.Context, m monitoringClient.MonitoringV1Inte
return serviceMonitors.Items
}
func FilterServiceMonitorsByLabels(labels map[string]string) servicemonitor.ServiceMonitorFilter {
func FilterServiceMonitorsByLabels(labels map[string]string) servicemonitor.Filter {
return func(serviceMonitor *monitoring.ServiceMonitor) bool {
for key, value := range labels {
v, ok := serviceMonitor.Labels[key]

View file

@ -75,7 +75,7 @@ func (r *Resources) EnsureSecretLabels(ctx context.Context, cachedStatus inspect
if err := cachedStatus.IterateSecrets(func(secret *core.Secret) error {
if ensureLabelsMap(secret.Kind, secret, r.context.GetSpec(), func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetKubeCli().CoreV1().Secrets(r.context.GetAPIObject().GetNamespace()).Patch(ctxChild,
_, err := r.context.SecretsModInterface().Patch(ctxChild,
name, types.JSONPatchType, d, meta.PatchOptions{})
return err
})
@ -102,8 +102,7 @@ func (r *Resources) EnsureServiceAccountsLabels(ctx context.Context, cachedStatu
if err := cachedStatus.IterateServiceAccounts(func(serviceAccount *core.ServiceAccount) error {
if ensureLabelsMap(serviceAccount.Kind, serviceAccount, r.context.GetSpec(), func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetKubeCli().CoreV1().ServiceAccounts(r.context.GetAPIObject().GetNamespace()).
Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
_, err := r.context.ServiceAccountsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
return err
})
}) {
@ -129,8 +128,7 @@ func (r *Resources) EnsureServicesLabels(ctx context.Context, cachedStatus inspe
if err := cachedStatus.IterateServices(func(service *core.Service) error {
if ensureLabelsMap(service.Kind, service, r.context.GetSpec(), func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetKubeCli().CoreV1().Services(r.context.GetAPIObject().GetNamespace()).Patch(ctxChild,
name, types.JSONPatchType, d, meta.PatchOptions{})
_, err := r.context.ServicesModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
return err
})
}) {
@ -156,8 +154,7 @@ func (r *Resources) EnsureServiceMonitorsLabels(ctx context.Context, cachedStatu
if err := cachedStatus.IterateServiceMonitors(func(serviceMonitor *monitoring.ServiceMonitor) error {
if ensureLabelsMap(serviceMonitor.Kind, serviceMonitor, r.context.GetSpec(), func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetMonitoringV1Cli().ServiceMonitors(r.context.GetAPIObject().GetNamespace()).
Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
_, err := r.context.ServiceMonitorsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
return err
})
}) {
@ -183,8 +180,7 @@ func (r *Resources) EnsurePodsLabels(ctx context.Context, cachedStatus inspector
if err := cachedStatus.IteratePods(func(pod *core.Pod) error {
if ensureGroupLabelsMap(pod.Kind, pod, r.context.GetSpec(), func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetKubeCli().CoreV1().Pods(r.context.GetAPIObject().GetNamespace()).Patch(ctxChild,
name, types.JSONPatchType, d, meta.PatchOptions{})
_, err := r.context.PodsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
return err
})
}) {
@ -210,8 +206,7 @@ func (r *Resources) EnsurePersistentVolumeClaimsLabels(ctx context.Context, cach
if err := cachedStatus.IteratePersistentVolumeClaims(func(persistentVolumeClaim *core.PersistentVolumeClaim) error {
if ensureGroupLabelsMap(persistentVolumeClaim.Kind, persistentVolumeClaim, r.context.GetSpec(), func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetKubeCli().CoreV1().PersistentVolumeClaims(r.context.GetAPIObject().GetNamespace()).
Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
_, err := r.context.PersistentVolumeClaimsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
return err
})
}) {
@ -237,8 +232,7 @@ func (r *Resources) EnsurePodDisruptionBudgetsLabels(ctx context.Context, cached
if err := cachedStatus.IteratePodDisruptionBudgets(func(budget *policy.PodDisruptionBudget) error {
if ensureLabelsMap(budget.Kind, budget, r.context.GetSpec(), func(name string, d []byte) error {
return k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetKubeCli().PolicyV1beta1().PodDisruptionBudgets(r.context.GetAPIObject().
GetNamespace()).Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
_, err := r.context.PodDisruptionBudgetsModInterface().Patch(ctxChild, name, types.JSONPatchType, d, meta.PatchOptions{})
return err
})
}) {

View file

@ -173,7 +173,7 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec
s, _ := r.context.GetStatus()
obj := r.context.GetAPIObject()
reconcileRequired := k8sutil.NewReconcile()
reconcileRequired := k8sutil.NewReconcile(cachedStatus)
if err := s.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error {
for _, member := range list {
@ -197,7 +197,7 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec
}
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetArangoCli().DatabaseV1().ArangoMembers(obj.GetNamespace()).Create(ctxChild, &a, metav1.CreateOptions{})
_, err := r.context.ArangoMembersModInterface().Create(ctxChild, &a, metav1.CreateOptions{})
return err
})
if err != nil {
@ -222,7 +222,7 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec
if changed {
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := r.context.GetArangoCli().DatabaseV1().ArangoMembers(obj.GetNamespace()).Update(ctxChild, m, metav1.UpdateOptions{})
_, err := r.context.ArangoMembersModInterface().Update(ctxChild, m, metav1.UpdateOptions{})
return err
})
if err != nil {
@ -240,7 +240,7 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec
return err
}
if err := reconcileRequired.Reconcile(); err != nil {
if err := reconcileRequired.Reconcile(ctx); err != nil {
return err
}
@ -251,7 +251,7 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec
// Remove member
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return r.context.GetArangoCli().DatabaseV1().ArangoMembers(obj.GetNamespace()).Delete(ctxChild, member.GetName(), metav1.DeleteOptions{})
return r.context.ArangoMembersModInterface().Delete(ctxChild, member.GetName(), metav1.DeleteOptions{})
})
if err != nil {
if !k8sutil.IsNotFound(err) {
@ -267,7 +267,7 @@ func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspec
return err
}
if err := reconcileRequired.Reconcile(); err != nil {
if err := reconcileRequired.Reconcile(ctx); err != nil {
return err
}

View file

@ -109,14 +109,13 @@ func newPDB(minAvail int, deplname string, group api.ServerGroup, owner metav1.O
func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup, wantedMinAvail int) error {
deplname := r.context.GetAPIObject().GetName()
pdbname := PDBNameForGroup(deplname, group)
pdbcli := r.context.GetKubeCli().PolicyV1beta1().PodDisruptionBudgets(r.context.GetNamespace())
log := r.log.With().Str("group", group.AsRole()).Logger()
for {
var pdb *policyv1beta1.PodDisruptionBudget
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
var err error
pdb, err = pdbcli.Get(ctxChild, pdbname, metav1.GetOptions{})
pdb, err = r.context.GetCachedStatus().PodDisruptionBudgetReadInterface().Get(ctxChild, pdbname, metav1.GetOptions{})
return err
})
if k8sutil.IsNotFound(err) {
@ -125,7 +124,7 @@ func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup
pdb := newPDB(wantedMinAvail, deplname, group, r.context.GetAPIObject().AsOwner())
log.Debug().Msg("Creating new PDB")
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := pdbcli.Create(ctxChild, pdb, metav1.CreateOptions{})
_, err := r.context.PodDisruptionBudgetsModInterface().Create(ctxChild, pdb, metav1.CreateOptions{})
return err
})
if err != nil {
@ -150,7 +149,7 @@ func (r *Resources) ensurePDBForGroup(ctx context.Context, group api.ServerGroup
if pdb.GetDeletionTimestamp() == nil {
// Update the PDB
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return pdbcli.Delete(ctxChild, pdbname, metav1.DeleteOptions{})
return r.context.PodDisruptionBudgetsModInterface().Delete(ctxChild, pdbname, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Error().Err(err).Msg("PDB deletion failed")
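The PDB handling above now reads through the cached PodDisruptionBudgetReadInterface and writes through PodDisruptionBudgetsModInterface. Below is a minimal sketch of that read-then-create-or-delete flow, assuming trimmed-down stand-in interfaces (pdbReader and pdbModifier are hypothetical, not the operator's own types).

package sketch

import (
	"context"
	"time"

	policy "k8s.io/api/policy/v1beta1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pdbReader and pdbModifier are hypothetical, trimmed-down stand-ins for the
// read and mod interfaces used in the hunk above.
type pdbReader interface {
	Get(ctx context.Context, name string, opts meta.GetOptions) (*policy.PodDisruptionBudget, error)
}

type pdbModifier interface {
	Create(ctx context.Context, pdb *policy.PodDisruptionBudget, opts meta.CreateOptions) (*policy.PodDisruptionBudget, error)
	Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}

// ensurePDB sketches the read-then-create-or-delete flow: read from the cached
// inspector, create the PDB when it is missing, and delete it when it no longer
// matches the wanted minAvailable so the next loop can re-create it.
func ensurePDB(ctx context.Context, r pdbReader, m pdbModifier, wanted *policy.PodDisruptionBudget) error {
	ctxChild, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	existing, err := r.Get(ctxChild, wanted.GetName(), meta.GetOptions{})
	if apierrors.IsNotFound(err) {
		_, err := m.Create(ctxChild, wanted, meta.CreateOptions{})
		return err
	} else if err != nil {
		return err
	}

	if existing.Spec.MinAvailable == nil || wanted.Spec.MinAvailable == nil ||
		existing.Spec.MinAvailable.IntValue() != wanted.Spec.MinAvailable.IntValue() {
		return m.Delete(ctxChild, wanted.GetName(), meta.DeleteOptions{})
	}
	return nil
}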

View file

@ -33,22 +33,28 @@ import (
"sync"
"time"
podMod "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/kubernetes"
"github.com/arangodb/go-driver"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/tls"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/features"
"github.com/arangodb/kube-arangodb/pkg/deployment/pod"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
"github.com/arangodb/kube-arangodb/pkg/deployment/features"
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/interfaces"
"github.com/arangodb/go-driver"
"k8s.io/apimachinery/pkg/types"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/pod"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)
func versionHasAdvertisedEndpoint(v driver.Version) bool {
@ -343,10 +349,6 @@ func (r *Resources) RenderPodForMember(ctx context.Context, cachedStatus inspect
}
groupSpec := spec.GetServerGroupSpec(group)
kubecli := r.context.GetKubeCli()
ns := r.context.GetNamespace()
secrets := kubecli.CoreV1().Secrets(ns)
memberName := m.ArangoMemberName(r.context.GetAPIObject().GetName(), group)
member, ok := cachedStatus.ArangoMember(memberName)
@ -411,7 +413,7 @@ func (r *Resources) RenderPodForMember(ctx context.Context, cachedStatus inspect
masterJWTSecretName = spec.Sync.Authentication.GetJWTSecretName()
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return k8sutil.ValidateTokenSecret(ctxChild, secrets, masterJWTSecretName)
return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.SecretReadInterface(), masterJWTSecretName)
})
if err != nil {
return nil, errors.WithStack(errors.Wrapf(err, "Master JWT secret validation failed"))
@ -419,7 +421,7 @@ func (r *Resources) RenderPodForMember(ctx context.Context, cachedStatus inspect
monitoringTokenSecretName := spec.Sync.Monitoring.GetTokenSecretName()
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return k8sutil.ValidateTokenSecret(ctxChild, secrets, monitoringTokenSecretName)
return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.SecretReadInterface(), monitoringTokenSecretName)
})
if err != nil {
return nil, errors.WithStack(errors.Wrapf(err, "Monitoring token secret validation failed"))
@ -432,7 +434,7 @@ func (r *Resources) RenderPodForMember(ctx context.Context, cachedStatus inspect
if spec.IsAuthenticated() {
clusterJWTSecretName = spec.Authentication.GetJWTSecretName()
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return k8sutil.ValidateTokenSecret(ctxChild, secrets, clusterJWTSecretName)
return k8sutil.ValidateTokenSecret(ctxChild, cachedStatus.SecretReadInterface(), clusterJWTSecretName)
})
if err != nil {
return nil, errors.WithStack(errors.Wrapf(err, "Cluster JWT secret validation failed"))
@ -441,7 +443,7 @@ func (r *Resources) RenderPodForMember(ctx context.Context, cachedStatus inspect
// Check client-auth CA certificate secret
clientAuthCASecretName = spec.Sync.Authentication.GetClientCASecretName()
err = k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return k8sutil.ValidateCACertificateSecret(ctxChild, secrets, clientAuthCASecretName)
return k8sutil.ValidateCACertificateSecret(ctxChild, cachedStatus.SecretReadInterface(), clientAuthCASecretName)
})
if err != nil {
return nil, errors.WithStack(errors.Wrapf(err, "Client authentication CA certificate secret validation failed"))
@ -510,7 +512,7 @@ func (r *Resources) SelectImageForMember(spec api.DeploymentSpec, status api.Dep
}
// createPodForMember creates all Pods listed in member status
func (r *Resources) createPodForMember(ctx context.Context, spec api.DeploymentSpec, member *api.ArangoMember, memberID string, imageNotFoundOnce *sync.Once) error {
func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspectorInterface.Inspector, spec api.DeploymentSpec, member *api.ArangoMember, memberID string, imageNotFoundOnce *sync.Once) error {
log := r.log
status, lastVersion := r.context.GetStatus()
@ -545,11 +547,8 @@ func (r *Resources) createPodForMember(ctx context.Context, spec api.DeploymentS
imageInfo = *m.Image
kubecli := r.context.GetKubeCli()
apiObject := r.context.GetAPIObject()
ns := r.context.GetNamespace()
secrets := kubecli.CoreV1().Secrets(ns)
if !found {
return errors.WithStack(errors.Newf("Member '%s' not found", memberID))
}
@ -570,7 +569,7 @@ func (r *Resources) createPodForMember(ctx context.Context, spec api.DeploymentS
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
podName, uid, err := CreateArangoPod(ctxChild, kubecli, apiObject, spec, group, CreatePodFromTemplate(template.PodSpec))
podName, uid, err := CreateArangoPod(ctxChild, r.context.PodsModInterface(), apiObject, spec, group, CreatePodFromTemplate(template.PodSpec))
if err != nil {
return errors.WithStack(err)
}
@ -598,19 +597,25 @@ func (r *Resources) createPodForMember(ctx context.Context, spec api.DeploymentS
if group == api.ServerGroupSyncMasters {
// Create TLS secret
tlsKeyfileSecretName := k8sutil.CreateTLSKeyfileSecretName(apiObject.GetName(), role, m.ID)
serverNames := []string{
names, err := tls.GetAltNames(spec.Sync.TLS)
if err != nil {
return errors.WithStack(errors.Wrapf(err, "Failed to render alt names"))
}
names.AltNames = append(names.AltNames,
k8sutil.CreateSyncMasterClientServiceName(apiObject.GetName()),
k8sutil.CreateSyncMasterClientServiceDNSNameWithDomain(apiObject, spec.ClusterDomain),
k8sutil.CreatePodDNSNameWithDomain(apiObject, spec.ClusterDomain, role, m.ID),
}
)
masterEndpoint := spec.Sync.ExternalAccess.ResolveMasterEndpoint(k8sutil.CreateSyncMasterClientServiceDNSNameWithDomain(apiObject, spec.ClusterDomain), k8sutil.ArangoSyncMasterPort)
for _, ep := range masterEndpoint {
if u, err := url.Parse(ep); err == nil {
serverNames = append(serverNames, u.Hostname())
names.AltNames = append(names.AltNames, u.Hostname())
}
}
owner := apiObject.AsOwner()
_, err := createTLSServerCertificate(ctx, log, secrets, serverNames, spec.Sync.TLS, tlsKeyfileSecretName, &owner)
_, err = createTLSServerCertificate(ctx, log, cachedStatus, r.context.SecretsModInterface(), names, spec.Sync.TLS, tlsKeyfileSecretName, &owner)
if err != nil && !k8sutil.IsAlreadyExists(err) {
return errors.WithStack(errors.Wrapf(err, "Failed to create TLS keyfile secret"))
}
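For the sync-master certificate, the alt names now start from tls.GetAltNames(spec.Sync.TLS) and are extended with the internal service/pod DNS names and the hostnames of the external master endpoints. The following is a small, self-contained sketch of that assembly over plain strings; buildAltNames is a hypothetical helper, not the operator's code.

package sketch

import (
	"net/url"
)

// buildAltNames sketches how the ALT-name list for a sync-master certificate can be
// assembled: start from the names configured on the TLS spec, add the internal
// service/pod DNS names, then add the host part of every external master endpoint.
// The inputs are plain strings here; the operator derives them from the deployment spec.
func buildAltNames(configured, internalNames, masterEndpoints []string) []string {
	names := append([]string{}, configured...)
	names = append(names, internalNames...)
	for _, ep := range masterEndpoints {
		if u, err := url.Parse(ep); err == nil && u.Hostname() != "" {
			names = append(names, u.Hostname())
		}
	}
	// Deduplicate while keeping order, so certificate renewal stays stable.
	seen := make(map[string]struct{}, len(names))
	out := names[:0]
	for _, n := range names {
		if _, ok := seen[n]; ok {
			continue
		}
		seen[n] = struct{}{}
		out = append(out, n)
	}
	return out
}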
@ -618,7 +623,7 @@ func (r *Resources) createPodForMember(ctx context.Context, spec api.DeploymentS
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
podName, uid, err := CreateArangoPod(ctxChild, kubecli, apiObject, spec, group, CreatePodFromTemplate(template.PodSpec))
podName, uid, err := CreateArangoPod(ctxChild, r.context.PodsModInterface(), apiObject, spec, group, CreatePodFromTemplate(template.PodSpec))
if err != nil {
return errors.WithStack(err)
}
@ -713,9 +718,9 @@ func RenderArangoPod(cachedStatus inspectorInterface.Inspector, deployment k8sut
// CreateArangoPod creates a new Pod with container provided by parameter 'containerCreator'
// If the pod already exists, nil is returned.
// If another error occurs, that error is returned.
func CreateArangoPod(ctx context.Context, kubecli kubernetes.Interface, deployment k8sutil.APIObject,
func CreateArangoPod(ctx context.Context, c podMod.ModInterface, deployment k8sutil.APIObject,
deploymentSpec api.DeploymentSpec, group api.ServerGroup, pod *core.Pod) (string, types.UID, error) {
podName, uid, err := k8sutil.CreatePod(ctx, kubecli, pod, deployment.GetNamespace(), deployment.AsOwner())
podName, uid, err := k8sutil.CreatePod(ctx, c, pod, deployment.GetNamespace(), deployment.AsOwner())
if err != nil {
return "", "", errors.WithStack(err)
}
@ -774,7 +779,7 @@ func (r *Resources) EnsurePods(ctx context.Context, cachedStatus inspectorInterf
r.log.Warn().Msgf("Ensuring pod")
spec := r.context.GetSpec()
if err := r.createPodForMember(ctx, spec, member, m.ID, imageNotFoundOnce); err != nil {
if err := r.createPodForMember(ctx, cachedStatus, spec, member, m.ID, imageNotFoundOnce); err != nil {
r.log.Warn().Err(err).Msgf("Ensuring pod failed")
return errors.WithStack(err)
}

View file

@ -84,7 +84,7 @@ func (i inspectorMockStruct) AddService(t *testing.T, svc ...*core.Service) insp
}
func (i inspectorMockStruct) Get(t *testing.T) inspectorInterface.Inspector {
return inspector.NewInspectorFromData(nil, nil, nil, i.services, nil, nil, nil, nil)
return inspector.NewInspectorFromData(nil, nil, nil, i.services, nil, nil, nil, nil, nil)
}
// TestCreateArangodArgsAgent tests createArangodArgs for agent.

View file

@ -107,8 +107,7 @@ func (r *Resources) runPodFinalizers(ctx context.Context, p *v1.Pod, memberStatu
}
// Remove finalizers (if needed)
if len(removalList) > 0 {
kubecli := r.context.GetKubeCli()
if err := k8sutil.RemovePodFinalizers(ctx, log, kubecli, p, removalList, false); err != nil {
if err := k8sutil.RemovePodFinalizers(ctx, r.context.GetCachedStatus(), log, r.context.PodsModInterface(), p, removalList, false); err != nil {
log.Debug().Err(err).Msg("Failed to update pod (to remove finalizers)")
return 0, errors.WithStack(err)
}
@ -137,9 +136,8 @@ func (r *Resources) inspectFinalizerPodAgencyServing(ctx context.Context, log ze
// Remaining agents are healthy, if we need to perform complete recovery
// of the agent, also remove the PVC
if memberStatus.Conditions.IsTrue(api.ConditionTypeAgentRecoveryNeeded) {
pvcs := r.context.GetKubeCli().CoreV1().PersistentVolumeClaims(r.context.GetNamespace())
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return pvcs.Delete(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.DeleteOptions{})
return r.context.PersistentVolumeClaimsModInterface().Delete(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Warn().Err(err).Msg("Failed to delete PVC for member")
@ -167,9 +165,8 @@ func (r *Resources) inspectFinalizerPodDrainDBServer(ctx context.Context, log ze
// If this DBServer is cleaned out, we need to remove the PVC.
if memberStatus.Conditions.IsTrue(api.ConditionTypeCleanedOut) || memberStatus.Phase == api.MemberPhaseDrain {
pvcs := r.context.GetKubeCli().CoreV1().PersistentVolumeClaims(r.context.GetNamespace())
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return pvcs.Delete(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.DeleteOptions{})
return r.context.PersistentVolumeClaimsModInterface().Delete(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Warn().Err(err).Msg("Failed to delete PVC for member")

View file

@ -85,8 +85,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
// Strange, pod belongs to us, but we have no member for it.
// Remove all finalizers, so it can be removed.
log.Warn().Msg("Pod belongs to this deployment, but we don't know the member. Removing all finalizers")
kubecli := r.context.GetKubeCli()
err := k8sutil.RemovePodFinalizers(ctx, log, kubecli, pod, pod.GetFinalizers(), false)
err := k8sutil.RemovePodFinalizers(ctx, r.context.GetCachedStatus(), log, r.context.PodsModInterface(), pod, pod.GetFinalizers(), false)
if err != nil {
log.Debug().Err(err).Msg("Failed to update pod (to remove all finalizers)")
return errors.WithStack(err)

View file

@ -58,25 +58,21 @@ func (r *Resources) prepareAgencyPodTermination(ctx context.Context, log zerolog
// Check node the pod is scheduled on. Only if not in namespaced scope
agentDataWillBeGone := false
if nodes, ok := r.context.GetCachedStatus().GetNodes(); ok {
if !r.context.GetScope().IsNamespaced() && p.Spec.NodeName != "" {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
node, err := r.context.GetKubeCli().CoreV1().Nodes().Get(ctxChild, p.Spec.NodeName, metav1.GetOptions{})
if k8sutil.IsNotFound(err) {
node, ok := nodes.Node(p.Spec.NodeName)
if !ok {
log.Warn().Msg("Node not found")
} else if err != nil {
log.Warn().Err(err).Msg("Failed to get node for member")
return errors.WithStack(err)
} else if node.Spec.Unschedulable {
agentDataWillBeGone = true
}
}
}
// Check PVC
pvcs := r.context.GetKubeCli().CoreV1().PersistentVolumeClaims(apiObject.GetNamespace())
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
pvc, err := pvcs.Get(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.GetOptions{})
pvc, err := r.context.GetCachedStatus().PersistentVolumeClaimReadInterface().Get(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.GetOptions{})
if err != nil {
log.Warn().Err(err).Msg("Failed to get PVC for member")
return errors.WithStack(err)
@ -157,15 +153,10 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol
// Check node the pod is scheduled on
dbserverDataWillBeGone := false
if !r.context.GetScope().IsNamespaced() && p.Spec.NodeName != "" {
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
node, err := r.context.GetKubeCli().CoreV1().Nodes().Get(ctxChild, p.Spec.NodeName, metav1.GetOptions{})
if k8sutil.IsNotFound(err) {
if nodes, ok := r.context.GetCachedStatus().GetNodes(); ok {
node, ok := nodes.Node(p.Spec.NodeName)
if !ok {
log.Warn().Msg("Node not found")
} else if err != nil {
log.Warn().Err(err).Msg("Failed to get node for member")
return errors.WithStack(err)
} else if node.Spec.Unschedulable {
if !r.context.GetSpec().IsNetworkAttachedVolumes() || !resignJobAvailable {
dbserverDataWillBeGone = true
@ -174,10 +165,9 @@ func (r *Resources) prepareDBServerPodTermination(ctx context.Context, log zerol
}
// Check PVC
pvcs := r.context.GetKubeCli().CoreV1().PersistentVolumeClaims(apiObject.GetNamespace())
ctxChild, cancel := context.WithTimeout(ctx, k8sutil.GetRequestTimeout())
defer cancel()
pvc, err := pvcs.Get(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.GetOptions{})
pvc, err := r.context.GetCachedStatus().PersistentVolumeClaimReadInterface().Get(ctxChild, memberStatus.PersistentVolumeClaimName, metav1.GetOptions{})
if err != nil {
log.Warn().Err(err).Msg("Failed to get PVC for member")
return errors.WithStack(err)

View file

@ -61,8 +61,7 @@ func (r *Resources) runPVCFinalizers(ctx context.Context, p *v1.PersistentVolume
}
// Remove finalizers (if needed)
if len(removalList) > 0 {
kubecli := r.context.GetKubeCli()
err := k8sutil.RemovePVCFinalizers(ctx, log, kubecli, p, removalList, false)
err := k8sutil.RemovePVCFinalizers(ctx, r.context.GetCachedStatus(), log, r.context.PersistentVolumeClaimsModInterface(), p, removalList, false)
if err != nil {
log.Debug().Err(err).Msg("Failed to update PVC (to remove finalizers)")
return 0, errors.WithStack(err)
@ -107,9 +106,8 @@ func (r *Resources) inspectFinalizerPVCMemberExists(ctx context.Context, log zer
// Member still exists, let's trigger a delete of it
if memberStatus.PodName != "" {
log.Info().Msg("Removing Pod of member, because PVC is being removed")
pods := r.context.GetKubeCli().CoreV1().Pods(apiObject.GetNamespace())
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return pods.Delete(ctxChild, memberStatus.PodName, metav1.DeleteOptions{})
return r.context.PodsModInterface().Delete(ctxChild, memberStatus.PodName, metav1.DeleteOptions{})
})
if err != nil && !k8sutil.IsNotFound(err) {
log.Debug().Err(err).Msg("Failed to delete pod")

View file

@ -70,8 +70,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter
// Strange, pvc belongs to us, but we have no member for it.
// Remove all finalizers, so it can be removed.
log.Warn().Msg("PVC belongs to this deployment, but we don't know the member. Removing all finalizers")
kubecli := r.context.GetKubeCli()
err := k8sutil.RemovePVCFinalizers(ctx, log, kubecli, pvc, pvc.GetFinalizers(), false)
err := k8sutil.RemovePVCFinalizers(ctx, r.context.GetCachedStatus(), log, r.context.PersistentVolumeClaimsModInterface(), pvc, pvc.GetFinalizers(), false)
if err != nil {
log.Debug().Err(err).Msg("Failed to update PVC (to remove all finalizers)")
return errors.WithStack(err)

View file

@ -40,7 +40,6 @@ func (r *Resources) createPVCFinalizers(group api.ServerGroup) []string {
// EnsurePVCs creates all PVC's listed in member status
func (r *Resources) EnsurePVCs(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
kubecli := r.context.GetKubeCli()
apiObject := r.context.GetAPIObject()
deploymentName := apiObject.GetName()
ns := apiObject.GetNamespace()
@ -48,7 +47,6 @@ func (r *Resources) EnsurePVCs(ctx context.Context, cachedStatus inspectorInterf
iterator := r.context.GetServerGroupIterator()
status, _ := r.context.GetStatus()
enforceAntiAffinity := r.context.GetSpec().GetEnvironment().IsProduction()
pvcs := kubecli.CoreV1().PersistentVolumeClaims(apiObject.GetNamespace())
if err := iterator.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
for _, m := range *status {
@ -66,7 +64,7 @@ func (r *Resources) EnsurePVCs(ctx context.Context, cachedStatus inspectorInterf
vct := spec.VolumeClaimTemplate
finalizers := r.createPVCFinalizers(group)
err := k8sutil.RunWithTimeout(ctx, func(ctxChild context.Context) error {
return k8sutil.CreatePersistentVolumeClaim(ctxChild, pvcs, m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, enforceAntiAffinity, resources, vct, finalizers, owner)
return k8sutil.CreatePersistentVolumeClaim(ctxChild, r.context.PersistentVolumeClaimsModInterface(), m.PersistentVolumeClaimName, deploymentName, ns, storageClassName, role, enforceAntiAffinity, resources, vct, finalizers, owner)
})
if err != nil {
return errors.WithStack(err)

View file

@ -31,6 +31,10 @@ import (
"fmt"
"time"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/tls"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
@ -76,9 +80,7 @@ func GetCASecretName(apiObject k8sutil.APIObject) string {
func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cachedStatus inspectorInterface.Inspector) error {
start := time.Now()
spec := r.context.GetSpec()
kubecli := r.context.GetKubeCli()
ns := r.context.GetNamespace()
secrets := kubecli.CoreV1().Secrets(ns)
secrets := r.context.SecretsModInterface()
status, _ := r.context.GetStatus()
apiObject := r.context.GetAPIObject()
deploymentName := apiObject.GetName()
@ -87,14 +89,28 @@ func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cache
defer metrics.SetDuration(inspectSecretsDurationGauges.WithLabelValues(deploymentName), start)
counterMetric := inspectedSecretsCounters.WithLabelValues(deploymentName)
reconcileRequired := k8sutil.NewReconcile()
members := status.Members.AsList()
reconcileRequired := k8sutil.NewReconcile(cachedStatus)
if spec.IsAuthenticated() {
counterMetric.Inc()
if err := reconcileRequired.WithError(r.ensureTokenSecret(ctx, cachedStatus, secrets, spec.Authentication.GetJWTSecretName())); err != nil {
return errors.WithStack(err)
}
}
if spec.IsSecure() {
counterMetric.Inc()
if err := reconcileRequired.WithError(r.ensureTLSCACertificateSecret(ctx, cachedStatus, secrets, spec.TLS)); err != nil {
return errors.WithStack(err)
}
}
if err := reconcileRequired.Reconcile(ctx); err != nil {
return err
}
if spec.IsAuthenticated() {
if imageFound {
if pod.VersionHasJWTSecretKeyfolder(image.ArangoDBVersion, image.Enterprise) {
if err := r.ensureTokenSecretFolder(ctx, cachedStatus, secrets, spec.Authentication.GetJWTSecretName(), pod.JWTSecretFolder(deploymentName)); err != nil {
@ -116,24 +132,16 @@ func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cache
}
}
if spec.IsSecure() {
counterMetric.Inc()
if err := reconcileRequired.WithError(r.ensureTLSCACertificateSecret(ctx, cachedStatus, secrets, spec.TLS)); err != nil {
return errors.WithStack(err)
}
if err := reconcileRequired.WithError(r.ensureSecretWithEmptyKey(ctx, cachedStatus, secrets, GetCASecretName(r.context.GetAPIObject()), "empty")); err != nil {
return errors.WithStack(err)
}
if err := status.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error {
if !group.IsArangod() {
if err := reconcileRequired.ParallelAll(len(members), func(id int) error {
if !members[id].Group.IsArangod() {
return nil
}
role := group.AsRole()
for _, m := range list {
memberName := m.ArangoMemberName(r.context.GetAPIObject().GetName(), group)
memberName := members[id].Member.ArangoMemberName(r.context.GetAPIObject().GetName(), members[id].Group)
member, ok := cachedStatus.ArangoMember(memberName)
if !ok {
@ -147,32 +155,17 @@ func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cache
tlsKeyfileSecretName := k8sutil.AppendTLSKeyfileSecretPostfix(member.GetName())
if _, exists := cachedStatus.Secret(tlsKeyfileSecretName); !exists {
serverNames := []string{
k8sutil.CreateDatabaseClientServiceDNSName(apiObject),
k8sutil.CreatePodDNSName(apiObject, role, m.ID),
k8sutil.CreateServiceDNSName(service),
service.Spec.ClusterIP,
service.GetName(),
}
if spec.ClusterDomain != nil {
serverNames = append(serverNames,
k8sutil.CreateDatabaseClientServiceDNSNameWithDomain(apiObject, spec.ClusterDomain),
k8sutil.CreatePodDNSNameWithDomain(apiObject, spec.ClusterDomain, role, m.ID),
k8sutil.CreateServiceDNSNameWithDomain(service, spec.ClusterDomain))
}
if ip := spec.ExternalAccess.GetLoadBalancerIP(); ip != "" {
serverNames = append(serverNames, ip)
serverNames, err := tls.GetServerAltNames(apiObject, spec, spec.TLS, service, members[id].Group, members[id].Member)
if err != nil {
return errors.WithStack(errors.Wrapf(err, "Failed to render alt names"))
}
owner := member.AsOwner()
if created, err := createTLSServerCertificate(ctx, log, secrets, serverNames, spec.TLS, tlsKeyfileSecretName, &owner); err != nil && !k8sutil.IsAlreadyExists(err) {
if created, err := createTLSServerCertificate(ctx, log, cachedStatus, secrets, serverNames, spec.TLS, tlsKeyfileSecretName, &owner); err != nil && !k8sutil.IsAlreadyExists(err) {
return errors.WithStack(errors.Wrapf(err, "Failed to create TLS keyfile secret"))
} else if created {
reconcileRequired.Required()
}
}
}
return nil
}); err != nil {
return errors.WithStack(err)
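The per-member keyfile secrets are now handled through reconcileRequired.ParallelAll over the flattened members list. The real helper is not shown in this diff; the sketch below is only a guess at its general shape, using errgroup.

package sketch

import (
	"golang.org/x/sync/errgroup"
)

// parallelAll is a minimal stand-in for the ParallelAll helper used above: it runs
// one goroutine per item index and returns the first error, if any. The real helper
// in the operator may bound concurrency or collect errors differently.
func parallelAll(items int, f func(id int) error) error {
	var g errgroup.Group
	for id := 0; id < items; id++ {
		id := id // capture the loop variable for the goroutine
		g.Go(func() error {
			return f(id)
		})
	}
	return g.Wait()
}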
@ -203,10 +196,10 @@ func (r *Resources) EnsureSecrets(ctx context.Context, log zerolog.Logger, cache
return errors.WithStack(err)
}
}
return reconcileRequired.Reconcile()
return reconcileRequired.Reconcile(ctx)
}
func (r *Resources) ensureTokenSecretFolder(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, secretName, folderSecretName string) error {
func (r *Resources) ensureTokenSecretFolder(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, secretName, folderSecretName string) error {
if f, exists := cachedStatus.Secret(folderSecretName); exists {
if len(f.Data) == 0 {
s, exists := cachedStatus.Secret(secretName)
@ -304,7 +297,7 @@ func (r *Resources) ensureTokenSecretFolder(ctx context.Context, cachedStatus in
return nil
}
func (r *Resources) ensureTokenSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, secretName string) error {
func (r *Resources) ensureTokenSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, secretName string) error {
if _, exists := cachedStatus.Secret(secretName); !exists {
return r.createTokenSecret(ctx, secrets, secretName)
}
@ -312,7 +305,7 @@ func (r *Resources) ensureTokenSecret(ctx context.Context, cachedStatus inspecto
return nil
}
func (r *Resources) ensureSecretWithEmptyKey(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, secretName, keyName string) error {
func (r *Resources) ensureSecretWithEmptyKey(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, secretName, keyName string) error {
if _, exists := cachedStatus.Secret(secretName); !exists {
return r.createSecretWithKey(ctx, secrets, secretName, keyName, nil)
}
@ -320,7 +313,7 @@ func (r *Resources) ensureSecretWithEmptyKey(ctx context.Context, cachedStatus i
return nil
}
func (r *Resources) createSecretWithMod(ctx context.Context, secrets k8sutil.SecretInterface, secretName string, f func(s *core.Secret)) error {
func (r *Resources) createSecretWithMod(ctx context.Context, secrets secret.ModInterface, secretName string, f func(s *core.Secret)) error {
// Create secret
secret := &core.Secret{
ObjectMeta: meta.ObjectMeta{
@ -346,13 +339,13 @@ func (r *Resources) createSecretWithMod(ctx context.Context, secrets k8sutil.Sec
return operatorErrors.Reconcile()
}
func (r *Resources) createSecretWithKey(ctx context.Context, secrets k8sutil.SecretInterface, secretName, keyName string, value []byte) error {
func (r *Resources) createSecretWithKey(ctx context.Context, secrets secret.ModInterface, secretName, keyName string, value []byte) error {
return r.createSecretWithMod(ctx, secrets, secretName, func(s *core.Secret) {
s.Data[keyName] = value
})
}
func (r *Resources) createTokenSecret(ctx context.Context, secrets k8sutil.SecretInterface, secretName string) error {
func (r *Resources) createTokenSecret(ctx context.Context, secrets secret.ModInterface, secretName string) error {
tokenData := make([]byte, 32)
rand.Read(tokenData)
token := hex.EncodeToString(tokenData)
@ -373,7 +366,7 @@ func (r *Resources) createTokenSecret(ctx context.Context, secrets k8sutil.Secre
return operatorErrors.Reconcile()
}
func (r *Resources) ensureEncryptionKeyfolderSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, keyfileSecretName, secretName string) error {
func (r *Resources) ensureEncryptionKeyfolderSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, keyfileSecretName, secretName string) error {
_, folderExists := cachedStatus.Secret(secretName)
keyfile, exists := cachedStatus.Secret(keyfileSecretName)
@ -410,7 +403,7 @@ func (r *Resources) ensureEncryptionKeyfolderSecret(ctx context.Context, cachedS
}
func AppendKeyfileToKeyfolder(ctx context.Context, cachedStatus inspectorInterface.Inspector,
secrets k8sutil.SecretInterface, ownerRef *meta.OwnerReference, secretName string, encryptionKey []byte) error {
secrets secret.ModInterface, ownerRef *meta.OwnerReference, secretName string, encryptionKey []byte) error {
encSha := fmt.Sprintf("%0x", sha256.Sum256(encryptionKey))
if _, exists := cachedStatus.Secret(secretName); !exists {
@ -447,14 +440,14 @@ var (
// ensureExporterTokenSecret checks if a secret with given name exists in the namespace
// of the deployment. If not, it will add such a secret with correct access.
func (r *Resources) ensureExporterTokenSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector,
secrets k8sutil.SecretInterface, tokenSecretName, secretSecretName string) error {
secrets secret.ModInterface, tokenSecretName, secretSecretName string) error {
if update, exists, err := r.ensureExporterTokenSecretCreateRequired(cachedStatus, tokenSecretName, secretSecretName); err != nil {
return err
} else if update {
// Create secret
if !exists {
owner := r.context.GetAPIObject().AsOwner()
err = k8sutil.CreateJWTFromSecret(ctx, secrets, tokenSecretName, secretSecretName, exporterTokenClaims, &owner)
err = k8sutil.CreateJWTFromSecret(ctx, cachedStatus.SecretReadInterface(), secrets, tokenSecretName, secretSecretName, exporterTokenClaims, &owner)
if k8sutil.IsAlreadyExists(err) {
// Secret added while we tried it also
return nil
@ -508,7 +501,7 @@ func (r *Resources) ensureExporterTokenSecretCreateRequired(cachedStatus inspect
// ensureTLSCACertificateSecret checks if a secret with given name exists in the namespace
// of the deployment. If not, it will add such a secret with a generated CA certificate.
func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, spec api.TLSSpec) error {
func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, spec api.TLSSpec) error {
if _, exists := cachedStatus.Secret(spec.GetCASecretName()); !exists {
// Secret not found, create it
apiObject := r.context.GetAPIObject()
@ -532,7 +525,7 @@ func (r *Resources) ensureTLSCACertificateSecret(ctx context.Context, cachedStat
// ensureClientAuthCACertificateSecret checks if a secret with given name exists in the namespace
// of the deployment. If not, it will add such a secret with a generated CA certificate.
func (r *Resources) ensureClientAuthCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets k8sutil.SecretInterface, spec api.SyncAuthenticationSpec) error {
func (r *Resources) ensureClientAuthCACertificateSecret(ctx context.Context, cachedStatus inspectorInterface.Inspector, secrets secret.ModInterface, spec api.SyncAuthenticationSpec) error {
if _, exists := cachedStatus.Secret(spec.GetClientCASecretName()); !exists {
// Secret not found, create it
apiObject := r.context.GetAPIObject()
@ -559,11 +552,8 @@ func (r *Resources) getJWTSecret(spec api.DeploymentSpec) (string, error) {
if !spec.IsAuthenticated() {
return "", nil
}
kubecli := r.context.GetKubeCli()
ns := r.context.GetNamespace()
secrets := kubecli.CoreV1().Secrets(ns)
secretName := spec.Authentication.GetJWTSecretName()
s, err := k8sutil.GetTokenSecret(context.TODO(), secrets, secretName)
s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().SecretReadInterface(), secretName)
if err != nil {
r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get JWT secret")
return "", errors.WithStack(err)
@ -573,11 +563,8 @@ func (r *Resources) getJWTSecret(spec api.DeploymentSpec) (string, error) {
// getSyncJWTSecret loads the JWT secret used for syncmasters from a Secret configured in apiObject.Spec.Sync.Authentication.JWTSecretName.
func (r *Resources) getSyncJWTSecret(spec api.DeploymentSpec) (string, error) {
kubecli := r.context.GetKubeCli()
ns := r.context.GetNamespace()
secrets := kubecli.CoreV1().Secrets(ns)
secretName := spec.Sync.Authentication.GetJWTSecretName()
s, err := k8sutil.GetTokenSecret(context.TODO(), secrets, secretName)
s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().SecretReadInterface(), secretName)
if err != nil {
r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync JWT secret")
return "", errors.WithStack(err)
@ -587,11 +574,8 @@ func (r *Resources) getSyncJWTSecret(spec api.DeploymentSpec) (string, error) {
// getSyncMonitoringToken loads the token secret used for monitoring sync masters & workers.
func (r *Resources) getSyncMonitoringToken(spec api.DeploymentSpec) (string, error) {
kubecli := r.context.GetKubeCli()
ns := r.context.GetNamespace()
secrets := kubecli.CoreV1().Secrets(ns)
secretName := spec.Sync.Monitoring.GetTokenSecretName()
s, err := k8sutil.GetTokenSecret(context.TODO(), secrets, secretName)
s, err := k8sutil.GetTokenSecret(context.Background(), r.context.GetCachedStatus().SecretReadInterface(), secretName)
if err != nil {
r.log.Debug().Err(err).Str("secret-name", secretName).Msg("Failed to get sync monitoring secret")
return "", errors.WithStack(err)

View file

@ -28,6 +28,8 @@ import (
"strings"
"time"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service"
"k8s.io/apimachinery/pkg/api/equality"
"k8s.io/apimachinery/pkg/util/intstr"
@ -52,20 +54,18 @@ var (
func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
log := r.log
start := time.Now()
kubecli := r.context.GetKubeCli()
apiObject := r.context.GetAPIObject()
status, _ := r.context.GetStatus()
deploymentName := apiObject.GetName()
ns := apiObject.GetNamespace()
owner := apiObject.AsOwner()
spec := r.context.GetSpec()
defer metrics.SetDuration(inspectServicesDurationGauges.WithLabelValues(deploymentName), start)
counterMetric := inspectedServicesCounters.WithLabelValues(deploymentName)
// Fetch existing services
svcs := kubecli.CoreV1().Services(ns)
svcs := r.context.ServicesModInterface()
reconcileRequired := k8sutil.NewReconcile()
reconcileRequired := k8sutil.NewReconcile(cachedStatus)
// Ensure member services
if err := status.Members.ForeachServerGroup(func(group api.ServerGroup, list api.MemberStatusList) error {
@ -234,12 +234,12 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
}
}
return reconcileRequired.Reconcile()
return reconcileRequired.Reconcile(ctx)
}
// EnsureServices creates all services needed to service the deployment
func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStatus inspectorInterface.Inspector,
svcs k8sutil.ServiceInterface, eaServiceName, svcRole, title string, port int, noneIsClusterIP bool,
svcs service.ModInterface, eaServiceName, svcRole, title string, port int, noneIsClusterIP bool,
spec api.ExternalAccessSpec, apiObject k8sutil.APIObject, log zerolog.Logger) error {
// Database external access service
createExternalAccessService := false

View file

@ -26,12 +26,13 @@ package k8sutil
import (
"context"
"github.com/rs/zerolog"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/rs/zerolog"
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
@ -39,25 +40,28 @@ const (
)
// RemovePodFinalizers removes the given finalizers from the given pod.
func RemovePodFinalizers(ctx context.Context, log zerolog.Logger, kubecli kubernetes.Interface, p *v1.Pod,
func RemovePodFinalizers(ctx context.Context, cachedStatus pod.Inspector, log zerolog.Logger, c pod.ModInterface, p *core.Pod,
finalizers []string, ignoreNotFound bool) error {
pods := kubecli.CoreV1().Pods(p.GetNamespace())
getFunc := func() (metav1.Object, error) {
ctxChild, cancel := context.WithTimeout(ctx, GetRequestTimeout())
defer cancel()
result, err := pods.Get(ctxChild, p.GetName(), metav1.GetOptions{})
if err := cachedStatus.Refresh(ctxChild); err != nil {
return nil, errors.WithStack(err)
}
result, err := cachedStatus.PodReadInterface().Get(ctxChild, p.GetName(), metav1.GetOptions{})
if err != nil {
return nil, errors.WithStack(err)
}
return result, nil
}
updateFunc := func(updated metav1.Object) error {
updatedPod := updated.(*v1.Pod)
updatedPod := updated.(*core.Pod)
ctxChild, cancel := context.WithTimeout(ctx, GetRequestTimeout())
defer cancel()
result, err := pods.Update(ctxChild, updatedPod, metav1.UpdateOptions{})
result, err := c.Update(ctxChild, updatedPod, metav1.UpdateOptions{})
if err != nil {
return errors.WithStack(err)
}
@ -71,25 +75,28 @@ func RemovePodFinalizers(ctx context.Context, log zerolog.Logger, kubecli kubern
}
// RemovePVCFinalizers removes the given finalizers from the given PVC.
func RemovePVCFinalizers(ctx context.Context, log zerolog.Logger, kubecli kubernetes.Interface,
p *v1.PersistentVolumeClaim, finalizers []string, ignoreNotFound bool) error {
pvcs := kubecli.CoreV1().PersistentVolumeClaims(p.GetNamespace())
func RemovePVCFinalizers(ctx context.Context, cachedStatus persistentvolumeclaim.Inspector, log zerolog.Logger, c persistentvolumeclaim.ModInterface,
p *core.PersistentVolumeClaim, finalizers []string, ignoreNotFound bool) error {
getFunc := func() (metav1.Object, error) {
ctxChild, cancel := context.WithTimeout(ctx, GetRequestTimeout())
defer cancel()
result, err := pvcs.Get(ctxChild, p.GetName(), metav1.GetOptions{})
if err := cachedStatus.Refresh(ctxChild); err != nil {
return nil, errors.WithStack(err)
}
result, err := cachedStatus.PersistentVolumeClaimReadInterface().Get(ctxChild, p.GetName(), metav1.GetOptions{})
if err != nil {
return nil, errors.WithStack(err)
}
return result, nil
}
updateFunc := func(updated metav1.Object) error {
updatedPVC := updated.(*v1.PersistentVolumeClaim)
updatedPVC := updated.(*core.PersistentVolumeClaim)
ctxChild, cancel := context.WithTimeout(ctx, GetRequestTimeout())
defer cancel()
result, err := pvcs.Update(ctxChild, updatedPVC, metav1.UpdateOptions{})
result, err := c.Update(ctxChild, updatedPVC, metav1.UpdateOptions{})
if err != nil {
return errors.WithStack(err)
}
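Both finalizer helpers now refresh the cached inspector, fetch the latest object through the read interface and write the result back through the mod interface. Below is a condensed sketch of that flow for pods, with hypothetical podReader/podUpdater stand-ins instead of the operator's interfaces.

package sketch

import (
	"context"
	"time"

	core "k8s.io/api/core/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// podReader and podUpdater are hypothetical, trimmed-down read/update interfaces
// mirroring the ones introduced above.
type podReader interface {
	Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Pod, error)
}

type podUpdater interface {
	Update(ctx context.Context, pod *core.Pod, opts meta.UpdateOptions) (*core.Pod, error)
}

// removeFinalizers sketches the flow used by RemovePodFinalizers: fetch the latest
// object through the (refreshed) read interface, drop the requested finalizers and
// write the result back through the mod interface.
func removeFinalizers(ctx context.Context, r podReader, u podUpdater, name string, remove []string) error {
	ctxChild, cancel := context.WithTimeout(ctx, 10*time.Second)
	defer cancel()

	p, err := r.Get(ctxChild, name, meta.GetOptions{})
	if err != nil {
		return err
	}

	drop := make(map[string]struct{}, len(remove))
	for _, f := range remove {
		drop[f] = struct{}{}
	}
	kept := p.GetFinalizers()[:0]
	for _, f := range p.GetFinalizers() {
		if _, ok := drop[f]; !ok {
			kept = append(kept, f)
		}
	}
	p.SetFinalizers(kept)

	_, err = u.Update(ctxChild, p, meta.UpdateOptions{})
	return err
}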

View file

@ -29,13 +29,14 @@ import (
type Inspector interface {
ArangoMember(name string) (*api.ArangoMember, bool)
IterateArangoMembers(action ArangoMemberAction, filters ...ArangoMemberFilter) error
IterateArangoMembers(action Action, filters ...Filter) error
ArangoMemberReadInterface() ReadInterface
}
type ArangoMemberFilter func(pod *api.ArangoMember) bool
type ArangoMemberAction func(pod *api.ArangoMember) error
type Filter func(pod *api.ArangoMember) bool
type Action func(pod *api.ArangoMember) error
func FilterByDeploymentUID(uid types.UID) ArangoMemberFilter {
func FilterByDeploymentUID(uid types.UID) Filter {
return func(pod *api.ArangoMember) bool {
return pod.Spec.DeploymentUID == "" || pod.Spec.DeploymentUID == uid
}

View file

@ -0,0 +1,49 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package arangomember
import (
"context"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to modify ArangoMember resources: create, update, update status, patch and delete
type ModInterface interface {
Create(ctx context.Context, arangomember *api.ArangoMember, opts meta.CreateOptions) (*api.ArangoMember, error)
Update(ctx context.Context, arangomember *api.ArangoMember, opts meta.UpdateOptions) (*api.ArangoMember, error)
UpdateStatus(ctx context.Context, arangomember *api.ArangoMember, opts meta.UpdateOptions) (*api.ArangoMember, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *api.ArangoMember, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// Interface has methods to work with ArangoMember resources.
type Interface interface {
ModInterface
ReadInterface
}
// ReadInterface has methods to work with ArangoMember resources with ReadOnly mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoMember, error)
}
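One way to read the split introduced by this file: callers that only need to look up ArangoMembers depend on ReadInterface, while reconciliation code takes ModInterface. The sketch below uses hypothetical local copies of both roles; ensureDeploymentUID is illustrative only, not part of this change.

package sketch

import (
	"context"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
)

// memberReader and memberModifier are hypothetical copies of the two roles defined
// in the file above, kept minimal so the sketch stands on its own.
type memberReader interface {
	Get(ctx context.Context, name string, opts meta.GetOptions) (*api.ArangoMember, error)
}

type memberModifier interface {
	Update(ctx context.Context, member *api.ArangoMember, opts meta.UpdateOptions) (*api.ArangoMember, error)
}

// ensureDeploymentUID illustrates the intended split: read through the read-only
// interface, write through the mod interface, and keep the two roles separate.
func ensureDeploymentUID(ctx context.Context, r memberReader, w memberModifier, name string, uid types.UID) error {
	m, err := r.Get(ctx, name, meta.GetOptions{})
	if err != nil {
		return err
	}
	if m.Spec.DeploymentUID == uid {
		return nil // already up to date
	}
	m.Spec.DeploymentUID = uid
	_, err = w.Update(ctx, m, meta.UpdateOptions{})
	return err
}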

View file

@ -24,7 +24,8 @@
package inspector
import (
"context"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/node"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/arangomember"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim"
@ -37,8 +38,7 @@ import (
)
type Inspector interface {
Refresh(ctx context.Context) error
refresh.Inspector
pod.Inspector
secret.Inspector
persistentvolumeclaim.Inspector
@ -47,4 +47,6 @@ type Inspector interface {
servicemonitor.Inspector
serviceaccount.Inspector
arangomember.Inspector
node.Loader
}

View file

@ -0,0 +1,39 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package node
import (
core "k8s.io/api/core/v1"
)
type Loader interface {
GetNodes() (Inspector, bool)
}
type Inspector interface {
Nodes() []*core.Node
Node(name string) (*core.Node, bool)
IterateNodes(action Action, filters ...Filter) error
NodeReadInterface() ReadInterface
}
type Filter func(node *core.Node) bool
type Action func(node *core.Node) error
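This Loader/Inspector pair is what lets prepareAgencyPodTermination and prepareDBServerPodTermination check node schedulability from the cache instead of calling Nodes().Get. A minimal sketch of that lookup follows, with hypothetical nodeLoader/nodeInspector stand-ins.

package sketch

import (
	core "k8s.io/api/core/v1"
)

// nodeInspector and nodeLoader are hypothetical copies of the contract above,
// kept minimal so the sketch compiles on its own.
type nodeInspector interface {
	Node(name string) (*core.Node, bool)
}

type nodeLoader interface {
	GetNodes() (nodeInspector, bool)
}

// nodeIsUnschedulable mirrors the pattern used in prepareAgencyPodTermination:
// nodes may be unavailable (for example in namespaced scope), so both lookups fail softly.
func nodeIsUnschedulable(l nodeLoader, nodeName string) bool {
	nodes, ok := l.GetNodes()
	if !ok || nodeName == "" {
		return false
	}
	node, ok := nodes.Node(nodeName)
	if !ok {
		return false
	}
	return node.Spec.Unschedulable
}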

View file

@ -0,0 +1,38 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package node
import (
"context"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Interface has methods to work with Node resources.
type Interface interface {
ReadInterface
}
// ReadInterface has methods to work with Node resources with ReadOnly mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Node, error)
}

View file

@ -22,12 +22,19 @@
package persistentvolumeclaim
import core "k8s.io/api/core/v1"
import (
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh"
core "k8s.io/api/core/v1"
)
type Inspector interface {
refresh.Inspector
PersistentVolumeClaims() []*core.PersistentVolumeClaim
PersistentVolumeClaim(name string) (*core.PersistentVolumeClaim, bool)
IteratePersistentVolumeClaims(action PersistentVolumeClaimAction, filters ...PersistentVolumeClaimFilter) error
IteratePersistentVolumeClaims(action Action, filters ...Filter) error
PersistentVolumeClaimReadInterface() ReadInterface
}
type PersistentVolumeClaimFilter func(pvc *core.PersistentVolumeClaim) bool
type PersistentVolumeClaimAction func(pvc *core.PersistentVolumeClaim) error
type Filter func(pvc *core.PersistentVolumeClaim) bool
type Action func(pvc *core.PersistentVolumeClaim) error

View file

@ -0,0 +1,48 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package persistentvolumeclaim
import (
"context"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to modify PersistentVolumeClaim resources: create, update, patch and delete
type ModInterface interface {
Create(ctx context.Context, persistentvolumeclaim *core.PersistentVolumeClaim, opts meta.CreateOptions) (*core.PersistentVolumeClaim, error)
Update(ctx context.Context, persistentvolumeclaim *core.PersistentVolumeClaim, opts meta.UpdateOptions) (*core.PersistentVolumeClaim, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *core.PersistentVolumeClaim, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// Interface has methods to work with PersistentVolumeClaim resources.
type Interface interface {
ModInterface
ReadInterface
}
// ReadInterface has methods to work with PersistentVolumeClaim resources with ReadOnly mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.PersistentVolumeClaim, error)
}

View file

@ -22,11 +22,18 @@
package pod
import core "k8s.io/api/core/v1"
import (
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh"
core "k8s.io/api/core/v1"
)
type Inspector interface {
refresh.Inspector
Pods() []*core.Pod
Pod(name string) (*core.Pod, bool)
IteratePods(action Action, filters ...Filter) error
PodReadInterface() ReadInterface
}
type Filter func(pod *core.Pod) bool

View file

@ -0,0 +1,47 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package pod
import (
"context"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to modify Pod resources: create, update, patch and delete
type ModInterface interface {
Create(ctx context.Context, pod *core.Pod, opts meta.CreateOptions) (*core.Pod, error)
Update(ctx context.Context, pod *core.Pod, opts meta.UpdateOptions) (*core.Pod, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *core.Pod, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// Interface has methods to work with Pod resources.
type Interface interface {
ModInterface
}
// ReadInterface has methods to work with Pod resources with ReadOnly mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Pod, error)
}

View file

@ -26,8 +26,9 @@ import policy "k8s.io/api/policy/v1beta1"
type Inspector interface {
PodDisruptionBudget(name string) (*policy.PodDisruptionBudget, bool)
IteratePodDisruptionBudgets(action PodDisruptionBudgetAction, filters ...PodDisruptionBudgetFilter) error
IteratePodDisruptionBudgets(action Action, filters ...Filter) error
PodDisruptionBudgetReadInterface() ReadInterface
}
type PodDisruptionBudgetFilter func(podDisruptionBudget *policy.PodDisruptionBudget) bool
type PodDisruptionBudgetAction func(podDisruptionBudget *policy.PodDisruptionBudget) error
type Filter func(podDisruptionBudget *policy.PodDisruptionBudget) bool
type Action func(podDisruptionBudget *policy.PodDisruptionBudget) error

View file

@ -0,0 +1,48 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package poddisruptionbudget
import (
"context"
policy "k8s.io/api/policy/v1beta1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to modify PodDisruptionBudget resources: create, update, patch and delete
type ModInterface interface {
Create(ctx context.Context, poddisruptionbudget *policy.PodDisruptionBudget, opts meta.CreateOptions) (*policy.PodDisruptionBudget, error)
Update(ctx context.Context, poddisruptionbudget *policy.PodDisruptionBudget, opts meta.UpdateOptions) (*policy.PodDisruptionBudget, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *policy.PodDisruptionBudget, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// Interface has methods to work with PodDisruptionBudget resources.
type Interface interface {
ModInterface
ReadInterface
}
// ReadInterface has methods to work with PodDisruptionBudget resources with ReadOnly mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*policy.PodDisruptionBudget, error)
}

View file

@ -0,0 +1,32 @@
//
// DISCLAIMER
//
// Copyright 2021 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//
package refresh
import (
"context"
)
type Inspector interface {
IsStatic() bool
Refresh(ctx context.Context) error
}
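A small sketch of how the IsStatic flag might be used by callers: a static inspector (for example one built from fixed test data) is simply never refreshed. refreshIfNeeded is a hypothetical helper, not part of this change.

package sketch

import (
	"context"
)

// refresher is a hypothetical copy of the refresh.Inspector contract above.
type refresher interface {
	IsStatic() bool
	Refresh(ctx context.Context) error
}

// refreshIfNeeded re-reads the cluster state only for non-static inspectors.
func refreshIfNeeded(ctx context.Context, i refresher) error {
	if i.IsStatic() {
		return nil
	}
	return i.Refresh(ctx)
}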

View file

@ -0,0 +1,48 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package secret
import (
"context"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to modify Secret resources: create, update, patch and delete
type ModInterface interface {
Create(ctx context.Context, secret *core.Secret, opts meta.CreateOptions) (*core.Secret, error)
Update(ctx context.Context, secret *core.Secret, opts meta.UpdateOptions) (*core.Secret, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *core.Secret, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// Interface has methods to work with Secret resources.
type Interface interface {
ModInterface
ReadInterface
}
// ReadInterface has methods to work with Secret resources with ReadOnly mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Secret, error)
}

View file

@ -22,10 +22,7 @@
package secret
import (
"context"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// Inspector for secrets
@ -35,10 +32,5 @@ type Inspector interface {
SecretReadInterface() ReadInterface
}
// ReadInterface has methods to work with Secret resources with ReadOnly mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Secret, error)
}
type Filter func(pod *core.Secret) bool
type Action func(pod *core.Secret) error

View file

@ -0,0 +1,48 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package service
import (
"context"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to modify Service resources: create, update, patch and delete
type ModInterface interface {
Create(ctx context.Context, service *core.Service, opts meta.CreateOptions) (*core.Service, error)
Update(ctx context.Context, service *core.Service, opts meta.UpdateOptions) (*core.Service, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *core.Service, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// ReadInterface has methods to work with Service resources in read-only mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Service, error)
}
// Interface has methods to work with Service resources.
type Interface interface {
ModInterface
ReadInterface
}

View file

@ -22,12 +22,15 @@
package service
import core "k8s.io/api/core/v1"
import (
core "k8s.io/api/core/v1"
)
type Inspector interface {
Service(name string) (*core.Service, bool)
IterateServices(action ServiceAction, filters ...ServiceFilter) error
IterateServices(action Action, filters ...Filter) error
ServiceReadInterface() ReadInterface
}
type ServiceFilter func(pod *core.Service) bool
type ServiceAction func(pod *core.Service) error
type Filter func(pod *core.Service) bool
type Action func(pod *core.Service) error
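With the generic Filter/Action names, iteration callbacks can be passed as plain function literals. A minimal sketch (not part of this commit), assuming the inspector package path mirrors the secret/pod inspector paths shown elsewhere in this change; it collects the names of headless Services.
package example

import (
	core "k8s.io/api/core/v1"

	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service"
)

// headlessServiceNames lists Services with ClusterIP "None" via the inspector.
func headlessServiceNames(i service.Inspector) ([]string, error) {
	var names []string
	collect := func(svc *core.Service) error {
		names = append(names, svc.GetName())
		return nil
	}
	onlyHeadless := func(svc *core.Service) bool {
		return svc.Spec.ClusterIP == core.ClusterIPNone
	}
	if err := i.IterateServices(collect, onlyHeadless); err != nil {
		return nil, err
	}
	return names, nil
}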

View file

@ -0,0 +1,48 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package serviceaccount
import (
"context"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to work with ServiceAccount resources for modification (create, update, patch, delete)
type ModInterface interface {
Create(ctx context.Context, serviceaccount *core.ServiceAccount, opts meta.CreateOptions) (*core.ServiceAccount, error)
Update(ctx context.Context, serviceaccount *core.ServiceAccount, opts meta.UpdateOptions) (*core.ServiceAccount, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *core.ServiceAccount, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// Interface has methods to work with ServiceAccount resources.
type Interface interface {
ModInterface
ReadInterface
}
// ReadInterface has methods to work with ServiceAccount resources in read-only mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.ServiceAccount, error)
}
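A minimal sketch (not part of this commit) of a create-if-missing helper built on these interfaces. It assumes the inspector package path mirrors the other inspectors in this change and uses the standard k8s.io/apimachinery not-found helper; the function name is illustrative.
package example

import (
	"context"

	core "k8s.io/api/core/v1"
	apierrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/serviceaccount"
)

// ensureServiceAccount creates the ServiceAccount if it does not exist yet.
func ensureServiceAccount(ctx context.Context, c serviceaccount.Interface, name, namespace string) error {
	if _, err := c.Get(ctx, name, meta.GetOptions{}); err == nil {
		return nil
	} else if !apierrors.IsNotFound(err) {
		return err
	}
	sa := &core.ServiceAccount{
		ObjectMeta: meta.ObjectMeta{Name: name, Namespace: namespace},
	}
	_, err := c.Create(ctx, sa, meta.CreateOptions{})
	return err
}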

View file

@ -27,6 +27,7 @@ import core "k8s.io/api/core/v1"
type Inspector interface {
ServiceAccount(name string) (*core.ServiceAccount, bool)
IterateServiceAccounts(action Action, filters ...Filter) error
ServiceAccountReadInterface() ReadInterface
}
type Filter func(pod *core.ServiceAccount) bool

View file

@ -0,0 +1,48 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package servicemonitor
import (
"context"
monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
)
// ModInterface has methods to work with ServiceMonitor resources for modification (create, update, patch, delete)
type ModInterface interface {
Create(ctx context.Context, servicemonitor *monitoring.ServiceMonitor, opts meta.CreateOptions) (*monitoring.ServiceMonitor, error)
Update(ctx context.Context, servicemonitor *monitoring.ServiceMonitor, opts meta.UpdateOptions) (*monitoring.ServiceMonitor, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *monitoring.ServiceMonitor, err error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
}
// Interface has methods to work with ServiceMonitor resources.
type Interface interface {
ModInterface
ReadInterface
}
// ReadInterface has methods to work with ServiceMonitor resources in read-only mode.
type ReadInterface interface {
Get(ctx context.Context, name string, opts meta.GetOptions) (*monitoring.ServiceMonitor, error)
}
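A minimal sketch (not part of this commit) of a read-only existence check against this interface; the package path is assumed to mirror the other inspectors, and the helper name is illustrative.
package example

import (
	"context"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/servicemonitor"
)

// serviceMonitorExists reports whether the named ServiceMonitor is present.
func serviceMonitorExists(ctx context.Context, c servicemonitor.ReadInterface, name string) (bool, error) {
	if _, err := c.Get(ctx, name, meta.GetOptions{}); err != nil {
		if apierrors.IsNotFound(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}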

View file

@ -26,8 +26,9 @@ import monitoring "github.com/prometheus-operator/prometheus-operator/pkg/apis/m
type Inspector interface {
ServiceMonitor(name string) (*monitoring.ServiceMonitor, bool)
IterateServiceMonitors(action ServiceMonitorAction, filters ...ServiceMonitorFilter) error
IterateServiceMonitors(action Action, filters ...Filter) error
ServiceMonitorReadInterface() ReadInterface
}
type ServiceMonitorFilter func(serviceMonitor *monitoring.ServiceMonitor) bool
type ServiceMonitorAction func(serviceMonitor *monitoring.ServiceMonitor) error
type Filter func(serviceMonitor *monitoring.ServiceMonitor) bool
type Action func(serviceMonitor *monitoring.ServiceMonitor) error

View file

@ -30,6 +30,8 @@ import (
"strings"
"time"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
@ -43,7 +45,6 @@ import (
core "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
)
const (
@ -465,18 +466,18 @@ func GetPodSpecChecksum(podSpec core.PodSpec) (string, error) {
// CreatePod adds an owner to the given pod and calls the k8s api-server to create it.
// If the pod already exists, nil is returned.
// If another error occurs, that error is returned.
func CreatePod(ctx context.Context, kubecli kubernetes.Interface, pod *core.Pod, ns string,
func CreatePod(ctx context.Context, c pod.ModInterface, pod *core.Pod, ns string,
owner metav1.OwnerReference) (string, types.UID, error) {
AddOwnerRefToObject(pod.GetObjectMeta(), &owner)
if createdPod, err := kubecli.CoreV1().Pods(ns).Create(ctx, pod, metav1.CreateOptions{}); err != nil {
if createdPod, err := c.Create(ctx, pod, metav1.CreateOptions{}); err != nil {
if IsAlreadyExists(err) {
return pod.GetName(), "", nil // If pod exists do not return any error but do not record UID (enforced rotation)
}
return "", "", errors.WithStack(err)
} else {
return createdPod.GetName(), pod.UID, nil
return createdPod.GetName(), createdPod.GetUID(), nil
}
}
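A minimal sketch (not part of this commit) of a call site for the narrowed signature. It assumes a client-go typed Pods client structurally satisfies pod.ModInterface, which holds if that interface mirrors the secret/service ModInterface shapes introduced above.
package example

import (
	"context"

	core "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

// createOwnedPod attaches the owner reference and creates the pod, returning its name.
func createOwnedPod(ctx context.Context, kubecli kubernetes.Interface, p *core.Pod, ns string, owner metav1.OwnerReference) (string, error) {
	name, _, err := k8sutil.CreatePod(ctx, kubecli.CoreV1().Pods(ns), p, ns, owner)
	return name, err
}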

View file

@ -27,6 +27,8 @@ import (
"context"
"strconv"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/persistentvolumeclaim"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
v1 "k8s.io/api/core/v1"
@ -35,12 +37,6 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/constants"
)
// PersistentVolumeClaimInterface has methods to work with PersistentVolumeClaim resources.
type PersistentVolumeClaimInterface interface {
Create(ctx context.Context, persistentVolumeClaim *v1.PersistentVolumeClaim, opts metav1.CreateOptions) (*v1.PersistentVolumeClaim, error)
Get(ctx context.Context, name string, opts metav1.GetOptions) (*v1.PersistentVolumeClaim, error)
}
// IsPersistentVolumeClaimMarkedForDeletion returns true if the pvc has been marked for deletion.
func IsPersistentVolumeClaimMarkedForDeletion(pvc *v1.PersistentVolumeClaim) bool {
return pvc.DeletionTimestamp != nil
@ -85,7 +81,7 @@ func ExtractStorageResourceRequirement(resources v1.ResourceRequirements) v1.Res
// CreatePersistentVolumeClaim creates a persistent volume claim with given name and configuration.
// If the pvc already exists, nil is returned.
// If another error occurs, that error is returned.
func CreatePersistentVolumeClaim(ctx context.Context, pvcs PersistentVolumeClaimInterface, pvcName, deploymentName, ns, storageClassName, role string, enforceAntiAffinity bool, resources v1.ResourceRequirements, vct *v1.PersistentVolumeClaim, finalizers []string, owner metav1.OwnerReference) error {
func CreatePersistentVolumeClaim(ctx context.Context, pvcs persistentvolumeclaim.ModInterface, pvcName, deploymentName, ns, storageClassName, role string, enforceAntiAffinity bool, resources v1.ResourceRequirements, vct *v1.PersistentVolumeClaim, finalizers []string, owner metav1.OwnerReference) error {
labels := LabelsForDeployment(deploymentName, role)
volumeMode := v1.PersistentVolumeFilesystem
pvc := &v1.PersistentVolumeClaim{

View file

@ -22,28 +22,117 @@
package k8sutil
import "github.com/arangodb/kube-arangodb/pkg/util/errors"
import (
"context"
"sync"
func NewReconcile() Reconcile {
return &reconcile{}
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/refresh"
)
func NewReconcile(refresh refresh.Inspector) Reconcile {
return &reconcile{refresh: refresh}
}
type Reconcile interface {
Reconcile() error
Reconcile(ctx context.Context) error
Required()
IsRequired() bool
WithError(err error) error
ParallelAll(items int, executor func(id int) error) error
Parallel(items, max int, executor func(id int) error) error
}
type reconcile struct {
required bool
refresh refresh.Inspector
}
func (r *reconcile) Reconcile() error {
func (r *reconcile) ParallelAll(items int, executor func(id int) error) error {
return r.Parallel(items, items, executor)
}
func (r *reconcile) Parallel(items, max int, executor func(id int) error) error {
var wg sync.WaitGroup
l := make([]error, items)
c := make(chan int, max)
defer func() {
close(c)
for range c {
}
}()
for i := 0; i < max; i++ {
c <- 0
}
for i := 0; i < items; i++ {
wg.Add(1)
go func(id int) {
defer wg.Done()
defer func() {
c <- 0
}()
<-c
l[id] = executor(id)
}(i)
}
wg.Wait()
for i := 0; i < items; i++ {
if l[i] == nil {
continue
}
if errors.IsReconcile(l[i]) {
continue
}
return l[i]
}
return nil
}
func (r *reconcile) WithRefresh(ctx context.Context, err error) error {
if err == nil {
return nil
}
if errors.IsReconcile(err) {
if r.refresh != nil {
return r.refresh.Refresh(ctx)
}
return nil
}
return err
}
func (r *reconcile) Reconcile(ctx context.Context) error {
if r.required {
if r.refresh.IsStatic() {
return errors.Reconcile()
}
if err := r.refresh.Refresh(ctx); err != nil {
return err
}
r.required = false
return nil
}
return nil
}
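A minimal sketch (not part of this commit) of how a caller might drive the extended interface: bounded parallel work first, then a single Reconcile call that refreshes the cached state if Required() was flagged earlier. The item slice, concurrency limit, and worker function are illustrative.
package example

import (
	"context"

	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

// processAll runs process over all items, at most 4 at a time, then lets the
// Reconcile helper refresh the inspector cache if another round was requested.
func processAll(ctx context.Context, r k8sutil.Reconcile, items []string, process func(name string) error) error {
	if err := r.Parallel(len(items), 4, func(id int) error {
		return process(items[id])
	}); err != nil {
		return err
	}
	return r.Reconcile(ctx)
}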

View file

@ -26,27 +26,18 @@ package k8sutil
import (
"context"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/secret"
"github.com/arangodb/kube-arangodb/pkg/util/constants"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
jg "github.com/golang-jwt/jwt"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// SecretInterface has methods to work with Secret resources.
type SecretInterface interface {
Create(ctx context.Context, secret *core.Secret, opts meta.CreateOptions) (*core.Secret, error)
Update(ctx context.Context, secret *core.Secret, opts meta.UpdateOptions) (*core.Secret, error)
Delete(ctx context.Context, name string, opts meta.DeleteOptions) error
Get(ctx context.Context, name string, opts meta.GetOptions) (*core.Secret, error)
Patch(ctx context.Context, name string, pt types.PatchType, data []byte, opts meta.PatchOptions, subresources ...string) (result *core.Secret, err error)
}
// ValidateEncryptionKeySecret checks that a secret with given name in given namespace
// exists and it contains a 'key' data field of exactly 32 bytes.
func ValidateEncryptionKeySecret(secrets SecretInterface, secretName string) error {
func ValidateEncryptionKeySecret(secrets secret.Interface, secretName string) error {
s, err := secrets.Get(context.Background(), secretName, meta.GetOptions{})
if err != nil {
return errors.WithStack(err)
@ -67,7 +58,7 @@ func ValidateEncryptionKeyFromSecret(s *core.Secret) error {
}
// CreateEncryptionKeySecret creates a secret used to store a RocksDB encryption key.
func CreateEncryptionKeySecret(secrets SecretInterface, secretName string, key []byte) error {
func CreateEncryptionKeySecret(secrets secret.ModInterface, secretName string, key []byte) error {
if len(key) != 32 {
return errors.WithStack(errors.Newf("Key in secret '%s' is expected to be 32 bytes long, got %d", secretName, len(key)))
}
@ -89,7 +80,7 @@ func CreateEncryptionKeySecret(secrets SecretInterface, secretName string, key [
// ValidateCACertificateSecret checks that a secret with given name in given namespace
// exists and it contains a 'ca.crt' data field.
func ValidateCACertificateSecret(ctx context.Context, secrets SecretInterface, secretName string) error {
func ValidateCACertificateSecret(ctx context.Context, secrets secret.ReadInterface, secretName string) error {
s, err := secrets.Get(ctx, secretName, meta.GetOptions{})
if err != nil {
return errors.WithStack(err)
@ -107,7 +98,7 @@ func ValidateCACertificateSecret(ctx context.Context, secrets SecretInterface, s
// If the secret does not exist or the field is missing,
// an error is returned.
// Returns: certificate, error
func GetCACertficateSecret(ctx context.Context, secrets SecretInterface, secretName string) (string, error) {
func GetCACertficateSecret(ctx context.Context, secrets secret.Interface, secretName string) (string, error) {
ctxChild, cancel := context.WithTimeout(ctx, GetRequestTimeout())
defer cancel()
@ -128,7 +119,7 @@ func GetCACertficateSecret(ctx context.Context, secrets SecretInterface, secretN
// If the secret does not exist or one of the fields is missing,
// an error is returned.
// Returns: certificate, private-key, isOwnedByDeployment, error
func GetCASecret(ctx context.Context, secrets SecretInterface, secretName string,
func GetCASecret(ctx context.Context, secrets secret.ReadInterface, secretName string,
ownerRef *meta.OwnerReference) (string, string, bool, error) {
s, err := secrets.Get(ctx, secretName, meta.GetOptions{})
if err != nil {
@ -160,7 +151,7 @@ func GetCAFromSecret(s *core.Secret, ownerRef *meta.OwnerReference) (string, str
}
// CreateCASecret creates a secret used to store a PEM encoded CA certificate & private key.
func CreateCASecret(ctx context.Context, secrets SecretInterface, secretName string, certificate, key string,
func CreateCASecret(ctx context.Context, secrets secret.ModInterface, secretName string, certificate, key string,
ownerRef *meta.OwnerReference) error {
// Create secret
secret := &core.Secret{
@ -184,7 +175,7 @@ func CreateCASecret(ctx context.Context, secrets SecretInterface, secretName str
// GetTLSKeyfileSecret loads a secret used to store a PEM encoded keyfile
// in the format ArangoDB accepts it for its `--ssl.keyfile` option.
// Returns: keyfile (pem encoded), error
func GetTLSKeyfileSecret(secrets SecretInterface, secretName string) (string, error) {
func GetTLSKeyfileSecret(secrets secret.ReadInterface, secretName string) (string, error) {
s, err := secrets.Get(context.Background(), secretName, meta.GetOptions{})
if err != nil {
return "", errors.WithStack(err)
@ -203,7 +194,7 @@ func GetTLSKeyfileFromSecret(s *core.Secret) (string, error) {
// CreateTLSKeyfileSecret creates a secret used to store a PEM encoded keyfile
// in the format ArangoDB accepts it for its `--ssl.keyfile` option.
func CreateTLSKeyfileSecret(ctx context.Context, secrets SecretInterface, secretName string, keyfile string,
func CreateTLSKeyfileSecret(ctx context.Context, secrets secret.ModInterface, secretName string, keyfile string,
ownerRef *meta.OwnerReference) error {
// Create secret
secret := &core.Secret{
@ -225,7 +216,7 @@ func CreateTLSKeyfileSecret(ctx context.Context, secrets SecretInterface, secret
// ValidateTokenSecret checks that a secret with given name in given namespace
// exists and it contains a 'token' data field.
func ValidateTokenSecret(ctx context.Context, secrets SecretInterface, secretName string) error {
func ValidateTokenSecret(ctx context.Context, secrets secret.ReadInterface, secretName string) error {
s, err := secrets.Get(ctx, secretName, meta.GetOptions{})
if err != nil {
return errors.WithStack(err)
@ -243,7 +234,7 @@ func ValidateTokenFromSecret(s *core.Secret) error {
}
// GetTokenSecret loads the token secret from a Secret with given name.
func GetTokenSecret(ctx context.Context, secrets SecretInterface, secretName string) (string, error) {
func GetTokenSecret(ctx context.Context, secrets secret.ReadInterface, secretName string) (string, error) {
s, err := secrets.Get(ctx, secretName, meta.GetOptions{})
if err != nil {
return "", errors.WithStack(err)
@ -263,7 +254,7 @@ func GetTokenFromSecret(s *core.Secret) (string, error) {
// CreateTokenSecret creates a secret with given name in given namespace
// with a given token as value.
func CreateTokenSecret(ctx context.Context, secrets SecretInterface, secretName, token string,
func CreateTokenSecret(ctx context.Context, secrets secret.ModInterface, secretName, token string,
ownerRef *meta.OwnerReference) error {
// Create secret
secret := &core.Secret{
@ -300,10 +291,8 @@ func CreateJWTTokenFromSecret(secret string, claims map[string]interface{}) (str
// CreateJWTFromSecret creates a JWT using the secret stored in secretSecretName and stores the
// result in a new secret called tokenSecretName
func CreateJWTFromSecret(ctx context.Context, secrets SecretInterface, tokenSecretName, secretSecretName string, claims map[string]interface{}, ownerRef *meta.OwnerReference) error {
ctxChild, cancel := context.WithTimeout(ctx, GetRequestTimeout())
defer cancel()
secret, err := GetTokenSecret(ctxChild, secrets, secretSecretName)
func CreateJWTFromSecret(ctx context.Context, cachedSecrets secret.ReadInterface, secrets secret.ModInterface, tokenSecretName, secretSecretName string, claims map[string]interface{}, ownerRef *meta.OwnerReference) error {
secret, err := GetTokenSecret(ctx, cachedSecrets, secretSecretName)
if err != nil {
return errors.WithStack(err)
}
@ -324,7 +313,7 @@ func CreateJWTFromSecret(ctx context.Context, secrets SecretInterface, tokenSecr
// CreateBasicAuthSecret creates a secret with given name in given namespace
// with a given username and password as value.
func CreateBasicAuthSecret(ctx context.Context, secrets SecretInterface, secretName, username, password string,
func CreateBasicAuthSecret(ctx context.Context, secrets secret.ModInterface, secretName, username, password string,
ownerRef *meta.OwnerReference) error {
// Create secret
secret := &core.Secret{
@ -354,7 +343,7 @@ func CreateBasicAuthSecret(ctx context.Context, secrets SecretInterface, secretN
// If the secret does not exist or one of the fields is missing,
// an error is returned.
// Returns: username, password, error
func GetBasicAuthSecret(secrets SecretInterface, secretName string) (string, string, error) {
func GetBasicAuthSecret(secrets secret.Interface, secretName string) (string, string, error) {
s, err := secrets.Get(context.Background(), secretName, meta.GetOptions{})
if err != nil {
return "", "", errors.WithStack(err)

View file

@ -38,14 +38,6 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
// ServiceInterface has methods to work with Service resources.
type ServiceInterface interface {
Create(ctx context.Context, service *core.Service, opts metav1.CreateOptions) (*core.Service, error)
Update(ctx context.Context, service *core.Service, opts metav1.UpdateOptions) (*core.Service, error)
Delete(ctx context.Context, name string, opts metav1.DeleteOptions) error
Get(ctx context.Context, name string, opts metav1.GetOptions) (*core.Service, error)
}
// CreateHeadlessServiceName returns the name of the headless service for the given
// deployment name.
func CreateHeadlessServiceName(deploymentName string) string {
@ -77,7 +69,7 @@ func CreateExporterClientServiceName(deploymentName string) string {
}
// CreateExporterService
func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs ServiceInterface,
func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs service.ModInterface,
deployment metav1.Object, owner metav1.OwnerReference) (string, bool, error) {
deploymentName := deployment.GetName()
svcName := CreateExporterClientServiceName(deploymentName)
@ -119,7 +111,7 @@ func CreateExporterService(ctx context.Context, cachedStatus service.Inspector,
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func CreateHeadlessService(ctx context.Context, svcs ServiceInterface, deployment metav1.Object,
func CreateHeadlessService(ctx context.Context, svcs service.ModInterface, deployment metav1.Object,
owner metav1.OwnerReference) (string, bool, error) {
deploymentName := deployment.GetName()
svcName := CreateHeadlessServiceName(deploymentName)
@ -143,7 +135,7 @@ func CreateHeadlessService(ctx context.Context, svcs ServiceInterface, deploymen
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func CreateDatabaseClientService(ctx context.Context, svcs ServiceInterface, deployment metav1.Object, single bool,
func CreateDatabaseClientService(ctx context.Context, svcs service.ModInterface, deployment metav1.Object, single bool,
owner metav1.OwnerReference) (string, bool, error) {
deploymentName := deployment.GetName()
svcName := CreateDatabaseClientServiceName(deploymentName)
@ -173,7 +165,7 @@ func CreateDatabaseClientService(ctx context.Context, svcs ServiceInterface, dep
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func CreateExternalAccessService(ctx context.Context, svcs ServiceInterface, svcName, role string,
func CreateExternalAccessService(ctx context.Context, svcs service.ModInterface, svcName, role string,
deployment metav1.Object, serviceType core.ServiceType, port, nodePort int, loadBalancerIP string,
loadBalancerSourceRanges []string, owner metav1.OwnerReference) (string, bool, error) {
deploymentName := deployment.GetName()
@ -197,7 +189,7 @@ func CreateExternalAccessService(ctx context.Context, svcs ServiceInterface, svc
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func createService(ctx context.Context, svcs ServiceInterface, svcName, deploymentName, clusterIP, role string,
func createService(ctx context.Context, svcs service.ModInterface, svcName, deploymentName, clusterIP, role string,
serviceType core.ServiceType, ports []core.ServicePort, loadBalancerIP string, loadBalancerSourceRanges []string,
publishNotReadyAddresses bool, owner metav1.OwnerReference) (bool, error) {
labels := LabelsForDeployment(deploymentName, role)
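A minimal sketch (not part of this commit) showing that, with the switch to service.ModInterface, a client-go typed Services client can be passed directly, assuming its method set structurally covers the Create/Update/Patch/Delete signatures declared above.
package example

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"

	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

// ensureHeadlessService creates the headless service if needed; the bool
// reports whether it was newly created.
func ensureHeadlessService(ctx context.Context, kubecli kubernetes.Interface, depl metav1.Object, ns string, owner metav1.OwnerReference) (string, bool, error) {
	return k8sutil.CreateHeadlessService(ctx, kubecli.CoreV1().Services(ns), depl, owner)
}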

View file

@ -0,0 +1,88 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package tls
import (
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
)
type KeyfileInput struct {
AltNames []string
Email []string
}
func (k KeyfileInput) Append(b KeyfileInput) KeyfileInput {
k.Email = append(k.Email, b.Email...)
k.AltNames = append(k.AltNames, b.AltNames...)
return k
}
func GetAltNames(tls api.TLSSpec) (KeyfileInput, error) {
var k KeyfileInput
// Load alt names
dnsNames, ipAddresses, emailAddress, err := tls.GetParsedAltNames()
if err != nil {
return k, errors.WithStack(err)
}
k.AltNames = append(k.AltNames, dnsNames...)
k.AltNames = append(k.AltNames, ipAddresses...)
k.Email = emailAddress
return k, nil
}
func GetServerAltNames(deployment meta.Object, spec api.DeploymentSpec, tls api.TLSSpec, service *core.Service, group api.ServerGroup, member api.MemberStatus) (KeyfileInput, error) {
var k KeyfileInput
k.AltNames = append(k.AltNames,
k8sutil.CreateDatabaseClientServiceDNSName(deployment),
k8sutil.CreatePodDNSName(deployment, group.AsRole(), member.ID),
k8sutil.CreateServiceDNSName(service),
service.Spec.ClusterIP,
service.GetName(),
)
if spec.ClusterDomain != nil {
k.AltNames = append(k.AltNames,
k8sutil.CreateDatabaseClientServiceDNSNameWithDomain(deployment, spec.ClusterDomain),
k8sutil.CreatePodDNSNameWithDomain(deployment, spec.ClusterDomain, group.AsRole(), member.ID),
k8sutil.CreateServiceDNSNameWithDomain(service, spec.ClusterDomain))
}
if ip := spec.ExternalAccess.GetLoadBalancerIP(); ip != "" {
k.AltNames = append(k.AltNames, ip)
}
if names, err := GetAltNames(tls); err != nil {
return k, errors.WithStack(err)
} else {
k = k.Append(names)
}
return k, nil
}
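A minimal sketch (not part of this commit), written as if in the same tls package: Append works on a value receiver and returns the merged input, so the result has to be reassigned or returned. The host names and e-mail address are illustrative.
// exampleMerge combines operator-computed names with user-supplied ones.
func exampleMerge() KeyfileInput {
	base := KeyfileInput{AltNames: []string{"localhost", "127.0.0.1"}}
	extra := KeyfileInput{AltNames: []string{"example.arangodb.local"}, Email: []string{"ops@example.com"}}
	// Append merges both fields on a copy of the receiver and returns it.
	return base.Append(extra)
}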

pkg/util/parallel.go
View file

@ -0,0 +1,63 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package util
import "sync"
func RunParallel(max int, actions ...func() error) error {
c := make(chan int, max)
errors := make([]error, len(actions))
defer func() {
close(c)
for range c {
}
}()
for i := 0; i < max; i++ {
c <- 0
}
var wg sync.WaitGroup
for id, i := range actions {
wg.Add(1)
go func(id int, action func() error) {
defer func() {
c <- 0
wg.Done()
}()
<-c
errors[id] = action()
}(id, i)
}
wg.Wait()
for _, err := range errors {
if err != nil {
return err
}
}
return nil
}
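A minimal sketch (not part of this commit), written as if in the same util package: RunParallel bounds concurrency with the token channel, waits for every action to finish, and only then returns the first non-nil error.
// exampleFanOut runs three independent steps with at most two in flight.
// Each closure writes to its own index, so no extra synchronization is needed.
func exampleFanOut() error {
	results := make([]string, 3)
	return RunParallel(2,
		func() error { results[0] = "a"; return nil },
		func() error { results[1] = "b"; return nil },
		func() error { results[2] = "c"; return nil },
	)
}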