Mirror of https://github.com/arangodb/kube-arangodb.git
[Feature] Clean old rotation methods (#547)
parent 964dea3aaa
commit 4de3a3b96d
5 changed files with 8 additions and 583 deletions
@@ -1,6 +1,7 @@
# Change Log

## [master](https://github.com/arangodb/kube-arangodb/tree/master) (N/A)
- Removed old rotation logic (rotation of ArangoDeployment may be enforced after Operator upgrade)
- Added UpToDate condition in ArangoDeployment Status

## [1.0.1](https://github.com/arangodb/kube-arangodb/tree/1.0.1) (2020-03-25)
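The changelog above mentions a new UpToDate condition in the ArangoDeployment status. As a rough, hypothetical illustration of how such a condition could be read, the sketch below reuses the ConditionList.IsTrue helper that this diff already shows in use for api.ConditionTypeReady (in the clusterReadyForUpgrade hunk further down); the "UpToDate" string and the helper function are assumptions, since the constant introduced by this change is not visible in the diff:

package main

import (
    "fmt"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
)

// isUpToDate reports whether a deployment status carries a true UpToDate condition.
// The condition type string is an assumption for illustration only.
func isUpToDate(status api.DeploymentStatus) bool {
    return status.Conditions.IsTrue("UpToDate")
}

func main() {
    var status api.DeploymentStatus // empty status: no conditions set yet
    fmt.Println(isUpToDate(status)) // false
}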
@@ -213,11 +213,6 @@ func createPlan(log zerolog.Logger, apiObject k8sutil.APIObject,
        plan = createRotateServerStoragePlan(log, apiObject, spec, status, context.GetPvc, context.CreateEvent)
    }

    // Adjust security
    if plan.IsEmpty() {
        plan = createRotateServerSecurityPlan(log, spec, status, pods)
    }

    // Check for the need to rotate TLS CA certificate and all members
    if plan.IsEmpty() {
        plan = createRotateTLSCAPlan(log, apiObject, spec, status, context.GetTLSCA, context.CreateEvent)
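For context, the hunk above relies on a simple chaining idiom: each plan builder runs only while the plan is still empty, so at most one kind of change is scheduled per reconciliation pass. The standalone sketch below illustrates that idiom with made-up builder names and a simplified Plan type; nothing in it is part of the operator's real API:

package main

import "fmt"

// Plan is a simplified stand-in for the operator's api.Plan type.
type Plan []string

// IsEmpty mirrors the helper used in createPlan above.
func (p Plan) IsEmpty() bool { return len(p) == 0 }

func buildStoragePlan() Plan  { return nil }                  // pretend there is nothing to do
func buildSecurityPlan() Plan { return Plan{"RotateMember"} } // pretend a rotation is needed

func main() {
    plan := buildStoragePlan()
    if plan.IsEmpty() {
        // Runs only because the previous builder produced nothing.
        plan = buildSecurityPlan()
    }
    fmt.Println(plan) // [RotateMember]
}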
@@ -1,123 +0,0 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package reconcile

import (
    "testing"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/deployment/pod"
    "github.com/arangodb/kube-arangodb/pkg/util"
    "github.com/stretchr/testify/require"
    core "k8s.io/api/core/v1"
)

func TestPlanBuilderProbes(t *testing.T) {
    type testCase struct {
        probe              pod.Probe
        groupProbeDisabled *bool
        groupProbeSpec     *api.ServerGroupProbeSpec
        containerProbe     *core.Probe

        result bool
    }

    testCases := map[string]testCase{
        "Defaults": {},
        "Disable created probe": {
            containerProbe: &core.Probe{},

            result: true,
        },
        "Create probe": {
            probe: pod.Probe{
                EnabledByDefault: true,
                CanBeEnabled:     true,
            },

            result: true,
        },
        "Compare probe - without overrides": {
            probe: pod.Probe{
                EnabledByDefault: true,
                CanBeEnabled:     true,
            },
            containerProbe: &core.Probe{},
        },
        "Compare probe - with disabled": {
            probe: pod.Probe{
                EnabledByDefault: true,
                CanBeEnabled:     true,
            },
            groupProbeDisabled: util.NewBool(true),
            containerProbe:     &core.Probe{},

            result: true,
        },
        "Compare probe - with enabled": {
            probe: pod.Probe{
                EnabledByDefault: true,
                CanBeEnabled:     true,
            },
            groupProbeDisabled: util.NewBool(false),

            result: true,
        },
        "Compare probe - override defaults": {
            probe: pod.Probe{
                EnabledByDefault: true,
                CanBeEnabled:     true,
            },

            groupProbeSpec: &api.ServerGroupProbeSpec{
                TimeoutSeconds: util.NewInt32(10),
            },

            containerProbe: &core.Probe{},

            result: true,
        },
        "Compare probe - override defaults - do not change": {
            probe: pod.Probe{
                EnabledByDefault: true,
                CanBeEnabled:     true,
            },

            groupProbeSpec: &api.ServerGroupProbeSpec{
                TimeoutSeconds: util.NewInt32(10),
            },

            containerProbe: &core.Probe{
                TimeoutSeconds: 10,
            },
        },
    }

    for name, c := range testCases {
        t.Run(name, func(t *testing.T) {
            res, out := compareProbes(c.probe, c.groupProbeDisabled, c.groupProbeSpec, c.containerProbe)

            require.Equal(t, c.result, res, out)
        })
    }
}
@@ -23,17 +23,9 @@
package reconcile

import (
    "fmt"
    "reflect"
    "strings"

    "k8s.io/apimachinery/pkg/api/equality"

    "github.com/arangodb/kube-arangodb/pkg/apis/deployment"
    "github.com/arangodb/kube-arangodb/pkg/deployment/pod"

    "github.com/arangodb/go-driver"
    upgraderules "github.com/arangodb/go-upgrade-rules"
    "github.com/arangodb/kube-arangodb/pkg/apis/deployment"
    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
    "github.com/rs/zerolog"
@@ -84,21 +76,13 @@ func createRotateOrUpgradePlan(log zerolog.Logger, apiObject k8sutil.APIObject,
            // Yes, upgrade is needed (and allowed)
            newPlan = createUpgradeMemberPlan(log, m, group, "Version upgrade", spec.GetImage(), status,
                !decision.AutoUpgradeNeeded)
        } else {
            // Upgrade is not needed, see if rotation is needed
            if m.PodUID == "" {
                rotNeeded, reason := podNeedsRotation(log, pod, apiObject, spec, group, status, m.ID, context)
                if rotNeeded {
                    newPlan = createRotateMemberPlan(log, m, group, reason)
                }
            } else {
                // Use new level of rotate logic
                rotNeeded, reason := podNeedsRotationNew(log, pod, apiObject, spec, group, status, m, context)
            rotNeeded, reason := podNeedsRotation(log, pod, apiObject, spec, group, status, m, context)
            if rotNeeded {
                newPlan = createRotateMemberPlan(log, m, group, reason)
            }
        }
    }

    if !newPlan.IsEmpty() {
        // Only rotate/upgrade 1 pod at a time
@@ -195,11 +179,11 @@ func podNeedsUpgrading(log zerolog.Logger, p core.Pod, spec api.DeploymentSpec,
    return upgradeDecision{UpgradeNeeded: false}
}

// podNeedsRotationNew returns true when the specification of the
// podNeedsRotation returns true when the specification of the
// given pod differs from what it should be according to the
// given deployment spec.
// When true is returned, a reason for the rotation is already returned.
func podNeedsRotationNew(log zerolog.Logger, p core.Pod, apiObject metav1.Object, spec api.DeploymentSpec,
func podNeedsRotation(log zerolog.Logger, p core.Pod, apiObject metav1.Object, spec api.DeploymentSpec,
    group api.ServerGroup, status api.DeploymentStatus, m api.MemberStatus,
    context PlanBuilderContext) (bool, string) {
    if m.PodUID != p.UID {
@@ -235,257 +219,6 @@ func podNeedsRotationNew(log zerolog.Logger, p core.Pod, apiObject metav1.Object
    return false, ""
}

// podNeedsRotation returns true when the specification of the
// given pod differs from what it should be according to the
// given deployment spec.
// When true is returned, a reason for the rotation is already returned.
func podNeedsRotation(log zerolog.Logger, p core.Pod, apiObject metav1.Object, spec api.DeploymentSpec,
    group api.ServerGroup, status api.DeploymentStatus, id string,
    context PlanBuilderContext) (bool, string) {
    groupSpec := spec.GetServerGroupSpec(group)

    // Check image pull policy
    c, found := k8sutil.GetContainerByName(&p, k8sutil.ServerContainerName)
    if found {
        if c.ImagePullPolicy != spec.GetImagePullPolicy() {
            return true, "Image pull policy changed"
        }
    } else {
        return true, "Server container not found"
    }

    podImageInfo, found := status.Images.GetByImageID(c.Image)
    if !found {
        return false, "Server Image not found"
    }

    if group.IsExportMetrics() {
        e, hasExporter := k8sutil.GetContainerByName(&p, k8sutil.ExporterContainerName)

        if spec.Metrics.IsEnabled() {
            if !hasExporter {
                return true, "Exporter configuration changed"
            }

            if spec.Metrics.HasImage() {
                if e.Image != spec.Metrics.GetImage() {
                    return true, "Exporter image changed"
                }
            }

            if k8sutil.IsResourceRequirementsChanged(spec.Metrics.Resources, e.Resources) {
                return true, "Resources requirements have been changed for exporter"
            }
        } else if hasExporter {
            return true, "Exporter was disabled"
        }
    }

    // Check arguments
    expectedArgs := strings.Join(context.GetExpectedPodArguments(apiObject, spec, group, status.Members.Agents, id, podImageInfo.ArangoDBVersion), " ")
    actualArgs := strings.Join(getContainerArgs(c), " ")
    if expectedArgs != actualArgs {
        log.Debug().
            Str("actual-args", actualArgs).
            Str("expected-args", expectedArgs).
            Msg("Arguments changed. Rotation needed.")
        return true, "Arguments changed"
    }

    // Check service account
    if normalizeServiceAccountName(p.Spec.ServiceAccountName) != normalizeServiceAccountName(groupSpec.GetServiceAccountName()) {
        return true, "ServiceAccountName changed"
    }

    // Check priorities
    if groupSpec.PriorityClassName != p.Spec.PriorityClassName {
        return true, "Pod priority changed"
    }

    // Check resource requirements
    var resources core.ResourceRequirements
    if groupSpec.HasVolumeClaimTemplate() {
        resources = groupSpec.Resources // If there is a volume claim template compare all resources
    } else {
        resources = k8sutil.ExtractPodResourceRequirement(groupSpec.Resources)
    }

    if k8sutil.IsResourceRequirementsChanged(resources, k8sutil.GetArangoDBContainerFromPod(&p).Resources) {
        return true, "Resource Requirements changed"
    }

    var memberStatus, _, _ = status.Members.MemberStatusByPodName(p.GetName())
    if memberStatus.SideCarSpecs == nil {
        memberStatus.SideCarSpecs = make(map[string]core.Container)
    }

    // Check for missing side cars in
    for _, specSidecar := range groupSpec.GetSidecars() {
        var stateSidecar core.Container
        if stateSidecar, found = memberStatus.SideCarSpecs[specSidecar.Name]; !found {
            return true, "Sidecar " + specSidecar.Name + " not found in running pod " + p.GetName()
        }
        if sideCarRequireRotation(specSidecar.DeepCopy(), &stateSidecar) {
            return true, "Sidecar " + specSidecar.Name + " requires rotation"
        }
    }

    for name := range memberStatus.SideCarSpecs {
        var found = false
        for _, specSidecar := range groupSpec.GetSidecars() {
            if name == specSidecar.Name {
                found = true
                break
            }
        }
        if !found {
            return true, "Sidecar " + name + " no longer in specification"
        }
    }

    // Check for probe changes

    // Readiness
    if rotate, reason := compareProbes(pod.ReadinessSpec(group),
        groupSpec.GetProbesSpec().GetReadinessProbeDisabled(),
        groupSpec.GetProbesSpec().ReadinessProbeSpec,
        c.ReadinessProbe); rotate {
        return rotate, reason
    }

    // Liveness
    if rotate, reason := compareProbes(pod.LivenessSpec(group),
        groupSpec.GetProbesSpec().LivenessProbeDisabled,
        groupSpec.GetProbesSpec().LivenessProbeSpec,
        c.LivenessProbe); rotate {
        return rotate, reason
    }

    // Volumes
    if rotate, reason := compareVolumes(groupSpec, p); rotate {
        return rotate, reason
    }

    if rotate, reason := compareVolumeMounts(groupSpec, c); rotate {
        return rotate, reason
    }

    return false, ""
}

func compareProbes(probe pod.Probe, groupProbeDisabled *bool, groupProbeSpec *api.ServerGroupProbeSpec, containerProbe *core.Probe) (bool, string) {
    if !probe.CanBeEnabled {
        if containerProbe != nil {
            return true, "Probe needs to be disabled"
        }
        // Nothing to do - we cannot enable probe and probe is disabled
        return false, ""
    }

    enabled := probe.EnabledByDefault

    if groupProbeDisabled != nil {
        enabled = !*groupProbeDisabled
    }

    if enabled && containerProbe == nil {
        // We expected probe to be enabled but it is disabled in container
        return true, "Enabling probe"
    }

    if !enabled {
        if containerProbe != nil {
            // We expected probe to be disabled but it is enabled in container
            return true, "Disabling probe"
        }

        // Nothing to do - probe is disabled and it should stay as it is
        return false, ""
    }

    if groupProbeSpec.GetTimeoutSeconds(containerProbe.TimeoutSeconds) != containerProbe.TimeoutSeconds {
        return true, "Timeout seconds does not match"
    }

    if groupProbeSpec.GetPeriodSeconds(containerProbe.PeriodSeconds) != containerProbe.PeriodSeconds {
        return true, "Period seconds does not match"
    }

    // Recreate probe if initial delay seconds are different
    if groupProbeSpec.GetInitialDelaySeconds(containerProbe.InitialDelaySeconds) != containerProbe.InitialDelaySeconds {
        return true, "Initial delay seconds does not match"
    }

    // Recreate probe if failure threshold is different
    if groupProbeSpec.GetFailureThreshold(containerProbe.FailureThreshold) != containerProbe.FailureThreshold {
        return true, "Failure threshold does not match"
    }

    // Recreate probe if success threshold is different
    if groupProbeSpec.GetSuccessThreshold(containerProbe.SuccessThreshold) != containerProbe.SuccessThreshold {
        return true, "Success threshold does not match"
    }

    return false, ""
}

func compareVolumes(spec api.ServerGroupSpec, pod core.Pod) (bool, string) {
    if len(spec.Volumes) == 0 {
        return false, ""
    }

    currentVolumes := map[string]core.Volume{}

    for _, volume := range pod.Spec.Volumes {
        currentVolumes[volume.Name] = volume
    }

    for _, expectedVolumeTemplate := range spec.Volumes {
        expectedVolume := expectedVolumeTemplate.Volume()

        currentVolume, ok := currentVolumes[expectedVolume.Name]
        if !ok {
            return true, fmt.Sprintf("Volume %s is not mounted. Rotating", expectedVolume.Name)
        }

        if !equality.Semantic.DeepDerivative(expectedVolume, currentVolume) {
            return true, fmt.Sprintf("Volume %s needs to be updated", expectedVolume.Name)
        }
    }

    return false, ""
}

func compareVolumeMounts(spec api.ServerGroupSpec, container core.Container) (bool, string) {
    if len(container.VolumeMounts) < len(spec.VolumeMounts) {
        return true, "Missing volume mounts in container"
    }

    // Get compared mounts
    mounts := container.VolumeMounts
    if len(mounts) > 0 {
        if container.VolumeMounts[len(mounts)-1].MountPath == "/var/run/secrets/kubernetes.io/serviceaccount" {
            // Remove last mount added by ServiceAccount from compare
            mounts = mounts[0 : len(mounts)-1]
        }
    }

    if len(spec.VolumeMounts) > len(mounts) {
        return true, "Missing volume mounts in container"
    }

    mounts = mounts[len(mounts)-len(spec.VolumeMounts):]

    // Now we can compare lists
    for id, mount := range mounts {
        if !equality.Semantic.DeepDerivative(spec.VolumeMounts[id].VolumeMount(), mount) {
            return true, fmt.Sprintf("Mount with id %d does not match - got %s, expected %s", id, mount.Name, spec.VolumeMounts[id].Name)
        }
    }

    return false, ""
}

// clusterReadyForUpgrade returns true if the cluster is ready for the next update, that is:
// - all shards are in sync
// - all members are ready and fine
@@ -495,19 +228,6 @@ func clusterReadyForUpgrade(context PlanBuilderContext) bool {
    return allInSync && status.Conditions.IsTrue(api.ConditionTypeReady)
}

// sideCarRequireRotation checks if side car requires rotation including default parameters
func sideCarRequireRotation(wanted, given *core.Container) bool {
    return !reflect.DeepEqual(wanted, given)
}

// normalizeServiceAccountName replaces default with empty string, otherwise returns the input.
func normalizeServiceAccountName(name string) string {
    if name == "default" {
        return ""
    }
    return name
}

// createUpgradeMemberPlan creates a plan to upgrade (stop-recreateWithAutoUpgrade-stop-start) an existing
// member.
func createUpgradeMemberPlan(log zerolog.Logger, member api.MemberStatus,
@@ -533,10 +253,3 @@ func createUpgradeMemberPlan(log zerolog.Logger, member api.MemberStatus,
    }
    return plan
}

func getContainerArgs(c core.Container) []string {
    if len(c.Command) >= 1 {
        return c.Command[1:]
    }
    return c.Args
}
@@ -1,161 +0,0 @@
//
// DISCLAIMER
//
// Copyright 2020 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
// Author Adam Janikowski
//

package reconcile

import (
    "strings"

    "github.com/rs/zerolog"
    core "k8s.io/api/core/v1"

    api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
    "github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

// createRotateServerSecurityPlan creates a plan to rotate a server whose security context
// differs from the one defined in the server group specification.
func createRotateServerSecurityPlan(log zerolog.Logger, spec api.DeploymentSpec, status api.DeploymentStatus,
    pods []core.Pod) api.Plan {
    var plan api.Plan
    status.Members.ForeachServerGroup(func(group api.ServerGroup, members api.MemberStatusList) error {
        for _, m := range members {
            if !plan.IsEmpty() {
                // Only 1 change at a time
                continue
            }

            groupSpec := spec.GetServerGroupSpec(group)

            pod, found := k8sutil.GetPodByName(pods, m.PodName)
            if !found {
                continue
            }

            container, ok := getServerContainer(pod.Spec.Containers)
            if !ok {
                // We do not have server container in pod, which is not desired
                continue
            }

            groupSC := groupSpec.SecurityContext.NewSecurityContext()
            containerSC := container.SecurityContext

            if !compareSC(groupSC, containerSC) {
                log.Info().Str("member", m.ID).Str("group", group.AsRole()).Msg("Rotating security context")
                plan = append(plan,
                    api.NewAction(api.ActionTypeRotateMember, group, m.ID),
                    api.NewAction(api.ActionTypeWaitForMemberUp, group, m.ID),
                )
            }
        }
        return nil
    })
    return plan
}

func getServerContainer(containers []core.Container) (core.Container, bool) {
    for _, container := range containers {
        if container.Name == k8sutil.ServerContainerName {
            return container, true
        }
    }

    return core.Container{}, false
}

func compareSC(a, b *core.SecurityContext) bool {
    if a == nil && b == nil {
        return true
    }

    if a == nil || b == nil {
        return false
    }

    if ok := compareCapabilities(a.Capabilities, b.Capabilities); !ok {
        return false
    }

    return true
}

func compareCapabilities(a, b *core.Capabilities) bool {
    if a == nil && b == nil {
        return true
    }

    if a == nil || b == nil {
        return false
    }

    if ok := compareCapabilityLists(a.Add, b.Add); !ok {
        return false
    }

    if ok := compareCapabilityLists(a.Drop, b.Drop); !ok {
        return false
    }

    return true
}

func compareCapabilityLists(a, b []core.Capability) bool {
    if a == nil && b == nil {
        return true
    }

    if a == nil || b == nil {
        return false
    }

    if len(a) != len(b) {
        return false
    }

    checked := map[core.Capability]bool{}

    for _, capability := range a {
        checked[capability] = false
    }

    for _, capability := range b {
        if _, ok := checked[capability]; !ok {
            return false
        }

        // If we got ALL on list and expect ALL to be present then it is equal for us
        if strings.EqualFold(string(capability), "ALL") {
            return true
        }

        checked[capability] = true
    }

    for _, check := range checked {
        if !check {
            return false
        }
    }

    return true
}