Mirror of https://github.com/arangodb/kube-arangodb.git (synced 2024-12-14 11:57:37 +00:00)

Commit 20a53e7d9c — [Feature] Immutable spec (#1089)
Parent: f86b88a8c3

87 changed files with 1146 additions and 715 deletions
@@ -17,6 +17,7 @@
- (Feature) Extract Pod Details
- (Feature) Add Timezone management
- (Bugfix) Always recreate DBServers if they have a leader on it.
+- (Feature) Immutable spec

## [1.2.15](https://github.com/arangodb/kube-arangodb/tree/1.2.15) (2022-07-20)

- (Bugfix) Ensure pod names not too long
cmd/admin.go (20 changed lines)
@@ -110,13 +110,13 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) {
		logger.Err(err).Fatal("failed to create basic data for the connection")
	}

-	if d.Spec.GetMode() != api.DeploymentModeCluster {
-		logger.Fatal("agency state does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(),
+	if d.GetAcceptedSpec().GetMode() != api.DeploymentModeCluster {
+		logger.Fatal("agency state does not work for the \"%s\" deployment \"%s\"", d.GetAcceptedSpec().GetMode(),
			d.GetName())
	}

	dnsName := k8sutil.CreatePodDNSName(d.GetObjectMeta(), api.ServerGroupAgents.AsRole(), d.Status.Members.Agents[0].ID)
-	endpoint := getArangoEndpoint(d.Spec.IsSecure(), dnsName)
+	endpoint := getArangoEndpoint(d.GetAcceptedSpec().IsSecure(), dnsName)
	conn := createClient([]string{endpoint}, certCA, auth, connection.ApplicationJSON)
	leaderID, err := getAgencyLeader(ctx, conn)
	if err != nil {

@@ -124,7 +124,7 @@ func cmdGetAgencyState(cmd *cobra.Command, _ []string) {
	}

	dnsLeaderName := k8sutil.CreatePodDNSName(d.GetObjectMeta(), api.ServerGroupAgents.AsRole(), leaderID)
-	leaderEndpoint := getArangoEndpoint(d.Spec.IsSecure(), dnsLeaderName)
+	leaderEndpoint := getArangoEndpoint(d.GetAcceptedSpec().IsSecure(), dnsLeaderName)
	conn = createClient([]string{leaderEndpoint}, certCA, auth, connection.PlainText)
	body, err := getAgencyState(ctx, conn)
	if body != nil {

@@ -146,12 +146,12 @@ func cmdGetAgencyDump(cmd *cobra.Command, _ []string) {
		logger.Err(err).Fatal("failed to create basic data for the connection")
	}

-	if d.Spec.GetMode() != api.DeploymentModeCluster {
-		logger.Fatal("agency dump does not work for the \"%s\" deployment \"%s\"", d.Spec.GetMode(),
+	if d.GetAcceptedSpec().GetMode() != api.DeploymentModeCluster {
+		logger.Fatal("agency dump does not work for the \"%s\" deployment \"%s\"", d.GetAcceptedSpec().GetMode(),
			d.GetName())
	}

-	endpoint := getArangoEndpoint(d.Spec.IsSecure(), k8sutil.CreateDatabaseClientServiceDNSName(d.GetObjectMeta()))
+	endpoint := getArangoEndpoint(d.GetAcceptedSpec().IsSecure(), k8sutil.CreateDatabaseClientServiceDNSName(d.GetObjectMeta()))
	conn := createClient([]string{endpoint}, certCA, auth, connection.ApplicationJSON)
	body, err := getAgencyDump(ctx, conn)
	if body != nil {

@@ -205,14 +205,14 @@ func getDeploymentAndCredentials(ctx context.Context,
	}

	var secrets = kubeCli.CoreV1().Secrets(d.GetNamespace())
-	certCA, err = getCACertificate(ctx, secrets, d.Spec.TLS.GetCASecretName())
+	certCA, err = getCACertificate(ctx, secrets, d.GetAcceptedSpec().TLS.GetCASecretName())
	if err != nil {
		err = errors.WithMessage(err, "failed to get CA certificate")
		return
	}

-	if d.Spec.IsAuthenticated() {
-		auth, err = getJWTTokenFromSecrets(ctx, secrets, d.Spec.Authentication.GetJWTSecretName())
+	if d.GetAcceptedSpec().IsAuthenticated() {
+		auth, err = getJWTTokenFromSecrets(ctx, secrets, d.GetAcceptedSpec().Authentication.GetJWTSecretName())
		if err != nil {
			err = errors.WithMessage(err, "failed to get JWT token")
			return
@@ -199,7 +199,7 @@ func init() {
	f.DurationVar(&operatorTimeouts.reconciliation, "timeout.reconciliation", globals.DefaultReconciliationTimeout, "The reconciliation timeout to the ArangoDB CR")
	f.DurationVar(&shutdownOptions.delay, "shutdown.delay", defaultShutdownDelay, "The delay before running shutdown handlers")
	f.DurationVar(&shutdownOptions.timeout, "shutdown.timeout", defaultShutdownTimeout, "Timeout for shutdown handlers")
-	f.BoolVar(&operatorOptions.scalingIntegrationEnabled, "internal.scaling-integration", true, "Enable Scaling Integration")
+	f.BoolVar(&operatorOptions.scalingIntegrationEnabled, "internal.scaling-integration", false, "Enable Scaling Integration")
	f.Int64Var(&operatorKubernetesOptions.maxBatchSize, "kubernetes.max-batch-size", globals.DefaultKubernetesRequestBatchSize, "Size of batch during objects read")
	f.Float32Var(&operatorKubernetesOptions.qps, "kubernetes.qps", kclient.DefaultQPS, "Number of queries per second for k8s API")
	f.IntVar(&operatorKubernetesOptions.burst, "kubernetes.burst", kclient.DefaultBurst, "Burst for the k8s API")
@@ -3,7 +3,7 @@
## List

| Name | Namespace | Group | Type | Description |
|:---:|:---:|:---:|:---:|:---|
| [arangodb_operator_agency_errors](./arangodb_operator_agency_errors.md) | arangodb_operator | agency | Counter | Current count of agency cache fetch errors |
| [arangodb_operator_agency_fetches](./arangodb_operator_agency_fetches.md) | arangodb_operator | agency | Counter | Current count of agency cache fetches |
| [arangodb_operator_agency_index](./arangodb_operator_agency_index.md) | arangodb_operator | agency | Gauge | Current index of the agency cache |

@@ -21,3 +21,7 @@
| [arangodb_operator_rebalancer_moves_failed](./arangodb_operator_rebalancer_moves_failed.md) | arangodb_operator | rebalancer | Counter | Define how many moves failed |
| [arangodb_operator_rebalancer_moves_generated](./arangodb_operator_rebalancer_moves_generated.md) | arangodb_operator | rebalancer | Counter | Define how many moves were generated |
| [arangodb_operator_rebalancer_moves_succeeded](./arangodb_operator_rebalancer_moves_succeeded.md) | arangodb_operator | rebalancer | Counter | Define how many moves succeeded |
+| [arangodb_operator_resources_arangodeployment_accepted](./arangodb_operator_resources_arangodeployment_accepted.md) | arangodb_operator | resources | Gauge | Defines if ArangoDeployment has been accepted |
+| [arangodb_operator_resources_arangodeployment_immutable_errors](./arangodb_operator_resources_arangodeployment_immutable_errors.md) | arangodb_operator | resources | Counter | Counter for deployment immutable errors |
+| [arangodb_operator_resources_arangodeployment_uptodate](./arangodb_operator_resources_arangodeployment_uptodate.md) | arangodb_operator | resources | Gauge | Defines if ArangoDeployment is uptodate |
+| [arangodb_operator_resources_arangodeployment_validation_errors](./arangodb_operator_resources_arangodeployment_validation_errors.md) | arangodb_operator | resources | Counter | Counter for deployment validation errors |
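As a hedged illustration (not part of this commit; the operator generates its metrics from metrics.yaml, shown further down), this is roughly the shape a gauge/counter pair with the namespace/name labels listed above would take with the Prometheus Go client:

```go
package metrics

import "github.com/prometheus/client_golang/prometheus"

// Illustrative sketch only: metric names and labels taken from the list above.
var (
	deploymentAccepted = prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Namespace: "arangodb_operator",
		Subsystem: "resources",
		Name:      "arangodeployment_accepted",
		Help:      "Defines if ArangoDeployment has been accepted",
	}, []string{"namespace", "name"})

	immutableErrors = prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "arangodb_operator",
		Subsystem: "resources",
		Name:      "arangodeployment_immutable_errors",
		Help:      "Counter for deployment immutable errors",
	}, []string{"namespace", "name"})
)

func init() {
	// Register both collectors with the default registry.
	prometheus.MustRegister(deploymentAccepted, immutableErrors)
}
```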
@@ -0,0 +1,13 @@
+# arangodb_operator_kubernetes_events_created (Counter)
+
+## Description
+
+Counter for created events
+
+## Labels
+
+| Label | Description |
+|:---------:|:---------------------|
+| namespace | Deployment Namespace |
+| name | Deployment Name |
+| eventType | Event Type |
@@ -0,0 +1,12 @@
+# arangodb_operator_resources_arangodeployment_accepted (Gauge)
+
+## Description
+
+Defines if ArangoDeployment has been accepted
+
+## Labels
+
+| Label | Description |
+|:---------:|:---------------------|
+| namespace | Deployment Namespace |
+| name | Deployment Name |
@@ -0,0 +1,12 @@
+# arangodb_operator_resources_arangodeployment_immutable_errors (Counter)
+
+## Description
+
+Counter for deployment immutable errors
+
+## Labels
+
+| Label | Description |
+|:---------:|:---------------------|
+| namespace | Deployment Namespace |
+| name | Deployment Name |
@@ -0,0 +1,12 @@
+# arangodb_operator_resources_arangodeployment_uptodate (Gauge)
+
+## Description
+
+Defines if ArangoDeployment is uptodate
+
+## Labels
+
+| Label | Description |
+|:---------:|:---------------------|
+| namespace | Deployment Namespace |
+| name | Deployment Name |
@@ -0,0 +1,12 @@
+# arangodb_operator_resources_arangodeployment_validation_errors (Counter)
+
+## Description
+
+Counter for deployment validation errors
+
+## Labels
+
+| Label | Description |
+|:---------:|:---------------------|
+| namespace | Deployment Namespace |
+| name | Deployment Name |
@@ -149,6 +149,43 @@ namespaces:
      description: "Deployment Namespace"
    - key: name
      description: "Deployment Name"
+resources:
+  arangodeployment_validation_errors:
+    shortDescription: "Counter for deployment validation errors"
+    description: "Counter for deployment validation errors"
+    type: "Counter"
+    labels:
+      - key: namespace
+        description: "Deployment Namespace"
+      - key: name
+        description: "Deployment Name"
+  arangodeployment_immutable_errors:
+    shortDescription: "Counter for deployment immutable errors"
+    description: "Counter for deployment immutable errors"
+    type: "Counter"
+    labels:
+      - key: namespace
+        description: "Deployment Namespace"
+      - key: name
+        description: "Deployment Name"
+  arangodeployment_accepted:
+    shortDescription: "Defines if ArangoDeployment has been accepted"
+    description: "Defines if ArangoDeployment has been accepted"
+    type: "Gauge"
+    labels:
+      - key: namespace
+        description: "Deployment Namespace"
+      - key: name
+        description: "Deployment Name"
+  arangodeployment_uptodate:
+    shortDescription: "Defines if ArangoDeployment is uptodate"
+    description: "Defines if ArangoDeployment is uptodate"
+    type: "Gauge"
+    labels:
+      - key: namespace
+        description: "Deployment Namespace"
+      - key: name
+        description: "Deployment Name"
members:
  unexpected_container_exit_codes:
    shortDescription: "Counter of unexpected restarts in pod (Containers/InitContainers/EphemeralContainers)"
@@ -34,7 +34,7 @@ type Timezone struct {
}

func (t Timezone) GetData() ([]byte, bool) {
-	if d, ok := timezonesData[t.Name]; ok {
+	if d, ok := timezonesData[t.Parent]; ok {
		if d, err := base64.StdEncoding.DecodeString(d); err == nil {
			return d, true
		}
@@ -64,6 +64,8 @@
	ConditionTypeTerminating ConditionType = "Terminating"
	// ConditionTypeUpToDate indicates that the deployment is up to date.
	ConditionTypeUpToDate ConditionType = "UpToDate"
+	// ConditionTypeSpecAccepted indicates that the deployment spec has been accepted.
+	ConditionTypeSpecAccepted ConditionType = "SpecAccepted"
	// ConditionTypeMarkedToRemove indicates that the member is marked to be removed.
	ConditionTypeMarkedToRemove ConditionType = "MarkedToRemove"
	// ConditionTypeUpgradeFailed indicates that upgrade failed
@@ -73,27 +73,68 @@ func (d *ArangoDeployment) ForeachServerGroup(cb ServerGroupFunc, status *Deploy
	if status == nil {
		status = &d.Status
	}
-	if err := cb(ServerGroupAgents, d.Spec.Agents, &status.Members.Agents); err != nil {
+	return d.foreachServerGroup(cb, d.Spec, status)
+}
+
+// ForeachServerGroupAccepted calls the given callback for all accepted server groups.
+// If the callback returns an error, this error is returned and no other server
+// groups are processed.
+// Groups are processed in this order: agents, single, dbservers, coordinators, syncmasters, syncworkers
+func (d *ArangoDeployment) ForeachServerGroupAccepted(cb ServerGroupFunc, status *DeploymentStatus) error {
+	if status == nil {
+		status = &d.Status
+	}
+	spec := d.Spec
+	if a := status.AcceptedSpec; a != nil {
+		spec = *a
+	}
+	return d.foreachServerGroup(cb, spec, status)
+}
+
+func (d *ArangoDeployment) foreachServerGroup(cb ServerGroupFunc, spec DeploymentSpec, status *DeploymentStatus) error {
+	if err := cb(ServerGroupAgents, spec.Agents, &status.Members.Agents); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupSingle, d.Spec.Single, &status.Members.Single); err != nil {
+	if err := cb(ServerGroupSingle, spec.Single, &status.Members.Single); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupDBServers, d.Spec.DBServers, &status.Members.DBServers); err != nil {
+	if err := cb(ServerGroupDBServers, spec.DBServers, &status.Members.DBServers); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupCoordinators, d.Spec.Coordinators, &status.Members.Coordinators); err != nil {
+	if err := cb(ServerGroupCoordinators, spec.Coordinators, &status.Members.Coordinators); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupSyncMasters, d.Spec.SyncMasters, &status.Members.SyncMasters); err != nil {
+	if err := cb(ServerGroupSyncMasters, spec.SyncMasters, &status.Members.SyncMasters); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupSyncWorkers, d.Spec.SyncWorkers, &status.Members.SyncWorkers); err != nil {
+	if err := cb(ServerGroupSyncWorkers, spec.SyncWorkers, &status.Members.SyncWorkers); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

+// IsAccepted checks if accepted version match current version in spec
+func (d ArangoDeployment) IsAccepted() (bool, error) {
+	if as := d.Status.AcceptedSpecVersion; as != nil {
+		sha, err := d.Spec.Checksum()
+		if err != nil {
+			return false, err
+		}
+
+		return *as == sha, nil
+	}
+
+	return false, nil
+}
+
+func (d ArangoDeployment) GetAcceptedSpec() DeploymentSpec {
+	if a := d.Status.AcceptedSpec; a != nil {
+		return *a
+	} else {
+		return d.Spec
+	}
+}
+
// IsUpToDate checks if applied version match current version in spec
func (d ArangoDeployment) IsUpToDate() (bool, error) {
	sha, err := d.Spec.Checksum()
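A hedged usage sketch (illustrative only, not part of the commit) of how callers are expected to move from reading .Spec directly to the accepted spec introduced above:

```go
package example

import (
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
)

// reconcileExample assumes an already-fetched *api.ArangoDeployment.
func reconcileExample(depl *api.ArangoDeployment) (api.DeploymentSpec, bool) {
	// Operational code acts on the spec that was accepted into the status,
	// falling back to .Spec when nothing has been accepted yet (see GetAcceptedSpec above).
	spec := depl.GetAcceptedSpec()

	accepted, err := depl.IsAccepted()
	if err != nil {
		return spec, false
	}
	// When accepted == false the user edited .Spec but the change has not been
	// accepted yet, so callers keep reconciling against the accepted spec.
	return spec, accepted
}
```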
@@ -34,6 +34,9 @@ type DeploymentStatus struct {
	// AppliedVersion defines checksum of applied spec
	AppliedVersion string `json:"appliedVersion"`

+	// AcceptedSpecVersion defines checksum of accepted spec
+	AcceptedSpecVersion *string `json:"acceptedSpecVersion,omitempty"`
+
	// ServiceName holds the name of the Service a client can use (inside the k8s cluster)
	// to access ArangoDB.
	ServiceName string `json:"serviceName,omitempty"`

@@ -117,6 +120,7 @@ func (ds *DeploymentStatus) Equal(other DeploymentStatus) bool {
		ds.Plan.Equal(other.Plan) &&
		ds.HighPriorityPlan.Equal(other.HighPriorityPlan) &&
		ds.ResourcesPlan.Equal(other.ResourcesPlan) &&
+		util.CompareStringPointers(ds.AcceptedSpecVersion, other.AcceptedSpecVersion) &&
		ds.AcceptedSpec.Equal(other.AcceptedSpec) &&
		ds.SecretHashes.Equal(other.SecretHashes) &&
		ds.Agency.Equal(other.Agency) &&
pkg/apis/deployment/v1/zz_generated.deepcopy.go (generated, 5 changed lines)
@@ -1101,6 +1101,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
	*out = *in
+	if in.AcceptedSpecVersion != nil {
+		in, out := &in.AcceptedSpecVersion, &out.AcceptedSpecVersion
+		*out = new(string)
+		**out = **in
+	}
	if in.Restore != nil {
		in, out := &in.Restore, &out.Restore
		*out = new(DeploymentRestoreResult)
@@ -64,6 +64,8 @@
	ConditionTypeTerminating ConditionType = "Terminating"
	// ConditionTypeUpToDate indicates that the deployment is up to date.
	ConditionTypeUpToDate ConditionType = "UpToDate"
+	// ConditionTypeSpecAccepted indicates that the deployment spec has been accepted.
+	ConditionTypeSpecAccepted ConditionType = "SpecAccepted"
	// ConditionTypeMarkedToRemove indicates that the member is marked to be removed.
	ConditionTypeMarkedToRemove ConditionType = "MarkedToRemove"
	// ConditionTypeUpgradeFailed indicates that upgrade failed
@@ -73,27 +73,68 @@ func (d *ArangoDeployment) ForeachServerGroup(cb ServerGroupFunc, status *Deploy
	if status == nil {
		status = &d.Status
	}
-	if err := cb(ServerGroupAgents, d.Spec.Agents, &status.Members.Agents); err != nil {
+	return d.foreachServerGroup(cb, d.Spec, status)
+}
+
+// ForeachServerGroupAccepted calls the given callback for all accepted server groups.
+// If the callback returns an error, this error is returned and no other server
+// groups are processed.
+// Groups are processed in this order: agents, single, dbservers, coordinators, syncmasters, syncworkers
+func (d *ArangoDeployment) ForeachServerGroupAccepted(cb ServerGroupFunc, status *DeploymentStatus) error {
+	if status == nil {
+		status = &d.Status
+	}
+	spec := d.Spec
+	if a := status.AcceptedSpec; a != nil {
+		spec = *a
+	}
+	return d.foreachServerGroup(cb, spec, status)
+}
+
+func (d *ArangoDeployment) foreachServerGroup(cb ServerGroupFunc, spec DeploymentSpec, status *DeploymentStatus) error {
+	if err := cb(ServerGroupAgents, spec.Agents, &status.Members.Agents); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupSingle, d.Spec.Single, &status.Members.Single); err != nil {
+	if err := cb(ServerGroupSingle, spec.Single, &status.Members.Single); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupDBServers, d.Spec.DBServers, &status.Members.DBServers); err != nil {
+	if err := cb(ServerGroupDBServers, spec.DBServers, &status.Members.DBServers); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupCoordinators, d.Spec.Coordinators, &status.Members.Coordinators); err != nil {
+	if err := cb(ServerGroupCoordinators, spec.Coordinators, &status.Members.Coordinators); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupSyncMasters, d.Spec.SyncMasters, &status.Members.SyncMasters); err != nil {
+	if err := cb(ServerGroupSyncMasters, spec.SyncMasters, &status.Members.SyncMasters); err != nil {
		return errors.WithStack(err)
	}
-	if err := cb(ServerGroupSyncWorkers, d.Spec.SyncWorkers, &status.Members.SyncWorkers); err != nil {
+	if err := cb(ServerGroupSyncWorkers, spec.SyncWorkers, &status.Members.SyncWorkers); err != nil {
		return errors.WithStack(err)
	}
	return nil
}

+// IsAccepted checks if accepted version match current version in spec
+func (d ArangoDeployment) IsAccepted() (bool, error) {
+	if as := d.Status.AcceptedSpecVersion; as != nil {
+		sha, err := d.Spec.Checksum()
+		if err != nil {
+			return false, err
+		}
+
+		return *as == sha, nil
+	}
+
+	return false, nil
+}
+
+func (d ArangoDeployment) GetAcceptedSpec() DeploymentSpec {
+	if a := d.Status.AcceptedSpec; a != nil {
+		return *a
+	} else {
+		return d.Spec
+	}
+}
+
// IsUpToDate checks if applied version match current version in spec
func (d ArangoDeployment) IsUpToDate() (bool, error) {
	sha, err := d.Spec.Checksum()
@@ -34,6 +34,9 @@ type DeploymentStatus struct {
	// AppliedVersion defines checksum of applied spec
	AppliedVersion string `json:"appliedVersion"`

+	// AcceptedSpecVersion defines checksum of accepted spec
+	AcceptedSpecVersion *string `json:"acceptedSpecVersion,omitempty"`
+
	// ServiceName holds the name of the Service a client can use (inside the k8s cluster)
	// to access ArangoDB.
	ServiceName string `json:"serviceName,omitempty"`

@@ -117,6 +120,7 @@ func (ds *DeploymentStatus) Equal(other DeploymentStatus) bool {
		ds.Plan.Equal(other.Plan) &&
		ds.HighPriorityPlan.Equal(other.HighPriorityPlan) &&
		ds.ResourcesPlan.Equal(other.ResourcesPlan) &&
+		util.CompareStringPointers(ds.AcceptedSpecVersion, other.AcceptedSpecVersion) &&
		ds.AcceptedSpec.Equal(other.AcceptedSpec) &&
		ds.SecretHashes.Equal(other.SecretHashes) &&
		ds.Agency.Equal(other.Agency) &&
@@ -1101,6 +1101,11 @@ func (in *DeploymentSpec) DeepCopy() *DeploymentSpec {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *DeploymentStatus) DeepCopyInto(out *DeploymentStatus) {
	*out = *in
+	if in.AcceptedSpecVersion != nil {
+		in, out := &in.AcceptedSpecVersion, &out.AcceptedSpecVersion
+		*out = new(string)
+		**out = **in
+	}
	if in.Restore != nil {
		in, out := &in.Restore, &out.Restore
		*out = new(DeploymentRestoreResult)
@@ -47,7 +47,7 @@
// in spec.sync.externalAccess.accessPackageSecretNames.
func (d *Deployment) createAccessPackages(ctx context.Context) error {
	log := d.sectionLogger("access-package")
-	spec := d.apiObject.Spec
+	spec := d.GetSpec()

	if !spec.Sync.IsEnabled() {
		// We're only relevant when sync is enabled

@@ -79,11 +79,11 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error {
		if err != nil && !k8sutil.IsNotFound(err) {
			// Not serious enough to stop everything now, just sectionLogger and create an event
			log.Err(err).Warn("Failed to remove obsolete access package secret")
-			d.CreateEvent(k8sutil.NewErrorEvent("Access Package cleanup failed", err, d.apiObject))
+			d.CreateEvent(k8sutil.NewErrorEvent("Access Package cleanup failed", err, d.currentObject))
		} else {
			// Access package removed, notify user
			log.Str("secret-name", secret.GetName()).Info("Removed access package Secret")
-			d.CreateEvent(k8sutil.NewAccessPackageDeletedEvent(d.apiObject, secret.GetName()))
+			d.CreateEvent(k8sutil.NewAccessPackageDeletedEvent(d.currentObject, secret.GetName()))
		}
	}
}

@@ -97,7 +97,7 @@ func (d *Deployment) createAccessPackages(ctx context.Context) error {
// it is does not already exist.
func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName string) error {
	log := d.sectionLogger("access-package")
-	spec := d.apiObject.Spec
+	spec := d.GetSpec()

	_, err := d.acs.CurrentClusterCache().Secret().V1().Read().Get(ctx, apSecretName, meta.GetOptions{})
	if err == nil {

@@ -153,7 +153,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
		ObjectMeta: meta.ObjectMeta{
			Name: apSecretName + "-auth",
			Labels: map[string]string{
-				labelKeyOriginalDeployment: d.apiObject.GetName(),
+				labelKeyOriginalDeployment: d.currentObject.GetName(),
			},
		},
		Data: map[string][]byte{

@@ -169,7 +169,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
		ObjectMeta: meta.ObjectMeta{
			Name: apSecretName + "-ca",
			Labels: map[string]string{
-				labelKeyOriginalDeployment: d.apiObject.GetName(),
+				labelKeyOriginalDeployment: d.currentObject.GetName(),
			},
		},
		Data: map[string][]byte{

@@ -203,7 +203,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin
		},
	}
	// Attach secret to owner
-	secret.SetOwnerReferences(append(secret.GetOwnerReferences(), d.apiObject.AsOwner()))
+	secret.SetOwnerReferences(append(secret.GetOwnerReferences(), d.currentObject.AsOwner()))
	err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
		_, err := d.SecretsModInterface().Create(ctxChild, secret, meta.CreateOptions{})
		return err

@@ -216,7 +216,7 @@ func (d *Deployment) ensureAccessPackage(ctx context.Context, apSecretName strin

	// Write sectionLogger entry & create event
	log.Str("secret-name", apSecretName).Info("Created access package Secret")
-	d.CreateEvent(k8sutil.NewAccessPackageCreatedEvent(d.apiObject, apSecretName))
+	d.CreateEvent(k8sutil.NewAccessPackageCreatedEvent(d.currentObject, apSecretName))

	return nil
}
@@ -83,7 +83,7 @@ func (cc *cache) extendHost(host string) string {
func (cc *cache) getClient(group api.ServerGroup, id string) (driver.Client, error) {
	cc.mutex.Lock()
	defer cc.mutex.Unlock()
-	m, _, _ := cc.in.GetStatusSnapshot().Members.ElementByID(id)
+	m, _, _ := cc.in.GetStatus().Members.ElementByID(id)

	endpoint, err := cc.in.GenerateMemberEndpoint(group, m)
	if err != nil {

@@ -153,7 +153,7 @@ func (cc *cache) GetAgency(_ context.Context, agencyIDs ...string) (agency.Agenc

	// Not found, create a new client
	var dnsNames []string
-	for _, m := range cc.in.GetStatusSnapshot().Members.Agents {
+	for _, m := range cc.in.GetStatus().Members.Agents {
		if len(agencyIDs) > 0 {
			if !utils.StringList(agencyIDs).Has(m.ID) {
				continue
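The GetStatusSnapshot call sites above could be dropped because the reworked GetStatus (part of this commit, in the deployment package further down) already hands out a deep copy under a read lock. A condensed view of that accessor, reproduced here for context:

```go
// GetStatus returns the current status of the deployment as a deep copy,
// so callers no longer need a separate snapshot accessor.
func (d *Deployment) GetStatus() api.DeploymentStatus {
	d.currentObjectLock.RLock()
	defer d.currentObjectLock.RUnlock()

	if s := d.currentObjectStatus; s == nil {
		return api.DeploymentStatus{}
	} else {
		return *s.DeepCopy()
	}
}
```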
@@ -26,15 +26,13 @@ import (
	"time"

	"github.com/rs/zerolog"
	meta "k8s.io/apimachinery/pkg/apis/meta/v1"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	"github.com/arangodb/kube-arangodb/pkg/deployment/patch"
	"github.com/arangodb/kube-arangodb/pkg/logging"
	"github.com/arangodb/kube-arangodb/pkg/util"
	"github.com/arangodb/kube-arangodb/pkg/util/arangod"
	"github.com/arangodb/kube-arangodb/pkg/util/errors"
	"github.com/arangodb/kube-arangodb/pkg/util/globals"
	"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
	"github.com/arangodb/kube-arangodb/pkg/util/timer"
)
@@ -91,10 +89,14 @@ func (ci *clusterScalingIntegration) checkScalingCluster(ctx context.Context, ex
	defer ci.scaleEnabled.mutex.Unlock()

	if !ci.depl.config.ScalingIntegrationEnabled {
+		if err := ci.cleanClusterServers(ctx); err != nil {
+			ci.log.Err(err).Debug("Clean failed")
+			return false
+		}
		return true
	}

-	status, _ := ci.depl.GetStatus()
+	status := ci.depl.GetStatus()

	if !ci.scaleEnabled.enabled {
		// Check if it is possible to turn on scaling without any issue
@@ -110,6 +112,10 @@ func (ci *clusterScalingIntegration) checkScalingCluster(ctx context.Context, ex
		return false
	}

+	if !status.Conditions.IsTrue(api.ConditionTypeUpToDate) {
+		return false
+	}
+
	// Update cluster with our state
	safeToAskCluster, err := ci.updateClusterServerCount(ctx, expectSuccess)
	if err != nil {
@@ -153,6 +159,32 @@ func (ci *clusterScalingIntegration) ListenForClusterEvents(stopCh <-chan struct
		}
	}

+func (ci *clusterScalingIntegration) cleanClusterServers(ctx context.Context) error {
+	log := ci.log
+
+	ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
+	defer cancel()
+	c, err := ci.depl.clientCache.GetDatabase(ctxChild)
+	if err != nil {
+		return errors.WithStack(err)
+	}
+
+	req, err := arangod.GetNumberOfServers(ctxChild, c.Connection())
+	if err != nil {
+		log.Err(err).Debug("Failed to get number of servers")
+		return errors.WithStack(err)
+	}
+
+	if req.Coordinators != nil || req.DBServers != nil {
+		if err := arangod.CleanNumberOfServers(ctx, c.Connection()); err != nil {
+			log.Err(err).Debug("Failed to clean number of servers")
+			return errors.WithStack(err)
+		}
+	}
+
+	return nil
+}
+
// Perform a single inspection of the cluster
func (ci *clusterScalingIntegration) inspectCluster(ctx context.Context, expectSuccess bool) error {
	log := ci.log
@@ -208,35 +240,18 @@ func (ci *clusterScalingIntegration) inspectCluster(ctx context.Context, expectS
		return nil
	}
-	// Let's update the spec
-	apiObject := ci.depl.apiObject
-	ctxChild, cancel = globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
-	defer cancel()
-	current, err := ci.depl.deps.Client.Arango().DatabaseV1().ArangoDeployments(apiObject.Namespace).Get(ctxChild, apiObject.Name, meta.GetOptions{})
-	if err != nil {
-		return errors.WithStack(err)
-	}
-	newSpec := current.Spec.DeepCopy()
+	p := make([]patch.Item, 0, 2)
	if coordinatorsChanged {
-		newSpec.Coordinators.Count = util.NewInt(req.GetCoordinators())
+		if min, max, expected := ci.depl.GetSpec().Coordinators.GetMinCount(), ci.depl.GetSpec().Coordinators.GetMaxCount(), req.GetCoordinators(); min <= expected && expected <= max {
+			p = append(p, patch.ItemReplace(patch.NewPath("spec", "coordinators", "count"), expected))
+		}
	}
	if dbserversChanged {
-		newSpec.DBServers.Count = util.NewInt(req.GetDBServers())
-	}
-	// Validate will additionally check if
-	// min <= count <= max holds for the given server groups
-	if err := newSpec.Validate(); err != nil {
-		// Log failure & create event
-		log.Err(err).Warn("Validation of updated spec has failed")
-		ci.depl.CreateEvent(k8sutil.NewErrorEvent("Validation failed", err, apiObject))
-		// Restore original spec in cluster
-		ci.SendUpdateToCluster(current.Spec)
-	} else {
-		if err := ci.depl.updateCRSpec(ctx, *newSpec); err != nil {
-			log.Err(err).Warn("Failed to update current deployment")
-			return errors.WithStack(err)
+		if min, max, expected := ci.depl.GetSpec().DBServers.GetMinCount(), ci.depl.GetSpec().DBServers.GetMaxCount(), req.GetDBServers(); min <= expected && expected <= max {
+			p = append(p, patch.ItemReplace(patch.NewPath("spec", "dbservers", "count"), expected))
		}
	}
-	return nil
+	return ci.depl.ApplyPatch(ctx, p...)
}

// updateClusterServerCount updates the intended number of servers of the cluster.
@@ -274,6 +289,7 @@
	// This is to prevent unneseccary updates that may override some values written by the WebUI (in the case of a update loop)
	if coordinatorCount != lastNumberOfServers.GetCoordinators() || dbserverCount != lastNumberOfServers.GetDBServers() {
		if err := ci.depl.SetNumberOfServers(ctx, coordinatorCountPtr, dbserverCountPtr); err != nil {
+			log.Debug("Setting number of servers %d/%d", coordinatorCount, dbserverCount)
			if expectSuccess {
				log.Err(err).Debug("Failed to set number of servers")
			}
@@ -342,6 +358,6 @@ func (ci *clusterScalingIntegration) setNumberOfServers(ctx context.Context) err
}

func (ci *clusterScalingIntegration) getNumbersOfServers() (int, int) {
-	status, _ := ci.depl.getStatus()
+	status := ci.depl.GetStatus()
	return len(status.Members.Coordinators), len(status.Members.DBServers)
}
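The scaling integration now mutates the CR through JSON-patch items instead of rewriting the whole spec. A condensed sketch of that pattern (illustrative only; it reuses the calls visible in the hunks above, and the deploymentPatcher interface is hypothetical, introduced just for this sketch):

```go
package example

import (
	"context"

	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	"github.com/arangodb/kube-arangodb/pkg/deployment/patch"
)

// deploymentPatcher is a hypothetical narrow interface used only by this sketch.
type deploymentPatcher interface {
	GetSpec() api.DeploymentSpec
	ApplyPatch(ctx context.Context, items ...patch.Item) error
}

// applyCoordinatorCount mirrors the bounded, patch-based update used by inspectCluster above.
func applyCoordinatorCount(ctx context.Context, depl deploymentPatcher, expected int) error {
	spec := depl.GetSpec()
	// Only patch when the requested count stays within the allowed bounds.
	if min, max := spec.Coordinators.GetMinCount(), spec.Coordinators.GetMaxCount(); expected < min || expected > max {
		return nil
	}
	return depl.ApplyPatch(ctx, patch.ItemReplace(patch.NewPath("spec", "coordinators", "count"), expected))
}
```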
@ -80,12 +80,12 @@ func (d *Deployment) GetBackup(ctx context.Context, backup string) (*backupApi.A
|
|||
|
||||
// GetAPIObject returns the deployment as k8s object.
|
||||
func (d *Deployment) GetAPIObject() k8sutil.APIObject {
|
||||
return d.apiObject
|
||||
return d.currentObject
|
||||
}
|
||||
|
||||
// GetServerGroupIterator returns the deployment as ServerGroupIterator.
|
||||
func (d *Deployment) GetServerGroupIterator() reconciler.ServerGroupIterator {
|
||||
return d.apiObject
|
||||
return d.currentObject
|
||||
}
|
||||
|
||||
func (d *Deployment) GetScope() scope.Scope {
|
||||
|
@ -104,59 +104,45 @@ func (d *Deployment) GetNamespace() string {
|
|||
|
||||
// GetPhase returns the current phase of the deployment
|
||||
func (d *Deployment) GetPhase() api.DeploymentPhase {
|
||||
return d.status.last.Phase
|
||||
return d.currentObject.Status.Phase
|
||||
}
|
||||
|
||||
// GetSpec returns the current specification
|
||||
func (d *Deployment) GetSpec() api.DeploymentSpec {
|
||||
return d.apiObject.Spec
|
||||
d.currentObjectLock.RLock()
|
||||
defer d.currentObjectLock.RUnlock()
|
||||
|
||||
if s := d.currentObject.Status.AcceptedSpec; s == nil {
|
||||
return d.currentObject.Spec
|
||||
} else {
|
||||
return *s
|
||||
}
|
||||
}
|
||||
|
||||
// GetStatus returns the current status of the deployment
|
||||
// together with the current version of that status.
|
||||
func (d *Deployment) GetStatus() (api.DeploymentStatus, int32) {
|
||||
return d.getStatus()
|
||||
}
|
||||
func (d *Deployment) GetStatus() api.DeploymentStatus {
|
||||
d.currentObjectLock.RLock()
|
||||
defer d.currentObjectLock.RUnlock()
|
||||
|
||||
func (d *Deployment) getStatus() (api.DeploymentStatus, int32) {
|
||||
obj := d.status.deploymentStatusObject
|
||||
return *obj.last.DeepCopy(), obj.version
|
||||
if s := d.currentObjectStatus; s == nil {
|
||||
return api.DeploymentStatus{}
|
||||
} else {
|
||||
return *s.DeepCopy()
|
||||
}
|
||||
}
|
||||
|
||||
// UpdateStatus replaces the status of the deployment with the given status and
|
||||
// updates the resources in k8s.
|
||||
// If the given last version does not match the actual last version of the status object,
|
||||
// an error is returned.
|
||||
func (d *Deployment) UpdateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error {
|
||||
d.status.mutex.Lock()
|
||||
defer d.status.mutex.Unlock()
|
||||
|
||||
return d.updateStatus(ctx, status, lastVersion, force...)
|
||||
}
|
||||
|
||||
func (d *Deployment) updateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error {
|
||||
if d.status.version != lastVersion {
|
||||
// Status is obsolete
|
||||
d.log.
|
||||
Int32("expected-version", lastVersion).
|
||||
Int32("actual-version", d.status.version).
|
||||
Error("UpdateStatus version conflict error.")
|
||||
return errors.WithStack(errors.Newf("Status conflict error. Expected version %d, got %d", lastVersion, d.status.version))
|
||||
}
|
||||
|
||||
d.status.deploymentStatusObject = deploymentStatusObject{
|
||||
version: d.status.deploymentStatusObject.version + 1,
|
||||
last: *status.DeepCopy(),
|
||||
}
|
||||
if err := d.updateCRStatus(ctx, force...); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
return nil
|
||||
func (d *Deployment) UpdateStatus(ctx context.Context, status api.DeploymentStatus) error {
|
||||
return d.updateCRStatus(ctx, status)
|
||||
}
|
||||
|
||||
// UpdateMember updates the deployment status wrt the given member.
|
||||
func (d *Deployment) UpdateMember(ctx context.Context, member api.MemberStatus) error {
|
||||
status, lastVersion := d.GetStatus()
|
||||
status := d.GetStatus()
|
||||
_, group, found := status.Members.ElementByID(member.ID)
|
||||
if !found {
|
||||
return errors.WithStack(errors.Newf("Member %s not found", member.ID))
|
||||
|
@ -164,7 +150,7 @@ func (d *Deployment) UpdateMember(ctx context.Context, member api.MemberStatus)
|
|||
if err := status.Members.Update(member, group); err != nil {
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
if err := d.UpdateStatus(ctx, status, lastVersion); err != nil {
|
||||
if err := d.UpdateStatus(ctx, status); err != nil {
|
||||
d.log.Err(err).Debug("Updating CR status failed")
|
||||
return errors.WithStack(err)
|
||||
}
|
||||
|
@ -233,7 +219,7 @@ func (d *Deployment) getConnConfig() (http.ConnectionConfig, error) {
|
|||
ExpectContinueTimeout: 1 * time.Second,
|
||||
}
|
||||
|
||||
if d.apiObject.Spec.TLS.IsSecure() {
|
||||
if d.GetSpec().TLS.IsSecure() {
|
||||
transport.TLSClientConfig = &tls.Config{
|
||||
InsecureSkipVerify: true,
|
||||
}
|
||||
|
@ -248,7 +234,7 @@ func (d *Deployment) getConnConfig() (http.ConnectionConfig, error) {
|
|||
}
|
||||
|
||||
func (d *Deployment) getAuth() (driver.Authentication, error) {
|
||||
if !d.apiObject.Spec.Authentication.IsAuthenticated() {
|
||||
if !d.GetSpec().Authentication.IsAuthenticated() {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
|
@ -260,7 +246,7 @@ func (d *Deployment) getAuth() (driver.Authentication, error) {
|
|||
var found bool
|
||||
|
||||
// Check if we can find token in folder
|
||||
if i := d.apiObject.Status.CurrentImage; i == nil || features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) {
|
||||
if i := d.currentObject.Status.CurrentImage; i == nil || features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) {
|
||||
secret, found = d.getJWTFolderToken()
|
||||
}
|
||||
|
||||
|
@ -282,7 +268,7 @@ func (d *Deployment) getAuth() (driver.Authentication, error) {
|
|||
}
|
||||
|
||||
func (d *Deployment) getJWTFolderToken() (string, bool) {
|
||||
if i := d.apiObject.Status.CurrentImage; i == nil || features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) {
|
||||
if i := d.currentObject.Status.CurrentImage; i == nil || features.JWTRotation().Supported(i.ArangoDBVersion, i.Enterprise) {
|
||||
s, err := d.GetCachedStatus().Secret().V1().Read().Get(context.Background(), pod.JWTSecretFolder(d.GetName()), meta.GetOptions{})
|
||||
if err != nil {
|
||||
d.log.Err(err).Error("Unable to get secret")
|
||||
|
@ -306,7 +292,7 @@ func (d *Deployment) getJWTFolderToken() (string, bool) {
|
|||
}
|
||||
|
||||
func (d *Deployment) getJWTToken() (string, bool) {
|
||||
s, err := d.GetCachedStatus().Secret().V1().Read().Get(context.Background(), d.apiObject.Spec.Authentication.GetJWTSecretName(), meta.GetOptions{})
|
||||
s, err := d.GetCachedStatus().Secret().V1().Read().Get(context.Background(), d.GetSpec().Authentication.GetJWTSecretName(), meta.GetOptions{})
|
||||
if err != nil {
|
||||
return "", false
|
||||
}
|
||||
|
@ -322,7 +308,7 @@ func (d *Deployment) getJWTToken() (string, bool) {
|
|||
// GetSyncServerClient returns a cached client for a specific arangosync server.
|
||||
func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGroup, id string) (client.API, error) {
|
||||
// Fetch monitoring token
|
||||
secretName := d.apiObject.Spec.Sync.Monitoring.GetTokenSecretName()
|
||||
secretName := d.GetSpec().Sync.Monitoring.GetTokenSecretName()
|
||||
monitoringToken, err := k8sutil.GetTokenSecret(ctx, d.GetCachedStatus().Secret().V1().Read(), secretName)
|
||||
if err != nil {
|
||||
d.log.Err(err).Str("secret-name", secretName).Debug("Failed to get sync monitoring secret")
|
||||
|
@ -330,7 +316,7 @@ func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGr
|
|||
}
|
||||
|
||||
// Fetch server DNS name
|
||||
dnsName := k8sutil.CreatePodDNSNameWithDomain(d.apiObject, d.apiObject.Spec.ClusterDomain, group.AsRole(), id)
|
||||
dnsName := k8sutil.CreatePodDNSNameWithDomain(d.currentObject, d.GetSpec().ClusterDomain, group.AsRole(), id)
|
||||
|
||||
// Build client
|
||||
port := shared.ArangoSyncMasterPort
|
||||
|
@ -357,7 +343,7 @@ func (d *Deployment) GetSyncServerClient(ctx context.Context, group api.ServerGr
|
|||
// If ID is non-empty, it will be used, otherwise a new ID is created.
|
||||
func (d *Deployment) CreateMember(ctx context.Context, group api.ServerGroup, id string, mods ...reconcile.CreateMemberMod) (string, error) {
|
||||
if err := d.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) {
|
||||
nid, err := d.createMember(d.GetSpec(), s, group, id, d.apiObject, mods...)
|
||||
nid, err := d.createMember(d.GetSpec(), s, group, id, d.currentObject, mods...)
|
||||
if err != nil {
|
||||
d.log.Err(err).Str("group", group.AsRole()).Debug("Failed to create member")
|
||||
return false, errors.WithStack(err)
|
||||
|
@ -371,7 +357,7 @@ func (d *Deployment) CreateMember(ctx context.Context, group api.ServerGroup, id
|
|||
}
|
||||
|
||||
// Create event about it
|
||||
d.CreateEvent(k8sutil.NewMemberAddEvent(id, group.AsRole(), d.apiObject))
|
||||
d.CreateEvent(k8sutil.NewMemberAddEvent(id, group.AsRole(), d.currentObject))
|
||||
|
||||
return id, nil
|
||||
}
|
||||
|
@ -565,11 +551,8 @@ func (d *Deployment) GetArangoImage() string {
|
|||
return d.config.ArangoImage
|
||||
}
|
||||
|
||||
func (d *Deployment) WithStatusUpdateErr(ctx context.Context, action reconciler.DeploymentStatusUpdateErrFunc, force ...bool) error {
|
||||
d.status.mutex.Lock()
|
||||
defer d.status.mutex.Unlock()
|
||||
|
||||
status, version := d.getStatus()
|
||||
func (d *Deployment) WithStatusUpdateErr(ctx context.Context, action reconciler.DeploymentStatusUpdateErrFunc) error {
|
||||
status := d.GetStatus()
|
||||
|
||||
changed, err := action(&status)
|
||||
|
||||
|
@ -581,13 +564,13 @@ func (d *Deployment) WithStatusUpdateErr(ctx context.Context, action reconciler.
|
|||
return nil
|
||||
}
|
||||
|
||||
return d.updateStatus(ctx, status, version, force...)
|
||||
return d.updateCRStatus(ctx, status)
|
||||
}
|
||||
|
||||
func (d *Deployment) WithStatusUpdate(ctx context.Context, action reconciler.DeploymentStatusUpdateFunc, force ...bool) error {
|
||||
func (d *Deployment) WithStatusUpdate(ctx context.Context, action reconciler.DeploymentStatusUpdateFunc) error {
|
||||
return d.WithStatusUpdateErr(ctx, func(s *api.DeploymentStatus) (bool, error) {
|
||||
return action(s), nil
|
||||
}, force...)
|
||||
})
|
||||
}
|
||||
|
||||
func (d *Deployment) SecretsModInterface() secretv1.ModInterface {
|
||||
|
@ -673,14 +656,6 @@ func (d *Deployment) GenerateMemberEndpoint(group api.ServerGroup, member api.Me
|
|||
return pod.GenerateMemberEndpoint(cache, d.GetAPIObject(), d.GetSpec(), group, member)
|
||||
}
|
||||
|
||||
func (d *Deployment) GetStatusSnapshot() api.DeploymentStatus {
|
||||
s, _ := d.GetStatus()
|
||||
|
||||
z := s.DeepCopy()
|
||||
|
||||
return *z
|
||||
}
|
||||
|
||||
func (d *Deployment) ACS() sutil.ACS {
|
||||
return d.acs
|
||||
}
|
||||
|
|
|
@ -23,6 +23,7 @@ package deployment
|
|||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
@ -40,6 +41,7 @@ import (
|
|||
"github.com/arangodb/kube-arangodb/pkg/deployment/agency"
|
||||
"github.com/arangodb/kube-arangodb/pkg/deployment/chaos"
|
||||
deploymentClient "github.com/arangodb/kube-arangodb/pkg/deployment/client"
|
||||
"github.com/arangodb/kube-arangodb/pkg/deployment/features"
|
||||
memberState "github.com/arangodb/kube-arangodb/pkg/deployment/member"
|
||||
"github.com/arangodb/kube-arangodb/pkg/deployment/patch"
|
||||
"github.com/arangodb/kube-arangodb/pkg/deployment/reconcile"
|
||||
|
@ -96,11 +98,6 @@ const (
|
|||
maxInspectionInterval = 10 * util.Interval(time.Second) // Ensure we inspect the generated resources no less than with this interval
|
||||
)
|
||||
|
||||
type deploymentStatusObject struct {
|
||||
version int32
|
||||
last api.DeploymentStatus // Internal status copy of the CR
|
||||
}
|
||||
|
||||
// Deployment is the in process state of an ArangoDeployment.
|
||||
type Deployment struct {
|
||||
log logging.Logger
|
||||
|
@ -108,11 +105,10 @@ type Deployment struct {
|
|||
name string
|
||||
namespace string
|
||||
|
||||
apiObject *api.ArangoDeployment // API object
|
||||
status struct {
|
||||
mutex sync.Mutex
|
||||
deploymentStatusObject
|
||||
}
|
||||
currentObject *api.ArangoDeployment
|
||||
currentObjectStatus *api.DeploymentStatus
|
||||
currentObjectLock sync.RWMutex
|
||||
|
||||
config Config
|
||||
deps Dependencies
|
||||
|
||||
|
@ -161,11 +157,11 @@ func (d *Deployment) GetAgencyHealth() (agency.Health, bool) {
|
|||
}
|
||||
|
||||
func (d *Deployment) RefreshAgencyCache(ctx context.Context) (uint64, error) {
|
||||
if d.apiObject.Spec.Mode.Get() == api.DeploymentModeSingle {
|
||||
if d.GetSpec().Mode.Get() == api.DeploymentModeSingle {
|
||||
return 0, nil
|
||||
}
|
||||
|
||||
if info := d.apiObject.Status.Agency; info != nil {
|
||||
if info := d.currentObject.Status.Agency; info != nil {
|
||||
if size := info.Size; size != nil {
|
||||
lCtx, c := globals.GetGlobalTimeouts().Agency().WithTimeout(ctx)
|
||||
defer c()
|
||||
|
@ -173,7 +169,7 @@ func (d *Deployment) RefreshAgencyCache(ctx context.Context) (uint64, error) {
|
|||
rsize := int(*size)
|
||||
|
||||
clients := make(map[string]agencydriver.Agency)
|
||||
for _, m := range d.GetStatusSnapshot().Members.Agents {
|
||||
for _, m := range d.GetStatus().Members.Agents {
|
||||
a, err := d.GetAgency(lCtx, m.ID)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
|
@ -235,14 +231,15 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De
|
|||
i := inspector.NewInspector(inspector.NewDefaultThrottle(), deps.Client, apiObject.GetNamespace(), apiObject.GetName())
|
||||
|
||||
d := &Deployment{
|
||||
apiObject: apiObject,
|
||||
currentObject: apiObject,
|
||||
currentObjectStatus: apiObject.Status.DeepCopy(),
|
||||
name: apiObject.GetName(),
|
||||
namespace: apiObject.GetNamespace(),
|
||||
config: config,
|
||||
deps: deps,
|
||||
eventCh: make(chan *deploymentEvent, deploymentEventQueueSize),
|
||||
stopCh: make(chan struct{}),
|
||||
agencyCache: agency.NewCache(apiObject.GetNamespace(), apiObject.GetName(), apiObject.Spec.Mode),
|
||||
agencyCache: agency.NewCache(apiObject.GetNamespace(), apiObject.GetName(), apiObject.GetAcceptedSpec().Mode),
|
||||
acs: acs.NewACS(apiObject.GetUID(), i),
|
||||
}
|
||||
|
||||
|
@ -252,14 +249,9 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De
|
|||
|
||||
d.clientCache = deploymentClient.NewClientCache(d, conn.NewFactory(d.getAuth, d.getConnConfig))
|
||||
|
||||
d.status.last = *(apiObject.Status.DeepCopy())
|
||||
d.reconciler = reconcile.NewReconciler(apiObject.GetNamespace(), apiObject.GetName(), d)
|
||||
d.resilience = resilience.NewResilience(apiObject.GetNamespace(), apiObject.GetName(), d)
|
||||
d.resources = resources.NewResources(apiObject.GetNamespace(), apiObject.GetName(), d)
|
||||
if d.status.last.AcceptedSpec == nil {
|
||||
// We've validated the spec, so let's use it from now.
|
||||
d.status.last.AcceptedSpec = apiObject.Spec.DeepCopy()
|
||||
}
|
||||
|
||||
localInventory.Add(d)
|
||||
|
||||
|
@ -277,7 +269,7 @@ func New(config Config, deps Dependencies, apiObject *api.ArangoDeployment) (*De
|
|||
go d.listenForSecretEvents(d.stopCh)
|
||||
go d.listenForServiceEvents(d.stopCh)
|
||||
go d.listenForCRDEvents(d.stopCh)
|
||||
if apiObject.Spec.GetMode() == api.DeploymentModeCluster {
|
||||
if apiObject.GetAcceptedSpec().GetMode() == api.DeploymentModeCluster {
|
||||
ci := newClusterScalingIntegration(d)
|
||||
d.clusterScalingIntegration = ci
|
||||
go ci.ListenForClusterEvents(d.stopCh)
|
||||
|
@ -347,9 +339,9 @@ func (d *Deployment) run() {
|
|||
d.CreateEvent(k8sutil.NewErrorEvent("Failed to create initial topology", err, d.GetAPIObject()))
|
||||
}
|
||||
|
||||
status, lastVersion := d.GetStatus()
|
||||
status := d.GetStatus()
|
||||
status.Phase = api.DeploymentPhaseRunning
|
||||
if err := d.UpdateStatus(context.TODO(), status, lastVersion); err != nil {
|
||||
if err := d.UpdateStatus(context.TODO(), status); err != nil {
|
||||
log.Err(err).Warn("update initial CR status failed")
|
||||
}
|
||||
log.Info("start running...")
|
||||
|
@ -362,6 +354,15 @@ func (d *Deployment) run() {
|
|||
inspectionInterval := d.inspectDeployment(minInspectionInterval)
|
||||
log.Str("interval", inspectionInterval.String()).Debug("...deployment inspect started")
|
||||
|
||||
if ci := d.clusterScalingIntegration; ci != nil {
|
||||
if c := d.currentObjectStatus; c != nil {
|
||||
if a := c.AcceptedSpec; a != nil {
|
||||
log.Debug("Send initial CI update")
|
||||
ci.SendUpdateToCluster(*a)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for {
|
||||
select {
|
||||
case <-d.stopCh:
|
||||
|
@ -386,10 +387,7 @@ func (d *Deployment) run() {
|
|||
d.lookForServiceMonitorCRD()
|
||||
case <-d.updateDeploymentTrigger.Done():
|
||||
inspectionInterval = minInspectionInterval
|
||||
if err := d.handleArangoDeploymentUpdatedEvent(context.TODO()); err != nil {
|
||||
d.CreateEvent(k8sutil.NewErrorEvent("Failed to handle deployment update", err, d.GetAPIObject()))
|
||||
}
|
||||
|
||||
d.handleArangoDeploymentUpdatedEvent()
|
||||
case <-inspectionInterval.After():
|
||||
// Trigger inspection
|
||||
d.inspectTrigger.Trigger()
|
||||
|
@ -399,80 +397,102 @@ func (d *Deployment) run() {
|
|||
}
|
||||
}
|
||||
|
||||
// handleArangoDeploymentUpdatedEvent is called when the deployment is updated by the user.
|
||||
func (d *Deployment) handleArangoDeploymentUpdatedEvent(ctx context.Context) error {
|
||||
log := d.log.Str("deployment", d.apiObject.GetName())
|
||||
// validateNewSpec returns (canProceed, changed, error)
|
||||
func (d *Deployment) acceptNewSpec(ctx context.Context, depl *api.ArangoDeployment) (bool, bool, error) {
|
||||
spec := depl.Spec.DeepCopy()
|
||||
|
||||
// Get the most recent version of the deployment from the API server
|
||||
ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
|
||||
defer cancel()
|
||||
|
||||
current, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.apiObject.GetNamespace()).Get(ctxChild, d.apiObject.GetName(), meta.GetOptions{})
|
||||
origChecksum, err := spec.Checksum()
|
||||
if err != nil {
|
||||
log.Err(err).Debug("Failed to get current version of deployment from API server")
|
||||
if k8sutil.IsNotFound(err) {
|
||||
return nil
|
||||
}
|
||||
return errors.WithStack(err)
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
specBefore := d.apiObject.Spec
|
||||
status := d.status.last
|
||||
if d.status.last.AcceptedSpec != nil {
|
||||
specBefore = *status.AcceptedSpec.DeepCopy()
|
||||
}
|
||||
newAPIObject := current.DeepCopy()
|
||||
newAPIObject.Spec.SetDefaultsFrom(specBefore)
|
||||
newAPIObject.Spec.SetDefaults(d.apiObject.GetName())
|
||||
// Set defaults to the spec
|
||||
spec.SetDefaults(d.name)
|
||||
|
||||
resetFields := specBefore.ResetImmutableFields(&newAPIObject.Spec)
|
||||
if len(resetFields) > 0 {
|
||||
log.Strs("fields", resetFields...).Debug("Found modified immutable fields")
|
||||
newAPIObject.Spec.SetDefaults(d.apiObject.GetName())
|
||||
}
|
||||
if err := newAPIObject.Spec.Validate(); err != nil {
|
||||
d.CreateEvent(k8sutil.NewErrorEvent("Validation failed", err, d.apiObject))
|
||||
// Try to reset object
|
||||
if err := d.updateCRSpec(ctx, d.apiObject.Spec); err != nil {
|
||||
log.Err(err).Error("Restore original spec failed")
|
||||
d.CreateEvent(k8sutil.NewErrorEvent("Restore original failed", err, d.apiObject))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if len(resetFields) > 0 {
|
||||
for _, fieldName := range resetFields {
|
||||
log.Str("field", fieldName).Debug("Reset immutable field")
|
||||
d.CreateEvent(k8sutil.NewImmutableFieldEvent(fieldName, d.apiObject))
|
||||
if features.DeploymentSpecDefaultsRestore().Enabled() {
|
||||
if accepted := depl.Status.AcceptedSpec; accepted != nil {
|
||||
spec.SetDefaultsFrom(*accepted)
|
||||
}
|
||||
}
|
||||
|
||||
// Save updated spec
|
||||
if err := d.updateCRSpec(ctx, newAPIObject.Spec); err != nil {
|
||||
return errors.WithStack(errors.Newf("failed to update ArangoDeployment spec: %v", err))
|
||||
checksum, err := spec.Checksum()
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
// Save updated accepted spec
|
||||
{
|
||||
status, lastVersion := d.GetStatus()
|
||||
if newAPIObject.Status.IsForceReload() {
|
||||
log.Warn("Forced status reload!")
|
||||
status = newAPIObject.Status
|
||||
status.ForceStatusReload = nil
|
||||
|
||||
if features.DeploymentSpecDefaultsRestore().Enabled() {
|
||||
if origChecksum != checksum {
|
||||
// Set defaults in deployment
|
||||
if err := d.updateCRSpec(ctx, *spec); err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
status.AcceptedSpec = newAPIObject.Spec.DeepCopy()
|
||||
if err := d.UpdateStatus(ctx, status, lastVersion); err != nil {
|
||||
return errors.WithStack(errors.Newf("failed to update ArangoDeployment status: %v", err))
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Notify cluster of desired server count
|
||||
if ci := d.clusterScalingIntegration; ci != nil {
|
||||
ci.SendUpdateToCluster(d.apiObject.Spec)
|
||||
if accepted := depl.Status.AcceptedSpec; accepted == nil {
|
||||
// There is no last status, it is fresh one
|
||||
if err := spec.Validate(); err != nil {
|
||||
d.metrics.Errors.DeploymentValidationErrors++
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
// Update accepted spec
|
||||
if err := d.patchAcceptedSpec(ctx, spec, origChecksum); err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
// Reconcile with new accepted spec
|
||||
return false, true, nil
|
||||
} else {
|
||||
// If we are equal then proceed
|
||||
acceptedChecksum, err := accepted.Checksum()
|
||||
if err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
if acceptedChecksum == checksum {
|
||||
return true, false, nil
|
||||
}
|
||||
|
||||
// We have already accepted spec, verify immutable part
|
||||
if fields := accepted.ResetImmutableFields(spec); len(fields) > 0 {
|
||||
d.metrics.Errors.DeploymentImmutableErrors += uint64(len(fields))
|
||||
if features.DeploymentSpecDefaultsRestore().Enabled() {
|
||||
d.log.Error("Restoring immutable fields: %s", strings.Join(fields, ", "))
|
||||
|
||||
// In case of enabled, do restore
|
||||
if err := d.updateCRSpec(ctx, *spec); err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
return false, true, nil
|
||||
}
|
||||
|
||||
// We have immutable fields, throw an error and proceed
|
||||
return true, false, errors.Newf("Immutable fields cannot be changed: %s", strings.Join(fields, ", "))
|
||||
}
|
||||
|
||||
// Update accepted spec
|
||||
if err := d.patchAcceptedSpec(ctx, spec, origChecksum); err != nil {
|
||||
return false, false, err
|
||||
}
|
||||
|
||||
// Reconcile with new accepted spec
|
||||
return false, true, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (d *Deployment) patchAcceptedSpec(ctx context.Context, spec *api.DeploymentSpec, checksum string) error {
|
||||
return d.ApplyPatch(ctx, patch.ItemReplace(patch.NewPath("status", "accepted-spec"), spec),
|
||||
patch.ItemReplace(patch.NewPath("status", "acceptedSpecVersion"), checksum))
|
||||
}
// handleArangoDeploymentUpdatedEvent is called when the deployment is updated by the user.
func (d *Deployment) handleArangoDeploymentUpdatedEvent() {
// Trigger inspect
d.inspectTrigger.Trigger()

return nil
}

// CreateEvent creates a given event.

@@ -481,108 +501,12 @@ func (d *Deployment) CreateEvent(evt *k8sutil.Event) {
d.deps.EventRecorder.Event(evt.InvolvedObject, evt.Type, evt.Reason, evt.Message)
}

// Update the status of the API object from the internal status
func (d *Deployment) updateCRStatus(ctx context.Context, force ...bool) error {
if len(force) == 0 || !force[0] {
if d.apiObject.Status.Equal(d.status.last) {
// Nothing has changed
return nil
}
}

// Send update to API server
depls := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.GetNamespace())
attempt := 0
for {
attempt++
q := patch.NewPatch(patch.ItemReplace(patch.NewPath("status"), d.status.last))

if d.apiObject.GetDeletionTimestamp() == nil {
if ensureFinalizers(d.apiObject) {
q.ItemAdd(patch.NewPath("metadata", "finalizers"), d.apiObject.Finalizers)
}
}

var newAPIObject *api.ArangoDeployment
err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
p, err := q.Marshal()
if err != nil {
return err
}

newAPIObject, err = depls.Patch(ctxChild, d.GetName(), types.JSONPatchType, p, meta.PatchOptions{})

return err
})
if err == nil {
// Update internal object
d.apiObject = newAPIObject
return nil
}
if attempt < 10 {
continue
}
if err != nil {
d.log.Err(err).Debug("failed to patch ArangoDeployment status")
return errors.WithStack(errors.Newf("failed to patch ArangoDeployment status: %v", err))
}
}
func (d *Deployment) updateCRStatus(ctx context.Context, status api.DeploymentStatus) error {
return d.ApplyPatch(ctx, patch.ItemReplace(patch.NewPath("status"), status))
}
// Update the spec part of the API object (d.apiObject)
// to the given object, while preserving the status.
// On success, d.apiObject is updated.
func (d *Deployment) updateCRSpec(ctx context.Context, newSpec api.DeploymentSpec, force ...bool) error {

if len(force) == 0 || !force[0] {
if d.apiObject.Spec.Equal(&newSpec) {
d.log.Trace("Nothing to update in updateCRSpec")
// Nothing to update
return nil
}
}

// Send update to API server
update := d.apiObject.DeepCopy()
attempt := 0
for {
attempt++
update.Spec = newSpec
update.Status = d.status.last
ns := d.apiObject.GetNamespace()
var newAPIObject *api.ArangoDeployment
err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
var err error
newAPIObject, err = d.deps.Client.Arango().DatabaseV1().ArangoDeployments(ns).Update(ctxChild, update, meta.UpdateOptions{})

return err
})
if err == nil {
// Update internal object
d.apiObject = newAPIObject
return nil
}
if attempt < 10 && k8sutil.IsConflict(err) {
// API object may have been changed already,
// Reload api object and try again
var current *api.ArangoDeployment

err = globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
var err error
current, err = d.deps.Client.Arango().DatabaseV1().ArangoDeployments(ns).Get(ctxChild, update.GetName(), meta.GetOptions{})

return err
})
if err == nil {
update = current.DeepCopy()
continue
}
}
if err != nil {
d.log.Err(err).Debug("failed to patch ArangoDeployment spec")
return errors.WithStack(errors.Newf("failed to patch ArangoDeployment spec: %v", err))
}
}
func (d *Deployment) updateCRSpec(ctx context.Context, spec api.DeploymentSpec) error {
return d.ApplyPatch(ctx, patch.ItemReplace(patch.NewPath("spec"), spec))
}
// isOwnerOf returns true if the given object belong to this deployment.

@@ -591,7 +515,7 @@ func (d *Deployment) isOwnerOf(obj meta.Object) bool {
if len(ownerRefs) < 1 {
return false
}
return ownerRefs[0].UID == d.apiObject.UID
return ownerRefs[0].UID == d.currentObject.UID
}

// lookForServiceMonitorCRD checks if there is a CRD for the ServiceMonitor

@@ -647,23 +571,52 @@ func (d *Deployment) SetNumberOfServers(ctx context.Context, noCoordinators, noD
}

func (d *Deployment) ApplyPatch(ctx context.Context, p ...patch.Item) error {
parser := patch.Patch(p)
if len(p) == 0 {
return nil
}

data, err := parser.Marshal()
d.currentObjectLock.Lock()
defer d.currentObjectLock.Unlock()

return d.applyPatch(ctx, p...)
}

func (d *Deployment) applyPatch(ctx context.Context, p ...patch.Item) error {
depls := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.GetNamespace())

if len(p) == 0 {
return nil
}

pd, err := patch.NewPatch(p...).Marshal()
if err != nil {
return err
}

c := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.apiObject.GetNamespace())
attempt := 0
for {
attempt++

var newAPIObject *api.ArangoDeployment
err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
newAPIObject, err = depls.Patch(ctxChild, d.GetName(), types.JSONPatchType, pd, meta.PatchOptions{})

ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
defer cancel()
depl, err := c.Patch(ctxChild, d.apiObject.GetName(), types.JSONPatchType, data, meta.PatchOptions{})
if err != nil {
return err
}
})
if err == nil {
// Update internal object

d.apiObject = depl
d.currentObject = newAPIObject.DeepCopy()
d.currentObjectStatus = newAPIObject.Status.DeepCopy()

return nil
}
if attempt < 10 {
continue
}
if err != nil {
d.log.Err(err).Debug("failed to patch ArangoDeployment")
return errors.WithStack(errors.Newf("failed to patch ArangoDeployment: %v", err))
}
}
}
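ApplyPatch above serializes the supplied patch items once and then retries the Patch call for up to 10 attempts before giving up. Below is a minimal standalone sketch of that bounded-retry shape; applyWithRetry and its parameters are illustrative names, not part of the operator's API.

// A small self-contained sketch of the retry loop pattern, decoupled from the
// Kubernetes client; the real code additionally applies per-call timeouts.
package main

import (
	"context"
	"errors"
	"fmt"
)

func applyWithRetry(ctx context.Context, maxAttempts int, apply func(ctx context.Context) error) error {
	var err error
	for attempt := 1; attempt <= maxAttempts; attempt++ {
		if err = apply(ctx); err == nil {
			return nil
		}
		if ctx.Err() != nil {
			return ctx.Err() // stop retrying once the context is cancelled
		}
	}
	return fmt.Errorf("patch did not succeed after %d attempts: %w", maxAttempts, err)
}

func main() {
	calls := 0
	err := applyWithRetry(context.Background(), 10, func(ctx context.Context) error {
		calls++
		if calls < 3 {
			return errors.New("transient conflict") // simulate a conflicting update
		}
		return nil
	})
	fmt.Println(calls, err) // 3 <nil>
}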
@ -69,7 +69,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -77,7 +77,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -131,7 +131,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -139,7 +139,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -196,7 +196,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -204,7 +204,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -266,7 +266,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -274,7 +274,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -345,7 +345,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -353,7 +353,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -410,7 +410,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -418,7 +418,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -478,7 +478,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -486,7 +486,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -551,7 +551,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -559,7 +559,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -632,7 +632,7 @@ func TestEnsurePod_ArangoDB_NodeAffinity(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -640,7 +640,7 @@ func TestEnsurePod_ArangoDB_NodeAffinity(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
|
|
@ -48,7 +48,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -99,7 +99,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -159,7 +159,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -216,7 +216,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -277,7 +277,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
OperatorImage: testImageOperator,
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -334,7 +334,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -342,7 +342,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -391,7 +391,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -399,7 +399,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -448,7 +448,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -457,7 +457,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
Images: createTestImages(false),
|
||||
}
|
||||
|
||||
deployment.status.last.Members.DBServers[0].PersistentVolumeClaimName = testPersistentVolumeClaimName
|
||||
deployment.currentObjectStatus.Members.DBServers[0].PersistentVolumeClaimName = testPersistentVolumeClaimName
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
ExpectedEvent: "member dbserver is created",
|
||||
|
@ -506,7 +506,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
OperatorImage: testImageOperator,
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -514,7 +514,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -562,7 +562,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -616,7 +616,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
agentWithPersistentVolumeClaim := firstAgentStatus
|
||||
agentWithPersistentVolumeClaim.PersistentVolumeClaimName = testPersistentVolumeClaimName
|
||||
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
agentWithPersistentVolumeClaim,
|
||||
|
@ -668,7 +668,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -722,7 +722,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -782,7 +782,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -843,7 +843,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -900,7 +900,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -969,7 +969,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -1034,7 +1034,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -1105,7 +1105,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -1178,7 +1178,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Coordinators: api.MemberStatusList{
|
||||
firstCoordinatorStatus,
|
||||
|
@ -1239,7 +1239,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Single: api.MemberStatusList{
|
||||
singleStatus,
|
||||
|
|
|
@ -46,7 +46,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -111,7 +111,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -194,7 +194,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -257,7 +257,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
|
|||
EncryptionRotation: true,
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
|
|
@ -43,7 +43,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -51,7 +51,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -102,7 +102,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -110,9 +110,9 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
deployment.apiObject.Spec.Features = &api.DeploymentFeatures{
|
||||
deployment.currentObject.Spec.Features = &api.DeploymentFeatures{
|
||||
FoxxQueues: util.NewBool(false),
|
||||
}
|
||||
|
||||
|
@ -165,7 +165,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Coordinators: api.MemberStatusList{
|
||||
firstCoordinatorStatus,
|
||||
|
@ -173,7 +173,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.Coordinators[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.Coordinators[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupCoordinators, firstCoordinatorStatus)
|
||||
},
|
||||
|
@ -224,7 +224,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Coordinators: api.MemberStatusList{
|
||||
firstCoordinatorStatus,
|
||||
|
@ -232,9 +232,9 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.Coordinators[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.Coordinators[0].IsInitialized = true
|
||||
|
||||
deployment.apiObject.Spec.Features = &api.DeploymentFeatures{
|
||||
deployment.currentObject.Spec.Features = &api.DeploymentFeatures{
|
||||
FoxxQueues: util.NewBool(false),
|
||||
}
|
||||
|
||||
|
@ -287,7 +287,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Coordinators: api.MemberStatusList{
|
||||
firstCoordinatorStatus,
|
||||
|
@ -295,9 +295,9 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.Coordinators[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.Coordinators[0].IsInitialized = true
|
||||
|
||||
deployment.apiObject.Spec.Features = &api.DeploymentFeatures{
|
||||
deployment.currentObject.Spec.Features = &api.DeploymentFeatures{
|
||||
FoxxQueues: util.NewBool(true),
|
||||
}
|
||||
|
||||
|
@ -350,7 +350,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Single: api.MemberStatusList{
|
||||
singleStatus,
|
||||
|
@ -358,7 +358,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.Single[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.Single[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupSingle, singleStatus)
|
||||
|
||||
|
@ -411,7 +411,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Single: api.MemberStatusList{
|
||||
singleStatus,
|
||||
|
@ -419,9 +419,9 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.Single[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.Single[0].IsInitialized = true
|
||||
|
||||
deployment.apiObject.Spec.Features = &api.DeploymentFeatures{
|
||||
deployment.currentObject.Spec.Features = &api.DeploymentFeatures{
|
||||
FoxxQueues: util.NewBool(false),
|
||||
}
|
||||
|
||||
|
@ -476,7 +476,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Single: api.MemberStatusList{
|
||||
singleStatus,
|
||||
|
@ -484,9 +484,9 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.Single[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.Single[0].IsInitialized = true
|
||||
|
||||
deployment.apiObject.Spec.Features = &api.DeploymentFeatures{
|
||||
deployment.currentObject.Spec.Features = &api.DeploymentFeatures{
|
||||
FoxxQueues: util.NewBool(true),
|
||||
}
|
||||
|
||||
|
|
|
@@ -34,18 +34,34 @@ import (
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
)

var expectedFinalizers = []string{
constants.FinalizerDeplRemoveChildFinalizers,
}

// ensureFinalizers adds all required finalizers to the given deployment (in memory).
func ensureFinalizers(depl *api.ArangoDeployment) bool {
for _, f := range depl.GetFinalizers() {
if f == constants.FinalizerDeplRemoveChildFinalizers {
// Finalizer already set
return false
}
}
// Set finalizers
depl.SetFinalizers(append(depl.GetFinalizers(), constants.FinalizerDeplRemoveChildFinalizers))
fx := make(map[string]bool, len(expectedFinalizers))

return true
st := len(depl.Finalizers)

for _, fn := range expectedFinalizers {
fx[fn] = false
}

for _, f := range depl.GetFinalizers() {
if _, ok := fx[f]; ok {
fx[f] = true
}
}

for _, fn := range expectedFinalizers {
if !fx[fn] {
depl.Finalizers = append(depl.Finalizers, fn)
}
}

// Set finalizers
return st != len(depl.Finalizers)
}

// runDeploymentFinalizers goes through the list of ArangoDeployment finalizers to see if they can be removed.

@@ -55,7 +71,7 @@ func (d *Deployment) runDeploymentFinalizers(ctx context.Context, cachedStatus i
depls := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(d.GetNamespace())
ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
defer cancel()
updated, err := depls.Get(ctxChild, d.apiObject.GetName(), meta.GetOptions{})
updated, err := depls.Get(ctxChild, d.currentObject.GetName(), meta.GetOptions{})
if err != nil {
return errors.WithStack(err)
}
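The reworked ensureFinalizers builds a presence map of the expected finalizers and appends only the missing ones, reporting whether the list changed. A self-contained sketch of the same set-difference idea over plain string slices follows; the finalizer name used in the example is made up, not necessarily the operator's constant.

// Sketch only: reproduces the "add missing expected finalizers" logic without
// the ArangoDeployment type.
package main

import "fmt"

func ensureExpectedFinalizers(existing, expected []string) ([]string, bool) {
	present := make(map[string]bool, len(existing))
	for _, f := range existing {
		present[f] = true
	}
	changed := false
	for _, f := range expected {
		if !present[f] {
			existing = append(existing, f)
			changed = true
		}
	}
	return existing, changed
}

func main() {
	// Hypothetical finalizer name for illustration.
	const fin = "database.arangodb.com/example-finalizer"
	fins, changed := ensureExpectedFinalizers([]string{fin}, []string{fin})
	fmt.Println(fins, changed) // list unchanged, false
}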
@@ -63,7 +63,7 @@ func TestEnsurePod_ArangoDB_ImagePropagation(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
Agents: api.MemberStatusList{
firstAgentStatus,

@@ -115,7 +115,7 @@ func TestEnsurePod_ArangoDB_ImagePropagation(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
Agents: api.MemberStatusList{
firstAgentStatus,

@@ -167,7 +167,7 @@ func TestEnsurePod_ArangoDB_ImagePropagation(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
Agents: api.MemberStatusList{
firstAgentStatus,
@@ -80,7 +80,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
// Deployment is marked for deletion
if err := d.runDeploymentFinalizers(ctxReconciliation, d.GetCachedStatus()); err != nil {
hasError = true
d.CreateEvent(k8sutil.NewErrorEvent("ArangoDeployment finalizer inspection failed", err, d.apiObject))
d.CreateEvent(k8sutil.NewErrorEvent("ArangoDeployment finalizer inspection failed", err, d.currentObject))
}
} else {
// Check if maintenance annotation is set

@@ -91,14 +91,56 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
return nextInterval
}
}

if ensureFinalizers(updated) {
if err := d.ApplyPatch(ctxReconciliation, patch.ItemReplace(patch.NewPath("metadata", "finalizers"), updated.Finalizers)); err != nil {
d.log.Err(err).Debug("Unable to set finalizers")
}
}

if canProceed, changed, err := d.acceptNewSpec(ctxReconciliation, updated); err != nil {
d.log.Err(err).Debug("Verification of deployment failed")

if !canProceed {
return minInspectionInterval // Retry ASAP
}
} else if changed {
d.log.Info("Accepted new spec")
// Notify cluster of desired server count
if ci := d.clusterScalingIntegration; ci != nil {
if c := d.currentObjectStatus; c != nil {
if a := c.AcceptedSpec; a != nil {
if c.Conditions.IsTrue(api.ConditionTypeUpToDate) {
ci.SendUpdateToCluster(*a)
}
}
}
}
return minInspectionInterval // Retry ASAP
} else if !canProceed {
d.log.Err(err).Error("Cannot proceed with reconciliation")
return minInspectionInterval // Retry ASAP
}

// Ensure that status is up to date
if !d.currentObjectStatus.Equal(updated.Status) {
if err := d.updateCRStatus(ctxReconciliation, *d.currentObjectStatus); err != nil {
d.log.Err(err).Error("Unable to refresh status")
return minInspectionInterval // Retry ASAP
}
}

d.currentObject = updated

d.metrics.Deployment.Accepted = updated.Status.Conditions.IsTrue(api.ConditionTypeSpecAccepted)
d.metrics.Deployment.UpToDate = updated.Status.Conditions.IsTrue(api.ConditionTypeUpToDate)

// Is the deployment in failed state, if so, give up.
if d.GetPhase() == api.DeploymentPhaseFailed {
d.log.Debug("Deployment is in Failed state.")
return nextInterval
}

d.apiObject = updated

d.GetMembersState().RefreshState(ctxReconciliation, updated.Status.Members.AsList())
d.GetMembersState().Log(d.log)
if err := d.WithStatusUpdateErr(ctxReconciliation, func(s *api.DeploymentStatus) (bool, error) {
@@ -108,7 +150,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
return changed, nil
}
}); err != nil {
d.CreateEvent(k8sutil.NewErrorEvent("Upgrade failed", err, d.apiObject))
d.CreateEvent(k8sutil.NewErrorEvent("Upgrade failed", err, d.currentObject))
nextInterval = minInspectionInterval
d.recentInspectionErrors++
return nextInterval.ReduceTo(maxInspectionInterval)

@@ -120,7 +162,7 @@ func (d *Deployment) inspectDeployment(lastInterval util.Interval) util.Interval
nextInterval = inspectNextInterval
hasError = true

d.CreateEvent(k8sutil.NewErrorEvent("Reconciliation failed", err, d.apiObject))
d.CreateEvent(k8sutil.NewErrorEvent("Reconciliation failed", err, d.currentObject))
} else {
nextInterval = minInspectionInterval
}

@@ -148,27 +190,47 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
}()

// Ensure that spec and status checksum are same
spec := d.GetSpec()
status, _ := d.getStatus()
currentSpec := d.currentObject.Spec
status := d.GetStatus()

nextInterval = lastInterval
inspectError = nil

checksum, err := spec.Checksum()
currentChecksum, err := currentSpec.Checksum()
if err != nil {
return minInspectionInterval, errors.Wrapf(err, "Calculation of spec failed")
} else {
condition, exists := status.Conditions.Get(api.ConditionTypeSpecAccepted)
if v := status.AcceptedSpecVersion; (v == nil || currentChecksum != *v) && (!exists || condition.IsTrue()) {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeSpecAccepted, false, "Spec Changed", "Spec Object changed. Waiting to be accepted", currentChecksum); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable to update SpecAccepted condition")
}

return minInspectionInterval, nil // Retry ASAP
} else if v != nil {
if *v == currentChecksum && !condition.IsTrue() {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeSpecAccepted, true, "Spec Accepted", "Spec Object accepted", currentChecksum); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable to update SpecAccepted condition")
}

return minInspectionInterval, nil // Retry ASAP
}
}
}

if !status.Conditions.IsTrue(api.ConditionTypeSpecAccepted) {
condition, exists := status.Conditions.Get(api.ConditionTypeUpToDate)
if checksum != status.AppliedVersion && (!exists || condition.IsTrue()) {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeUpToDate, false, "Spec Changed", "Spec Object changed. Waiting until plan will be applied", checksum); err != nil {
if !exists || condition.IsTrue() {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeUpToDate, false, "Spec Changed", "Spec Object changed. Waiting until plan will be applied", currentChecksum); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable to update UpToDate condition")

}

return minInspectionInterval, nil // Retry ASAP
}
}

if err := d.acs.Inspect(ctx, d.apiObject, d.deps.Client, d.GetCachedStatus()); err != nil {
if err := d.acs.Inspect(ctx, d.currentObject, d.deps.Client, d.GetCachedStatus()); err != nil {
d.log.Err(err).Warn("Unable to handle ACS objects")
}
@@ -211,7 +273,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
}

// Ensure we have image info
if retrySoon, exists, err := d.ensureImages(ctx, d.apiObject, d.GetCachedStatus()); err != nil {
if retrySoon, exists, err := d.ensureImages(ctx, d.currentObject, d.GetCachedStatus()); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Image detection failed")
} else if retrySoon || !exists {
return minInspectionInterval, nil

@@ -258,7 +320,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
d.refreshMaintenanceTTL(ctx)

// Create scale/update plan
if _, ok := d.apiObject.Annotations[deployment.ArangoDeploymentPlanCleanAnnotation]; ok {
if _, ok := d.currentObject.Annotations[deployment.ArangoDeploymentPlanCleanAnnotation]; ok {
if err := d.ApplyPatch(ctx, patch.ItemRemove(patch.NewPath("metadata", "annotations", deployment.ArangoDeploymentPlanCleanAnnotation))); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable to create remove annotation patch")
}

@@ -266,7 +328,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
if err := d.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
s.Plan = nil
return true
}, true); err != nil {
}); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable clean plan")
}
} else if err, updated := d.reconciler.CreatePlan(ctx); err != nil {

@@ -292,20 +354,20 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
}
}

if d.apiObject.Status.IsPlanEmpty() && status.AppliedVersion != checksum {
if v := status.AcceptedSpecVersion; v != nil && d.currentObject.Status.IsPlanEmpty() && status.AppliedVersion != *v {
if err := d.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
s.AppliedVersion = checksum
s.AppliedVersion = *v
return true
}); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable to update UpToDate condition")
}

return minInspectionInterval, nil
} else if status.AppliedVersion == checksum {
} else {
isUpToDate, reason := d.isUpToDateStatus(status)

if !isUpToDate && status.Conditions.IsTrue(api.ConditionTypeUpToDate) {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeUpToDate, false, reason, "There are pending operations in plan or members are in restart process", checksum); err != nil {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeUpToDate, false, reason, "There are pending operations in plan or members are in restart process", *v); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable to update UpToDate condition")
}

@@ -313,7 +375,7 @@ func (d *Deployment) inspectDeploymentWithError(ctx context.Context, lastInterva
}

if isUpToDate && !status.Conditions.IsTrue(api.ConditionTypeUpToDate) {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeUpToDate, true, "Spec is Up To Date", "Spec is Up To Date", checksum); err != nil {
if err = d.updateConditionWithHash(ctx, api.ConditionTypeUpToDate, true, "Spec is Up To Date", "Spec is Up To Date", *v); err != nil {
return minInspectionInterval, errors.Wrapf(err, "Unable to update UpToDate condition")
}

@@ -359,6 +421,18 @@ func (d *Deployment) isUpToDateStatus(status api.DeploymentStatus) (upToDate boo

upToDate = true

if v := status.AcceptedSpecVersion; v == nil || status.AppliedVersion != *v {
upToDate = false
reason = "Spec is not accepted"
return
}

if !status.Conditions.Check(api.ConditionTypeSpecAccepted).Exists().IsTrue().Evaluate() {
upToDate = false
reason = "Spec is not accepted"
return
}

if !status.Conditions.Check(api.ConditionTypeReachable).Exists().IsTrue().Evaluate() {
upToDate = false
return

@@ -382,7 +456,7 @@ func (d *Deployment) isUpToDateStatus(status api.DeploymentStatus) (upToDate boo
}

func (d *Deployment) refreshMaintenanceTTL(ctx context.Context) {
if d.apiObject.Spec.Mode.Get() == api.DeploymentModeSingle {
if d.GetSpec().Mode.Get() == api.DeploymentModeSingle {
return
}

@@ -396,7 +470,9 @@ func (d *Deployment) refreshMaintenanceTTL(ctx context.Context) {
return
}

condition, ok := d.status.last.Conditions.Get(api.ConditionTypeMaintenance)
status := d.GetStatus()

condition, ok := status.Conditions.Get(api.ConditionTypeMaintenance)
maintenance := agencyState.Supervision.Maintenance

if !ok || !condition.IsTrue() {

@@ -405,14 +481,14 @@ func (d *Deployment) refreshMaintenanceTTL(ctx context.Context) {

// Check GracePeriod
if t, ok := maintenance.Time(); ok {
if time.Until(t) < time.Hour-d.apiObject.Spec.Timeouts.GetMaintenanceGracePeriod() {
if time.Until(t) < time.Hour-d.GetSpec().Timeouts.GetMaintenanceGracePeriod() {
if err := d.SetAgencyMaintenanceMode(ctx, true); err != nil {
return
}
d.log.Info("Refreshed maintenance lock")
}
} else {
if condition.LastUpdateTime.Add(d.apiObject.Spec.Timeouts.GetMaintenanceGracePeriod()).Before(time.Now()) {
if condition.LastUpdateTime.Add(d.GetSpec().Timeouts.GetMaintenanceGracePeriod()).Before(time.Now()) {
if err := d.SetAgencyMaintenanceMode(ctx, true); err != nil {
return
}
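The blocks above drive the SpecAccepted and UpToDate conditions off three values: the checksum of the current spec, status.AcceptedSpecVersion, and status.AppliedVersion. Below is a compact, self-contained sketch of that comparison; hashing the JSON encoding with SHA-256 and the simplified field names are assumptions for illustration, not a drop-in replica of the spec's Checksum() method.

// Sketch only: shows how checksum equality can gate "accepted" and "up to date".
package main

import (
	"crypto/sha256"
	"encoding/json"
	"fmt"
)

type deploymentSpec struct {
	Mode  string `json:"mode"`
	Image string `json:"image"`
}

func checksum(spec deploymentSpec) (string, error) {
	data, err := json.Marshal(spec)
	if err != nil {
		return "", err
	}
	return fmt.Sprintf("%x", sha256.Sum256(data)), nil
}

func main() {
	current := deploymentSpec{Mode: "Cluster", Image: "arangodb:3.9"} // example values
	currentChecksum, _ := checksum(current)

	acceptedSpecVersion := currentChecksum // spec has been accepted
	appliedVersion := ""                   // plan has not been applied yet

	specAccepted := acceptedSpecVersion == currentChecksum
	upToDate := specAccepted && appliedVersion == acceptedSpecVersion
	fmt.Println(specAccepted, upToDate) // true false
}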
@ -50,7 +50,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -110,7 +110,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -170,7 +170,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -238,7 +238,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
|
|
@ -43,7 +43,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -100,7 +100,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -158,7 +158,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Agents: api.MemberStatusList{
|
||||
firstAgentStatus,
|
||||
|
@ -209,7 +209,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -265,7 +265,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -316,7 +316,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Coordinators: api.MemberStatusList{
|
||||
firstCoordinatorStatus,
|
||||
|
@ -372,7 +372,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
Coordinators: api.MemberStatusList{
|
||||
firstCoordinatorStatus,
|
||||
|
|
|
@ -107,7 +107,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -115,7 +115,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -165,7 +165,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -173,7 +173,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
@ -222,7 +222,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
|
|||
},
|
||||
},
|
||||
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
|
||||
deployment.status.last = api.DeploymentStatus{
|
||||
deployment.currentObjectStatus = &api.DeploymentStatus{
|
||||
Members: api.DeploymentStatusMembers{
|
||||
DBServers: api.MemberStatusList{
|
||||
firstDBServerStatus,
|
||||
|
@ -230,7 +230,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
|
|||
},
|
||||
Images: createTestImages(false),
|
||||
}
|
||||
deployment.status.last.Members.DBServers[0].IsInitialized = true
|
||||
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
|
||||
|
||||
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
|
||||
},
|
||||
|
|
|
@@ -46,7 +46,7 @@ func TestEnsurePod_Sync_Error(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -65,7 +65,7 @@ func TestEnsurePod_Sync_Error(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -88,7 +88,7 @@ func TestEnsurePod_Sync_Error(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -122,7 +122,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -150,7 +150,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -178,7 +178,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -214,7 +214,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -305,7 +305,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncMasters: api.MemberStatusList{
firstSyncMaster,

@@ -407,7 +407,7 @@ func TestEnsurePod_Sync_Worker(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
SyncWorkers: api.MemberStatusList{
firstSyncWorker,
@@ -88,7 +88,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
Coordinators: api.MemberStatusList{
firstCoordinatorStatus,

@@ -163,7 +163,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
Coordinators: api.MemberStatusList{
firstCoordinatorStatus,

@@ -238,7 +238,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
Coordinators: api.MemberStatusList{
firstCoordinatorStatus,

@@ -313,7 +313,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
Coordinators: api.MemberStatusList{
firstCoordinatorStatus,

@@ -421,7 +421,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
createTLSSNISecret(t, deployment.SecretsModInterface(), "sni2", deployment.Namespace())
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,
@@ -67,7 +67,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,

@@ -75,7 +75,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},

@@ -127,7 +127,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,

@@ -135,7 +135,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},

@@ -191,7 +191,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
},
},
Helper: func(t *testing.T, deployment *Deployment, testCase *testCaseStruct) {
deployment.status.last = api.DeploymentStatus{
deployment.currentObjectStatus = &api.DeploymentStatus{
Members: api.DeploymentStatusMembers{
DBServers: api.MemberStatusList{
firstDBServerStatus,

@@ -199,7 +199,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
},
Images: createTestImages(false),
}
deployment.status.last.Members.DBServers[0].IsInitialized = true
deployment.currentObjectStatus.Members.DBServers[0].IsInitialized = true
testCase.createTestPodData(deployment, api.ServerGroupDBServers, firstDBServerStatus)
},
@@ -59,7 +59,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
d, eventRecorder := createTestDeployment(t, testCase.config, testCase.ArangoDeployment)
startDepl := d.status.last.DeepCopy()
startDepl := d.GetStatus()
errs := 0
for {

@@ -88,7 +88,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
f := startDepl.Members.AsList()
if len(f) == 0 {
f = d.status.last.Members.AsList()
f = d.GetStatus().Members.AsList()
}
// Add Expected pod defaults

@@ -102,7 +102,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
}
// Create custom resource in the fake kubernetes API
_, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(testNamespace).Create(context.Background(), d.apiObject, meta.CreateOptions{})
_, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(testNamespace).Create(context.Background(), d.currentObject, meta.CreateOptions{})
require.NoError(t, err)
if testCase.Resources != nil {

@@ -124,17 +124,17 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
}
// Set Pending phase
for _, e := range d.status.last.Members.AsList() {
for _, e := range d.GetStatus().Members.AsList() {
m := e.Member
if m.Phase == api.MemberPhaseNone {
m.Phase = api.MemberPhasePending
require.NoError(t, d.status.last.Members.Update(m, e.Group))
require.NoError(t, d.currentObjectStatus.Members.Update(m, e.Group))
}
}
// Set members
var loopErr error
for _, e := range d.status.last.Members.AsList() {
for _, e := range d.GetStatus().Members.AsList() {
m := e.Member
group := e.Group
member := api.ArangoMember{

@@ -167,13 +167,13 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
require.NoError(t, d.acs.CurrentClusterCache().Refresh(context.Background()))
groupSpec := d.apiObject.Spec.GetServerGroupSpec(group)
groupSpec := d.GetSpec().GetServerGroupSpec(group)
image, ok := d.resources.SelectImage(d.apiObject.Spec, d.status.last)
image, ok := d.resources.SelectImage(d.GetSpec(), d.GetStatus())
require.True(t, ok)
var template *core.PodTemplateSpec
template, loopErr = d.resources.RenderPodTemplateForMember(context.Background(), d.ACS(), d.apiObject.Spec, d.status.last, m.ID, image)
template, loopErr = d.resources.RenderPodTemplateForMember(context.Background(), d.ACS(), d.GetSpec(), d.GetStatus(), m.ID, image)
if loopErr != nil {
break
}

@@ -237,8 +237,7 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
assert.Fail(t, "expected event", "expected event with message '%s'", testCase.ExpectedEvent)
}
status, version := d.GetStatus()
assert.Equal(t, int32(1), version)
status := d.GetStatus()
checkEachMember := func(group api.ServerGroup, groupSpec api.ServerGroupSpec, status *api.MemberStatusList) error {
for _, m := range *status {

@@ -256,12 +255,12 @@ func runTestCase(t *testing.T, testCase testCaseStruct) {
require.Equal(t, false, exist)
require.NotNil(t, m.Image)
require.True(t, m.Image.Equal(d.apiObject.Status.CurrentImage))
require.True(t, m.Image.Equal(d.currentObject.Status.CurrentImage))
}
return nil
}
d.GetServerGroupIterator().ForeachServerGroup(checkEachMember, &status)
d.GetServerGroupIterator().ForeachServerGroupAccepted(checkEachMember, &status)
}
})
}

@@ -491,7 +491,8 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara
i := inspector.NewInspector(throttle.NewAlwaysThrottleComponents(), deps.Client, arangoDeployment.GetNamespace(), arangoDeployment.GetName())
d := &Deployment{
apiObject: arangoDeployment,
currentObject: arangoDeployment,
currentObjectStatus: arangoDeployment.Status.DeepCopy(),
name: arangoDeployment.GetName(),
namespace: arangoDeployment.GetNamespace(),
config: config,

@@ -602,10 +603,10 @@ func (testCase *testCaseStruct) createTestPodData(deployment *Deployment, group
}
// Add image info
if member, group, ok := deployment.apiObject.Status.Members.ElementByID(memberStatus.ID); ok {
member.Image = deployment.apiObject.Status.CurrentImage
if member, group, ok := deployment.currentObject.Status.Members.ElementByID(memberStatus.ID); ok {
member.Image = deployment.currentObject.Status.CurrentImage
deployment.apiObject.Status.Members.Update(member, group)
deployment.currentObject.Status.Members.Update(member, group)
}
}
pkg/deployment/features/deployment.go (new file, 38 lines)

@@ -0,0 +1,38 @@
//
// DISCLAIMER
//
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//

package features

func init() {
	registerFeature(deploymentSpecDefaultsRestore)
}

var deploymentSpecDefaultsRestore = &feature{
	name:               "deployment-spec-defaults-restore",
	description:        "Restore defaults from last accepted state of deployment",
	version:            "3.6.0",
	enterpriseRequired: false,
	enabledByDefault:   true,
	hidden:             true,
}

func DeploymentSpecDefaultsRestore() Feature {
	return deploymentSpecDefaultsRestore
}
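The file above registers a hidden feature gate that is enabled by default. As a rough, hypothetical sketch only (this call site is not part of the commit, and Enabled() is assumed to be the usual accessor on the Feature type in this package), a caller could guard the defaults-restore behaviour like this:

package deployment

import (
	api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
	"github.com/arangodb/kube-arangodb/pkg/deployment/features"
)

// effectiveSpec is a hypothetical helper: reconcile against the accepted spec
// (with restored defaults) only while the feature gate is enabled.
func (d *Deployment) effectiveSpec() api.DeploymentSpec {
	if features.DeploymentSpecDefaultsRestore().Enabled() {
		return d.GetAcceptedSpec()
	}
	return d.GetSpec()
}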
@@ -87,15 +87,15 @@ type imagesBuilder struct {
// ensureImages creates pods needed to detect ImageID for specified images.
// Returns: retrySoon, error
func (d *Deployment) ensureImages(ctx context.Context, apiObject *api.ArangoDeployment, cachedStatus inspectorInterface.Inspector) (bool, bool, error) {
status, lastVersion := d.GetStatus()
status := d.GetStatus()
ib := imagesBuilder{
Context: d,
APIObject: apiObject,
Spec: apiObject.Spec,
Spec: apiObject.GetAcceptedSpec(),
Status: status,
Log: d.log,
UpdateCRStatus: func(status api.DeploymentStatus) error {
if err := d.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := d.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
return nil
@@ -424,7 +424,7 @@ func TestEnsureImages(t *testing.T) {
// Arrange
d, _ := createTestDeployment(t, Config{}, testCase.ArangoDeployment)
d.status.last = api.DeploymentStatus{
d.currentObjectStatus = &api.DeploymentStatus{
Images: createTestImages(false),
}

@@ -434,13 +434,13 @@ func TestEnsureImages(t *testing.T) {
}
// Create custom resource in the fake kubernetes API
_, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(testNamespace).Create(context.Background(), d.apiObject, meta.CreateOptions{})
_, err := d.deps.Client.Arango().DatabaseV1().ArangoDeployments(testNamespace).Create(context.Background(), d.currentObject, meta.CreateOptions{})
require.NoError(t, err)
require.NoError(t, d.acs.CurrentClusterCache().Refresh(context.Background()))
// Act
retrySoon, _, err := d.ensureImages(context.Background(), d.apiObject, d.GetCachedStatus())
retrySoon, _, err := d.ensureImages(context.Background(), d.currentObject, d.GetCachedStatus())
// Assert
assert.EqualValues(t, testCase.RetrySoon, retrySoon)
@@ -46,7 +46,7 @@ func (d *Deployment) listenForPodEvents(stopCh <-chan struct{}) {
rw := k8sutil.NewResourceWatcher(
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"pods",
d.apiObject.GetNamespace(),
d.currentObject.GetNamespace(),
&core.Pod{},
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {

@@ -90,7 +90,7 @@ func (d *Deployment) listenForPVCEvents(stopCh <-chan struct{}) {
rw := k8sutil.NewResourceWatcher(
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"persistentvolumeclaims",
d.apiObject.GetNamespace(),
d.currentObject.GetNamespace(),
&core.PersistentVolumeClaim{},
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {

@@ -134,7 +134,7 @@ func (d *Deployment) listenForSecretEvents(stopCh <-chan struct{}) {
rw := k8sutil.NewResourceWatcher(
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"secrets",
d.apiObject.GetNamespace(),
d.currentObject.GetNamespace(),
&core.Secret{},
cache.ResourceEventHandlerFuncs{
// Note: For secrets we look at all of them because they do not have to be owned by this deployment.

@@ -179,7 +179,7 @@ func (d *Deployment) listenForServiceEvents(stopCh <-chan struct{}) {
rw := k8sutil.NewResourceWatcher(
d.deps.Client.Kubernetes().CoreV1().RESTClient(),
"services",
d.apiObject.GetNamespace(),
d.currentObject.GetNamespace(),
&core.Service{},
cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
@@ -34,9 +34,9 @@ import (
func (d *Deployment) createAgencyMapping(ctx context.Context) error {
spec := d.GetSpec()
status := d.GetStatusSnapshot()
status := d.GetStatus()
if !spec.Mode.HasAgents() {
if !spec.Mode.Get().HasAgents() {
return nil
}

@@ -127,7 +127,7 @@ func (d *Deployment) renderMember(spec api.DeploymentSpec, status *api.Deploymen
}
deploymentName := apiObject.GetName()
role := group.AsRole()
arch := apiObject.Spec.Architecture.GetDefault()
arch := apiObject.GetAcceptedSpec().Architecture.GetDefault()
switch group {
case api.ServerGroupSingle:
@@ -31,6 +31,14 @@ type Metrics struct {
Fetches uint64
Index uint64
}

Errors struct {
DeploymentValidationErrors, DeploymentImmutableErrors uint64
}

Deployment struct {
Accepted, UpToDate bool
}
}

func (d *Deployment) CollectMetrics(m metrics.PushMetric) {

@@ -38,6 +46,20 @@ func (d *Deployment) CollectMetrics(m metrics.PushMetric) {
m.Push(metric_descriptions.ArangodbOperatorAgencyFetchesCounter(float64(d.metrics.Agency.Fetches), d.namespace, d.name))
m.Push(metric_descriptions.ArangodbOperatorAgencyIndexGauge(float64(d.metrics.Agency.Index), d.namespace, d.name))

m.Push(metric_descriptions.ArangodbOperatorResourcesArangodeploymentValidationErrorsCounter(float64(d.metrics.Errors.DeploymentValidationErrors), d.namespace, d.name))
m.Push(metric_descriptions.ArangodbOperatorResourcesArangodeploymentImmutableErrorsCounter(float64(d.metrics.Errors.DeploymentImmutableErrors), d.namespace, d.name))

if d.metrics.Deployment.Accepted {
m.Push(metric_descriptions.ArangodbOperatorResourcesArangodeploymentAcceptedGauge(1, d.namespace, d.name))
} else {
m.Push(metric_descriptions.ArangodbOperatorResourcesArangodeploymentAcceptedGauge(0, d.namespace, d.name))
}
if d.metrics.Deployment.UpToDate {
m.Push(metric_descriptions.ArangodbOperatorResourcesArangodeploymentUptodateGauge(1, d.namespace, d.name))
} else {
m.Push(metric_descriptions.ArangodbOperatorResourcesArangodeploymentUptodateGauge(0, d.namespace, d.name))
}

if c := d.agencyCache; c != nil {
m.Push(metric_descriptions.ArangodbOperatorAgencyCachePresentGauge(1, d.namespace, d.name))
if h, ok := c.Health(); ok {
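The Accepted and UpToDate booleans above are exported as 0/1 gauges through two near-identical if/else blocks. A small helper, shown here only as a sketch of the same bool-to-gauge pattern (it is not part of the commit), would collapse the repetition:

package deployment

// boolToGauge is a hypothetical helper mirroring the if/else blocks above.
func boolToGauge(v bool) float64 {
	if v {
		return 1
	}
	return 0
}

// Example, using the descriptor names from CollectMetrics above:
//   m.Push(metric_descriptions.ArangodbOperatorResourcesArangodeploymentAcceptedGauge(boolToGauge(d.metrics.Deployment.Accepted), d.namespace, d.name))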
@@ -94,7 +94,7 @@ func (i *inventory) Collect(m chan<- prometheus.Metric) {
}
spec := deployment.GetSpec()
status, _ := deployment.GetStatus()
status := deployment.GetStatus()
for _, member := range status.Members.AsList() {
p.Push(i.deploymentMetricsMembersMetric.Gauge(1, deployment.GetNamespace(), deployment.GetName(), member.Group.AsRole(), member.Member.ID))
@@ -57,7 +57,7 @@ type actionArangoMemberUpdatePodSpec struct {
// the start time needs to be recorded and a ready condition needs to be checked.
func (a *actionArangoMemberUpdatePodSpec) Start(ctx context.Context) (bool, error) {
spec := a.actionCtx.GetSpec()
status := a.actionCtx.GetStatusSnapshot()
status := a.actionCtx.GetStatus()
m, found := a.actionCtx.GetMemberStatusByID(a.action.MemberID)
if !found {
@@ -58,7 +58,7 @@ type actionBackupRestore struct {
func (a actionBackupRestore) Start(ctx context.Context) (bool, error) {
spec := a.actionCtx.GetSpec()
status := a.actionCtx.GetStatusSnapshot()
status := a.actionCtx.GetStatus()
if spec.RestoreFrom == nil {
return true, nil

@@ -90,7 +90,7 @@ func (a actionBackupRestore) Start(ctx context.Context) (bool, error) {
s.Restore = result
return true
}, true); err != nil {
}); err != nil {
return false, err
}

@@ -54,7 +54,7 @@ func (a actionBackupRestoreClean) Start(ctx context.Context) (bool, error) {
s.Restore = nil
return true
}, true); err != nil {
}); err != nil {
return false, err
}
@@ -56,7 +56,7 @@ func (a actionBootstrapUpdate) Start(ctx context.Context) (bool, error) {
status.Conditions.Update(api.ConditionTypeBootstrapSucceded, true, "Bootstrap successful", "The bootstrap process has been completed successfully")
}
return true
}, true); err != nil {
}); err != nil {
return false, err
}
@@ -199,20 +199,20 @@ func (ac *actionContext) GetMembersState() member.StateInspector {
return ac.context.GetMembersState()
}
func (ac *actionContext) UpdateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error {
return ac.context.UpdateStatus(ctx, status, lastVersion, force...)
func (ac *actionContext) UpdateStatus(ctx context.Context, status api.DeploymentStatus) error {
return ac.context.UpdateStatus(ctx, status)
}
func (ac *actionContext) GetNamespace() string {
return ac.context.GetNamespace()
}
func (ac *actionContext) GetStatus() (api.DeploymentStatus, int32) {
func (ac *actionContext) GetStatus() api.DeploymentStatus {
return ac.context.GetStatus()
}
func (ac *actionContext) GetStatusSnapshot() api.DeploymentStatus {
return ac.context.GetStatusSnapshot()
return ac.context.GetStatus()
}
func (ac *actionContext) GenerateMemberEndpoint(group api.ServerGroup, member api.MemberStatus) (string, error) {

@@ -255,12 +255,12 @@ func (ac *actionContext) GetBackup(ctx context.Context, backup string) (*backupA
return ac.context.GetBackup(ctx, backup)
}
func (ac *actionContext) WithStatusUpdateErr(ctx context.Context, action reconciler.DeploymentStatusUpdateErrFunc, force ...bool) error {
return ac.context.WithStatusUpdateErr(ctx, action, force...)
func (ac *actionContext) WithStatusUpdateErr(ctx context.Context, action reconciler.DeploymentStatusUpdateErrFunc) error {
return ac.context.WithStatusUpdateErr(ctx, action)
}
func (ac *actionContext) WithStatusUpdate(ctx context.Context, action reconciler.DeploymentStatusUpdateFunc, force ...bool) error {
return ac.context.WithStatusUpdate(ctx, action, force...)
func (ac *actionContext) WithStatusUpdate(ctx context.Context, action reconciler.DeploymentStatusUpdateFunc) error {
return ac.context.WithStatusUpdate(ctx, action)
}
func (ac *actionContext) UpdateClusterCondition(ctx context.Context, conditionType api.ConditionType, status bool, reason, message string) error {

@@ -291,7 +291,7 @@ func (ac *actionContext) GetSpec() api.DeploymentSpec {
// Returns member status, true when found, or false
// when no such member is found.
func (ac *actionContext) GetMemberStatusByID(id string) (api.MemberStatus, bool) {
status, _ := ac.context.GetStatus()
status := ac.context.GetStatus()
m, _, ok := status.Members.ElementByID(id)
return m, ok
}

@@ -301,7 +301,7 @@ func (ac *actionContext) GetMemberStatusByID(id string) (api.MemberStatus, bool)
// Returns member status, true when found, or false
// when no such member is found.
func (ac *actionContext) GetMemberStatusAndGroupByID(id string) (api.MemberStatus, api.ServerGroup, bool) {
status, _ := ac.context.GetStatus()
status := ac.context.GetStatus()
return status.Members.ElementByID(id)
}

@@ -317,7 +317,7 @@ func (ac *actionContext) CreateMember(ctx context.Context, group api.ServerGroup
// UpdateMember updates the deployment status wrt the given member.
func (ac *actionContext) UpdateMember(ctx context.Context, member api.MemberStatus) error {
status, lastVersion := ac.context.GetStatus()
status := ac.context.GetStatus()
_, group, found := status.Members.ElementByID(member.ID)
if !found {
return errors.WithStack(errors.Newf("Member %s not found", member.ID))

@@ -325,7 +325,7 @@ func (ac *actionContext) UpdateMember(ctx context.Context, member api.MemberStat
if err := status.Members.Update(member, group); err != nil {
return errors.WithStack(err)
}
if err := ac.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := ac.context.UpdateStatus(ctx, status); err != nil {
ac.log.Err(err).Debug("Updating CR status failed")
return errors.WithStack(err)
}

@@ -334,7 +334,7 @@ func (ac *actionContext) UpdateMember(ctx context.Context, member api.MemberStat
// RemoveMemberByID removes a member with given id.
func (ac *actionContext) RemoveMemberByID(ctx context.Context, id string) error {
status, lastVersion := ac.context.GetStatus()
status := ac.context.GetStatus()
_, group, found := status.Members.ElementByID(id)
if !found {
return nil

@@ -344,7 +344,7 @@ func (ac *actionContext) RemoveMemberByID(ctx context.Context, id string) error
return errors.WithStack(err)
}
// Save removed member
if err := ac.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := ac.context.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
return nil

@@ -353,14 +353,14 @@ func (ac *actionContext) RemoveMemberByID(ctx context.Context, id string) error
// GetImageInfo returns the image info for an image with given name.
// Returns: (info, infoFound)
func (ac *actionContext) GetImageInfo(imageName string) (api.ImageInfo, bool) {
status, _ := ac.context.GetStatus()
status := ac.context.GetStatus()
return status.Images.GetByImage(imageName)
}
// GetCurrentImageInfo returns the image info for the current image.
// Returns: (info, infoFound)
func (ac *actionContext) GetCurrentImageInfo() (api.ImageInfo, bool) {
status, _ := ac.context.GetStatus()
status := ac.context.GetStatus()
if status.CurrentImage == nil {
return api.ImageInfo{}, false

@@ -378,7 +378,7 @@ func (ac *actionContext) SetCurrentImage(ctx context.Context, imageInfo api.Imag
return true
}
return false
}, true)
})
}
// DisableScalingCluster disables scaling DBservers and coordinators
@@ -102,7 +102,7 @@ func (a actionImpl) wrap(in *zerolog.Event) *zerolog.Event {
Str("group", a.action.Group.AsRole()).
Str("member-id", a.action.MemberID)
if status, _ := a.actionCtx.GetStatus(); status.Members.ContainsID(a.action.MemberID) {
if status := a.actionCtx.GetStatus(); status.Members.ContainsID(a.action.MemberID) {
if member, _, ok := status.Members.ElementByID(a.action.MemberID); ok {
in = in.Str("phase", string(member.Phase))
}
@@ -46,7 +46,7 @@ type jwtRefreshAction struct {
}
func (a *jwtRefreshAction) CheckProgress(ctx context.Context) (bool, bool, error) {
if folder, err := ensureJWTFolderSupport(a.actionCtx.GetSpec(), a.actionCtx.GetStatusSnapshot()); err != nil || !folder {
if folder, err := ensureJWTFolderSupport(a.actionCtx.GetSpec(), a.actionCtx.GetStatus()); err != nil || !folder {
return true, false, nil
}
@@ -42,7 +42,7 @@ const (
)
func ensureJWTFolderSupportFromAction(actionCtx ActionContext) (bool, error) {
return ensureJWTFolderSupport(actionCtx.GetSpec(), actionCtx.GetStatusSnapshot())
return ensureJWTFolderSupport(actionCtx.GetSpec(), actionCtx.GetStatus())
}
func ensureJWTFolderSupport(spec api.DeploymentSpec, status api.DeploymentStatus) (bool, error) {
@@ -22,13 +22,9 @@
package reconcile
import (
"context"
"strconv"
"github.com/arangodb/go-driver"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
"github.com/arangodb/kube-arangodb/pkg/util/globals"
"github.com/arangodb/kube-arangodb/pkg/deployment/agency"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

@@ -40,53 +36,23 @@ func (r *Reconciler) createCleanOutPlan(ctx context.Context, _ k8sutil.APIObject
return nil
}
if !status.Conditions.IsTrue(api.ConditionTypeUpToDate) {
// Do not consider to mark cleanedout servers when changes are propagating
return nil
}
cluster, err := getCluster(ctx, planCtx)
if err != nil {
r.log.Err(err).Warn("Unable to get cluster")
return nil
}
ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
defer cancel()
health, err := cluster.Health(ctxChild)
if err != nil {
r.log.Err(err).Warn("Unable to get cluster health")
return nil
}
var plan api.Plan
for id, member := range health.Health {
switch member.Role {
case driver.ServerRoleDBServer:
memberStatus, ok := status.Members.DBServers.ElementByID(string(id))
cache, ok := planCtx.GetAgencyCache()
if !ok {
continue
}
if memberStatus.Conditions.IsTrue(api.ConditionTypeCleanedOut) {
continue
}
if isCleanedOut, err := cluster.IsCleanedOut(ctx, string(id)); err != nil {
r.log.Err(err).Str("id", string(id)).Warn("Unable to get clean out status")
r.log.Debug("AgencyCache is not ready")
return nil
} else if isCleanedOut {
r.log.
Str("role", string(member.Role)).
Str("id", string(id)).
Info("server is cleaned out so operator must do the same")
}
action := actions.NewAction(api.ActionTypeSetMemberCondition, api.ServerGroupDBServers, withPredefinedMember(string(id)),
"server is cleaned out so operator must do the same").
AddParam(string(api.ConditionTypeCleanedOut), strconv.FormatBool(true))
for _, m := range status.Members.AsListInGroup(api.ServerGroupDBServers) {
cleanedStatus := m.Member.Conditions.IsTrue(api.ConditionTypeCleanedOut)
cleanedAgency := cache.Target.CleanedServers.Contains(agency.Server(m.Member.ID))
plan = append(plan, action)
if cleanedStatus != cleanedAgency {
if cleanedAgency {
plan = append(plan, updateMemberConditionActionV2("DBServer cleaned", api.ConditionTypeCleanedOut, m.Group, m.Member.ID, true, "DBServer cleaned", "DBServer cleaned", ""))
} else {
plan = append(plan, removeMemberConditionActionV2("DBServer is not cleaned", api.ConditionTypeCleanedOut, m.Group, m.Member.ID))
}
}
}
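The rewritten createCleanOutPlan above no longer queries the cluster; it compares each DBServer's CleanedOut condition with the agency's Target/CleanedServers list. The per-member decision can be condensed as below; this is an illustrative sketch reusing the helper names from the diff, not code taken from the commit:

// cleanOutConditionAction is a hypothetical condensed form of the per-member
// decision above: keep the CleanedOut condition in sync with the agency view.
func cleanOutConditionAction(cleanedInStatus, cleanedInAgency bool, group api.ServerGroup, id string) api.Plan {
	if cleanedInStatus == cleanedInAgency {
		return nil // condition already matches the agency, nothing to do
	}
	if cleanedInAgency {
		return api.Plan{updateMemberConditionActionV2("DBServer cleaned", api.ConditionTypeCleanedOut, group, id, true, "DBServer cleaned", "DBServer cleaned", "")}
	}
	return api.Plan{removeMemberConditionActionV2("DBServer is not cleaned", api.ConditionTypeCleanedOut, group, id)}
}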
@@ -48,7 +48,7 @@ func (d *Reconciler) generatePlanFunc(gen planGeneratorFunc, planner planner) pl
// Create plan
apiObject := d.context.GetAPIObject()
spec := d.context.GetSpec()
status, _ := d.context.GetStatus()
status := d.context.GetStatus()
builderCtx := newPlanBuilderContext(d.context)
newPlan, backoffs, changed := gen(ctx, apiObject, planner.Get(&status), spec, status, builderCtx)
@@ -80,6 +80,11 @@ func (r *Reconciler) createMemberFailedRestoreInternal(_ context.Context, _ k8su
continue
}
if agencyState.Target.CleanedServers.Contains(agency.Server(m.ID)) {
memberLog.Info("Member is CleanedOut")
continue
}
if agencyState.Plan.Collections.IsDBServerLeader(agency.Server(m.ID)) {
memberLog.Info("Recreating leader DBServer - it cannot be removed gracefully")
plan = append(plan, actions.NewAction(api.ActionTypeRecreateMember, group, m))
@@ -25,6 +25,8 @@ import (
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/actions"
"github.com/arangodb/kube-arangodb/pkg/deployment/agency"
"github.com/arangodb/kube-arangodb/pkg/deployment/reconciler"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
)

@@ -84,7 +86,7 @@ func (r *Reconciler) createScalePlan(status api.DeploymentStatus, members api.Me
Debug("Creating scale-up plan")
} else if len(members) > count {
// Note, we scale down 1 member at a time
if m, err := members.SelectMemberToRemove(topologyMissingMemberToRemoveSelector(status.Topology), topologyAwarenessMemberToRemoveSelector(group, status.Topology)); err != nil {
if m, err := members.SelectMemberToRemove(getCleanedServer(context), topologyMissingMemberToRemoveSelector(status.Topology), topologyAwarenessMemberToRemoveSelector(group, status.Topology)); err != nil {
r.planLogger.Err(err).Str("role", group.AsRole()).Warn("Failed to select member to remove")
} else {
ready, message := groupReadyForRestart(context, status, m, group)

@@ -158,3 +160,16 @@ func (r *Reconciler) createReplaceMemberPlan(ctx context.Context, apiObject k8su
func filterScaleUP(a api.Action) bool {
return a.Type == api.ActionTypeAddMember
}
func getCleanedServer(ctx reconciler.ArangoAgencyGet) api.MemberToRemoveSelector {
return func(m api.MemberStatusList) (string, error) {
if a, ok := ctx.GetAgencyCache(); ok {
for _, member := range m {
if a.Target.CleanedServers.Contains(agency.Server(member.ID)) {
return member.ID, nil
}
}
}
return "", nil
}
}
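getCleanedServer above contributes one selector to SelectMemberToRemove. Assuming selectors are tried in order and the first non-empty member ID wins, they can be composed generically; the helper below is a sketch under that assumption and is not part of the commit:

// firstMatching is a hypothetical composer for MemberToRemoveSelector values.
func firstMatching(selectors ...api.MemberToRemoveSelector) api.MemberToRemoveSelector {
	return func(m api.MemberStatusList) (string, error) {
		for _, sel := range selectors {
			id, err := sel(m)
			if err != nil {
				return "", err
			}
			if id != "" {
				return id, nil // first selector with an opinion wins
			}
		}
		return "", nil // no preference; caller falls back to its default choice
	}
}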
@@ -140,7 +140,7 @@ func (c *testContext) ApplyPatch(ctx context.Context, p ...patch.Item) error {
}
func (c *testContext) GetStatusSnapshot() api.DeploymentStatus {
s, _ := c.GetStatus()
s := c.GetStatus()
return *s.DeepCopy()
}

@@ -184,7 +184,7 @@ func (c *testContext) ArangoMembersModInterface() arangomemberv1.ModInterface {
panic("implement me")
}
func (c *testContext) WithStatusUpdateErr(ctx context.Context, action reconciler.DeploymentStatusUpdateErrFunc, force ...bool) error {
func (c *testContext) WithStatusUpdateErr(ctx context.Context, action reconciler.DeploymentStatusUpdateErrFunc) error {
_, err := action(&c.ArangoDeployment.Status)
return err
}

@@ -221,7 +221,7 @@ func (c *testContext) SetAgencyMaintenanceMode(ctx context.Context, enabled bool
panic("implement me")
}
func (c *testContext) WithStatusUpdate(ctx context.Context, action reconciler.DeploymentStatusUpdateFunc, force ...bool) error {
func (c *testContext) WithStatusUpdate(ctx context.Context, action reconciler.DeploymentStatusUpdateFunc) error {
action(&c.ArangoDeployment.Status)
return nil
}

@@ -276,7 +276,7 @@ func (c *testContext) GetSpec() api.DeploymentSpec {
return c.ArangoDeployment.Spec
}
func (c *testContext) UpdateStatus(_ context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error {
func (c *testContext) UpdateStatus(_ context.Context, status api.DeploymentStatus) error {
c.ArangoDeployment.Status = status
return nil
}

@@ -366,8 +366,8 @@ func (c *testContext) GetExpectedPodArguments(apiObject meta.Object, deplSpec ap
}
// GetStatus returns the current status of the deployment
func (c *testContext) GetStatus() (api.DeploymentStatus, int32) {
return c.ArangoDeployment.Status, 0
func (c *testContext) GetStatus() api.DeploymentStatus {
return c.ArangoDeployment.Status
}
func addAgentsToStatus(t *testing.T, status *api.DeploymentStatus, count int) {

@@ -1243,7 +1243,7 @@ func TestCreatePlan(t *testing.T) {
}
require.NoError(t, err)
status, _ := testCase.context.GetStatus()
status := testCase.context.GetStatus()
if len(testCase.ExpectedHighPlan) > 0 {
require.Len(t, status.HighPriorityPlan, len(testCase.ExpectedHighPlan))
@@ -178,7 +178,7 @@ func (d *Reconciler) executePlanInLoop(ctx context.Context) (bool, bool, error)
}
func (d *Reconciler) executePlanStatus(ctx context.Context, pg planner) (bool, bool, error) {
loopStatus, _ := d.context.GetStatus()
loopStatus := d.context.GetStatus()
plan := pg.Get(&loopStatus)

@@ -189,11 +189,11 @@ func (d *Reconciler) executePlanStatus(ctx context.Context, pg planner) (bool, b
newPlan, callAgain, callInLoop, err := d.executePlan(ctx, plan, pg)
// Refresh current status
loopStatus, lastVersion := d.context.GetStatus()
loopStatus = d.context.GetStatus()
if pg.Set(&loopStatus, newPlan) {
d.planLogger.Info("Updating plan")
if err := d.context.UpdateStatus(ctx, loopStatus, lastVersion, true); err != nil {
if err := d.context.UpdateStatus(ctx, loopStatus); err != nil {
d.planLogger.Err(err).Debug("Failed to update CR status")
return false, false, errors.WithStack(err)
}
@@ -62,7 +62,7 @@ func (r *Reconciler) WrapLogger(in *zerolog.Event) *zerolog.Event {
// CheckDeployment checks for obviously broken things and fixes them immediately
func (r *Reconciler) CheckDeployment(ctx context.Context) error {
spec := r.context.GetSpec()
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
if spec.GetMode().HasCoordinators() {
// Check if there are coordinators
@@ -21,18 +21,13 @@
package reconcile
import (
"context"
"sort"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/arangodb/go-driver"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/util"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/globals"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod"
)

@@ -52,23 +47,6 @@ func secretKeysToList(s *core.Secret) []string {
return keys
}
// getCluster returns the cluster connection.
func getCluster(ctx context.Context, planCtx PlanBuilderContext) (driver.Cluster, error) {
c, err := planCtx.GetMembersState().State().GetDatabaseClient()
if err != nil {
return nil, errors.WithStack(errors.Wrapf(err, "Unable to get database client"))
}
ctxChild, cancel := globals.GetGlobalTimeouts().ArangoD().WithTimeout(ctx)
defer cancel()
cluster, err := c.Cluster(ctxChild)
if err != nil {
return nil, errors.WithStack(errors.Wrapf(err, "Unable to get cluster client"))
}
return cluster, nil
}
func ifPodUIDMismatch(m api.MemberStatus, a api.Action, i pod.Inspector) bool {
ut, ok := a.GetParam(api.ParamPodUID)
if !ok || ut == "" {
@@ -38,11 +38,11 @@ import (
// ServerGroupIterator provides a helper to callback on every server
// group of the deployment.
type ServerGroupIterator interface {
// ForeachServerGroup calls the given callback for all server groups.
// ForeachServerGroupAccepted calls the given callback for all accepted server groups.
// If the callback returns an error, this error is returned and no other server
// groups are processed.
// Groups are processed in this order: agents, single, dbservers, coordinators, syncmasters, syncworkers
ForeachServerGroup(cb api.ServerGroupFunc, status *api.DeploymentStatus) error
ForeachServerGroupAccepted(cb api.ServerGroupFunc, status *api.DeploymentStatus) error
}
type DeploymentStatusUpdateErrFunc func(s *api.DeploymentStatus) (bool, error)

@@ -50,13 +50,13 @@ type DeploymentStatusUpdateFunc func(s *api.DeploymentStatus) bool
type DeploymentStatusUpdate interface {
// WithStatusUpdateErr update status of ArangoDeployment with defined modifier. If action returns True action is taken
WithStatusUpdateErr(ctx context.Context, action DeploymentStatusUpdateErrFunc, force ...bool) error
WithStatusUpdateErr(ctx context.Context, action DeploymentStatusUpdateErrFunc) error
// WithStatusUpdate update status of ArangoDeployment with defined modifier. If action returns True action is taken
WithStatusUpdate(ctx context.Context, action DeploymentStatusUpdateFunc, force ...bool) error
WithStatusUpdate(ctx context.Context, action DeploymentStatusUpdateFunc) error
// UpdateStatus replaces the status of the deployment with the given status and
// updates the resources in k8s.
UpdateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error
UpdateStatus(ctx context.Context, status api.DeploymentStatus) error
// UpdateMember updates the deployment status wrt the given member.
UpdateMember(ctx context.Context, member api.MemberStatus) error
}

@@ -104,9 +104,7 @@ type DeploymentInfoGetter interface {
// GetSpec returns the current specification of the deployment
GetSpec() api.DeploymentSpec
// GetStatus returns the current status of the deployment
GetStatus() (api.DeploymentStatus, int32)
// GetStatusSnapshot returns the current status of the deployment without revision
GetStatusSnapshot() api.DeploymentStatus
GetStatus() api.DeploymentStatus
// GetMode the specified mode of deployment
GetMode() api.DeploymentMode
// GetName returns the name of the deployment
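With lastVersion and the force flag gone from DeploymentStatusUpdate, callers now mutate the status inside a single closure. A usage sketch under the new signatures (the condition, reason and message strings here are illustrative only, not taken from the commit):

// Hypothetical call site; statusCtx is any DeploymentStatusUpdate implementation.
if err := statusCtx.WithStatusUpdate(ctx, func(s *api.DeploymentStatus) bool {
	// Returning true asks the context to persist the modified status.
	return s.Conditions.Update(api.ConditionTypeUpToDate, true, "Reconciled", "Accepted spec propagated")
}); err != nil {
	return errors.WithStack(err)
}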
@@ -21,9 +21,6 @@
package resilience
import (
"context"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/reconciler"
)

@@ -31,12 +28,6 @@ import (
type Context interface {
reconciler.DeploymentDatabaseClient
reconciler.ArangoAgency
// GetSpec returns the current specification of the deployment
GetSpec() api.DeploymentSpec
// GetStatus returns the current status of the deployment
GetStatus() (api.DeploymentStatus, int32)
// UpdateStatus replaces the status of the deployment with the given status and
// updates the resources in k8s.
UpdateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error
reconciler.DeploymentInfoGetter
reconciler.DeploymentStatusUpdate
}
@@ -38,7 +38,7 @@ const (
// - They are frequently restarted
// - They cannot be scheduled for a long time (TODO)
func (r *Resilience) CheckMemberFailure(ctx context.Context) error {
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
updateStatusNeeded := false
for _, e := range status.Members.AsList() {

@@ -86,7 +86,7 @@ func (r *Resilience) CheckMemberFailure(ctx context.Context) error {
}
if updateStatusNeeded {
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
}
@@ -26,7 +26,6 @@ import (
core "k8s.io/api/core/v1"
backupApi "github.com/arangodb/kube-arangodb/pkg/apis/backup/v1"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/deployment/acs/sutil"
"github.com/arangodb/kube-arangodb/pkg/deployment/member"
"github.com/arangodb/kube-arangodb/pkg/deployment/reconciler"

@@ -52,9 +51,6 @@ type Context interface {
// GetServerGroupIterator returns the deployment as ServerGroupIterator.
GetServerGroupIterator() reconciler.ServerGroupIterator
// UpdateStatus replaces the status of the deployment with the given status and
// updates the resources in k8s.
UpdateStatus(ctx context.Context, status api.DeploymentStatus, lastVersion int32, force ...bool) error
// GetOperatorImage returns the image name of operator image
GetOperatorImage() string
// GetArangoImage returns the image name containing the default arango image
@@ -79,7 +79,7 @@ func (r *Resources) syncMembersInCluster(ctx context.Context, health memberState
return found
}
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
updateStatusNeeded := false
for _, e := range status.Members.AsListInGroups(api.ServerGroupCoordinators, api.ServerGroupDBServers) {

@@ -110,7 +110,7 @@ func (r *Resources) syncMembersInCluster(ctx context.Context, health memberState
if updateStatusNeeded {
log.Debug("UpdateStatus needed")
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
log.Err(err).Warn("Failed to update deployment status")
return errors.WithStack(err)
}

@@ -121,7 +121,7 @@ func (r *Resources) syncMembersInCluster(ctx context.Context, health memberState
func (r *Resources) EnsureArangoMembers(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
// Create all missing arangomembers
s, _ := r.context.GetStatus()
s := r.context.GetStatus()
obj := r.context.GetAPIObject()
for _, e := range s.Members.AsList() {
@@ -48,7 +48,7 @@ func (r *Resources) CleanupTerminatedPods(ctx context.Context) (util.Interval, e
nextInterval := maxPodInspectorInterval // Large by default, will be made smaller if needed in the rest of the function
// Update member status from all pods found
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
if err := r.context.ACS().ForEachHealthyCluster(func(item sutil.ACSItem) error {
return item.Cache().Pod().V1().Iterate(func(pod *core.Pod) error {
if k8sutil.IsArangoDBImageIDAndVersionPod(pod) {
@@ -413,7 +413,7 @@ func (r *Resources) SelectImageForMember(spec api.DeploymentSpec, status api.Dep
// createPodForMember creates all Pods listed in member status
func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspectorInterface.Inspector, spec api.DeploymentSpec, arangoMember *api.ArangoMember, memberID string, imageNotFoundOnce *sync.Once) error {
log := r.log.Str("section", "member")
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
// Select image
imageInfo, imageFound := r.SelectImage(spec, status)

@@ -547,7 +547,7 @@ func (r *Resources) createPodForMember(ctx context.Context, cachedStatus inspect
if err := status.Members.Update(m, group); err != nil {
return errors.WithStack(err)
}
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
// Create event

@@ -659,13 +659,13 @@ func ChecksumArangoPod(groupSpec api.ServerGroupSpec, pod *core.Pod) (string, er
// EnsurePods creates all Pods listed in member status
func (r *Resources) EnsurePods(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
iterator := r.context.GetServerGroupIterator()
deploymentStatus, _ := r.context.GetStatus()
deploymentStatus := r.context.GetStatus()
imageNotFoundOnce := &sync.Once{}
changed := false
log := r.log.Str("section", "member")
if err := iterator.ForeachServerGroup(func(group api.ServerGroup, groupSpec api.ServerGroupSpec, status *api.MemberStatusList) error {
if err := iterator.ForeachServerGroupAccepted(func(group api.ServerGroup, groupSpec api.ServerGroupSpec, status *api.MemberStatusList) error {
for _, m := range *status {
if m.Phase != api.MemberPhasePending {
continue
@@ -91,7 +91,7 @@ func (r *Resources) runPodFinalizers(ctx context.Context, p *core.Pod, memberSta
break
}
s, _ := r.context.GetStatus()
s := r.context.GetStatus()
_, group, ok := s.Members.ElementByID(memberStatus.ID)
if !ok {
continue
@@ -90,7 +90,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
agencyCache, agencyCachePresent := r.context.GetAgencyCache()
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
var podNamesWithScheduleTimeout []string
var unscheduledPodNames []string

@@ -445,7 +445,7 @@ func (r *Resources) InspectPods(ctx context.Context, cachedStatus inspectorInter
}
// Save status
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
return 0, errors.WithStack(err)
}
@@ -55,7 +55,7 @@ func (r *Resources) EnsureLeader(ctx context.Context, cachedStatus inspectorInte
}
leaderID := cache.LeaderID()
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
noLeader := len(leaderID) == 0
changed := false
group := api.ServerGroupAgents

@@ -149,7 +149,7 @@ func (r *Resources) EnsureLeader(ctx context.Context, cachedStatus inspectorInte
// getSingleServerLeaderID returns id of a single server leader.
func (r *Resources) getSingleServerLeaderID(ctx context.Context) (string, error) {
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
var mutex sync.Mutex
var leaderID string
var anyError error

@@ -215,7 +215,7 @@ func (r *Resources) ensureSingleServerLeader(ctx context.Context, cachedStatus i
}
}
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
for _, m := range status.Members.Single {
pod, exist := cachedStatus.Pod().V1().GetSimple(m.Pod.GetName())
if !exist {
@@ -57,7 +57,7 @@ func (r *Resources) InspectPVCs(ctx context.Context, cachedStatus inspectorInter
defer metrics.SetDuration(inspectPVCsDurationGauges.WithLabelValues(deploymentName), start)
// Update member status from all pods found
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
if err := cachedStatus.PersistentVolumeClaim().V1().Iterate(func(pvc *core.PersistentVolumeClaim) error {
// PVC belongs to this deployment, update metric
inspectedPVCsCounters.WithLabelValues(deploymentName).Inc()
@@ -42,10 +42,10 @@ func (r *Resources) EnsurePVCs(ctx context.Context, cachedStatus inspectorInterf
deploymentName := apiObject.GetName()
owner := apiObject.AsOwner()
iterator := r.context.GetServerGroupIterator()
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
enforceAntiAffinity := r.context.GetSpec().GetEnvironment().IsProduction()
if err := iterator.ForeachServerGroup(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
if err := iterator.ForeachServerGroupAccepted(func(group api.ServerGroup, spec api.ServerGroupSpec, status *api.MemberStatusList) error {
for _, m := range *status {
if m.PersistentVolumeClaimName == "" {
continue
@@ -104,7 +104,7 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe
spec := r.context.GetSpec()
deploymentName := r.context.GetAPIObject().GetName()
var badSecretNames []string
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
image := status.CurrentImage
getHashes := func() *api.SecretHashes {
if status.SecretHashes == nil {

@@ -123,11 +123,11 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe
status.SecretHashes.Users = make(map[string]string)
}
updater(status.SecretHashes)
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
// Reload status
status, lastVersion = r.context.GetStatus()
status = r.context.GetStatus()
return nil
}

@@ -203,7 +203,7 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe
if status.Conditions.Update(api.ConditionTypeSecretsChanged, true,
"Secrets have changed", fmt.Sprintf("Found %d changed secrets", len(badSecretNames))) {
log.Warn("Found %d changed secrets. Settings SecretsChanged condition", len(badSecretNames))
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
log.Err(err).Error("Failed to save SecretsChanged condition")
return errors.WithStack(err)
}

@@ -214,7 +214,7 @@ func (r *Resources) ValidateSecretHashes(ctx context.Context, cachedStatus inspe
// All good, we can remove the SecretsChanged condition
if status.Conditions.Remove(api.ConditionTypeSecretsChanged) {
log.Info("Resetting SecretsChanged condition")
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
log.Err(err).Error("Failed to save SecretsChanged condition")
return errors.WithStack(err)
}
@@ -70,7 +70,7 @@ func (r *Resources) EnsureSecrets(ctx context.Context, cachedStatus inspectorInt
start := time.Now()
spec := r.context.GetSpec()
secrets := cachedStatus.SecretsModInterface().V1()
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
apiObject := r.context.GetAPIObject()
deploymentName := apiObject.GetName()
image := status.CurrentImage
@@ -118,7 +118,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
log := r.log.Str("section", "service")
start := time.Now()
apiObject := r.context.GetAPIObject()
status, _ := r.context.GetStatus()
status := r.context.GetStatus()
deploymentName := apiObject.GetName()
owner := apiObject.AsOwner()
spec := r.context.GetSpec()

@@ -211,10 +211,10 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
log.Str("service", svcName).Debug("Created database client service")
}
{
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
if status.ServiceName != svcName {
status.ServiceName = svcName
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
}

@@ -240,10 +240,10 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
shared.ArangoSyncMasterPort, true, false, spec.Sync.ExternalAccess.ExternalAccessSpec, apiObject); err != nil {
return errors.WithStack(err)
}
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
if status.SyncServiceName != eaServiceName {
status.SyncServiceName = eaServiceName
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
}

@@ -257,10 +257,10 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
log.Err(err).Debug("Failed to create %s exporter service", name)
return errors.WithStack(err)
}
status, lastVersion := r.context.GetStatus()
status := r.context.GetStatus()
if status.ExporterServiceName != name {
status.ExporterServiceName = name
if err := r.context.UpdateStatus(ctx, status, lastVersion); err != nil {
if err := r.context.UpdateStatus(ctx, status); err != nil {
return errors.WithStack(err)
}
}
@@ -36,12 +36,12 @@ import (
 // Name returns the name of the deployment.
 func (d *Deployment) Name() string {
-    return d.apiObject.Name
+    return d.currentObject.Name
 }

 // Namespace returns the namespace that contains the deployment.
 func (d *Deployment) Namespace() string {
-    return d.apiObject.Namespace
+    return d.currentObject.Namespace
 }

 // GetMode returns the mode of the deployment.

@@ -65,7 +65,7 @@ func (d *Deployment) StateColor() server.StateColor {
     if d.VolumeCount() != d.ReadyVolumeCount() {
         allGood = false
     }
-    status, _ := d.GetStatus()
+    status := d.GetStatus()
     for _, m := range status.Members.AsList() {
         switch m.Member.Phase {
         case api.MemberPhaseFailed:

@@ -91,14 +91,14 @@ func (d *Deployment) StateColor() server.StateColor {
 // PodCount returns the number of pods for the deployment
 func (d *Deployment) PodCount() int {
-    status, _ := d.GetStatus()
+    status := d.GetStatus()
     return len(status.Members.PodNames())
 }

 // ReadyPodCount returns the number of pods for the deployment that are in ready state
 func (d *Deployment) ReadyPodCount() int {
     count := 0
-    status, _ := d.GetStatus()
+    status := d.GetStatus()
     for _, e := range status.Members.AsList() {
         if e.Member.Pod.GetName() == "" {
             continue

@@ -113,7 +113,7 @@ func (d *Deployment) ReadyPodCount() int {
 // VolumeCount returns the number of volumes for the deployment
 func (d *Deployment) VolumeCount() int {
     count := 0
-    status, _ := d.GetStatus()
+    status := d.GetStatus()
     for _, e := range status.Members.AsList() {
         if e.Member.PersistentVolumeClaimName != "" {
             count++

@@ -125,7 +125,7 @@ func (d *Deployment) VolumeCount() int {
 // ReadyVolumeCount returns the number of volumes for the deployment that are in ready state
 func (d *Deployment) ReadyVolumeCount() int {
     count := 0
-    status, _ := d.GetStatus()
+    status := d.GetStatus()
     pvcs, _ := d.GetOwnedPVCs() // Ignore errors on purpose
     for _, e := range status.Members.AsList() {
         if e.Member.PersistentVolumeClaimName == "" {

@@ -197,7 +197,7 @@ func (d *Deployment) DatabaseURL() string {
 // DatabaseVersion returns the version used by the deployment
 // Returns versionNumber, licenseType
 func (d *Deployment) DatabaseVersion() (string, string) {
-    status, _ := d.GetStatus()
+    status := d.GetStatus()
     if current := status.CurrentImage; current != nil {
         return string(current.ArangoDBVersion), memberState.GetImageLicense(status.CurrentImage)
     }

@@ -207,7 +207,7 @@ func (d *Deployment) DatabaseVersion() (string, string) {
 // Members returns all members of the deployment by role.
 func (d *Deployment) Members() map[api.ServerGroup][]server.Member {
     result := make(map[api.ServerGroup][]server.Member)
-    status, _ := d.GetStatus()
+    status := d.GetStatus()

     for _, group := range api.AllServerGroups {
         list := status.Members.MembersOfGroup(group)

@@ -36,7 +36,7 @@ type member struct {
 }

 func (m member) status() (api.MemberStatus, bool) {
-    status, _ := m.d.GetStatus()
+    status := m.d.GetStatus()
     result, _, found := status.Members.ElementByID(m.id)
     return result, found
 }

pkg/generated/metric_descriptions/arangodb_operator_resources_arangodeployment_accepted.go (generated, new file, 39 lines)
@@ -0,0 +1,39 @@
+//
+// DISCLAIMER
+//
+// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+
+package metric_descriptions
+
+import "github.com/arangodb/kube-arangodb/pkg/util/metrics"
+
+var (
+    arangodbOperatorResourcesArangodeploymentAccepted = metrics.NewDescription("arangodb_operator_resources_arangodeployment_accepted", "Defines if ArangoDeployment has been accepted", []string{`namespace`, `name`}, nil)
+)
+
+func init() {
+    registerDescription(arangodbOperatorResourcesArangodeploymentAccepted)
+}
+
+func ArangodbOperatorResourcesArangodeploymentAccepted() metrics.Description {
+    return arangodbOperatorResourcesArangodeploymentAccepted
+}
+
+func ArangodbOperatorResourcesArangodeploymentAcceptedGauge(value float64, namespace string, name string) metrics.Metric {
+    return ArangodbOperatorResourcesArangodeploymentAccepted().Gauge(value, namespace, name)
+}
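This file and the three sibling files that follow are generated from the same template; each registers one metric description and exposes a typed helper. As a hedged illustration of how such a helper might be consumed, here is a small sketch; only the package path and the Gauge helper's signature come from the file above, while the print-out stands in for the operator's real collector plumbing, which is not shown:

package main // sketch only

import (
    "fmt"

    "github.com/arangodb/kube-arangodb/pkg/generated/metric_descriptions"
)

func main() {
    // Build one gauge sample with value 1 and labels namespace="default",
    // name="example", matching the description registered above.
    m := metric_descriptions.ArangodbOperatorResourcesArangodeploymentAcceptedGauge(1, "default", "example")
    fmt.Printf("%+v\n", m)
}
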
pkg/generated/metric_descriptions/arangodb_operator_resources_arangodeployment_immutable_errors.go (generated, new file, 39 lines)
@@ -0,0 +1,39 @@
+//
+// DISCLAIMER
+//
+// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+
+package metric_descriptions
+
+import "github.com/arangodb/kube-arangodb/pkg/util/metrics"
+
+var (
+    arangodbOperatorResourcesArangodeploymentImmutableErrors = metrics.NewDescription("arangodb_operator_resources_arangodeployment_immutable_errors", "Counter for deployment immutable errors", []string{`namespace`, `name`}, nil)
+)
+
+func init() {
+    registerDescription(arangodbOperatorResourcesArangodeploymentImmutableErrors)
+}
+
+func ArangodbOperatorResourcesArangodeploymentImmutableErrors() metrics.Description {
+    return arangodbOperatorResourcesArangodeploymentImmutableErrors
+}
+
+func ArangodbOperatorResourcesArangodeploymentImmutableErrorsCounter(value float64, namespace string, name string) metrics.Metric {
+    return ArangodbOperatorResourcesArangodeploymentImmutableErrors().Gauge(value, namespace, name)
+}
pkg/generated/metric_descriptions/arangodb_operator_resources_arangodeployment_uptodate.go (generated, new file, 39 lines)
@@ -0,0 +1,39 @@
+//
+// DISCLAIMER
+//
+// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+
+package metric_descriptions
+
+import "github.com/arangodb/kube-arangodb/pkg/util/metrics"
+
+var (
+    arangodbOperatorResourcesArangodeploymentUptodate = metrics.NewDescription("arangodb_operator_resources_arangodeployment_uptodate", "Defines if ArangoDeployment is uptodate", []string{`namespace`, `name`}, nil)
+)
+
+func init() {
+    registerDescription(arangodbOperatorResourcesArangodeploymentUptodate)
+}
+
+func ArangodbOperatorResourcesArangodeploymentUptodate() metrics.Description {
+    return arangodbOperatorResourcesArangodeploymentUptodate
+}
+
+func ArangodbOperatorResourcesArangodeploymentUptodateGauge(value float64, namespace string, name string) metrics.Metric {
+    return ArangodbOperatorResourcesArangodeploymentUptodate().Gauge(value, namespace, name)
+}
pkg/generated/metric_descriptions/arangodb_operator_resources_arangodeployment_validation_errors.go (generated, new file, 39 lines)
@@ -0,0 +1,39 @@
+//
+// DISCLAIMER
+//
+// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// Copyright holder is ArangoDB GmbH, Cologne, Germany
+//
+
+package metric_descriptions
+
+import "github.com/arangodb/kube-arangodb/pkg/util/metrics"
+
+var (
+    arangodbOperatorResourcesArangodeploymentValidationErrors = metrics.NewDescription("arangodb_operator_resources_arangodeployment_validation_errors", "Counter for deployment validation errors", []string{`namespace`, `name`}, nil)
+)
+
+func init() {
+    registerDescription(arangodbOperatorResourcesArangodeploymentValidationErrors)
+}
+
+func ArangodbOperatorResourcesArangodeploymentValidationErrors() metrics.Description {
+    return arangodbOperatorResourcesArangodeploymentValidationErrors
+}
+
+func ArangodbOperatorResourcesArangodeploymentValidationErrorsCounter(value float64, namespace string, name string) metrics.Metric {
+    return ArangodbOperatorResourcesArangodeploymentValidationErrors().Gauge(value, namespace, name)
+}

@@ -284,9 +284,9 @@ func (dr *DeploymentReplication) updateCRStatus() error {
         }
     }

-// Update the spec part of the API object (d.apiObject)
+// Update the spec part of the API object (d.currentObject)
 // to the given object, while preserving the status.
-// On success, d.apiObject is updated.
+// On success, d.currentObject is updated.
 func (dr *DeploymentReplication) updateCRSpec(newSpec api.DeploymentReplicationSpec) error {
     log := dr.log
     repls := dr.deps.Client.Arango().ReplicationV1().ArangoDeploymentReplications(dr.apiObject.GetNamespace())

@@ -382,9 +382,9 @@ func (ls *LocalStorage) updateCRStatus() error {
         }
     }

-// Update the spec part of the API object (d.apiObject)
+// Update the spec part of the API object (d.currentObject)
 // to the given object, while preserving the status.
-// On success, d.apiObject is updated.
+// On success, d.currentObject is updated.
 func (ls *LocalStorage) updateCRSpec(newSpec api.LocalStorageSpec) error {
     // Send update to API server
     update := ls.apiObject.DeepCopy()

@@ -116,7 +116,7 @@ var (
 // CreateArangodClient creates a go-driver client for a specific member in the given group.
 func CreateArangodClient(ctx context.Context, cli typedCore.CoreV1Interface, apiObject *api.ArangoDeployment, group api.ServerGroup, id string) (driver.Client, error) {
     // Create connection
-    dnsName := k8sutil.CreatePodDNSNameWithDomain(apiObject, apiObject.Spec.ClusterDomain, group.AsRole(), id)
+    dnsName := k8sutil.CreatePodDNSNameWithDomain(apiObject, apiObject.GetAcceptedSpec().ClusterDomain, group.AsRole(), id)
     c, err := createArangodClientForDNSName(ctx, cli, apiObject, dnsName, false)
     if err != nil {
         return nil, errors.WithStack(err)

@@ -127,7 +127,7 @@ func CreateArangodClient(ctx context.Context, cli typedCore.CoreV1Interface, api
 // CreateArangodDatabaseClient creates a go-driver client for accessing the entire cluster (or single server).
 func CreateArangodDatabaseClient(ctx context.Context, cli typedCore.CoreV1Interface, apiObject *api.ArangoDeployment, shortTimeout bool) (driver.Client, error) {
     // Create connection
-    dnsName := k8sutil.CreateDatabaseClientServiceDNSNameWithDomain(apiObject, apiObject.Spec.ClusterDomain)
+    dnsName := k8sutil.CreateDatabaseClientServiceDNSNameWithDomain(apiObject, apiObject.GetAcceptedSpec().ClusterDomain)
     c, err := createArangodClientForDNSName(ctx, cli, apiObject, dnsName, shortTimeout)
     if err != nil {
         return nil, errors.WithStack(err)

@@ -178,7 +178,7 @@ func createArangodHTTPConfigForDNSNames(apiObject *api.ArangoDeployment, dnsName
     if shortTimeout {
         transport = sharedHTTPTransportShortTimeout
     }
-    if apiObject != nil && apiObject.Spec.IsSecure() {
+    if apiObject != nil && apiObject.GetAcceptedSpec().IsSecure() {
         scheme = "https"
         transport = sharedHTTPSTransport
         if shortTimeout {

@@ -197,14 +197,14 @@ func createArangodHTTPConfigForDNSNames(apiObject *api.ArangoDeployment, dnsName
 // createArangodClientAuthentication creates a go-driver authentication for the servers in the given deployment.
 func createArangodClientAuthentication(ctx context.Context, cli typedCore.CoreV1Interface, apiObject *api.ArangoDeployment) (driver.Authentication, error) {
-    if apiObject != nil && apiObject.Spec.IsAuthenticated() {
+    if apiObject != nil && apiObject.GetAcceptedSpec().IsAuthenticated() {
         // Authentication is enabled.
         // Should we skip using it?
         if ctx.Value(skipAuthenticationKey{}) == nil {
             secrets := cli.Secrets(apiObject.GetNamespace())
             ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
             defer cancel()
-            s, err := k8sutil.GetTokenSecret(ctxChild, secrets, apiObject.Spec.Authentication.GetJWTSecretName())
+            s, err := k8sutil.GetTokenSecret(ctxChild, secrets, apiObject.GetAcceptedSpec().Authentication.GetJWTSecretName())
             if err != nil {
                 return nil, errors.WithStack(err)
             }

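Taken together, these hunks make every client construction path read the accepted spec. A hedged sketch of calling the exported constructor shown above; only CreateArangodDatabaseClient and its signature come from the diff, while the wrapper function, its name, and the Version call used as a liveness probe are illustrative:

// Sketch only: kubeCli is a typedCore.CoreV1Interface and depl the deployment
// being handled; TLS, authentication and cluster domain are resolved from the
// accepted spec inside the constructor.
func pingDeployment(ctx context.Context, kubeCli typedCore.CoreV1Interface, depl *api.ArangoDeployment) error {
    c, err := CreateArangodDatabaseClient(ctx, kubeCli, depl, false)
    if err != nil {
        return errors.WithStack(err)
    }
    if _, err := c.Version(ctx); err != nil { // cheap round trip to verify the endpoint answers
        return errors.WithStack(err)
    }
    return nil
}
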
@@ -93,6 +93,29 @@ func SetNumberOfServers(ctx context.Context, conn driver.Connection, noCoordinat
     return nil
 }

+// CleanNumberOfServers removes the server count
+func CleanNumberOfServers(ctx context.Context, conn driver.Connection) error {
+    req, err := conn.NewRequest("PUT", "_admin/cluster/numberOfServers")
+    if err != nil {
+        return errors.WithStack(err)
+    }
+    input := map[string]interface{}{
+        "numberOfCoordinators": nil,
+        "numberOfDBServers": nil,
+    }
+    if _, err := req.SetBody(input); err != nil {
+        return errors.WithStack(err)
+    }
+    resp, err := conn.Do(ctx, req)
+    if err != nil {
+        return errors.WithStack(err)
+    }
+    if err := resp.CheckStatus(200); err != nil {
+        return errors.WithStack(err)
+    }
+    return nil
+}
+
 // RemoveServerFromCluster tries to remove a coordinator or DBServer from the cluster.
 func RemoveServerFromCluster(ctx context.Context, conn driver.Connection, id driver.ServerID) error {
     req, err := conn.NewRequest("POST", "_admin/cluster/removeServer")
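A short hedged sketch of how the new helper could be called; only CleanNumberOfServers and its signature come from the hunk above, while the wrapper function and its name are illustrative:

// Sketch only: conn is assumed to be an authenticated driver.Connection to a
// coordinator; the helper resets numberOfCoordinators and numberOfDBServers
// by PUTting nil values, as implemented above.
func resetServerTargets(ctx context.Context, conn driver.Connection) error {
    if err := CleanNumberOfServers(ctx, conn); err != nil {
        return errors.WithStack(err)
    }
    return nil
}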