1
0
Fork 0
mirror of https://github.com/arangodb/kube-arangodb.git synced 2024-12-14 11:57:37 +00:00

[Feature] Switch services to Port names (#1196)

This commit is contained in:
Adam Janikowski 2022-11-28 17:44:53 +01:00 committed by GitHub
parent a6c4fc609b
commit e24dde1334
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
24 changed files with 575 additions and 290 deletions

View file

@ -33,6 +33,7 @@
- (Feature) Allow to change Pod Network and PID settings
- (Feature) Pre OOM Abort function
- (Bugfix) Fix ErrorArray String function
- (Feature) Switch services to Port names
## [1.2.20](https://github.com/arangodb/kube-arangodb/tree/1.2.20) (2022-10-25)
- (Feature) Add action progress

View file

@ -64,4 +64,7 @@ const (
ClusterJWTSecretVolumeMountDir = "/secrets/cluster/jwt"
ExporterJWTVolumeMountDir = "/secrets/exporter/jwt"
MasterJWTSecretVolumeMountDir = "/secrets/master/jwt"
ServerPortName = "server"
ExporterPortName = "exporter"
)

View file

@ -92,7 +92,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -154,7 +154,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -219,7 +219,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -289,7 +289,7 @@ func TestEnsurePod_ArangoDB_AntiAffinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -368,7 +368,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -433,7 +433,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -501,7 +501,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -574,7 +574,7 @@ func TestEnsurePod_ArangoDB_Affinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -655,7 +655,7 @@ func TestEnsurePod_ArangoDB_NodeAffinity(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),

View file

@ -69,7 +69,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -120,7 +120,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -180,7 +180,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -237,7 +237,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -302,7 +302,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -357,7 +357,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -414,7 +414,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -474,7 +474,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -534,7 +534,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -586,7 +586,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -643,7 +643,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -695,7 +695,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, true, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.TlsKeyfileVolumeMount(),
@ -755,7 +755,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.ClusterJWTVolumeMount(),
@ -816,7 +816,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, true, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
VolumeMounts: []core.VolumeMount{
@ -873,7 +873,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.RocksdbEncryptionVolumeMount(),
@ -935,7 +935,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.ClusterJWTVolumeMount(),
@ -1004,7 +1004,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.ClusterJWTVolumeMount(),
@ -1076,7 +1076,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.LifecycleVolumeMount(),
@ -1147,7 +1147,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.LifecycleVolumeMount(),
@ -1211,7 +1211,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupCoordinators),
ImagePullPolicy: core.PullIfNotPresent,
Resources: emptyResources,
SecurityContext: securityContext.NewSecurityContext(),
@ -1277,7 +1277,7 @@ func TestEnsurePod_ArangoDB_Core(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSingleMode(true, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSingle),
ImagePullPolicy: core.PullIfNotPresent,
SecurityContext: securityContext.NewSecurityContext(),
VolumeMounts: []core.VolumeMount{

View file

@ -75,7 +75,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
AgentArgsWithTLS(firstAgentStatus.ID, false),
ArgsWithAuth(false),
ArgsWithEncryptionKey()),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.RocksdbEncryptionVolumeMount(),
@ -154,7 +154,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, true, true, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Lifecycle: createTestLifecycle(api.ServerGroupAgents),
LivenessProbe: createTestLivenessProbe(httpProbe, false, "", shared.ArangoPort),
ImagePullPolicy: core.PullIfNotPresent,
@ -223,7 +223,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
AgentArgsWithTLS(firstAgentStatus.ID, false),
ArgsWithAuth(false),
ArgsWithEncryptionKey()),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.RocksdbEncryptionVolumeMount(),
@ -290,7 +290,7 @@ func TestEnsurePod_ArangoDB_Encryption(t *testing.T) {
"rocksdb.encryption-key-rotation": "true",
}
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.RocksdbEncryptionReadOnlyVolumeMount(),

View file

@ -72,7 +72,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -135,7 +135,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -194,7 +194,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupDBServers),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -257,7 +257,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupCoordinators),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -320,7 +320,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupCoordinators),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -382,7 +382,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSingle),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -447,7 +447,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSingle),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -512,7 +512,7 @@ func TestEnsurePod_ArangoDB_Features(t *testing.T) {
return args
}),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSingle),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),

View file

@ -84,7 +84,7 @@ func TestEnsurePod_ArangoDB_ImagePropagation(t *testing.T) {
Name: shared.ServerContainerName,
Image: imageID,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -136,7 +136,7 @@ func TestEnsurePod_ArangoDB_ImagePropagation(t *testing.T) {
Name: shared.ServerContainerName,
Image: imageID,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -188,7 +188,7 @@ func TestEnsurePod_ArangoDB_ImagePropagation(t *testing.T) {
Name: shared.ServerContainerName,
Image: image,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},

View file

@ -73,7 +73,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -133,7 +133,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -193,7 +193,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: func() []core.ContainerPort {
ports := createTestPorts()
ports := createTestPorts(api.ServerGroupAgents)
ports = append(ports, core.ContainerPort{
Name: "exporter",
@ -261,7 +261,7 @@ func TestEnsurePod_Metrics(t *testing.T) {
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: func() []core.ContainerPort {
ports := createTestPorts()
ports := createTestPorts(api.ServerGroupAgents)
ports = append(ports, core.ContainerPort{
Name: "exporter",

View file

@ -72,7 +72,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -129,7 +129,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -187,7 +187,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -238,7 +238,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -294,7 +294,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -345,7 +345,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -401,7 +401,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -459,7 +459,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: createTestImageForVersion("3.10.0"),
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false, addEarlyConnection),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -513,7 +513,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: createTestImageForVersion("3.9.2"),
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -567,7 +567,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: createTestImageForVersion("3.10.0"),
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -621,7 +621,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: createTestImageForVersion("3.10.0"),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},
@ -674,7 +674,7 @@ func TestEnsurePod_ArangoDB_Probe(t *testing.T) {
Name: shared.ServerContainerName,
Image: createTestImageForVersion("3.10.0"),
Command: createTestCommandForAgent(firstAgentStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
},

View file

@ -130,7 +130,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -188,7 +188,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: k8sutil.ExtractPodResourceRequirement(resourcesUnfiltered),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -245,7 +245,7 @@ func TestEnsurePod_ArangoDB_Resources(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),

View file

@ -250,7 +250,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, true, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -341,7 +341,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -446,7 +446,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true, "https://arangodb.xyz:8629"),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -548,7 +548,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true, "https://arangodb.xyz:8629"),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -650,7 +650,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true, "https://arangodb.xyz:8629"),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -755,7 +755,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true, "https://127.0.0.1:8629"),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -855,7 +855,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -962,7 +962,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true, "https://arangodb.xyz:8629", "https://arangodb1.xyz:8629", "https://arangodb2.xyz:8629"),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -1079,7 +1079,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true, "https://arangodb.xyz:8629", "https://arangodb.xyz:8639", "https://arangodb.xyz:8649"),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -1194,7 +1194,7 @@ func TestEnsurePod_Sync_Master(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncMaster(firstSyncMaster.ID, true, false, true, "https://127.0.0.1:8629", "https://arangodb.xyz:8629", "https://arangodb2.xyz:8629"),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncMasters),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),
@ -1302,7 +1302,7 @@ func TestEnsurePod_Sync_Worker(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForSyncWorker(firstSyncWorker.ID, true, true),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupSyncWorkers),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoSyncMonitoringToken,
testDeploymentName+"-sync-mt", constants.SecretKeyToken),

View file

@ -110,7 +110,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.TlsKeyfileVolumeMount(),
@ -185,7 +185,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
Name: shared.ServerContainerName,
Image: createTestImageForVersion("3.6.0"),
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.TlsKeyfileVolumeMount(),
@ -260,7 +260,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForCoordinator(firstCoordinatorStatus.ID, true, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.TlsKeyfileVolumeMount(),
@ -358,7 +358,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
fmt.Sprintf("--ssl.server-name-indication=d=%s/sni2/tls.keyfile", shared.TLSSNIKeyfileVolumeMountDir))
return args
}(),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.TlsKeyfileVolumeMount(),
@ -446,7 +446,7 @@ func TestEnsurePod_ArangoDB_TLS_SNI(t *testing.T) {
args := createTestCommandForDBServer(firstDBServerStatus.ID, true, false, false)
return args
}(),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
k8sutil.TlsKeyfileVolumeMount(),

View file

@ -91,7 +91,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -152,7 +152,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),
@ -214,7 +214,7 @@ func TestEnsurePod_ArangoDB_Volumes(t *testing.T) {
Name: shared.ServerContainerName,
Image: testImage,
Command: createTestCommandForDBServer(firstDBServerStatus.ID, false, false, false),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: emptyResources,
VolumeMounts: []core.VolumeMount{
k8sutil.ArangodVolumeMount(),

View file

@ -530,11 +530,20 @@ func createTestDeployment(t *testing.T, config Config, arangoDeployment *api.Ara
return d, eventRecorder
}
func createTestPorts() []core.ContainerPort {
func createTestPorts(group api.ServerGroup) []core.ContainerPort {
port := shared.ArangoPort
switch group {
case api.ServerGroupSyncMasters:
port = shared.ArangoSyncMasterPort
case api.ServerGroupSyncWorkers:
port = shared.ArangoSyncWorkerPort
}
return []core.ContainerPort{
{
Name: "server",
ContainerPort: 8529,
ContainerPort: int32(port),
Protocol: "TCP",
},
}

View file

@ -89,7 +89,7 @@ func TestEnsureImages(t *testing.T) {
Name: shared.ServerContainerName,
Image: testNewImage,
Command: createTestCommandForImageUpdatePod(),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: core.ResourceRequirements{
Limits: make(core.ResourceList),
Requests: make(core.ResourceList),
@ -148,7 +148,7 @@ func TestEnsureImages(t *testing.T) {
Name: shared.ServerContainerName,
Image: testNewImage,
Command: createTestCommandForImageUpdatePod(),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Env: []core.EnvVar{
k8sutil.CreateEnvSecretKeySelector(constants.EnvArangoLicenseKey,
testLicense, constants.SecretKeyToken),
@ -195,7 +195,7 @@ func TestEnsureImages(t *testing.T) {
Name: shared.ServerContainerName,
Image: testNewImage,
Command: createTestCommandForImageUpdatePod(),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: core.ResourceRequirements{
Limits: make(core.ResourceList),
Requests: make(core.ResourceList),
@ -251,7 +251,7 @@ func TestEnsureImages(t *testing.T) {
Name: shared.ServerContainerName,
Image: testNewImage,
Command: createTestCommandForImageUpdatePod(),
Ports: createTestPorts(),
Ports: createTestPorts(api.ServerGroupAgents),
Resources: core.ResourceRequirements{
Limits: make(core.ResourceList),
Requests: make(core.ResourceList),

View file

@ -155,6 +155,7 @@ func (i *inspectorState) RegisterInformers(k8s informers.SharedInformerFactory,
k8s.Policy().V1beta1().PodDisruptionBudgets().Informer().AddEventHandler(i.eventHandler(definitions.PodDisruptionBudget))
}
k8s.Core().V1().Pods().Informer().AddEventHandler(i.eventHandler(definitions.Pod))
k8s.Core().V1().Secrets().Informer().AddEventHandler(i.eventHandler(definitions.Secret))
k8s.Core().V1().Services().Informer().AddEventHandler(i.eventHandler(definitions.Service))
k8s.Core().V1().ServiceAccounts().Informer().AddEventHandler(i.eventHandler(definitions.ServiceAccount))

View file

@ -99,7 +99,7 @@ type ArangoVersionCheckContainer struct {
func (a *ArangoDContainer) GetPorts() []core.ContainerPort {
ports := []core.ContainerPort{
{
Name: shared.ServerContainerName,
Name: shared.ServerPortName,
ContainerPort: int32(shared.ArangoPort),
Protocol: core.ProtocolTCP,
},
@ -109,7 +109,7 @@ func (a *ArangoDContainer) GetPorts() []core.ContainerPort {
switch a.spec.Metrics.Mode.Get() {
case api.MetricsModeInternal:
ports = append(ports, core.ContainerPort{
Name: "exporter",
Name: shared.ExporterPortName,
ContainerPort: int32(shared.ArangoPort),
Protocol: core.ProtocolTCP,
})

View file

@ -89,10 +89,16 @@ func (a *ArangoSyncContainer) GetName() string {
}
func (a *ArangoSyncContainer) GetPorts() []core.ContainerPort {
port := shared.ArangoSyncMasterPort
if a.group == api.ServerGroupSyncWorkers {
port = shared.ArangoSyncWorkerPort
}
return []core.ContainerPort{
{
Name: shared.ServerContainerName,
ContainerPort: int32(shared.ArangoPort),
ContainerPort: int32(port),
Protocol: core.ProtocolTCP,
},
}

View file

@ -37,6 +37,7 @@ import (
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil"
inspectorInterface "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/kerrors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/patcher"
)
// EnsureLeader creates leader label on the pod's agency and creates service to it.
@ -119,19 +120,18 @@ func (r *Resources) EnsureLeader(ctx context.Context, cachedStatus inspectorInte
leaderAgentSvcName := k8sutil.CreateAgentLeaderServiceName(r.context.GetAPIObject().GetName())
deploymentName := r.context.GetAPIObject().GetName()
ports := []core.ServicePort{CreateServerServicePort(group)}
ports := []core.ServicePort{CreateServerServicePort()}
selector := k8sutil.LabelsForLeaderMember(deploymentName, group.AsRole(), leaderID)
if s, ok := cachedStatus.Service().V1().GetSimple(leaderAgentSvcName); ok {
if err, adjusted := r.adjustService(ctx, s, ports, selector); err == nil {
if !adjusted {
// The service is not changed, so single server leader can be set.
if c, err := patcher.ServicePatcher(ctx, cachedStatus.ServicesModInterface().V1(), s, meta.PatchOptions{}, patcher.PatchServiceSelector(selector), patcher.PatchServicePorts(ports)); err != nil {
return err
} else {
if !c {
return r.ensureSingleServerLeader(ctx, cachedStatus)
}
return errors.Reconcile()
} else {
return err
}
}

View file

@ -26,15 +26,12 @@ import (
"time"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/apis/shared"
"github.com/arangodb/kube-arangodb/pkg/deployment/features"
"github.com/arangodb/kube-arangodb/pkg/deployment/patch"
"github.com/arangodb/kube-arangodb/pkg/metrics"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/globals"
@ -43,6 +40,7 @@ import (
v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/pod/v1"
servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/kerrors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/patcher"
)
var (
@ -71,39 +69,8 @@ func (r *Resources) createService(name, namespace string, owner meta.OwnerRefere
}
}
// adjustService checks whether service contains is valid and if not than it reconciles service.
// Returns true if service is adjusted.
func (r *Resources) adjustService(ctx context.Context, s *core.Service, ports []core.ServicePort,
selector map[string]string) (error, bool) {
services := r.context.ACS().CurrentClusterCache().ServicesModInterface().V1()
spec := s.Spec.DeepCopy()
spec.Type = core.ServiceTypeClusterIP
spec.Ports = ports
spec.PublishNotReadyAddresses = true
spec.Selector = selector
if equality.Semantic.DeepDerivative(*spec, s.Spec) {
// The service has not changed, so nothing should be changed.
return nil, false
}
s.Spec = *spec
err := globals.GetGlobalTimeouts().Kubernetes().RunWithTimeout(ctx, func(ctxChild context.Context) error {
_, err := services.Update(ctxChild, s, meta.UpdateOptions{})
return err
})
if err != nil {
return err, false
}
// The service has been changed.
return nil, true
}
// EnsureServices creates all services needed to service the deployment
func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorInterface.Inspector) error {
log := r.log.Str("section", "service")
start := time.Now()
apiObject := r.context.GetAPIObject()
@ -111,6 +78,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
deploymentName := apiObject.GetName()
owner := apiObject.AsOwner()
spec := r.context.GetSpec()
role := spec.Mode.Get().ServingGroup().AsRole()
defer metrics.SetDuration(inspectServicesDurationGauges.WithLabelValues(deploymentName), start)
counterMetric := inspectedServicesCounters.WithLabelValues(deploymentName)
@ -129,7 +97,7 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
return errors.Newf("Member %s not found", memberName)
}
ports := CreateServerServicePortsWithSidecars(podInspector, e.Member.PodName, e.Group)
ports := CreateServerServicePortsWithSidecars(podInspector, e.Member.Pod.GetName())
selector := k8sutil.LabelsForActiveMember(deploymentName, e.Group.AsRole(), e.Member.ID)
if s, ok := cachedStatus.Service().V1().GetSimple(member.GetName()); !ok {
s := r.createService(member.GetName(), member.GetNamespace(), member.AsOwner(), ports, selector)
@ -147,23 +115,27 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
reconcileRequired.Required()
continue
} else {
if err, adjusted := r.adjustService(ctx, s, ports, selector); err == nil {
if adjusted {
reconcileRequired.Required()
}
// Continue the loop.
} else {
if changed, err := patcher.ServicePatcher(ctx, svcs, s, meta.PatchOptions{},
patcher.PatchServicePorts(ports),
patcher.PatchServiceSelector(selector),
patcher.PatchServicePublishNotReadyAddresses(true),
patcher.PatchServiceType(core.ServiceTypeClusterIP)); err != nil {
return err
} else if changed {
reconcileRequired.Required()
}
}
}
// Headless service
counterMetric.Inc()
if _, exists := cachedStatus.Service().V1().GetSimple(k8sutil.CreateHeadlessServiceName(deploymentName)); !exists {
headlessPorts, headlessSelector := k8sutil.HeadlessServiceDetails(deploymentName, role)
if s, exists := cachedStatus.Service().V1().GetSimple(k8sutil.CreateHeadlessServiceName(deploymentName)); !exists {
ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
defer cancel()
svcName, newlyCreated, err := k8sutil.CreateHeadlessService(ctxChild, svcs, apiObject, owner)
svcName, newlyCreated, err := k8sutil.CreateHeadlessService(ctxChild, svcs, apiObject, headlessPorts, headlessSelector, owner)
if err != nil {
log.Err(err).Debug("Failed to create headless service")
return errors.WithStack(err)
@ -171,6 +143,13 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
if newlyCreated {
log.Str("service", svcName).Debug("Created headless service")
}
} else {
if changed, err := patcher.ServicePatcher(ctx, svcs, s, meta.PatchOptions{}, patcher.PatchServicePorts(headlessPorts), patcher.PatchServiceSelector(headlessSelector)); err != nil {
log.Err(err).Debug("Failed to patch headless service")
return errors.WithStack(err)
} else if changed {
log.Str("service", s.GetName()).Debug("Updated headless service")
}
}
// Internal database client service
@ -181,10 +160,13 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
}
}
counterMetric.Inc()
if _, exists := cachedStatus.Service().V1().GetSimple(k8sutil.CreateDatabaseClientServiceName(deploymentName)); !exists {
clientServicePorts, clientServiceSelectors := k8sutil.DatabaseClientDetails(deploymentName, role, withLeader)
if s, exists := cachedStatus.Service().V1().GetSimple(k8sutil.CreateDatabaseClientServiceName(deploymentName)); !exists {
ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
defer cancel()
svcName, newlyCreated, err := k8sutil.CreateDatabaseClientService(ctxChild, svcs, apiObject, single, withLeader, owner)
svcName, newlyCreated, err := k8sutil.CreateDatabaseClientService(ctxChild, svcs, apiObject, clientServicePorts, clientServiceSelectors, owner)
if err != nil {
log.Err(err).Debug("Failed to create database client service")
return errors.WithStack(err)
@ -201,14 +183,17 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
}
}
}
} else {
if changed, err := patcher.ServicePatcher(ctx, svcs, s, meta.PatchOptions{}, patcher.PatchServicePorts(clientServicePorts), patcher.PatchServiceSelector(clientServiceSelectors)); err != nil {
log.Err(err).Debug("Failed to patch database client service")
return errors.WithStack(err)
} else if changed {
log.Str("service", s.GetName()).Debug("Updated database client service")
}
}
// Database external access service
eaServiceName := k8sutil.CreateDatabaseExternalAccessServiceName(deploymentName)
role := "coordinator"
if single {
role = "single"
}
if err := r.ensureExternalAccessServices(ctx, cachedStatus, svcs, eaServiceName, role, shared.ArangoPort,
false, withLeader, spec.ExternalAccess, apiObject); err != nil {
return errors.WithStack(err)
@ -234,7 +219,10 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
if spec.Metrics.IsEnabled() {
ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
defer cancel()
name, _, err := k8sutil.CreateExporterService(ctxChild, cachedStatus, svcs, apiObject, apiObject.AsOwner())
ports, selectors := k8sutil.ExporterServiceDetails(deploymentName)
name, _, err := k8sutil.CreateExporterService(ctxChild, cachedStatus, apiObject, ports, selectors, apiObject.AsOwner())
if err != nil {
log.Err(err).Debug("Failed to create %s exporter service", name)
return errors.WithStack(err)
@ -253,16 +241,17 @@ func (r *Resources) EnsureServices(ctx context.Context, cachedStatus inspectorIn
// ensureExternalAccessServices ensures all services needed for a deployment.
func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStatus inspectorInterface.Inspector,
svcs servicev1.ModInterface, eaServiceName, svcRole string, port int, noneIsClusterIP bool, withLeader bool,
svcs servicev1.ModInterface, eaServiceName, role string, port int, noneIsClusterIP bool, withLeader bool,
spec api.ExternalAccessSpec, apiObject k8sutil.APIObject) error {
eaPorts, eaSelector := k8sutil.ExternalAccessDetails(port, spec.GetNodePort(), apiObject.GetName(), role, withLeader)
if spec.GetType().IsManaged() {
// Managed services should not be created or removed by the operator.
return r.ensureExternalAccessManagedServices(ctx, cachedStatus, svcs, eaServiceName, svcRole, spec, apiObject,
withLeader)
return r.ensureExternalAccessManagedServices(ctx, cachedStatus, eaServiceName, eaPorts, eaSelector, spec)
}
log := r.log.Str("section", "service-ea").Str("role", svcRole).Str("service", eaServiceName)
log := r.log.Str("section", "service-ea").Str("role", role).Str("service", eaServiceName)
createExternalAccessService := false
deleteExternalAccessService := false
eaServiceType := spec.GetType().AsServiceType() // Note: Type auto defaults to ServiceTypeLoadBalancer
@ -326,6 +315,14 @@ func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStat
return errors.WithStack(err)
}
}
if !createExternalAccessService && !deleteExternalAccessService {
if changed, err := patcher.ServicePatcher(ctx, svcs, existing, meta.PatchOptions{}, patcher.PatchServicePorts(eaPorts), patcher.PatchServiceSelector(eaSelector)); err != nil {
log.Err(err).Debug("Failed to patch database client service")
return errors.WithStack(err)
} else if changed {
log.Str("service", existing.GetName()).Debug("Updated database client service")
}
}
} else {
// External access service does not exist
if !spec.GetType().IsNone() || noneIsClusterIP {
@ -345,13 +342,11 @@ func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStat
}
if createExternalAccessService {
// Let's create or update the database external access service
nodePort := spec.GetNodePort()
loadBalancerIP := spec.GetLoadBalancerIP()
loadBalancerSourceRanges := spec.LoadBalancerSourceRanges
ctxChild, cancel := globals.GetGlobalTimeouts().Kubernetes().WithTimeout(ctx)
defer cancel()
_, newlyCreated, err := k8sutil.CreateExternalAccessService(ctxChild, svcs, eaServiceName, svcRole, apiObject,
eaServiceType, port, nodePort, loadBalancerIP, loadBalancerSourceRanges, apiObject.AsOwner(), withLeader)
_, newlyCreated, err := k8sutil.CreateExternalAccessService(ctxChild, svcs, eaServiceName, eaServiceType, eaPorts, eaSelector, loadBalancerIP, loadBalancerSourceRanges, apiObject.AsOwner())
if err != nil {
log.Err(err).Debug("Failed to create external access service")
return errors.WithStack(err)
@ -365,18 +360,16 @@ func (r *Resources) ensureExternalAccessServices(ctx context.Context, cachedStat
// ensureExternalAccessServices ensures if there are correct selectors on a managed services.
// If hardcoded external service names are not on the list of managed services then it will be checked additionally.
func (r *Resources) ensureExternalAccessManagedServices(ctx context.Context, cachedStatus inspectorInterface.Inspector,
services servicev1.ModInterface, eaServiceName, svcRole string, spec api.ExternalAccessSpec,
apiObject k8sutil.APIObject, withLeader bool) error {
func (r *Resources) ensureExternalAccessManagedServices(ctx context.Context, cachedStatus inspectorInterface.Inspector, eaServiceName string,
ports []core.ServicePort, selectors map[string]string, spec api.ExternalAccessSpec) error {
log := r.log.Str("section", "service-ea").Str("role", svcRole).Str("service", eaServiceName)
log := r.log.Str("section", "service-ea").Str("service", eaServiceName)
managedServiceNames := spec.GetManagedServiceNames()
deploymentName := apiObject.GetName()
var selector map[string]string
if withLeader {
selector = k8sutil.LabelsForLeaderMember(deploymentName, svcRole, "")
} else {
selector = k8sutil.LabelsForDeployment(deploymentName, svcRole)
apply := func(svc *core.Service) (bool, error) {
return patcher.ServicePatcher(ctx, cachedStatus.ServicesModInterface().V1(), svc, meta.PatchOptions{},
patcher.PatchServicePorts(ports),
patcher.PatchServiceSelector(selectors))
}
// Check if hardcoded service has correct selector.
@ -386,7 +379,7 @@ func (r *Resources) ensureExternalAccessManagedServices(ctx context.Context, cac
log.Warn("the field \"spec.externalAccess.managedServiceNames\" should be provided for \"managed\" service type")
return nil
}
} else if changed, err := ensureManagedServiceSelector(ctx, selector, svc, services); err != nil {
} else if changed, err := apply(svc); err != nil {
return errors.WithMessage(err, "failed to ensure service selector")
} else if changed {
log.Info("selector applied to the managed service \"%s\"", svc.GetName())
@ -404,7 +397,7 @@ func (r *Resources) ensureExternalAccessManagedServices(ctx context.Context, cac
continue
}
if changed, err := ensureManagedServiceSelector(ctx, selector, svc, services); err != nil {
if changed, err := apply(svc); err != nil {
return errors.WithMessage(err, "failed to ensure service selector")
} else if changed {
log.Info("selector applied to the managed service \"%s\"", svcName)
@ -414,35 +407,10 @@ func (r *Resources) ensureExternalAccessManagedServices(ctx context.Context, cac
return nil
}
// ensureManagedServiceSelector ensures if there is correct selector on a service.
func ensureManagedServiceSelector(ctx context.Context, selector map[string]string, svc *core.Service,
services servicev1.ModInterface) (bool, error) {
for key, value := range selector {
if currentValue, ok := svc.Spec.Selector[key]; ok && value == currentValue {
continue
}
p := patch.NewPatch()
p.ItemReplace(patch.NewPath("spec", "selector"), selector)
data, err := p.Marshal()
if err != nil {
return false, errors.WithMessage(err, "failed to marshal service selector")
}
if _, err = services.Patch(ctx, svc.GetName(), types.JSONPatchType, data, meta.PatchOptions{}); err != nil {
return false, errors.WithMessage(err, "failed to patch service selector")
}
return true, nil
}
return false, nil
}
// CreateServerServicePortsWithSidecars returns ports for the service.
func CreateServerServicePortsWithSidecars(podInspector v1.Inspector, podName string, group api.ServerGroup) []core.ServicePort {
func CreateServerServicePortsWithSidecars(podInspector v1.Inspector, podName string) []core.ServicePort {
// Create service port for the `server` container.
ports := []core.ServicePort{CreateServerServicePort(group)}
ports := []core.ServicePort{CreateServerServicePort()}
if podInspector == nil {
return ports
@ -456,9 +424,10 @@ func CreateServerServicePortsWithSidecars(podInspector v1.Inspector, podName str
}
for _, port := range c.Ports {
ports = append(ports, core.ServicePort{
Name: port.Name,
Protocol: core.ProtocolTCP,
Port: port.ContainerPort,
Name: port.Name,
Protocol: core.ProtocolTCP,
Port: port.ContainerPort,
TargetPort: intstr.FromString(port.Name),
})
}
}
@ -468,27 +437,11 @@ func CreateServerServicePortsWithSidecars(podInspector v1.Inspector, podName str
}
// CreateServerServicePort creates main server service port.
func CreateServerServicePort(group api.ServerGroup) core.ServicePort {
serverTargetPort := getTargetPort(group)
func CreateServerServicePort() core.ServicePort {
return core.ServicePort{
Name: api.ServerGroupReservedContainerNameServer,
Protocol: core.ProtocolTCP,
Port: shared.ArangoPort,
TargetPort: intstr.IntOrString{
IntVal: serverTargetPort,
},
Name: shared.ServerPortName,
Protocol: core.ProtocolTCP,
Port: shared.ArangoPort,
TargetPort: intstr.FromString(shared.ServerPortName),
}
}
// getTargetPort returns target port for the given server group.
func getTargetPort(group api.ServerGroup) int32 {
if group == api.ServerGroupSyncMasters {
return shared.ArangoSyncMasterPort
}
if group == api.ServerGroupSyncWorkers {
return shared.ArangoSyncWorkerPort
}
return shared.ArangoPort
}

View file

@ -0,0 +1,120 @@
//
// DISCLAIMER
//
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package patcher
import (
"context"
core "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/equality"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"github.com/arangodb/kube-arangodb/pkg/deployment/patch"
"github.com/arangodb/kube-arangodb/pkg/util/globals"
v1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1"
)
type ServicePatch func(in *core.Service) []patch.Item
// ServicePatcher evaluates the given patch functions against the current
// state of the Service and, when at least one of them produced patch items,
// submits a single JSON patch request to the API server.
// The returned bool is true iff a patch request was actually sent.
func ServicePatcher(ctx context.Context, client v1.ModInterface, in *core.Service, opts meta.PatchOptions, functions ...ServicePatch) (bool, error) {
	// Without a service (or a service name) there is nothing to patch.
	if in == nil || in.GetName() == "" {
		return false, nil
	}

	var items []patch.Item
	for _, fn := range functions {
		if fn == nil {
			continue
		}
		items = append(items, fn(in)...)
	}

	// Every patch function agreed the service already matches the desired state.
	if len(items) == 0 {
		return false, nil
	}

	data, err := patch.NewPatch(items...).Marshal()
	if err != nil {
		return false, err
	}

	nctx, cancel := globals.GetGlobals().Timeouts().Kubernetes().WithTimeout(ctx)
	defer cancel()

	if _, err := client.Patch(nctx, in.GetName(), types.JSONPatchType, data, opts); err != nil {
		return false, err
	}

	return true, nil
}
// PatchServicePorts returns a ServicePatch which replaces spec.ports when the
// desired ports differ from the ports currently set on the Service. The
// explicit length comparison additionally catches cases (such as an empty
// desired list) that DeepDerivative alone would treat as matching.
func PatchServicePorts(ports []core.ServicePort) ServicePatch {
	return func(in *core.Service) []patch.Item {
		current := in.Spec.Ports
		if len(current) == len(ports) && equality.Semantic.DeepDerivative(ports, current) {
			// Desired ports are already present — nothing to do.
			return nil
		}
		return []patch.Item{patch.ItemReplace(patch.NewPath("spec", "ports"), ports)}
	}
}
// PatchServiceSelector returns a ServicePatch which replaces spec.selector
// whenever the current selector map is not exactly equal to the desired one.
func PatchServiceSelector(selector map[string]string) ServicePatch {
	return func(in *core.Service) []patch.Item {
		if equality.Semantic.DeepEqual(in.Spec.Selector, selector) {
			// Selector already as desired.
			return nil
		}
		item := patch.ItemReplace(patch.NewPath("spec", "selector"), selector)
		return []patch.Item{item}
	}
}
// PatchServiceType returns a ServicePatch which sets spec.type to the given
// value when the Service does not use it already.
func PatchServiceType(t core.ServiceType) ServicePatch {
	return func(in *core.Service) []patch.Item {
		if t == in.Spec.Type {
			// Type already matches.
			return nil
		}
		item := patch.ItemReplace(patch.NewPath("spec", "type"), t)
		return []patch.Item{item}
	}
}
// PatchServicePublishNotReadyAddresses returns a ServicePatch which sets
// spec.publishNotReadyAddresses when the Service carries a different value.
func PatchServicePublishNotReadyAddresses(publishNotReadyAddresses bool) ServicePatch {
	return func(in *core.Service) []patch.Item {
		if publishNotReadyAddresses == in.Spec.PublishNotReadyAddresses {
			// Already set as requested.
			return nil
		}
		item := patch.ItemReplace(patch.NewPath("spec", "publishNotReadyAddresses"), publishNotReadyAddresses)
		return []patch.Item{item}
	}
}

View file

@ -0,0 +1,69 @@
//
// DISCLAIMER
//
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package patcher
import (
"testing"
"github.com/stretchr/testify/require"
core "k8s.io/api/core/v1"
)
func Test_Service_Ports(t *testing.T) {
t.Run("Equal", func(t *testing.T) {
q := PatchServicePorts([]core.ServicePort{
{
Name: "test",
},
})(&core.Service{
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: "test",
},
},
},
})
require.Len(t, q, 0)
})
t.Run("Missing", func(t *testing.T) {
q := PatchServicePorts([]core.ServicePort{
{
Name: "test",
},
{
Name: "exporter",
},
})(&core.Service{
Spec: core.ServiceSpec{
Ports: []core.ServicePort{
{
Name: "test",
},
},
},
})
require.Len(t, q, 1)
})
}

View file

@ -0,0 +1,98 @@
//
// DISCLAIMER
//
// Copyright 2016-2022 ArangoDB GmbH, Cologne, Germany
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Copyright holder is ArangoDB GmbH, Cologne, Germany
//
package patcher
import (
"context"
"testing"
"github.com/stretchr/testify/require"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"github.com/arangodb/kube-arangodb/pkg/util/tests"
)
// Test_Service exercises ServicePatcher end-to-end against a fake inspector:
// it creates a Service and then toggles spec.publishNotReadyAddresses through
// PatchServicePublishNotReadyAddresses, checking that ServicePatcher reports
// changed == true only when a patch request was actually issued.
// NOTE: the sub-tests share the inspector `c` and run in order; each step
// depends on the state left behind by the previous one.
func Test_Service(t *testing.T) {
	c := tests.NewEmptyInspector(t)

	t.Run("Create", func(t *testing.T) {
		// Refresh the inspector cache before touching cluster state.
		require.NoError(t, c.Refresh(context.Background()))

		// Seed an empty Service named "test" in the inspector's namespace.
		_, err := c.ServicesModInterface().V1().Create(context.Background(), &core.Service{
			ObjectMeta: meta.ObjectMeta{
				Name:      "test",
				Namespace: c.Namespace(),
			},
		}, meta.CreateOptions{})
		require.NoError(t, err)
	})

	t.Run("publishNotReadyAddresses", func(t *testing.T) {
		t.Run("Set to true", func(t *testing.T) {
			require.NoError(t, c.Refresh(context.Background()))

			svc, ok := c.Service().V1().GetSimple("test")
			require.True(t, ok)
			// The flag starts out false on the freshly created Service.
			require.False(t, svc.Spec.PublishNotReadyAddresses)

			// false -> true: a patch is issued, so changed must be true.
			changed, err := ServicePatcher(context.Background(), c.ServicesModInterface().V1(), svc, meta.PatchOptions{}, PatchServicePublishNotReadyAddresses(true))
			require.NoError(t, err)
			require.True(t, changed)

			require.NoError(t, c.Refresh(context.Background()))

			svc, ok = c.Service().V1().GetSimple("test")
			require.True(t, ok)
			require.True(t, svc.Spec.PublishNotReadyAddresses)
		})
		t.Run("Reset to true", func(t *testing.T) {
			require.NoError(t, c.Refresh(context.Background()))

			svc, ok := c.Service().V1().GetSimple("test")
			require.True(t, ok)
			require.True(t, svc.Spec.PublishNotReadyAddresses)

			// true -> true: desired state already matches, so no patch request
			// is sent and changed must be false.
			changed, err := ServicePatcher(context.Background(), c.ServicesModInterface().V1(), svc, meta.PatchOptions{}, PatchServicePublishNotReadyAddresses(true))
			require.NoError(t, err)
			require.False(t, changed)

			require.NoError(t, c.Refresh(context.Background()))

			svc, ok = c.Service().V1().GetSimple("test")
			require.True(t, ok)
			require.True(t, svc.Spec.PublishNotReadyAddresses)
		})
		t.Run("Set to false", func(t *testing.T) {
			require.NoError(t, c.Refresh(context.Background()))

			svc, ok := c.Service().V1().GetSimple("test")
			require.True(t, ok)
			require.True(t, svc.Spec.PublishNotReadyAddresses)

			// true -> false: the value flips back, so a patch is issued again.
			changed, err := ServicePatcher(context.Background(), c.ServicesModInterface().V1(), svc, meta.PatchOptions{}, PatchServicePublishNotReadyAddresses(false))
			require.NoError(t, err)
			require.True(t, changed)

			require.NoError(t, c.Refresh(context.Background()))

			svc, ok = c.Service().V1().GetSimple("test")
			require.True(t, ok)
			require.False(t, svc.Spec.PublishNotReadyAddresses)
		})
	})
}

View file

@ -29,13 +29,14 @@ import (
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/intstr"
api "github.com/arangodb/kube-arangodb/pkg/apis/deployment/v1"
"github.com/arangodb/kube-arangodb/pkg/apis/shared"
"github.com/arangodb/kube-arangodb/pkg/util/errors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector"
servicev1 "github.com/arangodb/kube-arangodb/pkg/util/k8sutil/inspector/service/v1"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/kerrors"
"github.com/arangodb/kube-arangodb/pkg/util/k8sutil/patcher"
)
// CreateHeadlessServiceName returns the name of the headless service for the given
@ -74,36 +75,33 @@ func CreateAgentLeaderServiceName(deploymentName string) string {
}
// CreateExporterService
func CreateExporterService(ctx context.Context, cachedStatus service.Inspector, svcs servicev1.ModInterface,
deployment meta.Object, owner meta.OwnerReference) (string, bool, error) {
func CreateExporterService(ctx context.Context, cachedStatus inspector.Inspector,
deployment meta.Object, ports []core.ServicePort, selectors map[string]string, owner meta.OwnerReference) (string, bool, error) {
deploymentName := deployment.GetName()
svcName := CreateExporterClientServiceName(deploymentName)
selectorLabels := LabelsForExporterServiceSelector(deploymentName)
if _, exists := cachedStatus.Service().V1().GetSimple(svcName); exists {
return svcName, false, nil
if svc, exists := cachedStatus.Service().V1().GetSimple(svcName); exists {
if changed, err := patcher.ServicePatcher(ctx, cachedStatus.ServicesModInterface().V1(), svc, meta.PatchOptions{}, patcher.PatchServiceSelector(selectors), patcher.PatchServicePorts(ports)); err != nil {
return "", false, err
} else {
return svcName, changed, nil
}
}
svc := &core.Service{
ObjectMeta: meta.ObjectMeta{
Name: svcName,
Labels: LabelsForExporterService(deploymentName),
Labels: selectors,
},
Spec: core.ServiceSpec{
ClusterIP: core.ClusterIPNone,
Ports: []core.ServicePort{
{
Name: "exporter",
Protocol: core.ProtocolTCP,
Port: shared.ArangoExporterPort,
},
},
Selector: selectorLabels,
Ports: ports,
Selector: selectors,
},
}
AddOwnerRefToObject(svc.GetObjectMeta(), &owner)
if _, err := svcs.Create(ctx, svc, meta.CreateOptions{}); kerrors.IsAlreadyExists(err) {
if _, err := cachedStatus.ServicesModInterface().V1().Create(ctx, svc, meta.CreateOptions{}); kerrors.IsAlreadyExists(err) {
return svcName, false, nil
} else if err != nil {
return svcName, false, errors.WithStack(err)
@ -111,110 +109,137 @@ func CreateExporterService(ctx context.Context, cachedStatus service.Inspector,
return svcName, true, nil
}
// ExporterServiceDetails returns the service ports and the selector labels
// used for the metrics exporter service of the given deployment. The port
// targets containers by the named exporter port rather than a fixed number.
func ExporterServiceDetails(deploymentName string) ([]core.ServicePort, map[string]string) {
	exporterPort := core.ServicePort{
		Name:       shared.ExporterPortName,
		Protocol:   core.ProtocolTCP,
		Port:       shared.ArangoExporterPort,
		TargetPort: intstr.FromString(shared.ExporterPortName),
	}
	return []core.ServicePort{exporterPort}, LabelsForExporterServiceSelector(deploymentName)
}
// CreateHeadlessService prepares and creates a headless service in k8s, used to provide a stable
// DNS name for all pods.
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func CreateHeadlessService(ctx context.Context, svcs servicev1.ModInterface, deployment meta.Object,
ports []core.ServicePort, selectors map[string]string,
owner meta.OwnerReference) (string, bool, error) {
deploymentName := deployment.GetName()
svcName := CreateHeadlessServiceName(deploymentName)
ports := []core.ServicePort{
{
Name: "server",
Protocol: core.ProtocolTCP,
Port: shared.ArangoPort,
},
}
publishNotReadyAddresses := true
serviceType := core.ServiceTypeClusterIP
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, shared.ClusterIPNone, "", serviceType, ports,
"", nil, publishNotReadyAddresses, false, owner)
newlyCreated, err := createService(ctx, svcs, svcName, shared.ClusterIPNone, core.ServiceTypeClusterIP, ports,
selectors, "", nil, true, owner)
if err != nil {
return "", false, errors.WithStack(err)
}
return svcName, newlyCreated, nil
}
// HeadlessServiceDetails returns the service ports and the selector labels
// used by the headless service of the given deployment and role. The port
// targets the container port by its name (shared.ServerPortName).
func HeadlessServiceDetails(deploymentName string, role string) ([]core.ServicePort, map[string]string) {
	return []core.ServicePort{
		{
			Name:       shared.ServerPortName,
			Protocol:   core.ProtocolTCP,
			Port:       shared.ArangoPort,
			TargetPort: intstr.FromString(shared.ServerPortName),
		},
	}, LabelsForDeployment(deploymentName, role)
}
// CreateDatabaseClientService prepares and creates a service in k8s, used by database clients within the k8s cluster.
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func CreateDatabaseClientService(ctx context.Context, svcs servicev1.ModInterface, deployment meta.Object,
single, withLeader bool, owner meta.OwnerReference) (string, bool, error) {
ports []core.ServicePort, selectors map[string]string, owner meta.OwnerReference) (string, bool, error) {
deploymentName := deployment.GetName()
svcName := CreateDatabaseClientServiceName(deploymentName)
ports := []core.ServicePort{
{
Name: api.ServerGroupReservedContainerNameServer,
Protocol: core.ProtocolTCP,
Port: shared.ArangoPort,
},
}
var role string
if single {
role = "single"
} else {
role = "coordinator"
}
serviceType := core.ServiceTypeClusterIP
publishNotReadyAddresses := false
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, "", role, serviceType, ports, "", nil,
publishNotReadyAddresses, withLeader, owner)
newlyCreated, err := createService(ctx, svcs, svcName, "", core.ServiceTypeClusterIP, ports, selectors, "", nil,
false, owner)
if err != nil {
return "", false, errors.WithStack(err)
}
return svcName, newlyCreated, nil
}
// DatabaseClientDetails returns the service ports and the selector labels
// used by the database client service of the given deployment and role.
// When withLeader is set, the selector additionally targets the current
// leader pod via the LabelKeyArangoLeader label.
func DatabaseClientDetails(deploymentName string, role string, withLeader bool) ([]core.ServicePort, map[string]string) {
	selectors := LabelsForDeployment(deploymentName, role)
	if withLeader {
		selectors[LabelKeyArangoLeader] = "true"
	}
	return []core.ServicePort{
		{
			Name:       shared.ServerPortName,
			Protocol:   core.ProtocolTCP,
			Port:       shared.ArangoPort,
			TargetPort: intstr.FromString(shared.ServerPortName),
		},
	}, selectors
}
// CreateExternalAccessService prepares and creates a service in k8s, used to access the database/sync from outside k8s cluster.
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func CreateExternalAccessService(ctx context.Context, svcs servicev1.ModInterface, svcName, role string,
deployment meta.Object, serviceType core.ServiceType, port, nodePort int, loadBalancerIP string,
loadBalancerSourceRanges []string, owner meta.OwnerReference, withLeader bool) (string, bool, error) {
deploymentName := deployment.GetName()
ports := []core.ServicePort{
{
Name: "server",
Protocol: core.ProtocolTCP,
Port: int32(port),
NodePort: int32(nodePort),
},
}
publishNotReadyAddresses := false
newlyCreated, err := createService(ctx, svcs, svcName, deploymentName, "", role, serviceType, ports, loadBalancerIP,
loadBalancerSourceRanges, publishNotReadyAddresses, withLeader, owner)
func CreateExternalAccessService(ctx context.Context, svcs servicev1.ModInterface, svcName string, serviceType core.ServiceType,
ports []core.ServicePort, selectors map[string]string, loadBalancerIP string,
loadBalancerSourceRanges []string, owner meta.OwnerReference) (string, bool, error) {
newlyCreated, err := createService(ctx, svcs, svcName, "", serviceType, ports, selectors, loadBalancerIP,
loadBalancerSourceRanges, false, owner)
if err != nil {
return "", false, errors.WithStack(err)
}
return svcName, newlyCreated, nil
}
// createService prepares and creates a service in k8s.
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func createService(ctx context.Context, svcs servicev1.ModInterface, svcName, deploymentName, clusterIP, role string,
serviceType core.ServiceType, ports []core.ServicePort, loadBalancerIP string, loadBalancerSourceRanges []string,
publishNotReadyAddresses, withLeader bool, owner meta.OwnerReference) (bool, error) {
func ExternalAccessDetails(port, nodePort int, deploymentName, role string, withLeader bool) ([]core.ServicePort, map[string]string) {
ports := []core.ServicePort{
{
Name: shared.ServerPortName,
Protocol: core.ProtocolTCP,
Port: int32(port),
NodePort: int32(nodePort),
TargetPort: intstr.FromString(shared.ServerPortName),
},
}
labels := LabelsForDeployment(deploymentName, role)
if withLeader {
labels[LabelKeyArangoLeader] = "true"
}
return ports, labels
}
// createService prepares and creates a service in k8s.
// If the service already exists, nil is returned.
// If another error occurs, that error is returned.
// The returned bool is true if the service is created, or false when the service already existed.
func createService(ctx context.Context, svcs servicev1.ModInterface, svcName, clusterIP string,
serviceType core.ServiceType, ports []core.ServicePort, selectors map[string]string, loadBalancerIP string, loadBalancerSourceRanges []string,
publishNotReadyAddresses bool, owner meta.OwnerReference) (bool, error) {
svc := &core.Service{
ObjectMeta: meta.ObjectMeta{
Name: svcName,
Labels: labels,
Labels: selectors,
Annotations: map[string]string{},
},
Spec: core.ServiceSpec{
Type: serviceType,
Ports: ports,
Selector: labels,
Selector: selectors,
ClusterIP: clusterIP,
PublishNotReadyAddresses: publishNotReadyAddresses,
LoadBalancerIP: loadBalancerIP,