
alertmanager/statefulset: move gossip port to 9094

Alertmanager's default clustering port is now 9094, over both TCP
and UDP. This patch brings that change to the operator.
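In practice this means the operator now renders the clustering flags with the new port. A minimal sketch, assuming an Alertmanager named `main` with two replicas in a `monitoring` namespace and an `alertmanager-operated` governing service (all of these names are illustrative, not taken from this commit):

```yaml
# Sketch only: cluster flags as generated after this patch.
# Pod, service and namespace names below are assumptions for illustration.
args:
- --cluster.listen-address=[$(POD_IP)]:9094   # previously :6783
- --cluster.peer=alertmanager-main-0.alertmanager-operated.monitoring.svc:9094
- --cluster.peer=alertmanager-main-1.alertmanager-operated.monitoring.svc:9094
```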

Fixes 
Mickael Carl 2019-06-14 19:37:26 +02:00
parent ca400fdc3e
commit 23e9cff807
4 changed files with 39 additions and 23 deletions
Documentation
example/networkpolicies
pkg/alertmanager

Documentation

@@ -5,7 +5,7 @@
# Network policies
[Network policies](https://kubernetes.io/docs/user-guide/networkpolicies/) allow you to easily restrict the ingress traffic between pods using [k8s labels](https://kubernetes.io/docs/user-guide/labels/).
To keep your cluster safer, it's strongly recommended to enable network policies in the prometheus namespace.
# Example
@@ -16,14 +16,14 @@ This example will close all inbound communication on the namespace monitoring, a
First, follow the instructions to [add Calico to an existing Kubernetes cluster](http://docs.projectcalico.org/v1.5/getting-started/kubernetes/installation/).
Next, use the following configuration to deny all the ingress (inbound) traffic.
```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: default-deny-all
  namespace: monitoring
spec:
  podSelector:
    matchLabels:
```
Save the config file as default-deny-all.yaml and apply the configuration to the cluster using
@@ -35,7 +35,7 @@ kubectl apply -f <path to config file>/default-deny-all.yaml
Apply the following network policies to allow the necessary traffic to access ports in the pod:
```
$ kubectl apply -n monitoring -f example/networkpolicies/
networkpolicy "alertmanager-web" configured
networkpolicy "alertmanager-mesh" configured
@@ -48,9 +48,9 @@ networkpolicy "prometheus" configured
#### Alertmanager
* Allow inbound tcp dst port 9093 from any source to alertmanager
* Allow inbound tcp dst port 6783 from only alertmanager to alertmanager
* Allow inbound tcp dst port 9093 from any source to alertmanager
* Allow inbound tcp & udp dst port 9094 from only alertmanager to alertmanager
[embedmd]:# (../example/networkpolicies/alertmanager.yaml)
```yaml
apiVersion: networking.k8s.io/v1
@@ -86,8 +86,10 @@ spec:
values:
- main
ports:
- port: 6783
- port: 9094
protocol: TCP
- port: 9094
protocol: UDP
podSelector:
matchLabels:
alertmanager: main
@@ -96,7 +98,7 @@ spec:
#### Grafana
* Allow inbound tcp dst port 3000 from any source to grafana
[embedmd]:# (../example/networkpolicies/grafana.yaml)
```yaml
@@ -116,7 +118,7 @@ spec:
#### Prometheus
* Allow inbound tcp dst port 9090 from any source to prometheus
[embedmd]:# (../example/networkpolicies/prometheus.yaml)
```yaml
@@ -137,7 +139,7 @@ spec:
#### Node-exporter
* Allow inbound tcp dst port 9100 from only prometheus to node-exporter
[embedmd]:# (../example/networkpolicies/node-exporter.yaml)
```yaml
@@ -168,7 +170,7 @@ spec:
#### Kube-state-metrics
* Allow inbound tcp dst port 8080 from only prometheus to kube-state-metrics
[embedmd]:# (../example/networkpolicies/kube-state-metrics.yaml)
```yaml

example/networkpolicies

@@ -31,9 +31,12 @@ spec:
values:
- main
ports:
- port: 6783
- port: 9094
protocol: TCP
- port: 9094
protocol: UDP
podSelector:
matchLabels:
alertmanager: main
app: alertmanager
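Pieced together from the fragment above, the updated mesh policy looks roughly like this. It is a sketch rather than the file's exact contents: the `monitoring` namespace, the `alertmanager-mesh` name taken from the `kubectl apply` output earlier, and the exact shape of the `from` selector are assumptions.

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: alertmanager-mesh       # name assumed from the apply output above
  namespace: monitoring         # namespace assumed
spec:
  ingress:
  - from:
    - podSelector:
        matchExpressions:
        - key: alertmanager
          operator: In
          values:
          - main
    ports:
    - port: 9094
      protocol: TCP
    - port: 9094
      protocol: UDP
  podSelector:
    matchLabels:
      alertmanager: main
      app: alertmanager
```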

pkg/alertmanager

@@ -167,11 +167,17 @@ func makeStatefulSetService(p *monitoringv1.Alertmanager, config Config) *v1.Ser
Protocol: v1.ProtocolTCP,
},
{
Name: "mesh",
Port: 6783,
TargetPort: intstr.FromInt(6783),
Name: "mesh-tcp",
Port: 9094,
TargetPort: intstr.FromInt(9094),
Protocol: v1.ProtocolTCP,
},
{
Name: "mesh-udp",
Port: 9094,
TargetPort: intstr.FromInt(9094),
Protocol: v1.ProtocolUDP,
},
},
Selector: map[string]string{
"app": "alertmanager",
@@ -210,7 +216,7 @@ func makeStatefulSetSpec(a *monitoringv1.Alertmanager, config Config) (*appsv1.S
amArgs := []string{
fmt.Sprintf("--config.file=%s", alertmanagerConfFile),
fmt.Sprintf("--cluster.listen-address=[$(POD_IP)]:%d", 6783),
fmt.Sprintf("--cluster.listen-address=[$(POD_IP)]:%d", 9094),
fmt.Sprintf("--storage.path=%s", alertmanagerStorageDir),
fmt.Sprintf("--data.retention=%s", a.Spec.Retention),
}
@@ -297,7 +303,7 @@ func makeStatefulSetSpec(a *monitoringv1.Alertmanager, config Config) (*appsv1.S
podLabels["alertmanager"] = a.Name
for i := int32(0); i < *a.Spec.Replicas; i++ {
amArgs = append(amArgs, fmt.Sprintf("--cluster.peer=%s-%d.%s.%s.svc:6783", prefixedName(a.Name), i, governingServiceName, a.Namespace))
amArgs = append(amArgs, fmt.Sprintf("--cluster.peer=%s-%d.%s.%s.svc:9094", prefixedName(a.Name), i, governingServiceName, a.Namespace))
}
for _, peer := range a.Spec.AdditionalPeers {
@@ -306,10 +312,15 @@ func makeStatefulSetSpec(a *monitoringv1.Alertmanager, config Config) (*appsv1.S
ports := []v1.ContainerPort{
{
Name: "mesh",
ContainerPort: 6783,
Name: "mesh-tcp",
ContainerPort: 9094,
Protocol: v1.ProtocolTCP,
},
{
Name: "mesh-udp",
ContainerPort: 9094,
Protocol: v1.ProtocolUDP,
},
}
if !a.Spec.ListenLocal {
ports = append([]v1.ContainerPort{
@@ -336,7 +347,7 @@ func makeStatefulSetSpec(a *monitoringv1.Alertmanager, config Config) (*appsv1.S
for i := range amArgs {
// below Alertmanager v0.15.0 peer address port specification is not necessary
if strings.Contains(amArgs[i], "--cluster.peer") {
amArgs[i] = strings.TrimSuffix(amArgs[i], ":6783")
amArgs[i] = strings.TrimSuffix(amArgs[i], ":9094")
}
// below Alertmanager v0.15.0 high availability flags are prefixed with 'mesh' instead of 'cluster'
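The last hunk keeps the pre-v0.15.0 compatibility path consistent with the new port: for older Alertmanager versions the operator simply strips the port suffix from each peer flag (the separate rewrite of the `cluster` prefix to `mesh`, mentioned in the code comment, happens in a later step). Roughly, with an illustrative peer name:

```yaml
# Hypothetical peer flag, Alertmanager >= v0.15.0:
- --cluster.peer=alertmanager-main-0.alertmanager-operated.monitoring.svc:9094
# Same entry after the TrimSuffix(..., ":9094") pass for < v0.15.0:
- --cluster.peer=alertmanager-main-0.alertmanager-operated.monitoring.svc
```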

pkg/alertmanager (tests)

@@ -211,7 +211,7 @@ func TestListenLocal(t *testing.T) {
t.Fatal("Alertmanager readiness probe expected to be empty")
}
if len(sset.Spec.Template.Spec.Containers[0].Ports) != 1 {
if len(sset.Spec.Template.Spec.Containers[0].Ports) != 2 {
t.Fatal("Alertmanager container should only have one port defined")
}
}
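The updated assertion follows from the container-port change above: with `ListenLocal` set the web port is never appended, so the container is left with exactly the two mesh ports. A sketch of what the test now expects:

```yaml
# Container ports expected when spec.listenLocal is true (sketch)
ports:
- name: mesh-tcp
  containerPort: 9094
  protocol: TCP
- name: mesh-udp
  containerPort: 9094
  protocol: UDP
```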