Add additionalLabels field to TopologySpreadConstraint (#5967)

Co-authored-by: Simon Pasquier <spasquie@redhat.com>

parent 3014870986
commit 0d91832394

21 changed files with 1161 additions and 36 deletions

Documentation/api.md (generated, 454 lines changed)
@@ -1956,8 +1956,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -4095,6 +4095,28 @@ in clear-text. Prefer using <code>authorization</code>.</em></p>
 </tr>
 </tbody>
 </table>
+<h3 id="monitoring.coreos.com/v1.AdditionalLabelSelectors">AdditionalLabelSelectors
+(<code>string</code> alias)</h3>
+<p>
+(<em>Appears on:</em><a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">TopologySpreadConstraint</a>)
+</p>
+<div>
+</div>
+<table>
+<thead>
+<tr>
+<th>Value</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody><tr><td><p>"OnResource"</p></td>
+<td><p>Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards).</p>
+</td>
+</tr><tr><td><p>"OnShard"</p></td>
+<td><p>Automatically add a label selector that will select all pods matching the same shard.</p>
+</td>
+</tr></tbody>
+</table>
 <h3 id="monitoring.coreos.com/v1.AlertingSpec">AlertingSpec
 </h3>
 <p>
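For illustration, a constraint using the new enum could look like this in a Prometheus manifest (the metadata values and the zone topology key are made up for the example; only additionalLabelSelectors is new in this commit):

    apiVersion: monitoring.coreos.com/v1
    kind: Prometheus
    metadata:
      name: example   # hypothetical resource name
    spec:
      shards: 2
      topologySpreadConstraints:
        - maxSkew: 1
          topologyKey: topology.kubernetes.io/zone
          whenUnsatisfiable: DoNotSchedule
          # New field: the operator extends labelSelector with its own managed
          # labels so that each shard's pods are spread independently.
          additionalLabelSelectors: OnShard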
@@ -6318,8 +6340,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -7078,6 +7078,205 @@ The possible status values for this condition type are:
 </td>
 </tr></tbody>
 </table>
+<h3 id="monitoring.coreos.com/v1.CoreV1TopologySpreadConstraint">CoreV1TopologySpreadConstraint
+</h3>
+<p>
+(<em>Appears on:</em><a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">TopologySpreadConstraint</a>)
+</p>
+<div>
+</div>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>
+<code>maxSkew</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<p>MaxSkew describes the degree to which pods may be unevenly distributed.
+When <code>whenUnsatisfiable=DoNotSchedule</code>, it is the maximum permitted difference
+between the number of matching pods in the target topology and the global minimum.
+The global minimum is the minimum number of matching pods in an eligible domain
+or zero if the number of eligible domains is less than MinDomains.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 2/2/1:
+In this case, the global minimum is 1.
+| zone1 | zone2 | zone3 |
+| P P | P P | P |
+- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+violate MaxSkew(1).
+- if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+When <code>whenUnsatisfiable=ScheduleAnyway</code>, it is used to give higher precedence
+to topologies that satisfy it.
+It’s a required field. Default value is 1 and 0 is not allowed.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>topologyKey</code><br/>
+<em>
+string
+</em>
+</td>
+<td>
+<p>TopologyKey is the key of node labels. Nodes that have a label with this key
+and identical values are considered to be in the same topology.
+We consider each <key, value> as a “bucket”, and try to put balanced number
+of pods into each bucket.
+We define a domain as a particular instance of a topology.
+Also, we define an eligible domain as a domain whose nodes meet the requirements of
+nodeAffinityPolicy and nodeTaintsPolicy.
+e.g. If TopologyKey is “kubernetes.io/hostname”, each Node is a domain of that topology.
+And, if TopologyKey is “topology.kubernetes.io/zone”, each zone is a domain of that topology.
+It’s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>whenUnsatisfiable</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#unsatisfiableconstraintaction-v1-core">
+Kubernetes core/v1.UnsatisfiableConstraintAction
+</a>
+</em>
+</td>
+<td>
+<p>WhenUnsatisfiable indicates how to deal with a pod if it doesn’t satisfy
+the spread constraint.
+- DoNotSchedule (default) tells the scheduler not to schedule it.
+- ScheduleAnyway tells the scheduler to schedule the pod in any location,
+but giving higher precedence to topologies that would help reduce the
+skew.
+A constraint is considered “Unsatisfiable” for an incoming pod
+if and only if every possible node assignment for that pod would violate
+“MaxSkew” on some topology.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 3/1/1:
+| zone1 | zone2 | zone3 |
+| P P P | P | P |
+If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+won’t make it <em>more</em> imbalanced.
+It’s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>labelSelector</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta">
+Kubernetes meta/v1.LabelSelector
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>LabelSelector is used to find matching pods.
+Pods that match this label selector are counted to determine the number of pods
+in their corresponding topology domain.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>minDomains</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MinDomains indicates a minimum number of eligible domains.
+When the number of eligible domains with matching topology keys is less than minDomains,
+Pod Topology Spread treats “global minimum” as 0, and then the calculation of Skew is performed.
+And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+this value has no effect on scheduling.
+As a result, when the number of eligible domains is less than minDomains,
+scheduler won’t schedule more than maxSkew Pods to those domains.
+If value is nil, the constraint behaves as if MinDomains is equal to 1.
+Valid values are integers greater than 0.
+When value is not nil, WhenUnsatisfiable must be DoNotSchedule.</p>
+<p>For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+labelSelector spread as 2/2/2:
+| zone1 | zone2 | zone3 |
+| P P | P P | P P |
+The number of domains is less than 5(MinDomains), so “global minimum” is treated as 0.
+In this situation, new pod with the same labelSelector cannot be scheduled,
+because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+it will violate MaxSkew.</p>
+<p>This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeAffinityPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeAffinityPolicy indicates how we will treat Pod’s nodeAffinity/nodeSelector
+when calculating pod topology spread skew. Options are:
+- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.</p>
+<p>If this value is nil, the behavior is equivalent to the Honor policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeTaintsPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeTaintsPolicy indicates how we will treat node taints when calculating
+pod topology spread skew. Options are:
+- Honor: nodes without taints, along with tainted nodes for which the incoming pod
+has a toleration, are included.
+- Ignore: node taints are ignored. All nodes are included.</p>
+<p>If this value is nil, the behavior is equivalent to the Ignore policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>matchLabelKeys</code><br/>
+<em>
+[]string
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MatchLabelKeys is a set of pod label keys to select the pods over which
+spreading will be calculated. The keys are used to lookup values from the
+incoming pod labels, those key-value labels are ANDed with labelSelector
+to select the group of existing pods over which spreading will be calculated
+for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+MatchLabelKeys cannot be set when LabelSelector isn’t set.
+Keys that don’t exist in the incoming pod labels will
+be ignored. A null or empty list means only match against labelSelector.</p>
+<p>This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+</tbody>
+</table>
 <h3 id="monitoring.coreos.com/v1.Duration">Duration
 (<code>string</code> alias)</h3>
 <p>
@@ -10233,8 +10454,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -14443,6 +14664,219 @@ fail and an error will be logged.</p>
 </tr>
 </tbody>
 </table>
+<h3 id="monitoring.coreos.com/v1.TopologySpreadConstraint">TopologySpreadConstraint
+</h3>
+<p>
+(<em>Appears on:</em><a href="#monitoring.coreos.com/v1.CommonPrometheusFields">CommonPrometheusFields</a>)
+</p>
+<div>
+</div>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>
+<code>maxSkew</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<p>MaxSkew describes the degree to which pods may be unevenly distributed.
+When <code>whenUnsatisfiable=DoNotSchedule</code>, it is the maximum permitted difference
+between the number of matching pods in the target topology and the global minimum.
+The global minimum is the minimum number of matching pods in an eligible domain
+or zero if the number of eligible domains is less than MinDomains.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 2/2/1:
+In this case, the global minimum is 1.
+| zone1 | zone2 | zone3 |
+| P P | P P | P |
+- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+violate MaxSkew(1).
+- if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+When <code>whenUnsatisfiable=ScheduleAnyway</code>, it is used to give higher precedence
+to topologies that satisfy it.
+It’s a required field. Default value is 1 and 0 is not allowed.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>topologyKey</code><br/>
+<em>
+string
+</em>
+</td>
+<td>
+<p>TopologyKey is the key of node labels. Nodes that have a label with this key
+and identical values are considered to be in the same topology.
+We consider each <key, value> as a “bucket”, and try to put balanced number
+of pods into each bucket.
+We define a domain as a particular instance of a topology.
+Also, we define an eligible domain as a domain whose nodes meet the requirements of
+nodeAffinityPolicy and nodeTaintsPolicy.
+e.g. If TopologyKey is “kubernetes.io/hostname”, each Node is a domain of that topology.
+And, if TopologyKey is “topology.kubernetes.io/zone”, each zone is a domain of that topology.
+It’s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>whenUnsatisfiable</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#unsatisfiableconstraintaction-v1-core">
+Kubernetes core/v1.UnsatisfiableConstraintAction
+</a>
+</em>
+</td>
+<td>
+<p>WhenUnsatisfiable indicates how to deal with a pod if it doesn’t satisfy
+the spread constraint.
+- DoNotSchedule (default) tells the scheduler not to schedule it.
+- ScheduleAnyway tells the scheduler to schedule the pod in any location,
+but giving higher precedence to topologies that would help reduce the
+skew.
+A constraint is considered “Unsatisfiable” for an incoming pod
+if and only if every possible node assignment for that pod would violate
+“MaxSkew” on some topology.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 3/1/1:
+| zone1 | zone2 | zone3 |
+| P P P | P | P |
+If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+won’t make it <em>more</em> imbalanced.
+It’s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>labelSelector</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta">
+Kubernetes meta/v1.LabelSelector
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>LabelSelector is used to find matching pods.
+Pods that match this label selector are counted to determine the number of pods
+in their corresponding topology domain.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>minDomains</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MinDomains indicates a minimum number of eligible domains.
+When the number of eligible domains with matching topology keys is less than minDomains,
+Pod Topology Spread treats “global minimum” as 0, and then the calculation of Skew is performed.
+And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+this value has no effect on scheduling.
+As a result, when the number of eligible domains is less than minDomains,
+scheduler won’t schedule more than maxSkew Pods to those domains.
+If value is nil, the constraint behaves as if MinDomains is equal to 1.
+Valid values are integers greater than 0.
+When value is not nil, WhenUnsatisfiable must be DoNotSchedule.</p>
+<p>For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+labelSelector spread as 2/2/2:
+| zone1 | zone2 | zone3 |
+| P P | P P | P P |
+The number of domains is less than 5(MinDomains), so “global minimum” is treated as 0.
+In this situation, new pod with the same labelSelector cannot be scheduled,
+because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+it will violate MaxSkew.</p>
+<p>This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeAffinityPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeAffinityPolicy indicates how we will treat Pod’s nodeAffinity/nodeSelector
+when calculating pod topology spread skew. Options are:
+- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.</p>
+<p>If this value is nil, the behavior is equivalent to the Honor policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeTaintsPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeTaintsPolicy indicates how we will treat node taints when calculating
+pod topology spread skew. Options are:
+- Honor: nodes without taints, along with tainted nodes for which the incoming pod
+has a toleration, are included.
+- Ignore: node taints are ignored. All nodes are included.</p>
+<p>If this value is nil, the behavior is equivalent to the Ignore policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>matchLabelKeys</code><br/>
+<em>
+[]string
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MatchLabelKeys is a set of pod label keys to select the pods over which
+spreading will be calculated. The keys are used to lookup values from the
+incoming pod labels, those key-value labels are ANDed with labelSelector
+to select the group of existing pods over which spreading will be calculated
+for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+MatchLabelKeys cannot be set when LabelSelector isn’t set.
+Keys that don’t exist in the incoming pod labels will
+be ignored. A null or empty list means only match against labelSelector.</p>
+<p>This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>additionalLabelSelectors</code><br/>
+<em>
+<a href="#monitoring.coreos.com/v1.AdditionalLabelSelectors">
+AdditionalLabelSelectors
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.</p>
+</td>
+</tr>
+</tbody>
+</table>
 <h3 id="monitoring.coreos.com/v1.WebConfigFileFields">WebConfigFileFields
 </h3>
 <p>
@@ -15571,8 +16005,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -19720,8 +20154,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
bundle.yaml (generated, 18 lines changed)
@@ -20819,9 +20819,14 @@ spec:
       topologySpreadConstraints:
         description: Defines the pod's topology spread constraints if specified.
         items:
           description: TopologySpreadConstraint specifies how to spread matching
             pods among the given topology.
           properties:
+            additionalLabelSelectors:
+              description: Defines what Prometheus Operator managed labels
+                should be added to labelSelector on the topologySpreadConstraint.
+              enum:
+              - OnResource
+              - OnShard
+              type: string
             labelSelector:
               description: LabelSelector is used to find matching pods. Pods
                 that match this label selector are counted to determine the
@@ -30726,9 +30731,14 @@ spec:
       topologySpreadConstraints:
         description: Defines the pod's topology spread constraints if specified.
         items:
           description: TopologySpreadConstraint specifies how to spread matching
             pods among the given topology.
           properties:
+            additionalLabelSelectors:
+              description: Defines what Prometheus Operator managed labels
+                should be added to labelSelector on the topologySpreadConstraint.
+              enum:
+              - OnResource
+              - OnShard
+              type: string
             labelSelector:
               description: LabelSelector is used to find matching pods. Pods
                 that match this label selector are counted to determine the
@@ -6218,9 +6218,14 @@ spec:
       topologySpreadConstraints:
         description: Defines the pod's topology spread constraints if specified.
         items:
          description: TopologySpreadConstraint specifies how to spread matching
            pods among the given topology.
          properties:
+           additionalLabelSelectors:
+             description: Defines what Prometheus Operator managed labels
+               should be added to labelSelector on the topologySpreadConstraint.
+             enum:
+             - OnResource
+             - OnShard
+             type: string
           labelSelector:
             description: LabelSelector is used to find matching pods. Pods
               that match this label selector are counted to determine the
@@ -7607,9 +7607,14 @@ spec:
       topologySpreadConstraints:
         description: Defines the pod's topology spread constraints if specified.
         items:
          description: TopologySpreadConstraint specifies how to spread matching
            pods among the given topology.
          properties:
+           additionalLabelSelectors:
+             description: Defines what Prometheus Operator managed labels
+               should be added to labelSelector on the topologySpreadConstraint.
+             enum:
+             - OnResource
+             - OnShard
+             type: string
           labelSelector:
             description: LabelSelector is used to find matching pods. Pods
               that match this label selector are counted to determine the
@@ -6219,9 +6219,14 @@ spec:
       topologySpreadConstraints:
         description: Defines the pod's topology spread constraints if specified.
         items:
          description: TopologySpreadConstraint specifies how to spread matching
            pods among the given topology.
          properties:
+           additionalLabelSelectors:
+             description: Defines what Prometheus Operator managed labels
+               should be added to labelSelector on the topologySpreadConstraint.
+             enum:
+             - OnResource
+             - OnShard
+             type: string
           labelSelector:
             description: LabelSelector is used to find matching pods. Pods
               that match this label selector are counted to determine the
@@ -7608,9 +7608,14 @@ spec:
       topologySpreadConstraints:
         description: Defines the pod's topology spread constraints if specified.
         items:
          description: TopologySpreadConstraint specifies how to spread matching
            pods among the given topology.
          properties:
+           additionalLabelSelectors:
+             description: Defines what Prometheus Operator managed labels
+               should be added to labelSelector on the topologySpreadConstraint.
+             enum:
+             - OnResource
+             - OnShard
+             type: string
           labelSelector:
             description: LabelSelector is used to find matching pods. Pods
               that match this label selector are counted to determine the
@@ -5517,8 +5517,15 @@
         "topologySpreadConstraints": {
           "description": "Defines the pod's topology spread constraints if specified.",
           "items": {
             "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
             "properties": {
+              "additionalLabelSelectors": {
+                "description": "Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.",
+                "enum": [
+                  "OnResource",
+                  "OnShard"
+                ],
+                "type": "string"
+              },
               "labelSelector": {
                 "description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
                 "properties": {
@@ -6937,8 +6937,15 @@
         "topologySpreadConstraints": {
           "description": "Defines the pod's topology spread constraints if specified.",
           "items": {
             "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
             "properties": {
+              "additionalLabelSelectors": {
+                "description": "Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.",
+                "enum": [
+                  "OnResource",
+                  "OnShard"
+                ],
+                "type": "string"
+              },
               "labelSelector": {
                 "description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
                 "properties": {
@@ -57,6 +57,27 @@ func (l *Prometheus) GetStatus() PrometheusStatus {
 	return l.Status
 }

+// +kubebuilder:validation:Enum=OnResource;OnShard
+type AdditionalLabelSelectors string
+
+const (
+	// Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards).
+	ResourceNameLabelSelector AdditionalLabelSelectors = "OnResource"
+
+	// Automatically add a label selector that will select all pods matching the same shard.
+	ShardAndResourceNameLabelSelector AdditionalLabelSelectors = "OnShard"
+)
+
+type CoreV1TopologySpreadConstraint v1.TopologySpreadConstraint
+
+type TopologySpreadConstraint struct {
+	CoreV1TopologySpreadConstraint `json:",inline"`
+
+	//+optional
+	// Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.
+	AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"`
+}
+
 // CommonPrometheusFields are the options available to both the Prometheus server and agent.
 // +k8s:deepcopy-gen=true
 type CommonPrometheusFields struct {
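In Go, the wrapper type is populated through the embedded struct; a minimal literal (mirroring the tests added later in this commit, with ptr.To from k8s.io/utils/ptr) looks like:

	// Constraint that spreads all pods of the resource, ignoring shards.
	monitoringv1.TopologySpreadConstraint{
		AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
		CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
			MaxSkew:           1,
			TopologyKey:       "kubernetes.io/hostname",
			WhenUnsatisfiable: v1.DoNotSchedule,
		},
	}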
@@ -320,9 +341,10 @@ type CommonPrometheusFields struct {
 	// Defines the Pods' tolerations if specified.
 	// +optional
 	Tolerations []v1.Toleration `json:"tolerations,omitempty"`

 	// Defines the pod's topology spread constraints if specified.
-	// +optional
-	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+	//+optional
+	TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`

 	// Defines the list of remote write configurations.
 	// +optional
pkg/apis/monitoring/v1/zz_generated.deepcopy.go (generated, 63 lines changed)
@@ -749,7 +749,7 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) {
 	}
 	if in.TopologySpreadConstraints != nil {
 		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
-		*out = make([]corev1.TopologySpreadConstraint, len(*in))
+		*out = make([]TopologySpreadConstraint, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -926,6 +926,46 @@ func (in *Condition) DeepCopy() *Condition {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreV1TopologySpreadConstraint) DeepCopyInto(out *CoreV1TopologySpreadConstraint) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MinDomains != nil {
+		in, out := &in.MinDomains, &out.MinDomains
+		*out = new(int32)
+		**out = **in
+	}
+	if in.NodeAffinityPolicy != nil {
+		in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy
+		*out = new(corev1.NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.NodeTaintsPolicy != nil {
+		in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy
+		*out = new(corev1.NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.MatchLabelKeys != nil {
+		in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreV1TopologySpreadConstraint.
+func (in *CoreV1TopologySpreadConstraint) DeepCopy() *CoreV1TopologySpreadConstraint {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreV1TopologySpreadConstraint)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) {
 	*out = *in
@@ -3079,6 +3119,27 @@ func (in *ThanosSpec) DeepCopy() *ThanosSpec {
 	return out
 }

+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) {
+	*out = *in
+	in.CoreV1TopologySpreadConstraint.DeepCopyInto(&out.CoreV1TopologySpreadConstraint)
+	if in.AdditionalLabelSelectors != nil {
+		in, out := &in.AdditionalLabelSelectors, &out.AdditionalLabelSelectors
+		*out = new(AdditionalLabelSelectors)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint.
+func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologySpreadConstraint)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WebConfigFileFields) DeepCopyInto(out *WebConfigFileFields) {
 	*out = *in
@@ -65,7 +65,7 @@ type CommonPrometheusFieldsApplyConfiguration struct {
 	ConfigMaps []string `json:"configMaps,omitempty"`
 	Affinity *corev1.Affinity `json:"affinity,omitempty"`
 	Tolerations []corev1.Toleration `json:"tolerations,omitempty"`
-	TopologySpreadConstraints []corev1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+	TopologySpreadConstraints []TopologySpreadConstraintApplyConfiguration `json:"topologySpreadConstraints,omitempty"`
 	RemoteWrite []RemoteWriteSpecApplyConfiguration `json:"remoteWrite,omitempty"`
 	SecurityContext *corev1.PodSecurityContext `json:"securityContext,omitempty"`
 	ListenLocal *bool `json:"listenLocal,omitempty"`
@@ -452,9 +452,12 @@ func (b *CommonPrometheusFieldsApplyConfiguration) WithTolerations(values ...cor
 // WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field.
-func (b *CommonPrometheusFieldsApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *CommonPrometheusFieldsApplyConfiguration {
+func (b *CommonPrometheusFieldsApplyConfiguration) WithTopologySpreadConstraints(values ...*TopologySpreadConstraintApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration {
 	for i := range values {
-		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i])
+		if values[i] == nil {
+			panic("nil value passed to WithTopologySpreadConstraints")
+		}
+		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, *values[i])
 	}
 	return b
 }
@@ -0,0 +1,107 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CoreV1TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the CoreV1TopologySpreadConstraint type for use
+// with apply.
+type CoreV1TopologySpreadConstraintApplyConfiguration struct {
+	MaxSkew            *int32                            `json:"maxSkew,omitempty"`
+	TopologyKey        *string                           `json:"topologyKey,omitempty"`
+	WhenUnsatisfiable  *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
+	LabelSelector      *metav1.LabelSelector             `json:"labelSelector,omitempty"`
+	MinDomains         *int32                            `json:"minDomains,omitempty"`
+	NodeAffinityPolicy *v1.NodeInclusionPolicy           `json:"nodeAffinityPolicy,omitempty"`
+	NodeTaintsPolicy   *v1.NodeInclusionPolicy           `json:"nodeTaintsPolicy,omitempty"`
+	MatchLabelKeys     []string                          `json:"matchLabelKeys,omitempty"`
+}
+
+// CoreV1TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the CoreV1TopologySpreadConstraint type for use with
+// apply.
+func CoreV1TopologySpreadConstraint() *CoreV1TopologySpreadConstraintApplyConfiguration {
+	return &CoreV1TopologySpreadConstraintApplyConfiguration{}
+}
+
+// WithMaxSkew sets the MaxSkew field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxSkew field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMaxSkew(value int32) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.MaxSkew = &value
+	return b
+}
+
+// WithTopologyKey sets the TopologyKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TopologyKey field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value string) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.TopologyKey = &value
+	return b
+}
+
+// WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WhenUnsatisfiable field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value v1.UnsatisfiableConstraintAction) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.WhenUnsatisfiable = &value
+	return b
+}
+
+// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LabelSelector field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithLabelSelector(value metav1.LabelSelector) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.LabelSelector = &value
+	return b
+}
+
+// WithMinDomains sets the MinDomains field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinDomains field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.MinDomains = &value
+	return b
+}
+
+// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.NodeAffinityPolicy = &value
+	return b
+}
+
+// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.NodeTaintsPolicy = &value
+	return b
+}
+
+// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	for i := range values {
+		b.MatchLabelKeys = append(b.MatchLabelKeys, values[i])
+	}
+	return b
+}
@@ -398,9 +398,12 @@ func (b *PrometheusSpecApplyConfiguration) WithTolerations(values ...corev1.Tole
 // WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field.
-func (b *PrometheusSpecApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *PrometheusSpecApplyConfiguration {
+func (b *PrometheusSpecApplyConfiguration) WithTopologySpreadConstraints(values ...*TopologySpreadConstraintApplyConfiguration) *PrometheusSpecApplyConfiguration {
 	for i := range values {
-		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i])
+		if values[i] == nil {
+			panic("nil value passed to WithTopologySpreadConstraints")
+		}
+		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, *values[i])
 	}
 	return b
 }
@@ -0,0 +1,110 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use
+// with apply.
+type TopologySpreadConstraintApplyConfiguration struct {
+	CoreV1TopologySpreadConstraintApplyConfiguration `json:",inline"`
+	AdditionalLabelSelectors *monitoringv1.AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"`
+}
+
+// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with
+// apply.
+func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration {
+	return &TopologySpreadConstraintApplyConfiguration{}
+}
+
+// WithMaxSkew sets the MaxSkew field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxSkew field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithMaxSkew(value int32) *TopologySpreadConstraintApplyConfiguration {
+	b.MaxSkew = &value
+	return b
+}
+
+// WithTopologyKey sets the TopologyKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TopologyKey field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value string) *TopologySpreadConstraintApplyConfiguration {
+	b.TopologyKey = &value
+	return b
+}
+
+// WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WhenUnsatisfiable field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value corev1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration {
+	b.WhenUnsatisfiable = &value
+	return b
+}
+
+// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LabelSelector field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithLabelSelector(value metav1.LabelSelector) *TopologySpreadConstraintApplyConfiguration {
+	b.LabelSelector = &value
+	return b
+}
+
+// WithMinDomains sets the MinDomains field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinDomains field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) *TopologySpreadConstraintApplyConfiguration {
+	b.MinDomains = &value
+	return b
+}
+
+// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
+	b.NodeAffinityPolicy = &value
+	return b
+}
+
+// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
+	b.NodeTaintsPolicy = &value
+	return b
+}
+
+// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field.
+func (b *TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *TopologySpreadConstraintApplyConfiguration {
+	for i := range values {
+		b.MatchLabelKeys = append(b.MatchLabelKeys, values[i])
+	}
+	return b
+}
+
+// WithAdditionalLabelSelectors sets the AdditionalLabelSelectors field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdditionalLabelSelectors field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithAdditionalLabelSelectors(value monitoringv1.AdditionalLabelSelectors) *TopologySpreadConstraintApplyConfiguration {
+	b.AdditionalLabelSelectors = &value
+	return b
+}
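For callers of the apply-configuration API, the new builder chains like the other generated ones. A short sketch; the import aliases and the generated CommonPrometheusFields() constructor are assumed, as they are not shown in this diff:

	// Build one constraint, including the operator-specific field.
	tsc := TopologySpreadConstraint().
		WithMaxSkew(1).
		WithTopologyKey("kubernetes.io/hostname").
		WithWhenUnsatisfiable(corev1.DoNotSchedule).
		WithAdditionalLabelSelectors(monitoringv1.ShardAndResourceNameLabelSelector)

	// WithTopologySpreadConstraints now takes pointers to the new apply
	// configuration (see the changed hunk above) instead of core/v1 values.
	cpf := CommonPrometheusFields().WithTopologySpreadConstraints(tsc)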
@@ -377,9 +377,12 @@ func (b *PrometheusAgentSpecApplyConfiguration) WithTolerations(values ...corev1
 // WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field.
-func (b *PrometheusAgentSpecApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *PrometheusAgentSpecApplyConfiguration {
+func (b *PrometheusAgentSpecApplyConfiguration) WithTopologySpreadConstraints(values ...*v1.TopologySpreadConstraintApplyConfiguration) *PrometheusAgentSpecApplyConfiguration {
 	for i := range values {
-		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i])
+		if values[i] == nil {
+			panic("nil value passed to WithTopologySpreadConstraints")
+		}
+		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, *values[i])
 	}
 	return b
 }
@@ -69,6 +69,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
 		return &monitoringv1.CommonPrometheusFieldsApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("Condition"):
 		return &monitoringv1.ConditionApplyConfiguration{}
+	case v1.SchemeGroupVersion.WithKind("CoreV1TopologySpreadConstraint"):
+		return &monitoringv1.CoreV1TopologySpreadConstraintApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("EmbeddedObjectMetadata"):
 		return &monitoringv1.EmbeddedObjectMetadataApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("EmbeddedPersistentVolumeClaim"):
@@ -177,6 +179,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
 		return &monitoringv1.ThanosSpecApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("TLSConfig"):
 		return &monitoringv1.TLSConfigApplyConfiguration{}
+	case v1.SchemeGroupVersion.WithKind("TopologySpreadConstraint"):
+		return &monitoringv1.TopologySpreadConstraintApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("TSDBSpec"):
 		return &monitoringv1.TSDBSpecApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("WebConfigFileFields"):
@@ -391,7 +391,7 @@ func makeStatefulSetSpec(
 			Volumes:     volumes,
 			Tolerations: cpf.Tolerations,
 			Affinity:    cpf.Affinity,
-			TopologySpreadConstraints: cpf.TopologySpreadConstraints,
+			TopologySpreadConstraints: prompkg.MakeK8sTopologySpreadConstraint(finalSelectorLabels, cpf.TopologySpreadConstraints),
 			HostAliases: operator.MakeHostAliases(cpf.HostAliases),
 			HostNetwork: cpf.HostNetwork,
 		},
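The body of prompkg.MakeK8sTopologySpreadConstraint is not part of the hunks shown here. A minimal sketch consistent with the expectations in the new tests below (merge the operator's selector labels into labelSelector, dropping the per-shard label for OnResource); imports match the surrounding files and the function name casing is illustrative:

	// Sketch only: inferred from the tests, not the actual implementation.
	func makeK8sTopologySpreadConstraint(selectorLabels map[string]string, tscs []monitoringv1.TopologySpreadConstraint) []corev1.TopologySpreadConstraint {
		out := make([]corev1.TopologySpreadConstraint, 0, len(tscs))
		for _, tsc := range tscs {
			// The wrapper converts directly to the core/v1 type it aliases.
			coreTsc := corev1.TopologySpreadConstraint(tsc.CoreV1TopologySpreadConstraint)
			if tsc.AdditionalLabelSelectors != nil {
				// Build a fresh selector so the CRD object is not mutated.
				merged := map[string]string{}
				sel := &metav1.LabelSelector{MatchLabels: merged}
				if coreTsc.LabelSelector != nil {
					sel.MatchExpressions = coreTsc.LabelSelector.MatchExpressions
					for k, v := range coreTsc.LabelSelector.MatchLabels {
						merged[k] = v
					}
				}
				for k, v := range selectorLabels {
					// OnResource selects all pods of the resource, so the
					// per-shard label is left out; OnShard keeps every label.
					if *tsc.AdditionalLabelSelectors == monitoringv1.ResourceNameLabelSelector && k == "operator.prometheus.io/shard" {
						continue
					}
					merged[k] = v
				}
				coreTsc.LabelSelector = sel
			}
			out = append(out, coreTsc)
		}
		return out
	}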
@@ -21,10 +21,13 @@ import (

 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/ptr"

 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
|
|||
0,
|
||||
nil)
|
||||
}
|
||||
|
||||
func TestPodTopologySpreadConstraintWithAdditionalLabels(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
spec monitoringv1alpha1.PrometheusAgentSpec
|
||||
tsc v1.TopologySpreadConstraint
|
||||
}{
|
||||
{
|
||||
name: "without labelSelector and additionalLabels",
|
||||
spec: monitoringv1alpha1.PrometheusAgentSpec{
|
||||
CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
|
||||
TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
|
||||
{
|
||||
CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
tsc: v1.TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with labelSelector and without additionalLabels",
|
||||
spec: monitoringv1alpha1.PrometheusAgentSpec{
|
||||
CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
|
||||
TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
|
||||
{
|
||||
CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "prometheus",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
tsc: v1.TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "prometheus",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with labelSelector and additionalLabels as ShardAndNameResource",
|
||||
spec: monitoringv1alpha1.PrometheusAgentSpec{
|
||||
CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
|
||||
TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
|
||||
{
|
||||
AdditionalLabelSelectors: ptr.To(monitoringv1.ShardAndResourceNameLabelSelector),
|
||||
CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "prometheus",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
tsc: v1.TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "prometheus",
|
||||
"app.kubernetes.io/instance": "test",
|
||||
"app.kubernetes.io/managed-by": "prometheus-operator",
|
||||
"app.kubernetes.io/name": "prometheus-agent",
|
||||
"operator.prometheus.io/name": "test",
|
||||
"operator.prometheus.io/shard": "0",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
{
|
||||
name: "with labelSelector and additionalLabels as ResourceName",
|
||||
spec: monitoringv1alpha1.PrometheusAgentSpec{
|
||||
CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
|
||||
TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
|
||||
{
|
||||
AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
|
||||
CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "prometheus",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
tsc: v1.TopologySpreadConstraint{
|
||||
MaxSkew: 1,
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
WhenUnsatisfiable: v1.DoNotSchedule,
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: map[string]string{
|
||||
"app": "prometheus",
|
||||
"app.kubernetes.io/instance": "test",
|
||||
"app.kubernetes.io/managed-by": "prometheus-operator",
|
||||
"app.kubernetes.io/name": "prometheus-agent",
|
||||
"operator.prometheus.io/name": "test",
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
sts, err := makeStatefulSetFromPrometheus(monitoringv1alpha1.PrometheusAgent{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: "test",
|
||||
Namespace: "ns-test",
|
||||
},
|
||||
Spec: tc.spec,
|
||||
})
|
||||
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Greater(t, len(sts.Spec.Template.Spec.TopologySpreadConstraints), 0)
|
||||
assert.Equal(t, tc.tsc, sts.Spec.Template.Spec.TopologySpreadConstraints[0])
|
||||
})
|
||||
}
|
||||
}
|
||||
|
|
|
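A note on reading the expected selectors above: when additionalLabelSelectors is set, the operator merges its own selector labels into whatever labelSelector the user supplied. With ShardAndResourceNameLabelSelector that includes operator.prometheus.io/name and operator.prometheus.io/shard (plus the app.kubernetes.io/* identity labels); with ResourceNameLabelSelector the shard label is left out, so the constraint spreads all pods of the resource regardless of shard. When the field is unset, the constraint passes through untouched.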
@@ -494,7 +494,7 @@ func makeStatefulSetSpec(
 			Volumes:                   volumes,
 			Tolerations:               cpf.Tolerations,
 			Affinity:                  cpf.Affinity,
-			TopologySpreadConstraints: cpf.TopologySpreadConstraints,
+			TopologySpreadConstraints: prompkg.MakeK8sTopologySpreadConstraint(finalSelectorLabels, cpf.TopologySpreadConstraints),
 			HostAliases:               operator.MakeHostAliases(cpf.HostAliases),
 			HostNetwork:               cpf.HostNetwork,
 		},
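This one-line swap is where the new field takes effect: instead of copying the user's constraints verbatim into the pod template, makeStatefulSetSpec now routes them through the shared MakeK8sTopologySpreadConstraint helper together with the resource's selector labels. The helper is defined further down; a runnable sketch of its behavior follows its definition.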
@@ -25,6 +25,7 @@ import (
	"github.com/go-kit/log"
	"github.com/go-kit/log/level"
	"github.com/kylelemons/godebug/pretty"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
@@ -2906,3 +2907,154 @@ func TestPersistentVolumeClaimRetentionPolicy(t *testing.T) {
		t.Fatalf("expected persistentVolumeClaimDeletePolicy.WhenScaled to be %s but got %s", appsv1.DeletePersistentVolumeClaimRetentionPolicyType, sset.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled)
	}
}

func TestPodTopologySpreadConstraintWithAdditionalLabels(t *testing.T) {
	for _, tc := range []struct {
		name string
		spec monitoringv1.PrometheusSpec
		tsc  v1.TopologySpreadConstraint
	}{
		{
			name: "without labelSelector and additionalLabels",
			spec: monitoringv1.PrometheusSpec{
				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
						{
							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
								MaxSkew:           1,
								TopologyKey:       "kubernetes.io/hostname",
								WhenUnsatisfiable: v1.DoNotSchedule,
							},
						},
					},
				},
			},
			tsc: v1.TopologySpreadConstraint{
				MaxSkew:           1,
				TopologyKey:       "kubernetes.io/hostname",
				WhenUnsatisfiable: v1.DoNotSchedule,
			},
		},
		{
			name: "with labelSelector and without additionalLabels",
			spec: monitoringv1.PrometheusSpec{
				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
						{
							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
								MaxSkew:           1,
								TopologyKey:       "kubernetes.io/hostname",
								WhenUnsatisfiable: v1.DoNotSchedule,
								LabelSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										"app": "prometheus",
									},
								},
							},
						},
					},
				},
			},
			tsc: v1.TopologySpreadConstraint{
				MaxSkew:           1,
				TopologyKey:       "kubernetes.io/hostname",
				WhenUnsatisfiable: v1.DoNotSchedule,
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{
						"app": "prometheus",
					},
				},
			},
		},
		{
			name: "with labelSelector and additionalLabels as ShardAndNameResource",
			spec: monitoringv1.PrometheusSpec{
				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
						{
							AdditionalLabelSelectors: ptr.To(monitoringv1.ShardAndResourceNameLabelSelector),
							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
								MaxSkew:           1,
								TopologyKey:       "kubernetes.io/hostname",
								WhenUnsatisfiable: v1.DoNotSchedule,
								LabelSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										"app": "prometheus",
									},
								},
							},
						},
					},
				},
			},
			tsc: v1.TopologySpreadConstraint{
				MaxSkew:           1,
				TopologyKey:       "kubernetes.io/hostname",
				WhenUnsatisfiable: v1.DoNotSchedule,
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{
						"app":                            "prometheus",
						"app.kubernetes.io/instance":     "test",
						"app.kubernetes.io/managed-by":   "prometheus-operator",
						"prometheus":                     "test",
						prompkg.ShardLabelName:           "0",
						prompkg.PrometheusNameLabelName:  "test",
						prompkg.PrometheusK8sLabelName:   "prometheus",
					},
				},
			},
		},
		{
			name: "with labelSelector and additionalLabels as ResourceName",
			spec: monitoringv1.PrometheusSpec{
				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
						{
							AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
								MaxSkew:           1,
								TopologyKey:       "kubernetes.io/hostname",
								WhenUnsatisfiable: v1.DoNotSchedule,
								LabelSelector: &metav1.LabelSelector{
									MatchLabels: map[string]string{
										"app": "prometheus",
									},
								},
							},
						},
					},
				},
			},
			tsc: v1.TopologySpreadConstraint{
				MaxSkew:           1,
				TopologyKey:       "kubernetes.io/hostname",
				WhenUnsatisfiable: v1.DoNotSchedule,
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{
						"app":                            "prometheus",
						"app.kubernetes.io/instance":     "test",
						"app.kubernetes.io/managed-by":   "prometheus-operator",
						"prometheus":                     "test",
						prompkg.PrometheusNameLabelName:  "test",
						prompkg.PrometheusK8sLabelName:   "prometheus",
					},
				},
			},
		},
	} {
		t.Run(tc.name, func(t *testing.T) {
			sts, err := makeStatefulSetFromPrometheus(monitoringv1.Prometheus{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "test",
					Namespace: "ns-test",
				},
				Spec: tc.spec,
			})

			require.NoError(t, err)

			assert.Greater(t, len(sts.Spec.Template.Spec.TopologySpreadConstraints), 0)
			assert.Equal(t, tc.tsc, sts.Spec.Template.Spec.TopologySpreadConstraints[0])
		})
	}
}
@@ -60,6 +60,7 @@ var (
	ShardLabelName                  = "operator.prometheus.io/shard"
	PrometheusNameLabelName         = "operator.prometheus.io/name"
	PrometheusModeLabeLName         = "operator.prometheus.io/mode"
	PrometheusK8sLabelName          = "app.kubernetes.io/name"
	ProbeTimeoutSeconds       int32 = 3
	LabelPrometheusName             = "prometheus-name"
)
@@ -496,3 +497,32 @@ func ShareProcessNamespace(p monitoringv1.PrometheusInterface) *bool {
		) == monitoringv1.ProcessSignalReloadStrategyType,
	)
}

func MakeK8sTopologySpreadConstraint(selectorLabels map[string]string, tscs []monitoringv1.TopologySpreadConstraint) []v1.TopologySpreadConstraint {
	coreTscs := make([]v1.TopologySpreadConstraint, 0, len(tscs))

	for _, tsc := range tscs {
		// Constraints that don't opt into additional selectors pass through unchanged.
		if tsc.AdditionalLabelSelectors == nil {
			coreTscs = append(coreTscs, v1.TopologySpreadConstraint(tsc.CoreV1TopologySpreadConstraint))
			continue
		}

		if tsc.LabelSelector == nil {
			tsc.LabelSelector = &metav1.LabelSelector{
				MatchLabels: make(map[string]string),
			}
		}

		for key, value := range selectorLabels {
			// For ResourceNameLabelSelector the constraint should cover the whole
			// resource, so don't pin the selector to a single shard.
			if *tsc.AdditionalLabelSelectors == monitoringv1.ResourceNameLabelSelector && key == ShardLabelName {
				continue
			}
			tsc.LabelSelector.MatchLabels[key] = value
		}

		coreTscs = append(coreTscs, v1.TopologySpreadConstraint(tsc.CoreV1TopologySpreadConstraint))
	}

	return coreTscs
}
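To make the merge behavior concrete, here is a minimal, hypothetical driver for the helper. It is a sketch and not part of the commit: the selector-label values are invented stand-ins for the operator-computed finalSelectorLabels, and it assumes the helper is imported from pkg/prometheus under the alias prompkg, as the tests do.

package main

import (
	"fmt"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	prompkg "github.com/prometheus-operator/prometheus-operator/pkg/prometheus"
	v1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"
)

func main() {
	// One user-supplied constraint opting into resource-name selectors only.
	tscs := []monitoringv1.TopologySpreadConstraint{{
		AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
		CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
			MaxSkew:           1,
			TopologyKey:       "kubernetes.io/hostname",
			WhenUnsatisfiable: v1.DoNotSchedule,
		},
	}}

	// Hypothetical stand-in for the operator-computed selector labels.
	selectorLabels := map[string]string{
		prompkg.PrometheusNameLabelName: "example",
		prompkg.ShardLabelName:          "0",
	}

	out := prompkg.MakeK8sTopologySpreadConstraint(selectorLabels, tscs)

	// The shard label is skipped for ResourceNameLabelSelector, so only
	// operator.prometheus.io/name survives in the merged selector.
	fmt.Println(out[0].LabelSelector.MatchLabels)
}

Run against this commit, the sketch should print map[operator.prometheus.io/name:example]; switching to ShardAndResourceNameLabelSelector would keep operator.prometheus.io/shard: "0" as well, matching the table tests above.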