diff --git a/Documentation/api.md b/Documentation/api.md
index 7eae55b3e..6ab4b0216 100644
--- a/Documentation/api.md
+++ b/Documentation/api.md
@@ -1956,8 +1956,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -4095,6 +4095,28 @@ in clear-text. Prefer using <code>authorization</code>.</em></p>
 </tr>
 </tbody>
 </table>
+<h3 id="monitoring.coreos.com/v1.AdditionalLabelSelectors">AdditionalLabelSelectors
+(<code>string</code> alias)</h3>
+<p>
+(<em>Appears on:</em><a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">TopologySpreadConstraint</a>)
+</p>
+<div>
+</div>
+<table>
+<thead>
+<tr>
+<th>Value</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody><tr><td><p>&#34;OnResource&#34;</p></td>
+<td><p>Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards).</p>
+</td>
+</tr><tr><td><p>&#34;OnShard&#34;</p></td>
+<td><p>Automatically add a label selector that will select all pods matching the same shard of the same Prometheus/PrometheusAgent resource.</p>
+</td>
+</tr></tbody>
+</table>
 <h3 id="monitoring.coreos.com/v1.AlertingSpec">AlertingSpec
 </h3>
 <p>
@@ -6318,8 +6340,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -7078,6 +7100,205 @@ The possible status values for this condition type are:
 </td>
 </tr></tbody>
 </table>
+<h3 id="monitoring.coreos.com/v1.CoreV1TopologySpreadConstraint">CoreV1TopologySpreadConstraint
+</h3>
+<p>
+(<em>Appears on:</em><a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">TopologySpreadConstraint</a>)
+</p>
+<div>
+</div>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>
+<code>maxSkew</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<p>MaxSkew describes the degree to which pods may be unevenly distributed.
+When <code>whenUnsatisfiable=DoNotSchedule</code>, it is the maximum permitted difference
+between the number of matching pods in the target topology and the global minimum.
+The global minimum is the minimum number of matching pods in an eligible domain
+or zero if the number of eligible domains is less than MinDomains.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 2/2/1:
+In this case, the global minimum is 1.
+| zone1 | zone2 | zone3 |
+|  P P  |  P P  |   P   |
+- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+violate MaxSkew(1).
+- if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+When <code>whenUnsatisfiable=ScheduleAnyway</code>, it is used to give higher precedence
+to topologies that satisfy it.
+It&rsquo;s a required field. Default value is 1 and 0 is not allowed.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>topologyKey</code><br/>
+<em>
+string
+</em>
+</td>
+<td>
+<p>TopologyKey is the key of node labels. Nodes that have a label with this key
+and identical values are considered to be in the same topology.
+We consider each &lt;key, value&gt; as a &ldquo;bucket&rdquo;, and try to put balanced number
+of pods into each bucket.
+We define a domain as a particular instance of a topology.
+Also, we define an eligible domain as a domain whose nodes meet the requirements of
+nodeAffinityPolicy and nodeTaintsPolicy.
+e.g. If TopologyKey is &ldquo;kubernetes.io/hostname&rdquo;, each Node is a domain of that topology.
+And, if TopologyKey is &ldquo;topology.kubernetes.io/zone&rdquo;, each zone is a domain of that topology.
+It&rsquo;s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>whenUnsatisfiable</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#unsatisfiableconstraintaction-v1-core">
+Kubernetes core/v1.UnsatisfiableConstraintAction
+</a>
+</em>
+</td>
+<td>
+<p>WhenUnsatisfiable indicates how to deal with a pod if it doesn&rsquo;t satisfy
+the spread constraint.
+- DoNotSchedule (default) tells the scheduler not to schedule it.
+- ScheduleAnyway tells the scheduler to schedule the pod in any location,
+but giving higher precedence to topologies that would help reduce the
+skew.
+A constraint is considered &ldquo;Unsatisfiable&rdquo; for an incoming pod
+if and only if every possible node assignment for that pod would violate
+&ldquo;MaxSkew&rdquo; on some topology.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 3/1/1:
+| zone1 | zone2 | zone3 |
+| P P P |   P   |   P   |
+If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+won&rsquo;t make it <em>more</em> imbalanced.
+It&rsquo;s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>labelSelector</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta">
+Kubernetes meta/v1.LabelSelector
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>LabelSelector is used to find matching pods.
+Pods that match this label selector are counted to determine the number of pods
+in their corresponding topology domain.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>minDomains</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MinDomains indicates a minimum number of eligible domains.
+When the number of eligible domains with matching topology keys is less than minDomains,
+Pod Topology Spread treats &ldquo;global minimum&rdquo; as 0, and then the calculation of Skew is performed.
+And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+this value has no effect on scheduling.
+As a result, when the number of eligible domains is less than minDomains,
+scheduler won&rsquo;t schedule more than maxSkew Pods to those domains.
+If value is nil, the constraint behaves as if MinDomains is equal to 1.
+Valid values are integers greater than 0.
+When value is not nil, WhenUnsatisfiable must be DoNotSchedule.</p>
+<p>For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+labelSelector spread as 2/2/2:
+| zone1 | zone2 | zone3 |
+|  P P  |  P P  |  P P  |
+The number of domains is less than 5(MinDomains), so &ldquo;global minimum&rdquo; is treated as 0.
+In this situation, new pod with the same labelSelector cannot be scheduled,
+because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+it will violate MaxSkew.</p>
+<p>This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeAffinityPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeAffinityPolicy indicates how we will treat Pod&rsquo;s nodeAffinity/nodeSelector
+when calculating pod topology spread skew. Options are:
+- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.</p>
+<p>If this value is nil, the behavior is equivalent to the Honor policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeTaintsPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeTaintsPolicy indicates how we will treat node taints when calculating
+pod topology spread skew. Options are:
+- Honor: nodes without taints, along with tainted nodes for which the incoming pod
+has a toleration, are included.
+- Ignore: node taints are ignored. All nodes are included.</p>
+<p>If this value is nil, the behavior is equivalent to the Ignore policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>matchLabelKeys</code><br/>
+<em>
+[]string
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MatchLabelKeys is a set of pod label keys to select the pods over which
+spreading will be calculated. The keys are used to lookup values from the
+incoming pod labels, those key-value labels are ANDed with labelSelector
+to select the group of existing pods over which spreading will be calculated
+for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+MatchLabelKeys cannot be set when LabelSelector isn&rsquo;t set.
+Keys that don&rsquo;t exist in the incoming pod labels will
+be ignored. A null or empty list means only match against labelSelector.</p>
+<p>This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+</tbody>
+</table>
 <h3 id="monitoring.coreos.com/v1.Duration">Duration
 (<code>string</code> alias)</h3>
 <p>
@@ -10233,8 +10454,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -14443,6 +14664,219 @@ fail and an error will be logged.</p>
 </tr>
 </tbody>
 </table>
+<h3 id="monitoring.coreos.com/v1.TopologySpreadConstraint">TopologySpreadConstraint
+</h3>
+<p>
+(<em>Appears on:</em><a href="#monitoring.coreos.com/v1.CommonPrometheusFields">CommonPrometheusFields</a>)
+</p>
+<div>
+</div>
+<table>
+<thead>
+<tr>
+<th>Field</th>
+<th>Description</th>
+</tr>
+</thead>
+<tbody>
+<tr>
+<td>
+<code>maxSkew</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<p>MaxSkew describes the degree to which pods may be unevenly distributed.
+When <code>whenUnsatisfiable=DoNotSchedule</code>, it is the maximum permitted difference
+between the number of matching pods in the target topology and the global minimum.
+The global minimum is the minimum number of matching pods in an eligible domain
+or zero if the number of eligible domains is less than MinDomains.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 2/2/1:
+In this case, the global minimum is 1.
+| zone1 | zone2 | zone3 |
+|  P P  |  P P  |   P   |
+- if MaxSkew is 1, incoming pod can only be scheduled to zone3 to become 2/2/2;
+scheduling it onto zone1(zone2) would make the ActualSkew(3-1) on zone1(zone2)
+violate MaxSkew(1).
+- if MaxSkew is 2, incoming pod can be scheduled onto any zone.
+When <code>whenUnsatisfiable=ScheduleAnyway</code>, it is used to give higher precedence
+to topologies that satisfy it.
+It&rsquo;s a required field. Default value is 1 and 0 is not allowed.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>topologyKey</code><br/>
+<em>
+string
+</em>
+</td>
+<td>
+<p>TopologyKey is the key of node labels. Nodes that have a label with this key
+and identical values are considered to be in the same topology.
+We consider each &lt;key, value&gt; as a &ldquo;bucket&rdquo;, and try to put balanced number
+of pods into each bucket.
+We define a domain as a particular instance of a topology.
+Also, we define an eligible domain as a domain whose nodes meet the requirements of
+nodeAffinityPolicy and nodeTaintsPolicy.
+e.g. If TopologyKey is &ldquo;kubernetes.io/hostname&rdquo;, each Node is a domain of that topology.
+And, if TopologyKey is &ldquo;topology.kubernetes.io/zone&rdquo;, each zone is a domain of that topology.
+It&rsquo;s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>whenUnsatisfiable</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#unsatisfiableconstraintaction-v1-core">
+Kubernetes core/v1.UnsatisfiableConstraintAction
+</a>
+</em>
+</td>
+<td>
+<p>WhenUnsatisfiable indicates how to deal with a pod if it doesn&rsquo;t satisfy
+the spread constraint.
+- DoNotSchedule (default) tells the scheduler not to schedule it.
+- ScheduleAnyway tells the scheduler to schedule the pod in any location,
+but giving higher precedence to topologies that would help reduce the
+skew.
+A constraint is considered &ldquo;Unsatisfiable&rdquo; for an incoming pod
+if and only if every possible node assignment for that pod would violate
+&ldquo;MaxSkew&rdquo; on some topology.
+For example, in a 3-zone cluster, MaxSkew is set to 1, and pods with the same
+labelSelector spread as 3/1/1:
+| zone1 | zone2 | zone3 |
+| P P P |   P   |   P   |
+If WhenUnsatisfiable is set to DoNotSchedule, incoming pod can only be scheduled
+to zone2(zone3) to become 3/2/1(3/1/2) as ActualSkew(2-1) on zone2(zone3) satisfies
+MaxSkew(1). In other words, the cluster can still be imbalanced, but scheduler
+won&rsquo;t make it <em>more</em> imbalanced.
+It&rsquo;s a required field.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>labelSelector</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#labelselector-v1-meta">
+Kubernetes meta/v1.LabelSelector
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>LabelSelector is used to find matching pods.
+Pods that match this label selector are counted to determine the number of pods
+in their corresponding topology domain.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>minDomains</code><br/>
+<em>
+int32
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MinDomains indicates a minimum number of eligible domains.
+When the number of eligible domains with matching topology keys is less than minDomains,
+Pod Topology Spread treats &ldquo;global minimum&rdquo; as 0, and then the calculation of Skew is performed.
+And when the number of eligible domains with matching topology keys equals or greater than minDomains,
+this value has no effect on scheduling.
+As a result, when the number of eligible domains is less than minDomains,
+scheduler won&rsquo;t schedule more than maxSkew Pods to those domains.
+If value is nil, the constraint behaves as if MinDomains is equal to 1.
+Valid values are integers greater than 0.
+When value is not nil, WhenUnsatisfiable must be DoNotSchedule.</p>
+<p>For example, in a 3-zone cluster, MaxSkew is set to 2, MinDomains is set to 5 and pods with the same
+labelSelector spread as 2/2/2:
+| zone1 | zone2 | zone3 |
+|  P P  |  P P  |  P P  |
+The number of domains is less than 5(MinDomains), so &ldquo;global minimum&rdquo; is treated as 0.
+In this situation, new pod with the same labelSelector cannot be scheduled,
+because computed skew will be 3(3 - 0) if new Pod is scheduled to any of the three zones,
+it will violate MaxSkew.</p>
+<p>This is a beta field and requires the MinDomainsInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeAffinityPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeAffinityPolicy indicates how we will treat Pod&rsquo;s nodeAffinity/nodeSelector
+when calculating pod topology spread skew. Options are:
+- Honor: only nodes matching nodeAffinity/nodeSelector are included in the calculations.
+- Ignore: nodeAffinity/nodeSelector are ignored. All nodes are included in the calculations.</p>
+<p>If this value is nil, the behavior is equivalent to the Honor policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>nodeTaintsPolicy</code><br/>
+<em>
+<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#nodeinclusionpolicy-v1-core">
+Kubernetes core/v1.NodeInclusionPolicy
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>NodeTaintsPolicy indicates how we will treat node taints when calculating
+pod topology spread skew. Options are:
+- Honor: nodes without taints, along with tainted nodes for which the incoming pod
+has a toleration, are included.
+- Ignore: node taints are ignored. All nodes are included.</p>
+<p>If this value is nil, the behavior is equivalent to the Ignore policy.
+This is a beta-level feature default enabled by the NodeInclusionPolicyInPodTopologySpread feature flag.</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>matchLabelKeys</code><br/>
+<em>
+[]string
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>MatchLabelKeys is a set of pod label keys to select the pods over which
+spreading will be calculated. The keys are used to lookup values from the
+incoming pod labels, those key-value labels are ANDed with labelSelector
+to select the group of existing pods over which spreading will be calculated
+for the incoming pod. The same key is forbidden to exist in both MatchLabelKeys and LabelSelector.
+MatchLabelKeys cannot be set when LabelSelector isn&rsquo;t set.
+Keys that don&rsquo;t exist in the incoming pod labels will
+be ignored. A null or empty list means only match against labelSelector.</p>
+<p>This is a beta field and requires the MatchLabelKeysInPodTopologySpread feature gate to be enabled (enabled by default).</p>
+</td>
+</tr>
+<tr>
+<td>
+<code>additionalLabelSelectors</code><br/>
+<em>
+<a href="#monitoring.coreos.com/v1.AdditionalLabelSelectors">
+AdditionalLabelSelectors
+</a>
+</em>
+</td>
+<td>
+<em>(Optional)</em>
+<p>Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.</p>
+</td>
+</tr>
+</tbody>
+</table>
 <h3 id="monitoring.coreos.com/v1.WebConfigFileFields">WebConfigFileFields
 </h3>
 <p>
@@ -15571,8 +16005,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
@@ -19720,8 +20154,8 @@ Kubernetes core/v1.Affinity
 <td>
 <code>topologySpreadConstraints</code><br/>
 <em>
-<a href="https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.24/#topologyspreadconstraint-v1-core">
-[]Kubernetes core/v1.TopologySpreadConstraint
+<a href="#monitoring.coreos.com/v1.TopologySpreadConstraint">
+[]TopologySpreadConstraint
 </a>
 </em>
 </td>
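The `TopologySpreadConstraint` type documented above keeps every core/v1 field and adds `additionalLabelSelectors`. As a rough illustration of how code written against the new Go types sets the field (the function name and the zone topology key are illustrative, not taken from this change):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/utils/ptr"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// zoneSpread returns a single constraint that spreads pods across zones and asks
// the operator to also select pods belonging to the same shard.
func zoneSpread() []monitoringv1.TopologySpreadConstraint {
	return []monitoringv1.TopologySpreadConstraint{{
		// The familiar core/v1 fields live on the embedded CoreV1TopologySpreadConstraint.
		CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
			MaxSkew:           1,
			TopologyKey:       "topology.kubernetes.io/zone",
			WhenUnsatisfiable: corev1.DoNotSchedule,
		},
		// "OnShard": the operator merges its shard/resource labels into labelSelector.
		AdditionalLabelSelectors: ptr.To(monitoringv1.ShardAndResourceNameLabelSelector),
	}}
}
```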
diff --git a/bundle.yaml b/bundle.yaml
index 6d7e5c4ca..eb3049c29 100644
--- a/bundle.yaml
+++ b/bundle.yaml
@@ -20819,9 +20819,14 @@ spec:
               topologySpreadConstraints:
                 description: Defines the pod's topology spread constraints if specified.
                 items:
-                  description: TopologySpreadConstraint specifies how to spread matching
-                    pods among the given topology.
                   properties:
+                    additionalLabelSelectors:
+                      description: Defines what Prometheus Operator managed labels
+                        should be added to labelSelector on the topologySpreadConstraint.
+                      enum:
+                      - OnResource
+                      - OnShard
+                      type: string
                     labelSelector:
                       description: LabelSelector is used to find matching pods. Pods
                         that match this label selector are counted to determine the
@@ -30726,9 +30731,14 @@ spec:
               topologySpreadConstraints:
                 description: Defines the pod's topology spread constraints if specified.
                 items:
-                  description: TopologySpreadConstraint specifies how to spread matching
-                    pods among the given topology.
                   properties:
+                    additionalLabelSelectors:
+                      description: Defines what Prometheus Operator managed labels
+                        should be added to labelSelector on the topologySpreadConstraint.
+                      enum:
+                      - OnResource
+                      - OnShard
+                      type: string
                     labelSelector:
                       description: LabelSelector is used to find matching pods. Pods
                         that match this label selector are counted to determine the
diff --git a/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheusagents.yaml b/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheusagents.yaml
index 8c902682d..52df89a5f 100644
--- a/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheusagents.yaml
+++ b/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheusagents.yaml
@@ -6218,9 +6218,14 @@ spec:
               topologySpreadConstraints:
                 description: Defines the pod's topology spread constraints if specified.
                 items:
-                  description: TopologySpreadConstraint specifies how to spread matching
-                    pods among the given topology.
                   properties:
+                    additionalLabelSelectors:
+                      description: Defines what Prometheus Operator managed labels
+                        should be added to labelSelector on the topologySpreadConstraint.
+                      enum:
+                      - OnResource
+                      - OnShard
+                      type: string
                     labelSelector:
                       description: LabelSelector is used to find matching pods. Pods
                         that match this label selector are counted to determine the
diff --git a/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheuses.yaml b/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheuses.yaml
index 4ed2dcd16..b8c826a5e 100644
--- a/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheuses.yaml
+++ b/example/prometheus-operator-crd-full/monitoring.coreos.com_prometheuses.yaml
@@ -7607,9 +7607,14 @@ spec:
               topologySpreadConstraints:
                 description: Defines the pod's topology spread constraints if specified.
                 items:
-                  description: TopologySpreadConstraint specifies how to spread matching
-                    pods among the given topology.
                   properties:
+                    additionalLabelSelectors:
+                      description: Defines what Prometheus Operator managed labels
+                        should be added to labelSelector on the topologySpreadConstraint.
+                      enum:
+                      - OnResource
+                      - OnShard
+                      type: string
                     labelSelector:
                       description: LabelSelector is used to find matching pods. Pods
                         that match this label selector are counted to determine the
diff --git a/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml b/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
index fe1729782..8b73a23f1 100644
--- a/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
+++ b/example/prometheus-operator-crd/monitoring.coreos.com_prometheusagents.yaml
@@ -6219,9 +6219,14 @@ spec:
               topologySpreadConstraints:
                 description: Defines the pod's topology spread constraints if specified.
                 items:
-                  description: TopologySpreadConstraint specifies how to spread matching
-                    pods among the given topology.
                   properties:
+                    additionalLabelSelectors:
+                      description: Defines what Prometheus Operator managed labels
+                        should be added to labelSelector on the topologySpreadConstraint.
+                      enum:
+                      - OnResource
+                      - OnShard
+                      type: string
                     labelSelector:
                       description: LabelSelector is used to find matching pods. Pods
                         that match this label selector are counted to determine the
diff --git a/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml b/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
index 4051be30d..d4e77b2ca 100644
--- a/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
+++ b/example/prometheus-operator-crd/monitoring.coreos.com_prometheuses.yaml
@@ -7608,9 +7608,14 @@ spec:
               topologySpreadConstraints:
                 description: Defines the pod's topology spread constraints if specified.
                 items:
-                  description: TopologySpreadConstraint specifies how to spread matching
-                    pods among the given topology.
                   properties:
+                    additionalLabelSelectors:
+                      description: Defines what Prometheus Operator managed labels
+                        should be added to labelSelector on the topologySpreadConstraint.
+                      enum:
+                      - OnResource
+                      - OnShard
+                      type: string
                     labelSelector:
                       description: LabelSelector is used to find matching pods. Pods
                         that match this label selector are counted to determine the
diff --git a/jsonnet/prometheus-operator/prometheusagents-crd.json b/jsonnet/prometheus-operator/prometheusagents-crd.json
index e93f6acaa..9716a2f03 100644
--- a/jsonnet/prometheus-operator/prometheusagents-crd.json
+++ b/jsonnet/prometheus-operator/prometheusagents-crd.json
@@ -5517,8 +5517,15 @@
                   "topologySpreadConstraints": {
                     "description": "Defines the pod's topology spread constraints if specified.",
                     "items": {
-                      "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
                       "properties": {
+                        "additionalLabelSelectors": {
+                          "description": "Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.",
+                          "enum": [
+                            "OnResource",
+                            "OnShard"
+                          ],
+                          "type": "string"
+                        },
                         "labelSelector": {
                           "description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
                           "properties": {
diff --git a/jsonnet/prometheus-operator/prometheuses-crd.json b/jsonnet/prometheus-operator/prometheuses-crd.json
index ea3939d1c..9c3013c3f 100644
--- a/jsonnet/prometheus-operator/prometheuses-crd.json
+++ b/jsonnet/prometheus-operator/prometheuses-crd.json
@@ -6937,8 +6937,15 @@
                   "topologySpreadConstraints": {
                     "description": "Defines the pod's topology spread constraints if specified.",
                     "items": {
-                      "description": "TopologySpreadConstraint specifies how to spread matching pods among the given topology.",
                       "properties": {
+                        "additionalLabelSelectors": {
+                          "description": "Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.",
+                          "enum": [
+                            "OnResource",
+                            "OnShard"
+                          ],
+                          "type": "string"
+                        },
                         "labelSelector": {
                           "description": "LabelSelector is used to find matching pods. Pods that match this label selector are counted to determine the number of pods in their corresponding topology domain.",
                           "properties": {
diff --git a/pkg/apis/monitoring/v1/prometheus_types.go b/pkg/apis/monitoring/v1/prometheus_types.go
index e9abd48de..fc2269398 100644
--- a/pkg/apis/monitoring/v1/prometheus_types.go
+++ b/pkg/apis/monitoring/v1/prometheus_types.go
@@ -57,6 +57,27 @@ func (l *Prometheus) GetStatus() PrometheusStatus {
 	return l.Status
 }
 
+// +kubebuilder:validation:Enum=OnResource;OnShard
+type AdditionalLabelSelectors string
+
+const (
+	// Automatically add a label selector that will select all pods matching the same Prometheus/PrometheusAgent resource (irrespective of their shards).
+	ResourceNameLabelSelector AdditionalLabelSelectors = "OnResource"
+
+	// Automatically add a label selector that will select all pods matching the same shard of the same Prometheus/PrometheusAgent resource.
+	ShardAndResourceNameLabelSelector AdditionalLabelSelectors = "OnShard"
+)
+
+type CoreV1TopologySpreadConstraint v1.TopologySpreadConstraint
+
+type TopologySpreadConstraint struct {
+	CoreV1TopologySpreadConstraint `json:",inline"`
+
+	//+optional
+	// Defines what Prometheus Operator managed labels should be added to labelSelector on the topologySpreadConstraint.
+	AdditionalLabelSelectors *AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"`
+}
+
 // CommonPrometheusFields are the options available to both the Prometheus server and agent.
 // +k8s:deepcopy-gen=true
 type CommonPrometheusFields struct {
@@ -320,9 +341,10 @@ type CommonPrometheusFields struct {
 	// Defines the Pods' tolerations if specified.
 	// +optional
 	Tolerations []v1.Toleration `json:"tolerations,omitempty"`
+
 	// Defines the pod's topology spread constraints if specified.
-	// +optional
-	TopologySpreadConstraints []v1.TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
+	//+optional
+	TopologySpreadConstraints []TopologySpreadConstraint `json:"topologySpreadConstraints,omitempty"`
 
 	// Defines the list of remote write configurations.
 	// +optional
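`CoreV1TopologySpreadConstraint` is a defined type over `v1.TopologySpreadConstraint`, so an existing core/v1 constraint can be lifted into the operator's type with a plain conversion. A small sketch (the helper name is illustrative, not part of the patch):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// wrapConstraint lifts a core/v1 constraint into the operator's type and
// optionally tags it with an AdditionalLabelSelectors policy.
func wrapConstraint(in corev1.TopologySpreadConstraint, sel *monitoringv1.AdditionalLabelSelectors) monitoringv1.TopologySpreadConstraint {
	return monitoringv1.TopologySpreadConstraint{
		// Same underlying type, so an explicit conversion is all that is required.
		CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint(in),
		AdditionalLabelSelectors:       sel,
	}
}
```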
diff --git a/pkg/apis/monitoring/v1/zz_generated.deepcopy.go b/pkg/apis/monitoring/v1/zz_generated.deepcopy.go
index 9b1e7f754..d70c20f60 100644
--- a/pkg/apis/monitoring/v1/zz_generated.deepcopy.go
+++ b/pkg/apis/monitoring/v1/zz_generated.deepcopy.go
@@ -749,7 +749,7 @@ func (in *CommonPrometheusFields) DeepCopyInto(out *CommonPrometheusFields) {
 	}
 	if in.TopologySpreadConstraints != nil {
 		in, out := &in.TopologySpreadConstraints, &out.TopologySpreadConstraints
-		*out = make([]corev1.TopologySpreadConstraint, len(*in))
+		*out = make([]TopologySpreadConstraint, len(*in))
 		for i := range *in {
 			(*in)[i].DeepCopyInto(&(*out)[i])
 		}
@@ -926,6 +926,46 @@ func (in *Condition) DeepCopy() *Condition {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *CoreV1TopologySpreadConstraint) DeepCopyInto(out *CoreV1TopologySpreadConstraint) {
+	*out = *in
+	if in.LabelSelector != nil {
+		in, out := &in.LabelSelector, &out.LabelSelector
+		*out = new(metav1.LabelSelector)
+		(*in).DeepCopyInto(*out)
+	}
+	if in.MinDomains != nil {
+		in, out := &in.MinDomains, &out.MinDomains
+		*out = new(int32)
+		**out = **in
+	}
+	if in.NodeAffinityPolicy != nil {
+		in, out := &in.NodeAffinityPolicy, &out.NodeAffinityPolicy
+		*out = new(corev1.NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.NodeTaintsPolicy != nil {
+		in, out := &in.NodeTaintsPolicy, &out.NodeTaintsPolicy
+		*out = new(corev1.NodeInclusionPolicy)
+		**out = **in
+	}
+	if in.MatchLabelKeys != nil {
+		in, out := &in.MatchLabelKeys, &out.MatchLabelKeys
+		*out = make([]string, len(*in))
+		copy(*out, *in)
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CoreV1TopologySpreadConstraint.
+func (in *CoreV1TopologySpreadConstraint) DeepCopy() *CoreV1TopologySpreadConstraint {
+	if in == nil {
+		return nil
+	}
+	out := new(CoreV1TopologySpreadConstraint)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *EmbeddedObjectMetadata) DeepCopyInto(out *EmbeddedObjectMetadata) {
 	*out = *in
@@ -3079,6 +3119,27 @@ func (in *ThanosSpec) DeepCopy() *ThanosSpec {
 	return out
 }
 
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *TopologySpreadConstraint) DeepCopyInto(out *TopologySpreadConstraint) {
+	*out = *in
+	in.CoreV1TopologySpreadConstraint.DeepCopyInto(&out.CoreV1TopologySpreadConstraint)
+	if in.AdditionalLabelSelectors != nil {
+		in, out := &in.AdditionalLabelSelectors, &out.AdditionalLabelSelectors
+		*out = new(AdditionalLabelSelectors)
+		**out = **in
+	}
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TopologySpreadConstraint.
+func (in *TopologySpreadConstraint) DeepCopy() *TopologySpreadConstraint {
+	if in == nil {
+		return nil
+	}
+	out := new(TopologySpreadConstraint)
+	in.DeepCopyInto(out)
+	return out
+}
+
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *WebConfigFileFields) DeepCopyInto(out *WebConfigFileFields) {
 	*out = *in
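The generated deep-copy for the new types also duplicates the `AdditionalLabelSelectors` pointer, so a copy can be mutated without touching the original. A quick sketch of that property (names illustrative):

```go
package example

import (
	"k8s.io/utils/ptr"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// copyIsIndependent returns true: changing the copy's pointer target does not
// affect the original constraint.
func copyIsIndependent() bool {
	in := monitoringv1.TopologySpreadConstraint{
		AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
	}
	out := in.DeepCopy()
	*out.AdditionalLabelSelectors = monitoringv1.ShardAndResourceNameLabelSelector
	return *in.AdditionalLabelSelectors == monitoringv1.ResourceNameLabelSelector
}
```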
diff --git a/pkg/client/applyconfiguration/monitoring/v1/commonprometheusfields.go b/pkg/client/applyconfiguration/monitoring/v1/commonprometheusfields.go
index ce40a281d..d4a2cf8b7 100644
--- a/pkg/client/applyconfiguration/monitoring/v1/commonprometheusfields.go
+++ b/pkg/client/applyconfiguration/monitoring/v1/commonprometheusfields.go
@@ -65,7 +65,7 @@ type CommonPrometheusFieldsApplyConfiguration struct {
 	ConfigMaps                           []string                                                `json:"configMaps,omitempty"`
 	Affinity                             *corev1.Affinity                                        `json:"affinity,omitempty"`
 	Tolerations                          []corev1.Toleration                                     `json:"tolerations,omitempty"`
-	TopologySpreadConstraints            []corev1.TopologySpreadConstraint                       `json:"topologySpreadConstraints,omitempty"`
+	TopologySpreadConstraints            []TopologySpreadConstraintApplyConfiguration            `json:"topologySpreadConstraints,omitempty"`
 	RemoteWrite                          []RemoteWriteSpecApplyConfiguration                     `json:"remoteWrite,omitempty"`
 	SecurityContext                      *corev1.PodSecurityContext                              `json:"securityContext,omitempty"`
 	ListenLocal                          *bool                                                   `json:"listenLocal,omitempty"`
@@ -452,9 +452,12 @@ func (b *CommonPrometheusFieldsApplyConfiguration) WithTolerations(values ...cor
 // WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field.
-func (b *CommonPrometheusFieldsApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *CommonPrometheusFieldsApplyConfiguration {
+func (b *CommonPrometheusFieldsApplyConfiguration) WithTopologySpreadConstraints(values ...*TopologySpreadConstraintApplyConfiguration) *CommonPrometheusFieldsApplyConfiguration {
 	for i := range values {
-		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i])
+		if values[i] == nil {
+			panic("nil value passed to WithTopologySpreadConstraints")
+		}
+		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, *values[i])
 	}
 	return b
 }
diff --git a/pkg/client/applyconfiguration/monitoring/v1/corev1topologyspreadconstraint.go b/pkg/client/applyconfiguration/monitoring/v1/corev1topologyspreadconstraint.go
new file mode 100644
index 000000000..22eef23f7
--- /dev/null
+++ b/pkg/client/applyconfiguration/monitoring/v1/corev1topologyspreadconstraint.go
@@ -0,0 +1,107 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// CoreV1TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the CoreV1TopologySpreadConstraint type for use
+// with apply.
+type CoreV1TopologySpreadConstraintApplyConfiguration struct {
+	MaxSkew            *int32                            `json:"maxSkew,omitempty"`
+	TopologyKey        *string                           `json:"topologyKey,omitempty"`
+	WhenUnsatisfiable  *v1.UnsatisfiableConstraintAction `json:"whenUnsatisfiable,omitempty"`
+	LabelSelector      *metav1.LabelSelector             `json:"labelSelector,omitempty"`
+	MinDomains         *int32                            `json:"minDomains,omitempty"`
+	NodeAffinityPolicy *v1.NodeInclusionPolicy           `json:"nodeAffinityPolicy,omitempty"`
+	NodeTaintsPolicy   *v1.NodeInclusionPolicy           `json:"nodeTaintsPolicy,omitempty"`
+	MatchLabelKeys     []string                          `json:"matchLabelKeys,omitempty"`
+}
+
+// CoreV1TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the CoreV1TopologySpreadConstraint type for use with
+// apply.
+func CoreV1TopologySpreadConstraint() *CoreV1TopologySpreadConstraintApplyConfiguration {
+	return &CoreV1TopologySpreadConstraintApplyConfiguration{}
+}
+
+// WithMaxSkew sets the MaxSkew field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxSkew field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMaxSkew(value int32) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.MaxSkew = &value
+	return b
+}
+
+// WithTopologyKey sets the TopologyKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TopologyKey field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value string) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.TopologyKey = &value
+	return b
+}
+
+// WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WhenUnsatisfiable field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value v1.UnsatisfiableConstraintAction) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.WhenUnsatisfiable = &value
+	return b
+}
+
+// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LabelSelector field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithLabelSelector(value metav1.LabelSelector) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.LabelSelector = &value
+	return b
+}
+
+// WithMinDomains sets the MinDomains field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinDomains field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.MinDomains = &value
+	return b
+}
+
+// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value v1.NodeInclusionPolicy) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.NodeAffinityPolicy = &value
+	return b
+}
+
+// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value v1.NodeInclusionPolicy) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	b.NodeTaintsPolicy = &value
+	return b
+}
+
+// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field.
+func (b *CoreV1TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *CoreV1TopologySpreadConstraintApplyConfiguration {
+	for i := range values {
+		b.MatchLabelKeys = append(b.MatchLabelKeys, values[i])
+	}
+	return b
+}
diff --git a/pkg/client/applyconfiguration/monitoring/v1/prometheusspec.go b/pkg/client/applyconfiguration/monitoring/v1/prometheusspec.go
index 63f761469..b3cd37b0f 100644
--- a/pkg/client/applyconfiguration/monitoring/v1/prometheusspec.go
+++ b/pkg/client/applyconfiguration/monitoring/v1/prometheusspec.go
@@ -398,9 +398,12 @@ func (b *PrometheusSpecApplyConfiguration) WithTolerations(values ...corev1.Tole
 // WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field.
-func (b *PrometheusSpecApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *PrometheusSpecApplyConfiguration {
+func (b *PrometheusSpecApplyConfiguration) WithTopologySpreadConstraints(values ...*TopologySpreadConstraintApplyConfiguration) *PrometheusSpecApplyConfiguration {
 	for i := range values {
-		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i])
+		if values[i] == nil {
+			panic("nil value passed to WithTopologySpreadConstraints")
+		}
+		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, *values[i])
 	}
 	return b
 }
diff --git a/pkg/client/applyconfiguration/monitoring/v1/topologyspreadconstraint.go b/pkg/client/applyconfiguration/monitoring/v1/topologyspreadconstraint.go
new file mode 100644
index 000000000..2ce462f46
--- /dev/null
+++ b/pkg/client/applyconfiguration/monitoring/v1/topologyspreadconstraint.go
@@ -0,0 +1,110 @@
+// Copyright The prometheus-operator Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated by applyconfiguration-gen. DO NOT EDIT.
+
+package v1
+
+import (
+	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
+	corev1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// TopologySpreadConstraintApplyConfiguration represents an declarative configuration of the TopologySpreadConstraint type for use
+// with apply.
+type TopologySpreadConstraintApplyConfiguration struct {
+	CoreV1TopologySpreadConstraintApplyConfiguration `json:",inline"`
+	AdditionalLabelSelectors                         *monitoringv1.AdditionalLabelSelectors `json:"additionalLabelSelectors,omitempty"`
+}
+
+// TopologySpreadConstraintApplyConfiguration constructs an declarative configuration of the TopologySpreadConstraint type for use with
+// apply.
+func TopologySpreadConstraint() *TopologySpreadConstraintApplyConfiguration {
+	return &TopologySpreadConstraintApplyConfiguration{}
+}
+
+// WithMaxSkew sets the MaxSkew field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MaxSkew field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithMaxSkew(value int32) *TopologySpreadConstraintApplyConfiguration {
+	b.MaxSkew = &value
+	return b
+}
+
+// WithTopologyKey sets the TopologyKey field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the TopologyKey field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithTopologyKey(value string) *TopologySpreadConstraintApplyConfiguration {
+	b.TopologyKey = &value
+	return b
+}
+
+// WithWhenUnsatisfiable sets the WhenUnsatisfiable field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the WhenUnsatisfiable field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithWhenUnsatisfiable(value corev1.UnsatisfiableConstraintAction) *TopologySpreadConstraintApplyConfiguration {
+	b.WhenUnsatisfiable = &value
+	return b
+}
+
+// WithLabelSelector sets the LabelSelector field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the LabelSelector field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithLabelSelector(value metav1.LabelSelector) *TopologySpreadConstraintApplyConfiguration {
+	b.LabelSelector = &value
+	return b
+}
+
+// WithMinDomains sets the MinDomains field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the MinDomains field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithMinDomains(value int32) *TopologySpreadConstraintApplyConfiguration {
+	b.MinDomains = &value
+	return b
+}
+
+// WithNodeAffinityPolicy sets the NodeAffinityPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeAffinityPolicy field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithNodeAffinityPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
+	b.NodeAffinityPolicy = &value
+	return b
+}
+
+// WithNodeTaintsPolicy sets the NodeTaintsPolicy field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the NodeTaintsPolicy field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithNodeTaintsPolicy(value corev1.NodeInclusionPolicy) *TopologySpreadConstraintApplyConfiguration {
+	b.NodeTaintsPolicy = &value
+	return b
+}
+
+// WithMatchLabelKeys adds the given value to the MatchLabelKeys field in the declarative configuration
+// and returns the receiver, so that objects can be build by chaining "With" function invocations.
+// If called multiple times, values provided by each call will be appended to the MatchLabelKeys field.
+func (b *TopologySpreadConstraintApplyConfiguration) WithMatchLabelKeys(values ...string) *TopologySpreadConstraintApplyConfiguration {
+	for i := range values {
+		b.MatchLabelKeys = append(b.MatchLabelKeys, values[i])
+	}
+	return b
+}
+
+// WithAdditionalLabelSelectors sets the AdditionalLabelSelectors field in the declarative configuration to the given value
+// and returns the receiver, so that objects can be built by chaining "With" function invocations.
+// If called multiple times, the AdditionalLabelSelectors field is set to the value of the last call.
+func (b *TopologySpreadConstraintApplyConfiguration) WithAdditionalLabelSelectors(value monitoringv1.AdditionalLabelSelectors) *TopologySpreadConstraintApplyConfiguration {
+	b.AdditionalLabelSelectors = &value
+	return b
+}
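The new builders chain like the other generated apply configurations; the methods promoted from the embedded `CoreV1TopologySpreadConstraintApplyConfiguration` are re-declared on the outer type so the chain keeps returning `*TopologySpreadConstraintApplyConfiguration`. A sketch of wiring one into a `CommonPrometheusFieldsApplyConfiguration` (function name illustrative):

```go
package example

import (
	corev1 "k8s.io/api/core/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	monitoringv1ac "github.com/prometheus-operator/prometheus-operator/pkg/client/applyconfiguration/monitoring/v1"
)

// withZoneSpread attaches a zone spread constraint to a CommonPrometheusFields
// apply configuration; note that the variadic parameter now takes pointers and
// panics on nil values.
func withZoneSpread() *monitoringv1ac.CommonPrometheusFieldsApplyConfiguration {
	tsc := monitoringv1ac.TopologySpreadConstraint().
		WithMaxSkew(1).
		WithTopologyKey("topology.kubernetes.io/zone").
		WithWhenUnsatisfiable(corev1.DoNotSchedule).
		WithAdditionalLabelSelectors(monitoringv1.ShardAndResourceNameLabelSelector)

	cpf := &monitoringv1ac.CommonPrometheusFieldsApplyConfiguration{}
	return cpf.WithTopologySpreadConstraints(tsc)
}
```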
diff --git a/pkg/client/applyconfiguration/monitoring/v1alpha1/prometheusagentspec.go b/pkg/client/applyconfiguration/monitoring/v1alpha1/prometheusagentspec.go
index f7121ed5c..a34544c1f 100644
--- a/pkg/client/applyconfiguration/monitoring/v1alpha1/prometheusagentspec.go
+++ b/pkg/client/applyconfiguration/monitoring/v1alpha1/prometheusagentspec.go
@@ -377,9 +377,12 @@ func (b *PrometheusAgentSpecApplyConfiguration) WithTolerations(values ...corev1
 // WithTopologySpreadConstraints adds the given value to the TopologySpreadConstraints field in the declarative configuration
 // and returns the receiver, so that objects can be build by chaining "With" function invocations.
 // If called multiple times, values provided by each call will be appended to the TopologySpreadConstraints field.
-func (b *PrometheusAgentSpecApplyConfiguration) WithTopologySpreadConstraints(values ...corev1.TopologySpreadConstraint) *PrometheusAgentSpecApplyConfiguration {
+func (b *PrometheusAgentSpecApplyConfiguration) WithTopologySpreadConstraints(values ...*v1.TopologySpreadConstraintApplyConfiguration) *PrometheusAgentSpecApplyConfiguration {
 	for i := range values {
-		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, values[i])
+		if values[i] == nil {
+			panic("nil value passed to WithTopologySpreadConstraints")
+		}
+		b.TopologySpreadConstraints = append(b.TopologySpreadConstraints, *values[i])
 	}
 	return b
 }
diff --git a/pkg/client/applyconfiguration/utils.go b/pkg/client/applyconfiguration/utils.go
index 93cde02e9..a4d062b19 100644
--- a/pkg/client/applyconfiguration/utils.go
+++ b/pkg/client/applyconfiguration/utils.go
@@ -69,6 +69,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
 		return &monitoringv1.CommonPrometheusFieldsApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("Condition"):
 		return &monitoringv1.ConditionApplyConfiguration{}
+	case v1.SchemeGroupVersion.WithKind("CoreV1TopologySpreadConstraint"):
+		return &monitoringv1.CoreV1TopologySpreadConstraintApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("EmbeddedObjectMetadata"):
 		return &monitoringv1.EmbeddedObjectMetadataApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("EmbeddedPersistentVolumeClaim"):
@@ -177,6 +179,8 @@ func ForKind(kind schema.GroupVersionKind) interface{} {
 		return &monitoringv1.ThanosSpecApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("TLSConfig"):
 		return &monitoringv1.TLSConfigApplyConfiguration{}
+	case v1.SchemeGroupVersion.WithKind("TopologySpreadConstraint"):
+		return &monitoringv1.TopologySpreadConstraintApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("TSDBSpec"):
 		return &monitoringv1.TSDBSpecApplyConfiguration{}
 	case v1.SchemeGroupVersion.WithKind("WebConfigFileFields"):
diff --git a/pkg/prometheus/agent/statefulset.go b/pkg/prometheus/agent/statefulset.go
index 87795b361..4c936b440 100644
--- a/pkg/prometheus/agent/statefulset.go
+++ b/pkg/prometheus/agent/statefulset.go
@@ -391,7 +391,7 @@ func makeStatefulSetSpec(
 				Volumes:                       volumes,
 				Tolerations:                   cpf.Tolerations,
 				Affinity:                      cpf.Affinity,
-				TopologySpreadConstraints:     cpf.TopologySpreadConstraints,
+				TopologySpreadConstraints:     prompkg.MakeK8sTopologySpreadConstraint(finalSelectorLabels, cpf.TopologySpreadConstraints),
 				HostAliases:                   operator.MakeHostAliases(cpf.HostAliases),
 				HostNetwork:                   cpf.HostNetwork,
 			},
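`prompkg.MakeK8sTopologySpreadConstraint` itself is not part of this hunk; going by the expectations in the tests below, its merging behaviour is roughly the following. This is a sketch under assumptions, not the actual implementation; the shard label key is taken from the test fixtures:

```go
package example

import (
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
)

// mergeSpreadConstraints converts the operator constraints back to core/v1 and,
// when additionalLabelSelectors is set, merges the operator-managed selector
// labels into labelSelector. OnResource spreads across all shards of the
// resource, so the shard label is skipped for that policy.
func mergeSpreadConstraints(selectorLabels map[string]string, in []monitoringv1.TopologySpreadConstraint) []corev1.TopologySpreadConstraint {
	out := make([]corev1.TopologySpreadConstraint, 0, len(in))
	for _, c := range in {
		tsc := corev1.TopologySpreadConstraint(c.CoreV1TopologySpreadConstraint)
		if c.AdditionalLabelSelectors == nil {
			out = append(out, tsc)
			continue
		}
		sel := tsc.LabelSelector.DeepCopy() // nil-safe: returns nil for a nil receiver
		if sel == nil {
			sel = &metav1.LabelSelector{}
		}
		if sel.MatchLabels == nil {
			sel.MatchLabels = map[string]string{}
		}
		for k, v := range selectorLabels {
			if *c.AdditionalLabelSelectors == monitoringv1.ResourceNameLabelSelector && k == "operator.prometheus.io/shard" {
				continue
			}
			sel.MatchLabels[k] = v
		}
		tsc.LabelSelector = sel
		out = append(out, tsc)
	}
	return out
}
```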
diff --git a/pkg/prometheus/agent/statefulset_test.go b/pkg/prometheus/agent/statefulset_test.go
index 232f2e7e2..ecec57c4c 100644
--- a/pkg/prometheus/agent/statefulset_test.go
+++ b/pkg/prometheus/agent/statefulset_test.go
@@ -21,10 +21,13 @@ import (
 
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/util/intstr"
+	"k8s.io/utils/ptr"
 
 	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
 	monitoringv1alpha1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1alpha1"
@@ -206,3 +209,152 @@ func makeStatefulSetFromPrometheus(p monitoringv1alpha1.PrometheusAgent) (*appsv
 		0,
 		nil)
 }
+
+func TestPodTopologySpreadConstraintWithAdditionalLabels(t *testing.T) {
+	for _, tc := range []struct {
+		name string
+		spec monitoringv1alpha1.PrometheusAgentSpec
+		tsc  v1.TopologySpreadConstraint
+	}{
+		{
+			name: "without labelSelector and additionalLabels",
+			spec: monitoringv1alpha1.PrometheusAgentSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+			},
+		},
+		{
+			name: "with labelSelector and without additionalLabels",
+			spec: monitoringv1alpha1.PrometheusAgentSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"app": "prometheus",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app": "prometheus",
+					},
+				},
+			},
+		},
+		{
+			name: "with labelSelector and additionalLabels as ShardAndResourceName",
+			spec: monitoringv1alpha1.PrometheusAgentSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							AdditionalLabelSelectors: ptr.To(monitoringv1.ShardAndResourceNameLabelSelector),
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"app": "prometheus",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app":                          "prometheus",
+						"app.kubernetes.io/instance":   "test",
+						"app.kubernetes.io/managed-by": "prometheus-operator",
+						"app.kubernetes.io/name":       "prometheus-agent",
+						"operator.prometheus.io/name":  "test",
+						"operator.prometheus.io/shard": "0",
+					},
+				},
+			},
+		},
+		{
+			name: "with labelSelector and additionalLabels as ResourceName",
+			spec: monitoringv1alpha1.PrometheusAgentSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"app": "prometheus",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app":                          "prometheus",
+						"app.kubernetes.io/instance":   "test",
+						"app.kubernetes.io/managed-by": "prometheus-operator",
+						"app.kubernetes.io/name":       "prometheus-agent",
+						"operator.prometheus.io/name":  "test",
+					},
+				},
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			sts, err := makeStatefulSetFromPrometheus(monitoringv1alpha1.PrometheusAgent{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test",
+					Namespace: "ns-test",
+				},
+				Spec: tc.spec,
+			})
+
+			require.NoError(t, err)
+
+			assert.Greater(t, len(sts.Spec.Template.Spec.TopologySpreadConstraints), 0)
+			assert.Equal(t, tc.tsc, sts.Spec.Template.Spec.TopologySpreadConstraints[0])
+		})
+	}
+}
diff --git a/pkg/prometheus/server/statefulset.go b/pkg/prometheus/server/statefulset.go
index f1a4230a3..f1cba221c 100644
--- a/pkg/prometheus/server/statefulset.go
+++ b/pkg/prometheus/server/statefulset.go
@@ -494,7 +494,7 @@ func makeStatefulSetSpec(
 				Volumes:                       volumes,
 				Tolerations:                   cpf.Tolerations,
 				Affinity:                      cpf.Affinity,
-				TopologySpreadConstraints:     cpf.TopologySpreadConstraints,
+				TopologySpreadConstraints:     prompkg.MakeK8sTopologySpreadConstraint(finalSelectorLabels, cpf.TopologySpreadConstraints),
 				HostAliases:                   operator.MakeHostAliases(cpf.HostAliases),
 				HostNetwork:                   cpf.HostNetwork,
 			},
diff --git a/pkg/prometheus/server/statefulset_test.go b/pkg/prometheus/server/statefulset_test.go
index b4297cbb2..ecde60bac 100644
--- a/pkg/prometheus/server/statefulset_test.go
+++ b/pkg/prometheus/server/statefulset_test.go
@@ -25,6 +25,7 @@ import (
 	"github.com/go-kit/log"
 	"github.com/go-kit/log/level"
 	"github.com/kylelemons/godebug/pretty"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	appsv1 "k8s.io/api/apps/v1"
 	v1 "k8s.io/api/core/v1"
@@ -2906,3 +2907,154 @@ func TestPersistentVolumeClaimRetentionPolicy(t *testing.T) {
 		t.Fatalf("expected persistentVolumeClaimDeletePolicy.WhenScaled to be %s but got %s", appsv1.DeletePersistentVolumeClaimRetentionPolicyType, sset.Spec.PersistentVolumeClaimRetentionPolicy.WhenScaled)
 	}
 }
+
+func TestPodTopologySpreadConstraintWithAdditionalLabels(t *testing.T) {
+	for _, tc := range []struct {
+		name string
+		spec monitoringv1.PrometheusSpec
+		tsc  v1.TopologySpreadConstraint
+	}{
+		{
+			name: "without labelSelector and additionalLabels",
+			spec: monitoringv1.PrometheusSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+			},
+		},
+		{
+			name: "with labelSelector and without additionalLabels",
+			spec: monitoringv1.PrometheusSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"app": "prometheus",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app": "prometheus",
+					},
+				},
+			},
+		},
+		{
+			name: "with labelSelector and additionalLabels as ShardAndResourceName",
+			spec: monitoringv1.PrometheusSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							AdditionalLabelSelectors: ptr.To(monitoringv1.ShardAndResourceNameLabelSelector),
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"app": "prometheus",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app":                           "prometheus",
+						"app.kubernetes.io/instance":    "test",
+						"app.kubernetes.io/managed-by":  "prometheus-operator",
+						"prometheus":                    "test",
+						prompkg.ShardLabelName:          "0",
+						prompkg.PrometheusNameLabelName: "test",
+						prompkg.PrometheusK8sLabelName:  "prometheus",
+					},
+				},
+			},
+		},
+		{
+			name: "with labelSelector and additionalLabels as ResourceName",
+			spec: monitoringv1.PrometheusSpec{
+				CommonPrometheusFields: monitoringv1.CommonPrometheusFields{
+					TopologySpreadConstraints: []monitoringv1.TopologySpreadConstraint{
+						{
+							AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
+							CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
+								MaxSkew:           1,
+								TopologyKey:       "kubernetes.io/hostname",
+								WhenUnsatisfiable: v1.DoNotSchedule,
+								LabelSelector: &metav1.LabelSelector{
+									MatchLabels: map[string]string{
+										"app": "prometheus",
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+			tsc: v1.TopologySpreadConstraint{
+				MaxSkew:           1,
+				TopologyKey:       "kubernetes.io/hostname",
+				WhenUnsatisfiable: v1.DoNotSchedule,
+				LabelSelector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{
+						"app":                           "prometheus",
+						"app.kubernetes.io/instance":    "test",
+						"app.kubernetes.io/managed-by":  "prometheus-operator",
+						"prometheus":                    "test",
+						prompkg.PrometheusNameLabelName: "test",
+						prompkg.PrometheusK8sLabelName:  "prometheus",
+					},
+				},
+			},
+		},
+	} {
+		t.Run(tc.name, func(t *testing.T) {
+			sts, err := makeStatefulSetFromPrometheus(monitoringv1.Prometheus{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "test",
+					Namespace: "ns-test",
+				},
+				Spec: tc.spec,
+			})
+
+			require.NoError(t, err)
+
+			assert.Greater(t, len(sts.Spec.Template.Spec.TopologySpreadConstraints), 0)
+			assert.Equal(t, tc.tsc, sts.Spec.Template.Spec.TopologySpreadConstraints[0])
+		})
+	}
+}
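
Assuming a standard Go toolchain, both new tests can be exercised in isolation with, for example:

	go test ./pkg/prometheus/agent/... ./pkg/prometheus/server/... -run TestPodTopologySpreadConstraintWithAdditionalLabels
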
diff --git a/pkg/prometheus/statefulset.go b/pkg/prometheus/statefulset.go
index 29d21b80b..9e92fde2c 100644
--- a/pkg/prometheus/statefulset.go
+++ b/pkg/prometheus/statefulset.go
@@ -60,6 +60,7 @@ var (
 	ShardLabelName                = "operator.prometheus.io/shard"
 	PrometheusNameLabelName       = "operator.prometheus.io/name"
 	PrometheusModeLabeLName       = "operator.prometheus.io/mode"
+	PrometheusK8sLabelName        = "app.kubernetes.io/name"
 	ProbeTimeoutSeconds     int32 = 3
 	LabelPrometheusName           = "prometheus-name"
 )
@@ -496,3 +497,37 @@ func ShareProcessNamespace(p monitoringv1.PrometheusInterface) *bool {
 		) == monitoringv1.ProcessSignalReloadStrategyType,
 	)
 }
+
+// MakeK8sTopologySpreadConstraint converts the operator's TopologySpreadConstraints into
+// core/v1 TopologySpreadConstraints, merging the StatefulSet's selector labels into the
+// label selector when additionalLabelSelectors is set.
+func MakeK8sTopologySpreadConstraint(selectorLabels map[string]string, tscs []monitoringv1.TopologySpreadConstraint) []v1.TopologySpreadConstraint {
+	coreTscs := make([]v1.TopologySpreadConstraint, 0, len(tscs))
+
+	for _, tsc := range tscs {
+		if tsc.AdditionalLabelSelectors == nil {
+			coreTscs = append(coreTscs, v1.TopologySpreadConstraint(tsc.CoreV1TopologySpreadConstraint))
+			continue
+		}
+
+		if tsc.LabelSelector == nil {
+			tsc.LabelSelector = &metav1.LabelSelector{}
+		}
+		// Guard against a nil matchLabels map before merging the selector labels.
+		if tsc.LabelSelector.MatchLabels == nil {
+			tsc.LabelSelector.MatchLabels = make(map[string]string, len(selectorLabels))
+		}
+
+		for key, value := range selectorLabels {
+			// The shard label is only added when pods of the same shard should be selected.
+			if *tsc.AdditionalLabelSelectors == monitoringv1.ResourceNameLabelSelector && key == ShardLabelName {
+				continue
+			}
+			tsc.LabelSelector.MatchLabels[key] = value
+		}
+
+		coreTscs = append(coreTscs, v1.TopologySpreadConstraint(tsc.CoreV1TopologySpreadConstraint))
+	}
+
+	return coreTscs
+}
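
For illustration, a minimal sketch of how the helper merges labels, assuming hypothetical selector-label values for shard 0 of a resource named "test" (the real values are computed by the operator when it builds the StatefulSet); this snippet is not part of the patch:

	package main

	import (
		"fmt"

		v1 "k8s.io/api/core/v1"
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
		"k8s.io/utils/ptr"

		monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
		prompkg "github.com/prometheus-operator/prometheus-operator/pkg/prometheus"
	)

	func main() {
		// Hypothetical selector labels for shard 0 of a resource named "test".
		selectorLabels := map[string]string{
			prompkg.PrometheusNameLabelName: "test",
			prompkg.ShardLabelName:          "0",
		}

		tscs := prompkg.MakeK8sTopologySpreadConstraint(selectorLabels, []monitoringv1.TopologySpreadConstraint{{
			AdditionalLabelSelectors: ptr.To(monitoringv1.ResourceNameLabelSelector),
			CoreV1TopologySpreadConstraint: monitoringv1.CoreV1TopologySpreadConstraint{
				MaxSkew:           1,
				TopologyKey:       "kubernetes.io/hostname",
				WhenUnsatisfiable: v1.DoNotSchedule,
				LabelSelector: &metav1.LabelSelector{
					MatchLabels: map[string]string{"app": "prometheus"},
				},
			},
		}})

		// With ResourceNameLabelSelector the shard label is skipped, so this prints the
		// user-provided "app" label plus the resource name label only.
		fmt.Println(tscs[0].LabelSelector.MatchLabels)
	}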