// Copyright 2016 The prometheus-operator Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package prometheus

import (
	"fmt"
	"net/url"
	"path"
	"strings"

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"

	"github.com/blang/semver"
	"github.com/pkg/errors"

	monitoringv1 "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"github.com/prometheus-operator/prometheus-operator/pkg/k8sutil"
	"github.com/prometheus-operator/prometheus-operator/pkg/operator"
)

const (
	governingServiceName            = "prometheus-operated"
	defaultRetention                = "24h"
	defaultReplicaExternalLabelName = "prometheus_replica"
	storageDir                      = "/prometheus"
	confDir                         = "/etc/prometheus/config"
	confOutDir                      = "/etc/prometheus/config_out"
	tlsAssetsDir                    = "/etc/prometheus/certs"
	rulesDir                        = "/etc/prometheus/rules"
	secretsDir                      = "/etc/prometheus/secrets/"
	configmapsDir                   = "/etc/prometheus/configmaps/"
	configFilename                  = "prometheus.yaml.gz"
	configEnvsubstFilename          = "prometheus.env.yaml"
	sSetInputHashName               = "prometheus-operator-input-hash"
	defaultPortName                 = "web"
)

var (
	minReplicas                 int32 = 1
	defaultMaxConcurrency       int32 = 20
	managedByOperatorLabel            = "managed-by"
	managedByOperatorLabelValue       = "prometheus-operator"
	managedByOperatorLabels           = map[string]string{
		managedByOperatorLabel: managedByOperatorLabelValue,
	}
	probeTimeoutSeconds int32 = 3
)

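// makeStatefulSet builds the StatefulSet definition for the given Prometheus
// custom resource. inputHash is a hash over all inputs used to generate the
// StatefulSet; it is attached as an annotation so that subsequent
// reconciliations can detect whether the generated StatefulSet has changed.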
func makeStatefulSet(
	p monitoringv1.Prometheus,
	config *Config,
	ruleConfigMapNames []string,
	inputHash string,
) (*appsv1.StatefulSet, error) {
	// p is passed in by value, not by reference. But p contains references,
	// like the annotation map, that do not get copied on function invocation.
	// Prevent side effects before editing p by creating a deep copy. For more
	// details see https://github.com/prometheus-operator/prometheus-operator/issues/1659.
	p = *p.DeepCopy()

	promVersion := operator.StringValOrDefault(p.Spec.Version, operator.DefaultPrometheusVersion)
	parsedVersion, err := semver.ParseTolerant(promVersion)
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse prometheus version")
	}

	if p.Spec.PortName == "" {
		p.Spec.PortName = defaultPortName
	}

	if p.Spec.Replicas == nil {
		p.Spec.Replicas = &minReplicas
	}
	intZero := int32(0)
	if p.Spec.Replicas != nil && *p.Spec.Replicas < 0 {
		p.Spec.Replicas = &intZero
	}
	if p.Spec.Retention == "" {
		p.Spec.Retention = defaultRetention
	}

	if p.Spec.Resources.Requests == nil {
		p.Spec.Resources.Requests = v1.ResourceList{}
	}
	_, memoryRequestFound := p.Spec.Resources.Requests[v1.ResourceMemory]
	memoryLimit, memoryLimitFound := p.Spec.Resources.Limits[v1.ResourceMemory]
	if !memoryRequestFound && parsedVersion.Major == 1 {
		defaultMemoryRequest := resource.MustParse("2Gi")
		compareResult := memoryLimit.Cmp(defaultMemoryRequest)
		// If a limit is given and smaller or equal to 2Gi, set the memory
		// request to the given limit. This is necessary because if
		// limit < request, the Pod is not schedulable.
		if memoryLimitFound && compareResult <= 0 {
			p.Spec.Resources.Requests[v1.ResourceMemory] = memoryLimit
		} else {
			p.Spec.Resources.Requests[v1.ResourceMemory] = defaultMemoryRequest
		}
	}

	spec, err := makeStatefulSetSpec(p, config, ruleConfigMapNames, parsedVersion)
	if err != nil {
		return nil, errors.Wrap(err, "make StatefulSet spec")
	}

	boolTrue := true
	// Do not transfer kubectl annotations to the statefulset so it is not
	// pruned by kubectl.
	annotations := make(map[string]string)
	for key, value := range p.ObjectMeta.Annotations {
		if !strings.HasPrefix(key, "kubectl.kubernetes.io/") {
			annotations[key] = value
		}
	}
	statefulset := &appsv1.StatefulSet{
		ObjectMeta: metav1.ObjectMeta{
			Name:        prefixedName(p.Name),
			Labels:      config.Labels.Merge(p.ObjectMeta.Labels),
			Annotations: annotations,
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion:         p.APIVersion,
					BlockOwnerDeletion: &boolTrue,
					Controller:         &boolTrue,
					Kind:               p.Kind,
					Name:               p.Name,
					UID:                p.UID,
				},
			},
		},
		Spec: *spec,
	}

	if statefulset.ObjectMeta.Annotations == nil {
		statefulset.ObjectMeta.Annotations = map[string]string{
			sSetInputHashName: inputHash,
		}
	} else {
		statefulset.ObjectMeta.Annotations[sSetInputHashName] = inputHash
	}

	if len(p.Spec.ImagePullSecrets) > 0 {
		statefulset.Spec.Template.Spec.ImagePullSecrets = p.Spec.ImagePullSecrets
	}
	storageSpec := p.Spec.Storage
	if storageSpec == nil {
		statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, v1.Volume{
			Name: volumeName(p.Name),
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{},
			},
		})
	} else if storageSpec.EmptyDir != nil {
		emptyDir := storageSpec.EmptyDir
		statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, v1.Volume{
			Name: volumeName(p.Name),
			VolumeSource: v1.VolumeSource{
				EmptyDir: emptyDir,
			},
		})
	} else {
		pvcTemplate := operator.MakeVolumeClaimTemplate(storageSpec.VolumeClaimTemplate)
		if pvcTemplate.Name == "" {
			pvcTemplate.Name = volumeName(p.Name)
		}
		if storageSpec.VolumeClaimTemplate.Spec.AccessModes == nil {
			pvcTemplate.Spec.AccessModes = []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce}
		} else {
			pvcTemplate.Spec.AccessModes = storageSpec.VolumeClaimTemplate.Spec.AccessModes
		}
		pvcTemplate.Spec.Resources = storageSpec.VolumeClaimTemplate.Spec.Resources
		pvcTemplate.Spec.Selector = storageSpec.VolumeClaimTemplate.Spec.Selector
		statefulset.Spec.VolumeClaimTemplates = append(statefulset.Spec.VolumeClaimTemplates, *pvcTemplate)
	}

	for _, volume := range p.Spec.Volumes {
		statefulset.Spec.Template.Spec.Volumes = append(statefulset.Spec.Template.Spec.Volumes, volume)
	}

	return statefulset, nil
}

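// makeEmptyConfigurationSecret returns the configuration Secret for p with
// empty configuration data, annotated with "empty": "true".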
func makeEmptyConfigurationSecret(p *monitoringv1.Prometheus, config Config) (*v1.Secret, error) {
	s := makeConfigSecret(p, config)

	s.ObjectMeta.Annotations = map[string]string{
		"empty": "true",
	}

	return s, nil
}

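// makeConfigSecret builds the Secret that holds the generated, gzipped
// Prometheus configuration (prometheus.yaml.gz), owned by the Prometheus
// resource and labeled as managed by the operator.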
func makeConfigSecret(p *monitoringv1.Prometheus, config Config) *v1.Secret {
	boolTrue := true
	return &v1.Secret{
		ObjectMeta: metav1.ObjectMeta{
			Name:   configSecretName(p.Name),
			Labels: config.Labels.Merge(managedByOperatorLabels),
			OwnerReferences: []metav1.OwnerReference{
				{
					APIVersion:         p.APIVersion,
					BlockOwnerDeletion: &boolTrue,
					Controller:         &boolTrue,
					Kind:               p.Kind,
					Name:               p.Name,
					UID:                p.UID,
				},
			},
		},
		Data: map[string][]byte{
			configFilename: {},
		},
	}
}

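// makeStatefulSetService builds the governing headless Service
// ("prometheus-operated") that selects all Prometheus pods and exposes the
// web port, plus the Thanos sidecar gRPC port when Thanos is configured.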
func makeStatefulSetService(p *monitoringv1.Prometheus, config Config) *v1.Service {
	p = p.DeepCopy()

	if p.Spec.PortName == "" {
		p.Spec.PortName = defaultPortName
	}

	svc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: governingServiceName,
			OwnerReferences: []metav1.OwnerReference{
				{
					Name:       p.GetName(),
					Kind:       p.Kind,
					APIVersion: p.APIVersion,
					UID:        p.GetUID(),
				},
			},
			Labels: config.Labels.Merge(map[string]string{
				"operated-prometheus": "true",
			}),
		},
		Spec: v1.ServiceSpec{
			ClusterIP: "None",
			Ports: []v1.ServicePort{
				{
					Name:       p.Spec.PortName,
					Port:       9090,
					TargetPort: intstr.FromString(p.Spec.PortName),
				},
			},
			Selector: map[string]string{
				"app": "prometheus",
			},
		},
	}

	if p.Spec.Thanos != nil {
		svc.Spec.Ports = append(svc.Spec.Ports, v1.ServicePort{
			Name:       "grpc",
			Port:       10901,
			TargetPort: intstr.FromString("grpc"),
		})
	}

	return svc
}

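// makeStatefulSetSpec assembles the StatefulSetSpec: it derives the
// version-specific Prometheus command line arguments, the volumes and volume
// mounts, the readiness probe, and the full container list (Prometheus, the
// config reloaders, and the optional Thanos sidecar) for the pod template.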
func makeStatefulSetSpec(p monitoringv1.Prometheus, c *Config, ruleConfigMapNames []string,
	version semver.Version) (*appsv1.StatefulSetSpec, error) {
	// Prometheus may take quite long to shut down to checkpoint existing data.
	// Allow up to 10 minutes for clean termination.
	terminationGracePeriod := int64(600)

	baseImage := operator.StringValOrDefault(p.Spec.BaseImage, operator.DefaultPrometheusBaseImage)
	if p.Spec.Image != nil && strings.TrimSpace(*p.Spec.Image) != "" {
		baseImage = *p.Spec.Image
	}
	prometheusImagePath, err := operator.BuildImagePath(baseImage, p.Spec.Version, p.Spec.Tag, p.Spec.SHA)
	if err != nil {
		return nil, err
	}

	promArgs := []string{
		"-web.console.templates=/etc/prometheus/consoles",
		"-web.console.libraries=/etc/prometheus/console_libraries",
	}

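	// Derive version-specific storage and retention flags. Prometheus 1.x
	// and 2.x use different storage engines; only these two major versions
	// are supported.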
	switch version.Major {
	case 1:
		promArgs = append(promArgs,
			"-storage.local.retention="+p.Spec.Retention,
			"-storage.local.num-fingerprint-mutexes=4096",
			fmt.Sprintf("-storage.local.path=%s", storageDir),
			"-storage.local.chunk-encoding-version=2",
			fmt.Sprintf("-config.file=%s", path.Join(confOutDir, configEnvsubstFilename)),
		)
		// We attempt to specify decent storage tuning flags based on how much the
		// requested memory can fit. The user has to specify an appropriate buffering
		// in memory limits to catch increased memory usage during query bursts.
		// More info: https://prometheus.io/docs/operating/storage/.
		reqMem := p.Spec.Resources.Requests[v1.ResourceMemory]

		if version.Minor < 6 {
			// 1024 byte is the fixed chunk size. With an increasing number of chunks
			// actually in memory, the overhead owed to their management, higher
			// ingestion buffers, etc. increases.
			// We are conservative for now and assume this to be 80%, as the Kubernetes
			// environment generally has a very high time series churn.
			memChunks := reqMem.Value() / 1024 / 5

			promArgs = append(promArgs,
				"-storage.local.memory-chunks="+fmt.Sprintf("%d", memChunks),
				"-storage.local.max-chunks-to-persist="+fmt.Sprintf("%d", memChunks/2),
			)
		} else {
			// Leave 1/3 head room for other overhead.
			promArgs = append(promArgs,
				"-storage.local.target-heap-size="+fmt.Sprintf("%d", reqMem.Value()/3*2),
			)
		}
	case 2:
		retentionTimeFlag := "-storage.tsdb.retention="
		if version.Minor >= 7 {
			retentionTimeFlag = "-storage.tsdb.retention.time="
			if p.Spec.RetentionSize != "" {
				promArgs = append(promArgs,
					fmt.Sprintf("-storage.tsdb.retention.size=%s", p.Spec.RetentionSize),
				)
			}
		}
		promArgs = append(promArgs,
			fmt.Sprintf("-config.file=%s", path.Join(confOutDir, configEnvsubstFilename)),
			fmt.Sprintf("-storage.tsdb.path=%s", storageDir),
			retentionTimeFlag+p.Spec.Retention,
			"-web.enable-lifecycle",
			"-storage.tsdb.no-lockfile",
		)

		if p.Spec.Query != nil && p.Spec.Query.LookbackDelta != nil {
			promArgs = append(promArgs,
				fmt.Sprintf("-query.lookback-delta=%s", *p.Spec.Query.LookbackDelta),
			)
		}

		if version.Minor >= 4 {
			if p.Spec.Rules.Alert.ForOutageTolerance != "" {
				promArgs = append(promArgs, "-rules.alert.for-outage-tolerance="+p.Spec.Rules.Alert.ForOutageTolerance)
			}
			if p.Spec.Rules.Alert.ForGracePeriod != "" {
				promArgs = append(promArgs, "-rules.alert.for-grace-period="+p.Spec.Rules.Alert.ForGracePeriod)
			}
			if p.Spec.Rules.Alert.ResendDelay != "" {
				promArgs = append(promArgs, "-rules.alert.resend-delay="+p.Spec.Rules.Alert.ResendDelay)
			}
		}

		if version.Minor >= 5 {
			if p.Spec.Query != nil && p.Spec.Query.MaxSamples != nil {
				promArgs = append(promArgs,
					fmt.Sprintf("-query.max-samples=%d", *p.Spec.Query.MaxSamples),
				)
			}
		}
	default:
		return nil, errors.Errorf("unsupported Prometheus major version %s", version)
	}

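	// Query tuning flags below are applied independently of the major
	// version; a MaxConcurrency below 1 falls back to the default.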
	if p.Spec.Query != nil {
		if p.Spec.Query.MaxConcurrency != nil {
			if *p.Spec.Query.MaxConcurrency < 1 {
				p.Spec.Query.MaxConcurrency = &defaultMaxConcurrency
			}
			promArgs = append(promArgs,
				fmt.Sprintf("-query.max-concurrency=%d", *p.Spec.Query.MaxConcurrency),
			)
		}
		if p.Spec.Query.Timeout != nil {
			promArgs = append(promArgs,
				fmt.Sprintf("-query.timeout=%s", *p.Spec.Query.Timeout),
			)
		}
	}

	if p.Spec.EnableAdminAPI {
		promArgs = append(promArgs, "-web.enable-admin-api")
	}

	if p.Spec.ExternalURL != "" {
		promArgs = append(promArgs, "-web.external-url="+p.Spec.ExternalURL)
	}

	webRoutePrefix := "/"
	if p.Spec.RoutePrefix != "" {
		webRoutePrefix = p.Spec.RoutePrefix
	}
	promArgs = append(promArgs, "-web.route-prefix="+webRoutePrefix)

	if p.Spec.LogLevel != "" && p.Spec.LogLevel != "info" {
		promArgs = append(promArgs, fmt.Sprintf("-log.level=%s", p.Spec.LogLevel))
	}
	if version.GTE(semver.MustParse("2.6.0")) {
		if p.Spec.LogFormat != "" && p.Spec.LogFormat != "logfmt" {
			promArgs = append(promArgs, fmt.Sprintf("-log.format=%s", p.Spec.LogFormat))
		}
	}

	if version.GTE(semver.MustParse("2.11.0")) && p.Spec.WALCompression != nil {
		if *p.Spec.WALCompression {
			promArgs = append(promArgs, "-storage.tsdb.wal-compression")
		} else {
			promArgs = append(promArgs, "-no-storage.tsdb.wal-compression")
		}
	}

	if version.GTE(semver.MustParse("2.8.0")) && p.Spec.AllowOverlappingBlocks {
		promArgs = append(promArgs, "-storage.tsdb.allow-overlapping-blocks")
	}

	var ports []v1.ContainerPort
	if p.Spec.ListenLocal {
		promArgs = append(promArgs, "-web.listen-address=127.0.0.1:9090")
	} else {
		ports = []v1.ContainerPort{
			{
				Name:          p.Spec.PortName,
				ContainerPort: 9090,
				Protocol:      v1.ProtocolTCP,
			},
		}
	}

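	// Prometheus 2.x switched from single-dash to double-dash flags. All
	// arguments above are declared with a single dash, so prefix them with a
	// second one for 2.x here.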
	if version.Major == 2 {
		for i, a := range promArgs {
			promArgs[i] = "-" + a
		}
	}

	localReloadURL := &url.URL{
		Scheme: "http",
		Host:   c.LocalHost + ":9090",
		Path:   path.Clean(webRoutePrefix + "/-/reload"),
	}

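	// Base volumes: the generated configuration Secret, the TLS assets
	// Secret, and an emptyDir ("config-out") shared between the
	// config-reloader (which writes the expanded config) and Prometheus
	// (which mounts it read-only).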
	volumes := []v1.Volume{
		{
			Name: "config",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: configSecretName(p.Name),
				},
			},
		},
		{
			Name: "tls-assets",
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: tlsAssetsSecretName(p.Name),
				},
			},
		},
		{
			Name: "config-out",
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{},
			},
		},
	}

	for _, name := range ruleConfigMapNames {
		volumes = append(volumes, v1.Volume{
			Name: name,
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: name,
					},
				},
			},
		})
	}

	volName := volumeName(p.Name)
	if p.Spec.Storage != nil {
		if p.Spec.Storage.VolumeClaimTemplate.Name != "" {
			volName = p.Spec.Storage.VolumeClaimTemplate.Name
		}
	}

	promVolumeMounts := []v1.VolumeMount{
		{
			Name:      "config-out",
			ReadOnly:  true,
			MountPath: confOutDir,
		},
		{
			Name:      "tls-assets",
			ReadOnly:  true,
			MountPath: tlsAssetsDir,
		},
		{
			Name:      volName,
			MountPath: storageDir,
			SubPath:   subPathForStorage(p.Spec.Storage),
		},
	}

	promVolumeMounts = append(promVolumeMounts, p.Spec.VolumeMounts...)
	for _, name := range ruleConfigMapNames {
		promVolumeMounts = append(promVolumeMounts, v1.VolumeMount{
			Name:      name,
			MountPath: rulesDir + "/" + name,
		})
	}

	for _, s := range p.Spec.Secrets {
		volumes = append(volumes, v1.Volume{
			Name: k8sutil.SanitizeVolumeName("secret-" + s),
			VolumeSource: v1.VolumeSource{
				Secret: &v1.SecretVolumeSource{
					SecretName: s,
				},
			},
		})
		promVolumeMounts = append(promVolumeMounts, v1.VolumeMount{
			Name:      k8sutil.SanitizeVolumeName("secret-" + s),
			ReadOnly:  true,
			MountPath: secretsDir + s,
		})
	}

	for _, c := range p.Spec.ConfigMaps {
		volumes = append(volumes, v1.Volume{
			Name: k8sutil.SanitizeVolumeName("configmap-" + c),
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: c,
					},
				},
			},
		})
		promVolumeMounts = append(promVolumeMounts, v1.VolumeMount{
			Name:      k8sutil.SanitizeVolumeName("configmap-" + c),
			ReadOnly:  true,
			MountPath: configmapsDir + c,
		})
	}

	configReloadVolumeMounts := []v1.VolumeMount{
		{
			Name:      "config",
			MountPath: confDir,
		},
		{
			Name:      "config-out",
			MountPath: confOutDir,
		},
	}

	configReloadArgs := []string{
		fmt.Sprintf("--log-format=%s", c.LogFormat),
		fmt.Sprintf("--reload-url=%s", localReloadURL),
		fmt.Sprintf("--config-file=%s", path.Join(confDir, configFilename)),
		fmt.Sprintf("--config-envsubst-file=%s", path.Join(confOutDir, configEnvsubstFilename)),
	}

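	// localProbe is used when Prometheus only listens on localhost: the
	// kubelet cannot reach the HTTP endpoint directly, so the probe shells
	// out to curl or wget inside the container instead.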
	const localProbe = `if [ -x "$(command -v curl)" ]; then curl %s; elif [ -x "$(command -v wget)" ]; then wget -q -O /dev/null %s; else exit 1; fi`

	var readinessProbeHandler v1.Handler
	if (version.Major == 1 && version.Minor >= 8) || version.Major == 2 {
		{
			healthyPath := path.Clean(webRoutePrefix + "/-/healthy")
			if p.Spec.ListenLocal {
				localHealthyPath := fmt.Sprintf("http://localhost:9090%s", healthyPath)
				readinessProbeHandler.Exec = &v1.ExecAction{
					Command: []string{
						"sh",
						"-c",
						fmt.Sprintf(localProbe, localHealthyPath, localHealthyPath),
					},
				}
			} else {
				readinessProbeHandler.HTTPGet = &v1.HTTPGetAction{
					Path: healthyPath,
					Port: intstr.FromString(p.Spec.PortName),
				}
			}
		}
		{
			readyPath := path.Clean(webRoutePrefix + "/-/ready")
			if p.Spec.ListenLocal {
				localReadyPath := fmt.Sprintf("http://localhost:9090%s", readyPath)
				readinessProbeHandler.Exec = &v1.ExecAction{
					Command: []string{
						"sh",
						"-c",
						fmt.Sprintf(localProbe, localReadyPath, localReadyPath),
					},
				}
			} else {
				readinessProbeHandler.HTTPGet = &v1.HTTPGetAction{
					Path: readyPath,
					Port: intstr.FromString(p.Spec.PortName),
				}
			}
		}
	} else {
		readinessProbeHandler = v1.Handler{
			HTTPGet: &v1.HTTPGetAction{
				Path: path.Clean(webRoutePrefix + "/status"),
				Port: intstr.FromString(p.Spec.PortName),
			},
		}
	}

	// TODO(paulfantom): Re-add livenessProbe and add startupProbe when kubernetes 1.21 is available.
	// This would be a follow-up to https://github.com/prometheus-operator/prometheus-operator/pull/3502
	readinessProbe := &v1.Probe{
		Handler:          readinessProbeHandler,
		TimeoutSeconds:   probeTimeoutSeconds,
		PeriodSeconds:    5,
		FailureThreshold: 120, // Allow up to 10m on startup for data recovery
	}

	podAnnotations := map[string]string{}
	podLabels := map[string]string{}
	podSelectorLabels := map[string]string{
		"app":        "prometheus",
		"prometheus": p.Name,
	}
	if p.Spec.PodMetadata != nil {
		if p.Spec.PodMetadata.Labels != nil {
			for k, v := range p.Spec.PodMetadata.Labels {
				podLabels[k] = v
			}
		}
		if p.Spec.PodMetadata.Annotations != nil {
			for k, v := range p.Spec.PodMetadata.Annotations {
				podAnnotations[k] = v
			}
		}
	}

	for k, v := range podSelectorLabels {
		podLabels[k] = v
	}

	finalSelectorLabels := c.Labels.Merge(podSelectorLabels)
	finalLabels := c.Labels.Merge(podLabels)

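	// Sidecar containers appended after the core containers: a rule
	// ConfigMap reloader (when rule ConfigMaps are mounted) and, further
	// below, the optional Thanos sidecar.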
	var additionalContainers []v1.Container

	if len(ruleConfigMapNames) != 0 {
		container := v1.Container{
			Name:  "rules-configmap-reloader",
			Image: c.ConfigReloaderImage,
			Args: []string{
				fmt.Sprintf("--webhook-url=%s", localReloadURL),
			},
			VolumeMounts: []v1.VolumeMount{},
			Resources: v1.ResourceRequirements{
				Limits: v1.ResourceList{}, Requests: v1.ResourceList{}},
			TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
		}

		if c.ConfigReloaderCPU != "0" {
			container.Resources.Limits[v1.ResourceCPU] = resource.MustParse(c.ConfigReloaderCPU)
			container.Resources.Requests[v1.ResourceCPU] = resource.MustParse(c.ConfigReloaderCPU)
		}
		if c.ConfigReloaderMemory != "0" {
			container.Resources.Limits[v1.ResourceMemory] = resource.MustParse(c.ConfigReloaderMemory)
			container.Resources.Requests[v1.ResourceMemory] = resource.MustParse(c.ConfigReloaderMemory)
		}

		for _, name := range ruleConfigMapNames {
			mountPath := rulesDir + "/" + name
			container.VolumeMounts = append(container.VolumeMounts, v1.VolumeMount{
				Name:      name,
				MountPath: mountPath,
			})
			container.Args = append(container.Args, fmt.Sprintf("--volume-dir=%s", mountPath))
		}

		additionalContainers = append(additionalContainers, container)
	}

	disableCompaction := p.Spec.DisableCompaction
	if p.Spec.Thanos != nil {
		thBaseImage := operator.StringPtrValOrDefault(p.Spec.Thanos.BaseImage, operator.DefaultThanosBaseImage)
		thVersion := operator.StringPtrValOrDefault(p.Spec.Thanos.Version, operator.DefaultThanosVersion)
		thTag := operator.StringPtrValOrDefault(p.Spec.Thanos.Tag, "")
		thSHA := operator.StringPtrValOrDefault(p.Spec.Thanos.SHA, "")
		thanosImage, err := operator.BuildImagePath(thBaseImage, thVersion, thTag, thSHA)
		if err != nil {
			return nil, errors.Wrap(err, "failed to build image path")
		}
		// If the image path is set in the custom resource, override other image settings.
		if p.Spec.Thanos.Image != nil && strings.TrimSpace(*p.Spec.Thanos.Image) != "" {
			thanosImage = *p.Spec.Thanos.Image
		}

		bindAddress := "[$(POD_IP)]"
		if p.Spec.Thanos.ListenLocal {
			bindAddress = "127.0.0.1"
		}

		thanosArgs := []string{"sidecar",
			fmt.Sprintf("--prometheus.url=http://%s:9090%s", c.LocalHost, path.Clean(webRoutePrefix)),
			fmt.Sprintf("--grpc-address=%s:10901", bindAddress),
			fmt.Sprintf("--http-address=%s:10902", bindAddress),
		}

		if p.Spec.Thanos.GRPCServerTLSConfig != nil {
			tls := p.Spec.Thanos.GRPCServerTLSConfig
			if tls.CertFile != "" {
				thanosArgs = append(thanosArgs, "--grpc-server-tls-cert="+tls.CertFile)
			}
			if tls.KeyFile != "" {
				thanosArgs = append(thanosArgs, "--grpc-server-tls-key="+tls.KeyFile)
			}
			if tls.CAFile != "" {
				thanosArgs = append(thanosArgs, "--grpc-server-tls-client-ca="+tls.CAFile)
			}
		}

		container := v1.Container{
			Name:                     "thanos-sidecar",
			Image:                    thanosImage,
			TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
			Args:                     thanosArgs,
			Env: []v1.EnvVar{
				{
					Name: "POD_IP",
					ValueFrom: &v1.EnvVarSource{
						FieldRef: &v1.ObjectFieldSelector{
							FieldPath: "status.podIP",
						},
					},
				},
			},
			Ports: []v1.ContainerPort{
				{
					Name:          "http",
					ContainerPort: 10902,
				},
				{
					Name:          "grpc",
					ContainerPort: 10901,
				},
			},
			Resources: p.Spec.Thanos.Resources,
		}

		if p.Spec.Thanos.ObjectStorageConfig != nil {
			container.Args = append(container.Args, "--objstore.config=$(OBJSTORE_CONFIG)")
			container.Env = append(container.Env, v1.EnvVar{
				Name: "OBJSTORE_CONFIG",
				ValueFrom: &v1.EnvVarSource{
					SecretKeyRef: p.Spec.Thanos.ObjectStorageConfig,
				},
			})

			container.Args = append(container.Args, fmt.Sprintf("--tsdb.path=%s", storageDir))
			container.VolumeMounts = append(
				container.VolumeMounts,
				v1.VolumeMount{
					Name:      volName,
					MountPath: storageDir,
					SubPath:   subPathForStorage(p.Spec.Storage),
				},
			)

			// NOTE(bwplotka): As described in https://thanos.io/components/sidecar.md/ we have to turn off compaction of Prometheus
			// to avoid races during upload, if the uploads are configured.
			disableCompaction = true
		}

		if p.Spec.Thanos.TracingConfig != nil {
			container.Args = append(container.Args, "--tracing.config=$(TRACING_CONFIG)")
			container.Env = append(container.Env, v1.EnvVar{
				Name: "TRACING_CONFIG",
				ValueFrom: &v1.EnvVarSource{
					SecretKeyRef: p.Spec.Thanos.TracingConfig,
				},
			})
		}

		if p.Spec.Thanos.LogLevel != "" {
			container.Args = append(container.Args, "--log.level="+p.Spec.Thanos.LogLevel)
		} else if p.Spec.LogLevel != "" {
			container.Args = append(container.Args, "--log.level="+p.Spec.LogLevel)
		}
		if p.Spec.Thanos.LogFormat != "" {
			container.Args = append(container.Args, "--log.format="+p.Spec.Thanos.LogFormat)
		} else if p.Spec.LogFormat != "" {
			container.Args = append(container.Args, "--log.format="+p.Spec.LogFormat)
		}

		if p.Spec.Thanos.MinTime != "" {
			container.Args = append(container.Args, "--min-time="+p.Spec.Thanos.MinTime)
		}
		additionalContainers = append(additionalContainers, container)
	}
	if disableCompaction {
		promArgs = append(promArgs, "--storage.tsdb.max-block-duration=2h")
	}

	prometheusConfigReloaderResources := v1.ResourceRequirements{
		Limits: v1.ResourceList{}, Requests: v1.ResourceList{}}
	if c.ConfigReloaderCPU != "0" {
		prometheusConfigReloaderResources.Limits[v1.ResourceCPU] = resource.MustParse(c.ConfigReloaderCPU)
		prometheusConfigReloaderResources.Requests[v1.ResourceCPU] = resource.MustParse(c.ConfigReloaderCPU)
	}
	if c.ConfigReloaderMemory != "0" {
		prometheusConfigReloaderResources.Limits[v1.ResourceMemory] = resource.MustParse(c.ConfigReloaderMemory)
		prometheusConfigReloaderResources.Requests[v1.ResourceMemory] = resource.MustParse(c.ConfigReloaderMemory)
	}

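	// The operator-generated containers (Prometheus itself and the config
	// reloader) are merged with any user-provided containers from
	// p.Spec.Containers, matched by name, so users can override individual
	// fields of the defaults.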
	operatorContainers := append([]v1.Container{
		{
			Name:                     "prometheus",
			Image:                    prometheusImagePath,
			Ports:                    ports,
			Args:                     promArgs,
			VolumeMounts:             promVolumeMounts,
			ReadinessProbe:           readinessProbe,
			Resources:                p.Spec.Resources,
			TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
		}, {
			Name:                     "prometheus-config-reloader",
			Image:                    c.PrometheusConfigReloaderImage,
			TerminationMessagePolicy: v1.TerminationMessageFallbackToLogsOnError,
			Env: []v1.EnvVar{
				{
					Name: "POD_NAME",
					ValueFrom: &v1.EnvVarSource{
						FieldRef: &v1.ObjectFieldSelector{FieldPath: "metadata.name"},
					},
				},
			},
			Command:      []string{"/bin/prometheus-config-reloader"},
			Args:         configReloadArgs,
			VolumeMounts: configReloadVolumeMounts,
			Resources:    prometheusConfigReloaderResources,
		},
	}, additionalContainers...)

	containers, err := k8sutil.MergePatchContainers(operatorContainers, p.Spec.Containers)
	if err != nil {
		return nil, errors.Wrap(err, "failed to merge containers spec")
	}
	// PodManagementPolicy is set to Parallel to mitigate issues in kubernetes: https://github.com/kubernetes/kubernetes/issues/60164
	// This is also mentioned as one of limitations of StatefulSets: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#limitations
	return &appsv1.StatefulSetSpec{
		ServiceName:         governingServiceName,
		Replicas:            p.Spec.Replicas,
		PodManagementPolicy: appsv1.ParallelPodManagement,
		UpdateStrategy: appsv1.StatefulSetUpdateStrategy{
			Type: appsv1.RollingUpdateStatefulSetStrategyType,
		},
		Selector: &metav1.LabelSelector{
			MatchLabels: finalSelectorLabels,
		},
		Template: v1.PodTemplateSpec{
			ObjectMeta: metav1.ObjectMeta{
				Labels:      finalLabels,
				Annotations: podAnnotations,
			},
			Spec: v1.PodSpec{
				Containers:                    containers,
				InitContainers:                p.Spec.InitContainers,
				SecurityContext:               p.Spec.SecurityContext,
				ServiceAccountName:            p.Spec.ServiceAccountName,
				NodeSelector:                  p.Spec.NodeSelector,
				PriorityClassName:             p.Spec.PriorityClassName,
				TerminationGracePeriodSeconds: &terminationGracePeriod,
				Volumes:                       volumes,
				Tolerations:                   p.Spec.Tolerations,
				Affinity:                      p.Spec.Affinity,
			},
		},
	}, nil
}

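// configSecretName returns the name of the Secret holding the generated
// Prometheus configuration for the given Prometheus name.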
func configSecretName(name string) string {
	return prefixedName(name)
}

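// tlsAssetsSecretName returns the name of the Secret holding the TLS
// material (CA certificates, certificates and keys) that the operator
// collects for scrape configurations.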
func tlsAssetsSecretName(name string) string {
	return fmt.Sprintf("%s-tls-assets", prefixedName(name))
}

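// volumeName returns the name of the data volume for the given Prometheus
// name.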
func volumeName(name string) string {
	return fmt.Sprintf("%s-db", prefixedName(name))
}

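// prefixedName prepends the "prometheus-" prefix used by all objects the
// operator creates for a Prometheus resource.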
func prefixedName(name string) string {
	return fmt.Sprintf("prometheus-%s", name)
}

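// subPathForStorage returns the volume subPath under which Prometheus stores
// its data, or the empty string when no storage is configured or subPath
// mounting is disabled.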
func subPathForStorage(s *monitoringv1.StorageSpec) string {
	if s == nil {
		return ""
	}

	if s.DisableMountSubPath {
		return ""
	}

	return "prometheus-db"
}