Mirror of https://github.com/prometheus-operator/prometheus-operator.git (synced 2025-04-15 16:56:24 +00:00)

Remove old manifests and replace with jsonnet build

parent 8b54c34352 · commit 2f6b3ab674
132 changed files with 6594 additions and 8735 deletions
@@ -7,7 +7,7 @@
To authenticate a `ServiceMonitor` against a metrics endpoint, use [`basicAuth`](../api.md#basicauth)

[embedmd]:# (../../contrib/kube-prometheus/manifests/examples/basic-auth/service-monitor.yaml)
[embedmd]:# (../../contrib/kube-prometheus/examples/basic-auth/service-monitor.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
@@ -33,7 +33,7 @@ spec:
      app: myapp
```

[embedmd]:# (../../contrib/kube-prometheus/manifests/examples/basic-auth/secrets.yaml)
[embedmd]:# (../../contrib/kube-prometheus/examples/basic-auth/secrets.yaml)
```yaml
apiVersion: v1
kind: Secret
@@ -85,9 +85,10 @@ By default every Kubernetes cluster has a `Service` for easy access to the API s
Aside from the kubelet and the API server, the other Kubernetes components all run on top of Kubernetes itself. To discover Kubernetes components that run in a Pod, they simply have to be added to a `Service`.

> Note: the `Service` manifests for the scheduler and controller-manager are just examples. They may need to be adapted to your cluster.

kube-scheduler:

[embedmd]:# (../../contrib/kube-prometheus/manifests/k8s/self-hosted/kube-scheduler.yaml)
```yaml
apiVersion: v1
kind: Service
@@ -110,7 +111,6 @@ spec:

kube-controller-manager:

[embedmd]:# (../../contrib/kube-prometheus/manifests/k8s/self-hosted/kube-controller-manager.yaml)
```yaml
apiVersion: v1
kind: Service
@@ -137,75 +137,69 @@ Unrelated to Kubernetes itself, but still important is to gather various metrics

[embedmd]:# (../../contrib/kube-prometheus/manifests/node-exporter/node-exporter-daemonset.yaml)
```yaml
apiVersion: extensions/v1beta1
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  labels:
    app: node-exporter
  name: node-exporter
  namespace: monitoring
spec:
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      app: node-exporter
  template:
    metadata:
      labels:
        app: node-exporter
      name: node-exporter
    spec:
      serviceAccountName: node-exporter
      containers:
      - args:
        - --web.listen-address=127.0.0.1:9101
        - --path.procfs=/host/proc
        - --path.sysfs=/host/sys
        image: quay.io/prometheus/node-exporter:v0.15.2
        name: node-exporter
        resources:
          limits:
            cpu: 102m
            memory: 180Mi
          requests:
            cpu: 102m
            memory: 180Mi
        volumeMounts:
        - mountPath: /host/proc
          name: proc
          readOnly: false
        - mountPath: /host/sys
          name: sys
          readOnly: false
      - args:
        - --secure-listen-address=:9100
        - --upstream=http://127.0.0.1:9101/
        image: quay.io/coreos/kube-rbac-proxy:v0.3.0
        name: kube-rbac-proxy
        ports:
        - containerPort: 9100
          name: https
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      hostNetwork: true
      hostPID: true
      containers:
      - image: quay.io/prometheus/node-exporter:v0.15.2
        args:
        - "--web.listen-address=127.0.0.1:9101"
        - "--path.procfs=/host/proc"
        - "--path.sysfs=/host/sys"
        name: node-exporter
        resources:
          requests:
            memory: 30Mi
            cpu: 100m
          limits:
            memory: 50Mi
            cpu: 200m
        volumeMounts:
        - name: proc
          readOnly: true
          mountPath: /host/proc
        - name: sys
          readOnly: true
          mountPath: /host/sys
      - name: kube-rbac-proxy
        image: quay.io/brancz/kube-rbac-proxy:v0.2.0
        args:
        - "--secure-listen-address=:9100"
        - "--upstream=http://127.0.0.1:9101/"
        ports:
        - containerPort: 9100
          hostPort: 9100
          name: https
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
          limits:
            memory: 40Mi
            cpu: 20m
      tolerations:
      - effect: NoSchedule
        operator: Exists
      serviceAccountName: node-exporter
      volumes:
      - name: proc
        hostPath:
      - hostPath:
          path: /proc
      - name: sys
        hostPath:
        name: proc
      - hostPath:
          path: /sys

        name: sys
```

And the respective `Service` manifest:
@@ -216,105 +210,117 @@ apiVersion: v1
kind: Service
metadata:
  labels:
    app: node-exporter
    k8s-app: node-exporter
  name: node-exporter
  namespace: monitoring
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: https
    port: 9100
    protocol: TCP
    targetPort: https
  selector:
    app: node-exporter

```
And last but not least, there is kube-state-metrics, which collects information about Kubernetes objects themselves as they are accessible from the API. Find more information on what kind of metrics kube-state-metrics exposes in [its repository](https://github.com/kubernetes/kube-state-metrics).
[embedmd]:# (../../contrib/kube-prometheus/manifests/kube-state-metrics/kube-state-metrics-deployment.yaml)
```yaml
apiVersion: extensions/v1beta1
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    app: kube-state-metrics
  name: kube-state-metrics
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kube-state-metrics
  template:
    metadata:
      labels:
        app: kube-state-metrics
    spec:
      serviceAccountName: kube-state-metrics
      containers:
      - args:
        - --secure-listen-address=:8443
        - --upstream=http://127.0.0.1:8081/
        image: quay.io/coreos/kube-rbac-proxy:v0.3.0
        name: kube-rbac-proxy-main
        ports:
        - containerPort: 8443
          name: https-main
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
      - args:
        - --secure-listen-address=:9443
        - --upstream=http://127.0.0.1:8082/
        image: quay.io/coreos/kube-rbac-proxy:v0.3.0
        name: kube-rbac-proxy-self
        ports:
        - containerPort: 9443
          name: https-self
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
      - args:
        - --host=127.0.0.1
        - --port=8081
        - --telemetry-host=127.0.0.1
        - --telemetry-port=8082
        image: quay.io/coreos/kube-state-metrics:v1.3.0
        name: kube-state-metrics
        resources:
          limits:
            cpu: 102m
            memory: 180Mi
          requests:
            cpu: 102m
            memory: 180Mi
      - command:
        - /pod_nanny
        - --container=kube-state-metrics
        - --cpu=100m
        - --extra-cpu=2m
        - --memory=150Mi
        - --extra-memory=30Mi
        - --threshold=5
        - --deployment=kube-state-metrics
        env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        image: quay.io/coreos/addon-resizer:1.0
        name: addon-resizer
        resources:
          limits:
            cpu: 10m
            memory: 30Mi
          requests:
            cpu: 10m
            memory: 30Mi
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      containers:
      - name: kube-rbac-proxy-main
        image: quay.io/brancz/kube-rbac-proxy:v0.2.0
        args:
        - "--secure-listen-address=:8443"
        - "--upstream=http://127.0.0.1:8081/"
        ports:
        - name: https-main
          containerPort: 8443
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
          limits:
            memory: 40Mi
            cpu: 20m
      - name: kube-rbac-proxy-self
        image: quay.io/brancz/kube-rbac-proxy:v0.2.0
        args:
        - "--secure-listen-address=:9443"
        - "--upstream=http://127.0.0.1:8082/"
        ports:
        - name: https-self
          containerPort: 9443
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
          limits:
            memory: 40Mi
            cpu: 20m
      - name: kube-state-metrics
        image: quay.io/coreos/kube-state-metrics:v1.2.0
        args:
        - "--host=127.0.0.1"
        - "--port=8081"
        - "--telemetry-host=127.0.0.1"
        - "--telemetry-port=8082"
      - name: addon-resizer
        image: gcr.io/google_containers/addon-resizer:1.0
        resources:
          limits:
            cpu: 100m
            memory: 30Mi
          requests:
            cpu: 100m
            memory: 30Mi
        env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        command:
        - /pod_nanny
        - --container=kube-state-metrics
        - --cpu=100m
        - --extra-cpu=2m
        - --memory=150Mi
        - --extra-memory=30Mi
        - --threshold=5
        - --deployment=kube-state-metrics
      serviceAccountName: kube-state-metrics
```
> Make sure that the `ServiceAccount` called `kube-state-metrics` exists and, if using RBAC, is bound to the correct role. See the kube-state-metrics [repository for RBAC requirements](https://github.com/kubernetes/kube-state-metrics/tree/master/kubernetes).
@@ -327,237 +333,239 @@ apiVersion: v1
kind: Service
metadata:
  labels:
    app: kube-state-metrics
    k8s-app: kube-state-metrics
  name: kube-state-metrics
  namespace: monitoring
spec:
  clusterIP: None
  ports:
  - name: https-main
    port: 8443
    targetPort: https-main
    protocol: TCP
  - name: https-self
    port: 9443
    targetPort: https-self
    protocol: TCP
  selector:
    app: kube-state-metrics

```

## Setup Monitoring
Once all the steps in the previous section have been taken, there should be `Endpoints` objects containing the IPs of all of the above-mentioned Kubernetes components. Now to set up the actual Prometheus and Alertmanager clusters. This manifest assumes that the Alertmanager cluster will be deployed in the `monitoring` namespace.
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus/prometheus-k8s.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  name: k8s
  labels:
    prometheus: k8s
  name: k8s
  namespace: monitoring
spec:
  alerting:
    alertmanagers:
    - name: alertmanager-main
      namespace: monitoring
      port: web
  replicas: 2
  version: v2.2.1
  resources:
    requests:
      memory: 400Mi
  ruleSelector:
    matchLabels:
      prometheus: k8s
      role: alert-rules
  serviceAccountName: prometheus-k8s
  serviceMonitorSelector:
    matchExpressions:
    - {key: k8s-app, operator: Exists}
  ruleSelector:
    matchLabels:
      role: alert-rules
      prometheus: k8s
  resources:
    requests:
      # 2Gi is default, but won't schedule if you don't have a node with >2Gi
      # memory. Modify based on your target and time-series count for
      # production use. This value is mainly meant for demonstration/testing
      # purposes.
      memory: 400Mi
  alerting:
    alertmanagers:
    - namespace: monitoring
      name: alertmanager-main
      port: web
    - key: k8s-app
      operator: Exists
  version: v2.2.1
```
> Make sure that the `ServiceAccount` called `prometheus-k8s` exists and, if using RBAC, is bound to the correct role. Read more on [RBAC when using the Prometheus Operator](../rbac.md).

The expression used here to select `ServiceMonitor`s is that they must have a label with the key `k8s-app`. If you look closely at all the `Service` objects described above, they all carry a `k8s-app` label with their component name, which makes it convenient to select them with `ServiceMonitor`s.
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus/prometheus-k8s-service-monitor-apiserver.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-apiserver.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: kube-apiserver
  labels:
    k8s-app: apiserver
  name: kube-apiserver
  namespace: monitoring
spec:
  jobLabel: component
  selector:
    matchLabels:
      component: apiserver
      provider: kubernetes
  namespaceSelector:
    matchNames:
    - default
  endpoints:
  - port: https
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    interval: 30s
    port: https
    scheme: https
    tlsConfig:
      caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      serverName: kubernetes
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
  jobLabel: component
  namespaceSelector:
    matchNames:
    - default
  selector:
    matchLabels:
      component: apiserver
      provider: kubernetes
```
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus/prometheus-k8s-service-monitor-kubelet.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-kubelet.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: kubelet
  labels:
    k8s-app: kubelet
  name: kubelet
  namespace: monitoring
spec:
  jobLabel: k8s-app
  endpoints:
  - port: https-metrics
    scheme: https
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    interval: 30s
    port: https-metrics
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
  - port: https-metrics
    scheme: https
    path: /metrics/cadvisor
    interval: 30s
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    honorLabels: true
    interval: 30s
    path: /metrics/cadvisor
    port: https-metrics
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
  jobLabel: k8s-app
  namespaceSelector:
    matchNames:
    - kube-system
  selector:
    matchLabels:
      k8s-app: kubelet
  namespaceSelector:
    matchNames:
    - kube-system
```
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus/prometheus-k8s-service-monitor-kube-controller-manager.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-kube-controller-manager.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: kube-controller-manager
  labels:
    k8s-app: kube-controller-manager
  name: kube-controller-manager
  namespace: monitoring
spec:
  jobLabel: k8s-app
  endpoints:
  - port: http-metrics
    interval: 30s
  - interval: 30s
    port: http-metrics
  jobLabel: k8s-app
  namespaceSelector:
    matchNames:
    - kube-system
  selector:
    matchLabels:
      k8s-app: kube-controller-manager
  namespaceSelector:
    matchNames:
    - kube-system
```
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus/prometheus-k8s-service-monitor-kube-scheduler.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-kube-scheduler.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: kube-scheduler
  labels:
    k8s-app: kube-scheduler
  name: kube-scheduler
  namespace: monitoring
spec:
  jobLabel: k8s-app
  endpoints:
  - port: http-metrics
    interval: 30s
  - interval: 30s
    port: http-metrics
  jobLabel: k8s-app
  namespaceSelector:
    matchNames:
    - kube-system
  selector:
    matchLabels:
      k8s-app: kube-scheduler
  namespaceSelector:
    matchNames:
    - kube-system
```
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus/prometheus-k8s-service-monitor-kube-state-metrics.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-kube-state-metrics.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: kube-state-metrics
  labels:
    k8s-app: kube-state-metrics
  name: kube-state-metrics
  namespace: monitoring
spec:
  endpoints:
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    honorLabels: true
    interval: 30s
    port: https-main
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    interval: 30s
    port: https-self
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
  jobLabel: k8s-app
  namespaceSelector:
    matchNames:
    - monitoring
  selector:
    matchLabels:
      k8s-app: kube-state-metrics
  namespaceSelector:
    matchNames:
    - monitoring
  endpoints:
  - port: https-main
    scheme: https
    interval: 30s
    honorLabels: true
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    tlsConfig:
      insecureSkipVerify: true
  - port: https-self
    scheme: https
    interval: 30s
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    tlsConfig:
      insecureSkipVerify: true
```
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus/prometheus-k8s-service-monitor-node-exporter.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-node-exporter.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: node-exporter
  labels:
    k8s-app: node-exporter
  name: node-exporter
  namespace: monitoring
spec:
  endpoints:
  - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    interval: 30s
    port: https
    scheme: https
    tlsConfig:
      insecureSkipVerify: true
  jobLabel: k8s-app
  selector:
    matchLabels:
      k8s-app: node-exporter
  namespaceSelector:
    matchNames:
    - monitoring
  endpoints:
  - port: https
    scheme: https
    interval: 30s
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
    tlsConfig:
      insecureSkipVerify: true
  selector:
    matchLabels:
      k8s-app: node-exporter
```
And the Alertmanager:

[embedmd]:# (../../contrib/kube-prometheus/manifests/alertmanager/alertmanager.yaml)
[embedmd]:# (../../contrib/kube-prometheus/manifests/alertmanager-main/alertmanager-main.yaml)
```yaml
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
  name: main
  labels:
    alertmanager: main
  name: main
  namespace: monitoring
spec:
  replicas: 3
  serviceAccountName: alertmanager-main
  version: v0.14.0
```
8 Makefile
@@ -71,7 +71,7 @@ docs: embedmd po-docgen
	$(GOPATH)/bin/po-docgen compatibility > Documentation/compatibility.md

generate: jsonnet-docker
	docker run --rm -v `pwd`:/go/src/github.com/coreos/prometheus-operator po-jsonnet make generate-deepcopy generate-openapi jsonnet generate-bundle docs generate-kube-prometheus generate-crd
	docker run --rm -u=$(shell id -u $(USER)):$(shell id -g $(USER)) -v `pwd`:/go/src/github.com/coreos/prometheus-operator po-jsonnet make generate-deepcopy generate-openapi jsonnet generate-bundle docs generate-kube-prometheus generate-crd

$(GOBIN)/openapi-gen:

@@ -102,9 +102,9 @@ generate-kube-prometheus:
	cd contrib/kube-prometheus; $(MAKE) generate-raw

jsonnet:
	jsonnet -J /ksonnet-lib hack/generate/prometheus-operator.jsonnet | json2yaml > example/non-rbac/prometheus-operator.yaml
	jsonnet -J /ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | json2yaml > example/rbac/prometheus-operator/prometheus-operator.yaml
	jsonnet -J /ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | json2yaml > contrib/kube-prometheus/manifests/prometheus-operator/prometheus-operator.yaml
	jsonnet -J /go/src/github.com/ksonnet/ksonnet-lib hack/generate/prometheus-operator.jsonnet | gojsontoyaml > example/non-rbac/prometheus-operator.yaml
	jsonnet -J /go/src/github.com/ksonnet/ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | gojsontoyaml > example/rbac/prometheus-operator/prometheus-operator.yaml
	jsonnet -J /go/src/github.com/ksonnet/ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | gojsontoyaml > contrib/kube-prometheus/manifests/prometheus-operator/prometheus-operator.yaml

jsonnet-docker:
	docker build -f scripts/jsonnet/Dockerfile -t po-jsonnet .
@@ -5,7 +5,7 @@ image:

generate: image
	@echo ">> Compiling assets and generating Kubernetes manifests"
	docker run --rm -v `pwd`:/go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus --workdir /go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus po-jsonnet make generate-raw
	docker run --rm -u=$(shell id -u $(USER)):$(shell id -g $(USER)) -v `pwd`:/go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus --workdir /go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus po-jsonnet make generate-raw

generate-raw:
	./hack/scripts/generate-manifests.sh
	./hack/scripts/build-jsonnet.sh example-dist/base/kube-prometheus.jsonnet manifests
@@ -1,5 +1,7 @@
# kube-prometheus

> Note that everything in the `contrib/kube-prometheus/` directory is experimental and may change significantly at any time.

This repository collects Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and
[Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)
combined with documentation and scripts to provide single-command deployments of end-to-end
@@ -46,16 +48,15 @@ install
Simply run:

```bash
export KUBECONFIG=<path> # defaults to "~/.kube/config"
cd contrib/kube-prometheus/
hack/cluster-monitoring/deploy
```

After all pods are ready, you can reach:
After all pods are ready, you can reach each of the UIs by port-forwarding:

* Prometheus UI on node port `30900`
* Alertmanager UI on node port `30903`
* Grafana on node port `30902`
* Prometheus UI: `kubectl -n monitoring port-forward prometheus-k8s-0 9090`
* Alertmanager UI: `kubectl -n monitoring port-forward alertmanager-main-0 9093`
* Grafana: `kubectl -n monitoring port-forward $(kubectl get pods -n monitoring -lapp=grafana -ojsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}') 3000`

To tear it all down again, run:
@@ -63,9 +64,53 @@ To tear it all down again, run:
hack/cluster-monitoring/teardown
```

## Customizing

As everyone's infrastructure is slightly different, different organizations have different requirements, so there may be modifications you want to make to kube-prometheus to fit your needs.

The kube-prometheus stack is intended to be a jsonnet library for organizations to consume and use in their own infrastructure repositories. Below is an example of how it can be used to deploy the stack properly on minikube.

The three "distribution" examples we have assembled can be found in:

* `example-dist/base`: contains the plain kube-prometheus stack for organizations to build on.
* `example-dist/kubeadm`: contains the kube-prometheus stack with slight modifications to properly monitor kubeadm clusters, and exposes the UIs on NodePorts for demonstration purposes.
* `example-dist/bootkube`: contains the kube-prometheus stack with slight modifications to work properly on clusters created with bootkube.

The examples in `example-dist/` are purely meant for demonstration purposes; the `kube-prometheus.jsonnet` file should live in your organization's infrastructure repository and use the kube-prometheus library provided here.

An example of an additional modification you may want to make is adding an `Ingress` object for each of the UIs (see the sketch below). The point, as opposed to other solutions out there, is that this library does not need to provide every possible customization option; it is all up to the user to customize!
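To make the `Ingress` idea concrete, here is a minimal sketch (an editorial illustration, not part of this commit) of what such an override could look like, following the same merge pattern as the `example-dist` files shown further down. The output path key, host name, and backend details are assumptions:

```jsonnet
local kubePrometheus = import "kube-prometheus.libsonnet";

local namespace = "monitoring";

// Hypothetical Ingress for the Grafana UI, written as a plain object so it
// needs no additional ksonnet helpers; host and backend port are placeholders.
local grafanaIngress = {
  apiVersion: "extensions/v1beta1",
  kind: "Ingress",
  metadata: { name: "grafana", namespace: namespace },
  spec: {
    rules: [{
      host: "grafana.example.com",
      http: { paths: [{ backend: { serviceName: "grafana", servicePort: 3000 } }] },
    }],
  },
};

// Merge the extra object into the map returned by the library; every key is
// an output path that build-jsonnet.sh turns into one manifest file.
local objects = kubePrometheus.new(namespace) + {
  "grafana/grafana-ingress.yaml": grafanaIngress,
};

{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
```

Because the library returns an ordinary jsonnet object, any such customization is a plain object merge; nothing in kube-prometheus itself has to know about it.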
### minikube kubeadm example

See `example-dist/kubeadm` for an example of deploying on minikube, using the minikube kubeadm bootstrapper. The `example-dist/kubeadm/kube-prometheus.jsonnet` file renders the kube-prometheus manifests using jsonnet and then merges the result with kubeadm specifics, such as information on how to monitor the kube-controller-manager and kube-scheduler as created by kubeadm. In addition, for demonstration purposes, it converts the Services selecting Prometheus, Alertmanager, and Grafana to NodePort Services.

Let's give that a try, and create a minikube cluster:

```
minikube delete && minikube start --kubernetes-version=v1.9.6 --memory=4096 --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
```

Then we can render the manifests for kubeadm (because we are using the minikube kubeadm bootstrapper):
```
docker run --rm \
    -v `pwd`:/go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus \
    --workdir /go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus \
    po-jsonnet \
    ./hack/scripts/build-jsonnet.sh example-dist/kubeadm/kube-prometheus.jsonnet example-dist/kubeadm/manifests
```

> Note: the `po-jsonnet` Docker image is built using [this Dockerfile](/scripts/jsonnet/Dockerfile); you can also build it using `make image` from the `contrib/kube-prometheus` folder.

Then the stack can be deployed using:

```
hack/cluster-monitoring/deploy example-dist/kubeadm
```
## Monitoring custom services

The example manifests in [manifests/examples/example-app](/contrib/kube-prometheus/manifests/examples/example-app)
The example manifests in [examples/example-app](/contrib/kube-prometheus/examples/example-app)
deploy a fake service exposing Prometheus metrics. They additionally define a new Prometheus
server and a [`ServiceMonitor`](https://github.com/coreos/prometheus-operator/blob/master/Documentation/design.md#servicemonitor),
which specifies how the example service should be monitored.

@@ -76,10 +121,13 @@ manage its life cycle.
hack/example-service-monitoring/deploy
```

After all pods are ready you can reach the Prometheus server on node port `30100` and observe
how it monitors the service as specified. Same as before, this Prometheus server automatically
discovers the Alertmanager cluster deployed in the [Monitoring Kubernetes](#Monitoring-Kubernetes)
section.
After all pods are ready you can reach the Prometheus server, similarly to the Prometheus server above:

```bash
kubectl port-forward prometheus-frontend-0 9090
```

Then you can access Prometheus through `http://localhost:9090/`.

Teardown:
@@ -1,19 +0,0 @@
#!/usr/bin/env bash
set -e
set -x

prefix="tmp/manifests"
json="tmp/manifests.json"

rm -rf ${prefix}
mkdir -p $(dirname "${json}")
jsonnet -J /home/brancz/.jsonnet-bundler/src/git/git@github.com-ksonnet-ksonnet-lib/master jsonnet/kube-prometheus.jsonnet > ${json}

files=$(jq -r 'keys[]' ${json})

for file in ${files}; do
    dir=$(dirname "${file}")
    path="${prefix}/${dir}"
    mkdir -p ${path}
    jq -r ".[\"${file}\"]" ${json} | yaml2json | json2yaml > "${prefix}/${file}"
done
@@ -0,0 +1,6 @@
local kubePrometheus = import "kube-prometheus.libsonnet";

local namespace = "monitoring";
local objects = kubePrometheus.new(namespace);

{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
2 contrib/kube-prometheus/example-dist/bootkube/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
tmp/
manifests/
@@ -0,0 +1,36 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local kubePrometheus = import "kube-prometheus.libsonnet";

local namespace = "monitoring";

local controllerManagerService = service.new("kube-controller-manager-prometheus-discovery", {"k8s-app": "kube-controller-manager"}, servicePort.newNamed("http-metrics", 10252, 10252)) +
  service.mixin.metadata.withNamespace("kube-system") +
  service.mixin.metadata.withLabels({"k8s-app": "kube-controller-manager"});

local schedulerService = service.new("kube-scheduler-prometheus-discovery", {"k8s-app": "kube-scheduler"}, servicePort.newNamed("http-metrics", 10251, 10251)) +
  service.mixin.metadata.withNamespace("kube-system") +
  service.mixin.metadata.withLabels({"k8s-app": "kube-scheduler"});

local kubeDNSService = service.new("kube-dns-prometheus-discovery", {"k8s-app": "kube-dns"}, [servicePort.newNamed("http-metrics-skydns", 10055, 10055), servicePort.newNamed("http-metrics-dnsmasq", 10054, 10054)]) +
  service.mixin.metadata.withNamespace("kube-system") +
  service.mixin.metadata.withLabels({"k8s-app": "kube-dns"});

local objects = kubePrometheus.new(namespace) +
  {
    "prometheus-k8s/prometheus-k8s-service.yaml"+:
      service.mixin.spec.withPorts(servicePort.newNamed("web", 9090, "web") + servicePort.withNodePort(30900)) +
      service.mixin.spec.withType("NodePort"),
    "alertmanager-main/alertmanager-main-service.yaml"+:
      service.mixin.spec.withPorts(servicePort.newNamed("web", 9093, "web") + servicePort.withNodePort(30903)) +
      service.mixin.spec.withType("NodePort"),
    "grafana/grafana-service.yaml"+:
      service.mixin.spec.withPorts(servicePort.newNamed("http", 3000, "http") + servicePort.withNodePort(30902)) +
      service.mixin.spec.withType("NodePort"),
    "prometheus-k8s/kube-controller-manager-prometheus-discovery-service.yaml": controllerManagerService,
    "prometheus-k8s/kube-scheduler-prometheus-discovery-service.yaml": schedulerService,
    "prometheus-k8s/kube-dns-prometheus-discovery-service.yaml": kubeDNSService,
  };

{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
2 contrib/kube-prometheus/example-dist/kubeadm/.gitignore vendored Normal file
@@ -0,0 +1,2 @@
tmp/
manifests/
@@ -0,0 +1,31 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;
local kubePrometheus = import "kube-prometheus.libsonnet";

local namespace = "monitoring";

local controllerManagerService = service.new("kube-controller-manager-prometheus-discovery", {component: "kube-controller-manager"}, servicePort.newNamed("http-metrics", 10252, 10252)) +
  service.mixin.metadata.withNamespace("kube-system") +
  service.mixin.metadata.withLabels({"k8s-app": "kube-controller-manager"});

local schedulerService = service.new("kube-scheduler-prometheus-discovery", {component: "kube-scheduler"}, servicePort.newNamed("http-metrics", 10251, 10251)) +
  service.mixin.metadata.withNamespace("kube-system") +
  service.mixin.metadata.withLabels({"k8s-app": "kube-scheduler"});

local objects = kubePrometheus.new(namespace) +
  {
    "prometheus-k8s/prometheus-k8s-service.yaml"+:
      service.mixin.spec.withPorts(servicePort.newNamed("web", 9090, "web") + servicePort.withNodePort(30900)) +
      service.mixin.spec.withType("NodePort"),
    "alertmanager-main/alertmanager-main-service.yaml"+:
      service.mixin.spec.withPorts(servicePort.newNamed("web", 9093, "web") + servicePort.withNodePort(30903)) +
      service.mixin.spec.withType("NodePort"),
    "grafana/grafana-service.yaml"+:
      service.mixin.spec.withPorts(servicePort.newNamed("http", 3000, "http") + servicePort.withNodePort(30902)) +
      service.mixin.spec.withType("NodePort"),
    "prometheus-k8s/kube-controller-manager-prometheus-discovery-service.yaml": controllerManagerService,
    "prometheus-k8s/kube-scheduler-prometheus-discovery-service.yaml": schedulerService,
  };

{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
@@ -1,40 +1,24 @@
#!/usr/bin/env bash

if [ -z "${KUBECONFIG}" ]; then
    export KUBECONFIG=~/.kube/config
fi
manifest_prefix=${1-.}

# CAUTION - setting NAMESPACE will deploy most components to the given namespace
# however some are hardcoded to 'monitoring'. Only use if you have reviewed all manifests.
kubectl create namespace monitoring

if [ -z "${NAMESPACE}" ]; then
    NAMESPACE=monitoring
fi

kubectl create namespace "$NAMESPACE"

kctl() {
    kubectl --namespace "$NAMESPACE" "$@"
}

kctl apply -f manifests/prometheus-operator
kubectl apply -f ${manifest_prefix}/manifests/prometheus-operator/

# Wait for CRDs to be ready.
printf "Waiting for Operator to register custom resource definitions..."
until kctl get customresourcedefinitions servicemonitors.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kctl get customresourcedefinitions prometheuses.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kctl get customresourcedefinitions alertmanagers.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kctl get servicemonitors.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kctl get prometheuses.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kctl get alertmanagers.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kubectl get customresourcedefinitions servicemonitors.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kubectl get customresourcedefinitions prometheuses.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kubectl get customresourcedefinitions alertmanagers.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kubectl get servicemonitors.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kubectl get prometheuses.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
until kubectl get alertmanagers.monitoring.coreos.com > /dev/null 2>&1; do sleep 1; printf "."; done
echo "done!"

kctl apply -f manifests/node-exporter
kctl apply -f manifests/kube-state-metrics
kctl apply -f manifests/grafana/grafana-credentials.yaml
kctl apply -f manifests/grafana
find manifests/prometheus -type f ! -name prometheus-k8s-roles.yaml ! -name prometheus-k8s-role-bindings.yaml -exec kubectl --namespace "$NAMESPACE" apply -f {} \;
kubectl apply -f manifests/prometheus/prometheus-k8s-roles.yaml
kubectl apply -f manifests/prometheus/prometheus-k8s-role-bindings.yaml
kctl apply -f manifests/alertmanager/
kubectl apply -f ${manifest_prefix}/manifests/node-exporter/
kubectl apply -f ${manifest_prefix}/manifests/kube-state-metrics/
kubectl apply -f ${manifest_prefix}/manifests/grafana/
kubectl apply -f ${manifest_prefix}/manifests/prometheus-k8s/
kubectl apply -f ${manifest_prefix}/manifests/alertmanager-main/
@@ -1,17 +0,0 @@
#!/usr/bin/env bash

# We assume that the kubelet uses token authN and authZ, as otherwise
# Prometheus needs a client certificate, which gives it full access to the
# kubelet, rather than just the metrics. Token authN and authZ allows more fine
# grained and easier access control. Simply start minikube with the following
# command (you can of course adapt the version and memory to your needs):
#
# $ minikube delete && minikube start --kubernetes-version=v1.9.1 --memory=4096 --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
#
# In future versions of minikube and kubeadm this will be the default, but for
# the time being, we will have to configure it ourselves.

hack/cluster-monitoring/deploy

kubectl --namespace=kube-system apply -f manifests/k8s/kubeadm/

@@ -1,6 +0,0 @@
#!/usr/bin/env bash

hack/cluster-monitoring/teardown

kubectl --namespace=kube-system delete -f manifests/k8s/minikube

@@ -1,6 +0,0 @@
#!/usr/bin/env bash

hack/cluster-monitoring/deploy

kubectl apply -f manifests/k8s/self-hosted

@@ -1,6 +0,0 @@
#!/usr/bin/env bash

hack/cluster-monitoring/teardown

kubectl delete -f manifests/k8s/self-hosted
@ -1,30 +1,4 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
if [ -z "${KUBECONFIG}" ]; then
|
||||
export KUBECONFIG=~/.kube/config
|
||||
fi
|
||||
|
||||
# CAUTION - NAMESPACE must match its value when deploy script was run.
|
||||
# Some resources are always deployed to the monitoring namespace.
|
||||
|
||||
if [ -z "${NAMESPACE}" ]; then
|
||||
NAMESPACE=monitoring
|
||||
fi
|
||||
|
||||
kctl() {
|
||||
kubectl --namespace "$NAMESPACE" "$@"
|
||||
}
|
||||
|
||||
kctl delete -f manifests/node-exporter
|
||||
kctl delete -f manifests/kube-state-metrics
|
||||
kctl delete -f manifests/grafana
|
||||
find manifests/prometheus -type f ! -name prometheus-k8s-roles.yaml ! -name prometheus-k8s-role-bindings.yaml -exec kubectl --namespace "$NAMESPACE" delete -f {} \;
|
||||
kubectl delete -f manifests/prometheus/prometheus-k8s-roles.yaml
|
||||
kubectl delete -f manifests/prometheus/prometheus-k8s-role-bindings.yaml
|
||||
kctl delete -f manifests/alertmanager
|
||||
|
||||
# Hack: wait a bit to let the controller delete the deployed Prometheus server.
|
||||
sleep 5
|
||||
|
||||
kctl delete -f manifests/prometheus-operator
|
||||
kubectl delete namespace monitoring
|
||||
|
||||
|
|
@@ -1,3 +1,3 @@
#!/usr/bin/env bash

kubectl apply -f manifests/examples/example-app
kubectl apply -f examples/example-app

@@ -1,3 +1,3 @@
#!/usr/bin/env bash

kubectl delete -f manifests/examples/example-app
kubectl delete -f examples/example-app
25 contrib/kube-prometheus/hack/scripts/build-jsonnet.sh Executable file
@@ -0,0 +1,25 @@
#!/usr/bin/env bash
set -e
set -x

jsonnet="${1-kube-prometheus.jsonnet}"
prefix="${2-manifests}"
json="tmp/manifests.json"

rm -rf ${prefix}
mkdir -p $(dirname "${json}")
jsonnet \
    -J $GOPATH/src/github.com/ksonnet/ksonnet-lib \
    -J $GOPATH/src/github.com/grafana/grafonnet-lib \
    -J $GOPATH/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus/jsonnet \
    -J $GOPATH/src/github.com/brancz/kubernetes-grafana/src/kubernetes-jsonnet \
    ${jsonnet} > ${json}

files=$(jq -r 'keys[]' ${json})

for file in ${files}; do
    dir=$(dirname "${file}")
    path="${prefix}/${dir}"
    mkdir -p ${path}
    jq -r ".[\"${file}\"]" ${json} | gojsontoyaml -yamltojson | gojsontoyaml > "${prefix}/${file}"
done
@@ -1,11 +0,0 @@
#!/bin/bash

cat <<-EOF
apiVersion: v1
kind: Secret
metadata:
  name: alertmanager-main
data:
  alertmanager.yaml: $(cat assets/alertmanager/alertmanager.yaml | base64 --wrap=0)
EOF
@@ -1,39 +0,0 @@
#!/bin/bash
set -e
set +x

cat <<-EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-dashboard-definitions-0
data:
EOF

for f in assets/grafana/generated/*-dashboard.json
do
    rm -rf $f
done

virtualenv -p python3 .env 2>&1 > /dev/null
source .env/bin/activate 2>&1 > /dev/null
pip install -Ur requirements.txt 2>&1 > /dev/null
for f in assets/grafana/*.dashboard.py
do
    basefilename=$(basename $f)
    JSON_FILENAME="assets/grafana/generated/${basefilename%%.*}-dashboard.json"
    generate-dashboard $f -o $JSON_FILENAME 2>&1 > /dev/null
done

cp assets/grafana/raw-json-dashboards/*-dashboard.json assets/grafana/generated/

for f in assets/grafana/generated/*-dashboard.json
do
    basefilename=$(basename $f)
    echo " $basefilename: |+"
    if [ "$basefilename" = "etcd-dashboard.json" ]; then
        hack/scripts/wrap-dashboard.sh $f prometheus-etcd | sed "s/^/ /g"
    else
        hack/scripts/wrap-dashboard.sh $f prometheus | sed "s/^/ /g"
    fi
done
@@ -1,20 +0,0 @@
#!/bin/bash

if [ "$#" -ne 2 ]; then
    echo "Usage: $0 user password"
    exit 1
fi

user=$1
password=$2

cat <<-EOF
apiVersion: v1
kind: Secret
metadata:
  name: grafana-credentials
data:
  user: $(echo -n ${user} | base64 --wrap=0)
  password: $(echo -n ${password} | base64 --wrap=0)
EOF
@@ -1,26 +0,0 @@
#!/bin/bash
set -e
set +x

# Generate Alert Rules ConfigMap
hack/scripts/generate-rules-configmap.sh > manifests/prometheus/prometheus-k8s-rules.yaml

# Generate Dashboard ConfigMap
hack/scripts/generate-dashboards-configmap.sh > manifests/grafana/grafana-dashboard-definitions.yaml

# Generate Dashboard ConfigMap with configmap-generator tool
# Max Size per ConfigMap: 240000
# Input dir: assets/grafana
# output file: manifests/grafana/grafana-dashboards.yaml
# grafana deployment output file: manifests/grafana/grafana-deployment.yaml
test -f manifests/grafana/grafana-dashboard-definitions.yaml && rm -f manifests/grafana/grafana-dashboard-definitions.yaml
test -f manifests/grafana/grafana-deployment.yaml && rm -f manifests/grafana/grafana-deployment.yaml
test -f manifests/grafana/grafana-dashboards.yaml && rm -f manifests/grafana/grafana-dashboards.yaml
hack/grafana-dashboards-configmap-generator/bin/grafana_dashboards_generate.sh -s 240000 -i assets/grafana/generated -o manifests/grafana/grafana-dashboard-definitions.yaml -g manifests/grafana/grafana-deployment.yaml -d manifests/grafana/grafana-dashboards.yaml

# Generate Grafana Credentials Secret
hack/scripts/generate-grafana-credentials-secret.sh admin admin > manifests/grafana/grafana-credentials.yaml

# Generate Secret for Alertmanager config
hack/scripts/generate-alertmanager-config-secret.sh > manifests/alertmanager/alertmanager-config.yaml
@@ -1,18 +0,0 @@
#!/bin/bash

cat <<-EOF
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-k8s-rules
  labels:
    role: alert-rules
    prometheus: k8s
data:
EOF

for f in assets/prometheus/rules/*.rules.y*ml
do
    echo " $(basename "$f"): |+"
    cat $f | sed "s/^/ /g"
done
@@ -1,51 +0,0 @@
#!/bin/bash -eu

# Intended usage:
# * Edit dashboard in Grafana (you need to login first with admin/admin
#   login/password).
# * Save dashboard in Grafana to check if the specification is correct.
#   Looks like this is the only way to check if the dashboard specification
#   has errors.
# * Download dashboard specification as JSON file in Grafana:
#   Share -> Export -> Save to file.
# * Drop dashboard specification in assets folder:
#   mv Nodes-1488465802729.json assets/grafana/node-dashboard.json
# * Regenerate Grafana configmap:
#   ./hack/scripts/generate-manifests.sh
# * Apply new configmap:
#   kubectl -n monitoring apply -f manifests/grafana/grafana-cm.yaml

if [ "$#" -ne 2 ]; then
    echo "Usage: $0 path-to-dashboard.json grafana-prometheus-datasource-name"
    exit 1
fi

dashboardjson=$1
datasource_name=$2
inputname="DS_PROMETHEUS"

if [ "$datasource_name" = "prometheus-etcd" ]; then
    inputname="DS_PROMETHEUS-ETCD"
fi

cat <<EOF
{
  "dashboard":
EOF

cat $dashboardjson

cat <<EOF
  ,
  "inputs": [
    {
      "name": "$inputname",
      "pluginId": "prometheus",
      "type": "datasource",
      "value": "$datasource_name"
    }
  ],
  "overwrite": true
}
EOF
@@ -1,25 +1,8 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local secret = k.core.v1.secret;

local plainConfig = "global:
  resolve_timeout: 5m
route:
  group_by: ['job']
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 12h
  receiver: 'null'
  routes:
  - match:
      alertname: DeadMansSwitch
    receiver: 'null'
receivers:
- name: 'null'";

local config = std.base64(plainConfig);

{
  new(namespace)::
    secret.new("alertmanager-main", {"alertmanager.yaml": config}) +
  new(namespace, plainConfig)::
    secret.new("alertmanager-main", {"alertmanager.yaml": std.base64(plainConfig)}) +
    secret.mixin.metadata.withNamespace(namespace)
}
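For reference, a minimal sketch of how the reworked two-argument constructor is consumed — the new `kube-prometheus.libsonnet` later in this diff imports the raw configuration from an asset file and passes it through:

```jsonnet
local alertmanager = import "alertmanager/alertmanager.libsonnet";

// The caller now owns the plain-text Alertmanager configuration; the library
// only base64-encodes it into the Secret data.
local alertmanagerConfig = importstr "../assets/alertmanager/alertmanager.yaml";

alertmanager.config.new("monitoring", alertmanagerConfig)
```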
@@ -1,62 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";

local alertmanager = import "alertmanager/alertmanager.libsonnet";
local ksm = import "kube-state-metrics/kube-state-metrics.libsonnet";
local nodeExporter = import "node-exporter/node-exporter.libsonnet";
local po = import "prometheus-operator/prometheus-operator.libsonnet";
local prometheus = import "prometheus/prometheus.libsonnet";

local namespace = "monitoring";

local objects = {
  "alertmanager-main/alertmanager-main-secret.yaml": alertmanager.config.new(namespace),
  "alertmanager-main/alertmanager-main-service-account.yaml": alertmanager.serviceAccount.new(namespace),
  "alertmanager-main/alertmanager-main-service.yaml": alertmanager.service.new(namespace),
  "alertmanager-main/alertmanager-main.yaml": alertmanager.alertmanager.new(namespace),

  "kube-state-metrics/kube-state-metrics-cluster-role-binding": ksm.clusterRoleBinding.new(namespace),
  "kube-state-metrics/kube-state-metrics-cluster-role.yaml": ksm.clusterRole.new(),
  "kube-state-metrics/kube-state-metrics-deployment.yaml": ksm.deployment.new(namespace),
  "kube-state-metrics/kube-state-metrics-role-binding.yaml": ksm.roleBinding.new(namespace),
  "kube-state-metrics/kube-state-metrics-role.yaml": ksm.role.new(namespace),
  "kube-state-metrics/kube-state-metrics-service-account.yaml": ksm.serviceAccount.new(namespace),
  "kube-state-metrics/kube-state-metrics-service.yaml": ksm.service.new(namespace),

  "node-exporter/node-exporter-cluster-role-binding.yaml": nodeExporter.clusterRoleBinding.new(namespace),
  "node-exporter/node-exporter-cluster-role.yaml": nodeExporter.clusterRole.new(),
  "node-exporter/node-exporter-daemonset.yaml": nodeExporter.daemonset.new(namespace),
  "node-exporter/node-exporter-service-account.yaml": nodeExporter.serviceAccount.new(namespace),
  "node-exporter/node-exporter-service.yaml": nodeExporter.service.new(namespace),

  "prometheus-operator/prometheus-operator-cluster-role-binding.yaml": po.clusterRoleBinding.new(namespace),
  "prometheus-operator/prometheus-operator-cluster-role.yaml": po.clusterRole.new(),
  "prometheus-operator/prometheus-operator-deployment.yaml": po.deployment.new(namespace),
  "prometheus-operator/prometheus-operator-service.yaml": po.service.new(namespace),
  "prometheus-operator/prometheus-operator-service-account.yaml": po.serviceAccount.new(namespace),

  "prometheus-k8s/prometheus-k8s-cluster-role-binding.yaml": prometheus.clusterRoleBinding.new(namespace),
  "prometheus-k8s/prometheus-k8s-cluster-role.yaml": prometheus.clusterRole.new(),
  "prometheus-k8s/prometheus-k8s-service-account.yaml": prometheus.serviceAccount.new(namespace),
  "prometheus-k8s/prometheus-k8s-service.yaml": prometheus.service.new(namespace),
  "prometheus-k8s/prometheus-k8s.yaml": prometheus.prometheus.new(namespace),
  "prometheus-k8s/prometheus-k8s-role-binding-config.yaml": prometheus.roleBindingConfig.new(namespace),
  "prometheus-k8s/prometheus-k8s-role-binding-namespace.yaml": prometheus.roleBindingNamespace.new(namespace),
  "prometheus-k8s/prometheus-k8s-role-binding-kube-system.yaml": prometheus.roleBindingKubeSystem.new(namespace),
  "prometheus-k8s/prometheus-k8s-role-binding-default.yaml": prometheus.roleBindingDefault.new(namespace),
  "prometheus-k8s/prometheus-k8s-role-config.yaml": prometheus.roleConfig.new(namespace),
  "prometheus-k8s/prometheus-k8s-role-namespace.yaml": prometheus.roleNamespace.new(namespace),
  "prometheus-k8s/prometheus-k8s-role-kube-system.yaml": prometheus.roleKubeSystem.new(),
  "prometheus-k8s/prometheus-k8s-role-default.yaml": prometheus.roleDefault.new(),
  "prometheus-k8s/prometheus-k8s-service-monitor-alertmanager.yaml": prometheus.serviceMonitorAlertmanager.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-apiserver.yaml": prometheus.serviceMonitorApiserver.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-coredns.yaml": prometheus.serviceMonitorCoreDNS.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-kube-controller-manager.yaml": prometheus.serviceMonitorControllerManager.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-kube-scheduler.yaml": prometheus.serviceMonitorScheduler.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-kube-state-metrics.yaml": prometheus.serviceMonitorKubeStateMetrics.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-kubelet.yaml": prometheus.serviceMonitorKubelet.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-node-exporter.yaml": prometheus.serviceMonitorNodeExporter.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-prometheus-operator.yaml": prometheus.serviceMonitorPrometheusOperator.new(namespace),
  "prometheus-k8s/prometheus-k8s-service-monitor-prometheus.yaml": prometheus.serviceMonitorPrometheus.new(namespace),
};

{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
85 contrib/kube-prometheus/jsonnet/kube-prometheus.libsonnet Normal file
@ -0,0 +1,85 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";

local alertmanager = import "alertmanager/alertmanager.libsonnet";
local ksm = import "kube-state-metrics/kube-state-metrics.libsonnet";
local nodeExporter = import "node-exporter/node-exporter.libsonnet";
local po = import "prometheus-operator/prometheus-operator.libsonnet";
local prometheus = import "prometheus/prometheus.libsonnet";
local grafana = import "grafana/grafana.libsonnet";

local alertmanagerConfig = importstr "../assets/alertmanager/alertmanager.yaml";

local ruleFiles = {
  "alertmanager.rules.yaml": importstr "../assets/prometheus/rules/alertmanager.rules.yaml",
  "etcd3.rules.yaml": importstr "../assets/prometheus/rules/etcd3.rules.yaml",
  "general.rules.yaml": importstr "../assets/prometheus/rules/general.rules.yaml",
  "kube-controller-manager.rules.yaml": importstr "../assets/prometheus/rules/kube-controller-manager.rules.yaml",
  "kube-scheduler.rules.yaml": importstr "../assets/prometheus/rules/kube-scheduler.rules.yaml",
  "kube-state-metrics.rules.yaml": importstr "../assets/prometheus/rules/kube-state-metrics.rules.yaml",
  "kubelet.rules.yaml": importstr "../assets/prometheus/rules/kubelet.rules.yaml",
  "kubernetes.rules.yaml": importstr "../assets/prometheus/rules/kubernetes.rules.yaml",
  "node.rules.yaml": importstr "../assets/prometheus/rules/node.rules.yaml",
  "prometheus.rules.yaml": importstr "../assets/prometheus/rules/prometheus.rules.yaml",
};

{
  new(namespace)::
    {
      "grafana/grafana-dashboard-definitions.yaml": grafana.dashboardDefinitions.new(namespace),
      "grafana/grafana-dashboard-sources.yaml": grafana.dashboardSources.new(namespace),
      "grafana/grafana-datasources.yaml": grafana.dashboardDatasources.new(namespace),
      "grafana/grafana-deployment.yaml": grafana.deployment.new(namespace),
      "grafana/grafana-service-account.yaml": grafana.serviceAccount.new(namespace),
      "grafana/grafana-service.yaml": grafana.service.new(namespace),

      "alertmanager-main/alertmanager-main-secret.yaml": alertmanager.config.new(namespace, alertmanagerConfig),
      "alertmanager-main/alertmanager-main-service-account.yaml": alertmanager.serviceAccount.new(namespace),
      "alertmanager-main/alertmanager-main-service.yaml": alertmanager.service.new(namespace),
      "alertmanager-main/alertmanager-main.yaml": alertmanager.alertmanager.new(namespace),

      "kube-state-metrics/kube-state-metrics-cluster-role-binding.yaml": ksm.clusterRoleBinding.new(namespace),
      "kube-state-metrics/kube-state-metrics-cluster-role.yaml": ksm.clusterRole.new(),
      "kube-state-metrics/kube-state-metrics-deployment.yaml": ksm.deployment.new(namespace),
      "kube-state-metrics/kube-state-metrics-role-binding.yaml": ksm.roleBinding.new(namespace),
      "kube-state-metrics/kube-state-metrics-role.yaml": ksm.role.new(namespace),
      "kube-state-metrics/kube-state-metrics-service-account.yaml": ksm.serviceAccount.new(namespace),
      "kube-state-metrics/kube-state-metrics-service.yaml": ksm.service.new(namespace),

      "node-exporter/node-exporter-cluster-role-binding.yaml": nodeExporter.clusterRoleBinding.new(namespace),
      "node-exporter/node-exporter-cluster-role.yaml": nodeExporter.clusterRole.new(),
      "node-exporter/node-exporter-daemonset.yaml": nodeExporter.daemonset.new(namespace),
      "node-exporter/node-exporter-service-account.yaml": nodeExporter.serviceAccount.new(namespace),
      "node-exporter/node-exporter-service.yaml": nodeExporter.service.new(namespace),

      "prometheus-operator/prometheus-operator-cluster-role-binding.yaml": po.clusterRoleBinding.new(namespace),
      "prometheus-operator/prometheus-operator-cluster-role.yaml": po.clusterRole.new(),
      "prometheus-operator/prometheus-operator-deployment.yaml": po.deployment.new(namespace),
      "prometheus-operator/prometheus-operator-service.yaml": po.service.new(namespace),
      "prometheus-operator/prometheus-operator-service-account.yaml": po.serviceAccount.new(namespace),

      "prometheus-k8s/prometheus-k8s-cluster-role-binding.yaml": prometheus.clusterRoleBinding.new(namespace),
      "prometheus-k8s/prometheus-k8s-cluster-role.yaml": prometheus.clusterRole.new(),
      "prometheus-k8s/prometheus-k8s-service-account.yaml": prometheus.serviceAccount.new(namespace),
      "prometheus-k8s/prometheus-k8s-service.yaml": prometheus.service.new(namespace),
      "prometheus-k8s/prometheus-k8s.yaml": prometheus.prometheus.new(namespace),
      "prometheus-k8s/prometheus-k8s-rules.yaml": prometheus.rules.new(namespace, ruleFiles),
      "prometheus-k8s/prometheus-k8s-role-binding-config.yaml": prometheus.roleBindingConfig.new(namespace),
      "prometheus-k8s/prometheus-k8s-role-binding-namespace.yaml": prometheus.roleBindingNamespace.new(namespace),
      "prometheus-k8s/prometheus-k8s-role-binding-kube-system.yaml": prometheus.roleBindingKubeSystem.new(namespace),
      "prometheus-k8s/prometheus-k8s-role-binding-default.yaml": prometheus.roleBindingDefault.new(namespace),
      "prometheus-k8s/prometheus-k8s-role-config.yaml": prometheus.roleConfig.new(namespace),
      "prometheus-k8s/prometheus-k8s-role-namespace.yaml": prometheus.roleNamespace.new(namespace),
      "prometheus-k8s/prometheus-k8s-role-kube-system.yaml": prometheus.roleKubeSystem.new(),
      "prometheus-k8s/prometheus-k8s-role-default.yaml": prometheus.roleDefault.new(),
      "prometheus-k8s/prometheus-k8s-service-monitor-alertmanager.yaml": prometheus.serviceMonitorAlertmanager.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-apiserver.yaml": prometheus.serviceMonitorApiserver.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-coredns.yaml": prometheus.serviceMonitorCoreDNS.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-kube-controller-manager.yaml": prometheus.serviceMonitorControllerManager.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-kube-scheduler.yaml": prometheus.serviceMonitorScheduler.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-kube-state-metrics.yaml": prometheus.serviceMonitorKubeStateMetrics.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-kubelet.yaml": prometheus.serviceMonitorKubelet.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-node-exporter.yaml": prometheus.serviceMonitorNodeExporter.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-prometheus-operator.yaml": prometheus.serviceMonitorPrometheusOperator.new(namespace),
      "prometheus-k8s/prometheus-k8s-service-monitor-prometheus.yaml": prometheus.serviceMonitorPrometheus.new(namespace),
    }
}
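The library above only returns plain objects; a separate build file turns them into YAML documents, as in the `std.manifestYamlDoc` comprehension shown at the top of this diff. A minimal sketch of such a build file follows; the file name, output directory, and namespace are illustrative assumptions, not values taken from this commit:

```jsonnet
// build.jsonnet (hypothetical name) -- render every kube-prometheus object
// to a YAML document, keyed by its output path. Evaluate with something like:
//   jsonnet -S -J contrib/kube-prometheus/jsonnet -m manifests build.jsonnet
local kubePrometheus = import "kube-prometheus.libsonnet";

// All manifests, parameterized by the target namespace.
local objects = kubePrometheus.new("monitoring");

// One YAML string per output path; -m writes each field to its own file.
{
  [path]: std.manifestYamlDoc(objects[path])
  for path in std.objectFields(objects)
}
```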
@@ -2,7 +2,7 @@ local k = import "ksonnet.beta.3/k.libsonnet";
local rawVersion = importstr "../../../../VERSION";

local removeLineBreaks = function(str) std.join("", std.filter(function(c) c != "\n", std.stringChars(str)));
local version = removeLineBreaks(rawVersion);
local version = "v0.18.1";//removeLineBreaks(rawVersion);

local deployment = k.apps.v1beta2.deployment;
local container = k.apps.v1beta2.deployment.mixin.spec.template.spec.containersType;

@@ -12,7 +12,7 @@ local targetPort = 8080;
local podLabels = {"k8s-app": "prometheus-operator"};

local operatorContainer =
  container.new("prometheus-operator", "quay.io/coreos/prometheus-operator:v" + version) +
  container.new("prometheus-operator", "quay.io/coreos/prometheus-operator:" + version) +
  container.withPorts(containerPort.newNamed("http", targetPort)) +
  container.withArgs(["--kubelet-service=kube-system/kubelet", "--config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1"]) +
  container.mixin.resources.withRequests({cpu: "100m", memory: "50Mi"}) +
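As a quick illustration of what the `removeLineBreaks` helper in this hunk does, the following stand-alone snippet strips the trailing newline that `importstr` preserves when reading the `VERSION` file (the input string here is a made-up stand-in):

```jsonnet
// Stand-alone copy of the helper from the hunk above; "0.18.1\n" is an
// assumed stand-in for the contents of the VERSION file.
local removeLineBreaks = function(str) std.join("", std.filter(function(c) c != "\n", std.stringChars(str)));

removeLineBreaks("0.18.1\n")  // evaluates to "0.18.1"
```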
@@ -0,0 +1,8 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local configMap = k.core.v1.configMap;

{
  new(namespace, ruleFiles)::
    configMap.new("prometheus-k8s-rules", ruleFiles) +
    configMap.mixin.metadata.withNamespace(namespace)
}
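A minimal usage sketch of this constructor; the namespace and the rule-file map below are made up for illustration, not taken from the commit:

```jsonnet
local rules = import "prometheus-k8s-rules.libsonnet";

// Evaluating this should yield a ConfigMap named "prometheus-k8s-rules"
// in the given namespace, with one data entry per rule file.
rules.new("monitoring", {
  "example.rules.yaml": "groups: []",  // hypothetical rule file content
})
```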
@@ -4,7 +4,6 @@ local servicePort = k.core.v1.service.mixin.spec.portsType;

local prometheusPort = servicePort.newNamed("web", 9090, "web");

{
  new(namespace)::
    service.new("prometheus-k8s", {app: "prometheus", prometheus: "k8s"}, prometheusPort) +
@@ -9,6 +9,7 @@
  roleNamespace:: import "prometheus-k8s-role-namespace.libsonnet",
  roleKubeSystem:: import "prometheus-k8s-role-kube-system.libsonnet",
  roleDefault:: import "prometheus-k8s-role-default.libsonnet",
  rules:: import "prometheus-k8s-rules.libsonnet",
  serviceAccount:: import "prometheus-k8s-service-account.libsonnet",
  serviceMonitorAlertmanager:: import "prometheus-k8s-service-monitor-alertmanager.libsonnet",
  serviceMonitorApiserver:: import "prometheus-k8s-service-monitor-apiserver.libsonnet",
@@ -1,6 +1,8 @@
apiVersion: v1
data:
  alertmanager.yaml: Z2xvYmFsOgogIHJlc29sdmVfdGltZW91dDogNW0Kcm91dGU6CiAgZ3JvdXBfYnk6IFsnam9iJ10KICBncm91cF93YWl0OiAzMHMKICBncm91cF9pbnRlcnZhbDogNW0KICByZXBlYXRfaW50ZXJ2YWw6IDEyaAogIHJlY2VpdmVyOiAnbnVsbCcKICByb3V0ZXM6CiAgLSBtYXRjaDoKICAgICAgYWxlcnRuYW1lOiBEZWFkTWFuc1N3aXRjaAogICAgcmVjZWl2ZXI6ICdudWxsJwpyZWNlaXZlcnM6Ci0gbmFtZTogJ251bGwnCg==
kind: Secret
metadata:
  name: alertmanager-main
data:
  alertmanager.yaml: Z2xvYmFsOgogIHJlc29sdmVfdGltZW91dDogNW0Kcm91dGU6CiAgZ3JvdXBfYnk6IFsnam9iJ10KICBncm91cF93YWl0OiAzMHMKICBncm91cF9pbnRlcnZhbDogNW0KICByZXBlYXRfaW50ZXJ2YWw6IDEyaAogIHJlY2VpdmVyOiAnbnVsbCcKICByb3V0ZXM6CiAgLSBtYXRjaDoKICAgICAgYWxlcnRuYW1lOiBEZWFkTWFuc1N3aXRjaAogICAgcmVjZWl2ZXI6ICdudWxsJwpyZWNlaXZlcnM6Ci0gbmFtZTogJ251bGwnCg==
  namespace: monitoring
type: Opaque
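Both copies of the `alertmanager.yaml` value above are the same base64 payload; decoded for readability, it is a placeholder Alertmanager configuration that routes everything, including the DeadMansSwitch alert, to a 'null' receiver:

```yaml
global:
  resolve_timeout: 5m
route:
  group_by: ['job']
  group_wait: 30s
  group_interval: 5m
  repeat_interval: 12h
  receiver: 'null'
  routes:
  - match:
      alertname: DeadMansSwitch
    receiver: 'null'
receivers:
- name: 'null'
```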
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: alertmanager-main
  namespace: monitoring
@@ -4,13 +4,12 @@ metadata:
  labels:
    alertmanager: main
  name: alertmanager-main
  namespace: monitoring
spec:
  type: NodePort
  ports:
  - name: web
    nodePort: 30903
    port: 9093
    protocol: TCP
    targetPort: web
  selector:
    alertmanager: main
    app: alertmanager
@@ -1,9 +1,11 @@
apiVersion: monitoring.coreos.com/v1
kind: Alertmanager
metadata:
  name: main
  labels:
    alertmanager: main
  name: main
  namespace: monitoring
spec:
  replicas: 3
  serviceAccountName: alertmanager-main
  version: v0.14.0
@@ -1,28 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: etcd-k8s
  labels:
    k8s-app: etcd
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: api
    port: 2379
    protocol: TCP
---
apiVersion: v1
kind: Endpoints
metadata:
  name: etcd-k8s
  labels:
    k8s-app: etcd
subsets:
- addresses:
  - ip: 10.142.0.2
    nodeName: 10.142.0.2
  ports:
  - name: api
    port: 2379
    protocol: TCP
@@ -1,28 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: etcd-k8s
  labels:
    k8s-app: etcd
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: api
    port: 2379
    protocol: TCP
---
apiVersion: v1
kind: Endpoints
metadata:
  name: etcd-k8s
  labels:
    k8s-app: etcd
subsets:
- addresses:
  - ip: 172.17.4.51
    nodeName: 172.17.4.51
  ports:
  - name: api
    port: 2379
    protocol: TCP
@@ -1,7 +0,0 @@
apiVersion: v1
kind: Secret
metadata:
  name: grafana-credentials
data:
  user: YWRtaW4=
  password: YWRtaW4=

(File diff suppressed because it is too large.)
@@ -0,0 +1,18 @@
apiVersion: v1
data:
  dashboards.yaml: |-
    [
        {
            "folder": "",
            "name": "0",
            "options": {
                "path": "/grafana-dashboard-definitions/0"
            },
            "org_id": 1,
            "type": "file"
        }
    ]
kind: ConfigMap
metadata:
  name: grafana-dashboards
  namespace: monitoring
@@ -1,12 +0,0 @@
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana-dashboards
data:
  dashboards.yaml: |+
    - name: '0'
      org_id: 1
      folder: ''
      type: file
      options:
        folder: /grafana-dashboard-definitions/0
@@ -1,15 +1,20 @@
apiVersion: v1
data:
  prometheus.yaml: |-
    {
        "datasources": [
            {
                "access": "proxy",
                "etitable": false,
                "name": "prometheus",
                "org_id": 1,
                "type": "prometheus",
                "url": "http://prometheus-k8s.monitoring.svc:9090",
                "version": 1
            }
        ]
    }
kind: ConfigMap
metadata:
  name: grafana-datasources
data:
  prometheus.yaml: |+
    datasources:
    - name: prometheus
      type: prometheus
      access: proxy
      org_id: 1
      url: http://prometheus-k8s.monitoring.svc:9090
      version: 1
      editable: false

  namespace: monitoring
@@ -1,48 +1,59 @@
apiVersion: apps/v1beta1
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    app: grafana
  name: grafana
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: grafana
  template:
    metadata:
      labels:
        app: grafana
    spec:
      containers:
      - image: quay.io/coreos/monitoring-grafana:5.0.3
        name: grafana
        ports:
        - containerPort: 3000
          name: http
        resources:
          limits:
            cpu: 200m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 100Mi
        volumeMounts:
        - mountPath: /data
          name: grafana-storage
          readOnly: false
        - mountPath: /grafana/conf/provisioning/datasources
          name: grafana-datasources
          readOnly: false
        - mountPath: /grafana/conf/provisioning/dashboards
          name: grafana-dashboards
          readOnly: false
        - mountPath: /grafana-dashboard-definitions/0
          name: grafana-dashboard-definitions
          readOnly: false
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      containers:
      - name: grafana
        image: quay.io/coreos/monitoring-grafana:5.0.3
        volumeMounts:
        - name: grafana-storage
          mountPath: /data
        - name: grafana-datasources
          mountPath: /grafana/conf/provisioning/datasources
        - name: grafana-dashboards
          mountPath: /grafana/conf/provisioning/dashboards
        - name: grafana-dashboard-definitions-0
          mountPath: /grafana-dashboard-definitions/0
        ports:
        - name: web
          containerPort: 3000
        resources:
          requests:
            memory: 100Mi
            cpu: 100m
          limits:
            memory: 200Mi
            cpu: 200m
      serviceAccountName: grafana
      volumes:
      - name: grafana-storage
        emptyDir: {}
      - name: grafana-datasources
        configMap:
      - emptyDir: {}
        name: grafana-storage
      - configMap:
          name: grafana-datasources
      - name: grafana-dashboards
        configMap:
          name: grafana-datasources
      - configMap:
          name: grafana-dashboards
      - name: grafana-dashboard-definitions-0
        configMap:
          name: grafana-dashboard-definitions-0
        name: grafana-dashboards
      - configMap:
          name: grafana-dashboard-definitions
        name: grafana-dashboard-definitions
@@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: grafana
  namespace: monitoring
@@ -2,14 +2,11 @@ apiVersion: v1
kind: Service
metadata:
  name: grafana
  labels:
    app: grafana
  namespace: monitoring
spec:
  type: NodePort
  ports:
  - port: 3000
    protocol: TCP
    nodePort: 30902
    targetPort: web
  - name: http
    port: 3000
    targetPort: http
  selector:
    app: grafana
@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: kube-controller-manager-prometheus-discovery
  labels:
    k8s-app: kube-controller-manager
spec:
  selector:
    component: kube-controller-manager
  type: ClusterIP
  clusterIP: None
  ports:
  - name: http-metrics
    port: 10252
    targetPort: 10252
    protocol: TCP
@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: kube-scheduler-prometheus-discovery
  labels:
    k8s-app: kube-scheduler
spec:
  selector:
    component: kube-scheduler
  type: ClusterIP
  clusterIP: None
  ports:
  - name: http-metrics
    port: 10251
    targetPort: 10251
    protocol: TCP
@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: kube-controller-manager-prometheus-discovery
  labels:
    k8s-app: kube-controller-manager
spec:
  selector:
    k8s-app: kube-controller-manager
  type: ClusterIP
  clusterIP: None
  ports:
  - name: http-metrics
    port: 10252
    targetPort: 10252
    protocol: TCP
@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: kube-dns-prometheus-discovery
  labels:
    k8s-app: kube-dns
spec:
  selector:
    k8s-app: kube-dns
  type: ClusterIP
  clusterIP: None
  ports:
  - name: http-metrics-skydns
    port: 10055
    targetPort: 10055
    protocol: TCP
  - name: http-metrics-dnsmasq
    port: 10054
    targetPort: 10054
    protocol: TCP
@@ -1,17 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  namespace: kube-system
  name: kube-scheduler-prometheus-discovery
  labels:
    k8s-app: kube-scheduler
spec:
  selector:
    k8s-app: kube-scheduler
  type: ClusterIP
  clusterIP: None
  ports:
  - name: http-metrics
    port: 10251
    targetPort: 10251
    protocol: TCP
@@ -1,4 +1,4 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: kube-state-metrics
@@ -1,10 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: kube-state-metrics
rules:
- apiGroups: [""]
- apiGroups:
  - ""
  resources:
  - configmaps
  - secrets
  - nodes
  - pods
  - services
@@ -15,31 +18,49 @@ rules:
  - persistentvolumes
  - namespaces
  - endpoints
  verbs: ["list", "watch"]
- apiGroups: ["extensions"]
  verbs:
  - list
  - watch
- apiGroups:
  - extensions
  resources:
  - daemonsets
  - deployments
  - replicasets
  verbs: ["list", "watch"]
- apiGroups: ["apps"]
  verbs:
  - list
  - watch
- apiGroups:
  - apps
  resources:
  - statefulsets
  verbs: ["list", "watch"]
- apiGroups: ["batch"]
  verbs:
  - list
  - watch
- apiGroups:
  - batch
  resources:
  - cronjobs
  - jobs
  verbs: ["list", "watch"]
- apiGroups: ["autoscaling"]
  verbs:
  - list
  - watch
- apiGroups:
  - autoscaling
  resources:
  - horizontalpodautoscalers
  verbs: ["list", "watch"]
- apiGroups: ["authentication.k8s.io"]
  verbs:
  - list
  - watch
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs: ["create"]
  verbs:
  - create
@@ -1,80 +1,95 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1beta2
kind: Deployment
metadata:
  labels:
    app: kube-state-metrics
  name: kube-state-metrics
  namespace: monitoring
spec:
  replicas: 1
  selector:
    matchLabels:
      app: kube-state-metrics
  template:
    metadata:
      labels:
        app: kube-state-metrics
    spec:
      serviceAccountName: kube-state-metrics
      containers:
      - args:
        - --secure-listen-address=:8443
        - --upstream=http://127.0.0.1:8081/
        image: quay.io/coreos/kube-rbac-proxy:v0.3.0
        name: kube-rbac-proxy-main
        ports:
        - containerPort: 8443
          name: https-main
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
      - args:
        - --secure-listen-address=:9443
        - --upstream=http://127.0.0.1:8082/
        image: quay.io/coreos/kube-rbac-proxy:v0.3.0
        name: kube-rbac-proxy-self
        ports:
        - containerPort: 9443
          name: https-self
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
      - args:
        - --host=127.0.0.1
        - --port=8081
        - --telemetry-host=127.0.0.1
        - --telemetry-port=8082
        image: quay.io/coreos/kube-state-metrics:v1.3.0
        name: kube-state-metrics
        resources:
          limits:
            cpu: 102m
            memory: 180Mi
          requests:
            cpu: 102m
            memory: 180Mi
      - command:
        - /pod_nanny
        - --container=kube-state-metrics
        - --cpu=100m
        - --extra-cpu=2m
        - --memory=150Mi
        - --extra-memory=30Mi
        - --threshold=5
        - --deployment=kube-state-metrics
        env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        image: quay.io/coreos/addon-resizer:1.0
        name: addon-resizer
        resources:
          limits:
            cpu: 10m
            memory: 30Mi
          requests:
            cpu: 10m
            memory: 30Mi
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      containers:
      - name: kube-rbac-proxy-main
        image: quay.io/brancz/kube-rbac-proxy:v0.2.0
        args:
        - "--secure-listen-address=:8443"
        - "--upstream=http://127.0.0.1:8081/"
        ports:
        - name: https-main
          containerPort: 8443
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
          limits:
            memory: 40Mi
            cpu: 20m
      - name: kube-rbac-proxy-self
        image: quay.io/brancz/kube-rbac-proxy:v0.2.0
        args:
        - "--secure-listen-address=:9443"
        - "--upstream=http://127.0.0.1:8082/"
        ports:
        - name: https-self
          containerPort: 9443
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
          limits:
            memory: 40Mi
            cpu: 20m
      - name: kube-state-metrics
        image: quay.io/coreos/kube-state-metrics:v1.2.0
        args:
        - "--host=127.0.0.1"
        - "--port=8081"
        - "--telemetry-host=127.0.0.1"
        - "--telemetry-port=8082"
      - name: addon-resizer
        image: gcr.io/google_containers/addon-resizer:1.0
        resources:
          limits:
            cpu: 100m
            memory: 30Mi
          requests:
            cpu: 100m
            memory: 30Mi
        env:
        - name: MY_POD_NAME
          valueFrom:
            fieldRef:
              fieldPath: metadata.name
        - name: MY_POD_NAMESPACE
          valueFrom:
            fieldRef:
              fieldPath: metadata.namespace
        command:
        - /pod_nanny
        - --container=kube-state-metrics
        - --cpu=100m
        - --extra-cpu=2m
        - --memory=150Mi
        - --extra-memory=30Mi
        - --threshold=5
        - --deployment=kube-state-metrics
      serviceAccountName: kube-state-metrics
@@ -1,12 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: kube-state-metrics
  namespace: monitoring
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: kube-state-metrics-resizer
  name: kube-state-metrics-addon-resizer
subjects:
- kind: ServiceAccount
  name: kube-state-metrics
@@ -1,15 +1,21 @@
apiVersion: rbac.authorization.k8s.io/v1beta1
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: kube-state-metrics-resizer
  name: kube-state-metrics
  namespace: monitoring
rules:
- apiGroups: [""]
- apiGroups:
  - ""
  resources:
  - pods
  verbs: ["get"]
- apiGroups: ["extensions"]
  verbs:
  - get
- apiGroups:
  - extensions
  resourceNames:
  - kube-state-metrics
  resources:
  - deployments
  resourceNames: ["kube-state-metrics"]
  verbs: ["get", "update"]
  verbs:
  - get
  - update
@@ -2,3 +2,4 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  name: kube-state-metrics
  namespace: monitoring
@@ -2,20 +2,16 @@ apiVersion: v1
kind: Service
metadata:
  labels:
    app: kube-state-metrics
    k8s-app: kube-state-metrics
  name: kube-state-metrics
  namespace: monitoring
spec:
  clusterIP: None
  ports:
  - name: https-main
    port: 8443
    targetPort: https-main
    protocol: TCP
  - name: https-self
    port: 9443
    targetPort: https-self
    protocol: TCP
  selector:
    app: kube-state-metrics
@@ -3,11 +3,15 @@ kind: ClusterRole
metadata:
  name: node-exporter
rules:
- apiGroups: ["authentication.k8s.io"]
- apiGroups:
  - authentication.k8s.io
  resources:
  - tokenreviews
  verbs: ["create"]
- apiGroups: ["authorization.k8s.io"]
  verbs:
  - create
- apiGroups:
  - authorization.k8s.io
  resources:
  - subjectaccessreviews
  verbs: ["create"]
  verbs:
  - create
@@ -1,69 +1,63 @@
apiVersion: extensions/v1beta1
apiVersion: apps/v1beta2
kind: DaemonSet
metadata:
  labels:
    app: node-exporter
  name: node-exporter
  namespace: monitoring
spec:
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 1
    type: RollingUpdate
  selector:
    matchLabels:
      app: node-exporter
  template:
    metadata:
      labels:
        app: node-exporter
      name: node-exporter
    spec:
      serviceAccountName: node-exporter
      containers:
      - args:
        - --web.listen-address=127.0.0.1:9101
        - --path.procfs=/host/proc
        - --path.sysfs=/host/sys
        image: quay.io/prometheus/node-exporter:v0.15.2
        name: node-exporter
        resources:
          limits:
            cpu: 102m
            memory: 180Mi
          requests:
            cpu: 102m
            memory: 180Mi
        volumeMounts:
        - mountPath: /host/proc
          name: proc
          readOnly: false
        - mountPath: /host/sys
          name: sys
          readOnly: false
      - args:
        - --secure-listen-address=:9100
        - --upstream=http://127.0.0.1:9101/
        image: quay.io/coreos/kube-rbac-proxy:v0.3.0
        name: kube-rbac-proxy
        ports:
        - containerPort: 9100
          name: https
        resources:
          limits:
            cpu: 20m
            memory: 40Mi
          requests:
            cpu: 10m
            memory: 20Mi
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      hostNetwork: true
      hostPID: true
      containers:
      - image: quay.io/prometheus/node-exporter:v0.15.2
        args:
        - "--web.listen-address=127.0.0.1:9101"
        - "--path.procfs=/host/proc"
        - "--path.sysfs=/host/sys"
        name: node-exporter
        resources:
          requests:
            memory: 30Mi
            cpu: 100m
          limits:
            memory: 50Mi
            cpu: 200m
        volumeMounts:
        - name: proc
          readOnly: true
          mountPath: /host/proc
        - name: sys
          readOnly: true
          mountPath: /host/sys
      - name: kube-rbac-proxy
        image: quay.io/brancz/kube-rbac-proxy:v0.2.0
        args:
        - "--secure-listen-address=:9100"
        - "--upstream=http://127.0.0.1:9101/"
        ports:
        - containerPort: 9100
          hostPort: 9100
          name: https
        resources:
          requests:
            memory: 20Mi
            cpu: 10m
          limits:
            memory: 40Mi
            cpu: 20m
      tolerations:
      - effect: NoSchedule
        operator: Exists
      serviceAccountName: node-exporter
      volumes:
      - name: proc
        hostPath:
      - hostPath:
          path: /proc
      - name: sys
        hostPath:
        name: proc
      - hostPath:
          path: /sys
        name: sys
@@ -2,3 +2,4 @@ apiVersion: v1
kind: ServiceAccount
metadata:
  name: node-exporter
  namespace: monitoring
@@ -2,16 +2,13 @@ apiVersion: v1
kind: Service
metadata:
  labels:
    app: node-exporter
    k8s-app: node-exporter
  name: node-exporter
  namespace: monitoring
spec:
  type: ClusterIP
  clusterIP: None
  ports:
  - name: https
    port: 9100
    protocol: TCP
    targetPort: https
  selector:
    app: node-exporter
@@ -0,0 +1,12 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: prometheus-k8s
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: prometheus-k8s
subjects:
- kind: ServiceAccount
  name: prometheus-k8s
  namespace: monitoring
@@ -0,0 +1,15 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: prometheus-k8s
rules:
- apiGroups:
  - ""
  resources:
  - nodes/metrics
  verbs:
  - get
- nonResourceURLs:
  - /metrics
  verbs:
  - get
@@ -0,0 +1,13 @@
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: prometheus-k8s-config
  namespace: monitoring
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: prometheus-k8s-config
subjects:
- kind: ServiceAccount
  name: prometheus-k8s-config
  namespace: monitoring
Some files were not shown because too many files have changed in this diff.