Mirror of https://github.com/prometheus-operator/prometheus-operator.git (synced 2025-04-15 16:56:24 +00:00)
kube-prometheus/jsonnet: Use jsonnet-bundler
parent f7cb6f5906
commit 1f582ad398
150 changed files with 11574 additions and 7258 deletions
|
@ -37,7 +37,7 @@ The manifests used here use the [Prometheus Operator](https://github.com/coreos/
|
|||
>
|
||||
> In future versions of minikube and kubeadm this will be the default, but for the time being, we will have to configure it ourselves.
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-operator/prometheus-operator-deployment.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/0prometheus-operator-deployment.yaml)
|
||||
```yaml
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
|
@ -72,6 +72,8 @@ spec:
|
|||
requests:
|
||||
cpu: 100m
|
||||
memory: 50Mi
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
|
@ -136,7 +138,7 @@ spec:
|
|||
|
||||
Unrelated to Kubernetes itself, but still important, is gathering various metrics about the actual nodes. Typical metrics are CPU, memory, disk and network utilization; all of these can be gathered using the node_exporter.
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/node-exporter/node-exporter-daemonset.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/node-exporter-daemonset.yaml)
|
||||
```yaml
|
||||
apiVersion: apps/v1beta2
|
||||
kind: DaemonSet
|
||||
|
@ -190,10 +192,15 @@ spec:
|
|||
requests:
|
||||
cpu: 10m
|
||||
memory: 20Mi
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
serviceAccountName: node-exporter
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
key: node-role.kubernetes.io/master
|
||||
volumes:
|
||||
- hostPath:
|
||||
path: /proc
|
||||
|
@ -205,7 +212,7 @@ spec:
|
|||
|
||||
And the respective `Service` manifest:
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/node-exporter/node-exporter-service.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/node-exporter-service.yaml)
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
|
@ -215,6 +222,7 @@ metadata:
|
|||
name: node-exporter
|
||||
namespace: monitoring
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: https
|
||||
port: 9100
|
||||
|
@ -225,7 +233,7 @@ spec:
|
|||
|
||||
And last but not least, kube-state-metrics, which collects information about the Kubernetes objects themselves as they are accessible from the API. Find more information on what kind of metrics kube-state-metrics exposes in [its repository](https://github.com/kubernetes/kube-state-metrics).
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/kube-state-metrics/kube-state-metrics-deployment.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/kube-state-metrics-deployment.yaml)
|
||||
```yaml
|
||||
apiVersion: apps/v1beta2
|
||||
kind: Deployment
|
||||
|
@ -318,6 +326,8 @@ spec:
|
|||
requests:
|
||||
cpu: 10m
|
||||
memory: 30Mi
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
securityContext:
|
||||
runAsNonRoot: true
|
||||
runAsUser: 65534
|
||||
|
@ -328,7 +338,7 @@ spec:
|
|||
|
||||
And the respective `Service` manifest:
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/kube-state-metrics/kube-state-metrics-service.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/kube-state-metrics-service.yaml)
|
||||
```yaml
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
|
@ -338,6 +348,7 @@ metadata:
|
|||
name: kube-state-metrics
|
||||
namespace: monitoring
|
||||
spec:
|
||||
clusterIP: None
|
||||
ports:
|
||||
- name: https-main
|
||||
port: 8443
|
||||
|
@ -353,7 +364,7 @@ spec:
|
|||
|
||||
Once all the steps in the previous section have been taken, there should be `Endpoints` objects containing the IPs of all of the above-mentioned Kubernetes components. Now to set up the actual Prometheus and Alertmanager clusters. This manifest assumes that the Alertmanager cluster will be deployed in the `monitoring` namespace.
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-prometheus.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: Prometheus
|
||||
|
@ -368,6 +379,9 @@ spec:
|
|||
- name: alertmanager-main
|
||||
namespace: monitoring
|
||||
port: web
|
||||
baseImage: quay.io/prometheus/prometheus
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
replicas: 2
|
||||
resources:
|
||||
requests:
|
||||
|
@ -388,7 +402,7 @@ spec:
|
|||
|
||||
The expression to match for selecting `ServiceMonitor`s here is that they must have a label with a key called `k8s-app`. If you look closely at all the `Service` objects described above, they all have a `k8s-app` label set to their component name, which allows them to be conveniently selected with `ServiceMonitor`s.
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-apiserver.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-service-monitor-apiserver.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
|
@ -416,7 +430,7 @@ spec:
|
|||
provider: kubernetes
|
||||
```
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-kubelet.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-service-monitor-kubelet.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
|
@ -450,7 +464,7 @@ spec:
|
|||
k8s-app: kubelet
|
||||
```
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-kube-controller-manager.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-service-monitor-kube-controller-manager.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
|
@ -472,7 +486,7 @@ spec:
|
|||
k8s-app: kube-controller-manager
|
||||
```
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-k8s/prometheus-k8s-service-monitor-kube-scheduler.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/prometheus-service-monitor-kube-scheduler.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
|
@ -494,7 +508,7 @@ spec:
|
|||
k8s-app: kube-scheduler
|
||||
```
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/kube-state-metrics/kube-state-metrics-service-monitor.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/kube-state-metrics-service-monitor.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
|
@ -527,7 +541,7 @@ spec:
|
|||
k8s-app: kube-state-metrics
|
||||
```
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/node-exporter/node-exporter-service-monitor.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/node-exporter-service-monitor.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: ServiceMonitor
|
||||
|
@ -555,7 +569,7 @@ spec:
|
|||
|
||||
And the Alertmanager:
|
||||
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/alertmanager-main/alertmanager-main.yaml)
|
||||
[embedmd]:# (../../contrib/kube-prometheus/manifests/alertmanager-alertmanager.yaml)
|
||||
```yaml
|
||||
apiVersion: monitoring.coreos.com/v1
|
||||
kind: Alertmanager
|
||||
|
@ -565,6 +579,9 @@ metadata:
|
|||
name: main
|
||||
namespace: monitoring
|
||||
spec:
|
||||
baseImage: quay.io/prometheus/alertmanager
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/os: linux
|
||||
replicas: 3
|
||||
serviceAccountName: alertmanager-main
|
||||
version: v0.14.0
|
||||
|
|
Makefile (17 lines changed)
|
@ -71,7 +71,7 @@ docs: embedmd po-docgen
|
|||
$(GOPATH)/bin/po-docgen compatibility > Documentation/compatibility.md
|
||||
|
||||
generate: jsonnet-docker
|
||||
docker run --rm -u=$(shell id -u $(USER)):$(shell id -g $(USER)) -v `pwd`:/go/src/github.com/coreos/prometheus-operator po-jsonnet make generate-deepcopy generate-openapi jsonnet generate-bundle generate-kube-prometheus docs generate-crd
|
||||
docker run --rm -u=$(shell id -u $(USER)):$(shell id -g $(USER)) -v `pwd`:/go/src/github.com/coreos/prometheus-operator po-jsonnet make generate-deepcopy generate-openapi generate-crd jsonnet generate-bundle generate-kube-prometheus docs
|
||||
|
||||
|
||||
$(GOBIN)/openapi-gen:
|
||||
|
@ -101,14 +101,17 @@ generate-bundle:
|
|||
generate-kube-prometheus:
|
||||
# Update the Prometheus Operator version in kube-prometheus
|
||||
sed -i \
|
||||
"s/local version = \".*\";/local version = \"v$(shell cat VERSION)\";/" \
|
||||
contrib/kube-prometheus/jsonnet/prometheus-operator/prometheus-operator-deployment.libsonnet;
|
||||
"s/prometheusOperator: 'v.*',/prometheusOperator: 'v$(shell cat VERSION)',/" \
|
||||
contrib/kube-prometheus/jsonnet/kube-prometheus/prometheus-operator/prometheus-operator.libsonnet;
|
||||
cd contrib/kube-prometheus; $(MAKE) generate-raw
|
||||
|
||||
jsonnet:
|
||||
jsonnet -J /go/src/github.com/ksonnet/ksonnet-lib hack/generate/prometheus-operator.jsonnet | gojsontoyaml > example/non-rbac/prometheus-operator.yaml
|
||||
jsonnet -J /go/src/github.com/ksonnet/ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | gojsontoyaml > example/rbac/prometheus-operator/prometheus-operator.yaml
|
||||
jsonnet -J /go/src/github.com/ksonnet/ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | gojsontoyaml > contrib/kube-prometheus/manifests/prometheus-operator/prometheus-operator.yaml
|
||||
jsonnet: jb
|
||||
cd hack/generate; jb install
|
||||
jsonnet -J hack/generate/vendor hack/generate/prometheus-operator.jsonnet | gojsontoyaml > example/non-rbac/prometheus-operator.yaml
|
||||
jsonnet -J hack/generate/vendor hack/generate/prometheus-operator-rbac.jsonnet | gojsontoyaml > example/rbac/prometheus-operator/prometheus-operator.yaml
|
||||
|
||||
jb:
|
||||
go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb
|
||||
|
||||
jsonnet-docker:
|
||||
docker build -f scripts/jsonnet/Dockerfile -t po-jsonnet .
|
||||
|
|
contrib/kube-prometheus/.gitignore (vendored, 1 line changed)
|
@ -1 +1,2 @@
|
|||
tmp/
|
||||
minikube-manifests/
|
||||
|
|
|
@ -7,5 +7,11 @@ generate: image
|
|||
@echo ">> Compiling assets and generating Kubernetes manifests"
|
||||
docker run --rm -u=$(shell id -u $(USER)):$(shell id -g $(USER)) -v `pwd`:/go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus --workdir /go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus po-jsonnet make generate-raw
|
||||
|
||||
crdtojsonnet:
|
||||
cat ../../example/prometheus-operator-crd/alertmanager.crd.yaml | gojsontoyaml -yamltojson > jsonnet/kube-prometheus/prometheus-operator/alertmanager-crd.libsonnet
|
||||
cat ../../example/prometheus-operator-crd/prometheus.crd.yaml | gojsontoyaml -yamltojson > jsonnet/kube-prometheus/prometheus-operator/prometheus-crd.libsonnet
|
||||
cat ../../example/prometheus-operator-crd/servicemonitor.crd.yaml | gojsontoyaml -yamltojson > jsonnet/kube-prometheus/prometheus-operator/servicemonitor-crd.libsonnet
|
||||
|
||||
generate-raw:
|
||||
./hack/scripts/build-jsonnet.sh example-dist/base/kube-prometheus.jsonnet manifests
|
||||
cd jsonnet/kube-prometheus; jb install
|
||||
./hack/scripts/build-jsonnet.sh hack/scripts/kube-prometheus-base.jsonnet manifests
|
||||
|
|
|
@ -2,221 +2,184 @@
|
|||
|
||||
> Note that everything in the `contrib/kube-prometheus/` directory is experimental and may change significantly at any time.
|
||||
|
||||
This repository collects Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and
|
||||
[Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/)
|
||||
combined with documentation and scripts to provide single-command deployments of end-to-end
|
||||
Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) (Operator).
|
||||
This repository collects Kubernetes manifests, [Grafana](http://grafana.com/) dashboards, and [Prometheus rules](https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/) combined with documentation and scripts to provide easy-to-operate end-to-end Kubernetes cluster monitoring with [Prometheus](https://prometheus.io/) using the Prometheus Operator.
|
||||
|
||||
The content of this project is written in [jsonnet](http://jsonnet.org/). This project can be described both as a package and as a library.
|
||||
|
||||
Components included in this package:
|
||||
|
||||
* The [Prometheus Operator](https://github.com/coreos/prometheus-operator)
|
||||
* Highly available [Prometheus](https://prometheus.io/)
|
||||
* Highly available [Alertmanager](https://github.com/prometheus/alertmanager)
|
||||
* [Prometheus node-exporter](https://github.com/prometheus/node_exporter)
|
||||
* [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)
|
||||
* [Grafana](https://grafana.com/)
|
||||
|
||||
This stack is meant for cluster monitoring, so it is pre-configured to collect metrics from all Kubernetes components. In addition to that, it delivers a default set of dashboards and alerting rules. Many of the useful dashboards and alerts come from the [kubernetes-mixin project](https://github.com/kubernetes-monitoring/kubernetes-mixin), which, similar to this project, provides composable jsonnet as a library for users to customize to their needs.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
First, you need a running Kubernetes cluster. If you don't have one, we recommend you create one
|
||||
with [Tectonic Installer](https://coreos.com/tectonic/docs/latest/). Despite the name,
|
||||
Tectonic Installer gives you also the choice to create a barebones Kubernetes cluster, without
|
||||
CoreOS' Tectonic technology. Otherwise, you can simply make use of
|
||||
[bootkube](https://github.com/kubernetes-incubator/bootkube) or
|
||||
[minikube](https://github.com/kubernetes/minikube) for local testing. Some sample contents of this
|
||||
repository are adapted to work with a [multi-node setup](https://github.com/kubernetes-incubator/bootkube/tree/master/hack/multi-node)
|
||||
using [bootkube](https://github.com/kubernetes-incubator/bootkube).
|
||||
You will need a Kubernetes cluster; that's it! By default it is assumed that the kubelet uses token authN and authZ, as otherwise Prometheus would need a client certificate, which gives it full access to the kubelet rather than just the metrics. Token authN and authZ allow more fine-grained and easier access control.
|
||||
|
||||
### minikube
|
||||
|
||||
> We assume that the kubelet uses token authN and authZ, as otherwise
|
||||
> Prometheus needs a client certificate, which gives it full access to the
|
||||
> kubelet, rather than just the metrics. Token authN and authZ allows more fine
|
||||
> grained and easier access control. Simply start minikube with the following
|
||||
> command (you can of course adapt the version and memory to your needs):
|
||||
>
|
||||
> $ minikube delete && minikube start --kubernetes-version=v1.9.1 --memory=4096 --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
|
||||
>
|
||||
> In future versions of minikube and kubeadm this will be the default, but for
|
||||
> the time being, we will have to configure it ourselves.
|
||||
In order to just try out this stack, start minikube with the following command:
|
||||
|
||||
## Monitoring Kubernetes
|
||||
```
|
||||
$ minikube delete && minikube start --kubernetes-version=v1.10.1 --memory=4096 --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
|
||||
```
|
||||
|
||||
The manifests here use the [Prometheus Operator](https://github.com/coreos/prometheus-operator),
|
||||
which manages Prometheus servers and their configuration in a cluster. With a single command we can
|
||||
install
|
||||
## Quickstart
|
||||
|
||||
* The Operator itself
|
||||
* The Prometheus [node_exporter](https://github.com/prometheus/node_exporter)
|
||||
* [kube-state-metrics](https://github.com/kubernetes/kube-state-metrics)
|
||||
* The [Prometheus specification](https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#prometheus) based on which the Operator deploys a Prometheus setup
|
||||
* A Prometheus configuration covering monitoring of all Kubernetes core components and exporters
|
||||
* A default set of alerting rules on the cluster components' health
|
||||
* A Grafana instance serving dashboards on cluster metrics
|
||||
* A three node highly available Alertmanager cluster
|
||||
Although this project is intended to be used as a library, a compiled version of the Kubernetes manifests generated with this library is checked into this repository in order to try the content out quickly.
|
||||
|
||||
Simply run:
|
||||
Simply create the stack:
|
||||
|
||||
```
|
||||
$ kubectl create -f manifests/
|
||||
```
|
||||
|
||||
## Usage
|
||||
|
||||
The content of this project consists of a set of [jsonnet](http://jsonnet.org/) files making up a library to be consumed.
|
||||
|
||||
Install this library in your own project with [jsonnet-bundler](https://github.com/jsonnet-bundler/jsonnet-bundler#install):
|
||||
|
||||
```
|
||||
$ mkdir my-kube-prometheus; cd my-kube-prometheus
|
||||
$ jb init
|
||||
$ jb install github.com/coreos/prometheus-operator/contrib/kube-prometheus/jsonnet/kube-prometheus
|
||||
```
|
||||
|
||||
> `jb` can be installed with `go get github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb`
|
||||
|
||||
You may wish to not use ksonnet and simply render the generated manifests to files on disk; this can be done with:
|
||||
|
||||
[embedmd]:# (hack/scripts/kube-prometheus-base.jsonnet)
|
||||
```jsonnet
|
||||
local kp = (import "kube-prometheus/kube-prometheus.libsonnet") + {
|
||||
_config+:: {
|
||||
namespace: "monitoring",
|
||||
}
|
||||
};
|
||||
|
||||
{["0prometheus-operator-"+name+".yaml"]: std.manifestYamlDoc(kp.prometheusOperator[name]) for name in std.objectFields(kp.prometheusOperator)} +
|
||||
{["node-exporter-"+name+".yaml"]: std.manifestYamlDoc(kp.nodeExporter[name]) for name in std.objectFields(kp.nodeExporter)} +
|
||||
{["kube-state-metrics-"+name+".yaml"]: std.manifestYamlDoc(kp.kubeStateMetrics[name]) for name in std.objectFields(kp.kubeStateMetrics)} +
|
||||
{["alertmanager-"+name+".yaml"]: std.manifestYamlDoc(kp.alertmanager[name]) for name in std.objectFields(kp.alertmanager)} +
|
||||
{["prometheus-"+name+".yaml"]: std.manifestYamlDoc(kp.prometheus[name]) for name in std.objectFields(kp.prometheus)} +
|
||||
{["grafana-"+name+".yaml"]: std.manifestYamlDoc(kp.grafana[name]) for name in std.objectFields(kp.grafana)}
|
||||
```
|
||||
|
||||
This renders all manifests in a json structure of `{filename: manifest-content}`. To split this into files on disk use:
|
||||
|
||||
> Note you need `jsonnet`, `jq`, `sed`, `tr` and `gojsontoyaml` (`go get github.com/brancz/gojsontoyaml`) installed.
|
||||
|
||||
```bash
|
||||
cd contrib/kube-prometheus/
|
||||
hack/cluster-monitoring/deploy
|
||||
jsonnet -J vendor example.jsonnet > tmp.json
|
||||
|
||||
files=$(jq -r 'keys[]' tmp.json)
|
||||
|
||||
for file in ${files}; do
|
||||
# prepare directory
|
||||
dir=$(dirname "${file}")
|
||||
path="${dir}"
|
||||
mkdir -p ${path}
|
||||
|
||||
# convert file name to snake case with dashes
|
||||
fullfile=$(echo ${file} | sed -r 's/([a-z0-9])([A-Z])/\1-\L\2/g' | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
# write each value to the path in key; convert multiple times to prettify yaml
|
||||
jq -r ".[\"${file}\"]" tmp.json | gojsontoyaml -yamltojson | gojsontoyaml > "${fullfile}"
|
||||
done
|
||||
|
||||
rm tmp.json
|
||||
```
|
||||
|
||||
After all pods are ready, you can reach each of the UIs by port-forwarding:
|
||||
## Configuration
|
||||
|
||||
* Prometheus UI on node port `kubectl -n monitoring port-forward prometheus-k8s-0 9090`
|
||||
* Alertmanager UI on node port `kubectl -n monitoring port-forward alertmanager-main-0 9093`
|
||||
* Grafana on node port `kubectl -n monitoring port-forward $(kubectl get pods -n monitoring -lapp=grafana -ojsonpath='{range .items[*]}{.metadata.name}{"\n"}{end}') 3000`
|
||||
|
||||
To tear it all down again, run:
|
||||
|
||||
```bash
|
||||
hack/cluster-monitoring/teardown
|
||||
```
|
||||
|
||||
## Customizing
|
||||
|
||||
As everyone's infrastructure is slightly different, different organizations have different requirements. Therefore, there may be modifications you want to make to kube-prometheus to fit your needs.
|
||||
|
||||
The kube-prometheus stack is intended to be a jsonnet library for organizations to consume and use in their own infrastructure repository. Below is an example of how it can be used to deploy the stack properly on minikube.
|
||||
|
||||
The three "distribution" examples we have assembled can be found in:
|
||||
|
||||
* `example-dist/base`: contains the plain kube-prometheus stack for organizations to build on.
|
||||
* `example-dist/kubeadm`: contains the kube-prometheus stack with slight modifications to work properly monitoring kubeadm clusters and exposes UIs on NodePorts for demonstration purposes.
|
||||
* `example-dist/bootkube`: contains the kube-prometheus stack with slight modifications to work properly on clusters created with bootkube.
|
||||
|
||||
The examples in `example-dist/` are purely meant for demonstration purposes; the `kube-prometheus.jsonnet` file should live in your organization's infrastructure repository and use the kube-prometheus library provided here.
|
||||
|
||||
An example of an additional modification you may want to make is adding an `Ingress` object for each of the UIs. But the point is that, as opposed to other solutions out there, this library does not need to provide all possible customization options; it is all up to the user to customize!
|
||||
|
||||
### minikube kubeadm example
|
||||
|
||||
See `example-dist/kubeadm` for an example for deploying on minikube, using the minikube kubeadm bootstrapper. The `example-dist/kubeadm/kube-prometheus.jsonnet` file renders the kube-prometheus manifests using jsonnet and then merges the result with kubeadm specifics, such as information on how to monitor kube-controller-manager and kube-scheduler as created by kubeadm. In addition for demonstration purposes, it converts the services selecting Prometheus, Alertmanager and Grafana to NodePort services.
|
||||
|
||||
Let's give that a try, and create a minikube cluster:
|
||||
A hidden `_config` field is located at the top level of the object this library provides. These are the available fields with their respective default values:
|
||||
|
||||
```
|
||||
minikube delete && minikube start --kubernetes-version=v1.9.6 --memory=4096 --bootstrapper=kubeadm --extra-config=kubelet.authentication-token-webhook=true --extra-config=kubelet.authorization-mode=Webhook --extra-config=scheduler.address=0.0.0.0 --extra-config=controller-manager.address=0.0.0.0
|
||||
{
|
||||
_config+:: {
|
||||
namespace: "default",
|
||||
|
||||
versions+:: {
|
||||
alertmanager: "v0.14.0",
|
||||
nodeExporter: "v0.15.2",
|
||||
kubeStateMetrics: "v1.3.0",
|
||||
kubeRbacProxy: "v0.3.0",
|
||||
addonResizer: "1.0",
|
||||
prometheusOperator: "v0.18.1",
|
||||
prometheus: "v2.2.1",
|
||||
},
|
||||
|
||||
imageRepos+:: {
|
||||
prometheus: "quay.io/prometheus/prometheus",
|
||||
alertmanager: "quay.io/prometheus/alertmanager",
|
||||
kubeStateMetrics: "quay.io/coreos/kube-state-metrics",
|
||||
kubeRbacProxy: "quay.io/coreos/kube-rbac-proxy",
|
||||
addonResizer: "quay.io/coreos/addon-resizer",
|
||||
nodeExporter: "quay.io/prometheus/node-exporter",
|
||||
prometheusOperator: "quay.io/coreos/prometheus-operator",
|
||||
},
|
||||
|
||||
prometheus+:: {
|
||||
replicas: 2,
|
||||
rules: {},
|
||||
},
|
||||
|
||||
alertmanager+:: {
|
||||
config: alertmanagerConfig,
|
||||
replicas: 3,
|
||||
},
|
||||
},
|
||||
}
|
||||
```
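Any of these defaults can be overridden by merging into `_config`. A minimal sketch, assuming the field names listed above:

```jsonnet
// Sketch only: override a few of the _config defaults by merging into the
// object the library provides; field names follow the defaults listed above.
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
    prometheus+:: {
      replicas: 3,  // default is 2
    },
  },
};

// The customized objects can then be rendered as usual, e.g.:
kp.prometheus.prometheus
```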
|
||||
|
||||
Then we can render the manifests for kubeadm (because we are using the minikube kubeadm bootstrapper):
|
||||
## Customization
|
||||
|
||||
```
|
||||
docker run --rm \
|
||||
-v `pwd`:/go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus \
|
||||
--workdir /go/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus \
|
||||
po-jsonnet \
|
||||
./hack/scripts/build-jsonnet.sh example-dist/kubeadm/kube-prometheus.jsonnet example-dist/kubeadm/manifests
|
||||
Jsonnet is a Turing-complete language; any logic can be expressed in it. It also has powerful merge functionality, allowing sophisticated customizations of any kind simply by merging them into the object the library provides.
|
||||
|
||||
A common example is that not all Kubernetes clusters are created exactly the same way, meaning the configuration to monitor them may be slightly different. For [kubeadm]() and [bootkube]() clusters there are mixins available to easily configure these:
|
||||
|
||||
kubeadm:
|
||||
[embedmd]:# (examples/kubeadm.jsonnet)
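For reference, `examples/kubeadm.jsonnet` as added in this commit is simply the base library plus the kubeadm mixin:

```jsonnet
(import "kube-prometheus/kube-prometheus.libsonnet") +
(import "kube-prometheus/kube-prometheus-kubeadm.libsonnet")
```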
|
||||
|
||||
bootkube:
|
||||
[embedmd]:# (examples/bootkube.jsonnet)
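Likewise, `examples/bootkube.jsonnet` merges in the bootkube mixin:

```jsonnet
(import "kube-prometheus/kube-prometheus.libsonnet") +
(import "kube-prometheus/kube-prometheus-bootkube.libsonnet")
```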
|
||||
|
||||
Another mixin that may be useful for exploring the stack is to expose the UIs of Prometheus, Alertmanager and Grafana on NodePorts:
|
||||
|
||||
[embedmd]:# (examples/node-ports.jsonnet)
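`examples/node-ports.jsonnet`, also added in this commit, works the same way:

```jsonnet
(import "kube-prometheus/kube-prometheus.libsonnet") +
(import "kube-prometheus/kube-prometheus-node-ports.libsonnet")
```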
|
||||
|
||||
For example, the name of the `Prometheus` object provided by this library can be overridden:
|
||||
|
||||
[embedmd]:# (examples/prometheus-name-override.jsonnet)
|
||||
```jsonnet
|
||||
((import "kube-prometheus/kube-prometheus.libsonnet") + {
|
||||
prometheus+: {
|
||||
prometheus+: {
|
||||
metadata+: {
|
||||
name: "my-name",
|
||||
}
|
||||
}
|
||||
}
|
||||
}).prometheus.prometheus
|
||||
```
|
||||
|
||||
> Note the `po-jsonnet` docker image is built using [this Dockerfile](/scripts/jsonnet/Dockerfile); you can also build it using `make image` from the `contrib/kube-prometheus` folder.
|
||||
Standard Kubernetes manifests are all written using [ksonnet-lib](https://github.com/ksonnet/ksonnet-lib/), so they can be modified with the mixins supplied by ksonnet-lib. For example, to override the namespace of the node-exporter DaemonSet:
|
||||
|
||||
Then the stack can be deployed using
|
||||
[embedmd]:# (examples/ksonnet-example.jsonnet)
|
||||
```jsonnet
|
||||
local k = import "ksonnet/ksonnet.beta.3/k.libsonnet";
|
||||
local daemonset = k.apps.v1beta2.daemonSet;
|
||||
|
||||
((import "kube-prometheus/kube-prometheus.libsonnet") + {
|
||||
nodeExporter+: {
|
||||
daemonset+:
|
||||
daemonset.mixin.metadata.withNamespace("my-custom-namespace")
|
||||
}
|
||||
}).nodeExporter.daemonset
|
||||
```
|
||||
hack/cluster-monitoring/deploy example-dist/kubeadm
|
||||
```
|
||||
|
||||
## Monitoring custom services
|
||||
|
||||
The example manifests in [examples/example-app](/contrib/kube-prometheus/examples/example-app)
|
||||
deploy a fake service exposing Prometheus metrics. They additionally define a new Prometheus
|
||||
server and a [`ServiceMonitor`](https://github.com/coreos/prometheus-operator/blob/master/Documentation/design.md#servicemonitor),
|
||||
which specifies how the example service should be monitored.
|
||||
The Prometheus Operator will deploy and configure the desired Prometheus instance and continuously
|
||||
manage its life cycle.
|
||||
|
||||
```bash
|
||||
hack/example-service-monitoring/deploy
|
||||
```
|
||||
|
||||
After all pods are ready, you can reach this Prometheus server, similar to the Prometheus server above:
|
||||
|
||||
```bash
|
||||
kubectl port-forward prometheus-frontend-0 9090
|
||||
```
|
||||
|
||||
Then you can access Prometheus through `http://localhost:9090/`.
|
||||
|
||||
Teardown:
|
||||
|
||||
```bash
|
||||
hack/example-service-monitoring/teardown
|
||||
```
|
||||
|
||||
## Dashboarding
|
||||
|
||||
The provided manifests deploy a Grafana instance serving dashboards provided via ConfigMaps.
|
||||
Said ConfigMaps are generated from Python scripts in assets/grafana, which all have the extension
|
||||
.dashboard.py as they are loaded by the [grafanalib](https://github.com/aknuds1/grafanalib)
|
||||
Grafana dashboard generator. Bear in mind that we are for now using a fork of grafanalib as
|
||||
we needed to make extensive changes to it, in order to be able to generate our dashboards. We are
|
||||
hoping to be able to consolidate our version with the original.
|
||||
|
||||
As such, in order to make changes to the dashboard bundle, you need to change the \*.dashboard.py
|
||||
files in assets/grafana, eventually add your own, and then run `make generate` in the
|
||||
kube-prometheus root directory.
|
||||
|
||||
To read more in depth about developing dashboards, read the
|
||||
[Developing Prometheus Rules and Grafana Dashboards](docs/developing-alerts-and-dashboards.md)
|
||||
documentation.
|
||||
|
||||
### Reloading of dashboards
|
||||
|
||||
Currently, Grafana does not support serving dashboards from static files. Instead, the `grafana-watcher`
|
||||
sidecar container aims to emulate the behavior, by keeping the Grafana database always in sync
|
||||
with the provided ConfigMap. Hence, the Grafana pod is effectively stateless.
|
||||
This allows managing dashboards via `git` etc. and easily deploying them via CD pipelines.
|
||||
|
||||
In the future, a separate Grafana operator will support gathering dashboards from multiple
|
||||
ConfigMaps based on label selection.
|
||||
|
||||
WARNING: If you deploy multiple Grafana instances for HA, you must use session affinity.
|
||||
Otherwise, if pods restart, the Prometheus datasource ID can get out of sync between the pods,
|
||||
breaking the UI.
|
||||
|
||||
## Roadmap
|
||||
|
||||
* Grafana Operator that dynamically discovers and deploys dashboards from ConfigMaps
|
||||
* KPM/Helm packages to easily provide production-ready cluster-monitoring setup (essentially contents of `hack/cluster-monitoring`)
|
||||
* Add meta-monitoring to default cluster monitoring setup
|
||||
* Build out the provided dashboards and alerts for cluster monitoring to have full coverage of all system aspects
|
||||
|
||||
## Monitoring other Cluster Components
|
||||
|
||||
Discovery of API servers and kubelets works the same across all clusters.
|
||||
Depending on a cluster's setup several other core components, such as etcd or the
|
||||
scheduler, may be deployed in different ways.
|
||||
The easiest integration point is for the cluster operator to provide headless services
|
||||
for all those components to provide a common interface for discovering them. With that
|
||||
setup they will automatically be discovered by the provided Prometheus configuration.
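For kubeadm-based clusters, the kubeadm mixin added in this commit defines such headless discovery services in jsonnet; excerpted here for the controller manager (the scheduler and kube-dns services in that mixin follow the same pattern):

```jsonnet
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

{
  prometheus+:: {
    // Headless Service so Prometheus can discover the controller manager.
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { 'k8s-app': 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),
  },
}
```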
|
||||
|
||||
For the `kube-scheduler` and `kube-controller-manager` there are headless
|
||||
services prepared, simply add them to your running cluster:
|
||||
|
||||
```bash
|
||||
kubectl -n kube-system create -f manifests/k8s/
|
||||
```
|
||||
|
||||
> Hint: if you use this for a cluster not created with bootkube, make sure you
|
||||
> populate an endpoints object with the address to your `kube-scheduler` and
|
||||
> `kube-controller-manager`, or adapt the label selectors to match your setup.
|
||||
|
||||
Aside from Kubernetes specific components, etcd is an important part of a
|
||||
working cluster, but is typically deployed outside of it. This monitoring
|
||||
setup assumes that it is made visible from within the cluster through a headless
|
||||
service as well.
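A minimal sketch of such a headless etcd Service, assuming the `etcd-k8s` name, `monitoring` namespace and port 2379 from the endpoints listing below; the label selector is illustrative only, and a real setup also needs a matching `Endpoints` object (or equivalent) pointing at your etcd addresses:

```jsonnet
// Hypothetical sketch: a headless Service exposing an externally running etcd
// to Prometheus service discovery, written with the same ksonnet-lib helpers
// used elsewhere in this repository.
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

service.new('etcd-k8s', { 'k8s-app': 'etcd' }, servicePort.newNamed('metrics', 2379, 2379)) +
service.mixin.metadata.withNamespace('monitoring') +
service.mixin.metadata.withLabels({ 'k8s-app': 'etcd' }) +
service.mixin.spec.withClusterIp('None')
```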
|
||||
|
||||
> Note that minikube hides some components like etcd, so to see the extent of
|
||||
> this setup we recommend setting up a [local cluster using bootkube](https://github.com/kubernetes-incubator/bootkube/tree/master/hack/multi-node).
|
||||
|
||||
An example for bootkube's multi-node vagrant setup is [here](/contrib/kube-prometheus/manifests/etcd/etcd-bootkube-vagrant-multi.yaml).
|
||||
|
||||
> Hint: this is merely an example for a local setup. The addresses will have to
|
||||
> be adapted for a setup that is not a single-etcd, bootkube-created cluster.
|
||||
|
||||
With that setup the headless services provide endpoint lists consumed by
|
||||
Prometheus to discover the endpoints as targets:
|
||||
|
||||
```bash
|
||||
$ kubectl get endpoints --all-namespaces
|
||||
NAMESPACE NAME ENDPOINTS AGE
|
||||
default kubernetes 172.17.4.101:443 2h
|
||||
kube-system kube-controller-manager-prometheus-discovery 10.2.30.2:10252 1h
|
||||
kube-system kube-scheduler-prometheus-discovery 10.2.30.4:10251 1h
|
||||
monitoring etcd-k8s 172.17.4.51:2379 1h
|
||||
```
|
||||
|
||||
## Other Documentation
|
||||
[Install Docs for a cluster created with KOPS on AWS](docs/KOPSonAWS.md)
|
||||
|
|
|
@ -1,6 +0,0 @@
|
|||
local kubePrometheus = import "kube-prometheus.libsonnet";
|
||||
|
||||
local namespace = "monitoring";
|
||||
local objects = kubePrometheus.new(namespace);
|
||||
|
||||
{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
|
|
@ -1,2 +0,0 @@
|
|||
tmp/
|
||||
manifests/
|
|
@ -1,36 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
local kubePrometheus = import "kube-prometheus.libsonnet";
|
||||
|
||||
local namespace = "monitoring";
|
||||
|
||||
local controllerManagerService = service.new("kube-controller-manager-prometheus-discovery", {"k8s-app": "kube-controller-manager"}, servicePort.newNamed("http-metrics", 10252, 10252)) +
|
||||
service.mixin.metadata.withNamespace("kube-system") +
|
||||
service.mixin.metadata.withLabels({"k8s-app": "kube-controller-manager"});
|
||||
|
||||
local schedulerService = service.new("kube-scheduler-prometheus-discovery", {"k8s-app": "kube-scheduler"}, servicePort.newNamed("http-metrics", 10251, 10251)) +
|
||||
service.mixin.metadata.withNamespace("kube-system") +
|
||||
service.mixin.metadata.withLabels({"k8s-app": "kube-scheduler"});
|
||||
|
||||
local kubeDNSService = service.new("kube-dns-prometheus-discovery", {"k8s-app": "kube-dns"}, [servicePort.newNamed("http-metrics-skydns", 10055, 10055), servicePort.newNamed("http-metrics-dnsmasq", 10054, 10054)]) +
|
||||
service.mixin.metadata.withNamespace("kube-system") +
|
||||
service.mixin.metadata.withLabels({"k8s-app": "kube-dns"});
|
||||
|
||||
local objects = kubePrometheus.new(namespace) +
|
||||
{
|
||||
"prometheus-k8s/prometheus-k8s-service.yaml"+:
|
||||
service.mixin.spec.withPorts(servicePort.newNamed("web", 9090, "web") + servicePort.withNodePort(30900)) +
|
||||
service.mixin.spec.withType("NodePort"),
|
||||
"alertmanager-main/alertmanager-main-service.yaml"+:
|
||||
service.mixin.spec.withPorts(servicePort.newNamed("web", 9093, "web") + servicePort.withNodePort(30903)) +
|
||||
service.mixin.spec.withType("NodePort"),
|
||||
"grafana/grafana-service.yaml"+:
|
||||
service.mixin.spec.withPorts(servicePort.newNamed("http", 3000, "http") + servicePort.withNodePort(30902)) +
|
||||
service.mixin.spec.withType("NodePort"),
|
||||
"prometheus-k8s/kube-controller-manager-prometheus-discovery-service.yaml": controllerManagerService,
|
||||
"prometheus-k8s/kube-scheduler-prometheus-discovery-service.yaml": schedulerService,
|
||||
"prometheus-k8s/kube-dns-prometheus-discovery-service.yaml": kubeDNSService,
|
||||
};
|
||||
|
||||
{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
|
|
@ -1,2 +0,0 @@
|
|||
tmp/
|
||||
manifests/
|
|
@ -1,31 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
local kubePrometheus = import "kube-prometheus.libsonnet";
|
||||
|
||||
local namespace = "monitoring";
|
||||
|
||||
local controllerManagerService = service.new("kube-controller-manager-prometheus-discovery", {component: "kube-controller-manager"}, servicePort.newNamed("http-metrics", 10252, 10252)) +
|
||||
service.mixin.metadata.withNamespace("kube-system") +
|
||||
service.mixin.metadata.withLabels({"k8s-app": "kube-controller-manager"});
|
||||
|
||||
local schedulerService = service.new("kube-scheduler-prometheus-discovery", {component: "kube-scheduler"}, servicePort.newNamed("http-metrics", 10251, 10251)) +
|
||||
service.mixin.metadata.withNamespace("kube-system") +
|
||||
service.mixin.metadata.withLabels({"k8s-app": "kube-scheduler"});
|
||||
|
||||
local objects = kubePrometheus.new(namespace) +
|
||||
{
|
||||
"prometheus-k8s/prometheus-k8s-service.yaml"+:
|
||||
service.mixin.spec.withPorts(servicePort.newNamed("web", 9090, "web") + servicePort.withNodePort(30900)) +
|
||||
service.mixin.spec.withType("NodePort"),
|
||||
"alertmanager-main/alertmanager-main-service.yaml"+:
|
||||
service.mixin.spec.withPorts(servicePort.newNamed("web", 9093, "web") + servicePort.withNodePort(30903)) +
|
||||
service.mixin.spec.withType("NodePort"),
|
||||
"grafana/grafana-service.yaml"+:
|
||||
service.mixin.spec.withPorts(servicePort.newNamed("http", 3000, "http") + servicePort.withNodePort(30902)) +
|
||||
service.mixin.spec.withType("NodePort"),
|
||||
"prometheus-k8s/kube-controller-manager-prometheus-discovery-service.yaml": controllerManagerService,
|
||||
"prometheus-k8s/kube-scheduler-prometheus-discovery-service.yaml": schedulerService,
|
||||
};
|
||||
|
||||
{[path]: std.manifestYamlDoc(objects[path]) for path in std.objectFields(objects)}
|
contrib/kube-prometheus/examples/bootkube.jsonnet (new file, 2 lines)
|
@ -0,0 +1,2 @@
|
|||
(import "kube-prometheus/kube-prometheus.libsonnet") +
|
||||
(import "kube-prometheus/kube-prometheus-bootkube.libsonnet")
|
contrib/kube-prometheus/examples/ksonnet-example.jsonnet (new file, 9 lines)
|
@ -0,0 +1,9 @@
|
|||
local k = import "ksonnet/ksonnet.beta.3/k.libsonnet";
|
||||
local daemonset = k.apps.v1beta2.daemonSet;
|
||||
|
||||
((import "kube-prometheus/kube-prometheus.libsonnet") + {
|
||||
nodeExporter+: {
|
||||
daemonset+:
|
||||
daemonset.mixin.metadata.withNamespace("my-custom-namespace")
|
||||
}
|
||||
}).nodeExporter.daemonset
|
contrib/kube-prometheus/examples/kubeadm.jsonnet (new file, 2 lines)
|
@ -0,0 +1,2 @@
|
|||
(import "kube-prometheus/kube-prometheus.libsonnet") +
|
||||
(import "kube-prometheus/kube-prometheus-kubeadm.libsonnet")
|
contrib/kube-prometheus/examples/node-ports.jsonnet (new file, 2 lines)
|
@ -0,0 +1,2 @@
|
|||
(import "kube-prometheus/kube-prometheus.libsonnet") +
|
||||
(import "kube-prometheus/kube-prometheus-node-ports.libsonnet")
|
|
@ -0,0 +1,9 @@
|
|||
((import "kube-prometheus/kube-prometheus.libsonnet") + {
|
||||
prometheus+: {
|
||||
prometheus+: {
|
||||
metadata+: {
|
||||
name: "my-name",
|
||||
}
|
||||
}
|
||||
}
|
||||
}).prometheus.prometheus
|
|
@ -8,11 +8,11 @@ set -u
|
|||
# print each command before executing it
|
||||
set -x
|
||||
|
||||
manifest_prefix=${1-.}
|
||||
manifest_prefix=${1-./manifests}
|
||||
|
||||
kubectl create namespace monitoring
|
||||
|
||||
find ${manifest_prefix}/manifests/prometheus-operator/ -type f ! -name prometheus-operator-service-monitor.yaml -exec kubectl apply -f {} \;
|
||||
find ${manifest_prefix}/prometheus-operator/ -type f ! -name service-monitor.yaml -exec kubectl apply -f {} \;
|
||||
|
||||
# Wait for CRDs to be ready.
|
||||
printf "Waiting for Operator to register custom resource definitions..."
|
||||
|
@ -25,14 +25,14 @@ until kubectl get alertmanagers.monitoring.coreos.com > /dev/null 2>&1; do sleep
|
|||
echo "done!"
|
||||
|
||||
# need to ensure that ServiceMonitors are registered before we can create the prometheus-operator ServiceMonitor
|
||||
kubectl apply -f ${manifest_prefix}/manifests/prometheus-operator/prometheus-operator-service-monitor.yaml
|
||||
kubectl apply -f ${manifest_prefix}/prometheus-operator/service-monitor.yaml
|
||||
|
||||
kubectl apply -f ${manifest_prefix}/manifests/node-exporter/
|
||||
kubectl apply -f ${manifest_prefix}/manifests/kube-state-metrics/
|
||||
find ${manifest_prefix}/manifests/grafana/ -type f ! -name grafana-dashboard-definitions.yaml -exec kubectl apply -f {} \;
|
||||
kubectl apply -f ${manifest_prefix}/node-exporter/
|
||||
kubectl apply -f ${manifest_prefix}/kube-state-metrics/
|
||||
find ${manifest_prefix}/grafana/ -type f ! -name dashboard-definitions.yaml -exec kubectl apply -f {} \;
|
||||
|
||||
# kubectl apply wants to put the previous version in an annotation, which is too large, therefore create instead of apply
|
||||
kubectl create -f ${manifest_prefix}/manifests/grafana/grafana-dashboard-definitions.yaml
|
||||
kubectl apply -f ${manifest_prefix}/manifests/prometheus-k8s/
|
||||
kubectl apply -f ${manifest_prefix}/manifests/alertmanager-main/
|
||||
kubectl create -f ${manifest_prefix}/grafana/dashboard-definitions.yaml
|
||||
kubectl apply -f ${manifest_prefix}/prometheus/
|
||||
kubectl apply -f ${manifest_prefix}/alertmanager/
|
||||
|
||||
|
|
|
@ -8,12 +8,7 @@ json="tmp/manifests.json"
|
|||
|
||||
rm -rf ${prefix}
|
||||
mkdir -p $(dirname "${json}")
|
||||
jsonnet \
|
||||
-J $GOPATH/src/github.com/ksonnet/ksonnet-lib \
|
||||
-J $GOPATH/src/github.com/grafana/grafonnet-lib \
|
||||
-J $GOPATH/src/github.com/coreos/prometheus-operator/contrib/kube-prometheus/jsonnet \
|
||||
-J $GOPATH/src/github.com/brancz/kubernetes-grafana/src/kubernetes-jsonnet \
|
||||
${jsonnet} > ${json}
|
||||
jsonnet -J jsonnet/kube-prometheus/vendor -J jsonnet ${jsonnet} > ${json}
|
||||
|
||||
files=$(jq -r 'keys[]' ${json})
|
||||
|
||||
|
@ -21,5 +16,6 @@ for file in ${files}; do
|
|||
dir=$(dirname "${file}")
|
||||
path="${prefix}/${dir}"
|
||||
mkdir -p ${path}
|
||||
jq -r ".[\"${file}\"]" ${json} | gojsontoyaml -yamltojson | gojsontoyaml > "${prefix}/${file}"
|
||||
fullfile=$(echo ${file} | sed -r 's/([a-z0-9])([A-Z])/\1-\L\2/g' | tr '[:upper:]' '[:lower:]')
|
||||
jq -r ".[\"${file}\"]" ${json} | gojsontoyaml -yamltojson | gojsontoyaml > "${prefix}/${fullfile}"
|
||||
done
|
||||
|
|
|
@ -0,0 +1,12 @@
|
|||
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet') + {
|
||||
_config+:: {
|
||||
namespace: 'monitoring',
|
||||
},
|
||||
};
|
||||
|
||||
{ ['0prometheus-operator-' + name + '.yaml']: std.manifestYamlDoc(kp.prometheusOperator[name]) for name in std.objectFields(kp.prometheusOperator) } +
|
||||
{ ['node-exporter-' + name + '.yaml']: std.manifestYamlDoc(kp.nodeExporter[name]) for name in std.objectFields(kp.nodeExporter) } +
|
||||
{ ['kube-state-metrics-' + name + '.yaml']: std.manifestYamlDoc(kp.kubeStateMetrics[name]) for name in std.objectFields(kp.kubeStateMetrics) } +
|
||||
{ ['alertmanager-' + name + '.yaml']: std.manifestYamlDoc(kp.alertmanager[name]) for name in std.objectFields(kp.alertmanager) } +
|
||||
{ ['prometheus-' + name + '.yaml']: std.manifestYamlDoc(kp.prometheus[name]) for name in std.objectFields(kp.prometheus) } +
|
||||
{ ['grafana-' + name + '.yaml']: std.manifestYamlDoc(kp.grafana[name]) for name in std.objectFields(kp.grafana) }
|
|
@ -0,0 +1,16 @@
|
|||
local kp =
|
||||
(import 'kube-prometheus/kube-prometheus.libsonnet') +
|
||||
(import 'kube-prometheus/kube-prometheus-kubeadm.libsonnet') +
|
||||
(import 'kube-prometheus/kube-prometheus-node-ports.libsonnet') +
|
||||
{
|
||||
_config+:: {
|
||||
namespace: 'monitoring',
|
||||
},
|
||||
};
|
||||
|
||||
{ ['0prometheus-operator-' + name + '.yaml']: std.manifestYamlDoc(kp.prometheusOperator[name]) for name in std.objectFields(kp.prometheusOperator) } +
|
||||
{ ['node-exporter-' + name + '.yaml']: std.manifestYamlDoc(kp.nodeExporter[name]) for name in std.objectFields(kp.nodeExporter) } +
|
||||
{ ['kube-state-metrics-' + name + '.yaml']: std.manifestYamlDoc(kp.kubeStateMetrics[name]) for name in std.objectFields(kp.kubeStateMetrics) } +
|
||||
{ ['alertmanager-' + name + '.yaml']: std.manifestYamlDoc(kp.alertmanager[name]) for name in std.objectFields(kp.alertmanager) } +
|
||||
{ ['prometheus-' + name + '.yaml']: std.manifestYamlDoc(kp.prometheus[name]) for name in std.objectFields(kp.prometheus) } +
|
||||
{ ['grafana-' + name + '.yaml']: std.manifestYamlDoc(kp.grafana[name]) for name in std.objectFields(kp.grafana) }
|
|
@ -1,8 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local secret = k.core.v1.secret;
|
||||
|
||||
{
|
||||
new(namespace, plainConfig)::
|
||||
secret.new("alertmanager-main", {"alertmanager.yaml": std.base64(plainConfig)}) +
|
||||
secret.mixin.metadata.withNamespace(namespace)
|
||||
}
|
|
@ -1,8 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local serviceAccount = k.core.v1.serviceAccount;
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
serviceAccount.new("alertmanager-main") +
|
||||
serviceAccount.mixin.metadata.withNamespace(namespace)
|
||||
}
|
|
@ -1,32 +0,0 @@
|
|||
{
|
||||
new(namespace)::
|
||||
{
|
||||
"apiVersion": "monitoring.coreos.com/v1",
|
||||
"kind": "ServiceMonitor",
|
||||
"metadata": {
|
||||
"name": "alertmanager",
|
||||
"namespace": namespace,
|
||||
"labels": {
|
||||
"k8s-app": "alertmanager"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"alertmanager": "main"
|
||||
}
|
||||
},
|
||||
"namespaceSelector": {
|
||||
"matchNames": [
|
||||
"monitoring"
|
||||
]
|
||||
},
|
||||
"endpoints": [
|
||||
{
|
||||
"port": "web",
|
||||
"interval": "30s"
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
|
||||
local alertmanagerPort = servicePort.newNamed("web", 9093, "web");
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
service.new("alertmanager-main", {app: "alertmanager", alertmanager: "main"}, alertmanagerPort) +
|
||||
service.mixin.metadata.withNamespace(namespace) +
|
||||
service.mixin.metadata.withLabels({alertmanager: "main"})
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
{
|
||||
new(namespace)::
|
||||
{
|
||||
apiVersion: "monitoring.coreos.com/v1",
|
||||
kind: "Alertmanager",
|
||||
metadata: {
|
||||
name: "main",
|
||||
namespace: namespace,
|
||||
labels: {
|
||||
alertmanager: "main",
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
replicas: 3,
|
||||
version: "v0.14.0",
|
||||
serviceAccountName: "alertmanager-main",
|
||||
},
|
||||
}
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
{
|
||||
config:: import "alertmanager-main-secret.libsonnet",
|
||||
serviceAccount:: import "alertmanager-main-service-account.libsonnet",
|
||||
service:: import "alertmanager-main-service.libsonnet",
|
||||
serviceMonitor:: import "alertmanager-main-service-monitor.libsonnet",
|
||||
alertmanager:: import "alertmanager-main.libsonnet",
|
||||
}
|
|
@ -1,85 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
|
||||
local alertmanager = import "alertmanager/alertmanager.libsonnet";
|
||||
local ksm = import "kube-state-metrics/kube-state-metrics.libsonnet";
|
||||
local nodeExporter = import "node-exporter/node-exporter.libsonnet";
|
||||
local po = import "prometheus-operator/prometheus-operator.libsonnet";
|
||||
local prometheus = import "prometheus/prometheus.libsonnet";
|
||||
local grafana = import "grafana/grafana.libsonnet";
|
||||
|
||||
local alertmanagerConfig = importstr "../assets/alertmanager/alertmanager.yaml";
|
||||
|
||||
local ruleFiles = {
|
||||
"alertmanager.rules.yaml": importstr "../assets/prometheus/rules/alertmanager.rules.yaml",
|
||||
"etcd3.rules.yaml": importstr "../assets/prometheus/rules/etcd3.rules.yaml",
|
||||
"general.rules.yaml": importstr "../assets/prometheus/rules/general.rules.yaml",
|
||||
"kube-controller-manager.rules.yaml": importstr "../assets/prometheus/rules/kube-controller-manager.rules.yaml",
|
||||
"kube-scheduler.rules.yaml": importstr "../assets/prometheus/rules/kube-scheduler.rules.yaml",
|
||||
"kube-state-metrics.rules.yaml": importstr "../assets/prometheus/rules/kube-state-metrics.rules.yaml",
|
||||
"kubelet.rules.yaml": importstr "../assets/prometheus/rules/kubelet.rules.yaml",
|
||||
"kubernetes.rules.yaml": importstr "../assets/prometheus/rules/kubernetes.rules.yaml",
|
||||
"node.rules.yaml": importstr "../assets/prometheus/rules/node.rules.yaml",
|
||||
"prometheus.rules.yaml": importstr "../assets/prometheus/rules/prometheus.rules.yaml",
|
||||
};
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
{
|
||||
"grafana/grafana-dashboard-definitions.yaml": grafana.dashboardDefinitions.new(namespace),
|
||||
"grafana/grafana-dashboard-sources.yaml": grafana.dashboardSources.new(namespace),
|
||||
"grafana/grafana-datasources.yaml": grafana.dashboardDatasources.new(namespace),
|
||||
"grafana/grafana-deployment.yaml": grafana.deployment.new(namespace),
|
||||
"grafana/grafana-service-account.yaml": grafana.serviceAccount.new(namespace),
|
||||
"grafana/grafana-service.yaml": grafana.service.new(namespace),
|
||||
|
||||
"alertmanager-main/alertmanager-main-secret.yaml": alertmanager.config.new(namespace, alertmanagerConfig),
|
||||
"alertmanager-main/alertmanager-main-service-account.yaml": alertmanager.serviceAccount.new(namespace),
|
||||
"alertmanager-main/alertmanager-main-service.yaml": alertmanager.service.new(namespace),
|
||||
"alertmanager-main/alertmanager-main-service-monitor.yaml": alertmanager.serviceMonitor.new(namespace),
|
||||
"alertmanager-main/alertmanager-main.yaml": alertmanager.alertmanager.new(namespace),
|
||||
|
||||
"kube-state-metrics/kube-state-metrics-cluster-role-binding.yaml": ksm.clusterRoleBinding.new(namespace),
|
||||
"kube-state-metrics/kube-state-metrics-cluster-role.yaml": ksm.clusterRole.new(),
|
||||
"kube-state-metrics/kube-state-metrics-deployment.yaml": ksm.deployment.new(namespace),
|
||||
"kube-state-metrics/kube-state-metrics-role-binding.yaml": ksm.roleBinding.new(namespace),
|
||||
"kube-state-metrics/kube-state-metrics-role.yaml": ksm.role.new(namespace),
|
||||
"kube-state-metrics/kube-state-metrics-service-account.yaml": ksm.serviceAccount.new(namespace),
|
||||
"kube-state-metrics/kube-state-metrics-service.yaml": ksm.service.new(namespace),
|
||||
"kube-state-metrics/kube-state-metrics-service-monitor.yaml": ksm.serviceMonitor.new(namespace),
|
||||
|
||||
"node-exporter/node-exporter-cluster-role-binding.yaml": nodeExporter.clusterRoleBinding.new(namespace),
|
||||
"node-exporter/node-exporter-cluster-role.yaml": nodeExporter.clusterRole.new(),
|
||||
"node-exporter/node-exporter-daemonset.yaml": nodeExporter.daemonset.new(namespace),
|
||||
"node-exporter/node-exporter-service-account.yaml": nodeExporter.serviceAccount.new(namespace),
|
||||
"node-exporter/node-exporter-service.yaml": nodeExporter.service.new(namespace),
|
||||
"node-exporter/node-exporter-service-monitor.yaml": nodeExporter.serviceMonitor.new(namespace),
|
||||
|
||||
"prometheus-operator/prometheus-operator-cluster-role-binding.yaml": po.clusterRoleBinding.new(namespace),
|
||||
"prometheus-operator/prometheus-operator-cluster-role.yaml": po.clusterRole.new(),
|
||||
"prometheus-operator/prometheus-operator-deployment.yaml": po.deployment.new(namespace),
|
||||
"prometheus-operator/prometheus-operator-service.yaml": po.service.new(namespace),
|
||||
"prometheus-operator/prometheus-operator-service-monitor.yaml": po.serviceMonitor.new(namespace),
|
||||
"prometheus-operator/prometheus-operator-service-account.yaml": po.serviceAccount.new(namespace),
|
||||
|
||||
"prometheus-k8s/prometheus-k8s-cluster-role-binding.yaml": prometheus.clusterRoleBinding.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-cluster-role.yaml": prometheus.clusterRole.new(),
|
||||
"prometheus-k8s/prometheus-k8s-service-account.yaml": prometheus.serviceAccount.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-service.yaml": prometheus.service.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s.yaml": prometheus.prometheus.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-rules.yaml": prometheus.rules.new(namespace, ruleFiles),
|
||||
"prometheus-k8s/prometheus-k8s-role-binding-config.yaml": prometheus.roleBindingConfig.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-role-binding-namespace.yaml": prometheus.roleBindingNamespace.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-role-binding-kube-system.yaml": prometheus.roleBindingKubeSystem.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-role-binding-default.yaml": prometheus.roleBindingDefault.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-role-config.yaml": prometheus.roleConfig.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-role-namespace.yaml": prometheus.roleNamespace.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-role-kube-system.yaml": prometheus.roleKubeSystem.new(),
|
||||
"prometheus-k8s/prometheus-k8s-role-default.yaml": prometheus.roleDefault.new(),
|
||||
"prometheus-k8s/prometheus-k8s-service-monitor-apiserver.yaml": prometheus.serviceMonitorApiserver.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-service-monitor-coredns.yaml": prometheus.serviceMonitorCoreDNS.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-service-monitor-kube-controller-manager.yaml": prometheus.serviceMonitorControllerManager.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-service-monitor-kube-scheduler.yaml": prometheus.serviceMonitorScheduler.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-service-monitor-kubelet.yaml": prometheus.serviceMonitorKubelet.new(namespace),
|
||||
"prometheus-k8s/prometheus-k8s-service-monitor-prometheus.yaml": prometheus.serviceMonitorPrometheus.new(namespace),
|
||||
}
|
||||
}
|
contrib/kube-prometheus/jsonnet/kube-prometheus/.gitignore (vendored, new file, 2 lines)
|
@ -0,0 +1,2 @@
|
|||
jsonnetfile.lock.json
|
||||
vendor/
|
|
@ -0,0 +1,97 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';

local alertmanagerConfig = "\nglobal:\n resolve_timeout: 5m\nroute:\n group_by: ['job']\n group_wait: 30s\n group_interval: 5m\n repeat_interval: 12h\n receiver: 'null'\n routes:\n - match:\n alertname: DeadMansSwitch\n receiver: 'null'\nreceivers:\n- name: 'null'\n";

{
  _config+:: {
    namespace: 'default',

    versions+:: {
      alertmanager: 'v0.14.0',
    },

    imageRepos+:: {
      alertmanager: 'quay.io/prometheus/alertmanager',
    },

    alertmanager+:: {
      config: alertmanagerConfig,
      replicas: 3,
    },
  },

  alertmanager+:: {
    secret:
      local secret = k.core.v1.secret;

      secret.new('alertmanager-main', { 'alertmanager.yaml': std.base64($._config.alertmanager.config) }) +
      secret.mixin.metadata.withNamespace($._config.namespace),

    serviceAccount:
      local serviceAccount = k.core.v1.serviceAccount;

      serviceAccount.new('alertmanager-main') +
      serviceAccount.mixin.metadata.withNamespace($._config.namespace),

    service:
      local service = k.core.v1.service;
      local servicePort = k.core.v1.service.mixin.spec.portsType;

      local alertmanagerPort = servicePort.newNamed('web', 9093, 'web');

      service.new('alertmanager-main', { app: 'alertmanager', alertmanager: 'main' }, alertmanagerPort) +
      service.mixin.metadata.withNamespace($._config.namespace) +
      service.mixin.metadata.withLabels({ alertmanager: 'main' }),

    serviceMonitor:
      {
        apiVersion: 'monitoring.coreos.com/v1',
        kind: 'ServiceMonitor',
        metadata: {
          name: 'alertmanager',
          namespace: $._config.namespace,
          labels: {
            'k8s-app': 'alertmanager',
          },
        },
        spec: {
          selector: {
            matchLabels: {
              alertmanager: 'main',
            },
          },
          namespaceSelector: {
            matchNames: [
              'monitoring',
            ],
          },
          endpoints: [
            {
              port: 'web',
              interval: '30s',
            },
          ],
        },
      },

    alertmanager:
      {
        apiVersion: 'monitoring.coreos.com/v1',
        kind: 'Alertmanager',
        metadata: {
          name: 'main',
          namespace: $._config.namespace,
          labels: {
            alertmanager: 'main',
          },
        },
        spec: {
          replicas: $._config.alertmanager.replicas,
          version: $._config.versions.alertmanager,
          baseImage: $._config.imageRepos.alertmanager,
          nodeSelector: { 'beta.kubernetes.io/os': 'linux' },
          serviceAccountName: 'alertmanager-main',
        },
      },
  },
}
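The hidden `_config+::` fields above are the intended extension point for consumers of this library. A minimal sketch of such an override, assuming the library has been vendored under `kube-prometheus/` as in the example build file further down; the `monitoring` namespace and the reduced replica count are illustrative values, not part of this commit:

```jsonnet
// Minimal consumer sketch (illustrative values only).
(import 'kube-prometheus/kube-prometheus.libsonnet') + {
  _config+:: {
    namespace: 'monitoring',
    alertmanager+:: {
      replicas: 1,  // overrides the default of 3 defined above
    },
  },
}
```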
@ -0,0 +1,34 @@
{
  "dependencies": [
    {
      "name": "ksonnet",
      "source": {
        "git": {
          "remote": "https://github.com/ksonnet/ksonnet-lib",
          "subdir": ""
        }
      },
      "version": "master"
    },
    {
      "name": "kubernetes-mixin",
      "source": {
        "git": {
          "remote": "https://github.com/kubernetes-monitoring/kubernetes-mixin",
          "subdir": ""
        }
      },
      "version": "master"
    },
    {
      "name": "grafana",
      "source": {
        "git": {
          "remote": "https://github.com/brancz/kubernetes-grafana",
          "subdir": "grafana"
        }
      },
      "version": "master"
    }
  ]
}
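jsonnet-bundler resolves the dependencies declared above into a `vendor/` directory and records exact revisions in `jsonnetfile.lock.json`, both of which the new `.gitignore` above excludes. A sketch of what that enables, assuming `vendor/` is on the jsonnet include path (jsonnet-bundler's default layout; the `jb install` invocation is an assumption, not shown in this diff):

```jsonnet
// After vendoring (e.g. with `jb install`), the dependencies are importable by name.
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local grafana = import 'grafana/grafana.libsonnet';
```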
@ -0,0 +1,23 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

{
  prometheus+:: {
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { 'k8s-app': 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeSchedulerPrometheusDiscoveryService:
      service.new('kube-scheduler-prometheus-discovery', { 'k8s-app': 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeDnsPrometheusDiscoveryService:
      service.new('kube-dns-prometheus-discovery', { 'k8s-app': 'kube-dns' }, [servicePort.newNamed('http-metrics-skydns', 10055, 10055), servicePort.newNamed('http-metrics-dnsmasq', 10054, 10054)]) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-dns' }) +
      service.mixin.spec.withClusterIp('None'),
  },
}
@ -0,0 +1,8 @@
local kp = (import 'kube-prometheus/kube-prometheus.libsonnet');

{ ['0prometheus-operator-' + name]: kp.prometheusOperator[name] for name in std.objectFields(kp.prometheusOperator) } +
{ ['node-exporter-' + name]: kp.nodeExporter[name] for name in std.objectFields(kp.nodeExporter) } +
{ ['kube-state-metrics-' + name]: kp.kubeStateMetrics[name] for name in std.objectFields(kp.kubeStateMetrics) } +
{ ['alertmanager-' + name]: kp.alertmanager[name] for name in std.objectFields(kp.alertmanager) } +
{ ['prometheus-' + name]: kp.prometheus[name] for name in std.objectFields(kp.prometheus) } +
{ ['grafana-' + name]: kp.grafana[name] for name in std.objectFields(kp.grafana) }
@ -0,0 +1,18 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

{
  prometheus+: {
    kubeControllerManagerPrometheusDiscoveryService:
      service.new('kube-controller-manager-prometheus-discovery', { component: 'kube-controller-manager' }, servicePort.newNamed('http-metrics', 10252, 10252)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-controller-manager' }) +
      service.mixin.spec.withClusterIp('None'),
    kubeSchedulerPrometheusDiscoveryService:
      service.new('kube-scheduler-prometheus-discovery', { component: 'kube-scheduler' }, servicePort.newNamed('http-metrics', 10251, 10251)) +
      service.mixin.metadata.withNamespace('kube-system') +
      service.mixin.metadata.withLabels({ 'k8s-app': 'kube-scheduler' }) +
      service.mixin.spec.withClusterIp('None'),
  },
}
@ -0,0 +1,21 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

{
  prometheus+: {
    service+:
      service.mixin.spec.withPorts(servicePort.newNamed('web', 9090, 'web') + servicePort.withNodePort(30900)) +
      service.mixin.spec.withType('NodePort'),
  },
  alertmanager+: {
    service+:
      service.mixin.spec.withPorts(servicePort.newNamed('web', 9093, 'web') + servicePort.withNodePort(30903)) +
      service.mixin.spec.withType('NodePort'),
  },
  grafana+: {
    service+:
      service.mixin.spec.withPorts(servicePort.newNamed('http', 3000, 'http') + servicePort.withNodePort(30902)) +
      service.mixin.spec.withType('NodePort'),
  },
}
@ -0,0 +1,26 @@
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';

(import 'grafana/grafana.libsonnet') +
(import 'kube-state-metrics/kube-state-metrics.libsonnet') +
(import 'node-exporter/node-exporter.libsonnet') +
(import 'alertmanager/alertmanager.libsonnet') +
(import 'prometheus-operator/prometheus-operator.libsonnet') +
(import 'prometheus/prometheus.libsonnet') +
(import 'kubernetes-mixin/mixin.libsonnet') +
{
  _config+:: {
    kubeStateMetricsSelector: 'job="kube-state-metrics"',
    cadvisorSelector: 'job="kubelet"',
    nodeExporterSelector: 'job="node-exporter"',
    kubeletSelector: 'job="kubelet"',
    notKubeDnsSelector: 'job!="kube-dns"',

    prometheus+:: {
      rules: $.prometheusRules + $.prometheusAlerts,
    },

    grafana+:: {
      dashboards: $.grafanaDashboards,
    },
  },
}
@ -0,0 +1,286 @@
|
|||
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
|
||||
|
||||
{
|
||||
_config+:: {
|
||||
namespace: 'default',
|
||||
|
||||
versions+:: {
|
||||
kubeStateMetrics: 'v1.3.0',
|
||||
kubeRbacProxy: 'v0.3.0',
|
||||
addonResizer: '1.0',
|
||||
},
|
||||
|
||||
imageRepos+:: {
|
||||
kubeStateMetrics: 'quay.io/coreos/kube-state-metrics',
|
||||
kubeRbacProxy: 'quay.io/coreos/kube-rbac-proxy',
|
||||
addonResizer: 'quay.io/coreos/addon-resizer',
|
||||
},
|
||||
},
|
||||
|
||||
kubeStateMetrics+:: {
|
||||
clusterRoleBinding:
|
||||
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;
|
||||
|
||||
clusterRoleBinding.new() +
|
||||
clusterRoleBinding.mixin.metadata.withName('kube-state-metrics') +
|
||||
clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
clusterRoleBinding.mixin.roleRef.withName('kube-state-metrics') +
|
||||
clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
|
||||
clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'kube-state-metrics', namespace: $._config.namespace }]),
|
||||
|
||||
clusterRole:
|
||||
local clusterRole = k.rbac.v1.clusterRole;
|
||||
local policyRule = clusterRole.rulesType;
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'configmaps',
|
||||
'secrets',
|
||||
'nodes',
|
||||
'pods',
|
||||
'services',
|
||||
'resourcequotas',
|
||||
'replicationcontrollers',
|
||||
'limitranges',
|
||||
'persistentvolumeclaims',
|
||||
'persistentvolumes',
|
||||
'namespaces',
|
||||
'endpoints',
|
||||
]) +
|
||||
policyRule.withVerbs(['list', 'watch']);
|
||||
|
||||
local extensionsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['extensions']) +
|
||||
policyRule.withResources([
|
||||
'daemonsets',
|
||||
'deployments',
|
||||
'replicasets',
|
||||
]) +
|
||||
policyRule.withVerbs(['list', 'watch']);
|
||||
|
||||
local appsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['apps']) +
|
||||
policyRule.withResources([
|
||||
'statefulsets',
|
||||
]) +
|
||||
policyRule.withVerbs(['list', 'watch']);
|
||||
|
||||
local batchRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['batch']) +
|
||||
policyRule.withResources([
|
||||
'cronjobs',
|
||||
'jobs',
|
||||
]) +
|
||||
policyRule.withVerbs(['list', 'watch']);
|
||||
|
||||
local autoscalingRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['autoscaling']) +
|
||||
policyRule.withResources([
|
||||
'horizontalpodautoscalers',
|
||||
]) +
|
||||
policyRule.withVerbs(['list', 'watch']);
|
||||
|
||||
local authenticationRole = policyRule.new() +
|
||||
policyRule.withApiGroups(['authentication.k8s.io']) +
|
||||
policyRule.withResources([
|
||||
'tokenreviews',
|
||||
]) +
|
||||
policyRule.withVerbs(['create']);
|
||||
|
||||
local authorizationRole = policyRule.new() +
|
||||
policyRule.withApiGroups(['authorization.k8s.io']) +
|
||||
policyRule.withResources([
|
||||
'subjectaccessreviews',
|
||||
]) +
|
||||
policyRule.withVerbs(['create']);
|
||||
|
||||
local rules = [coreRule, extensionsRule, appsRule, batchRule, autoscalingRule, authenticationRole, authorizationRole];
|
||||
|
||||
clusterRole.new() +
|
||||
clusterRole.mixin.metadata.withName('kube-state-metrics') +
|
||||
clusterRole.withRules(rules),
|
||||
deployment:
|
||||
local deployment = k.apps.v1beta2.deployment;
|
||||
local container = k.apps.v1beta2.deployment.mixin.spec.template.spec.containersType;
|
||||
local volume = k.apps.v1beta2.deployment.mixin.spec.template.spec.volumesType;
|
||||
local containerPort = container.portsType;
|
||||
local containerVolumeMount = container.volumeMountsType;
|
||||
local podSelector = deployment.mixin.spec.template.spec.selectorType;
|
||||
|
||||
local podLabels = { app: 'kube-state-metrics' };
|
||||
|
||||
local proxyClusterMetrics =
|
||||
container.new('kube-rbac-proxy-main', $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy) +
|
||||
container.withArgs([
|
||||
'--secure-listen-address=:8443',
|
||||
'--upstream=http://127.0.0.1:8081/',
|
||||
]) +
|
||||
container.withPorts(containerPort.newNamed('https-main', 8443)) +
|
||||
container.mixin.resources.withRequests({ cpu: '10m', memory: '20Mi' }) +
|
||||
container.mixin.resources.withLimits({ cpu: '20m', memory: '40Mi' });
|
||||
|
||||
local proxySelfMetrics =
|
||||
container.new('kube-rbac-proxy-self', $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy) +
|
||||
container.withArgs([
|
||||
'--secure-listen-address=:9443',
|
||||
'--upstream=http://127.0.0.1:8082/',
|
||||
]) +
|
||||
container.withPorts(containerPort.newNamed('https-self', 9443)) +
|
||||
container.mixin.resources.withRequests({ cpu: '10m', memory: '20Mi' }) +
|
||||
container.mixin.resources.withLimits({ cpu: '20m', memory: '40Mi' });
|
||||
|
||||
local kubeStateMetrics =
|
||||
container.new('kube-state-metrics', $._config.imageRepos.kubeStateMetrics + ':' + $._config.versions.kubeStateMetrics) +
|
||||
container.withArgs([
|
||||
'--host=127.0.0.1',
|
||||
'--port=8081',
|
||||
'--telemetry-host=127.0.0.1',
|
||||
'--telemetry-port=8082',
|
||||
]) +
|
||||
container.mixin.resources.withRequests({ cpu: '102m', memory: '180Mi' }) +
|
||||
container.mixin.resources.withLimits({ cpu: '102m', memory: '180Mi' });
|
||||
|
||||
local addonResizer =
|
||||
container.new('addon-resizer', $._config.imageRepos.addonResizer + ':' + $._config.versions.addonResizer) +
|
||||
container.withCommand([
|
||||
'/pod_nanny',
|
||||
'--container=kube-state-metrics',
|
||||
'--cpu=100m',
|
||||
'--extra-cpu=2m',
|
||||
'--memory=150Mi',
|
||||
'--extra-memory=30Mi',
|
||||
'--threshold=5',
|
||||
'--deployment=kube-state-metrics',
|
||||
]) +
|
||||
container.withEnv([
|
||||
{
|
||||
name: 'MY_POD_NAME',
|
||||
valueFrom: {
|
||||
fieldRef: { apiVersion: 'v1', fieldPath: 'metadata.name' },
|
||||
},
|
||||
},
|
||||
{
|
||||
name: 'MY_POD_NAMESPACE',
|
||||
valueFrom: {
|
||||
fieldRef: { apiVersion: 'v1', fieldPath: 'metadata.namespace' },
|
||||
},
|
||||
},
|
||||
]) +
|
||||
container.mixin.resources.withRequests({ cpu: '10m', memory: '30Mi' }) +
|
||||
container.mixin.resources.withLimits({ cpu: '10m', memory: '30Mi' });
|
||||
|
||||
local c = [proxyClusterMetrics, proxySelfMetrics, kubeStateMetrics, addonResizer];
|
||||
|
||||
deployment.new('kube-state-metrics', 1, c, podLabels) +
|
||||
deployment.mixin.metadata.withNamespace($._config.namespace) +
|
||||
deployment.mixin.metadata.withLabels(podLabels) +
|
||||
deployment.mixin.spec.selector.withMatchLabels(podLabels) +
|
||||
deployment.mixin.spec.template.spec.withNodeSelector({ 'beta.kubernetes.io/os': 'linux' }) +
|
||||
deployment.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
|
||||
deployment.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
|
||||
deployment.mixin.spec.template.spec.withServiceAccountName('kube-state-metrics'),
|
||||
|
||||
roleBinding:
|
||||
local roleBinding = k.rbac.v1.roleBinding;
|
||||
|
||||
roleBinding.new() +
|
||||
roleBinding.mixin.metadata.withName('kube-state-metrics') +
|
||||
roleBinding.mixin.metadata.withNamespace($._config.namespace) +
|
||||
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
roleBinding.mixin.roleRef.withName('kube-state-metrics') +
|
||||
roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
|
||||
roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'kube-state-metrics' }]),
|
||||
|
||||
role:
|
||||
local role = k.rbac.v1.role;
|
||||
local policyRule = role.rulesType;
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'pods',
|
||||
]) +
|
||||
policyRule.withVerbs(['get']);
|
||||
|
||||
local extensionsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['extensions']) +
|
||||
policyRule.withResources([
|
||||
'deployments',
|
||||
]) +
|
||||
policyRule.withVerbs(['get', 'update']) +
|
||||
policyRule.withResourceNames(['kube-state-metrics']);
|
||||
|
||||
local rules = [coreRule, extensionsRule];
|
||||
|
||||
role.new() +
|
||||
role.mixin.metadata.withName('kube-state-metrics') +
|
||||
role.mixin.metadata.withNamespace($._config.namespace) +
|
||||
role.withRules(rules),
|
||||
|
||||
serviceAccount:
|
||||
local serviceAccount = k.core.v1.serviceAccount;
|
||||
|
||||
serviceAccount.new('kube-state-metrics') +
|
||||
serviceAccount.mixin.metadata.withNamespace($._config.namespace),
|
||||
|
||||
service:
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
|
||||
local ksmServicePortMain = servicePort.newNamed('https-main', 8443, 'https-main');
|
||||
local ksmServicePortSelf = servicePort.newNamed('https-self', 9443, 'https-self');
|
||||
|
||||
service.new('kube-state-metrics', $.kubeStateMetrics.deployment.spec.selector.matchLabels, [ksmServicePortMain, ksmServicePortSelf]) +
|
||||
service.mixin.metadata.withNamespace($._config.namespace) +
|
||||
service.mixin.metadata.withLabels({ 'k8s-app': 'kube-state-metrics' }) +
|
||||
service.mixin.spec.withClusterIp('None'),
|
||||
|
||||
serviceMonitor:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'kube-state-metrics',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'kube-state-metrics',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'k8s-app': 'kube-state-metrics',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'monitoring',
|
||||
],
|
||||
},
|
||||
endpoints: [
|
||||
{
|
||||
port: 'https-main',
|
||||
scheme: 'https',
|
||||
interval: '30s',
|
||||
honorLabels: true,
|
||||
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
|
||||
tlsConfig: {
|
||||
insecureSkipVerify: true,
|
||||
},
|
||||
},
|
||||
{
|
||||
port: 'https-self',
|
||||
scheme: 'https',
|
||||
interval: '30s',
|
||||
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
|
||||
tlsConfig: {
|
||||
insecureSkipVerify: true,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -0,0 +1,167 @@
|
|||
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
|
||||
|
||||
{
|
||||
_config+:: {
|
||||
namespace: 'default',
|
||||
|
||||
versions+:: {
|
||||
nodeExporter: 'v0.15.2',
|
||||
kubeRbacProxy: 'v0.3.0',
|
||||
},
|
||||
|
||||
imageRepos+:: {
|
||||
nodeExporter: 'quay.io/prometheus/node-exporter',
|
||||
kubeRbacProxy: 'quay.io/coreos/kube-rbac-proxy',
|
||||
},
|
||||
},
|
||||
|
||||
nodeExporter+:: {
|
||||
clusterRoleBinding:
|
||||
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;
|
||||
|
||||
clusterRoleBinding.new() +
|
||||
clusterRoleBinding.mixin.metadata.withName('node-exporter') +
|
||||
clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
clusterRoleBinding.mixin.roleRef.withName('node-exporter') +
|
||||
clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
|
||||
clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'node-exporter', namespace: $._config.namespace }]),
|
||||
|
||||
clusterRole:
|
||||
local clusterRole = k.rbac.v1.clusterRole;
|
||||
local policyRule = clusterRole.rulesType;
|
||||
|
||||
local authenticationRole = policyRule.new() +
|
||||
policyRule.withApiGroups(['authentication.k8s.io']) +
|
||||
policyRule.withResources([
|
||||
'tokenreviews',
|
||||
]) +
|
||||
policyRule.withVerbs(['create']);
|
||||
|
||||
local authorizationRole = policyRule.new() +
|
||||
policyRule.withApiGroups(['authorization.k8s.io']) +
|
||||
policyRule.withResources([
|
||||
'subjectaccessreviews',
|
||||
]) +
|
||||
policyRule.withVerbs(['create']);
|
||||
|
||||
local rules = [authenticationRole, authorizationRole];
|
||||
|
||||
clusterRole.new() +
|
||||
clusterRole.mixin.metadata.withName('node-exporter') +
|
||||
clusterRole.withRules(rules),
|
||||
|
||||
daemonset:
|
||||
local daemonset = k.apps.v1beta2.daemonSet;
|
||||
local container = daemonset.mixin.spec.template.spec.containersType;
|
||||
local volume = daemonset.mixin.spec.template.spec.volumesType;
|
||||
local containerPort = container.portsType;
|
||||
local containerVolumeMount = container.volumeMountsType;
|
||||
local podSelector = daemonset.mixin.spec.template.spec.selectorType;
|
||||
local toleration = daemonset.mixin.spec.template.spec.tolerationsType;
|
||||
|
||||
local podLabels = { app: 'node-exporter' };
|
||||
|
||||
local masterToleration = toleration.new() +
|
||||
toleration.withEffect('NoSchedule') +
|
||||
toleration.withKey('node-role.kubernetes.io/master');
|
||||
|
||||
local procVolumeName = 'proc';
|
||||
local procVolume = volume.fromHostPath(procVolumeName, '/proc');
|
||||
local procVolumeMount = containerVolumeMount.new(procVolumeName, '/host/proc');
|
||||
|
||||
local sysVolumeName = 'sys';
|
||||
local sysVolume = volume.fromHostPath(sysVolumeName, '/sys');
|
||||
local sysVolumeMount = containerVolumeMount.new(sysVolumeName, '/host/sys');
|
||||
|
||||
local nodeExporter =
|
||||
container.new('node-exporter', $._config.imageRepos.nodeExporter + ':' + $._config.versions.nodeExporter) +
|
||||
container.withArgs([
|
||||
'--web.listen-address=127.0.0.1:9101',
|
||||
'--path.procfs=/host/proc',
|
||||
'--path.sysfs=/host/sys',
|
||||
]) +
|
||||
container.withVolumeMounts([procVolumeMount, sysVolumeMount]) +
|
||||
container.mixin.resources.withRequests({ cpu: '102m', memory: '180Mi' }) +
|
||||
container.mixin.resources.withLimits({ cpu: '102m', memory: '180Mi' });
|
||||
|
||||
local proxy =
|
||||
container.new('kube-rbac-proxy', $._config.imageRepos.kubeRbacProxy + ':' + $._config.versions.kubeRbacProxy) +
|
||||
container.withArgs([
|
||||
'--secure-listen-address=:9100',
|
||||
'--upstream=http://127.0.0.1:9101/',
|
||||
]) +
|
||||
container.withPorts(containerPort.newNamed('https', 9100)) +
|
||||
container.mixin.resources.withRequests({ cpu: '10m', memory: '20Mi' }) +
|
||||
container.mixin.resources.withLimits({ cpu: '20m', memory: '40Mi' });
|
||||
|
||||
local c = [nodeExporter, proxy];
|
||||
|
||||
daemonset.new() +
|
||||
daemonset.mixin.metadata.withName('node-exporter') +
|
||||
daemonset.mixin.metadata.withNamespace($._config.namespace) +
|
||||
daemonset.mixin.metadata.withLabels(podLabels) +
|
||||
daemonset.mixin.spec.selector.withMatchLabels(podLabels) +
|
||||
daemonset.mixin.spec.template.metadata.withLabels(podLabels) +
|
||||
daemonset.mixin.spec.template.spec.withTolerations([masterToleration]) +
|
||||
daemonset.mixin.spec.template.spec.withNodeSelector({ 'beta.kubernetes.io/os': 'linux' }) +
|
||||
daemonset.mixin.spec.template.spec.withContainers(c) +
|
||||
daemonset.mixin.spec.template.spec.withVolumes([procVolume, sysVolume]) +
|
||||
daemonset.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
|
||||
daemonset.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
|
||||
daemonset.mixin.spec.template.spec.withServiceAccountName('node-exporter'),
|
||||
|
||||
serviceAccount:
|
||||
local serviceAccount = k.core.v1.serviceAccount;
|
||||
|
||||
serviceAccount.new('node-exporter') +
|
||||
serviceAccount.mixin.metadata.withNamespace($._config.namespace),
|
||||
|
||||
serviceMonitor:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'node-exporter',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'node-exporter',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'k8s-app': 'node-exporter',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'monitoring',
|
||||
],
|
||||
},
|
||||
endpoints: [
|
||||
{
|
||||
port: 'https',
|
||||
scheme: 'https',
|
||||
interval: '30s',
|
||||
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
|
||||
tlsConfig: {
|
||||
insecureSkipVerify: true,
|
||||
},
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
|
||||
service:
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
|
||||
local nodeExporterPort = servicePort.newNamed('https', 9100, 'https');
|
||||
|
||||
service.new('node-exporter', $.nodeExporter.daemonset.spec.selector.matchLabels, nodeExporterPort) +
|
||||
service.mixin.metadata.withNamespace($._config.namespace) +
|
||||
service.mixin.metadata.withLabels({ 'k8s-app': 'node-exporter' }) +
|
||||
service.mixin.spec.withClusterIp('None'),
|
||||
},
|
||||
}
|
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,152 @@
|
|||
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
|
||||
|
||||
{
|
||||
_config+:: {
|
||||
namespace: 'default',
|
||||
|
||||
versions+:: {
|
||||
prometheusOperator: 'v0.19.0',
|
||||
},
|
||||
|
||||
imageRepos+:: {
|
||||
prometheusOperator: 'quay.io/coreos/prometheus-operator',
|
||||
},
|
||||
},
|
||||
|
||||
prometheusOperator+:: {
|
||||
// Prefixing with 0 to ensure these manifests are listed and therefore created first.
|
||||
'0alertmanagerCustomResourceDefinition': import 'alertmanager-crd.libsonnet',
|
||||
'0prometheusCustomResourceDefinition': import 'prometheus-crd.libsonnet',
|
||||
'0servicemonitorCustomResourceDefinition': import 'servicemonitor-crd.libsonnet',
|
||||
|
||||
clusterRoleBinding:
|
||||
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;
|
||||
|
||||
clusterRoleBinding.new() +
|
||||
clusterRoleBinding.mixin.metadata.withName('prometheus-operator') +
|
||||
clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
clusterRoleBinding.mixin.roleRef.withName('prometheus-operator') +
|
||||
clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
|
||||
clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-operator', namespace: $._config.namespace }]),
|
||||
|
||||
clusterRole:
|
||||
local clusterRole = k.rbac.v1.clusterRole;
|
||||
local policyRule = clusterRole.rulesType;
|
||||
|
||||
local extensionsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['extensions']) +
|
||||
policyRule.withResources([
|
||||
'thirdpartyresources',
|
||||
]) +
|
||||
policyRule.withVerbs(['*']);
|
||||
|
||||
local apiExtensionsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['apiextensions.k8s.io']) +
|
||||
policyRule.withResources([
|
||||
'customresourcedefinitions',
|
||||
]) +
|
||||
policyRule.withVerbs(['*']);
|
||||
|
||||
local monitoringRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['monitoring.coreos.com']) +
|
||||
policyRule.withResources([
|
||||
'alertmanagers',
|
||||
'prometheuses',
|
||||
'prometheuses/finalizers',
|
||||
'alertmanagers/finalizers',
|
||||
'servicemonitors',
|
||||
]) +
|
||||
policyRule.withVerbs(['*']);
|
||||
|
||||
local appsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['apps']) +
|
||||
policyRule.withResources([
|
||||
'statefulsets',
|
||||
]) +
|
||||
policyRule.withVerbs(['*']);
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'configmaps',
|
||||
'secrets',
|
||||
]) +
|
||||
policyRule.withVerbs(['*']);
|
||||
|
||||
local podRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'pods',
|
||||
]) +
|
||||
policyRule.withVerbs(['list', 'delete']);
|
||||
|
||||
local routingRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'services',
|
||||
'endpoints',
|
||||
]) +
|
||||
policyRule.withVerbs(['get', 'create', 'update']);
|
||||
|
||||
local nodeRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'nodes',
|
||||
]) +
|
||||
policyRule.withVerbs(['list', 'watch']);
|
||||
|
||||
local namespaceRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'namespaces',
|
||||
]) +
|
||||
policyRule.withVerbs(['list']);
|
||||
|
||||
local rules = [extensionsRule, apiExtensionsRule, monitoringRule, appsRule, coreRule, podRule, routingRule, nodeRule, namespaceRule];
|
||||
|
||||
clusterRole.new() +
|
||||
clusterRole.mixin.metadata.withName('prometheus-operator') +
|
||||
clusterRole.withRules(rules),
|
||||
|
||||
deployment:
|
||||
local deployment = k.apps.v1beta2.deployment;
|
||||
local container = k.apps.v1beta2.deployment.mixin.spec.template.spec.containersType;
|
||||
local containerPort = container.portsType;
|
||||
|
||||
local targetPort = 8080;
|
||||
local podLabels = { 'k8s-app': 'prometheus-operator' };
|
||||
|
||||
local operatorContainer =
|
||||
container.new('prometheus-operator', $._config.imageRepos.prometheusOperator + ':' + $._config.versions.prometheusOperator) +
|
||||
container.withPorts(containerPort.newNamed('http', targetPort)) +
|
||||
container.withArgs(['--kubelet-service=kube-system/kubelet', '--config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1']) +
|
||||
container.mixin.resources.withRequests({ cpu: '100m', memory: '50Mi' }) +
|
||||
container.mixin.resources.withLimits({ cpu: '200m', memory: '100Mi' });
|
||||
|
||||
deployment.new('prometheus-operator', 1, operatorContainer, podLabels) +
|
||||
deployment.mixin.metadata.withNamespace($._config.namespace) +
|
||||
deployment.mixin.metadata.withLabels(podLabels) +
|
||||
deployment.mixin.spec.selector.withMatchLabels(podLabels) +
|
||||
deployment.mixin.spec.template.spec.withNodeSelector({ 'beta.kubernetes.io/os': 'linux' }) +
|
||||
deployment.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
|
||||
deployment.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
|
||||
deployment.mixin.spec.template.spec.withServiceAccountName('prometheus-operator'),
|
||||
|
||||
serviceAccount:
|
||||
local serviceAccount = k.core.v1.serviceAccount;
|
||||
|
||||
serviceAccount.new('prometheus-operator') +
|
||||
serviceAccount.mixin.metadata.withNamespace($._config.namespace),
|
||||
|
||||
service:
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
|
||||
local poServicePort = servicePort.newNamed('http', 8080, 'http');
|
||||
|
||||
service.new('prometheus-operator', $.prometheusOperator.deployment.spec.selector.matchLabels, [poServicePort]) +
|
||||
service.mixin.metadata.withLabels({ 'k8s-app': 'prometheus-operator' }) +
|
||||
service.mixin.metadata.withNamespace($._config.namespace) +
|
||||
service.mixin.spec.withClusterIp('None'),
|
||||
},
|
||||
}
|
File diff suppressed because one or more lines are too long
|
@ -0,0 +1,454 @@
|
|||
local k = import 'ksonnet/ksonnet.beta.3/k.libsonnet';
|
||||
|
||||
{
|
||||
_config+:: {
|
||||
namespace: 'default',
|
||||
|
||||
versions+:: {
|
||||
prometheus: 'v2.2.1',
|
||||
},
|
||||
|
||||
imageRepos+:: {
|
||||
prometheus: 'quay.io/prometheus/prometheus',
|
||||
},
|
||||
|
||||
prometheus+:: {
|
||||
replicas: 2,
|
||||
rules: {},
|
||||
},
|
||||
},
|
||||
|
||||
prometheus+:: {
|
||||
serviceAccount:
|
||||
local serviceAccount = k.core.v1.serviceAccount;
|
||||
|
||||
serviceAccount.new('prometheus-k8s') +
|
||||
serviceAccount.mixin.metadata.withNamespace($._config.namespace),
|
||||
service:
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
|
||||
local prometheusPort = servicePort.newNamed('web', 9090, 'web');
|
||||
|
||||
service.new('prometheus-k8s', { app: 'prometheus', prometheus: 'k8s' }, prometheusPort) +
|
||||
service.mixin.metadata.withNamespace($._config.namespace) +
|
||||
service.mixin.metadata.withLabels({ prometheus: 'k8s' }),
|
||||
rules:
|
||||
local configMap = k.core.v1.configMap;
|
||||
|
||||
configMap.new('prometheus-k8s-rules', { 'all.rules.yaml': std.manifestYamlDoc($._config.prometheus.rules) }) +
|
||||
configMap.mixin.metadata.withLabels({ role: 'alert-rules', prometheus: 'k8s' }) +
|
||||
configMap.mixin.metadata.withNamespace($._config.namespace),
|
||||
roleBindingDefault:
|
||||
local roleBinding = k.rbac.v1.roleBinding;
|
||||
|
||||
roleBinding.new() +
|
||||
roleBinding.mixin.metadata.withName('prometheus-k8s') +
|
||||
roleBinding.mixin.metadata.withNamespace('default') +
|
||||
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
roleBinding.mixin.roleRef.withName('prometheus-k8s') +
|
||||
roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
|
||||
roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-k8s', namespace: $._config.namespace }]),
|
||||
clusterRole:
|
||||
local clusterRole = k.rbac.v1.clusterRole;
|
||||
local policyRule = clusterRole.rulesType;
|
||||
|
||||
local nodeMetricsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources(['nodes/metrics']) +
|
||||
policyRule.withVerbs(['get']);
|
||||
|
||||
local metricsRule = policyRule.new() +
|
||||
policyRule.withNonResourceUrls('/metrics') +
|
||||
policyRule.withVerbs(['get']);
|
||||
|
||||
local rules = [nodeMetricsRule, metricsRule];
|
||||
|
||||
clusterRole.new() +
|
||||
clusterRole.mixin.metadata.withName('prometheus-k8s') +
|
||||
clusterRole.withRules(rules),
|
||||
roleConfig:
|
||||
local role = k.rbac.v1.role;
|
||||
local policyRule = role.rulesType;
|
||||
|
||||
local configmapRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'configmaps',
|
||||
]) +
|
||||
policyRule.withVerbs(['get']);
|
||||
|
||||
role.new() +
|
||||
role.mixin.metadata.withName('prometheus-k8s-config') +
|
||||
role.mixin.metadata.withNamespace($._config.namespace) +
|
||||
role.withRules(configmapRule),
|
||||
roleBindingConfig:
|
||||
local roleBinding = k.rbac.v1.roleBinding;
|
||||
|
||||
roleBinding.new() +
|
||||
roleBinding.mixin.metadata.withName('prometheus-k8s-config') +
|
||||
roleBinding.mixin.metadata.withNamespace($._config.namespace) +
|
||||
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
roleBinding.mixin.roleRef.withName('prometheus-k8s-config') +
|
||||
roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
|
||||
roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-k8s', namespace: $._config.namespace }]),
|
||||
roleBindingNamespace:
|
||||
local roleBinding = k.rbac.v1.roleBinding;
|
||||
|
||||
roleBinding.new() +
|
||||
roleBinding.mixin.metadata.withName('prometheus-k8s') +
|
||||
roleBinding.mixin.metadata.withNamespace($._config.namespace) +
|
||||
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
roleBinding.mixin.roleRef.withName('prometheus-k8s') +
|
||||
roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
|
||||
roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-k8s', namespace: $._config.namespace }]),
|
||||
clusterRoleBinding:
|
||||
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;
|
||||
|
||||
clusterRoleBinding.new() +
|
||||
clusterRoleBinding.mixin.metadata.withName('prometheus-k8s') +
|
||||
clusterRoleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
clusterRoleBinding.mixin.roleRef.withName('prometheus-k8s') +
|
||||
clusterRoleBinding.mixin.roleRef.mixinInstance({ kind: 'ClusterRole' }) +
|
||||
clusterRoleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-k8s', namespace: $._config.namespace }]),
|
||||
roleKubeSystem:
|
||||
local role = k.rbac.v1.role;
|
||||
local policyRule = role.rulesType;
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'nodes',
|
||||
'services',
|
||||
'endpoints',
|
||||
'pods',
|
||||
]) +
|
||||
policyRule.withVerbs(['get', 'list', 'watch']);
|
||||
|
||||
role.new() +
|
||||
role.mixin.metadata.withName('prometheus-k8s') +
|
||||
role.mixin.metadata.withNamespace('kube-system') +
|
||||
role.withRules(coreRule),
|
||||
roleDefault:
|
||||
local role = k.rbac.v1.role;
|
||||
local policyRule = role.rulesType;
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'nodes',
|
||||
'services',
|
||||
'endpoints',
|
||||
'pods',
|
||||
]) +
|
||||
policyRule.withVerbs(['get', 'list', 'watch']);
|
||||
|
||||
role.new() +
|
||||
role.mixin.metadata.withName('prometheus-k8s') +
|
||||
role.mixin.metadata.withNamespace('default') +
|
||||
role.withRules(coreRule),
|
||||
roleBindingKubeSystem:
|
||||
local roleBinding = k.rbac.v1.roleBinding;
|
||||
|
||||
roleBinding.new() +
|
||||
roleBinding.mixin.metadata.withName('prometheus-k8s') +
|
||||
roleBinding.mixin.metadata.withNamespace('kube-system') +
|
||||
roleBinding.mixin.roleRef.withApiGroup('rbac.authorization.k8s.io') +
|
||||
roleBinding.mixin.roleRef.withName('prometheus-k8s') +
|
||||
roleBinding.mixin.roleRef.mixinInstance({ kind: 'Role' }) +
|
||||
roleBinding.withSubjects([{ kind: 'ServiceAccount', name: 'prometheus-k8s', namespace: $._config.namespace }]),
|
||||
roleNamespace:
|
||||
local role = k.rbac.v1.role;
|
||||
local policyRule = role.rulesType;
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups(['']) +
|
||||
policyRule.withResources([
|
||||
'nodes',
|
||||
'services',
|
||||
'endpoints',
|
||||
'pods',
|
||||
]) +
|
||||
policyRule.withVerbs(['get', 'list', 'watch']);
|
||||
|
||||
role.new() +
|
||||
role.mixin.metadata.withName('prometheus-k8s') +
|
||||
role.mixin.metadata.withNamespace($._config.namespace) +
|
||||
role.withRules(coreRule),
|
||||
prometheus:
|
||||
local container = k.core.v1.pod.mixin.spec.containersType;
|
||||
local resourceRequirements = container.mixin.resourcesType;
|
||||
local selector = k.apps.v1beta2.deployment.mixin.spec.selectorType;
|
||||
|
||||
local resources = resourceRequirements.new() +
|
||||
resourceRequirements.withRequests({ memory: '400Mi' });
|
||||
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'Prometheus',
|
||||
metadata: {
|
||||
name: 'k8s',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
prometheus: 'k8s',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
replicas: $._config.prometheus.replicas,
|
||||
version: $._config.versions.prometheus,
|
||||
baseImage: $._config.imageRepos.prometheus,
|
||||
serviceAccountName: 'prometheus-k8s',
|
||||
serviceMonitorSelector: selector.withMatchExpressions({ key: 'k8s-app', operator: 'Exists' }),
|
||||
nodeSelector: { 'beta.kubernetes.io/os': 'linux' },
|
||||
ruleSelector: selector.withMatchLabels({
|
||||
role: 'alert-rules',
|
||||
prometheus: 'k8s',
|
||||
}),
|
||||
resources: resources,
|
||||
alerting: {
|
||||
alertmanagers: [
|
||||
{
|
||||
namespace: $._config.namespace,
|
||||
name: 'alertmanager-main',
|
||||
port: 'web',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
serviceMonitorPrometheus:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'prometheus',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'prometheus',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
selector: {
|
||||
matchLabels: {
|
||||
prometheus: 'k8s',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'monitoring',
|
||||
],
|
||||
},
|
||||
endpoints: [
|
||||
{
|
||||
port: 'web',
|
||||
interval: '30s',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
serviceMonitorPrometheusOperator:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'prometheus-operator',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'prometheus-operator',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
endpoints: [
|
||||
{
|
||||
port: 'http',
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'k8s-app': 'prometheus-operator',
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
serviceMonitorKubeScheduler:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'kube-scheduler',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'kube-scheduler',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
endpoints: [
|
||||
{
|
||||
port: 'http-metrics',
|
||||
interval: '30s',
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'k8s-app': 'kube-scheduler',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'kube-system',
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
serviceMonitorKubelet:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'kubelet',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'kubelet',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
endpoints: [
|
||||
{
|
||||
port: 'https-metrics',
|
||||
scheme: 'https',
|
||||
interval: '30s',
|
||||
tlsConfig: {
|
||||
insecureSkipVerify: true,
|
||||
},
|
||||
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
|
||||
},
|
||||
{
|
||||
port: 'https-metrics',
|
||||
scheme: 'https',
|
||||
path: '/metrics/cadvisor',
|
||||
interval: '30s',
|
||||
honorLabels: true,
|
||||
tlsConfig: {
|
||||
insecureSkipVerify: true,
|
||||
},
|
||||
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'k8s-app': 'kubelet',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'kube-system',
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
serviceMonitorKubeControllerManager:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'kube-controller-manager',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'kube-controller-manager',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
endpoints: [
|
||||
{
|
||||
port: 'http-metrics',
|
||||
interval: '30s',
|
||||
},
|
||||
],
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'k8s-app': 'kube-controller-manager',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'kube-system',
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
serviceMonitorApiserver:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'kube-apiserver',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'apiserver',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'component',
|
||||
selector: {
|
||||
matchLabels: {
|
||||
component: 'apiserver',
|
||||
provider: 'kubernetes',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'default',
|
||||
],
|
||||
},
|
||||
endpoints: [
|
||||
{
|
||||
port: 'https',
|
||||
interval: '30s',
|
||||
scheme: 'https',
|
||||
tlsConfig: {
|
||||
caFile: '/var/run/secrets/kubernetes.io/serviceaccount/ca.crt',
|
||||
serverName: 'kubernetes',
|
||||
},
|
||||
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
serviceMonitorCoreDNS:
|
||||
{
|
||||
apiVersion: 'monitoring.coreos.com/v1',
|
||||
kind: 'ServiceMonitor',
|
||||
metadata: {
|
||||
name: 'coredns',
|
||||
namespace: $._config.namespace,
|
||||
labels: {
|
||||
'k8s-app': 'coredns',
|
||||
},
|
||||
},
|
||||
spec: {
|
||||
jobLabel: 'k8s-app',
|
||||
selector: {
|
||||
matchLabels: {
|
||||
'k8s-app': 'coredns',
|
||||
component: 'metrics',
|
||||
},
|
||||
},
|
||||
namespaceSelector: {
|
||||
matchNames: [
|
||||
'kube-system',
|
||||
],
|
||||
},
|
||||
endpoints: [
|
||||
{
|
||||
port: 'http-metrics',
|
||||
interval: '15s',
|
||||
bearerTokenFile: '/var/run/secrets/kubernetes.io/serviceaccount/token',
|
||||
},
|
||||
],
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
clusterRoleBinding.new() +
|
||||
clusterRoleBinding.mixin.metadata.withName("kube-state-metrics") +
|
||||
clusterRoleBinding.mixin.roleRef.withApiGroup("rbac.authorization.k8s.io") +
|
||||
clusterRoleBinding.mixin.roleRef.withName("kube-state-metrics") +
|
||||
clusterRoleBinding.mixin.roleRef.mixinInstance({kind: "ClusterRole"}) +
|
||||
clusterRoleBinding.withSubjects([{kind: "ServiceAccount", name: "kube-state-metrics", namespace: namespace}])
|
||||
}
|
|
@ -1,75 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local clusterRole = k.rbac.v1.clusterRole;
|
||||
local policyRule = clusterRole.rulesType;
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups([""]) +
|
||||
policyRule.withResources([
|
||||
"configmaps",
|
||||
"secrets",
|
||||
"nodes",
|
||||
"pods",
|
||||
"services",
|
||||
"resourcequotas",
|
||||
"replicationcontrollers",
|
||||
"limitranges",
|
||||
"persistentvolumeclaims",
|
||||
"persistentvolumes",
|
||||
"namespaces",
|
||||
"endpoints",
|
||||
]) +
|
||||
policyRule.withVerbs(["list", "watch"]);
|
||||
|
||||
local extensionsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(["extensions"]) +
|
||||
policyRule.withResources([
|
||||
"daemonsets",
|
||||
"deployments",
|
||||
"replicasets",
|
||||
]) +
|
||||
policyRule.withVerbs(["list", "watch"]);
|
||||
|
||||
local appsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(["apps"]) +
|
||||
policyRule.withResources([
|
||||
"statefulsets",
|
||||
]) +
|
||||
policyRule.withVerbs(["list", "watch"]);
|
||||
|
||||
local batchRule = policyRule.new() +
|
||||
policyRule.withApiGroups(["batch"]) +
|
||||
policyRule.withResources([
|
||||
"cronjobs",
|
||||
"jobs",
|
||||
]) +
|
||||
policyRule.withVerbs(["list", "watch"]);
|
||||
|
||||
local autoscalingRule = policyRule.new() +
|
||||
policyRule.withApiGroups(["autoscaling"]) +
|
||||
policyRule.withResources([
|
||||
"horizontalpodautoscalers",
|
||||
]) +
|
||||
policyRule.withVerbs(["list", "watch"]);
|
||||
|
||||
local authenticationRole = policyRule.new() +
|
||||
policyRule.withApiGroups(["authentication.k8s.io"]) +
|
||||
policyRule.withResources([
|
||||
"tokenreviews",
|
||||
]) +
|
||||
policyRule.withVerbs(["create"]);
|
||||
|
||||
local authorizationRole = policyRule.new() +
|
||||
policyRule.withApiGroups(["authorization.k8s.io"]) +
|
||||
policyRule.withResources([
|
||||
"subjectaccessreviews",
|
||||
]) +
|
||||
policyRule.withVerbs(["create"]);
|
||||
|
||||
local rules = [coreRule, extensionsRule, appsRule, batchRule, autoscalingRule, authenticationRole, authorizationRole];
|
||||
|
||||
{
|
||||
new()::
|
||||
clusterRole.new() +
|
||||
clusterRole.mixin.metadata.withName("kube-state-metrics") +
|
||||
clusterRole.withRules(rules)
|
||||
}
|
|
@ -1,86 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local deployment = k.apps.v1beta2.deployment;
|
||||
|
||||
local deployment = k.apps.v1beta2.deployment;
|
||||
local container = k.apps.v1beta2.deployment.mixin.spec.template.spec.containersType;
|
||||
local volume = k.apps.v1beta2.deployment.mixin.spec.template.spec.volumesType;
|
||||
local containerPort = container.portsType;
|
||||
local containerVolumeMount = container.volumeMountsType;
|
||||
local podSelector = deployment.mixin.spec.template.spec.selectorType;
|
||||
|
||||
local kubeStateMetricsVersion = "v1.3.0";
|
||||
local kubeRbacProxyVersion = "v0.3.0";
|
||||
local addonResizerVersion = "1.0";
|
||||
local podLabels = {"app": "kube-state-metrics"};
|
||||
|
||||
local proxyClusterMetrics =
|
||||
container.new("kube-rbac-proxy-main", "quay.io/coreos/kube-rbac-proxy:" + kubeRbacProxyVersion) +
|
||||
container.withArgs([
|
||||
"--secure-listen-address=:8443",
|
||||
"--upstream=http://127.0.0.1:8081/",
|
||||
]) +
|
||||
container.withPorts(containerPort.newNamed("https-main", 8443)) +
|
||||
container.mixin.resources.withRequests({cpu: "10m", memory: "20Mi"}) +
|
||||
container.mixin.resources.withLimits({cpu: "20m", memory: "40Mi"});
|
||||
|
||||
local proxySelfMetrics =
|
||||
container.new("kube-rbac-proxy-self", "quay.io/coreos/kube-rbac-proxy:" + kubeRbacProxyVersion) +
|
||||
container.withArgs([
|
||||
"--secure-listen-address=:9443",
|
||||
"--upstream=http://127.0.0.1:8082/",
|
||||
]) +
|
||||
container.withPorts(containerPort.newNamed("https-self", 9443)) +
|
||||
container.mixin.resources.withRequests({cpu: "10m", memory: "20Mi"}) +
|
||||
container.mixin.resources.withLimits({cpu: "20m", memory: "40Mi"});
|
||||
|
||||
local kubeStateMetrics =
|
||||
container.new("kube-state-metrics", "quay.io/coreos/kube-state-metrics:" + kubeStateMetricsVersion) +
|
||||
container.withArgs([
|
||||
"--host=127.0.0.1",
|
||||
"--port=8081",
|
||||
"--telemetry-host=127.0.0.1",
|
||||
"--telemetry-port=8082",
|
||||
]) +
|
||||
container.mixin.resources.withRequests({cpu: "102m", memory: "180Mi"}) +
|
||||
container.mixin.resources.withLimits({cpu: "102m", memory: "180Mi"});
|
||||
|
||||
local addonResizer =
|
||||
container.new("addon-resizer", "quay.io/coreos/addon-resizer:" + addonResizerVersion) +
|
||||
container.withCommand([
|
||||
"/pod_nanny",
|
||||
"--container=kube-state-metrics",
|
||||
"--cpu=100m",
|
||||
"--extra-cpu=2m",
|
||||
"--memory=150Mi",
|
||||
"--extra-memory=30Mi",
|
||||
"--threshold=5",
|
||||
"--deployment=kube-state-metrics",
|
||||
]) +
|
||||
container.withEnv([
|
||||
{
|
||||
name: "MY_POD_NAME",
|
||||
valueFrom: {
|
||||
fieldRef: {apiVersion: "v1", fieldPath: "metadata.name"}
|
||||
}
|
||||
}, {
|
||||
name: "MY_POD_NAMESPACE",
|
||||
valueFrom: {
|
||||
fieldRef: {apiVersion: "v1", fieldPath: "metadata.namespace"}
|
||||
}
|
||||
}
|
||||
]) +
|
||||
container.mixin.resources.withRequests({cpu: "10m", memory: "30Mi"}) +
|
||||
container.mixin.resources.withLimits({cpu: "10m", memory: "30Mi"});
|
||||
|
||||
local c = [proxyClusterMetrics, proxySelfMetrics, kubeStateMetrics, addonResizer];
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
deployment.new("kube-state-metrics", 1, c, podLabels) +
|
||||
deployment.mixin.metadata.withNamespace(namespace) +
|
||||
deployment.mixin.metadata.withLabels(podLabels) +
|
||||
deployment.mixin.spec.selector.withMatchLabels(podLabels) +
|
||||
deployment.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
|
||||
deployment.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
|
||||
deployment.mixin.spec.template.spec.withServiceAccountName("kube-state-metrics")
|
||||
}
|
|
@ -1,13 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local roleBinding = k.rbac.v1.roleBinding;
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
roleBinding.new() +
|
||||
roleBinding.mixin.metadata.withName("kube-state-metrics") +
|
||||
roleBinding.mixin.metadata.withNamespace(namespace) +
|
||||
roleBinding.mixin.roleRef.withApiGroup("rbac.authorization.k8s.io") +
|
||||
roleBinding.mixin.roleRef.withName("kube-state-metrics") +
|
||||
roleBinding.mixin.roleRef.mixinInstance({kind: "Role"}) +
|
||||
roleBinding.withSubjects([{kind: "ServiceAccount", name: "kube-state-metrics"}])
|
||||
}
|
|
@ -1,28 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local role = k.rbac.v1.role;
|
||||
local policyRule = role.rulesType;
|
||||
|
||||
local coreRule = policyRule.new() +
|
||||
policyRule.withApiGroups([""]) +
|
||||
policyRule.withResources([
|
||||
"pods",
|
||||
]) +
|
||||
policyRule.withVerbs(["get"]);
|
||||
|
||||
local extensionsRule = policyRule.new() +
|
||||
policyRule.withApiGroups(["extensions"]) +
|
||||
policyRule.withResources([
|
||||
"deployments",
|
||||
]) +
|
||||
policyRule.withVerbs(["get", "update"]) +
|
||||
policyRule.withResourceNames(["kube-state-metrics"]);
|
||||
|
||||
local rules = [coreRule, extensionsRule];
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
role.new() +
|
||||
role.mixin.metadata.withName("kube-state-metrics") +
|
||||
role.mixin.metadata.withNamespace(namespace) +
|
||||
role.withRules(rules)
|
||||
}
|
|
@ -1,8 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local serviceAccount = k.core.v1.serviceAccount;
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
serviceAccount.new("kube-state-metrics") +
|
||||
serviceAccount.mixin.metadata.withNamespace(namespace)
|
||||
}
|
|
@ -1,48 +0,0 @@
|
|||
{
|
||||
new(namespace)::
|
||||
{
|
||||
"apiVersion": "monitoring.coreos.com/v1",
|
||||
"kind": "ServiceMonitor",
|
||||
"metadata": {
|
||||
"name": "kube-state-metrics",
|
||||
"namespace": namespace,
|
||||
"labels": {
|
||||
"k8s-app": "kube-state-metrics"
|
||||
}
|
||||
},
|
||||
"spec": {
|
||||
"jobLabel": "k8s-app",
|
||||
"selector": {
|
||||
"matchLabels": {
|
||||
"k8s-app": "kube-state-metrics"
|
||||
}
|
||||
},
|
||||
"namespaceSelector": {
|
||||
"matchNames": [
|
||||
"monitoring"
|
||||
]
|
||||
},
|
||||
"endpoints": [
|
||||
{
|
||||
"port": "https-main",
|
||||
"scheme": "https",
|
||||
"interval": "30s",
|
||||
"honorLabels": true,
|
||||
"bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token",
|
||||
"tlsConfig": {
|
||||
"insecureSkipVerify": true
|
||||
}
|
||||
},
|
||||
{
|
||||
"port": "https-self",
|
||||
"scheme": "https",
|
||||
"interval": "30s",
|
||||
"bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token",
|
||||
"tlsConfig": {
|
||||
"insecureSkipVerify": true
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,15 +0,0 @@
|
|||
local k = import "ksonnet.beta.3/k.libsonnet";
|
||||
local service = k.core.v1.service;
|
||||
local servicePort = k.core.v1.service.mixin.spec.portsType;
|
||||
|
||||
local ksmDeployment = import "kube-state-metrics-deployment.libsonnet";
|
||||
|
||||
local ksmServicePortMain = servicePort.newNamed("https-main", 8443, "https-main");
|
||||
local ksmServicePortSelf = servicePort.newNamed("https-self", 9443, "https-self");
|
||||
|
||||
{
|
||||
new(namespace)::
|
||||
service.new("kube-state-metrics", ksmDeployment.new(namespace).spec.selector.matchLabels, [ksmServicePortMain, ksmServicePortSelf]) +
|
||||
service.mixin.metadata.withNamespace(namespace) +
|
||||
service.mixin.metadata.withLabels({"k8s-app": "kube-state-metrics"})
|
||||
}
|
|
@ -1,10 +0,0 @@
|
|||
{
|
||||
clusterRoleBinding:: import "kube-state-metrics-cluster-role-binding.libsonnet",
|
||||
clusterRole:: import "kube-state-metrics-cluster-role.libsonnet",
|
||||
deployment:: import "kube-state-metrics-deployment.libsonnet",
|
||||
roleBinding:: import "kube-state-metrics-role-binding.libsonnet",
|
||||
role:: import "kube-state-metrics-role.libsonnet",
|
||||
serviceAccount:: import "kube-state-metrics-service-account.libsonnet",
|
||||
service:: import "kube-state-metrics-service.libsonnet",
|
||||
serviceMonitor:: import "kube-state-metrics-service-monitor.libsonnet",
|
||||
}
|
|
@@ -1,12 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;

{
  new(namespace)::
    clusterRoleBinding.new() +
    clusterRoleBinding.mixin.metadata.withName("node-exporter") +
    clusterRoleBinding.mixin.roleRef.withApiGroup("rbac.authorization.k8s.io") +
    clusterRoleBinding.mixin.roleRef.withName("node-exporter") +
    clusterRoleBinding.mixin.roleRef.mixinInstance({kind: "ClusterRole"}) +
    clusterRoleBinding.withSubjects([{kind: "ServiceAccount", name: "node-exporter", namespace: namespace}])
}

@@ -1,26 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local clusterRole = k.rbac.v1.clusterRole;
local policyRule = clusterRole.rulesType;

local authenticationRole = policyRule.new() +
  policyRule.withApiGroups(["authentication.k8s.io"]) +
  policyRule.withResources([
    "tokenreviews",
  ]) +
  policyRule.withVerbs(["create"]);

local authorizationRole = policyRule.new() +
  policyRule.withApiGroups(["authorization.k8s.io"]) +
  policyRule.withResources([
    "subjectaccessreviews",
  ]) +
  policyRule.withVerbs(["create"]);

local rules = [authenticationRole, authorizationRole];

{
  new()::
    clusterRole.new() +
    clusterRole.mixin.metadata.withName("node-exporter") +
    clusterRole.withRules(rules)
}

@@ -1,58 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";

local daemonset = k.apps.v1beta2.daemonSet;
local container = daemonset.mixin.spec.template.spec.containersType;
local volume = daemonset.mixin.spec.template.spec.volumesType;
local containerPort = container.portsType;
local containerVolumeMount = container.volumeMountsType;
local podSelector = daemonset.mixin.spec.template.spec.selectorType;

local nodeExporterVersion = "v0.15.2";
local kubeRbacProxyVersion = "v0.3.0";
local podLabels = {"app": "node-exporter"};

local procVolumeName = "proc";
local procVolume = volume.fromHostPath(procVolumeName, "/proc");
local procVolumeMount = containerVolumeMount.new(procVolumeName, "/host/proc");

local sysVolumeName = "sys";
local sysVolume = volume.fromHostPath(sysVolumeName, "/sys");
local sysVolumeMount = containerVolumeMount.new(sysVolumeName, "/host/sys");

local nodeExporter =
  container.new("node-exporter", "quay.io/prometheus/node-exporter:" + nodeExporterVersion) +
  container.withArgs([
    "--web.listen-address=127.0.0.1:9101",
    "--path.procfs=/host/proc",
    "--path.sysfs=/host/sys",
  ]) +
  container.withVolumeMounts([procVolumeMount, sysVolumeMount]) +
  container.mixin.resources.withRequests({cpu: "102m", memory: "180Mi"}) +
  container.mixin.resources.withLimits({cpu: "102m", memory: "180Mi"});

local proxy =
  container.new("kube-rbac-proxy", "quay.io/coreos/kube-rbac-proxy:" + kubeRbacProxyVersion) +
  container.withArgs([
    "--secure-listen-address=:9100",
    "--upstream=http://127.0.0.1:9101/",
  ]) +
  container.withPorts(containerPort.newNamed("https", 9100)) +
  container.mixin.resources.withRequests({cpu: "10m", memory: "20Mi"}) +
  container.mixin.resources.withLimits({cpu: "20m", memory: "40Mi"});

local c = [nodeExporter, proxy];

{
  new(namespace)::
    daemonset.new() +
    daemonset.mixin.metadata.withName("node-exporter") +
    daemonset.mixin.metadata.withNamespace(namespace) +
    daemonset.mixin.metadata.withLabels(podLabels) +
    daemonset.mixin.spec.selector.withMatchLabels(podLabels) +
    daemonset.mixin.spec.template.metadata.withLabels(podLabels) +
    daemonset.mixin.spec.template.spec.withContainers(c) +
    daemonset.mixin.spec.template.spec.withVolumes([procVolume, sysVolume]) +
    daemonset.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
    daemonset.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
    daemonset.mixin.spec.template.spec.withServiceAccountName("node-exporter")
}

@@ -1,8 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local serviceAccount = k.core.v1.serviceAccount;

{
  new(namespace)::
    serviceAccount.new("node-exporter") +
    serviceAccount.mixin.metadata.withNamespace(namespace)
}

@@ -1,38 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "node-exporter",
        "namespace": namespace,
        "labels": {
          "k8s-app": "node-exporter"
        }
      },
      "spec": {
        "jobLabel": "k8s-app",
        "selector": {
          "matchLabels": {
            "k8s-app": "node-exporter"
          }
        },
        "namespaceSelector": {
          "matchNames": [
            "monitoring"
          ]
        },
        "endpoints": [
          {
            "port": "https",
            "scheme": "https",
            "interval": "30s",
            "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token",
            "tlsConfig": {
              "insecureSkipVerify": true
            }
          }
        ]
      }
    }
}

@@ -1,14 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

local nodeExporterDaemonset = import "node-exporter-daemonset.libsonnet";

local nodeExporterPort = servicePort.newNamed("https", 9100, "https");

{
  new(namespace)::
    service.new("node-exporter", nodeExporterDaemonset.new(namespace).spec.selector.matchLabels, nodeExporterPort) +
    service.mixin.metadata.withNamespace(namespace) +
    service.mixin.metadata.withLabels({"k8s-app": "node-exporter"})
}

@@ -1,8 +0,0 @@
{
  clusterRoleBinding:: import "node-exporter-cluster-role-binding.libsonnet",
  clusterRole:: import "node-exporter-cluster-role.libsonnet",
  daemonset:: import "node-exporter-daemonset.libsonnet",
  serviceAccount:: import "node-exporter-service-account.libsonnet",
  service:: import "node-exporter-service.libsonnet",
  serviceMonitor:: import "node-exporter-service-monitor.libsonnet",
}
@@ -1,12 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;

{
  new(namespace)::
    clusterRoleBinding.new() +
    clusterRoleBinding.mixin.metadata.withName("prometheus-operator") +
    clusterRoleBinding.mixin.roleRef.withApiGroup("rbac.authorization.k8s.io") +
    clusterRoleBinding.mixin.roleRef.withName("prometheus-operator") +
    clusterRoleBinding.mixin.roleRef.mixinInstance({kind: "ClusterRole"}) +
    clusterRoleBinding.withSubjects([{kind: "ServiceAccount", name: "prometheus-operator", namespace: namespace}])
}

@@ -1,81 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local clusterRole = k.rbac.v1.clusterRole;
local policyRule = clusterRole.rulesType;

local extensionsRule = policyRule.new() +
  policyRule.withApiGroups(["extensions"]) +
  policyRule.withResources([
    "thirdpartyresources",
  ]) +
  policyRule.withVerbs(["*"]);

local apiExtensionsRule = policyRule.new() +
  policyRule.withApiGroups(["apiextensions.k8s.io"]) +
  policyRule.withResources([
    "customresourcedefinitions",
  ]) +
  policyRule.withVerbs(["*"]);

local monitoringRule = policyRule.new() +
  policyRule.withApiGroups(["monitoring.coreos.com"]) +
  policyRule.withResources([
    "alertmanagers",
    "prometheuses",
    "prometheuses/finalizers",
    "alertmanagers/finalizers",
    "servicemonitors",
  ]) +
  policyRule.withVerbs(["*"]);

local appsRule = policyRule.new() +
  policyRule.withApiGroups(["apps"]) +
  policyRule.withResources([
    "statefulsets",
  ]) +
  policyRule.withVerbs(["*"]);

local coreRule = policyRule.new() +
  policyRule.withApiGroups([""]) +
  policyRule.withResources([
    "configmaps",
    "secrets",
  ]) +
  policyRule.withVerbs(["*"]);

local podRule = policyRule.new() +
  policyRule.withApiGroups([""]) +
  policyRule.withResources([
    "pods",
  ]) +
  policyRule.withVerbs(["list", "delete"]);

local routingRule = policyRule.new() +
  policyRule.withApiGroups([""]) +
  policyRule.withResources([
    "services",
    "endpoints",
  ]) +
  policyRule.withVerbs(["get", "create", "update"]);

local nodeRule = policyRule.new() +
  policyRule.withApiGroups([""]) +
  policyRule.withResources([
    "nodes",
  ]) +
  policyRule.withVerbs(["list", "watch"]);

local namespaceRule = policyRule.new() +
  policyRule.withApiGroups([""]) +
  policyRule.withResources([
    "namespaces",
  ]) +
  policyRule.withVerbs(["list"]);

local rules = [extensionsRule, apiExtensionsRule, monitoringRule, appsRule, coreRule, podRule, routingRule, nodeRule, namespaceRule];

{
  new()::
    clusterRole.new() +
    clusterRole.mixin.metadata.withName("prometheus-operator") +
    clusterRole.withRules(rules)
}

@@ -1,28 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";

local version = "v0.19.0";

local deployment = k.apps.v1beta2.deployment;
local container = k.apps.v1beta2.deployment.mixin.spec.template.spec.containersType;
local containerPort = container.portsType;

local targetPort = 8080;
local podLabels = {"k8s-app": "prometheus-operator"};

local operatorContainer =
  container.new("prometheus-operator", "quay.io/coreos/prometheus-operator:" + version) +
  container.withPorts(containerPort.newNamed("http", targetPort)) +
  container.withArgs(["--kubelet-service=kube-system/kubelet", "--config-reloader-image=quay.io/coreos/configmap-reload:v0.0.1"]) +
  container.mixin.resources.withRequests({cpu: "100m", memory: "50Mi"}) +
  container.mixin.resources.withLimits({cpu: "200m", memory: "100Mi"});

{
  new(namespace)::
    deployment.new("prometheus-operator", 1, operatorContainer, podLabels) +
    deployment.mixin.metadata.withNamespace(namespace) +
    deployment.mixin.metadata.withLabels(podLabels) +
    deployment.mixin.spec.selector.withMatchLabels(podLabels) +
    deployment.mixin.spec.template.spec.securityContext.withRunAsNonRoot(true) +
    deployment.mixin.spec.template.spec.securityContext.withRunAsUser(65534) +
    deployment.mixin.spec.template.spec.withServiceAccountName("prometheus-operator")
}

@@ -1,8 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local serviceAccount = k.core.v1.serviceAccount;

{
  new(namespace)::
    serviceAccount.new("prometheus-operator") +
    serviceAccount.mixin.metadata.withNamespace(namespace)
}

@@ -1,26 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "prometheus-operator",
        "namespace": namespace,
        "labels": {
          "k8s-app": "prometheus-operator"
        }
      },
      "spec": {
        "endpoints": [
          {
            "port": "http"
          }
        ],
        "selector": {
          "matchLabels": {
            "k8s-app": "prometheus-operator"
          }
        }
      }
    }
}

@@ -1,14 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

local poDeployment = import "prometheus-operator-deployment.libsonnet";

local poServicePort = servicePort.newNamed("http", 8080, "http");

{
  new(namespace)::
    service.new("prometheus-operator", poDeployment.new(namespace).spec.selector.matchLabels, [poServicePort]) +
    service.mixin.metadata.withNamespace(namespace)
}

@@ -1,8 +0,0 @@
{
  clusterRoleBinding:: import "prometheus-operator-cluster-role-binding.libsonnet",
  clusterRole:: import "prometheus-operator-cluster-role.libsonnet",
  deployment:: import "prometheus-operator-deployment.libsonnet",
  serviceAccount:: import "prometheus-operator-service-account.libsonnet",
  service:: import "prometheus-operator-service.libsonnet",
  serviceMonitor:: import "prometheus-operator-service-monitor.libsonnet",
}
@@ -1,12 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local clusterRoleBinding = k.rbac.v1.clusterRoleBinding;

{
  new(namespace)::
    clusterRoleBinding.new() +
    clusterRoleBinding.mixin.metadata.withName("prometheus-k8s") +
    clusterRoleBinding.mixin.roleRef.withApiGroup("rbac.authorization.k8s.io") +
    clusterRoleBinding.mixin.roleRef.withName("prometheus-k8s") +
    clusterRoleBinding.mixin.roleRef.mixinInstance({kind: "ClusterRole"}) +
    clusterRoleBinding.withSubjects([{kind: "ServiceAccount", name: "prometheus-k8s", namespace: namespace}])
}

@@ -1,21 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local clusterRole = k.rbac.v1.clusterRole;
local policyRule = clusterRole.rulesType;

local nodeMetricsRule = policyRule.new() +
  policyRule.withApiGroups([""]) +
  policyRule.withResources(["nodes/metrics"]) +
  policyRule.withVerbs(["get"]);

local metricsRule = policyRule.new() +
  policyRule.withNonResourceUrls("/metrics") +
  policyRule.withVerbs(["get"]);

local rules = [nodeMetricsRule, metricsRule];

{
  new()::
    clusterRole.new() +
    clusterRole.mixin.metadata.withName("prometheus-k8s") +
    clusterRole.withRules(rules)
}

@@ -1,5 +0,0 @@
local prometheusNamespaceRoleBinding = import "prometheus-namespace-role-binding.libsonnet";

{
  new(namespace):: prometheusNamespaceRoleBinding.new(namespace, namespace, "prometheus-k8s-config", "prometheus-k8s")
}

@@ -1,5 +0,0 @@
local prometheusNamespaceRoleBinding = import "prometheus-namespace-role-binding.libsonnet";

{
  new(namespace):: prometheusNamespaceRoleBinding.new(namespace, "default", "prometheus-k8s", "prometheus-k8s")
}

@@ -1,5 +0,0 @@
local prometheusNamespaceRoleBinding = import "prometheus-namespace-role-binding.libsonnet";

{
  new(namespace):: prometheusNamespaceRoleBinding.new(namespace, "kube-system", "prometheus-k8s", "prometheus-k8s")
}

@@ -1,5 +0,0 @@
local prometheusNamespaceRoleBinding = import "prometheus-namespace-role-binding.libsonnet";

{
  new(namespace):: prometheusNamespaceRoleBinding.new(namespace, namespace, "prometheus-k8s", "prometheus-k8s")
}

@@ -1,18 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local role = k.rbac.v1.role;
local policyRule = role.rulesType;

local configmapRule = policyRule.new() +
  policyRule.withApiGroups([""]) +
  policyRule.withResources([
    "configmaps",
  ]) +
  policyRule.withVerbs(["get"]);

{
  new(namespace)::
    role.new() +
    role.mixin.metadata.withName("prometheus-k8s-config") +
    role.mixin.metadata.withNamespace(namespace) +
    role.withRules(configmapRule),
}

@@ -1,5 +0,0 @@
local prometheusNamespaceRole = import "prometheus-namespace-role.libsonnet";

{
  new():: prometheusNamespaceRole.new("default")
}

@@ -1,5 +0,0 @@
local prometheusNamespaceRole = import "prometheus-namespace-role.libsonnet";

{
  new():: prometheusNamespaceRole.new("kube-system")
}

@@ -1,5 +0,0 @@
local prometheusNamespaceRole = import "prometheus-namespace-role.libsonnet";

{
  new(namespace):: prometheusNamespaceRole.new(namespace)
}

@@ -1,9 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local configMap = k.core.v1.configMap;

{
  new(namespace, ruleFiles)::
    configMap.new("prometheus-k8s-rules", ruleFiles) +
    configMap.mixin.metadata.withLabels({role: "alert-rules", prometheus: "k8s"}) +
    configMap.mixin.metadata.withNamespace(namespace)
}

@@ -1,8 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local serviceAccount = k.core.v1.serviceAccount;

{
  new(namespace)::
    serviceAccount.new("prometheus-k8s") +
    serviceAccount.mixin.metadata.withNamespace(namespace)
}
@@ -1,40 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "kube-apiserver",
        "namespace": namespace,
        "labels": {
          "k8s-app": "apiserver"
        }
      },
      "spec": {
        "jobLabel": "component",
        "selector": {
          "matchLabels": {
            "component": "apiserver",
            "provider": "kubernetes"
          }
        },
        "namespaceSelector": {
          "matchNames": [
            "default"
          ]
        },
        "endpoints": [
          {
            "port": "https",
            "interval": "30s",
            "scheme": "https",
            "tlsConfig": {
              "caFile": "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt",
              "serverName": "kubernetes"
            },
            "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token"
          }
        ]
      }
    }
}

@@ -1,35 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "coredns",
        "namespace": namespace,
        "labels": {
          "k8s-app": "coredns"
        },
      },
      "spec": {
        "jobLabel": "k8s-app",
        "selector": {
          "matchLabels": {
            "k8s-app": "coredns",
            "component": "metrics"
          }
        },
        "namespaceSelector": {
          "matchNames": [
            "kube-system"
          ]
        },
        "endpoints": [
          {
            "port": "http-metrics",
            "interval": "15s",
            "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token"
          }
        ]
      }
    }
}

@@ -1,33 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "kube-controller-manager",
        "namespace": namespace,
        "labels": {
          "k8s-app": "kube-controller-manager"
        }
      },
      "spec": {
        "jobLabel": "k8s-app",
        "endpoints": [
          {
            "port": "http-metrics",
            "interval": "30s"
          }
        ],
        "selector": {
          "matchLabels": {
            "k8s-app": "kube-controller-manager"
          }
        },
        "namespaceSelector": {
          "matchNames": [
            "kube-system"
          ]
        }
      }
    }
}

@@ -1,33 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "kube-scheduler",
        "namespace": namespace,
        "labels": {
          "k8s-app": "kube-scheduler"
        }
      },
      "spec": {
        "jobLabel": "k8s-app",
        "endpoints": [
          {
            "port": "http-metrics",
            "interval": "30s"
          }
        ],
        "selector": {
          "matchLabels": {
            "k8s-app": "kube-scheduler"
          }
        },
        "namespaceSelector": {
          "matchNames": [
            "kube-system"
          ]
        }
      }
    }
}

@@ -1,49 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "kubelet",
        "namespace": namespace,
        "labels": {
          "k8s-app": "kubelet"
        }
      },
      "spec": {
        "jobLabel": "k8s-app",
        "endpoints": [
          {
            "port": "https-metrics",
            "scheme": "https",
            "interval": "30s",
            "tlsConfig": {
              "insecureSkipVerify": true
            },
            "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token"
          },
          {
            "port": "https-metrics",
            "scheme": "https",
            "path": "/metrics/cadvisor",
            "interval": "30s",
            "honorLabels": true,
            "tlsConfig": {
              "insecureSkipVerify": true
            },
            "bearerTokenFile": "/var/run/secrets/kubernetes.io/serviceaccount/token"
          }
        ],
        "selector": {
          "matchLabels": {
            "k8s-app": "kubelet"
          }
        },
        "namespaceSelector": {
          "matchNames": [
            "kube-system"
          ]
        }
      }
    }
}

@@ -1,32 +0,0 @@
{
  new(namespace)::
    {
      "apiVersion": "monitoring.coreos.com/v1",
      "kind": "ServiceMonitor",
      "metadata": {
        "name": "prometheus",
        "namespace": namespace,
        "labels": {
          "k8s-app": "prometheus"
        }
      },
      "spec": {
        "selector": {
          "matchLabels": {
            "prometheus": "k8s"
          }
        },
        "namespaceSelector": {
          "matchNames": [
            "monitoring"
          ]
        },
        "endpoints": [
          {
            "port": "web",
            "interval": "30s"
          }
        ]
      }
    }
}
@@ -1,12 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local service = k.core.v1.service;
local servicePort = k.core.v1.service.mixin.spec.portsType;

local prometheusPort = servicePort.newNamed("web", 9090, "web");

{
  new(namespace)::
    service.new("prometheus-k8s", {app: "prometheus", prometheus: "k8s"}, prometheusPort) +
    service.mixin.metadata.withNamespace(namespace) +
    service.mixin.metadata.withLabels({prometheus: "k8s"})
}

@@ -1,43 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";

local container = k.core.v1.pod.mixin.spec.containersType;
local resourceRequirements = container.mixin.resourcesType;
local selector = k.apps.v1beta2.deployment.mixin.spec.selectorType;

local resources = resourceRequirements.new() +
  resourceRequirements.withRequests({memory: "400Mi"});

{
  new(namespace)::
    {
      apiVersion: "monitoring.coreos.com/v1",
      kind: "Prometheus",
      metadata: {
        name: "k8s",
        namespace: namespace,
        labels: {
          prometheus: "k8s",
        },
      },
      spec: {
        replicas: 2,
        version: "v2.2.1",
        serviceAccountName: "prometheus-k8s",
        serviceMonitorSelector: selector.withMatchExpressions({key: "k8s-app", operator: "Exists"}),
        ruleSelector: selector.withMatchLabels({
          role: "alert-rules",
          prometheus: "k8s",
        }),
        resources: resources,
        alerting: {
          alertmanagers: [
            {
              namespace: "monitoring",
              name: "alertmanager-main",
              port: "web",
            },
          ],
        },
      },
    }
}

@@ -1,13 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local roleBinding = k.rbac.v1.roleBinding;

{
  new(serviceAccountNamespace, namespace, roleName, serviceAccountName)::
    roleBinding.new() +
    roleBinding.mixin.metadata.withName(roleName) +
    roleBinding.mixin.metadata.withNamespace(namespace) +
    roleBinding.mixin.roleRef.withApiGroup("rbac.authorization.k8s.io") +
    roleBinding.mixin.roleRef.withName(roleName) +
    roleBinding.mixin.roleRef.mixinInstance({kind: "Role"}) +
    roleBinding.withSubjects([{kind: "ServiceAccount", name: serviceAccountName, namespace: serviceAccountNamespace}])
}

@@ -1,21 +0,0 @@
local k = import "ksonnet.beta.3/k.libsonnet";
local role = k.rbac.v1.role;
local policyRule = role.rulesType;

{
  new(namespace)::
    local coreRule = policyRule.new() +
      policyRule.withApiGroups([""]) +
      policyRule.withResources([
        "nodes",
        "services",
        "endpoints",
        "pods",
      ]) +
      policyRule.withVerbs(["get", "list", "watch"]);

    role.new() +
    role.mixin.metadata.withName("prometheus-k8s") +
    role.mixin.metadata.withNamespace(namespace) +
    role.withRules(coreRule)
}

@@ -1,22 +0,0 @@
{
  clusterRoleBinding:: import "prometheus-k8s-cluster-role-binding.libsonnet",
  clusterRole:: import "prometheus-k8s-cluster-role.libsonnet",
  roleBindingConfig:: import "prometheus-k8s-role-binding-config.libsonnet",
  roleBindingNamespace:: import "prometheus-k8s-role-binding-namespace.libsonnet",
  roleBindingKubeSystem:: import "prometheus-k8s-role-binding-kube-system.libsonnet",
  roleBindingDefault:: import "prometheus-k8s-role-binding-default.libsonnet",
  roleConfig:: import "prometheus-k8s-role-config.libsonnet",
  roleNamespace:: import "prometheus-k8s-role-namespace.libsonnet",
  roleKubeSystem:: import "prometheus-k8s-role-kube-system.libsonnet",
  roleDefault:: import "prometheus-k8s-role-default.libsonnet",
  rules:: import "prometheus-k8s-rules.libsonnet",
  serviceAccount:: import "prometheus-k8s-service-account.libsonnet",
  serviceMonitorApiserver:: import "prometheus-k8s-service-monitor-apiserver.libsonnet",
  serviceMonitorCoreDNS:: import "prometheus-k8s-service-monitor-coredns.libsonnet",
  serviceMonitorControllerManager:: import "prometheus-k8s-service-monitor-kube-controller-manager.libsonnet",
  serviceMonitorScheduler:: import "prometheus-k8s-service-monitor-kube-scheduler.libsonnet",
  serviceMonitorKubelet:: import "prometheus-k8s-service-monitor-kubelet.libsonnet",
  serviceMonitorPrometheus:: import "prometheus-k8s-service-monitor-prometheus.libsonnet",
  service:: import "prometheus-k8s-service.libsonnet",
  prometheus:: import "prometheus-k8s.libsonnet",
}
File diff suppressed because it is too large
File diff suppressed because it is too large
@@ -0,0 +1,236 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  creationTimestamp: null
  name: servicemonitors.monitoring.coreos.com
spec:
  group: monitoring.coreos.com
  names:
    kind: ServiceMonitor
    plural: servicemonitors
  scope: Namespaced
  validation:
    openAPIV3Schema:
      description: ServiceMonitor defines monitoring for a set of services.
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
          type: string
        spec:
          description: ServiceMonitorSpec contains specification parameters for a ServiceMonitor.
          properties:
            endpoints:
              description: A list of endpoints allowed as part of this ServiceMonitor.
              items:
                description: Endpoint defines a scrapeable endpoint serving Prometheus metrics.
                properties:
                  basicAuth:
                    description: 'BasicAuth allow an endpoint to authenticate over basic authentication More info: https://prometheus.io/docs/operating/configuration/#endpoints'
                    properties:
                      password:
                        description: SecretKeySelector selects a key of a Secret.
                        properties:
                          key:
                            description: The key of the secret to select from. Must be a valid secret key.
                            type: string
                          name:
                            description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
                            type: string
                          optional:
                            description: Specify whether the Secret or it's key must be defined
                            type: boolean
                        required:
                        - key
                      username:
                        description: SecretKeySelector selects a key of a Secret.
                        properties:
                          key:
                            description: The key of the secret to select from. Must be a valid secret key.
                            type: string
                          name:
                            description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
                            type: string
                          optional:
                            description: Specify whether the Secret or it's key must be defined
                            type: boolean
                        required:
                        - key
                  bearerTokenFile:
                    description: File to read bearer token for scraping targets.
                    type: string
                  honorLabels:
                    description: HonorLabels chooses the metric's labels on collisions with target labels.
                    type: boolean
                  interval:
                    description: Interval at which metrics should be scraped
                    type: string
                  metricRelabelings:
                    description: MetricRelabelConfigs to apply to samples before ingestion.
                    items:
                      description: 'RelabelConfig allows dynamic rewriting of the label set, being applied to samples before ingestion. It defines `<metric_relabel_configs>`-section of Prometheus configuration. More info: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs'
                      properties:
                        action:
                          description: Action to perform based on regex matching. Default is 'replace'
                          type: string
                        modulus:
                          description: Modulus to take of the hash of the source label values.
                          format: int64
                          type: integer
                        regex:
                          description: Regular expression against which the extracted value is matched. defailt is '(.*)'
                          type: string
                        replacement:
                          description: Replacement value against which a regex replace is performed if the regular expression matches. Regex capture groups are available. Default is '$1'
                          type: string
                        separator:
                          description: Separator placed between concatenated source label values. default is ';'.
                          type: string
                        sourceLabels:
                          description: The source labels select values from existing labels. Their content is concatenated using the configured separator and matched against the configured regular expression for the replace, keep, and drop actions.
                          items:
                            type: string
                          type: array
                        targetLabel:
                          description: Label to which the resulting value is written in a replace action. It is mandatory for replace actions. Regex capture groups are available.
                          type: string
                    type: array
                  params:
                    description: Optional HTTP URL parameters
                    type: object
                  path:
                    description: HTTP path to scrape for metrics.
                    type: string
                  port:
                    description: Name of the service port this endpoint refers to. Mutually exclusive with targetPort.
                    type: string
                  scheme:
                    description: HTTP scheme to use for scraping.
                    type: string
                  scrapeTimeout:
                    description: Timeout after which the scrape is ended
                    type: string
                  targetPort: {}
                  tlsConfig:
                    description: TLSConfig specifies TLS configuration parameters.
                    properties:
                      caFile:
                        description: The CA cert to use for the targets.
                        type: string
                      certFile:
                        description: The client cert file for the targets.
                        type: string
                      insecureSkipVerify:
                        description: Disable target certificate validation.
                        type: boolean
                      keyFile:
                        description: The client key file for the targets.
                        type: string
                      serverName:
                        description: Used to verify the hostname for the targets.
                        type: string
              type: array
            jobLabel:
              description: The label to use to retrieve the job name from.
              type: string
            namespaceSelector:
              description: A selector for selecting namespaces either selecting all namespaces or a list of namespaces.
              properties:
                any:
                  description: Boolean describing whether all namespaces are selected in contrast to a list restricting them.
                  type: boolean
                matchNames:
                  description: List of namespace names.
                  items:
                    type: string
                  type: array
            selector:
              description: A label selector is a label query over a set of resources. The result of matchLabels and matchExpressions are ANDed. An empty label selector matches all objects. A null label selector matches no objects.
              properties:
                matchExpressions:
                  description: matchExpressions is a list of label selector requirements. The requirements are ANDed.
                  items:
                    description: A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
                    properties:
                      key:
                        description: key is the label key that the selector applies to.
                        type: string
                      operator:
                        description: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
                        type: string
                      values:
                        description: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
                        items:
                          type: string
                        type: array
                    required:
                    - key
                    - operator
                  type: array
                matchLabels:
                  description: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
                  type: object
            targetLabels:
              description: TargetLabels transfers labels on the Kubernetes Service onto the target.
              items:
                type: string
              type: array
          required:
          - endpoints
          - selector
      required:
      - spec
  version: v1
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: null
@@ -31,6 +31,8 @@ spec:
        requests:
          cpu: 100m
          memory: 50Mi
      nodeSelector:
        beta.kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534

@@ -1,9 +1,12 @@
apiVersion: v1
kind: Service
metadata:
  labels:
    k8s-app: prometheus-operator
  name: prometheus-operator
  namespace: monitoring
spec:
  clusterIP: None
  ports:
  - name: http
    port: 8080

@@ -6,6 +6,9 @@ metadata:
  name: main
  namespace: monitoring
spec:
  baseImage: quay.io/prometheus/alertmanager
  nodeSelector:
    beta.kubernetes.io/os: linux
  replicas: 3
  serviceAccountName: alertmanager-main
  version: v0.14.0

@@ -1,8 +0,0 @@
apiVersion: v1
data:
  alertmanager.yaml: Z2xvYmFsOgogIHJlc29sdmVfdGltZW91dDogNW0Kcm91dGU6CiAgZ3JvdXBfYnk6IFsnam9iJ10KICBncm91cF93YWl0OiAzMHMKICBncm91cF9pbnRlcnZhbDogNW0KICByZXBlYXRfaW50ZXJ2YWw6IDEyaAogIHJlY2VpdmVyOiAnbnVsbCcKICByb3V0ZXM6CiAgLSBtYXRjaDoKICAgICAgYWxlcnRuYW1lOiBEZWFkTWFuc1N3aXRjaAogICAgcmVjZWl2ZXI6ICdudWxsJwpyZWNlaXZlcnM6Ci0gbmFtZTogJ251bGwnCg==
kind: Secret
metadata:
  name: alertmanager-main
  namespace: monitoring
type: Opaque

@@ -0,0 +1,8 @@
apiVersion: v1
data:
  alertmanager.yaml: Cmdsb2JhbDoKICByZXNvbHZlX3RpbWVvdXQ6IDVtCnJvdXRlOgogIGdyb3VwX2J5OiBbJ2pvYiddCiAgZ3JvdXBfd2FpdDogMzBzCiAgZ3JvdXBfaW50ZXJ2YWw6IDVtCiAgcmVwZWF0X2ludGVydmFsOiAxMmgKICByZWNlaXZlcjogJ251bGwnCiAgcm91dGVzOgogIC0gbWF0Y2g6CiAgICAgIGFsZXJ0bmFtZTogRGVhZE1hbnNTd2l0Y2gKICAgIHJlY2VpdmVyOiAnbnVsbCcKcmVjZWl2ZXJzOgotIG5hbWU6ICdudWxsJwo=
kind: Secret
metadata:
  name: alertmanager-main
  namespace: monitoring
type: Opaque
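The alertmanager-main Secret hunks above carry the Alertmanager configuration as a base64-encoded alertmanager.yaml value. As a hedged sketch only (not part of this commit, with the configuration abbreviated, and assuming jsonnet's std.base64 built-in), such a Secret could be generated from plain-text configuration rather than hand-encoding it:

```jsonnet
// Illustrative sketch: render the alertmanager-main Secret from a plain-text
// config string. The config below is abbreviated, not the exact value that
// the base64 payload in the diff decodes to.
local alertmanagerConfig = |||
  global:
    resolve_timeout: 5m
  route:
    group_by: ['job']
    receiver: 'null'
  receivers:
  - name: 'null'
|||;

{
  apiVersion: "v1",
  kind: "Secret",
  metadata: {
    name: "alertmanager-main",
    namespace: "monitoring",
  },
  type: "Opaque",
  data: {
    // std.base64 encodes the string, matching how the manifest stores it.
    "alertmanager.yaml": std.base64(alertmanagerConfig),
  },
}
```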
Some files were not shown because too many files have changed in this diff