Mirror of https://github.com/prometheus-operator/prometheus-operator.git (synced 2025-04-21 03:38:43 +00:00)
test: Bring up k8s cluster via tectonic-installer
parent 501f071233
commit bc2d4a0848

16 changed files with 363 additions and 207 deletions
Changed files:

Jenkinsfile
Makefile
pkg/k8sutil
scripts/jenkins
    Dockerfile
    Makefile
    docker-golang-env
    ed.sh
    kubectl-env
    kubernetes-vanilla.tfvars
    manifests/kops
    post-e2e-tests.sh
    run-e2e-tests.sh
    templates
test/e2e/framework
Jenkinsfile (12 changes, vendored)
@@ -66,6 +66,12 @@ job('po-tests-pr') {
             onlyIfBuildSucceeds(false)
             onlyIfBuildFails(false)
         }
+        postBuildScripts {
+            archiveArtifacts('build/**/*')
+            onlyIfBuildSucceeds(false)
+            onlyIfBuildFails(false)
+        }
+
         wsCleanup()
     }
 }

@@ -125,6 +131,11 @@ job('po-tests-master') {
             onlyIfBuildSucceeds(false)
             onlyIfBuildFails(false)
         }
+        postBuildScripts {
+            archiveArtifacts('build/**/*')
+            onlyIfBuildSucceeds(false)
+            onlyIfBuildFails(false)
+        }
         slackNotifier {
             room('#team-monitoring')
             teamDomain('coreos')

@@ -133,5 +144,6 @@ job('po-tests-master') {
             notifyRegression(true)
             notifyRepeatedFailure(true)
         }
+        wsCleanup()
     }
 }
Makefile (5 changes)
@@ -1,11 +1,12 @@
 REPO?=quay.io/coreos/prometheus-operator
 TAG?=$(shell git rev-parse --short HEAD)
 NAMESPACE?=po-e2e-$(shell LC_CTYPE=C tr -dc a-z0-9 < /dev/urandom | head -c 13 ; echo '')
+KUBECONFIG?=$(HOME)/.kube/config
 
 PROMU := $(GOPATH)/bin/promu
 PREFIX ?= $(shell pwd)
 
-CLUSTER_IP?=$(shell kubectl config view --minify | grep server: | cut -f 3 -d ":" | tr -d "//")
+CLUSTER_IP?=$(kubectl config view --minify | grep server: | cut -f 3 -d ":" | tr -d "//")
 
 pkgs = $(shell go list ./... | grep -v /vendor/ | grep -v /test/)

@@ -30,7 +31,7 @@ container:
	docker build -t $(REPO):$(TAG) .
 
 e2e-test:
-	go test -timeout 20m -v ./test/e2e/ $(TEST_RUN_ARGS) --kubeconfig "$(HOME)/.kube/config" --operator-image=$(REPO):$(TAG) --namespace=$(NAMESPACE) --cluster-ip=$(CLUSTER_IP)
+	go test -timeout 20m -v ./test/e2e/ $(TEST_RUN_ARGS) --kubeconfig=$(KUBECONFIG) --operator-image=$(REPO):$(TAG) --namespace=$(NAMESPACE) --cluster-ip=$(CLUSTER_IP)
 
 e2e-status:
	kubectl get prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods,cm,secrets,replicationcontrollers --all-namespaces
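With KUBECONFIG promoted to an overridable variable, the e2e suite can be pointed at any cluster. A minimal invocation sketch; the kubeconfig path below is illustrative, following the per-cluster layout run-e2e-tests.sh generates:

    # Override KUBECONFIG per invocation; REPO, TAG and NAMESPACE keep
    # their Makefile defaults unless overridden the same way.
    make e2e-test KUBECONFIG=$PWD/build/my-cluster/generated/auth/kubeconfig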
pkg/k8sutil

@@ -32,7 +32,7 @@ import (
 // WaitForTPRReady waits for a third party resource to be available
 // for use.
 func WaitForTPRReady(restClient rest.Interface, tprGroup, tprVersion, tprName string) error {
-	err := wait.Poll(3*time.Second, 30*time.Second, func() (bool, error) {
+	err := wait.Poll(3*time.Second, 5*time.Minute, func() (bool, error) {
 		res := restClient.Get().AbsPath("apis", tprGroup, tprVersion, tprName).Do()
 		err := res.Error()
 		if err != nil {
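Raising the poll timeout from 30 seconds to 5 minutes gives a freshly provisioned cluster time to register the third party resource. Outside of Go, the same readiness check can be approximated against the discovery path the code builds with AbsPath; the group, version and resource name below are illustrative:

    # Poll the TPR discovery endpoint until it answers, mirroring
    # wait.Poll(3*time.Second, 5*time.Minute) in WaitForTPRReady.
    timeout 300 sh -c 'until kubectl get --raw \
        "/apis/monitoring.coreos.com/v1alpha1/prometheuses" >/dev/null 2>&1; do
        sleep 3
    done'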
scripts/jenkins/Dockerfile (deleted)

@@ -1,38 +0,0 @@
-FROM golang:1.8.1-stretch
-
-ENV TERRAFORM_VERSION 0.8.7
-ENV KOPS_VERSION 1.5.1
-ENV DOCKER_VERSION 1.13.1
-
-RUN echo "deb http://ftp.debian.org/debian wheezy-backports main" >> /etc/apt/sources.list
-
-RUN apt-get update && apt-get install -y --no-install-recommends \
-    ca-certificates \
-    wget \
-    unzip \
-    python python-pip jq python-setuptools \
-    && rm -rf /var/lib/apt/lists/*
-
-RUN curl https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz | tar -xvz && \
-    mv docker/docker /usr/local/bin/docker && \
-    chmod +x /usr/local/bin/docker && \
-    rm -r docker
-
-RUN wget -q -O /terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" && \
-    unzip /terraform.zip -d /bin
-
-RUN wget -q -O /kops "https://github.com/kubernetes/kops/releases/download/${KOPS_VERSION}/kops-linux-amd64" && \
-    chmod +x /kops && \
-    mv /kops /bin
-
-RUN curl "https://s3.amazonaws.com/aws-cli/awscli-bundle.zip" -o "awscli-bundle.zip" && \
-    unzip awscli-bundle.zip && \
-    ./awscli-bundle/install -i /usr/local/aws -b /bin/aws && \
-    rm -r awscli-bundle awscli-bundle.zip
-
-RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
-    chmod +x ./kubectl && \
-    mv ./kubectl /bin/kubectl
-
-RUN pip install yq
scripts/jenkins/Makefile (deleted)

@@ -1,55 +0,0 @@
-CLUSTER_NAME ?= prom-test-$(shell whoami)
-DOMAIN ?= dev.coreos.systems
-AMOUNT_NODES = $$(($(shell cat manifests/kops/regular-ig.yaml | yq '.spec.minSize')+1))
-
-path ?= clusters/${CLUSTER_NAME}
-build_path := $(path)/.build
-aws_region = eu-west-1
-
-KOPS_CMD = kops --state $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_state_bucket)
-TERRAFORM_FLAGS = -var "dns_domain=$(DOMAIN)" -var "cluster_name=$(CLUSTER_NAME)" -state "$(build_path)/terraform.tfstate"
-
-all: check-deps gen-ssh cluster wait-for-cluster run-e2e-tests
-
-check-deps:
-	@which aws || echo "AWS cli is missing."
-	@which kops || echo "Kops is missing."
-	@which kubectl || echo "Kubectl is missing."
-	@which terraform || echo "Terraform is missing."
-	@which jq || echo "jq is missing."
-	@which yq || echo "yq is missing."
-
-clean: clean-cluster clean-aws-deps
-
-gen-ssh:
-	ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa -q
-
-aws-deps:
-	AWS_REGION=$(aws_region) terraform apply $(TERRAFORM_FLAGS) ./templates
-
-cluster: aws-deps
-	$(KOPS_CMD) get cluster | grep -v $(CLUSTER_NAME).$(DOMAIN) || \
-	$(KOPS_CMD) create cluster \
-		--name $(CLUSTER_NAME).$(DOMAIN) \
-		--cloud aws --zones $(aws_region)a --kubernetes-version 1.5.2 \
-		--master-size t2.medium --yes \
-		--master-security-groups $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_master_security_group) \
-		--node-security-groups $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_master_security_group) \
-		--vpc $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_main_vpc)
-	EDITOR='./ed.sh manifests/kops/regular-ig.yaml' $(KOPS_CMD) edit ig nodes
-	$(KOPS_CMD) update cluster --yes
-
-run-e2e-tests:
-	$(MAKE) -C ../../ e2e-test
-
-wait-for-cluster:
-	timeout 1800 ./wait-for-cluster.sh $(AMOUNT_NODES)
-
-clean-cluster:
-	$(KOPS_CMD) delete cluster --name $(CLUSTER_NAME).$(DOMAIN) --yes
-
-clean-aws-deps:
-	AWS_REGION=$(aws_region) terraform destroy -force $(TERRAFORM_FLAGS) ./templates
-	rm -f $(build_path)/terraform.tfstate*
-
-.PHONY: all check-deps clean gen-ssh aws-deps cluster run-e2e-tests wait-for-cluster clean-cluster clean-aws-deps
scripts/jenkins/docker-golang-env/Dockerfile (new file, 8 lines)
@@ -0,0 +1,8 @@
+FROM golang:1.8-stretch
+
+ENV DOCKER_VERSION 1.13.1
+
+RUN curl https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz | tar -xvz && \
+    mv docker/docker /usr/local/bin/docker && \
+    chmod +x /usr/local/bin/docker && \
+    rm -r docker
scripts/jenkins/ed.sh (deleted)

@@ -1,14 +0,0 @@
-#!/bin/bash
-
-# Kops requires user input through an editor to update a resource. Instead of
-# interacting with an editor, we give Kops a fake editor via the 'EDITOR' env
-# var. This editor always writes the content of file '$1' into file '$2'. In the
-# Makefile, before calling 'kops edit ig nodes' we set the 'EDITOR' env var to
-# this script with the wanted file as the first argument. The second argument,
-# which is the file that is supposed to be edited by the user, is passed in by
-# kops later.
-
-WANTED_FILE=$1
-TO_EDIT_FILE=$2
-
-cat $WANTED_FILE > $TO_EDIT_FILE
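The removed kops Makefile consumed this script by pointing EDITOR at it, which makes the otherwise interactive kops edit apply a prepared manifest non-interactively:

    # kops opens $EDITOR on a temp copy of the resource; ed.sh overwrites
    # that temp file with the prepared instance group manifest.
    EDITOR='./ed.sh manifests/kops/regular-ig.yaml' kops edit ig nodes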
scripts/jenkins/kubectl-env/Dockerfile (new file, 13 lines)
@@ -0,0 +1,13 @@
+FROM golang:1.8-stretch
+
+RUN echo "deb http://ftp.debian.org/debian wheezy-backports main" >> /etc/apt/sources.list
+
+RUN apt-get update && apt-get install -y --no-install-recommends \
+    ca-certificates \
+    unzip \
+    python python-pip jq \
+    && rm -rf /var/lib/apt/lists/*
+
+RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
+    chmod +x ./kubectl && \
+    mv ./kubectl /bin/kubectl
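run-e2e-tests.sh builds this image and runs kubectl and the e2e suite through it. A standalone sketch of that flow; the mount path and final command here are illustrative:

    docker build -t kubectl-env -f scripts/jenkins/kubectl-env/Dockerfile .
    docker run --rm \
        -v $PWD:/work -w /work \
        -e KUBECONFIG=${KUBECONFIG} \
        kubectl-env kubectl get nodes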
scripts/jenkins/kubernetes-vanilla.tfvars (new file, 230 lines)
@@ -0,0 +1,230 @@
+// The e-mail address used to login as the admin user to the Tectonic Console.
+//
+// Note: This field MUST be set manually prior to creating the cluster.
+tectonic_admin_email = "monitoring@coreos.com"
+
+// The bcrypt hash of admin user password to login to the Tectonic Console.
+// Use the bcrypt-hash tool (https://github.com/coreos/bcrypt-tool/releases/tag/v1.0.0) to generate it.
+//
+// Note: This field MUST be set manually prior to creating the cluster.
+tectonic_admin_password_hash = ""
+
+// (optional) Extra AWS tags to be applied to created autoscaling group resources.
+// This is a list of maps having the keys `key`, `value` and `propagate_at_launch`.
+//
+// Example: `[ { key = "foo", value = "bar", propagate_at_launch = true } ]`
+// tectonic_autoscaling_group_extra_tags = ""
+
+// Number of Availability Zones your EC2 instances will be deployed across.
+// This should be less than or equal to the total number available in the region.
+// Be aware that some regions only have 2.
+// If set, worker and master subnet CIDRs are calculated automatically.
+//
+// Note:
+// This field MUST be set manually prior to creating the cluster.
+// It MUST NOT be set if availability zones CIDRs are configured using `tectonic_aws_master_custom_subnets` and `tectonic_aws_worker_custom_subnets`.
+tectonic_aws_az_count = "2"
+
+// Instance size for the etcd node(s). Example: `t2.medium`.
+tectonic_aws_etcd_ec2_type = "t2.medium"
+
+// The amount of provisioned IOPS for the root block device of etcd nodes.
+tectonic_aws_etcd_root_volume_iops = "100"
+
+// The size of the volume in gigabytes for the root block device of etcd nodes.
+tectonic_aws_etcd_root_volume_size = "30"
+
+// The type of volume for the root block device of etcd nodes.
+tectonic_aws_etcd_root_volume_type = "gp2"
+
+// (optional) List of subnet IDs within an existing VPC to deploy master nodes into.
+// Required to use an existing VPC and the list must match the AZ count.
+//
+// Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
+// tectonic_aws_external_master_subnet_ids = ""
+
+// (optional) ID of an existing VPC to launch nodes into.
+// If unset a new VPC is created.
+//
+// Example: `vpc-123456`
+// tectonic_aws_external_vpc_id = ""
+
+// If set to true, create public facing ingress resources (ELB, A-records).
+// If set to false, a "private" cluster will be created with an internal ELB only.
+tectonic_aws_external_vpc_public = true
+
+// (optional) List of subnet IDs within an existing VPC to deploy worker nodes into.
+// Required to use an existing VPC and the list must match the AZ count.
+//
+// Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
+// tectonic_aws_external_worker_subnet_ids = ""
+
+// (optional) Extra AWS tags to be applied to created resources.
+// tectonic_aws_extra_tags = ""
+
+// (optional) This configures master availability zones and their corresponding subnet CIDRs directly.
+//
+// Example:
+// `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }`
+//
+// Note that `tectonic_aws_az_count` must be unset if this is specified.
+// tectonic_aws_master_custom_subnets = ""
+
+// Instance size for the master node(s). Example: `t2.medium`.
+tectonic_aws_master_ec2_type = "t2.medium"
+
+// The amount of provisioned IOPS for the root block device of master nodes.
+tectonic_aws_master_root_volume_iops = "100"
+
+// The size of the volume in gigabytes for the root block device of master nodes.
+tectonic_aws_master_root_volume_size = "30"
+
+// The type of volume for the root block device of master nodes.
+tectonic_aws_master_root_volume_type = "gp2"
+
+// The target AWS region for the cluster.
+tectonic_aws_region = "eu-west-2"
+
+// Name of an SSH key located within the AWS region. Example: coreos-user.
+tectonic_aws_ssh_key = "jenkins-tpo-ssh-key"
+
+// Block of IP addresses used by the VPC.
+// This should not overlap with any other networks, such as a private datacenter connected via Direct Connect.
+tectonic_aws_vpc_cidr_block = "10.0.0.0/16"
+
+// (optional) This configures worker availability zones and their corresponding subnet CIDRs directly.
+//
+// Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }`
+//
+// Note that `tectonic_aws_az_count` must be unset if this is specified.
+// tectonic_aws_worker_custom_subnets = ""
+
+// Instance size for the worker node(s). Example: `t2.medium`.
+tectonic_aws_worker_ec2_type = "t2.medium"
+
+// The amount of provisioned IOPS for the root block device of worker nodes.
+tectonic_aws_worker_root_volume_iops = "100"
+
+// The size of the volume in gigabytes for the root block device of worker nodes.
+tectonic_aws_worker_root_volume_size = "30"
+
+// The type of volume for the root block device of worker nodes.
+tectonic_aws_worker_root_volume_type = "gp2"
+
+// The base DNS domain of the cluster.
+//
+// Example: `openstack.dev.coreos.systems`.
+//
+// Note: This field MUST be set manually prior to creating the cluster.
+// This applies only to cloud platforms.
+tectonic_base_domain = "dev.coreos.systems"
+
+// (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate.
+// If left blank, a CA certificate will be automatically generated.
+// tectonic_ca_cert = ""
+
+// (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate.
+// This field is mandatory if `tectonic_ca_cert` is set.
+// tectonic_ca_key = ""
+
+// (optional) The algorithm used to generate tectonic_ca_key.
+// The default value is currently recommended.
+// This field is mandatory if `tectonic_ca_cert` is set.
+// tectonic_ca_key_alg = "RSA"
+
+// The Container Linux update channel.
+//
+// Examples: `stable`, `beta`, `alpha`
+tectonic_cl_channel = "stable"
+
+// This declares the IP range to assign Kubernetes pod IPs in CIDR notation.
+tectonic_cluster_cidr = "10.2.0.0/16"
+
+// The name of the cluster.
+// If used in a cloud-environment, this will be prepended to `tectonic_base_domain` resulting in the URL to the Tectonic console.
+//
+// Note: This field MUST be set manually prior to creating the cluster.
+// Set via env variable
+//tectonic_cluster_name = ""
+
+// (optional) DNS prefix used to construct the console and API server endpoints.
+// tectonic_dns_name = ""
+
+// (optional) The path of the file containing the CA certificate for TLS communication with etcd.
+//
+// Note: This works only when used in conjunction with an external etcd cluster.
+// If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_client_cert_path`, and `tectonic_etcd_client_key_path` must also be set.
+// tectonic_etcd_ca_cert_path = ""
+
+// (optional) The path of the file containing the client certificate for TLS communication with etcd.
+//
+// Note: This works only when used in conjunction with an external etcd cluster.
+// If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_key_path` must also be set.
+// tectonic_etcd_client_cert_path = ""
+
+// (optional) The path of the file containing the client key for TLS communication with etcd.
+//
+// Note: This works only when used in conjunction with an external etcd cluster.
+// If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_cert_path` must also be set.
+// tectonic_etcd_client_key_path = ""
+
+// The number of etcd nodes to be created.
+// If set to zero, the count of etcd nodes will be determined automatically.
+//
+// Note: This is currently only supported on AWS.
+tectonic_etcd_count = "0"
+
+// (optional) List of external etcd v3 servers to connect with (hostnames/IPs only).
+// Needs to be set if using an external etcd cluster.
+//
+// Example: `["etcd1", "etcd2", "etcd3"]`
+// tectonic_etcd_servers = ""
+
+// If set to true, experimental Tectonic assets are being deployed.
+tectonic_experimental = false
+
+// The Kubernetes service IP used to reach kube-apiserver inside the cluster
+// as returned by `kubectl -n default get service kubernetes`.
+tectonic_kube_apiserver_service_ip = "10.3.0.1"
+
+// The Kubernetes service IP used to reach kube-dns inside the cluster
+// as returned by `kubectl -n kube-system get service kube-dns`.
+tectonic_kube_dns_service_ip = "10.3.0.10"
+
+// The Kubernetes service IP used to reach self-hosted etcd inside the cluster
+// as returned by `kubectl -n kube-system get service etcd-service`.
+tectonic_kube_etcd_service_ip = "10.3.0.15"
+
+// The path to the tectonic license file.
+//
+// Note: This field MUST be set manually prior to creating the cluster.
+tectonic_license_path = "/go/src/github.com/coreos/tectonic-installer/license"
+
+// The number of master nodes to be created.
+// This applies only to cloud platforms.
+tectonic_master_count = "1"
+
+// The path to the pull secret file in JSON format.
+//
+// Note: This field MUST be set manually prior to creating the cluster.
+tectonic_pull_secret_path = "/go/src/github.com/coreos/tectonic-installer/secret"
+
+// This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation.
+tectonic_service_cidr = "10.3.0.0/16"
+
+// If set to true, a vanilla Kubernetes cluster will be deployed, omitting any Tectonic assets.
+tectonic_vanilla_k8s = true
+
+// The number of worker nodes to be created.
+// This applies only to cloud platforms.
+tectonic_worker_count = "3"
+
+tectonic_autoscaling_group_extra_tags = [
+    { key = "createdBy", value = "team-monitoring@coreos.com", propagate_at_launch = true },
+    { key = "expirationDate", value = "2017-01-01", propagate_at_launch = true }
+]
+
+tectonic_aws_extra_tags = {
+    "createdBy"="team-monitoring@coreos.com",
+    "expirationDate"="2017-01-01"
+}
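run-e2e-tests.sh consumes this file by copying it into the per-cluster build directory that tectonic-installer reads its terraform.tfvars from; the cluster name and DNS name are injected separately through TF_VAR_* environment variables:

    CLUSTER="po-$(git rev-parse --short HEAD)-${BUILD_ID}"
    mkdir -p build/${CLUSTER}
    cp scripts/jenkins/kubernetes-vanilla.tfvars build/${CLUSTER}/terraform.tfvars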
scripts/jenkins/manifests/kops/regular-ig.yaml (deleted)

@@ -1,14 +0,0 @@
-apiVersion: kops/v1alpha2
-kind: InstanceGroup
-metadata:
-  name: nodes
-spec:
-  associatePublicIp: true
-  machineType: t2.medium
-  maxSize: 2
-  minSize: 2
-  nodeLabels:
-    isolation: none
-  role: Node
-  zones:
-  - eu-west-1a
scripts/jenkins/post-e2e-tests.sh

@@ -8,14 +8,26 @@ set -u
 # print each command before executing it
 set -x
 
-PO_GOPATH=/go/src/github.com/coreos/prometheus-operator
+export {TF_GET_OPTIONS,TF_PLAN_OPTIONS,TF_APPLY_OPTIONS,TF_DESTROY_OPTIONS}="-no-color"
+
+CLUSTER="po-$(git rev-parse --short HEAD)-${BUILD_ID}"
+TF_VAR_tectonic_cluster_name="${CLUSTER}"
+TF_VAR_tectonic_dns_name="${CLUSTER}"
+TECTONIC_INSTALLER_DIR=/go/src/github.com/coreos/tectonic-installer
 
 docker run \
     --rm \
-    -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
-    -v $PWD:$PO_GOPATH \
-    -w $PO_GOPATH/scripts/jenkins \
-    cluster-setup-env \
-    /bin/bash -c "make clean"
+    -v $PWD/build/:$TECTONIC_INSTALLER_DIR/build/ \
+    -v ~/.ssh:$HOME/.ssh \
+    -e AWS_ACCESS_KEY_ID \
+    -e AWS_SECRET_ACCESS_KEY \
+    -e TF_GET_OPTIONS \
+    -e TF_DESTROY_OPTIONS \
+    -e CLUSTER=${CLUSTER} \
+    -w $TECTONIC_INSTALLER_DIR \
+    -e TF_VAR_tectonic_cluster_name=${TF_VAR_tectonic_cluster_name} \
+    -e TF_VAR_tectonic_dns_name=${TF_VAR_tectonic_dns_name} \
+    quay.io/coreos/tectonic-installer:master \
+    /bin/bash -c "make destroy || make destroy || make destroy"
 
 docker rmi quay.io/coreos/prometheus-operator-dev:$BUILD_ID
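The chained "make destroy || make destroy || make destroy" retries teardown up to three times, since terraform destroy runs against AWS occasionally fail transiently. An equivalent loop, as a sketch:

    # Retry teardown up to three times before giving up.
    for attempt in 1 2 3; do
        make destroy && break
    done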
scripts/jenkins/run-e2e-tests.sh

@@ -8,25 +8,79 @@ set -u
 # print each command before executing it
 set -x
 
 
 # Push docker image
 
 DOCKER_SOCKET=/var/run/docker.sock
 PO_QUAY_REPO=quay.io/coreos/prometheus-operator-dev
 
-docker build -t cluster-setup-env scripts/jenkins/.
+docker build -t docker-golang-env -f scripts/jenkins/docker-golang-env/Dockerfile .
 
 docker run \
     --rm \
     -v $PWD:$PWD -v $DOCKER_SOCKET:$DOCKER_SOCKET \
-    cluster-setup-env \
+    docker-golang-env \
     /bin/bash -c "cd $PWD && make crossbuild"
 
 docker build -t $PO_QUAY_REPO:$BUILD_ID .
 docker login -u="$QUAY_ROBOT_USERNAME" -p="$QUAY_ROBOT_SECRET" quay.io
 docker push $PO_QUAY_REPO:$BUILD_ID
 
 
 # Bring up k8s cluster
 
+export {TF_GET_OPTIONS,TF_PLAN_OPTIONS,TF_APPLY_OPTIONS,TF_DESTROY_OPTIONS}="-no-color"
+
+CLUSTER="po-$(git rev-parse --short HEAD)-${BUILD_ID}"
+TF_VAR_tectonic_cluster_name="${CLUSTER}"
+TF_VAR_tectonic_dns_name="${CLUSTER}"
+TECTONIC_INSTALLER_DIR=/go/src/github.com/coreos/tectonic-installer
+PO_DIR=/go/src/github.com/coreos/prometheus-operator
+KUBECONFIG="${PO_DIR}/build/${CLUSTER}/generated/auth/kubeconfig"
+
+mkdir -p build/${CLUSTER}
+cp ${WORKSPACE}/scripts/jenkins/kubernetes-vanilla.tfvars build/${CLUSTER}/terraform.tfvars
+
 docker run \
     --rm \
-    -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY \
-    -e REPO=$PO_QUAY_REPO -e TAG=$BUILD_ID \
-    -v $PWD:/go/src/github.com/coreos/prometheus-operator \
-    -w /go/src/github.com/coreos/prometheus-operator/scripts/jenkins \
-    cluster-setup-env \
-    /bin/bash -c "make"
+    -v $PWD/build/:$TECTONIC_INSTALLER_DIR/build/ \
+    -v ~/.ssh:$HOME/.ssh \
+    -e AWS_ACCESS_KEY_ID \
+    -e AWS_SECRET_ACCESS_KEY \
+    -e TF_GET_OPTIONS \
+    -e TF_PLAN_OPTIONS \
+    -e TF_APPLY_OPTIONS \
+    -e CLUSTER=${CLUSTER} \
+    -e TF_VAR_tectonic_cluster_name=${TF_VAR_tectonic_cluster_name} \
+    -e TF_VAR_tectonic_dns_name=${TF_VAR_tectonic_dns_name} \
+    -w $TECTONIC_INSTALLER_DIR \
+    quay.io/coreos/tectonic-installer:master \
+    /bin/bash -c "touch license secret && make plan && make apply"
+
+docker build \
+    -t kubectl-env \
+    -f scripts/jenkins/kubectl-env/Dockerfile \
+    .
+
+sleep 5m
+docker run \
+    --rm \
+    -v $PWD:$PO_DIR \
+    -w $PO_DIR \
+    -e KUBECONFIG=${KUBECONFIG} \
+    kubectl-env \
+    /bin/bash -c "timeout 900 ./scripts/jenkins/wait-for-cluster.sh 4"
+
+
+# Run e2e tests
+
+docker run \
+    --rm \
+    -v $PWD:$PO_DIR \
+    -w $PO_DIR \
+    -e KUBECONFIG=${KUBECONFIG} \
+    -e REPO=$PO_QUAY_REPO \
+    -e TAG=$BUILD_ID \
+    kubectl-env \
+    /bin/bash -c "make e2e-test"
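wait-for-cluster.sh itself is not part of this diff. Judging from the invocation (a 900-second timeout and an expected node count of 4, matching one master plus the three workers from kubernetes-vanilla.tfvars), a plausible reconstruction is a kubectl polling loop; this sketch is hypothetical and the real script may differ:

    #!/bin/bash
    # Hypothetical sketch: block until $1 nodes report Ready.
    EXPECTED_NODES=$1
    until [ "$(kubectl get nodes | grep -cw Ready)" -ge "$EXPECTED_NODES" ]; do
        sleep 10
    done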
scripts/jenkins/templates (deleted Terraform definitions)

@@ -1,56 +0,0 @@
-variable "dns_domain" {}
-
-variable "cluster_name" {}
-
-data "aws_route53_zone" "monitoring_zone" {
-  name = "${var.dns_domain}"
-}
-
-resource "aws_route53_zone" "cluster_zone" {
-  name = "${var.cluster_name}.${var.dns_domain}"
-}
-
-resource "aws_route53_record" "cluster_zone_record" {
-  name    = "${var.cluster_name}.${var.dns_domain}"
-  zone_id = "${data.aws_route53_zone.monitoring_zone.zone_id}"
-  type    = "NS"
-  ttl     = "300"
-  records = ["${aws_route53_zone.cluster_zone.name_servers}"]
-}
-
-resource "aws_s3_bucket" "kops-state" {
-  bucket = "kops-${sha1("${var.cluster_name}-${var.dns_domain}")}"
-}
-
-resource "aws_security_group" "allow_all" {
-  name        = "allow_all"
-  description = "Allow all inbound traffic"
-  vpc_id      = "${aws_vpc.main.id}"
-
-  ingress {
-    from_port   = 30000
-    to_port     = 32767
-    protocol    = "tcp"
-    cidr_blocks = ["0.0.0.0/0"]
-  }
-
-  ingress {
-    from_port   = 80
-    to_port     = 80
-    protocol    = "tcp"
-    cidr_blocks = ["0.0.0.0/0"]
-  }
-
-  tags {
-    Name = "allow_all"
-  }
-}
-
-resource "aws_vpc" "main" {
-  cidr_block = "172.20.0.0/16"
-}
-
-resource "aws_internet_gateway" "gw" {
-  vpc_id = "${aws_vpc.main.id}"
-}
scripts/jenkins/templates (deleted Terraform outputs)

@@ -1,11 +0,0 @@
-output "kops_state_bucket" {
-  value = "s3://${aws_s3_bucket.kops-state.id}"
-}
-
-output "kops_master_security_group" {
-  value = "${aws_security_group.allow_all.id}"
-}
-
-output "kops_main_vpc" {
-  value = "${aws_vpc.main.id}"
-}
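The removed scripts/jenkins Makefile consumed these outputs via terraform output against the same state file, for example:

    # Read the kops state bucket out of the Terraform state, as the
    # removed kops Makefile's KOPS_CMD definition did.
    terraform output -state "clusters/${CLUSTER_NAME}/.build/terraform.tfstate" kops_state_bucket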
test/e2e/framework

@@ -21,12 +21,14 @@ import (
 	rbacv1alpha1 "k8s.io/client-go/pkg/apis/rbac/v1alpha1"
 )
 
-func CreateClusterRoleBinding(kubeClient kubernetes.Interface, relativePath string) error {
+func CreateClusterRoleBinding(kubeClient kubernetes.Interface, ns string, relativePath string) error {
 	clusterRoleBinding, err := parseClusterRoleBindingYaml(relativePath)
 	if err != nil {
 		return err
 	}
 
+	clusterRoleBinding.Subjects[0].Namespace = ns
+
 	_, err = kubeClient.RbacV1alpha1().ClusterRoleBindings().Get(clusterRoleBinding.Name, metav1.GetOptions{})
 
 	if err == nil {
@@ -94,7 +94,19 @@ func (f *Framework) setup(opImage string) error {
 }
 
 func (f *Framework) setupPrometheusOperator(opImage string) error {
-	deploy, err := MakeDeployment("../../example/non-rbac/prometheus-operator.yaml")
+	if err := CreateServiceAccount(f.KubeClient, f.Namespace.Name, "../../example/rbac/prometheus-operator/prometheus-operator-service-account.yaml"); err != nil {
+		return errors.Wrap(err, "failed to create prometheus operator service account")
+	}
+
+	if err := CreateClusterRole(f.KubeClient, "../../example/rbac/prometheus-operator/prometheus-operator-cluster-role.yaml"); err != nil {
+		return errors.Wrap(err, "failed to create prometheus operator cluster role")
+	}
+
+	if err := CreateClusterRoleBinding(f.KubeClient, f.Namespace.Name, "../../example/rbac/prometheus-operator/prometheus-operator-cluster-role-binding.yaml"); err != nil {
+		return errors.Wrap(err, "failed to create prometheus operator cluster role binding")
+	}
+
+	deploy, err := MakeDeployment("../../example/rbac/prometheus-operator/prometheus-operator.yaml")
 	if err != nil {
 		return err
 	}
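Outside the Go framework, the same RBAC bootstrap can be done with kubectl. A sketch; the namespace is illustrative, and note that the framework additionally rewrites the binding's subject namespace (as in the hunk above), which a plain kubectl apply does not do:

    NS=po-e2e-example
    kubectl -n $NS apply -f example/rbac/prometheus-operator/prometheus-operator-service-account.yaml
    kubectl apply -f example/rbac/prometheus-operator/prometheus-operator-cluster-role.yaml
    kubectl apply -f example/rbac/prometheus-operator/prometheus-operator-cluster-role-binding.yaml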