1
0
Fork 0
mirror of https://github.com/prometheus-operator/prometheus-operator.git synced 2025-04-21 03:38:43 +00:00

test: Setup jenkins ci

This commit is contained in:
Max Leonard Inden 2017-02-14 17:54:37 -08:00
parent 2033741c93
commit cb58d995e7
No known key found for this signature in database
GPG key ID: 5403C5464810BC26
12 changed files with 306 additions and 4 deletions

60
Jenkinsfile vendored Normal file
View file

@@ -0,0 +1,60 @@
// Jenkins Job DSL definition for the prometheus-operator end-to-end tests.
// Builds the operator image, pushes it to quay.io and runs the e2e suite
// against a throwaway AWS cluster (provisioned via scripts/jenkins/).
job('e2e-tests') {
    concurrentBuild()

    scm {
        git {
            remote {
                github('coreos/prometheus-operator')
                // Fetch pull request heads so PR commits can be built.
                refspec('+refs/pull/*:refs/remotes/origin/pr/*')
            }
            // ${sha1} is supplied by the GitHub pull request trigger below.
            branch('${sha1}')
        }
    }

    wrappers {
        credentialsBinding {
            // AWS credentials used to provision the test cluster.
            amazonWebServicesCredentialsBinding {
                accessKeyVariable('AWS_ACCESS_KEY_ID')
                secretKeyVariable('AWS_SECRET_ACCESS_KEY')
                credentialsId('Jenkins-Monitoring-AWS-User')
            }
            // Robot account used to push the dev image to quay.io.
            usernamePassword('QUAY_ROBOT_USERNAME', 'QUAY_ROBOT_SECRET', 'quay_robot')
        }
    }

    triggers {
        githubPullRequest {
            useGitHubHooks()
            orgWhitelist(['coreos-inc'])
        }
    }

    // Build the helper image carrying the cluster provisioning tools.
    steps {
        shell('docker build -t cluster-setup-env scripts/jenkins/.')
    }

    // Cross-compile the operator inside the helper image, reusing the
    // host docker daemon via the bind-mounted socket.
    steps {
        shell('docker run --rm -v /var/jenkins/workspace/e2e-playground:/var/jenkins/workspace/e2e-playground -v /var/run/docker.sock:/var/run/docker.sock cluster-setup-env /bin/bash -c "cd /var/jenkins/workspace/e2e-playground && make crossbuild"')
    }

    // Build and publish a per-build operator image tagged with $BUILD_ID.
    steps {
        shell('docker build -t quay.io/coreos/prometheus-operator-dev:$BUILD_ID .')
        shell('docker login -u="$QUAY_ROBOT_USERNAME" -p="$QUAY_ROBOT_SECRET" quay.io')
        shell('docker push quay.io/coreos/prometheus-operator-dev:$BUILD_ID')
    }

    // Provision the cluster and run the e2e tests against the pushed image.
    steps {
        shell('docker run --rm -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -v $PWD:/go/src/github.com/coreos/prometheus-operator cluster-setup-env /bin/bash -c "cd /go/src/github.com/coreos/prometheus-operator/scripts/jenkins && REPO=quay.io/coreos/prometheus-operator-dev TAG=$BUILD_ID make"')
    }

    publishers {
        postBuildScripts {
            steps {
                // Always tear the cluster down and drop the local image,
                // regardless of the build result (both flags set to false).
                shell('docker run --rm -e AWS_ACCESS_KEY_ID -e AWS_SECRET_ACCESS_KEY -v $PWD:/go/src/github.com/coreos/prometheus-operator cluster-setup-env /bin/bash -c "cd /go/src/github.com/coreos/prometheus-operator/scripts/jenkins && make clean"')
                shell('docker rmi quay.io/coreos/prometheus-operator-dev:$BUILD_ID')
            }
            onlyIfBuildSucceeds(false)
            onlyIfBuildFails(false)
        }
    }
}

View file

@@ -5,7 +5,7 @@ NAMESPACE?=prometheus-operator-e2e-tests-$(shell LC_CTYPE=C tr -dc a-z0-9 < /dev
PROMU := $(GOPATH)/bin/promu
PREFIX ?= $(shell pwd)
CLUSTER_IP?=$(shell minikube ip)
CLUSTER_IP?=$(shell kubectl config view --minify | grep server: | cut -f 3 -d ":" | tr -d "//")
pkgs = $(shell go list ./... | grep -v /vendor/ | grep -v /test/)
@@ -30,7 +30,7 @@ container:
docker build -t $(REPO):$(TAG) .
e2e-test:
go test -timeout 20m -v ./test/e2e/ $(TEST_RUN_ARGS) --kubeconfig "$(HOME)/.kube/config" --operator-image=quay.io/coreos/prometheus-operator:$(TAG) --namespace=$(NAMESPACE) --cluster-ip=$(CLUSTER_IP)
go test -timeout 20m -v ./test/e2e/ $(TEST_RUN_ARGS) --kubeconfig "$(HOME)/.kube/config" --operator-image=$(REPO):$(TAG) --namespace=$(NAMESPACE) --cluster-ip=$(CLUSTER_IP)
e2e-status:
kubectl get prometheus,alertmanager,servicemonitor,statefulsets,deploy,svc,endpoints,pods,cm,replicationcontrollers --all-namespaces

View file

@@ -0,0 +1,38 @@
# Build environment for the Jenkins e2e cluster setup: a Go toolchain plus
# the docker client, terraform, kops, the AWS cli, kubectl, jq and yq.
FROM golang:1.7.5-wheezy
# Pinned tool versions.
ENV TERRAFORM_VERSION 0.8.7
ENV KOPS_VERSION 1.5.1
ENV DOCKER_VERSION 1.13.1
# Enable wheezy-backports for more recent packages.
RUN echo "deb http://ftp.debian.org/debian wheezy-backports main" >> /etc/apt/sources.list
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
wget \
unzip \
python python-pip jq \
&& rm -rf /var/lib/apt/lists/*
# Install only the docker client binary; the daemon socket is bind-mounted in
# by the Jenkins job. NOTE(review): curl is not in the apt list above —
# presumably inherited from the golang base image; confirm.
RUN curl https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz | tar -xvz && \
mv docker/docker /usr/local/bin/docker && \
chmod +x /usr/local/bin/docker && \
rm -r docker
RUN wget -q -O /terraform.zip "https://releases.hashicorp.com/terraform/${TERRAFORM_VERSION}/terraform_${TERRAFORM_VERSION}_linux_amd64.zip" && \
unzip /terraform.zip -d /bin
RUN wget -q -O /kops "https://github.com/kubernetes/kops/releases/download/${KOPS_VERSION}/kops-linux-amd64" && \
chmod +x /kops && \
mv /kops /bin
# AWS cli via the bundled installer (wheezy's pip is too old for awscli).
RUN curl "https://s3.amazonaws.com/aws-cli/awscli-bundle.zip" -o "awscli-bundle.zip" && \
unzip awscli-bundle.zip && \
./awscli-bundle/install -i /usr/local/aws -b /bin/aws && \
rm -r awscli-bundle awscli-bundle.zip
# NOTE(review): kubectl is unpinned (latest stable at image build time) while
# the cluster is created with kubernetes 1.5.2 — confirm skew is acceptable.
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x ./kubectl && \
mv ./kubectl /bin/kubectl
# yq is used by the Makefile to read the kops instance-group manifest.
RUN pip install yq

55
scripts/jenkins/Makefile Normal file
View file

@@ -0,0 +1,55 @@
# Provision a throwaway kops cluster on AWS and run the prometheus-operator
# e2e test suite against it. Driven by the Jenkins job (see ../../Jenkinsfile).

CLUSTER_NAME ?= prom-test-$(shell whoami)
DOMAIN ?= dev.coreos.systems
# Expected Ready node count: worker minSize from the kops manifest plus one
# master. Recursive (=) on purpose so yq only runs when a target needs it.
AMOUNT_NODES = $$(($(shell cat manifests/kops/regular-ig.yaml | yq '.spec.minSize')+1))
path ?= clusters/${CLUSTER_NAME}
build_path := $(path)/.build
aws_region = eu-west-1

# Lazy (=) on purpose: the terraform state file only exists after `aws-deps`
# has run, so the kops state bucket lookup must not happen at parse time.
KOPS_CMD = kops --state $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_state_bucket)
TERRAFORM_FLAGS = -var "dns_domain=$(DOMAIN)" -var "cluster_name=$(CLUSTER_NAME)" -state "$(build_path)/terraform.tfstate"

all: check-deps gen-ssh cluster wait-for-cluster run-e2e-tests

# Fail fast when a required tool is missing instead of merely printing a
# message and carrying on (the previous `|| echo` never failed the build).
check-deps:
	@which aws || { echo "AWS cli is missing."; exit 1; }
	@which kops || { echo "Kops is missing."; exit 1; }
	@which kubectl || { echo "Kubectl is missing."; exit 1; }
	@which terraform || { echo "Terraform is missing."; exit 1; }
	@which jq || { echo "jq is missing."; exit 1; }
	@which yq || { echo "yq is missing."; exit 1; }

clean: clean-cluster clean-aws-deps

# Kops needs an ssh public key for the cluster nodes. Runs as root inside the
# cluster-setup-env container; fails if a key already exists at that path.
gen-ssh:
	ssh-keygen -t rsa -N "" -f /root/.ssh/id_rsa -q

# Create the AWS prerequisites (Route53 zone, kops state bucket, VPC, SG).
aws-deps:
	AWS_REGION=$(aws_region) terraform apply $(TERRAFORM_FLAGS) ./templates

# Create the cluster unless `kops get cluster` already lists it, then apply
# the worker instance-group manifest via the fake editor (see ed.sh).
cluster: aws-deps
	$(KOPS_CMD) get cluster | grep -v $(CLUSTER_NAME).$(DOMAIN) || \
	$(KOPS_CMD) create cluster \
	--name $(CLUSTER_NAME).$(DOMAIN) \
	--cloud aws --zones $(aws_region)a --kubernetes-version 1.5.2 \
	--master-size t2.medium --yes \
	--master-security-groups $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_master_security_group) \
	--node-security-groups $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_master_security_group) \
	--vpc $(shell terraform output -state "$(build_path)/terraform.tfstate" kops_main_vpc)
	EDITOR='./ed.sh manifests/kops/regular-ig.yaml' $(KOPS_CMD) edit ig nodes
	$(KOPS_CMD) update cluster --yes

run-e2e-tests:
	$(MAKE) -C ../../ e2e-test

# Give the cluster at most 30 minutes to come up.
wait-for-cluster:
	timeout 1800 ./wait-for-cluster.sh $(AMOUNT_NODES)

clean-cluster:
	$(KOPS_CMD) delete cluster --name $(CLUSTER_NAME).$(DOMAIN) --yes

clean-aws-deps:
	AWS_REGION=$(aws_region) terraform destroy -force $(TERRAFORM_FLAGS) ./templates
	rm -f $(build_path)/terraform.tfstate*

.PHONY: all check-deps clean gen-ssh aws-deps cluster run-e2e-tests wait-for-cluster clean-cluster clean-aws-deps

14
scripts/jenkins/ed.sh Executable file
View file

@@ -0,0 +1,14 @@
#!/bin/bash
# Kops requires user input through an editor to update a resource. Instead of
# interacting with an editor we give Kops a fake editor via the 'EDITOR' env
# var. This editor always writes the content of file '$1' into file '$2'. In the
# Makefile before calling 'kops edit ig nodes' we set the 'EDITOR' env var to
# this script with the wanted file as the first argument. The second argument
# which is the file that is supposed to be edited by the user is passed in by
# kops later.
set -e

WANTED_FILE=$1
TO_EDIT_FILE=$2

# Quote both paths so file names containing spaces or globs don't break.
cat "$WANTED_FILE" > "$TO_EDIT_FILE"

View file

@@ -0,0 +1,14 @@
# Worker node instance group for the throwaway e2e cluster. Applied through
# `kops edit ig nodes` with ed.sh as the fake editor; the Jenkins Makefile
# reads .spec.minSize with yq to compute the expected node count.
apiVersion: kops/v1alpha2
kind: InstanceGroup
metadata:
  name: nodes
spec:
  associatePublicIp: true
  machineType: t2.medium
  maxSize: 2
  minSize: 2
  nodeLabels:
    isolation: none
  role: Node
  zones:
  - eu-west-1a

View file

@@ -0,0 +1,56 @@
# AWS prerequisites for the throwaway kops e2e cluster: a delegated Route53
# zone, an S3 bucket for the kops state, and a VPC with a permissive SG.
# Consumed by the Jenkins Makefile (terraform apply/destroy ./templates).
variable "dns_domain" {}
variable "cluster_name" {}
# Pre-existing parent DNS zone that the per-cluster zone is delegated from.
data "aws_route53_zone" "monitoring_zone" {
name = "${var.dns_domain}"
}
resource "aws_route53_zone" "cluster_zone" {
name = "${var.cluster_name}.${var.dns_domain}"
}
# NS record in the parent zone delegating to the cluster zone.
resource "aws_route53_record" "cluster_zone_record" {
name = "${var.cluster_name}.${var.dns_domain}"
zone_id = "${data.aws_route53_zone.monitoring_zone.zone_id}"
type = "NS"
ttl = "300"
records = ["${aws_route53_zone.cluster_zone.name_servers}"]
}
# Kops state bucket; name hashed from cluster+domain for global uniqueness.
resource "aws_s3_bucket" "kops-state" {
bucket = "kops-${sha1("${var.cluster_name}-${var.dns_domain}")}"
}
# Applied to both masters and nodes by the Makefile's `kops create cluster`.
resource "aws_security_group" "allow_all" {
name = "allow_all"
description = "Allow all inbound traffic"
vpc_id = "${aws_vpc.main.id}"
# 30000-32767 — presumably to reach NodePort services from outside; verify.
ingress {
from_port = 30000
to_port = 32767
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
ingress {
from_port = 80
to_port = 80
protocol = "tcp"
cidr_blocks = ["0.0.0.0/0"]
}
tags {
Name = "allow_all"
}
}
resource "aws_vpc" "main" {
cidr_block = "172.20.0.0/16"
}
resource "aws_internet_gateway" "gw" {
vpc_id = "${aws_vpc.main.id}"
}

View file

@@ -0,0 +1,11 @@
# Values read back by the Jenkins Makefile via `terraform output -state ...`.
# Kops state store location, passed to every kops invocation (--state).
output "kops_state_bucket" {
value = "s3://${aws_s3_bucket.kops-state.id}"
}
# Security group attached to masters and nodes by `kops create cluster`.
output "kops_master_security_group" {
value = "${aws_security_group.allow_all.id}"
}
# VPC the cluster is created in (--vpc).
output "kops_main_vpc" {
value = "${aws_vpc.main.id}"
}

View file

@@ -0,0 +1,22 @@
#!/bin/bash
# Block until the kops-provisioned cluster answers and the expected number of
# nodes report Ready. Invoked from the Makefile under `timeout 1800`.
set -e

AMOUNT_NODES=$1

# Wait for the kubernetes apiserver to become available at all.
until kubectl cluster-info
do
    sleep 10
done

# Count nodes whose Ready condition is True.
function getAmountReadyNodes {
    kubectl get nodes -ojson | jq '[.items[].status.conditions[] | select( .type=="Ready" and .status=="True")] | length'
}

# Wait for all nodes to become ready. Use -ge rather than an exact string
# match so the script cannot hang forever if more nodes than expected join.
until [[ $(getAmountReadyNodes) -ge $AMOUNT_NODES ]]
do
    echo "Waiting for nodes to become ready: $(getAmountReadyNodes) / $AMOUNT_NODES are ready."
    sleep 10
done

View file

@@ -178,7 +178,12 @@ func TestExposingAlertmanagerWithIngress(t *testing.T) {
t.Fatal(err)
}
err := framework.WaitForHTTPSuccessStatusCode(time.Second*30, fmt.Sprintf("http://%s/metrics", framework.ClusterIP))
ip, err := framework.GetIngressIP(ingress.Name)
if err != nil {
t.Fatal(err)
}
err = framework.WaitForHTTPSuccessStatusCode(time.Second*30, fmt.Sprintf("http://%s/metrics", *ip))
if err != nil {
t.Fatal(err)
}

View file

@@ -21,6 +21,7 @@ import (
"k8s.io/client-go/pkg/apis/extensions/v1beta1"
"k8s.io/client-go/pkg/util/intstr"
"os"
"time"
)
func (f *Framework) MakeBasicIngress(serviceName string, servicePort int) *v1beta1.Ingress {
@@ -118,3 +119,24 @@ func (f *Framework) DeleteNginxIngressControllerIncDefaultBackend() error {
return nil
}
// GetIngressIP polls (up to 5 minutes, every 500ms) until the named Ingress
// in the framework's namespace reports at least one load-balancer endpoint,
// then returns a pointer to the IP of the first one.
//
// NOTE(review): only the IP field is returned; on providers that populate
// Hostname instead the string may be empty — confirm for the e2e setup.
func (f *Framework) GetIngressIP(ingressName string) (*string, error) {
	var ing *v1beta1.Ingress

	err := f.Poll(time.Minute*5, time.Millisecond*500, func() (bool, error) {
		var getErr error
		ing, getErr = f.KubeClient.Extensions().Ingresses(f.Namespace.Name).Get(ingressName, metav1.GetOptions{})
		if getErr != nil {
			return false, getErr
		}
		// Done as soon as the status carries any load-balancer entry.
		return len(ing.Status.LoadBalancer.Ingress) != 0, nil
	})
	if err != nil {
		return nil, err
	}

	return &ing.Status.LoadBalancer.Ingress[0].IP, nil
}

View file

@@ -394,7 +394,12 @@ func TestExposingPrometheusWithIngress(t *testing.T) {
t.Fatal(err)
}
err = framework.WaitForHTTPSuccessStatusCode(time.Second*30, fmt.Sprintf("http://%s:/metrics", framework.ClusterIP))
ip, err := framework.GetIngressIP(ingress.Name)
if err != nil {
t.Fatal(err)
}
err = framework.WaitForHTTPSuccessStatusCode(time.Second*30, fmt.Sprintf("http://%s:/metrics", *ip))
if err != nil {
t.Fatal(err)
}