1
0
Fork 0
mirror of https://github.com/prometheus-operator/prometheus-operator.git synced 2025-04-16 09:16:38 +00:00

Remove Jenkins

This commit is contained in:
Frederic Branczyk 2017-11-06 18:12:55 +01:00
parent 1eb814f635
commit 48f3614fce
No known key found for this signature in database
GPG key ID: 7741A52782A90069
14 changed files with 3 additions and 625 deletions

View file

@@ -7,7 +7,7 @@ services:
jobs: jobs:
include: include:
- stage: Check generated contents are up to date and code is formatted. - stage: Check generated contents are up to date and code is formatted.
script: ./scripts/jenkins/check-make-generate.sh script: ./scripts/check-make-generate.sh
- stage: Unit tests - stage: Unit tests
script: make test script: make test
- stage: E2e tests - stage: E2e tests

171
Jenkinsfile vendored
View file

@@ -1,171 +0,0 @@
// Jenkins Job DSL: pull-request test job for the prometheus-operator repo.
// Triggered by GitHub webhooks (or a trigger phrase) and reports the result
// back to the PR as a commit status.
job('po-tests-pr') {
concurrentBuild()
// logRotator(daysToKeep, numberToKeep)
logRotator(30, 10)
parameters {
// Commit SHA of the PR head to build; injected by the githubPullRequest
// trigger below.
stringParam('sha1')
}
scm {
git {
remote {
github('coreos/prometheus-operator')
// Fetch PR refs so the ${sha1} of a pull request is resolvable.
refspec('+refs/pull/*:refs/remotes/origin/pr/*')
}
branch('${sha1}')
}
}
wrappers {
credentialsBinding {
// AWS credentials used to provision the e2e test cluster.
amazonWebServicesCredentialsBinding{
accessKeyVariable('AWS_ACCESS_KEY_ID')
secretKeyVariable('AWS_SECRET_ACCESS_KEY')
credentialsId('Jenkins-Monitoring-AWS-User')
}
// Quay robot account for pushing/pulling the dev container image.
usernamePassword('QUAY_ROBOT_USERNAME', 'QUAY_ROBOT_SECRET', 'quay_robot')
}
}
triggers {
githubPullRequest {
useGitHubHooks()
// Only PRs by members of this org build automatically; others need the
// trigger phrase approved by an admin.
orgWhitelist(['coreos-inc'])
allowMembersOfWhitelistedOrgsAsAdmin()
triggerPhrase('test this please|please test this')
extensions {
commitStatus {
// Status context and per-state messages shown on the PR.
context('prometheus-operator-tests')
triggeredStatus('Tests triggered')
startedStatus('Tests started')
completedStatus('SUCCESS', 'Success')
completedStatus('FAILURE', 'Failure')
completedStatus('PENDING', 'Pending')
completedStatus('ERROR', 'Error')
}
}
}
}
// NOTE(review): three separate steps{} blocks — whether the Job DSL plugin
// merges these or keeps only the last depends on plugin version; confirm.
steps {
shell('./scripts/jenkins/check-make-generate.sh')
}
steps {
shell('./scripts/jenkins/make-test.sh')
}
steps {
shell('./scripts/jenkins/run-e2e-tests.sh')
}
publishers {
postBuildScripts {
steps {
// Tear down the e2e cluster regardless of the build outcome.
shell('./scripts/jenkins/post-e2e-tests.sh')
}
onlyIfBuildSucceeds(false)
onlyIfBuildFails(false)
}
wsCleanup()
}
}
// Jenkins Job DSL: master-branch job. Runs the same test pipeline as the PR
// job on every push to master (and daily), and additionally pushes the built
// image to Quay and notifies Slack.
job('po-tests-master') {
concurrentBuild()
// logRotator(daysToKeep, numberToKeep)
logRotator(30, 5)
scm {
git {
remote {
github('coreos/prometheus-operator')
}
branch('master')
}
}
wrappers {
credentialsBinding {
// AWS credentials used to provision the e2e test cluster.
amazonWebServicesCredentialsBinding{
accessKeyVariable('AWS_ACCESS_KEY_ID')
secretKeyVariable('AWS_SECRET_ACCESS_KEY')
credentialsId('Jenkins-Monitoring-AWS-User')
}
// Quay robot account for pushing the :master image.
usernamePassword('QUAY_ROBOT_USERNAME', 'QUAY_ROBOT_SECRET', 'quay_robot')
}
}
triggers {
// NOTE(review): githubPush() and gitHubPushTrigger() look redundant —
// likely two plugin-version spellings of the same trigger; confirm.
githubPush()
gitHubPushTrigger()
cron('@daily')
// Empty poll spec: hook-based polling only, no scheduled SCM polling.
pollSCM{scmpoll_spec('')}
}
// NOTE(review): repeated steps{} blocks — DSL merge behavior is
// plugin-version dependent; confirm all three scripts actually run.
steps {
shell('./scripts/jenkins/check-make-generate.sh')
}
steps {
shell('./scripts/jenkins/make-test.sh')
}
steps {
shell('./scripts/jenkins/run-e2e-tests.sh')
}
publishers {
postBuildScripts {
steps {
// Publish the image only when the build passed.
shell('./scripts/jenkins/push-to-quay.sh')
}
onlyIfBuildSucceeds(true)
}
postBuildScripts {
steps {
// Tear down the e2e cluster regardless of the build outcome.
shell('./scripts/jenkins/post-e2e-tests.sh')
}
onlyIfBuildSucceeds(false)
onlyIfBuildFails(false)
}
slackNotifier {
room('#team-monitoring')
teamDomain('coreos')
authTokenCredentialId('team-monitoring-slack-jenkins')
notifyFailure(true)
notifyRegression(true)
notifyRepeatedFailure(true)
}
wsCleanup()
}
}
// Jenkins Job DSL: weekly housekeeping job. Reclaims disk space on the build
// host by pruning all unused Docker data, then prints remaining disk usage.
job('cleanup') {
// logRotator(daysToKeep, numberToKeep)
logRotator(30, 2)
triggers {
cron('@weekly')
}
steps {
// -a removes all unused images (not just dangling); -f skips the prompt.
shell('docker system prune -a -f')
// Log what is still consuming space after the prune.
shell('docker system df')
}
publishers {
slackNotifier {
room('#team-monitoring')
teamDomain('coreos')
authTokenCredentialId('team-monitoring-slack-jenkins')
notifyFailure(true)
notifyRegression(true)
notifyRepeatedFailure(true)
}
wsCleanup()
}
}

View file

@@ -75,6 +75,6 @@ jsonnet:
jsonnet -J /ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | json2yaml > contrib/kube-prometheus/manifests/prometheus-operator/prometheus-operator.yaml jsonnet -J /ksonnet-lib hack/generate/prometheus-operator-rbac.jsonnet | json2yaml > contrib/kube-prometheus/manifests/prometheus-operator/prometheus-operator.yaml
jsonnet-docker: jsonnet-docker:
docker build -f scripts/jenkins/jsonnet/Dockerfile -t po-jsonnet . docker build -f scripts/Dockerfile -t po-jsonnet .
.PHONY: all build crossbuild test format check-license container e2e-test e2e-status e2e clean-e2e embedmd apidocgen docs .PHONY: all build crossbuild test format check-license container e2e-test e2e-status e2e clean-e2e embedmd apidocgen docs

View file

@@ -1,5 +1,5 @@
# Prometheus Operator # Prometheus Operator
[![Build Status](https://jenkins-monitoring.prod.coreos.systems/buildStatus/icon?job=po-tests-master)](https://jenkins-monitoring.prod.coreos.systems/job/po-tests-master/) [![Build Status](https://travis-ci.org/coreos/prometheus-operator.svg?branch=master)](https://travis-ci.org/coreos/prometheus-operator)
[![Go Report Card](https://goreportcard.com/badge/coreos/prometheus-operator "Go Report Card")](https://goreportcard.com/report/coreos/prometheus-operator) [![Go Report Card](https://goreportcard.com/badge/coreos/prometheus-operator "Go Report Card")](https://goreportcard.com/report/coreos/prometheus-operator)
**Project status: *beta*** Not all planned features are completed. The API, spec, status and other user facing objects may change, but in a backward compatible way. **Project status: *beta*** Not all planned features are completed. The API, spec, status and other user facing objects may change, but in a backward compatible way.

View file

@@ -1,8 +0,0 @@
# CI build-environment image: Go toolchain plus a static Docker CLI binary,
# used to run `make crossbuild` and then drive the host Docker daemon (the
# daemon socket is bind-mounted in by the calling script).
FROM golang:1.8-stretch
# Version of the Docker client binary to install.
ENV DOCKER_VERSION 1.13.1
# NOTE(review): get.docker.com/builds has since been discontinued — confirm
# this URL still resolves before reusing this image.
RUN curl https://get.docker.com/builds/Linux/x86_64/docker-${DOCKER_VERSION}.tgz | tar -xvz && \
mv docker/docker /usr/local/bin/docker && \
chmod +x /usr/local/bin/docker && \
rm -r docker

View file

@@ -1,13 +0,0 @@
# CI image with kubectl and small utilities (jq, python) for talking to the
# e2e test cluster from inside a container.
FROM golang:1.8-stretch
# NOTE(review): adding wheezy-backports on a stretch base looks vestigial —
# likely copied from an older image; confirm it is still needed.
RUN echo "deb http://ftp.debian.org/debian wheezy-backports main" >> /etc/apt/sources.list
RUN apt-get update && apt-get install -y --no-install-recommends \
ca-certificates \
unzip \
python python-pip jq \
&& rm -rf /var/lib/apt/lists/*
# Install the latest stable kubectl release at image build time.
RUN curl -LO https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && \
chmod +x ./kubectl && \
mv ./kubectl /bin/kubectl

View file

@@ -1,230 +0,0 @@
// Terraform variables for the CI e2e test cluster: a vanilla Kubernetes
// cluster on AWS provisioned through the Tectonic installer
// (tectonic_vanilla_k8s = true below, so no Tectonic assets are deployed).
// The e-mail address used to login as the admin user to the Tectonic Console.
//
// Note: This field MUST be set manually prior to creating the cluster.
tectonic_admin_email = "monitoring@coreos.com"
// The bcrypt hash of admin user password to login to the Tectonic Console.
// Use the bcrypt-hash tool (https://github.com/coreos/bcrypt-tool/releases/tag/v1.0.0) to generate it.
//
// Note: This field MUST be set manually prior to creating the cluster.
// NOTE(review): left blank here — presumably acceptable because the console
// is not deployed for vanilla k8s clusters; confirm.
tectonic_admin_password = ""
// (optional) Extra AWS tags to be applied to created autoscaling group resources.
// This is a list of maps having the keys `key`, `value` and `propagate_at_launch`.
//
// Example: `[ { key = "foo", value = "bar", propagate_at_launch = true } ]`
// tectonic_autoscaling_group_extra_tags = ""
// Number of Availability Zones your EC2 instances will be deployed across.
// This should be less than or equal to the total number available in the region.
// Be aware that some regions only have 2.
// If set worker and master subnet CIDRs are calculated automatically.
//
// Note:
// This field MUST be set manually prior to creating the cluster.
// It MUST NOT be set if availability zones CIDRs are configured using `tectonic_aws_master_custom_subnets` and `tectonic_aws_worker_custom_subnets`.
tectonic_aws_az_count = "2"
// Instance size for the etcd node(s). Example: `t2.medium`.
tectonic_aws_etcd_ec2_type = "t2.medium"
// The amount of provisioned IOPS for the root block device of etcd nodes.
tectonic_aws_etcd_root_volume_iops = "100"
// The size of the volume in gigabytes for the root block device of etcd nodes.
tectonic_aws_etcd_root_volume_size = "30"
// The type of volume for the root block device of etcd nodes.
tectonic_aws_etcd_root_volume_type = "gp2"
// (optional) List of subnet IDs within an existing VPC to deploy master nodes into.
// Required to use an existing VPC and the list must match the AZ count.
//
// Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
// tectonic_aws_external_master_subnet_ids = ""
// (optional) ID of an existing VPC to launch nodes into.
// If unset a new VPC is created.
//
// Example: `vpc-123456`
// tectonic_aws_external_vpc_id = ""
// If set to true, create public facing ingress resources (ELB, A-records).
// If set to false, a "private" cluster will be created with an internal ELB only.
tectonic_aws_external_vpc_public = true
// (optional) List of subnet IDs within an existing VPC to deploy worker nodes into.
// Required to use an existing VPC and the list must match the AZ count.
//
// Example: `["subnet-111111", "subnet-222222", "subnet-333333"]`
// tectonic_aws_external_worker_subnet_ids = ""
// (optional) Extra AWS tags to be applied to created resources.
// tectonic_aws_extra_tags = ""
// (optional) This configures master availability zones and their corresponding subnet CIDRs directly.
//
// Example:
// `{ eu-west-1a = "10.0.0.0/20", eu-west-1b = "10.0.16.0/20" }`
//
// Note that `tectonic_aws_az_count` must be unset if this is specified.
// tectonic_aws_master_custom_subnets = ""
// Instance size for the master node(s). Example: `t2.medium`.
tectonic_aws_master_ec2_type = "t2.medium"
// The amount of provisioned IOPS for the root block device of master nodes.
tectonic_aws_master_root_volume_iops = "100"
// The size of the volume in gigabytes for the root block device of master nodes.
tectonic_aws_master_root_volume_size = "30"
// The type of volume for the root block device of master nodes.
tectonic_aws_master_root_volume_type = "gp2"
// The target AWS region for the cluster.
tectonic_aws_region = "eu-west-2"
// Name of an SSH key located within the AWS region. Example: coreos-user.
tectonic_aws_ssh_key = "jenkins-tpo-ssh-key"
// Block of IP addresses used by the VPC.
// This should not overlap with any other networks, such as a private datacenter connected via Direct Connect.
tectonic_aws_vpc_cidr_block = "10.0.0.0/16"
// (optional) This configures worker availability zones and their corresponding subnet CIDRs directly.
//
// Example: `{ eu-west-1a = "10.0.64.0/20", eu-west-1b = "10.0.80.0/20" }`
//
// Note that `tectonic_aws_az_count` must be unset if this is specified.
// tectonic_aws_worker_custom_subnets = ""
// Instance size for the worker node(s). Example: `t2.medium`.
tectonic_aws_worker_ec2_type = "t2.medium"
// The amount of provisioned IOPS for the root block device of worker nodes.
tectonic_aws_worker_root_volume_iops = "100"
// The size of the volume in gigabytes for the root block device of worker nodes.
tectonic_aws_worker_root_volume_size = "30"
// The type of volume for the root block device of worker nodes.
tectonic_aws_worker_root_volume_type = "gp2"
// The base DNS domain of the cluster.
//
// Example: `openstack.dev.coreos.systems`.
//
// Note: This field MUST be set manually prior to creating the cluster.
// This applies only to cloud platforms.
tectonic_base_domain = "dev.coreos.systems"
// (optional) The content of the PEM-encoded CA certificate, used to generate Tectonic Console's server certificate.
// If left blank, a CA certificate will be automatically generated.
// tectonic_ca_cert = ""
// (optional) The content of the PEM-encoded CA key, used to generate Tectonic Console's server certificate.
// This field is mandatory if `tectonic_ca_cert` is set.
// tectonic_ca_key = ""
// (optional) The algorithm used to generate tectonic_ca_key.
// The default value is currently recommend.
// This field is mandatory if `tectonic_ca_cert` is set.
// tectonic_ca_key_alg = "RSA"
// The Container Linux update channel.
//
// Examples: `stable`, `beta`, `alpha`
tectonic_cl_channel = "stable"
// This declares the IP range to assign Kubernetes pod IPs in CIDR notation.
tectonic_cluster_cidr = "10.2.0.0/16"
// The name of the cluster.
// If used in a cloud-environment, this will be prepended to `tectonic_base_domain` resulting in the URL to the Tectonic console.
//
// Note: This field MUST be set manually prior to creating the cluster.
// Set via env variable
//tectonic_cluster_name = ""
// (optional) DNS prefix used to construct the console and API server endpoints.
// tectonic_dns_name = ""
// (optional) The path of the file containing the CA certificate for TLS communication with etcd.
//
// Note: This works only when used in conjunction with an external etcd cluster.
// If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_client_cert_path`, and `tectonic_etcd_client_key_path` must also be set.
// tectonic_etcd_ca_cert_path = ""
// (optional) The path of the file containing the client certificate for TLS communication with etcd.
//
// Note: This works only when used in conjunction with an external etcd cluster.
// If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_key_path` must also be set.
// tectonic_etcd_client_cert_path = ""
// (optional) The path of the file containing the client key for TLS communication with etcd.
//
// Note: This works only when used in conjunction with an external etcd cluster.
// If set, the variables `tectonic_etcd_servers`, `tectonic_etcd_ca_cert_path`, and `tectonic_etcd_client_cert_path` must also be set.
// tectonic_etcd_client_key_path = ""
// The number of etcd nodes to be created.
// If set to zero, the count of etcd nodes will be determined automatically.
//
// Note: This is currently only supported on AWS.
tectonic_etcd_count = "0"
// (optional) List of external etcd v3 servers to connect with (hostnames/IPs only).
// Needs to be set if using an external etcd cluster.
//
// Example: `["etcd1", "etcd2", "etcd3"]`
// tectonic_etcd_servers = ""
// If set to true, experimental Tectonic assets are being deployed.
tectonic_experimental = false
// The Kubernetes service IP used to reach kube-apiserver inside the cluster
// as returned by `kubectl -n default get service kubernetes`.
tectonic_kube_apiserver_service_ip = "10.3.0.1"
// The Kubernetes service IP used to reach kube-dns inside the cluster
// as returned by `kubectl -n kube-system get service kube-dns`.
tectonic_kube_dns_service_ip = "10.3.0.10"
// The Kubernetes service IP used to reach self-hosted etcd inside the cluster
// as returned by `kubectl -n kube-system get service etcd-service`.
tectonic_kube_etcd_service_ip = "10.3.0.15"
// The path to the tectonic licence file.
//
// Note: This field MUST be set manually prior to creating the cluster.
tectonic_license_path = "/go/src/github.com/coreos/tectonic-installer/license"
// The number of master nodes to be created.
// This applies only to cloud platforms.
tectonic_master_count = "1"
// The path the pull secret file in JSON format.
//
// Note: This field MUST be set manually prior to creating the cluster.
tectonic_pull_secret_path = "/go/src/github.com/coreos/tectonic-installer/secret"
// This declares the IP range to assign Kubernetes service cluster IPs in CIDR notation.
tectonic_service_cidr = "10.3.0.0/16"
// If set to true, a vanilla Kubernetes cluster will be deployed, omitting any Tectonic assets.
tectonic_vanilla_k8s = true
// The number of worker nodes to be created.
// This applies only to cloud platforms.
tectonic_worker_count = "3"
// Tags for cost attribution and automated reaping of stale resources.
// NOTE(review): expirationDate of 2017-01-01 is in the past — presumably the
// reaper treats past dates as "expire whenever"; verify this is intentional.
tectonic_autoscaling_group_extra_tags = [
{ key = "createdBy", value = "team-monitoring@coreos.com", propagate_at_launch = true },
{ key = "expirationDate", value = "2017-01-01", propagate_at_launch = true }
]
tectonic_aws_extra_tags = {
"createdBy"="team-monitoring@coreos.com",
"expirationDate"="2017-01-01"
}

View file

@@ -1,15 +0,0 @@
#!/usr/bin/env bash
# Run the project's unit tests inside a golang container, so the build host
# only needs Docker and not a local Go toolchain.

# exit immediately when a command fails
set -e
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail
# error on unset variables
set -u
# print each command before executing it
set -x

# Mount the checkout at its GOPATH location inside the container. "$PWD" is
# quoted so a workspace path containing spaces does not word-split into
# multiple docker arguments.
docker run \
	--rm \
	-v "$PWD":/go/src/github.com/coreos/prometheus-operator \
	-w /go/src/github.com/coreos/prometheus-operator \
	golang make test

View file

@@ -1,37 +0,0 @@
#!/usr/bin/env bash
# Tear down the e2e test cluster and clean up build artifacts and the
# per-build docker image.

# This is a cleanup script, if one command fails we still want all others to run
# set -e
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail
# error on unset variables
set -u
# print each command before executing it
set -x

export {TF_GET_OPTIONS,TF_PLAN_OPTIONS,TF_APPLY_OPTIONS,TF_DESTROY_OPTIONS}="-no-color"

# Cluster name is derived from the current commit and the Jenkins BUILD_ID so
# it matches the name used when the cluster was brought up.
CLUSTER="po-$(git rev-parse --short HEAD)-${BUILD_ID}"
TF_VAR_tectonic_cluster_name="${CLUSTER}"
TF_VAR_tectonic_dns_name="${CLUSTER}"
TECTONIC_INSTALLER_DIR=/go/src/github.com/coreos/tectonic-installer

# Destroy cluster. `make destroy` is retried because terraform destroys are
# flaky; expansions are quoted so paths with spaces do not word-split.
docker run \
	--rm \
	-v "$PWD"/build/:"$TECTONIC_INSTALLER_DIR"/build/ \
	-v ~/.ssh:"$HOME"/.ssh \
	-e AWS_ACCESS_KEY_ID \
	-e AWS_SECRET_ACCESS_KEY \
	-e TF_GET_OPTIONS \
	-e TF_DESTROY_OPTIONS \
	-e CLUSTER="${CLUSTER}" \
	-w "$TECTONIC_INSTALLER_DIR" \
	-e TF_VAR_tectonic_cluster_name="${TF_VAR_tectonic_cluster_name}" \
	-e TF_VAR_tectonic_dns_name="${TF_VAR_tectonic_dns_name}" \
	quay.io/coreos/tectonic-installer:master \
	/bin/bash -c "make destroy || make destroy || make destroy"

# Cleanup folders created by docker (root)
sudo rm -rf build .build

docker rmi "quay.io/coreos/prometheus-operator-dev:$BUILD_ID"

View file

@@ -1,38 +0,0 @@
#!/usr/bin/env bash
# Retag the per-build image as :master and push it to Quay, retrying the push
# to ride out transient "net/http: TLS handshake timeout" errors.

# exit immediately when a command fails
set -e
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail
# error on unset variables
set -u
# print each command before executing it
set -x

PO_QUAY_REPO=quay.io/coreos/prometheus-operator-dev

# Disable xtrace around the login so `set -x` does not echo the robot
# credentials into the build log.
set +x
docker login -u="$QUAY_ROBOT_USERNAME" -p="$QUAY_ROBOT_SECRET" quay.io
set -x

docker tag \
	"$PO_QUAY_REPO:$BUILD_ID" \
	"$PO_QUAY_REPO:master"

# Retry pushing docker image multiple times to prevent net/http: TLS handshake
# timeout
retry=0
maxRetries=5
until [ "${retry}" -ge "${maxRetries}" ]
do
	docker push "$PO_QUAY_REPO:master" && break
	# POSIX arithmetic expansion; the original $[...] form is deprecated.
	retry=$((retry + 1))
done

docker logout quay.io

if [ "${retry}" -ge "${maxRetries}" ]; then
	echo "Failed to push docker image after ${maxRetries} attempts!"
	exit 1
fi

View file

@@ -1,88 +0,0 @@
#!/usr/bin/env bash
# End-to-end test driver: cross-builds the operator, pushes a per-build image
# to Quay, provisions a throwaway Kubernetes cluster on AWS via the Tectonic
# installer, waits for it to come up, then runs `make e2e-test` against it.
# Cleanup is done separately by post-e2e-tests.sh.
# exit immediately when a command fails
set -e
# only exit with zero if all commands of the pipeline exit successfully
set -o pipefail
# error on unset variables
set -u
# print each command before executing it
set -x
# Push docker image
DOCKER_SOCKET=/var/run/docker.sock
PO_QUAY_REPO=quay.io/coreos/prometheus-operator-dev
docker build -t docker-golang-env -f scripts/jenkins/docker-golang-env/Dockerfile .
# Cross-build inside the Go container; the host docker socket is mounted so
# the container-built binaries land in the host workspace.
docker run \
--rm \
-v $PWD:$PWD -v $DOCKER_SOCKET:$DOCKER_SOCKET \
docker-golang-env \
/bin/bash -c "cd $PWD && make crossbuild"
# Tag the operator image with the Jenkins build id and publish it so the test
# cluster can pull it.
docker build -t $PO_QUAY_REPO:$BUILD_ID .
# NOTE(review): `set -x` echoes the expanded credentials into the build log.
docker login -u="$QUAY_ROBOT_USERNAME" -p="$QUAY_ROBOT_SECRET" quay.io
docker push $PO_QUAY_REPO:$BUILD_ID
# Bring up k8s cluster
export {TF_GET_OPTIONS,TF_PLAN_OPTIONS,TF_APPLY_OPTIONS,TF_DESTROY_OPTIONS}="-no-color"
# Cluster name embeds commit and build id; post-e2e-tests.sh recomputes the
# same name to destroy it.
CLUSTER="po-$(git rev-parse --short HEAD)-${BUILD_ID}"
TF_VAR_tectonic_cluster_name="${CLUSTER}"
TF_VAR_tectonic_dns_name="${CLUSTER}"
TECTONIC_INSTALLER_DIR=/go/src/github.com/coreos/tectonic-installer
PO_DIR=/go/src/github.com/coreos/prometheus-operator
KUBECONFIG="${PO_DIR}/build/${CLUSTER}/generated/auth/kubeconfig"
TECTONIC_INSTALLER="quay.io/coreos/tectonic-installer:master"
mkdir -p build/${CLUSTER}
# Seed the installer with the checked-in terraform variables.
cp ${WORKSPACE}/scripts/jenkins/kubernetes-vanilla.tfvars build/${CLUSTER}/terraform.tfvars
docker pull $TECTONIC_INSTALLER
# `touch license secret`: vanilla k8s needs no real Tectonic license/secret,
# but the installer expects the files to exist.
docker run \
--rm \
-v $PWD/build/:$TECTONIC_INSTALLER_DIR/build/ \
-v ~/.ssh:$HOME/.ssh \
-e AWS_ACCESS_KEY_ID \
-e AWS_SECRET_ACCESS_KEY \
-e TF_GET_OPTIONS \
-e TF_PLAN_OPTIONS \
-e TF_APPLY_OPTIONS \
-e CLUSTER=${CLUSTER} \
-e TF_VAR_tectonic_cluster_name=${TF_VAR_tectonic_cluster_name} \
-e TF_VAR_tectonic_dns_name=${TF_VAR_tectonic_dns_name} \
-w $TECTONIC_INSTALLER_DIR \
$TECTONIC_INSTALLER \
/bin/bash -c "touch license secret && make plan && make apply"
docker build \
-t kubectl-env \
-f scripts/jenkins/kubectl-env/Dockerfile \
.
# Give AWS resources time to settle before polling the API server.
sleep 5m
# Expects 4 ready nodes (1 master + 3 workers per the tfvars); gives up after
# 900 seconds.
docker run \
--rm \
-v $PWD:$PO_DIR \
-w $PO_DIR \
-e KUBECONFIG=${KUBECONFIG} \
kubectl-env \
/bin/bash -c "timeout 900 ./scripts/jenkins/wait-for-cluster.sh 4"
# Run e2e tests
docker run \
--rm \
-v $PWD:$PO_DIR \
-w $PO_DIR \
-e KUBECONFIG=${KUBECONFIG} \
-e REPO=$PO_QUAY_REPO \
-e TAG=$BUILD_ID \
kubectl-env \
/bin/bash -c "make e2e-test"

View file

@@ -1,22 +0,0 @@
#!/bin/bash
# Block until the Kubernetes cluster is reachable and the expected number of
# nodes report Ready.
#
# Usage: wait-for-cluster.sh <expected-node-count>
set -e

# Fail fast with a usage message instead of silently looping on a comparison
# against an empty string when the argument is missing.
AMOUNT_NODES=${1:?usage: $0 <expected-node-count>}

# Wait for kubernetes cluster to become available
until kubectl cluster-info
do
	sleep 10
done

# Count nodes whose Ready condition is True.
function getAmountReadyNodes {
	kubectl get nodes -ojson | jq '[.items[].status.conditions[] | select( .type=="Ready" and .status=="True")] | length'
}

# Wait for all nodes to become ready. Compare numerically with -eq: the
# original `[[ $x == $AMOUNT_NODES ]]` treats the right-hand side as a glob
# pattern, not a number.
until [ "$(getAmountReadyNodes)" -eq "$AMOUNT_NODES" ]
do
	echo "Waiting for nodes to become ready: $(getAmountReadyNodes) / $AMOUNT_NODES are ready."
	sleep 10
done