gitlab-org--gitlab-foss/vendor/gitlab-ci-yml/Auto-DevOps.gitlab-ci.yml

# Auto DevOps
# This CI/CD configuration provides a standard pipeline for
# * building a Docker image (using a buildpack if necessary),
# * storing the image in the container registry,
# * running tests from a buildpack,
# * running code quality analysis,
# * creating a review app for each topic branch,
# * and continuous deployment to production
#
# In order to deploy, you must have a Kubernetes cluster configured either
# via a project integration, or via group/project variables.
# AUTO_DEVOPS_DOMAIN must also be set as a variable at the group or project
# level, or manually added below.
#
# If you want to deploy to staging first, or enable canary deploys,
# uncomment the relevant jobs in the pipeline below.
#
# If Auto DevOps fails to detect the proper buildpack, or if you want to
# specify a custom buildpack, set a project variable `BUILDPACK_URL` to the
# repository URL of the buildpack.
# e.g. BUILDPACK_URL=https://github.com/heroku/heroku-buildpack-ruby.git#v142
# If you need multiple buildpacks, add a file to your project called
# `.buildpacks` that contains the URLs, one on each line, in order.
# Note: Auto CI does not work with multiple buildpacks yet
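# As an illustration only, a `.buildpacks` file for a hypothetical Node + Ruby
# project could look like:
#
#   https://github.com/heroku/heroku-buildpack-nodejs.git
#   https://github.com/heroku/heroku-buildpack-ruby.git#v142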
image: alpine:latest
variables:
# AUTO_DEVOPS_DOMAIN is the application deployment domain and should be set as a variable at the group or project level.
# AUTO_DEVOPS_DOMAIN: domain.example.com
POSTGRES_USER: user
POSTGRES_PASSWORD: testing-password
POSTGRES_ENABLED: "true"
POSTGRES_DB: $CI_ENVIRONMENT_SLUG
KUBERNETES_VERSION: 1.8.6
HELM_VERSION: 2.6.1
CODECLIMATE_VERSION: 0.69.0
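  # The deploy script below also reads optional per-environment replica counts,
  # which could be set here; illustrative (not default) values:
  # PRODUCTION_REPLICAS: "2"
  # CANARY_PRODUCTION_REPLICAS: "1"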
stages:
- build
- test
- review
- dast
- staging
- canary
- production
- performance
- cleanup
build:
stage: build
image: docker:git
services:
- docker:dind
variables:
DOCKER_DRIVER: overlay2
script:
- setup_docker
- build
only:
- branches
test:
services:
- postgres:latest
variables:
POSTGRES_DB: test
stage: test
image: gliderlabs/herokuish:latest
script:
- setup_test_db
- cp -R . /tmp/app
- /bin/herokuish buildpack test
only:
- branches
codequality:
image: docker:latest
variables:
DOCKER_DRIVER: overlay2
allow_failure: true
services:
- docker:dind
script:
- setup_docker
- codeclimate
artifacts:
paths: [codeclimate.json]
performance:
stage: performance
image: docker:latest
variables:
DOCKER_DRIVER: overlay2
allow_failure: true
services:
- docker:dind
script:
- setup_docker
- performance
artifacts:
paths:
- performance.json
- sitespeed-results/
only:
refs:
- branches
kubernetes: active
sast:
image: docker:latest
variables:
DOCKER_DRIVER: overlay2
allow_failure: true
services:
- docker:dind
script:
- setup_docker
- sast
artifacts:
paths: [gl-sast-report.json]
sast:container:
image: docker:latest
variables:
DOCKER_DRIVER: overlay2
allow_failure: true
services:
- docker:dind
script:
- setup_docker
- sast_container
artifacts:
paths: [gl-sast-container-report.json]
dast:
stage: dast
allow_failure: true
image: owasp/zap2docker-stable
variables:
POSTGRES_DB: "false"
script:
- dast
artifacts:
paths: [gl-dast-report.json]
only:
refs:
- branches
kubernetes: active
except:
- master
review:
stage: review
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- install_tiller
- create_secret
- deploy
- persist_environment_url
environment:
name: review/$CI_COMMIT_REF_NAME
url: http://$CI_PROJECT_PATH_SLUG-$CI_ENVIRONMENT_SLUG.$AUTO_DEVOPS_DOMAIN
on_stop: stop_review
artifacts:
paths: [environment_url.txt]
only:
refs:
- branches
kubernetes: active
except:
- master
stop_review:
stage: cleanup
variables:
GIT_STRATEGY: none
script:
- install_dependencies
- delete
environment:
name: review/$CI_COMMIT_REF_NAME
action: stop
when: manual
allow_failure: true
only:
refs:
- branches
kubernetes: active
except:
- master
# Keys that start with a dot (.) will not be processed by GitLab CI.
# Staging and canary jobs are disabled by default, to enable them
# remove the dot (.) before the job name.
# https://docs.gitlab.com/ee/ci/yaml/README.html#hidden-keys
# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.
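# For illustration, enabling staging amounts to renaming the hidden key below:
#
#   .staging:   ->   staging:
#
# and then uncommenting the `# when: manual` line in the `production` job.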
.staging:
stage: staging
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- install_tiller
- create_secret
- deploy
environment:
name: staging
url: http://$CI_PROJECT_PATH_SLUG-staging.$AUTO_DEVOPS_DOMAIN
only:
refs:
- master
kubernetes: active
# Canaries are disabled by default, but if you want them,
# and know what the downsides are, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.
.canary:
stage: canary
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- install_tiller
- create_secret
- deploy canary
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$AUTO_DEVOPS_DOMAIN
when: manual
only:
refs:
- master
kubernetes: active
# This job continuously deploys to production on every push to `master`.
# To make this a manual process, either because you're enabling `staging`
# or `canary` deploys, or you simply want more control over when you deploy
# to production, uncomment the `when: manual` line in the `production` job.
production:
stage: production
script:
- check_kube_domain
- install_dependencies
- download_chart
- ensure_namespace
- install_tiller
- create_secret
- deploy
- delete canary
- persist_environment_url
environment:
name: production
url: http://$CI_PROJECT_PATH_SLUG.$AUTO_DEVOPS_DOMAIN
artifacts:
paths: [environment_url.txt]
# when: manual
only:
refs:
- master
kubernetes: active
# ---------------------------------------------------------------------------
.auto_devops: &auto_devops |
# Auto DevOps variables and functions
[[ "$TRACE" ]] && set -x
auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
export DATABASE_URL=${DATABASE_URL-$auto_database_url}
export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
export CI_APPLICATION_TAG=$CI_COMMIT_SHA
export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
export TILLER_NAMESPACE=$KUBE_NAMESPACE
function sast_container() {
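    # Scan the image built earlier with Clair (arminc/clair-local-scan + clair-scanner)
    # and write gl-sast-container-report.json; scanner errors are ignored so the job stays non-blocking.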
if [[ -n "$CI_REGISTRY_USER" ]]; then
echo "Logging to GitLab Container Registry with CI credentials..."
docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
echo ""
fi
docker run -d --name db arminc/clair-db:latest
docker run -p 6060:6060 --link db:postgres -d --name clair arminc/clair-local-scan:v2.0.1
apk add -U wget ca-certificates
docker pull ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG}
wget https://github.com/arminc/clair-scanner/releases/download/v8/clair-scanner_linux_amd64
mv clair-scanner_linux_amd64 clair-scanner
chmod +x clair-scanner
touch clair-whitelist.yml
./clair-scanner -c http://docker:6060 --ip $(hostname -i) -r gl-sast-container-report.json -l clair.log -w clair-whitelist.yml ${CI_APPLICATION_REPOSITORY}:${CI_APPLICATION_TAG} || true
}
function codeclimate() {
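    # Run the Code Climate CLI in Docker against the checked-out code and store
    # the JSON report in codeclimate.json.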
cc_opts="--env CODECLIMATE_CODE="$PWD" \
--volume "$PWD":/code \
--volume /var/run/docker.sock:/var/run/docker.sock \
--volume /tmp/cc:/tmp/cc"
docker run ${cc_opts} "codeclimate/codeclimate:${CODECLIMATE_VERSION}" init
docker run ${cc_opts} "codeclimate/codeclimate:${CODECLIMATE_VERSION}" analyze -f json > codeclimate.json
}
function sast() {
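    # Run GitLab's SAST analyzer image, using the tag that matches the MAJOR-MINOR
    # GitLab EE version; on non-EE instances it only prints a notice.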
case "$CI_SERVER_VERSION" in
*-ee)
# Extract "MAJOR.MINOR" from CI_SERVER_VERSION and generate "MAJOR-MINOR-stable"
SAST_VERSION=$(echo "$CI_SERVER_VERSION" | sed 's/^\([0-9]*\)\.\([0-9]*\).*/\1-\2-stable/')
# Deprecation notice for CONFIDENCE_LEVEL variable
if [ -z "$SAST_CONFIDENCE_LEVEL" -a "$CONFIDENCE_LEVEL" ]; then
SAST_CONFIDENCE_LEVEL="$CONFIDENCE_LEVEL"
echo "WARNING: CONFIDENCE_LEVEL is deprecated and MUST be replaced with SAST_CONFIDENCE_LEVEL"
fi
docker run --env SAST_CONFIDENCE_LEVEL="${SAST_CONFIDENCE_LEVEL:-3}" \
--env SAST_DISABLE_REMOTE_CHECKS="${SAST_DISABLE_REMOTE_CHECKS:-false}" \
--volume "$PWD:/code" \
--volume /var/run/docker.sock:/var/run/docker.sock \
"registry.gitlab.com/gitlab-org/security-products/sast:$SAST_VERSION" /app/bin/run /code
;;
*)
echo "GitLab EE is required"
;;
esac
}
function deploy() {
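    # Install or upgrade the Helm release for the given track (default: stable);
    # only the stable track enables the service, and canary reuses the stable database.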
track="${1-stable}"
name="$CI_ENVIRONMENT_SLUG"
if [[ "$track" != "stable" ]]; then
name="$name-$track"
fi
replicas="1"
service_enabled="false"
postgres_enabled="$POSTGRES_ENABLED"
# canary uses stable db
[[ "$track" == "canary" ]] && postgres_enabled="false"
env_track=$( echo $track | tr -s '[:lower:]' '[:upper:]' )
env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]' )
if [[ "$track" == "stable" ]]; then
      # for the stable track, read the replica count from <ENV>_REPLICAS (e.g. `PRODUCTION_REPLICAS`)
eval new_replicas=\$${env_slug}_REPLICAS
service_enabled="true"
else
      # for other tracks, read the replica count from <TRACK>_<ENV>_REPLICAS (e.g. `CANARY_PRODUCTION_REPLICAS`)
eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
fi
if [[ -n "$new_replicas" ]]; then
replicas="$new_replicas"
fi
if [[ "$CI_PROJECT_VISIBILITY" != "public" ]]; then
secret_name='gitlab-registry'
else
secret_name=''
fi
helm upgrade --install \
--wait \
--set service.enabled="$service_enabled" \
--set releaseOverride="$CI_ENVIRONMENT_SLUG" \
--set image.repository="$CI_APPLICATION_REPOSITORY" \
--set image.tag="$CI_APPLICATION_TAG" \
--set image.pullPolicy=IfNotPresent \
--set image.secrets[0].name="$secret_name" \
--set application.track="$track" \
--set application.database_url="$DATABASE_URL" \
--set service.url="$CI_ENVIRONMENT_URL" \
--set replicaCount="$replicas" \
--set postgresql.enabled="$postgres_enabled" \
--set postgresql.nameOverride="postgres" \
--set postgresql.postgresUser="$POSTGRES_USER" \
--set postgresql.postgresPassword="$POSTGRES_PASSWORD" \
--set postgresql.postgresDatabase="$POSTGRES_DB" \
--namespace="$KUBE_NAMESPACE" \
--version="$CI_PIPELINE_ID-$CI_JOB_ID" \
"$name" \
chart/
}
function install_dependencies() {
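    # Install the client tooling needed by the deploy jobs on the Alpine image:
    # a glibc compatibility package, helm, and kubectl.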
apk add -U openssl curl tar gzip bash ca-certificates git
wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://raw.githubusercontent.com/sgerrand/alpine-pkg-glibc/master/sgerrand.rsa.pub
wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.23-r3/glibc-2.23-r3.apk
apk add glibc-2.23-r3.apk
rm glibc-2.23-r3.apk
curl "https://kubernetes-helm.storage.googleapis.com/helm-v${HELM_VERSION}-linux-amd64.tar.gz" | tar zx
mv linux-amd64/helm /usr/bin/
helm version --client
curl -L -o /usr/bin/kubectl "https://storage.googleapis.com/kubernetes-release/release/v${KUBERNETES_VERSION}/bin/linux/amd64/kubectl"
chmod +x /usr/bin/kubectl
kubectl version --client
}
function setup_docker() {
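    # When no Docker daemon is reachable and the job runs on a Kubernetes executor,
    # point the Docker client at the docker:dind service on localhost:2375.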
if ! docker info &>/dev/null; then
if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
export DOCKER_HOST='tcp://localhost:2375'
fi
fi
}
function setup_test_db() {
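    # Build DATABASE_URL for the test job: the postgres service is reachable as
    # `postgres` on Docker executors and as `localhost` on Kubernetes executors.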
if [ -z ${KUBERNETES_PORT+x} ]; then
DB_HOST=postgres
else
DB_HOST=localhost
fi
export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
}
function download_chart() {
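    # Use the in-repo chart/ directory if present; otherwise fetch AUTO_DEVOPS_CHART
    # (default: gitlab/auto-deploy-app) from charts.gitlab.io into chart/.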
if [[ ! -d chart ]]; then
auto_chart=${AUTO_DEVOPS_CHART:-gitlab/auto-deploy-app}
auto_chart_name=$(basename $auto_chart)
auto_chart_name=${auto_chart_name%.tgz}
else
auto_chart="chart"
auto_chart_name="chart"
fi
helm init --client-only
helm repo add gitlab https://charts.gitlab.io
if [[ ! -d "$auto_chart" ]]; then
helm fetch ${auto_chart} --untar
fi
if [ "$auto_chart_name" != "chart" ]; then
mv ${auto_chart_name} chart
fi
helm dependency update chart/
helm dependency build chart/
}
function ensure_namespace() {
kubectl describe namespace "$KUBE_NAMESPACE" || kubectl create namespace "$KUBE_NAMESPACE"
}
function check_kube_domain() {
if [ -z ${AUTO_DEVOPS_DOMAIN+x} ]; then
echo "In order to deploy or use Review Apps, AUTO_DEVOPS_DOMAIN variable must be set"
echo "You can do it in Auto DevOps project settings or defining a secret variable at group or project level"
echo "You can also manually add it in .gitlab-ci.yml"
false
else
true
fi
}
function build() {
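    # Build the application image: use the project's Dockerfile if there is one,
    # otherwise build with Heroku buildpacks via herokuish, then push to the registry.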
if [[ -n "$CI_REGISTRY_USER" ]]; then
echo "Logging to GitLab Container Registry with CI credentials..."
docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
echo ""
fi
if [[ -f Dockerfile ]]; then
echo "Building Dockerfile-based application..."
docker build -t "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" .
else
echo "Building Heroku-based application using gliderlabs/herokuish docker image..."
docker run -i --name="$CI_CONTAINER_NAME" -v "$(pwd):/tmp/app:ro" gliderlabs/herokuish /bin/herokuish buildpack build
docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
docker rm "$CI_CONTAINER_NAME" >/dev/null
echo ""
echo "Configuring $CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG docker image..."
docker create --expose 5000 --env PORT=5000 --name="$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" /bin/herokuish procfile start web
docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
docker rm "$CI_CONTAINER_NAME" >/dev/null
echo ""
fi
echo "Pushing to GitLab Container Registry..."
docker push "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
echo ""
}
function install_tiller() {
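    # Install or upgrade Tiller in $TILLER_NAMESPACE and wait for its deployment to roll out.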
echo "Checking Tiller..."
helm init --upgrade
kubectl rollout status -n "$TILLER_NAMESPACE" -w "deployment/tiller-deploy"
if ! helm version --debug; then
echo "Failed to init Tiller."
return 1
fi
echo ""
}
function create_secret() {
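    # For non-public projects, create (or replace) the gitlab-registry image pull
    # secret so the cluster can pull images from the project's container registry.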
echo "Create secret..."
if [[ "$CI_PROJECT_VISIBILITY" == "public" ]]; then
return
fi
kubectl create secret -n "$KUBE_NAMESPACE" \
docker-registry gitlab-registry \
--docker-server="$CI_REGISTRY" \
--docker-username="$CI_REGISTRY_USER" \
--docker-password="$CI_REGISTRY_PASSWORD" \
--docker-email="$GITLAB_USER_EMAIL" \
-o yaml --dry-run | kubectl replace -n "$KUBE_NAMESPACE" --force -f -
}
function dast() {
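    # Run the OWASP ZAP baseline scan against the URL persisted by the deploy job
    # and copy the resulting gl-dast-report.json into the workspace.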
export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
mkdir /zap/wrk/
/zap/zap-baseline.py -J gl-dast-report.json -t "$CI_ENVIRONMENT_URL" || true
cp /zap/wrk/gl-dast-report.json .
}
function performance() {
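    # Run sitespeed.io (with the gl-performance plugin) against the deployed URL,
    # or against the URLs listed in .gitlab-urls.txt if that file exists, and keep the reports.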
export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
mkdir gitlab-exporter
wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/10-5/index.js
mkdir sitespeed-results
if [ -f .gitlab-urls.txt ]
then
sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
else
docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.3.1 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
fi
mv sitespeed-results/data/performance.json performance.json
}
function persist_environment_url() {
echo $CI_ENVIRONMENT_URL > environment_url.txt
}
function delete() {
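    # Delete the Helm release for this environment/track, if one exists.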
track="${1-stable}"
name="$CI_ENVIRONMENT_SLUG"
if [[ "$track" != "stable" ]]; then
name="$name-$track"
fi
if [[ -n "$(helm ls -q "^$name$")" ]]; then
helm delete "$name"
fi
}
before_script:
- *auto_devops