#!/bin/bash
set -e

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string passed as argument.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.

set -o pipefail

# Print a usage message and exit.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker repository;
- to be provided with the name of an S3 bucket, in environment variable AWS_S3_BUCKET;
- to be provided with AWS credentials for this S3 bucket, in environment variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
- the passphrase to unlock the GPG key which will sign the deb packages (passed as environment variable GPG_PASSPHRASE);
- a generous amount of good will and nice manners.

The canonical way to run me is to run the image produced by the Dockerfile, e.g.:

docker run -e AWS_S3_BUCKET=get-staging.docker.io \
           -e AWS_ACCESS_KEY=AKI1234... \
           -e AWS_SECRET_KEY=sEs4mE... \
           -e GPG_PASSPHRASE=m0resEs4mE... \
           -i -t -privileged \
           docker ./hack/release.sh
EOF
	exit 1
}

[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY" ] || usage
[ "$AWS_SECRET_KEY" ] || usage
[ "$GPG_PASSPHRASE" ] || usage
[ -d /go/src/github.com/dotcloud/docker ] || usage
cd /go/src/github.com/dotcloud/docker
[ -x hack/make.sh ] || usage

RELEASE_BUNDLES=( binary cross tgz ubuntu )

if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=( test "${RELEASE_BUNDLES[@]}" )
fi

if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
	echo >&2
	echo >&2 'The build or tests appear to have failed.'
	echo >&2
	echo >&2 'You, as the release maintainer, now have a couple options:'
	echo >&2 '- delay release and fix issues'
	echo >&2 '- delay release and fix issues'
	echo >&2 '- did we mention how important this is? issues need fixing :)'
	echo >&2
	echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
	echo >&2 ' really knows all the hairy problems at hand with the current release'
	echo >&2 ' issues) may bypass this checking by running this script again with the'
	echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
	echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
	echo >&2 ' avoid using this if at all possible.'
	echo >&2
	echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
	echo >&2 ' should be used. If there are release issues, we should always err on the'
	echo >&2 ' side of caution.'
	echo >&2
	exit 1
fi

VERSION=$(cat VERSION)
BUCKET=$AWS_S3_BUCKET
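# Note: the script itself never reads AWS_ACCESS_KEY or AWS_SECRET_KEY; s3cmd
# is expected to pick the credentials up from its own configuration, which is
# presumably written by the release container's Dockerfile. A minimal ~/.s3cfg
# along these (assumed) lines is what s3cmd needs:
#
#   [default]
#   access_key = <value of AWS_ACCESS_KEY>
#   secret_key = <value of AWS_SECRET_KEY>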
setup_s3() {
	# Try creating the bucket. Ignore errors (it might already exist).
	s3cmd mb s3://$BUCKET 2>/dev/null || true
	# Check access to the bucket.
	# s3cmd has no useful exit status, so we cannot check that.
	# Instead, we check if it outputs anything on standard output.
	# (When there are problems, it uses standard error instead.)
	s3cmd info s3://$BUCKET | grep -q .
	# Make the bucket accessible through website endpoints.
	s3cmd ws-create --ws-index index --ws-error error s3://$BUCKET
}

# write_to_s3 uploads the contents of standard input to the specified S3 url.
write_to_s3() {
	DEST=$1
	F=`mktemp`
	cat > $F
	s3cmd --acl-public --mime-type='text/plain' put $F $DEST
	rm -f $F
}

# s3_url prints the public HTTP(S) base URL of the bucket.
s3_url() {
	case "$BUCKET" in
		get.docker.io|test.docker.io)
			echo "https://$BUCKET"
			;;
		*)
			s3cmd ws-info s3://$BUCKET | awk -v 'FS=: +' '/http:\/\/'$BUCKET'/ { gsub(/\/+$/, "", $2); print $2 }'
			;;
	esac
}

# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/index
release_ubuntu() {
	[ -e bundles/$VERSION/ubuntu ] || {
		echo >&2 './hack/make.sh must be run before release_ubuntu'
		exit 1
	}

	# Make sure that we have our keys
	mkdir -p /.gnupg/
	s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
	gpg --list-keys releasedocker >/dev/null || {
		# Generate the 'releasedocker' signing key unattended if it does not
		# exist yet. (The batch parameters below are illustrative; the
		# passphrase must be GPG_PASSPHRASE so later signing works.)
		gpg --gen-key --batch <<EOF
Key-Type: RSA
Key-Length: 2048
Passphrase: $GPG_PASSPHRASE
Name-Real: Docker Release Tool
Name-Comment: releasedocker
Expire-Date: 0
%commit
EOF
	}

	# Sign the deb packages with the release key.
	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
		--sign builder bundles/$VERSION/ubuntu/*.deb

	# Set up the APT repository with reprepro. The distribution config must
	# match the 'deb ... docker main' line published in the instructions below.
	APTDIR=bundles/$VERSION/ubuntu/apt
	mkdir -p $APTDIR/conf $APTDIR/db
	cat > $APTDIR/conf/distributions <<EOF
Codename: docker
Components: main
Architectures: amd64 i386
EOF
	reprepro -b $APTDIR includedeb docker bundles/$VERSION/ubuntu/*.deb

	# Upload keys: persist the keyring for future releases, and publish the
	# public key so users can verify the repository.
	s3cmd sync /.gnupg/ s3://$BUCKET/ubuntu/.gnupg/
	gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg
	s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg

	# Upload repo
	s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/index
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key
curl $(s3_url)/gpg | apt-key add -
# Install docker
apt-get update ; apt-get install -y lxc-docker

#
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
#
EOF

	# Add redirect at /ubuntu/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	s3cmd --acl-public --add-header='x-amz-website-redirect-location:/ubuntu/' --mime-type='text/plain' put /tmp/emptyfile s3://$BUCKET/ubuntu/info

	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu"
}

# Upload a tgz to S3
release_tgz() {
	[ -e bundles/$VERSION/tgz/docker-$VERSION.tgz ] || {
		echo >&2 './hack/make.sh must be run before release_tgz'
		exit 1
	}

	S3DIR=s3://$BUCKET/builds/Linux/x86_64
	s3cmd --acl-public put bundles/$VERSION/tgz/docker-$VERSION.tgz $S3DIR/docker-$VERSION.tgz

	if [ -z "$NOLATEST" ]; then
		echo "Copying docker-$VERSION.tgz to docker-latest.tgz"
		s3cmd --acl-public cp $S3DIR/docker-$VERSION.tgz $S3DIR/docker-latest.tgz
	fi
}

# Upload a static binary to S3
release_binary() {
	[ -e bundles/$VERSION/binary/docker-$VERSION ] || {
		echo >&2 './hack/make.sh must be run before release_binary'
		exit 1
	}

	S3DIR=s3://$BUCKET/builds/Linux/x86_64
	s3cmd --acl-public put bundles/$VERSION/binary/docker-$VERSION $S3DIR/docker-$VERSION

	# Publish install instructions next to the binary (assumed location:
	# $BUCKET/builds/index, served by the website endpoint).
	cat <<EOF | write_to_s3 s3://$BUCKET/builds/index
# To install, run the following command as root:
curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode:
sudo /usr/local/bin/docker -d
EOF

	# Mirror release_tgz: also advertise the binary as docker-latest unless
	# NOLATEST is set.
	if [ -z "$NOLATEST" ]; then
		echo "Copying docker-$VERSION to docker-latest"
		s3cmd --acl-public cp $S3DIR/docker-$VERSION $S3DIR/docker-latest
	fi
}

# Upload the cross-compiled binaries to S3 (not implemented yet).
release_cross() {
	[ -e bundles/$VERSION/cross ] || {
		echo >&2 './hack/make.sh must be run before release_cross'
		exit 1
	}
	# TODO find out from @shykes what URLs he'd like to use here
}

# Upload the index script
release_index() {
	sed "s,https://get.docker.io/,$(s3_url)/," hack/install.sh | write_to_s3 s3://$BUCKET/index
}

# Upload the test results, if any were produced.
release_test() {
	if [ -e "bundles/$VERSION/test" ]; then
		s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/
	fi
}

main() {
	setup_s3
	release_binary
	release_cross
	release_tgz
	release_ubuntu
	release_index
	release_test
}

main