mirror of
https://github.com/moby/moby.git
synced 2022-11-09 12:21:53 -05:00
227 lines
6.9 KiB
Bash
Executable file
227 lines
6.9 KiB
Bash
Executable file
#!/bin/bash

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string passed as argument.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.

# Abort on the first failing command, including failures inside pipelines.
set -e
set -o pipefail
# Print a usage message and exit.
usage() {
	# Quoted heredoc delimiter: the text below is emitted literally (no
	# expansion) to stderr, then we exit non-zero so callers fail fast.
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
repository;
- to be provided with the name of an S3 bucket, in environment variable
AWS_S3_BUCKET;
- to be provided with AWS credentials for this S3 bucket, in environment
variables AWS_ACCESS_KEY and AWS_SECRET_KEY;
- the passphrase to unlock the GPG key which will sign the deb packages
(passed as environment variable GPG_PASSPHRASE);
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile: e.g.:"

docker run -e AWS_S3_BUCKET=get-staging.docker.io \
-e AWS_ACCESS_KEY=AKI1234... \
-e AWS_SECRET_KEY=sEs4mE... \
-e GPG_PASSPHRASE=m0resEs4mE... \
-i -t -privileged \
docker ./hack/release.sh
EOF
	exit 1
}
# All of these must be present for a release; bail out to the usage text the
# moment any one of them is missing. Checked in the same order as documented.
for required_var in AWS_S3_BUCKET AWS_ACCESS_KEY AWS_SECRET_KEY GPG_PASSPHRASE; do
	[ "${!required_var}" ] || usage
done

# We must be running inside the release container, with make.sh available.
[ -d /go/src/github.com/dotcloud/docker ] || usage
cd /go/src/github.com/dotcloud/docker
[ -x hack/make.sh ] || usage
# Bundles to build and publish. Unless the maintainer explicitly bypasses it,
# the test bundle runs first so a failing suite blocks the release.
if [ "$1" = '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=( binary ubuntu )
else
	RELEASE_BUNDLES=( test binary ubuntu )
fi
# Run the build (and, unless bypassed, the test suite). A failure refuses the
# release with a strongly-worded lecture and one documented escape hatch.
if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
	cat >&2 <<'EOF'

The build or tests appear to have failed.

You, as the release maintainer, now have a couple options:
- delay release and fix issues
- delay release and fix issues
- did we mention how important this is? issues need fixing :)

As a final LAST RESORT, you (because only you, the release maintainer,
 really knows all the hairy problems at hand with the current release
 issues) may bypass this checking by running this script again with the
 single argument of "--release-regardless-of-test-failure", which will skip
 running the test suite, and will only build the binaries and packages. Please
 avoid using this if at all possible.

Regardless, we cannot stress enough the scarcity with which this bypass
 should be used. If there are release issues, we should always err on the
 side of caution.

EOF
	exit 1
fi
# The version being released is whatever the repository's VERSION file says
# (we are already chdir'ed into the source tree at this point).
VERSION=$(cat VERSION)
# Shorthand for the destination bucket, validated non-empty earlier.
BUCKET=$AWS_S3_BUCKET
# Ensure the target S3 bucket exists, is reachable, and serves as a website.
# Globals: BUCKET (read). Exits non-zero (via set -e) if the bucket is
# inaccessible.
setup_s3() {
	# Try creating the bucket. Ignore errors (it might already exist).
	s3cmd mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	# s3cmd has no useful exit status, so we cannot check that.
	# Instead, we check if it outputs anything on standard output.
	# (When there are problems, it uses standard error instead.)
	s3cmd info "s3://$BUCKET" | grep -q .
	# Make the bucket accessible through website endpoints.
	s3cmd ws-create --ws-index index --ws-error error "s3://$BUCKET"
}
# write_to_s3 uploads the contents of standard input to the specified S3 url.
# Arguments: $1 - destination s3:// URL.
# Returns: the upload's exit status; the temp file is removed on both paths
# (the original leaked it when s3cmd failed under `set -e`).
write_to_s3() {
	local DEST F status
	DEST=$1
	F=$(mktemp)
	cat > "$F"
	status=0
	s3cmd --acl-public put "$F" "$DEST" || status=$?
	rm -f "$F"
	return "$status"
}
# Print the public base URL for the current bucket. The two official buckets
# are fronted by HTTPS vanity domains; anything else falls back to the plain
# S3 website endpoint.
s3_url() {
	if [ "$BUCKET" = 'get.docker.io' ] || [ "$BUCKET" = 'test.docker.io' ]; then
		echo "https://$BUCKET"
	else
		echo "http://$BUCKET.s3.amazonaws.com"
	fi
}
# Upload the 'ubuntu' bundle to S3:
# 1. A full APT repository is published at $BUCKET/ubuntu/
# 2. Instructions for using the APT repository are uploaded at $BUCKET/ubuntu/info
# Globals read: VERSION, BUCKET, GPG_PASSPHRASE.
release_ubuntu() {
	# Refuse to run unless make.sh already produced the ubuntu bundle.
	[ -e bundles/$VERSION/ubuntu ] || {
		echo >&2 './hack/make.sh must be run before release_ubuntu'
		exit 1
	}
	# Make sure that we have our keys
	# Pull any previously-published keyring down from the bucket so the same
	# signing key is reused across releases; best-effort (|| true) because a
	# first release has nothing to sync yet.
	mkdir -p /.gnupg/
	s3cmd sync s3://$BUCKET/ubuntu/.gnupg/ /.gnupg/ || true
	# Generate the 'releasedocker' signing key only if it is not already in
	# the keyring. Unquoted heredoc: $GPG_PASSPHRASE expands into the batch
	# parameters below.
	gpg --list-keys releasedocker >/dev/null || {
		gpg --gen-key --batch <<EOF
Key-Type: RSA
Key-Length: 2048
Passphrase: $GPG_PASSPHRASE
Name-Real: Docker Release Tool
Name-Email: docker@dotcloud.com
Name-Comment: releasedocker
Expire-Date: 0
%commit
EOF
	}

	# Sign our packages
	# NOTE(review): the passphrase appears on the command line here and below,
	# so it is briefly visible in `ps` output on the build host.
	dpkg-sig -g "--passphrase $GPG_PASSPHRASE" -k releasedocker \
		--sign builder bundles/$VERSION/ubuntu/*.deb

	# Setup the APT repo
	APTDIR=bundles/$VERSION/ubuntu/apt
	mkdir -p $APTDIR/conf $APTDIR/db
	# Reuse the existing reprepro database from the bucket (best-effort).
	s3cmd sync s3://$BUCKET/ubuntu/db/ $APTDIR/db/ || true
	cat > $APTDIR/conf/distributions <<EOF
Codename: docker
Components: main
Architectures: amd64 i386
EOF

	# Add the DEB package to the APT repo
	# DEBFILE holds a glob pattern; it expands at the unquoted use below.
	DEBFILE=bundles/$VERSION/ubuntu/lxc-docker*.deb
	reprepro -b $APTDIR includedeb docker $DEBFILE

	# Sign
	# Detach-sign every Release file reprepro produced.
	for F in $(find $APTDIR -name Release); do
		gpg -u releasedocker --passphrase $GPG_PASSPHRASE \
			--armor --sign --detach-sign \
			--output $F.gpg $F
	done

	# Upload keys
	# Push the keyring back (so the next release reuses it) and publish the
	# public key for `apt-key add`.
	s3cmd sync /.gnupg/ s3://$BUCKET/ubuntu/.gnupg/
	gpg --armor --export releasedocker > bundles/$VERSION/ubuntu/gpg
	s3cmd --acl-public put bundles/$VERSION/ubuntu/gpg s3://$BUCKET/gpg

	# Upload repo
	s3cmd --acl-public sync $APTDIR/ s3://$BUCKET/ubuntu/
	# Publish human-readable install instructions; unquoted heredoc, so
	# $(s3_url) runs and expands before the text is uploaded.
	cat <<EOF | write_to_s3 s3://$BUCKET/ubuntu/info
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key
curl $(s3_url)/gpg | apt-key add -
# Install docker
apt-get update ; apt-get install -y lxc-docker

#
# Alternatively, just use the curl-able install.sh script provided at $(s3_url)
#
EOF
	echo "APT repository uploaded. Instructions available at $(s3_url)/ubuntu/info"
}
# Upload a static binary to S3
# Publishes bundles/$VERSION/binary/docker-$VERSION under builds/Linux/x86_64,
# plus install instructions, and (unless NOLATEST is set) advertises it as
# docker-latest. Globals read: VERSION, BUCKET, NOLATEST.
release_binary() {
	# Refuse to run unless make.sh already produced the binary.
	[ -e "bundles/$VERSION/binary/docker-$VERSION" ] || {
		echo >&2 './hack/make.sh must be run before release_binary'
		exit 1
	}
	S3DIR="s3://$BUCKET/builds/Linux/x86_64"
	s3cmd --acl-public put "bundles/$VERSION/binary/docker-$VERSION" "$S3DIR/docker-$VERSION"
	# Unquoted heredoc: $(s3_url) and $VERSION expand before upload.
	cat <<EOF | write_to_s3 "s3://$BUCKET/builds/info"
# To install, run the following command as root:
curl -O $(s3_url)/builds/Linux/x86_64/docker-$VERSION && chmod +x docker-$VERSION && sudo mv docker-$VERSION /usr/local/bin/docker
# Then start docker in daemon mode:
sudo /usr/local/bin/docker -d
EOF
	# Unless explicitly disabled, point "latest" at this build too.
	if [ -z "$NOLATEST" ]; then
		echo "Copying docker-$VERSION to docker-latest"
		s3cmd --acl-public cp "$S3DIR/docker-$VERSION" "$S3DIR/docker-latest"
		echo "Advertising $VERSION on $BUCKET as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET/latest"
	fi
}
# Upload the index script
release_index() {
	# Point install.sh's canonical URL at this bucket, then publish the
	# rewritten script as the bucket's index document.
	local bucket_url
	bucket_url="$(s3_url)"
	sed "s,https://get.docker.io/,$bucket_url/," hack/install.sh | write_to_s3 s3://$BUCKET/index
}
# Publish test results for this version, if make.sh produced any.
release_test() {
	# Nothing to upload is a normal, successful outcome.
	[ -e "bundles/$VERSION/test" ] || return 0
	s3cmd --acl-public sync bundles/$VERSION/test/ s3://$BUCKET/test/
}
# Run every stage of the release, in dependency order: bucket setup first,
# then the artifacts, then the index and test results.
main() {
	local stage
	for stage in setup_s3 release_binary release_ubuntu release_index release_test; do
		"$stage"
	done
}
# Entry point: run the whole release pipeline.
main