Mirror of https://github.com/moby/moby.git (synced 2022-11-09 12:21:53 -05:00)
Remove packaging scripts
These scripts have not been used for a while now, and should not be used again: they are for releasing docker, not moby.
Signed-off-by: Daniel Nephin <dnephin@docker.com>
Parent: 147ec3be2e
Commit: eef85648e4
30 changed files with 22 additions and 1812 deletions
@@ -1 +0,0 @@
9
@@ -1,29 +0,0 @@
Source: docker-engine
Section: admin
Priority: optional
Maintainer: Docker <support@docker.com>
Standards-Version: 3.9.6
Homepage: https://dockerproject.org
Vcs-Browser: https://github.com/docker/docker
Vcs-Git: git://github.com/docker/docker.git

Package: docker-engine
Architecture: linux-any
Depends: iptables, ${misc:Depends}, ${perl:Depends}, ${shlibs:Depends}
Recommends: aufs-tools,
 ca-certificates,
 cgroupfs-mount | cgroup-lite,
 git,
 xz-utils,
 ${apparmor:Recommends}
Conflicts: docker (<< 1.5~), docker.io, lxc-docker, lxc-docker-virtual-package, docker-engine-cs
Description: Docker: the open-source application container engine
 Docker is an open source project to build, ship and run any application as a
 lightweight container
 .
 Docker containers are both hardware-agnostic and platform-agnostic. This means
 they can run anywhere, from your laptop to the largest EC2 compute instance and
 everything in between - and they don't require you to use a particular
 language, framework or packaging system. That makes them great building blocks
 for deploying and scaling web apps, databases, and backend services without
 depending on a particular stack or provider.
@@ -1 +0,0 @@
contrib/completion/bash/docker
@@ -1 +0,0 @@
../../../contrib/init/sysvinit-debian/docker.default
@@ -1 +0,0 @@
../../../contrib/init/sysvinit-debian/docker
@@ -1 +0,0 @@
../../../contrib/init/upstart/docker.conf
@@ -1,12 +0,0 @@
#contrib/syntax/vim/doc/* /usr/share/vim/vimfiles/doc/
#contrib/syntax/vim/ftdetect/* /usr/share/vim/vimfiles/ftdetect/
#contrib/syntax/vim/syntax/* /usr/share/vim/vimfiles/syntax/
contrib/*-integration usr/share/docker-engine/contrib/
contrib/check-config.sh usr/share/docker-engine/contrib/
contrib/completion/fish/docker.fish usr/share/fish/vendor_completions.d/
contrib/completion/zsh/_docker usr/share/zsh/vendor-completions/
contrib/init/systemd/docker.service lib/systemd/system/
contrib/init/systemd/docker.socket lib/systemd/system/
contrib/mk* usr/share/docker-engine/contrib/
contrib/nuke-graph-directory.sh usr/share/docker-engine/contrib/
contrib/syntax/nano/Dockerfile.nanorc usr/share/nano/
@@ -1 +0,0 @@
man/man*/*
@@ -1,20 +0,0 @@
#!/bin/sh
set -e

case "$1" in
	configure)
		if [ -z "$2" ]; then
			if ! getent group docker > /dev/null; then
				groupadd --system docker
			fi
		fi
		;;
	abort-*)
		# How'd we get here??
		exit 1
		;;
	*)
		;;
esac

#DEBHELPER#
@@ -1 +0,0 @@
../../../contrib/udev/80-docker.rules
@@ -1 +0,0 @@
README.md
@@ -1,53 +0,0 @@
#!/usr/bin/make -f

VERSION = $(shell cat VERSION)
SYSTEMD_VERSION := $(shell dpkg-query -W -f='$${Version}\n' systemd | cut -d- -f1)
SYSTEMD_GT_227 := $(shell [ '$(SYSTEMD_VERSION)' ] && [ '$(SYSTEMD_VERSION)' -gt 227 ] && echo true )

override_dh_gencontrol:
	# if we're on Ubuntu, we need to Recommends: apparmor
	echo 'apparmor:Recommends=$(shell dpkg-vendor --is Ubuntu && echo apparmor)' >> debian/docker-engine.substvars
	dh_gencontrol

override_dh_auto_build:
	./hack/make.sh dynbinary
	# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here

override_dh_auto_test:
	./bundles/$(VERSION)/dynbinary-daemon/dockerd -v

override_dh_strip:
	# Go has lots of problems with stripping, so just don't

override_dh_auto_install:
	mkdir -p debian/docker-engine/usr/bin
	cp -aT "$$(readlink -f bundles/$(VERSION)/dynbinary-daemon/dockerd)" debian/docker-engine/usr/bin/dockerd
	cp -aT /usr/local/bin/docker-proxy debian/docker-engine/usr/bin/docker-proxy
	cp -aT /usr/local/bin/docker-containerd debian/docker-engine/usr/bin/docker-containerd
	cp -aT /usr/local/bin/docker-containerd-shim debian/docker-engine/usr/bin/docker-containerd-shim
	cp -aT /usr/local/bin/docker-containerd-ctr debian/docker-engine/usr/bin/docker-containerd-ctr
	cp -aT /usr/local/bin/docker-runc debian/docker-engine/usr/bin/docker-runc
	cp -aT /usr/local/bin/docker-init debian/docker-engine/usr/bin/docker-init
	mkdir -p debian/docker-engine/usr/lib/docker

override_dh_installinit:
	# use "docker" as our service name, not "docker-engine"
	dh_installinit --name=docker
ifeq (true, $(SYSTEMD_GT_227))
	$(warning "Setting TasksMax=infinity")
	sed -i -- 's/#TasksMax=infinity/TasksMax=infinity/' debian/docker-engine/lib/systemd/system/docker.service
endif

override_dh_installudev:
	# match our existing priority
	dh_installudev --priority=z80

override_dh_install:
	dh_install
	dh_apparmor --profile-name=docker-engine -pdocker-engine

override_dh_shlibdeps:
	dh_shlibdeps --dpkg-shlibdeps-params=--ignore-missing-info

%:
	dh $@ --with=bash-completion $(shell command -v dh_systemd_enable > /dev/null 2>&1 && echo --with=systemd)
@@ -1,99 +0,0 @@
# Some bits borrowed from the openstack-selinux package
Name: docker-engine-selinux
Version: %{_version}
Release: %{_release}%{?dist}
Summary: SELinux Policies for the open-source application container engine
BuildArch: noarch
Group: Tools/Docker

License: GPLv2
Source: %{name}.tar.gz

URL: https://dockerproject.org
Vendor: Docker
Packager: Docker <support@docker.com>

%global selinux_policyver 3.13.1-102
%if 0%{?oraclelinux} >= 7
%global selinux_policyver 3.13.1-102.0.3.el7_3.15
%endif # oraclelinux 7
%global selinuxtype targeted
%global moduletype services
%global modulenames docker

Requires(post): selinux-policy-base >= %{selinux_policyver}, selinux-policy-targeted >= %{selinux_policyver}, policycoreutils, policycoreutils-python libselinux-utils
BuildRequires: selinux-policy selinux-policy-devel

# conflicting packages
Conflicts: docker-selinux

# Usage: _format var format
# Expand 'modulenames' into various formats as needed
# Format must contain '$x' somewhere to do anything useful
%global _format() export %1=""; for x in %{modulenames}; do %1+=%2; %1+=" "; done;

# Relabel files
%global relabel_files() \
	/sbin/restorecon -R %{_bindir}/docker %{_localstatedir}/run/docker.sock %{_localstatedir}/run/docker.pid %{_sysconfdir}/docker %{_localstatedir}/log/docker %{_localstatedir}/log/lxc %{_localstatedir}/lock/lxc %{_usr}/lib/systemd/system/docker.service /root/.docker &> /dev/null || : \

%description
SELinux policy modules for use with Docker

%prep
%if 0%{?centos} <= 6
%setup -n %{name}
%else
%autosetup -n %{name}
%endif

%build
make SHARE="%{_datadir}" TARGETS="%{modulenames}"

%install

# Install SELinux interfaces
%_format INTERFACES $x.if
install -d %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}
install -p -m 644 $INTERFACES %{buildroot}%{_datadir}/selinux/devel/include/%{moduletype}

# Install policy modules
%_format MODULES $x.pp.bz2
install -d %{buildroot}%{_datadir}/selinux/packages
install -m 0644 $MODULES %{buildroot}%{_datadir}/selinux/packages

%post
#
# Install all modules in a single transaction
#
if [ $1 -eq 1 ]; then
	%{_sbindir}/setsebool -P -N virt_use_nfs=1 virt_sandbox_use_all_caps=1
fi
%_format MODULES %{_datadir}/selinux/packages/$x.pp.bz2
%{_sbindir}/semodule -n -s %{selinuxtype} -i $MODULES
if %{_sbindir}/selinuxenabled ; then
	%{_sbindir}/load_policy
	%relabel_files
	if [ $1 -eq 1 ]; then
		restorecon -R %{_sharedstatedir}/docker
	fi
fi

%postun
if [ $1 -eq 0 ]; then
	%{_sbindir}/semodule -n -r %{modulenames} &> /dev/null || :
	if %{_sbindir}/selinuxenabled ; then
		%{_sbindir}/load_policy
		%relabel_files
	fi
fi

%files
%doc LICENSE
%defattr(-,root,root,0755)
%attr(0644,root,root) %{_datadir}/selinux/packages/*.pp.bz2
%attr(0644,root,root) %{_datadir}/selinux/devel/include/%{moduletype}/*.if

%changelog
* Tue Dec 1 2015 Jessica Frazelle <acidburn@docker.com> 1.9.1-1
- add licence to rpm
- add selinux-policy and docker-engine-selinux rpm
@@ -1,249 +0,0 @@
Name: docker-engine
Version: %{_version}
Release: %{_release}%{?dist}
Summary: The open-source application container engine
Group: Tools/Docker

License: ASL 2.0
Source: %{name}.tar.gz

URL: https://dockerproject.org
Vendor: Docker
Packager: Docker <support@docker.com>

# is_systemd conditional
%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?suse_version} >= 1210
%global is_systemd 1
%endif

# required packages for build
# most are already in the container (see contrib/builder/rpm/ARCH/generate.sh)
# only require systemd on those systems
%if 0%{?is_systemd}
%if 0%{?suse_version} >= 1210
BuildRequires: systemd-rpm-macros
%{?systemd_requires}
%else
%if 0%{?fedora} >= 25
# Systemd 230 and up no longer have libsystemd-journal (see https://bugzilla.redhat.com/show_bug.cgi?id=1350301)
BuildRequires: pkgconfig(systemd)
Requires: systemd-units
%else
BuildRequires: pkgconfig(systemd)
Requires: systemd-units
BuildRequires: pkgconfig(libsystemd-journal)
%endif
%endif
%else
Requires(post): chkconfig
Requires(preun): chkconfig
# This is for /sbin/service
Requires(preun): initscripts
%endif

# required packages on install
Requires: /bin/sh
Requires: iptables
%if !0%{?suse_version}
Requires: libcgroup
%else
Requires: libcgroup1
%endif
Requires: tar
Requires: xz
%if 0%{?fedora} >= 21 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7 || 0%{?amzn} >= 1
# Resolves: rhbz#1165615
Requires: device-mapper-libs >= 1.02.90-1
%endif
%if 0%{?oraclelinux} >= 6
# Require Oracle Unbreakable Enterprise Kernel R4 and newer device-mapper
Requires: kernel-uek >= 4.1
Requires: device-mapper >= 1.02.90-2
%endif

# docker-selinux conditional
%if 0%{?fedora} >= 20 || 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
%global with_selinux 1
%endif

# DWZ problem with multiple golang binary, see bug
# https://bugzilla.redhat.com/show_bug.cgi?id=995136#c12
%if 0%{?fedora} >= 20 || 0%{?rhel} >= 7 || 0%{?oraclelinux} >= 7
%global _dwz_low_mem_die_limit 0
%endif

# start if with_selinux
%if 0%{?with_selinux}

%if 0%{?centos} >= 7 || 0%{?rhel} >= 7 || 0%{?fedora} >= 25
Requires: container-selinux >= 2.9
%endif# centos 7, rhel 7, fedora 25

%if 0%{?oraclelinux} >= 7
%global selinux_policyver 3.13.1-102.0.3.el7_3.15
%endif # oraclelinux 7
%if 0%{?fedora} == 24
%global selinux_policyver 3.13.1-191
%endif # fedora 24 -- container-selinux on fedora24 does not properly set dockerd, for now just carry docker-engine-selinux for it
%if 0%{?oraclelinux} >= 7 || 0%{?fedora} == 24
Requires: selinux-policy >= %{selinux_policyver}
Requires(pre): %{name}-selinux >= %{version}-%{release}
%endif # selinux-policy for oraclelinux-7, fedora-24

%endif # with_selinux

# conflicting packages
Conflicts: docker
Conflicts: docker-io
Conflicts: docker-engine-cs

%description
Docker is an open source project to build, ship and run any application as a
lightweight container.

Docker containers are both hardware-agnostic and platform-agnostic. This means
they can run anywhere, from your laptop to the largest EC2 compute instance and
everything in between - and they don't require you to use a particular
language, framework or packaging system. That makes them great building blocks
for deploying and scaling web apps, databases, and backend services without
depending on a particular stack or provider.

%prep
%if 0%{?centos} <= 6 || 0%{?oraclelinux} <=6
%setup -n %{name}
%else
%autosetup -n %{name}
%endif

%build
export DOCKER_GITCOMMIT=%{_gitcommit}
./hack/make.sh dynbinary
# ./man/md2man-all.sh runs outside the build container (if at all), since we don't have go-md2man here

%check
./bundles/%{_origversion}/dynbinary-daemon/dockerd -v

%install
# install binary
install -d $RPM_BUILD_ROOT/%{_bindir}
install -p -m 755 bundles/%{_origversion}/dynbinary-daemon/dockerd-%{_origversion} $RPM_BUILD_ROOT/%{_bindir}/dockerd

# install proxy
install -p -m 755 /usr/local/bin/docker-proxy $RPM_BUILD_ROOT/%{_bindir}/docker-proxy

# install containerd
install -p -m 755 /usr/local/bin/docker-containerd $RPM_BUILD_ROOT/%{_bindir}/docker-containerd
install -p -m 755 /usr/local/bin/docker-containerd-shim $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-shim
install -p -m 755 /usr/local/bin/docker-containerd-ctr $RPM_BUILD_ROOT/%{_bindir}/docker-containerd-ctr

# install runc
install -p -m 755 /usr/local/bin/docker-runc $RPM_BUILD_ROOT/%{_bindir}/docker-runc

# install tini
install -p -m 755 /usr/local/bin/docker-init $RPM_BUILD_ROOT/%{_bindir}/docker-init

# install udev rules
install -d $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d
install -p -m 644 contrib/udev/80-docker.rules $RPM_BUILD_ROOT/%{_sysconfdir}/udev/rules.d/80-docker.rules

# add init scripts
install -d $RPM_BUILD_ROOT/etc/sysconfig
install -d $RPM_BUILD_ROOT/%{_initddir}


%if 0%{?is_systemd}
install -d $RPM_BUILD_ROOT/%{_unitdir}
install -p -m 644 contrib/init/systemd/docker.service.rpm $RPM_BUILD_ROOT/%{_unitdir}/docker.service
%else
install -p -m 644 contrib/init/sysvinit-redhat/docker.sysconfig $RPM_BUILD_ROOT/etc/sysconfig/docker
install -p -m 755 contrib/init/sysvinit-redhat/docker $RPM_BUILD_ROOT/%{_initddir}/docker
%endif
# add bash, zsh, and fish completions
install -d $RPM_BUILD_ROOT/usr/share/bash-completion/completions
install -d $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions
install -d $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d
install -p -m 644 contrib/completion/bash/docker $RPM_BUILD_ROOT/usr/share/bash-completion/completions/docker
install -p -m 644 contrib/completion/zsh/_docker $RPM_BUILD_ROOT/usr/share/zsh/vendor-completions/_docker
install -p -m 644 contrib/completion/fish/docker.fish $RPM_BUILD_ROOT/usr/share/fish/vendor_completions.d/docker.fish

# install manpages
install -d %{buildroot}%{_mandir}/man1
install -p -m 644 man/man1/*.1 $RPM_BUILD_ROOT/%{_mandir}/man1
install -d %{buildroot}%{_mandir}/man5
install -p -m 644 man/man5/*.5 $RPM_BUILD_ROOT/%{_mandir}/man5
install -d %{buildroot}%{_mandir}/man8
install -p -m 644 man/man8/*.8 $RPM_BUILD_ROOT/%{_mandir}/man8

# add vimfiles
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect
install -d $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax
install -p -m 644 contrib/syntax/vim/doc/dockerfile.txt $RPM_BUILD_ROOT/usr/share/vim/vimfiles/doc/dockerfile.txt
install -p -m 644 contrib/syntax/vim/ftdetect/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
install -p -m 644 contrib/syntax/vim/syntax/dockerfile.vim $RPM_BUILD_ROOT/usr/share/vim/vimfiles/syntax/dockerfile.vim

# add nano
install -d $RPM_BUILD_ROOT/usr/share/nano
install -p -m 644 contrib/syntax/nano/Dockerfile.nanorc $RPM_BUILD_ROOT/usr/share/nano/Dockerfile.nanorc

# list files owned by the package here
%files
%doc AUTHORS CHANGELOG.md CONTRIBUTING.md LICENSE MAINTAINERS NOTICE README.md
/%{_bindir}/docker
/%{_bindir}/dockerd
/%{_bindir}/docker-containerd
/%{_bindir}/docker-containerd-shim
/%{_bindir}/docker-containerd-ctr
/%{_bindir}/docker-proxy
/%{_bindir}/docker-runc
/%{_bindir}/docker-init
/%{_sysconfdir}/udev/rules.d/80-docker.rules
%if 0%{?is_systemd}
/%{_unitdir}/docker.service
%else
%config(noreplace,missingok) /etc/sysconfig/docker
/%{_initddir}/docker
%endif
/usr/share/bash-completion/completions/docker
/usr/share/zsh/vendor-completions/_docker
/usr/share/fish/vendor_completions.d/docker.fish
%doc
/%{_mandir}/man1/*
/%{_mandir}/man5/*
/%{_mandir}/man8/*
/usr/share/vim/vimfiles/doc/dockerfile.txt
/usr/share/vim/vimfiles/ftdetect/dockerfile.vim
/usr/share/vim/vimfiles/syntax/dockerfile.vim
/usr/share/nano/Dockerfile.nanorc

%post
%if 0%{?is_systemd}
%systemd_post docker
%else
# This adds the proper /etc/rc*.d links for the script
/sbin/chkconfig --add docker
%endif
if ! getent group docker > /dev/null; then
	groupadd --system docker
fi

%preun
%if 0%{?is_systemd}
%systemd_preun docker
%else
if [ $1 -eq 0 ] ; then
	/sbin/service docker stop >/dev/null 2>&1
	/sbin/chkconfig --del docker
fi
%endif

%postun
%if 0%{?is_systemd}
%systemd_postun_with_restart docker
%else
if [ "$1" -ge "1" ] ; then
	/sbin/service docker condrestart >/dev/null 2>&1 || :
fi
%endif

%changelog
@@ -1,91 +0,0 @@
#!/usr/bin/env bash
set -e

# subshell so that we can export PATH and TZ without breaking other things
(
	export TZ=UTC # make sure our "date" variables are UTC-based
	bundle .integration-daemon-start
	bundle .detect-daemon-osarch

	# TODO consider using frozen images for the dockercore/builder-deb tags

	tilde='~' # ouch Bash 4.2 vs 4.3, you keel me
	debVersion="${VERSION//-/$tilde}" # using \~ or '~' here works in 4.3, but not 4.2; just ~ causes $HOME to be inserted, hence the $tilde
	# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
	if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
		gitUnix="$(git log -1 --pretty='%at')"
		gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
		gitCommit="$(git log -1 --pretty='%h')"
		gitVersion="git${gitDate}.0.${gitCommit}"
		# gitVersion is now something like 'git20150128.112847.0.17e840a'
		debVersion="$debVersion~$gitVersion"

		# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false
		# true
		# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false
		# true
		# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false
		# true

		# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a
	fi

	debSource="$(awk -F ': ' '$1 == "Source" { print $2; exit }' hack/make/.build-deb/control)"
	debMaintainer="$(awk -F ': ' '$1 == "Maintainer" { print $2; exit }' hack/make/.build-deb/control)"
	debDate="$(date --rfc-2822)"

	# if go-md2man is available, pre-generate the man pages
	make manpages

	builderDir="contrib/builder/deb/${PACKAGE_ARCH}"
	pkgs=( $(find "${builderDir}/"*/ -type d) )
	if [ ! -z "$DOCKER_BUILD_PKGS" ]; then
		pkgs=()
		for p in $DOCKER_BUILD_PKGS; do
			pkgs+=( "$builderDir/$p" )
		done
	fi
	for dir in "${pkgs[@]}"; do
		[ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; }
		version="$(basename "$dir")"
		suite="${version##*-}"

		image="dockercore/builder-deb:$version"
		if ! docker inspect "$image" &> /dev/null; then
			(
				# Add the APT_MIRROR args only if the consuming Dockerfile uses it
				# Otherwise this will cause the build to fail
				if [ "$(grep 'ARG APT_MIRROR=' $dir/Dockerfile)" ] && [ "$BUILD_APT_MIRROR" ]; then
					DOCKER_BUILD_ARGS="$DOCKER_BUILD_ARGS $BUILD_APT_MIRROR"
				fi
				set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir"
			)
		fi

		mkdir -p "$DEST/$version"
		cat > "$DEST/$version/Dockerfile.build" <<-EOF
			FROM $image
			WORKDIR /usr/src/docker
			COPY . /usr/src/docker
			ENV DOCKER_GITCOMMIT $GITCOMMIT
			RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers \
				&& ln -snf /usr/src/docker /go/src/github.com/docker/docker
		EOF

		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
			# Install runc, containerd, proxy and tini
			RUN ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini
		EOF
		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
			RUN cp -aL hack/make/.build-deb debian
			RUN { echo '$debSource (${debVersion}-0~${version}) $suite; urgency=low'; echo; echo ' * Version: $VERSION'; echo; echo " -- $debMaintainer $debDate"; } > debian/changelog && cat >&2 debian/changelog
			RUN dpkg-buildpackage -uc -us -I.git
		EOF
		tempImage="docker-temp/build-deb:$version"
		( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f "$DEST/$version/Dockerfile.build" . )
		docker run --rm "$tempImage" bash -c 'cd .. && tar -c *_*' | tar -xvC "$DEST/$version"
		docker rmi "$tempImage"
	done

	bundle .integration-daemon-stop
) 2>&1 | tee -a "$DEST/test.log"
@@ -1,148 +0,0 @@
#!/usr/bin/env bash
set -e

# subshell so that we can export PATH and TZ without breaking other things
(
	export TZ=UTC # make sure our "date" variables are UTC-based

	source "$(dirname "$BASH_SOURCE")/.integration-daemon-start"
	source "$(dirname "$BASH_SOURCE")/.detect-daemon-osarch"

	# TODO consider using frozen images for the dockercore/builder-rpm tags

	rpmName=docker-engine
	rpmVersion="$VERSION"
	rpmRelease=1

	# rpmRelease versioning is as follows
	# Docker 1.7.0: version=1.7.0, release=1
	# Docker 1.7.0-rc1: version=1.7.0, release=0.1.rc1
	# Docker 1.7.0-cs1: version=1.7.0.cs1, release=1
	# Docker 1.7.0-cs1-rc1: version=1.7.0.cs1, release=0.1.rc1
	# Docker 1.7.0-dev nightly: version=1.7.0, release=0.0.YYYYMMDD.HHMMSS.gitHASH

	# if we have a "-rc*" suffix, set appropriate release
	if [[ "$rpmVersion" =~ .*-rc[0-9]+$ ]] ; then
		rcVersion=${rpmVersion#*-rc}
		rpmVersion=${rpmVersion%-rc*}
		rpmRelease="0.${rcVersion}.rc${rcVersion}"
	fi

	DOCKER_GITCOMMIT=$(git rev-parse --short HEAD)
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		DOCKER_GITCOMMIT="$DOCKER_GITCOMMIT-unsupported"
	fi

	# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
	if [[ "$rpmVersion" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
		gitUnix="$(git log -1 --pretty='%at')"
		gitDate="$(date --date "@$gitUnix" +'%Y%m%d.%H%M%S')"
		gitCommit="$(git log -1 --pretty='%h')"
		gitVersion="${gitDate}.git${gitCommit}"
		# gitVersion is now something like '20150128.112847.17e840a'
		rpmVersion="${rpmVersion%-dev}"
		rpmRelease="0.0.$gitVersion"
	fi

	# Replace any other dashes with periods
	rpmVersion="${rpmVersion/-/.}"

	rpmPackager="$(awk -F ': ' '$1 == "Packager" { print $2; exit }' hack/make/.build-rpm/${rpmName}.spec)"
	rpmDate="$(date +'%a %b %d %Y')"

	# if go-md2man is available, pre-generate the man pages
	make manpages

	# Convert the CHANGELOG.md file into RPM changelog format
	rm -f contrib/builder/rpm/${PACKAGE_ARCH}/changelog
	VERSION_REGEX="^\W\W (.*) \((.*)\)$"
	ENTRY_REGEX="^[-+*] (.*)$"
	while read -r line || [[ -n "$line" ]]; do
		if [ -z "$line" ]; then continue; fi
		if [[ "$line" =~ $VERSION_REGEX ]]; then
			echo >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
			echo "* `date -d ${BASH_REMATCH[2]} '+%a %b %d %Y'` ${rpmPackager} - ${BASH_REMATCH[1]}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
		fi
		if [[ "$line" =~ $ENTRY_REGEX ]]; then
			echo "- ${BASH_REMATCH[1]//\`}" >> contrib/builder/rpm/${PACKAGE_ARCH}/changelog
		fi
	done < CHANGELOG.md

	builderDir="contrib/builder/rpm/${PACKAGE_ARCH}"
	pkgs=( $(find "${builderDir}/"*/ -type d) )
	if [ ! -z "$DOCKER_BUILD_PKGS" ]; then
		pkgs=()
		for p in $DOCKER_BUILD_PKGS; do
			pkgs+=( "$builderDir/$p" )
		done
	fi
	for dir in "${pkgs[@]}"; do
		[ -d "$dir" ] || { echo >&2 "skipping nonexistent $dir"; continue; }
		version="$(basename "$dir")"
		suite="${version##*-}"

		image="dockercore/builder-rpm:$version"
		if ! docker inspect "$image" &> /dev/null; then
			( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$image" "$dir" )
		fi

		mkdir -p "$DEST/$version"
		cat > "$DEST/$version/Dockerfile.build" <<-EOF
			FROM $image
			COPY . /usr/src/${rpmName}
			WORKDIR /usr/src/${rpmName}
			RUN mkdir -p /go/src/github.com/docker && mkdir -p /go/src/github.com/opencontainers
		EOF

		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
			# Install runc, containerd, proxy and tini
			RUN TMP_GOPATH="/go" ./hack/dockerfile/install-binaries.sh runc-dynamic containerd-dynamic proxy-dynamic tini
		EOF
		if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
			echo 'ENV DOCKER_EXPERIMENTAL 1' >> "$DEST/$version/Dockerfile.build"
		fi
		cat >> "$DEST/$version/Dockerfile.build" <<-EOF
			RUN mkdir -p /root/rpmbuild/SOURCES \
				&& echo '%_topdir /root/rpmbuild' > /root/.rpmmacros
			WORKDIR /root/rpmbuild
			RUN ln -sfv /usr/src/${rpmName}/hack/make/.build-rpm SPECS
			WORKDIR /root/rpmbuild/SPECS
			RUN tar --exclude .git -r -C /usr/src -f /root/rpmbuild/SOURCES/${rpmName}.tar ${rpmName}
			RUN tar --exclude .git -r -C /go/src/github.com/docker -f /root/rpmbuild/SOURCES/${rpmName}.tar containerd
			RUN tar --exclude .git -r -C /go/src/github.com/docker/libnetwork/cmd -f /root/rpmbuild/SOURCES/${rpmName}.tar proxy
			RUN tar --exclude .git -r -C /go/src/github.com/opencontainers -f /root/rpmbuild/SOURCES/${rpmName}.tar runc
			RUN tar --exclude .git -r -C /go/ -f /root/rpmbuild/SOURCES/${rpmName}.tar tini
			RUN gzip /root/rpmbuild/SOURCES/${rpmName}.tar
			RUN { cat /usr/src/${rpmName}/contrib/builder/rpm/${PACKAGE_ARCH}/changelog; } >> ${rpmName}.spec && tail >&2 ${rpmName}.spec
			RUN rpmbuild -ba \
				--define '_gitcommit $DOCKER_GITCOMMIT' \
				--define '_release $rpmRelease' \
				--define '_version $rpmVersion' \
				--define '_origversion $VERSION' \
				--define '_experimental ${DOCKER_EXPERIMENTAL:-0}' \
				${rpmName}.spec
		EOF
		# selinux policy referencing systemd things won't work on non-systemd versions
		# of centos or rhel, which we don't support anyways
		if [ "${suite%.*}" -gt 6 ] && [[ "$version" != opensuse* ]]; then
			if [ -d "./contrib/selinux-$version" ]; then
				selinuxDir="selinux-${version}"
				cat >> "$DEST/$version/Dockerfile.build" <<-EOF
					RUN tar -cz -C /usr/src/${rpmName}/contrib/${selinuxDir} -f /root/rpmbuild/SOURCES/${rpmName}-selinux.tar.gz ${rpmName}-selinux
					RUN rpmbuild -ba \
						--define '_gitcommit $DOCKER_GITCOMMIT' \
						--define '_release $rpmRelease' \
						--define '_version $rpmVersion' \
						--define '_origversion $VERSION' \
						${rpmName}-selinux.spec
				EOF
			fi
		fi
		tempImage="docker-temp/build-rpm:$version"
		( set -x && docker build ${DOCKER_BUILD_ARGS} -t "$tempImage" -f $DEST/$version/Dockerfile.build . )
		docker run --rm "$tempImage" bash -c 'cd /root/rpmbuild && tar -c *RPMS' | tar -xvC "$DEST/$version"
		docker rmi "$tempImage"
	done

	source "$(dirname "$BASH_SOURCE")/.integration-daemon-stop"
) 2>&1 | tee -a $DEST/test.log
@@ -1,43 +0,0 @@
#!/usr/bin/env bash
set -e

# This script cleans the experimental pool for the apt repo.
# This is useful when there are a lot of old experimental debs and you only want to keep the most recent.
#

: ${DOCKER_RELEASE_DIR:=$DEST}
APTDIR=$DOCKER_RELEASE_DIR/apt/repo/pool/experimental
: ${DOCKER_ARCHIVE_DIR:=$DEST/archive}
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"

latest_versions=$(dpkg-scanpackages "$APTDIR" /dev/null 2>/dev/null | awk -F ': ' '$1 == "Filename" { print $2 }')

# get the latest version
latest_docker_engine_file=$(echo "$latest_versions" | grep docker-engine)
latest_docker_engine_version=$(basename ${latest_docker_engine_file%~*})

echo "latest docker-engine version: $latest_docker_engine_version"

# remove all the files that are not that version in experimental
pool_dir=$(dirname "$latest_docker_engine_file")
old_pkgs=( $(ls "$pool_dir" | grep -v "^${latest_docker_engine_version}" | grep "${latest_docker_engine_version%%~git*}") )

echo "${old_pkgs[@]}"

mkdir -p "$DOCKER_ARCHIVE_DIR"
for old_pkg in "${old_pkgs[@]}"; do
	echo "moving ${pool_dir}/${old_pkg} to $DOCKER_ARCHIVE_DIR"
	mv "${pool_dir}/${old_pkg}" "$DOCKER_ARCHIVE_DIR"
done

echo
echo "$pool_dir now has contents:"
ls "$pool_dir"

# now regenerate release files for experimental
export COMPONENT=experimental
source "${DIR}/update-apt-repo"

echo "You will now want to: "
echo " - re-sign the repo with hack/make/sign-repo"
echo " - re-generate index files with hack/make/generate-index-listing"
@@ -1,20 +0,0 @@
#!/usr/bin/env bash
set -e

# This script cleans the experimental pool for the yum repo.
# This is useful when there are a lot of old experimental rpms and you only want to keep the most recent.
#

: ${DOCKER_RELEASE_DIR:=$DEST}
YUMDIR=$DOCKER_RELEASE_DIR/yum/repo/experimental

suites=( $(find "$YUMDIR" -mindepth 1 -maxdepth 1 -type d) )

for suite in "${suites[@]}"; do
	echo "cleanup in: $suite"
	( set -x; repomanage -k2 --old "$suite" | xargs rm -f )
done

echo "You will now want to: "
echo " - re-sign the repo with hack/make/sign-repo"
echo " - re-generate index files with hack/make/generate-index-listing"
@@ -1,15 +0,0 @@
#!/usr/bin/env bash
set -e

bundle_cover() {
	coverprofiles=( "$DEST/../"*"/coverprofiles/"* )
	for p in "${coverprofiles[@]}"; do
		echo
		(
			set -x
			go tool cover -func="$p"
		)
	done
}

bundle_cover 2>&1 | tee "$DEST/report.log"
@@ -1,74 +0,0 @@
#!/usr/bin/env bash
set -e

# This script generates index files for the directory structure
# of the apt and yum repos

: ${DOCKER_RELEASE_DIR:=$DEST}
APTDIR=$DOCKER_RELEASE_DIR/apt
YUMDIR=$DOCKER_RELEASE_DIR/yum

if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then
	echo >&2 'release-rpm or release-deb must be run before generate-index-listing'
	exit 1
fi

create_index() {
	local directory=$1
	local original=$2
	local cleaned=${directory#$original}

	# the index file to create
	local index_file="${directory}/index"

	# cd into dir & touch the index file
	cd $directory
	touch $index_file

	# print the html header
	cat <<-EOF > "$index_file"
		<!DOCTYPE html>
		<html>
		<head><title>Index of ${cleaned}/</title></head>
		<body bgcolor="white">
		<h1>Index of ${cleaned}/</h1><hr>
		<pre><a href="../">../</a>
	EOF

	# start of content output
	(
		# change IFS locally within subshell so the for loop saves line correctly to L var
		IFS=$'\n';

		# pretty sweet, will mimic the normal apache output. skipping "index" and hidden files
		for L in $(find -L . -mount -depth -maxdepth 1 -type f ! -name 'index' ! -name '.*' -prune -printf "<a href=\"%f\">%f|@_@%Td-%Tb-%TY %Tk:%TM @%f@\n"|sort|column -t -s '|' | sed 's,\([\ ]\+\)@_@,</a>\1,g');
		do
			# file
			F=$(sed -e 's,^.*@\([^@]\+\)@.*$,\1,g'<<<"$L");

			# file with file size
			F=$(du -bh $F | cut -f1);

			# output with correct format
			sed -e 's,\ @.*$, '"$F"',g'<<<"$L";
		done;
	) >> $index_file;

	# now output a list of all directories in this dir (maxdepth 1) other than '.' outputting in a sorted manner exactly like apache
	find -L . -mount -depth -maxdepth 1 -type d ! -name '.' -printf "<a href=\"%f\">%-43f@_@%Td-%Tb-%TY %Tk:%TM -\n"|sort -d|sed 's,\([\ ]\+\)@_@,/</a>\1,g' >> $index_file

	# print the footer html
	echo "</pre><hr></body></html>" >> $index_file

}

get_dirs() {
	local directory=$1

	for d in `find ${directory} -type d`; do
		create_index $d $directory
	done
}

get_dirs $APTDIR
get_dirs $YUMDIR
hack/make/install-binary (Executable file → Normal file, 23 lines changed)
@@ -3,6 +3,27 @@
set -e
rm -rf "$DEST"

install_binary() {
	local file="$1"
	local target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/"
	if [ "$(go env GOOS)" == "linux" ]; then
		echo "Installing $(basename $file) to ${target}"
		mkdir -p "$target"
		cp -f -L "$file" "$target"
	else
		echo "Install is only supported on linux"
		return 1
	fi
}

(
	source "${MAKEDIR}/install-binary-daemon"
	DEST="$(dirname $DEST)/binary-daemon"
	source "${MAKEDIR}/.binary-setup"
	install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}"
)
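The install_binary helper above copies one built binary into "${DOCKER_MAKE_INSTALL_PREFIX}/bin/" and refuses to run on non-Linux hosts. A minimal standalone sketch of that same logic, not part of the commit; the prefix and binary path below are illustrative assumptions:

#!/usr/bin/env bash
# Illustrative sketch only: the install_binary logic from hack/make/install-binary,
# exercised outside the hack/make harness. Paths are hypothetical.
set -e

install_binary() {
	local file="$1"
	local target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/"
	if [ "$(go env GOOS)" == "linux" ]; then
		echo "Installing $(basename "$file") to ${target}"
		mkdir -p "$target"
		cp -f -L "$file" "$target"   # dereference symlinks, overwrite any existing copy
	else
		echo "Install is only supported on linux"
		return 1
	fi
}

# Install a locally built daemon binary under a custom prefix (/opt/docker/bin/).
DOCKER_MAKE_INSTALL_PREFIX=/opt/docker install_binary "bundles/latest/binary-daemon/dockerd"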
@@ -1,29 +0,0 @@
#!/usr/bin/env bash

set -e
rm -rf "$DEST"

install_binary() {
	local file="$1"
	local target="${DOCKER_MAKE_INSTALL_PREFIX:=/usr/local}/bin/"
	if [ "$(go env GOOS)" == "linux" ]; then
		echo "Installing $(basename $file) to ${target}"
		mkdir -p "$target"
		cp -f -L "$file" "$target"
	else
		echo "Install is only supported on linux"
		return 1
	fi
}

(
	DEST="$(dirname $DEST)/binary-daemon"
	source "${MAKEDIR}/.binary-setup"
	install_binary "${DEST}/${DOCKER_DAEMON_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_RUNC_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_CONTAINERD_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_CONTAINERD_CTR_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_CONTAINERD_SHIM_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_PROXY_BINARY_NAME}"
	install_binary "${DEST}/${DOCKER_INIT_BINARY_NAME}"
)
@@ -1,163 +0,0 @@
#!/usr/bin/env bash
set -e

# This script creates the apt repos for the .deb files generated by hack/make/build-deb
#
# The following can then be used as apt sources:
# deb http://apt.dockerproject.org/repo $distro-$release $version
#
# For example:
# deb http://apt.dockerproject.org/repo ubuntu-trusty main
# deb http://apt.dockerproject.org/repo ubuntu-trusty testing
# deb http://apt.dockerproject.org/repo debian-wheezy experimental
# deb http://apt.dockerproject.org/repo debian-jessie main
#
# ... and so on and so forth for the builds created by hack/make/build-deb

: ${DOCKER_RELEASE_DIR:=$DEST}
: ${GPG_KEYID:=releasedocker}
APTDIR=$DOCKER_RELEASE_DIR/apt/repo

# setup the apt repo (if it does not exist)
mkdir -p "$APTDIR/conf" "$APTDIR/db" "$APTDIR/dists"

# supported arches/sections
arches=( amd64 i386 armhf ppc64le s390x )

# Preserve existing components but don't add any non-existing ones
for component in main testing experimental ; do
	exists=$(find "$APTDIR/dists" -mindepth 2 -maxdepth 2 -type d -name "$component" -print -quit)
	if [ -n "$exists" ] ; then
		components+=( $component )
	fi
done

# set the component for the version being released
component="main"

if [[ "$VERSION" == *-rc* ]]; then
	component="testing"
fi

if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
	component="experimental"
fi

# Make sure our component is in the list of components
if [[ ! "${components[*]}" =~ $component ]] ; then
	components+=( $component )
fi

# create apt-ftparchive file on every run. This is essential to avoid
# using stale versions of the config file that could cause unnecessary
# refreshing of bits for EOL-ed releases.
cat <<-EOF > "$APTDIR/conf/apt-ftparchive.conf"
	Dir {
		ArchiveDir "${APTDIR}";
		CacheDir "${APTDIR}/db";
	};

	Default {
		Packages::Compress ". gzip bzip2";
		Sources::Compress ". gzip bzip2";
		Contents::Compress ". gzip bzip2";
	};

	TreeDefault {
		BinCacheDB "packages-\$(SECTION)-\$(ARCH).db";
		Directory "pool/\$(SECTION)";
		Packages "\$(DIST)/\$(SECTION)/binary-\$(ARCH)/Packages";
		SrcDirectory "pool/\$(SECTION)";
		Sources "\$(DIST)/\$(SECTION)/source/Sources";
		Contents "\$(DIST)/\$(SECTION)/Contents-\$(ARCH)";
		FileList "$APTDIR/\$(DIST)/\$(SECTION)/filelist";
	};
EOF

for dir in bundles/$VERSION/build-deb/*/; do
	version="$(basename "$dir")"
	suite="${version//debootstrap-}"

	cat <<-EOF
		Tree "dists/${suite}" {
			Sections "${components[*]}";
			Architectures "${arches[*]}";
		}

	EOF
done >> "$APTDIR/conf/apt-ftparchive.conf"

cat <<-EOF > "$APTDIR/conf/docker-engine-release.conf"
	APT::FTPArchive::Release::Origin "Docker";
	APT::FTPArchive::Release::Components "${components[*]}";
	APT::FTPArchive::Release::Label "Docker APT Repository";
	APT::FTPArchive::Release::Architectures "${arches[*]}";
EOF

# release the debs
for dir in bundles/$VERSION/build-deb/*/; do
	version="$(basename "$dir")"
	codename="${version//debootstrap-}"

	tempdir="$(mktemp -d /tmp/tmp-docker-release-deb.XXXXXXXX)"
	DEBFILE=( "$dir/docker-engine"*.deb )

	# add the deb for each component for the distro version into the
	# pool (if it is not there already)
	mkdir -p "$APTDIR/pool/$component/d/docker-engine/"
	for deb in ${DEBFILE[@]}; do
		d=$(basename "$deb")
		# We do not want to generate a new deb if it has already been
		# copied into the APTDIR
		if [ ! -f "$APTDIR/pool/$component/d/docker-engine/$d" ]; then
			cp "$deb" "$tempdir/"
			# if we have a $GPG_PASSPHRASE we may as well
			# dpkg-sign before copying the deb into the pool
			if [ ! -z "$GPG_PASSPHRASE" ]; then
				dpkg-sig -g "--no-tty --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE'" \
					-k "$GPG_KEYID" --sign builder "$tempdir/$d"
			fi
			mv "$tempdir/$d" "$APTDIR/pool/$component/d/docker-engine/"
		fi
	done

	rm -rf "$tempdir"

	# build the right directory structure, needed for apt-ftparchive
	for arch in "${arches[@]}"; do
		for c in "${components[@]}"; do
			mkdir -p "$APTDIR/dists/$codename/$c/binary-$arch"
		done
	done

	# update the filelist for this codename/component
	find "$APTDIR/pool/$component" \
		-name *~${codename}*.deb -o \
		-name *~${codename#*-}*.deb > "$APTDIR/dists/$codename/$component/filelist"
done

# run the apt-ftparchive commands so we can have pinning
apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf"

for dir in bundles/$VERSION/build-deb/*/; do
	version="$(basename "$dir")"
	codename="${version//debootstrap-}"

	apt-ftparchive \
		-c "$APTDIR/conf/docker-engine-release.conf" \
		-o "APT::FTPArchive::Release::Codename=$codename" \
		-o "APT::FTPArchive::Release::Suite=$codename" \
		release \
		"$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release"

	for arch in "${arches[@]}"; do
		apt-ftparchive \
			-c "$APTDIR/conf/docker-engine-release.conf" \
			-o "APT::FTPArchive::Release::Codename=$codename" \
			-o "APT::FTPArchive::Release::Suite=$codename" \
			-o "APT::FTPArchive::Release::Components=$component" \
			-o "APT::FTPArchive::Release::Architecture=$arch" \
			release \
			"$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release"
	done
done
@@ -1,71 +0,0 @@
#!/usr/bin/env bash
set -e

# This script creates the yum repos for the .rpm files generated by hack/make/build-rpm
#
# The following can then be used as a yum repo:
# http://yum.dockerproject.org/repo/$release/$distro/$distro-version
#
# For example:
# http://yum.dockerproject.org/repo/main/fedora/23
# http://yum.dockerproject.org/repo/testing/centos/7
# http://yum.dockerproject.org/repo/experimental/fedora/23
# http://yum.dockerproject.org/repo/main/centos/7
#
# ... and so on and so forth for the builds created by hack/make/build-rpm

: ${DOCKER_RELEASE_DIR:=$DEST}
YUMDIR=$DOCKER_RELEASE_DIR/yum/repo
: ${GPG_KEYID:=releasedocker}

# get the release
release="main"

if [[ "$VERSION" == *-rc* ]]; then
	release="testing"
fi

if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
	release="experimental"
fi

# Setup the yum repo
for dir in bundles/$VERSION/build-rpm/*/; do
	version="$(basename "$dir")"
	suite="${version##*-}"
	distro="${version%-*}"

	REPO=$YUMDIR/$release/$distro

	# if the directory does not exist, initialize the yum repo
	if [[ ! -d $REPO/$suite/Packages ]]; then
		mkdir -p "$REPO/$suite/Packages"

		createrepo --pretty "$REPO/$suite"
	fi

	# path to rpms
	RPMFILE=( "bundles/$VERSION/build-rpm/$version/RPMS/"*"/docker-engine"*.rpm "bundles/$VERSION/build-rpm/$version/SRPMS/docker-engine"*.rpm )

	# if we have a $GPG_PASSPHRASE we may as well
	# sign the rpms before adding to repo
	if [ ! -z $GPG_PASSPHRASE ]; then
		# export our key to rpm import
		gpg --armor --export "$GPG_KEYID" > /tmp/gpg
		rpm --import /tmp/gpg

		# sign the rpms
		echo "yes" | setsid rpm \
			--define "_gpg_name $GPG_KEYID" \
			--define "_signature gpg" \
			--define "__gpg_check_password_cmd /bin/true" \
			--define "__gpg_sign_cmd %{__gpg} gpg --batch --no-armor --digest-algo 'sha512' --passphrase '$GPG_PASSPHRASE' --no-secmem-warning -u '%{_gpg_name}' --sign --detach-sign --output %{__signature_filename} %{__plaintext_filename}" \
			--resign "${RPMFILE[@]}"
	fi

	# copy the rpms to the packages folder
	cp "${RPMFILE[@]}" "$REPO/$suite/Packages"

	# update the repo
	createrepo --pretty --update "$REPO/$suite"
done
@@ -1,65 +0,0 @@
#!/usr/bin/env bash

# This script signs the deliverables from release-deb and release-rpm
# with a designated GPG key.

: ${DOCKER_RELEASE_DIR:=$DEST}
: ${GPG_KEYID:=releasedocker}
APTDIR=$DOCKER_RELEASE_DIR/apt/repo
YUMDIR=$DOCKER_RELEASE_DIR/yum/repo

if [ -z "$GPG_PASSPHRASE" ]; then
	echo >&2 'you need to set GPG_PASSPHRASE in order to sign artifacts'
	exit 1
fi

if [ ! -d $APTDIR ] && [ ! -d $YUMDIR ]; then
	echo >&2 'release-rpm or release-deb must be run before sign-repos'
	exit 1
fi

sign_packages(){
	# sign apt repo metadata
	if [ -d $APTDIR ]; then
		# create file with public key
		gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/apt/gpg"

		# sign the repo metadata
		for F in $(find $APTDIR -name Release); do
			if test "$F" -nt "$F.gpg" ; then
				gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
					--digest-algo "sha512" \
					--armor --sign --detach-sign \
					--batch --yes \
					--output "$F.gpg" "$F"
			fi
			inRelease="$(dirname "$F")/InRelease"
			if test "$F" -nt "$inRelease" ; then
				gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
					--digest-algo "sha512" \
					--clearsign \
					--batch --yes \
					--output "$inRelease" "$F"
			fi
		done
	fi

	# sign yum repo metadata
	if [ -d $YUMDIR ]; then
		# create file with public key
		gpg --armor --export "$GPG_KEYID" > "$DOCKER_RELEASE_DIR/yum/gpg"

		# sign the repo metadata
		for F in $(find $YUMDIR -name repomd.xml); do
			if test "$F" -nt "$F.asc" ; then
				gpg -u "$GPG_KEYID" --passphrase "$GPG_PASSPHRASE" \
					--digest-algo "sha512" \
					--armor --sign --detach-sign \
					--batch --yes \
					--output "$F.asc" "$F"
			fi
		done
	fi
}

sign_packages
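The script above writes a detached Release.gpg (and a clearsigned InRelease) next to each apt Release file, and a repomd.xml.asc next to each yum repomd.xml. A minimal sketch of checking those signatures on the consumer side, assuming the layout produced by release-deb and release-rpm; the suite and distro paths are illustrative, not part of the commit:

#!/usr/bin/env bash
# Illustrative verification only; not part of the commit.
set -e
gpg --import "$DOCKER_RELEASE_DIR/apt/gpg"     # trust the public key exported by sign-repos
gpg --verify "$APTDIR/dists/ubuntu-trusty/Release.gpg" "$APTDIR/dists/ubuntu-trusty/Release"   # detached apt signature
gpg --verify "$APTDIR/dists/ubuntu-trusty/InRelease"                                           # clearsigned apt metadata
gpg --verify "$YUMDIR/main/centos/7/repodata/repomd.xml.asc" "$YUMDIR/main/centos/7/repodata/repomd.xml"   # detached yum signature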
@@ -1,29 +0,0 @@
#!/usr/bin/env bash
set -e

versions=( 1.3.3 1.4.1 1.5.0 1.6.2 )

install() {
	local version=$1
	local tmpdir=$(mktemp -d /tmp/XXXXXXXXXX)
	local dockerfile="${tmpdir}/Dockerfile"
	cat <<-EOF > "$dockerfile"
		FROM debian:jessie
		ENV VERSION ${version}
		RUN apt-get update && apt-get install -y \
			apt-transport-https \
			ca-certificates \
			--no-install-recommends
		RUN echo "deb https://get.docker.com/ubuntu docker main" > /etc/apt/sources.list.d/docker.list
		RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 \
			--recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
		RUN apt-get update && apt-get install -y \
			lxc-docker-\${VERSION}
	EOF

	docker build --rm --force-rm --no-cache -t docker-old-repo:${version} -f $dockerfile $tmpdir
}

for v in "${versions[@]}"; do
	install "$v"
done
hack/make/ubuntu (190 lines changed)
|
@ -1,190 +0,0 @@
|
|||
#!/usr/bin/env bash
|
||||
|
||||
PKGVERSION="${VERSION//-/'~'}"
|
||||
# if we have a "-dev" suffix or have change in Git, let's make this package version more complex so it works better
|
||||
if [[ "$VERSION" == *-dev ]] || [ -n "$(git status --porcelain)" ]; then
|
||||
GIT_UNIX="$(git log -1 --pretty='%at')"
|
||||
GIT_DATE="$(date --date "@$GIT_UNIX" +'%Y%m%d.%H%M%S')"
|
||||
GIT_COMMIT="$(git log -1 --pretty='%h')"
|
||||
GIT_VERSION="git${GIT_DATE}.0.${GIT_COMMIT}"
|
||||
# GIT_VERSION is now something like 'git20150128.112847.0.17e840a'
|
||||
PKGVERSION="$PKGVERSION~$GIT_VERSION"
|
||||
fi
|
||||
|
||||
# $ dpkg --compare-versions 1.5.0 gt 1.5.0~rc1 && echo true || echo false
|
||||
# true
|
||||
# $ dpkg --compare-versions 1.5.0~rc1 gt 1.5.0~git20150128.112847.17e840a && echo true || echo false
|
||||
# true
|
||||
# $ dpkg --compare-versions 1.5.0~git20150128.112847.17e840a gt 1.5.0~dev~git20150128.112847.17e840a && echo true || echo false
|
||||
# true
|
||||
|
||||
# ie, 1.5.0 > 1.5.0~rc1 > 1.5.0~git20150128.112847.17e840a > 1.5.0~dev~git20150128.112847.17e840a
|
||||
|
||||
PACKAGE_ARCHITECTURE="$(dpkg-architecture -qDEB_HOST_ARCH)"
|
||||
PACKAGE_URL="https://www.docker.com/"
|
||||
PACKAGE_MAINTAINER="support@docker.com"
|
||||
PACKAGE_DESCRIPTION="Linux container runtime
|
||||
Docker complements LXC with a high-level API which operates at the process
|
||||
level. It runs unix processes with strong guarantees of isolation and
|
||||
repeatability across servers.
|
||||
Docker is a great building block for automating distributed systems:
|
||||
large-scale web deployments, database clusters, continuous deployment systems,
|
||||
private PaaS, service-oriented architectures, etc."
|
||||
PACKAGE_LICENSE="Apache-2.0"
|
||||
|
||||
# Build docker as an ubuntu package using FPM and REPREPRO (sue me).
|
||||
# bundle_binary must be called first.
|
||||
bundle_ubuntu() {
|
||||
DIR="$ABS_DEST/build"
|
||||
|
||||
# Include our udev rules
|
||||
mkdir -p "$DIR/etc/udev/rules.d"
|
||||
cp contrib/udev/80-docker.rules "$DIR/etc/udev/rules.d/"
|
||||
|
||||
# Include our init scripts
|
||||
mkdir -p "$DIR/etc/init"
|
||||
cp contrib/init/upstart/docker.conf "$DIR/etc/init/"
|
||||
mkdir -p "$DIR/etc/init.d"
|
||||
cp contrib/init/sysvinit-debian/docker "$DIR/etc/init.d/"
|
||||
mkdir -p "$DIR/etc/default"
|
||||
cp contrib/init/sysvinit-debian/docker.default "$DIR/etc/default/docker"
|
||||
mkdir -p "$DIR/lib/systemd/system"
|
||||
cp contrib/init/systemd/docker.{service,socket} "$DIR/lib/systemd/system/"
|
||||
|
||||
# Include contributed completions
|
||||
mkdir -p "$DIR/etc/bash_completion.d"
|
||||
cp contrib/completion/bash/docker "$DIR/etc/bash_completion.d/"
|
||||
mkdir -p "$DIR/usr/share/zsh/vendor-completions"
|
||||
cp contrib/completion/zsh/_docker "$DIR/usr/share/zsh/vendor-completions/"
|
||||
mkdir -p "$DIR/etc/fish/completions"
|
||||
cp contrib/completion/fish/docker.fish "$DIR/etc/fish/completions/"
|
||||
|
||||
# Include man pages
|
||||
make manpages
|
||||
manRoot="$DIR/usr/share/man"
|
||||
mkdir -p "$manRoot"
|
||||
for manDir in man/man?; do
|
||||
manBase="$(basename "$manDir")" # "man1"
|
||||
for manFile in "$manDir"/*; do
|
||||
manName="$(basename "$manFile")" # "docker-build.1"
|
||||
mkdir -p "$manRoot/$manBase"
|
||||
gzip -c "$manFile" > "$manRoot/$manBase/$manName.gz"
|
||||
done
|
||||
done
|
||||
|
||||
# Copy the binary
|
||||
# This will fail if the binary bundle hasn't been built
|
||||
mkdir -p "$DIR/usr/bin"
|
||||
cp "$DEST/../binary/docker-$VERSION" "$DIR/usr/bin/docker"
|
||||
|
||||
# Generate postinst/prerm/postrm scripts
|
||||
cat > "$DEST/postinst" <<'EOF'
|
||||
#!/bin/sh
|
||||
set -e
|
||||
set -u
|
||||
|
||||
if [ "$1" = 'configure' ] && [ -z "$2" ]; then
|
||||
if ! getent group docker > /dev/null; then
|
||||
groupadd --system docker
|
||||
fi
|
||||
fi
|
||||
|
||||
if ! { [ -x /sbin/initctl ] && /sbin/initctl version 2>/dev/null | grep -q upstart; }; then
|
||||
# we only need to do this if upstart isn't in charge
|
||||
update-rc.d docker defaults > /dev/null || true
|
||||
fi
|
||||
if [ -n "$2" ]; then
|
||||
_dh_action=restart
|
||||
else
|
||||
_dh_action=start
|
||||
fi
|
||||
service docker $_dh_action 2>/dev/null || true
|
||||
|
||||
#DEBHELPER#
|
||||
EOF
|
||||
cat > "$DEST/prerm" <<'EOF'
|
||||
#!/bin/sh
|
||||
set -e
|
||||
set -u
|
||||
|
||||
service docker stop 2>/dev/null || true
|
||||
|
||||
#DEBHELPER#
|
||||
EOF
|
||||
cat > "$DEST/postrm" <<'EOF'
|
||||
#!/bin/sh
|
||||
set -e
|
||||
set -u
|
||||
|
||||
if [ "$1" = "purge" ] ; then
|
||||
update-rc.d docker remove > /dev/null || true
|
||||
fi
|
||||
|
||||
# In case this system is running systemd, we make systemd reload the unit files
|
||||
# to pick up changes.
|
||||
if [ -d /run/systemd/system ] ; then
|
||||
systemctl --system daemon-reload > /dev/null || true
|
||||
fi
|
||||
|
||||
#DEBHELPER#
|
||||
EOF
|
||||
# TODO swaths of these were borrowed from debhelper's auto-inserted stuff, because we're still using fpm - we need to use debhelper instead, and somehow reconcile Ubuntu that way
|
||||
chmod +x "$DEST/postinst" "$DEST/prerm" "$DEST/postrm"

	(
		# switch directories so we create *.deb in the right folder
		cd "$DEST"

		# create lxc-docker-VERSION package
		fpm -s dir -C "$DIR" \
			--name "lxc-docker-$VERSION" --version "$PKGVERSION" \
			--after-install "$ABS_DEST/postinst" \
			--before-remove "$ABS_DEST/prerm" \
			--after-remove "$ABS_DEST/postrm" \
			--architecture "$PACKAGE_ARCHITECTURE" \
			--prefix / \
			--depends iptables \
			--deb-recommends aufs-tools \
			--deb-recommends ca-certificates \
			--deb-recommends git \
			--deb-recommends xz-utils \
			--deb-recommends 'cgroupfs-mount | cgroup-lite' \
			--deb-suggests apparmor \
			--description "$PACKAGE_DESCRIPTION" \
			--maintainer "$PACKAGE_MAINTAINER" \
			--conflicts docker \
			--conflicts docker.io \
			--conflicts lxc-docker-virtual-package \
			--provides lxc-docker \
			--provides lxc-docker-virtual-package \
			--replaces lxc-docker \
			--replaces lxc-docker-virtual-package \
			--url "$PACKAGE_URL" \
			--license "$PACKAGE_LICENSE" \
			--config-files /etc/udev/rules.d/80-docker.rules \
			--config-files /etc/init/docker.conf \
			--config-files /etc/init.d/docker \
			--config-files /etc/default/docker \
			--deb-compression gz \
			-t deb .
		# TODO replace "Suggests: cgroup-lite" with "Recommends: cgroupfs-mount | cgroup-lite" once cgroupfs-mount is available

		# create empty lxc-docker wrapper package
		fpm -s empty \
			--name lxc-docker --version "$PKGVERSION" \
			--architecture "$PACKAGE_ARCHITECTURE" \
			--depends lxc-docker-$VERSION \
			--description "$PACKAGE_DESCRIPTION" \
			--maintainer "$PACKAGE_MAINTAINER" \
			--url "$PACKAGE_URL" \
			--license "$PACKAGE_LICENSE" \
			--deb-compression gz \
			-t deb
	)

	# clean up after ourselves so we have a clean output directory
	rm "$DEST/postinst" "$DEST/prerm" "$DEST/postrm"
	rm -r "$DIR"
}

bundle_ubuntu
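A quick way to sanity-check what this function produces is to point the standard dpkg tooling at the output directory (paths are illustrative; the exact layout of $DEST depends on the surrounding make.sh scaffolding):

	dpkg-deb --info     "$DEST"/lxc-docker-"$VERSION"_*.deb   # inspect Depends/Recommends/Conflicts fields
	dpkg-deb --contents "$DEST"/lxc-docker-"$VERSION"_*.deb   # confirm /usr/bin/docker and the init/udev files are present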
@@ -1,70 +0,0 @@
#!/usr/bin/env bash
set -e

# This script updates the apt repo in $DOCKER_RELEASE_DIR/apt/repo.
# This script is a "fix all" for any sort of problems that might have occurred with
# the Release or Packages files in the repo.
# It should only be used in the rare case of extreme emergencies to regenerate
# Release and Packages files for the apt repo.
#
# NOTE: Always be sure to re-sign the repo with hack/make/sign-repos after running
# this script.

: ${DOCKER_RELEASE_DIR:=$DEST}
APTDIR=$DOCKER_RELEASE_DIR/apt/repo

# supported arches/sections
arches=( amd64 i386 )

# Preserve existing components but don't add any non-existing ones
for component in main testing experimental; do
	if ls "$APTDIR"/dists/*/"$component" > /dev/null 2>&1; then
		components+=( $component )
	fi
done

dists=( $(find "${APTDIR}/dists" -maxdepth 1 -mindepth 1 -type d) )

# override component if it is set
if [ "$COMPONENT" ]; then
	components=( $COMPONENT )
fi

# release the debs
for dist in "${dists[@]}"; do
	version=$(basename "$dist")
	for component in "${components[@]}"; do
		codename="${version//debootstrap-}"

		# update the filelist for this codename/component
		find "$APTDIR/pool/$component" \
			-name "*~${codename#*-}*.deb" > "$APTDIR/dists/$codename/$component/filelist"
	done
done

# run the apt-ftparchive commands so we can have pinning
apt-ftparchive generate "$APTDIR/conf/apt-ftparchive.conf"

for dist in "${dists[@]}"; do
	version=$(basename "$dist")
	for component in "${components[@]}"; do
		codename="${version//debootstrap-}"

		apt-ftparchive \
			-o "APT::FTPArchive::Release::Codename=$codename" \
			-o "APT::FTPArchive::Release::Suite=$codename" \
			-c "$APTDIR/conf/docker-engine-release.conf" \
			release \
			"$APTDIR/dists/$codename" > "$APTDIR/dists/$codename/Release"

		for arch in "${arches[@]}"; do
			apt-ftparchive \
				-o "APT::FTPArchive::Release::Codename=$codename" \
				-o "APT::FTPArchive::Release::Suite=$codename" \
				-o "APT::FTPArchive::Release::Component=$component" \
				-o "APT::FTPArchive::Release::Architecture=$arch" \
				-c "$APTDIR/conf/docker-engine-release.conf" \
				release \
				"$APTDIR/dists/$codename/$component/binary-$arch" > "$APTDIR/dists/$codename/$component/binary-$arch/Release"
		done
	done
done
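Once the Release files are regenerated (and the repo re-signed via hack/make/sign-repos, as the note above requires), a quick spot-check against one codename -- "ubuntu-trusty" here is only an example -- might look like:

	grep -E '^(Suite|Codename|Components|Architectures):' "$APTDIR/dists/ubuntu-trusty/Release"
	gpg --verify "$APTDIR/dists/ubuntu-trusty/Release.gpg" "$APTDIR/dists/ubuntu-trusty/Release"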
@@ -1,20 +0,0 @@
#!/usr/bin/env bash
set -e

# explicit list of os/arch combos that support being a daemon
declare -A daemonSupporting
daemonSupporting=(
	[linux/amd64]=1
	[windows/amd64]=1
)
platform="windows/amd64"
export DEST="$DEST/$platform" # bundles/VERSION/cross/GOOS/GOARCH/docker-VERSION
mkdir -p "$DEST"
ABS_DEST="$(cd "$DEST" && pwd -P)"
export GOOS=${platform%/*}
export GOARCH=${platform##*/}
if [ -z "${daemonSupporting[$platform]}" ]; then
	export LDFLAGS_STATIC_DOCKER="" # we just need a simple client for these platforms
	export BUILDFLAGS=( "${ORIG_BUILDFLAGS[@]/ daemon/}" ) # remove the "daemon" build tag from platforms that aren't supported
fi
source "${MAKEDIR}/binary"
313	hack/release.sh
@@ -1,313 +0,0 @@
#!/usr/bin/env bash
set -e

# This script looks for bundles built by make.sh, and releases them on a
# public S3 bucket.
#
# Bundles should be available for the VERSION string passed as argument.
#
# The correct way to call this script is inside a container built by the
# official Dockerfile at the root of the Docker source code. The Dockerfile,
# make.sh and release.sh should all be from the same source code revision.

set -o pipefail

# Print a usage message and exit.
usage() {
	cat >&2 <<'EOF'
To run, I need:
- to be in a container generated by the Dockerfile at the top of the Docker
  repository;
- to be provided with the location of an S3 bucket and path, in
  environment variables AWS_S3_BUCKET and AWS_S3_BUCKET_PATH (default: '');
- to be provided with AWS credentials for this S3 bucket, in environment
  variables AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY;
- a generous amount of good will and nice manners.
The canonical way to run me is to run the image produced by the Dockerfile, e.g.:

docker run -e AWS_S3_BUCKET=test.docker.com \
	-e AWS_ACCESS_KEY_ID \
	-e AWS_SECRET_ACCESS_KEY \
	-e AWS_DEFAULT_REGION \
	-it --privileged \
	docker ./hack/release.sh
EOF
	exit 1
}

[ "$AWS_S3_BUCKET" ] || usage
[ "$AWS_ACCESS_KEY_ID" ] || usage
[ "$AWS_SECRET_ACCESS_KEY" ] || usage
[ -d /go/src/github.com/docker/docker ] || usage
cd /go/src/github.com/docker/docker
[ -x hack/make.sh ] || usage

export AWS_DEFAULT_REGION
: ${AWS_DEFAULT_REGION:=us-west-1}

AWS_CLI=${AWS_CLI:-'aws'}

RELEASE_BUNDLES=(
	binary
	cross
	tgz
)

if [ "$1" != '--release-regardless-of-test-failure' ]; then
	RELEASE_BUNDLES=(
		test-unit
		"${RELEASE_BUNDLES[@]}"
		test-integration
	)
fi

VERSION=$(< VERSION)
BUCKET=$AWS_S3_BUCKET
BUCKET_PATH=$BUCKET
[[ -n "$AWS_S3_BUCKET_PATH" ]] && BUCKET_PATH+=/$AWS_S3_BUCKET_PATH

if command -v git &> /dev/null && git rev-parse &> /dev/null; then
	if [ -n "$(git status --porcelain --untracked-files=no)" ]; then
		echo "You cannot run the release script on a repo with uncommitted changes"
		usage
	fi
fi

# These are the 2 keys we've used to sign the debs:
# release (get.docker.com)
# GPG_KEY="36A1D7869245C8950F966E92D8576A8BA88D21E9"
# test (test.docker.com)
# GPG_KEY="740B314AE3941731B942C66ADF4FD13717AAD7D6"

setup_s3() {
	echo "Setting up S3"
	# Try creating the bucket. Ignore errors (it might already exist).
	$AWS_CLI s3 mb "s3://$BUCKET" 2>/dev/null || true
	# Check access to the bucket.
	$AWS_CLI s3 ls "s3://$BUCKET" > /dev/null
	# Make the bucket accessible through website endpoints.
	$AWS_CLI s3 website --index-document index --error-document error "s3://$BUCKET"
}

# write_to_s3 uploads the contents of standard input to the specified S3 url.
write_to_s3() {
	DEST=$1
	F=$(mktemp)
	cat > "$F"
	$AWS_CLI s3 cp --acl public-read --content-type 'text/plain' "$F" "$DEST"
	rm -f "$F"
}
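
# For illustration, write_to_s3 is invoked further down in this script like so:
#   echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"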

s3_url() {
	case "$BUCKET" in
		get.docker.com|test.docker.com|experimental.docker.com)
			echo "https://$BUCKET_PATH"
			;;
		*)
			BASE_URL="http://${BUCKET}.s3-website-${AWS_DEFAULT_REGION}.amazonaws.com"
			if [[ -n "$AWS_S3_BUCKET_PATH" ]]; then
				echo "$BASE_URL/$AWS_S3_BUCKET_PATH"
			else
				echo "$BASE_URL"
			fi
			;;
	esac
}
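
# For example: with BUCKET=get.docker.com this prints "https://get.docker.com" (plus any
# AWS_S3_BUCKET_PATH suffix), while a hypothetical bucket "my-staging-bucket" in us-west-1
# would yield "http://my-staging-bucket.s3-website-us-west-1.amazonaws.com".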

build_all() {
	echo "Building release"
	if ! ./hack/make.sh "${RELEASE_BUNDLES[@]}"; then
		echo >&2
		echo >&2 'The build or tests appear to have failed.'
		echo >&2
		echo >&2 'You, as the release maintainer, now have a couple options:'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- delay release and fix issues'
		echo >&2 '- did we mention how important this is? issues need fixing :)'
		echo >&2
		echo >&2 'As a final LAST RESORT, you (because only you, the release maintainer,'
		echo >&2 ' really knows all the hairy problems at hand with the current release'
		echo >&2 ' issues) may bypass this checking by running this script again with the'
		echo >&2 ' single argument of "--release-regardless-of-test-failure", which will skip'
		echo >&2 ' running the test suite, and will only build the binaries and packages. Please'
		echo >&2 ' avoid using this if at all possible.'
		echo >&2
		echo >&2 'Regardless, we cannot stress enough the scarcity with which this bypass'
		echo >&2 ' should be used. If there are release issues, we should always err on the'
		echo >&2 ' side of caution.'
		echo >&2
		exit 1
	fi
}

upload_release_build() {
	src="$1"
	dst="$2"
	latest="$3"

	echo
	echo "Uploading $src"
	echo " to $dst"
	echo
	$AWS_CLI s3 cp --follow-symlinks --acl public-read "$src" "$dst"
	if [ "$latest" ]; then
		echo
		echo "Copying to $latest"
		echo
		$AWS_CLI s3 cp --acl public-read "$dst" "$latest"
	fi

	# get hash files too (see hash_files() in hack/make.sh)
	for hashAlgo in md5 sha256; do
		if [ -e "$src.$hashAlgo" ]; then
			echo
			echo "Uploading $src.$hashAlgo"
			echo " to $dst.$hashAlgo"
			echo
			$AWS_CLI s3 cp --follow-symlinks --acl public-read --content-type='text/plain' "$src.$hashAlgo" "$dst.$hashAlgo"
			if [ "$latest" ]; then
				echo
				echo "Copying to $latest.$hashAlgo"
				echo
				$AWS_CLI s3 cp --acl public-read "$dst.$hashAlgo" "$latest.$hashAlgo"
			fi
		fi
	done
}

release_build() {
	echo "Releasing binaries"
	GOOS=$1
	GOARCH=$2

	binDir=bundles/$VERSION/cross/$GOOS/$GOARCH
	tgzDir=bundles/$VERSION/tgz/$GOOS/$GOARCH
	binary=docker-$VERSION
	zipExt=".tgz"
	binaryExt=""
	tgz=$binary$zipExt

	latestBase=
	if [ -z "$NOLATEST" ]; then
		latestBase=docker-latest
	fi

	# we need to map our GOOS and GOARCH to uname values
	# see https://en.wikipedia.org/wiki/Uname
	# ie, GOOS=linux -> "uname -s"=Linux

	s3Os=$GOOS
	case "$s3Os" in
		darwin)
			s3Os=Darwin
			;;
		freebsd)
			s3Os=FreeBSD
			;;
		linux)
			s3Os=Linux
			;;
		windows)
			# this is Windows; use the .zip and .exe extensions for the files
			s3Os=Windows
			zipExt=".zip"
			binaryExt=".exe"
			tgz=$binary$zipExt
			binary+=$binaryExt
			;;
		*)
			echo >&2 "error: can't convert $s3Os to an appropriate value for 'uname -s'"
			exit 1
			;;
	esac

	s3Arch=$GOARCH
	case "$s3Arch" in
		amd64)
			s3Arch=x86_64
			;;
		386)
			s3Arch=i386
			;;
		arm)
			s3Arch=armel
			# someday, we might potentially support multiple GOARM values, in which case we might get armhf here too
			;;
		*)
			echo >&2 "error: can't convert $s3Arch to an appropriate value for 'uname -m'"
			exit 1
			;;
	esac
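
	# For example, a linux/amd64 build maps to s3Os=Linux / s3Arch=x86_64, and a
	# windows/amd64 build maps to Windows/x86_64 with the .zip and .exe extensions
	# selected above; the upload prefix built next follows directly from that mapping.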

	s3Dir="s3://$BUCKET_PATH/builds/$s3Os/$s3Arch"
	# latest=
	latestTgz=
	if [ "$latestBase" ]; then
		# commented out since we aren't uploading binaries right now.
		# latest="$s3Dir/$latestBase$binaryExt"
		# we don't include the $binaryExt because we don't want docker.exe.zip
		latestTgz="$s3Dir/$latestBase$zipExt"
	fi

	if [ ! -f "$tgzDir/$tgz" ]; then
		echo >&2 "error: can't find $tgzDir/$tgz - was it packaged properly?"
		exit 1
	fi
	# disable binary uploads for now. Only providing tgz downloads
	# upload_release_build "$binDir/$binary" "$s3Dir/$binary" "$latest"
	upload_release_build "$tgzDir/$tgz" "$s3Dir/$tgz" "$latestTgz"
}

# Upload binaries and tgz files to S3
release_binaries() {
	[ "$(find bundles/$VERSION -path "bundles/$VERSION/cross/*/*/docker-$VERSION")" != "" ] || {
		echo >&2 './hack/make.sh must be run before release_binaries'
		exit 1
	}

	for d in bundles/$VERSION/cross/*/*; do
		GOARCH="$(basename "$d")"
		GOOS="$(basename "$(dirname "$d")")"
		release_build "$GOOS" "$GOARCH"
	done

	# TODO create redirect from builds/*/i686 to builds/*/i386

	cat <<EOF | write_to_s3 s3://$BUCKET_PATH/builds/index
# To install, run the following commands as root:
curl -fsSLO $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz && tar --strip-components=1 -xvzf docker-$VERSION.tgz -C /usr/local/bin

# Then start docker in daemon mode:
/usr/local/bin/dockerd
EOF

	# Add redirect at /builds/info for URL-backwards-compatibility
	rm -rf /tmp/emptyfile && touch /tmp/emptyfile
	$AWS_CLI s3 cp --acl public-read --website-redirect '/builds/' --content-type='text/plain' /tmp/emptyfile "s3://$BUCKET_PATH/builds/info"

	if [ -z "$NOLATEST" ]; then
		echo "Advertising $VERSION on $BUCKET_PATH as most recent version"
		echo "$VERSION" | write_to_s3 "s3://$BUCKET_PATH/latest"
	fi
}

main() {
	[ "$SKIP_RELEASE_BUILD" = '1' ] || build_all
	setup_s3
	release_binaries
}

main

echo
echo
echo "Release complete; see $(s3_url)"
echo "Use the following text to announce the release:"
echo
echo "We have just pushed $VERSION to $(s3_url). You can download it with the following:"
echo
echo "Linux 64bit tgz: $(s3_url)/builds/Linux/x86_64/docker-$VERSION.tgz"
echo "Darwin/OSX 64bit client tgz: $(s3_url)/builds/Darwin/x86_64/docker-$VERSION.tgz"
echo "Windows 64bit zip: $(s3_url)/builds/Windows/x86_64/docker-$VERSION.zip"
echo "Windows 32bit client zip: $(s3_url)/builds/Windows/i386/docker-$VERSION.zip"
echo