Commit 845b816686 (mirror of https://github.com/moby/moby.git, synced 2022-11-09 12:21:53 -05:00)

29 changed files with 312 additions and 130 deletions

AUTHORS (1 changed line)

@@ -148,6 +148,7 @@ odk- <github@odkurzacz.org>
Pascal Borreli <pascal@borreli.com>
Paul Bowsher <pbowsher@globalpersonals.co.uk>
Paul Hammond <paul@paulhammond.org>
Paul Liétar <paul@lietar.net>
Paul Nasrat <pnasrat@gmail.com>
Phil Spitler <pspitler@gmail.com>
Piotr Bogdan <ppbogdan@gmail.com>

CHANGELOG.md (28 changed lines)

@@ -1,5 +1,33 @@
# Changelog

## 0.7.5 (2014-01-09)

#### Builder

* Disable compression for build. More space usage but a much faster upload
- Fix ADD caching for certain paths
- Do not compress archive from git build

#### Documentation

- Fix error in GROUP add example
* Make sure the GPG fingerprint is inline in the documentation
* Give more specific advice on setting up signing of commits for DCO

#### Runtime

- Fix misspelled container names
- Do not add hostname when networking is disabled
* Return most recent image from the cache by date
- Return all errors from docker wait
* Add Content-Type Header "application/json" to GET /version and /info responses

#### Other

* Update DCO to version 1.1
+ Update Makefile to use "docker:GIT_BRANCH" as the generated image name
* Update Travis to check for new 1.1 DCO version

## 0.7.4 (2014-01-07)

#### Builder

@@ -113,28 +113,41 @@ pass it on as an open-source patch. The rules are pretty simple: if you
can certify the below:

```
Docker Developer Grant and Certificate of Origin 1.0
Docker Developer Grant and Certificate of Origin 1.1

By making a contribution to the Docker Project ("Project"), I represent and warrant that:

a. The contribution was created in whole or in part by me and I have the right to submit the contribution on my own behalf or on behalf of a third party who has authorized me to submit this contribution to the Project; or

b. The contribution is based upon previous work that, to the best of my knowledge, is covered under an appropriate open source license and I have the right and authorization to submit that work with modifications, whether created in whole or in part by me, under the same open source license (unless I am permitted to submit under a different license) that I have identified in the contribution; or

c. The contribution was provided directly to me by some other person who represented and warranted (a) or (b) and I have not modified it.

d. I understand and agree that this Project and the contribution are publicly known and that a record of the contribution (including all personal information I submit with it, including my sign-off record) is maintained indefinitely and may be redistributed consistent with this Project or the open source license(s) involved.

e. I hereby grant to the Project, Docker, Inc and its successors; and recipients of software distributed by the Project a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, modify, prepare derivative works of, publicly display, publicly perform, sublicense, and distribute this contribution and such modifications and derivative works consistent with this Project, the open source license indicated in the previous work or other appropriate open source license specified by the Project and approved by the Open Source Initiative(OSI) at http://www.opensource.org.
```

then you just add a line saying
then you just add a line to every git commit message:

Docker-DCO-1.0-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)
Docker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)

using your real name (sorry, no pseudonyms or anonymous contributions.)

One way to automate this, is customise your get ``commit.template`` by adding
the following to your ``.git/hooks/prepare-commit-msg`` script (needs
``chmod 755 .git/hooks/prepare-commit-msg`` ) in the docker checkout:

```
#!/bin/sh
# Auto sign all commits to allow them to be used by the Docker project.
# see https://github.com/dotcloud/docker/blob/master/CONTRIBUTING.md#sign-your-work
#
GH_USER=$(git config --get github.user)
SOB=$(git var GIT_AUTHOR_IDENT | sed -n "s/^\(.*>\).*$/Docker-DCO-1.1-Signed-off-by: \1 \(github: $GH_USER\)/p")
grep -qs "^$SOB" "$1" || echo "\n$SOB" >> "$1"

```

If you have any questions, please refer to the FAQ in the [docs](http://docs.docker.io)

Makefile (10 changed lines)

@@ -1,6 +1,9 @@
.PHONY: all binary build cross default docs shell test

DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles docker
GIT_BRANCH := $(shell git rev-parse --abbrev-ref HEAD)
DOCKER_IMAGE := docker:$(GIT_BRANCH)
DOCKER_DOCS_IMAGE := docker-docs:$(GIT_BRANCH)
DOCKER_RUN_DOCKER := docker run -rm -i -t -privileged -e TESTFLAGS -v $(CURDIR)/bundles:/go/src/github.com/dotcloud/docker/bundles "$(DOCKER_IMAGE)"

default: binary

@@ -14,7 +17,8 @@ cross: build
$(DOCKER_RUN_DOCKER) hack/make.sh binary cross

docs:
docker build -t docker-docs docs && docker run -p 8000:8000 docker-docs
docker build -rm -t "$(DOCKER_DOCS_IMAGE)" docs
docker run -rm -i -t -p 8000:8000 "$(DOCKER_DOCS_IMAGE)"

test: build
$(DOCKER_RUN_DOCKER) hack/make.sh test test-integration
@@ -23,7 +27,7 @@ shell: build
$(DOCKER_RUN_DOCKER) bash

build: bundles
docker build -t docker .
docker build -rm -t "$(DOCKER_IMAGE)" .

bundles:
mkdir bundles

@@ -8,15 +8,15 @@ ok "/version": getVersion, 1
TODO "/images/viz": getImagesViz, 0 yes
TODO "/images/search": getImagesSearch, N
#3490 "/images/{name:.*}/get": getImagesGet, 0
TODO "/images/{name:.*}/history": getImagesHistory, 1
TODO "/images/{name:.*}/history": getImagesHistory, N
TODO "/images/{name:.*}/json": getImagesByName, 1
TODO "/containers/ps": getContainersJSON, N
TODO "/containers/json": getContainersJSON, 1
ok "/containers/{name:.*}/export": getContainersExport, 0
TODO "/containers/{name:.*}/changes": getContainersChanges, 1
TODO "/containers/{name:.*}/changes": getContainersChanges, N
TODO "/containers/{name:.*}/json": getContainersByName, 1
TODO "/containers/{name:.*}/top": getContainersTop, N
TODO "/containers/{name:.*}/attach/ws": wsContainersAttach, 0 yes
#3512 "/containers/{name:.*}/attach/ws": wsContainersAttach, 0 yes

**POST**
TODO "/auth": postAuth, 0 yes
@@ -34,7 +34,7 @@ ok "/containers/{name:.*}/start": postContainersStart, 0
ok "/containers/{name:.*}/stop": postContainersStop, 0
ok "/containers/{name:.*}/wait": postContainersWait, 0
ok "/containers/{name:.*}/resize": postContainersResize, 0
TODO "/containers/{name:.*}/attach": postContainersAttach, 0 yes
#3512 "/containers/{name:.*}/attach": postContainersAttach, 0 yes
TODO "/containers/{name:.*}/copy": postContainersCopy, 0 yes

**DELETE**

VERSION (2 changed lines)

@@ -1 +1 @@
0.7.4
0.7.5

Vagrantfile (vendored, 2 changed lines)

@@ -24,7 +24,7 @@ if [ -z "$user" ]; then
fi

# Adding an apt gpg key is idempotent.
wget -q -O - https://get.docker.io/gpg | apt-key add -
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

# Creating the docker.list file is idempotent, but it may overwrite desired
# settings if it already exists. This could be solved with md5sum but it

api.go (4 changed lines)

@@ -140,6 +140,7 @@ func postAuth(srv *Server, version float64, w http.ResponseWriter, r *http.Reque
}

func getVersion(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
srv.Eng.ServeHTTP(w, r)
return nil
}
@@ -216,6 +217,7 @@ func getImagesViz(srv *Server, version float64, w http.ResponseWriter, r *http.R
}

func getInfo(srv *Server, version float64, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
w.Header().Set("Content-Type", "application/json")
srv.Eng.ServeHTTP(w, r)
return nil
}
@@ -927,7 +929,7 @@ func postBuild(srv *Server, version float64, w http.ResponseWriter, r *http.Requ
return fmt.Errorf("Error trying to use git: %s (%s)", err, output)
}

c, err := archive.Tar(root, archive.Bzip2)
c, err := archive.Tar(root, archive.Uncompressed)
if err != nil {
return err
}
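
Both getVersion and getInfo now set the Content-Type header before handing the request to the engine. As a minimal sketch (hypothetical handler names, standard library only, not the server's real routing), the important detail is that the header is set before anything is written to the ResponseWriter:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"
)

// getVersionSketch stands in for a handler like the ones above: set the
// Content-Type header first, then write the JSON body.
func getVersionSketch(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Content-Type", "application/json")
	fmt.Fprint(w, `{"Version":"0.7.5"}`)
}

func main() {
	// Exercise the handler through an httptest recorder, the same way the
	// api tests in this commit check the header.
	rec := httptest.NewRecorder()
	req := httptest.NewRequest(http.MethodGet, "/version", nil)
	getVersionSketch(rec, req)
	fmt.Println(rec.Header().Get("Content-Type")) // application/json
}
```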

buildfile.go (41 changed lines)

@@ -353,8 +353,9 @@ func (b *buildFile) CmdAdd(args string) error {

// FIXME: do we really need this?
var (
origPath = orig
destPath = dest
origPath = orig
destPath = dest
remoteHash string
)

if utils.IsURL(orig) {
@@ -373,11 +374,20 @@ func (b *buildFile) CmdAdd(args string) error {
}
defer os.RemoveAll(tmpDirName)
if _, err = io.Copy(tmpFile, resp.Body); err != nil {
tmpFile.Close()
return err
}
origPath = path.Join(filepath.Base(tmpDirName), filepath.Base(tmpFileName))
tmpFile.Close()

// Process the checksum
r, err := archive.Tar(tmpFileName, archive.Uncompressed)
if err != nil {
return err
}
tarSum := utils.TarSum{Reader: r, DisableCompression: true}
remoteHash = tarSum.Sum(nil)

// If the destination is a directory, figure out the filename.
if strings.HasSuffix(dest, "/") {
u, err := url.Parse(orig)
@@ -408,20 +418,16 @@ func (b *buildFile) CmdAdd(args string) error {
sums = b.context.GetSums()
)

// Has tarsum strips the '.' and './', we put it back for comparaison.
for file, sum := range sums {
if len(file) == 0 || file[0] != '.' && file[0] != '/' {
delete(sums, file)
sums["./"+file] = sum
}
}

if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
if remoteHash != "" {
hash = remoteHash
} else if fi, err := os.Stat(path.Join(b.contextPath, origPath)); err != nil {
return err
} else if fi.IsDir() {
var subfiles []string
for file, sum := range sums {
if strings.HasPrefix(file, origPath) {
absFile := path.Join(b.contextPath, file)
absOrigPath := path.Join(b.contextPath, origPath)
if strings.HasPrefix(absFile, absOrigPath) {
subfiles = append(subfiles, sum)
}
}
@@ -430,7 +436,13 @@ func (b *buildFile) CmdAdd(args string) error {
hasher.Write([]byte(strings.Join(subfiles, ",")))
hash = "dir:" + hex.EncodeToString(hasher.Sum(nil))
} else {
hash = "file:" + sums[origPath]
if origPath[0] == '/' && len(origPath) > 1 {
origPath = origPath[1:]
}
origPath = strings.TrimPrefix(origPath, "./")
if h, ok := sums[origPath]; ok {
hash = "file:" + h
}
}
b.config.Cmd = []string{"/bin/sh", "-c", fmt.Sprintf("#(nop) ADD %s in %s", hash, dest)}
hit, err := b.probeCache()
@@ -604,11 +616,12 @@ func (b *buildFile) Build(context io.Reader) (string, error) {
if err != nil {
return "", err
}
b.context = &utils.TarSum{Reader: context}
b.context = &utils.TarSum{Reader: context, DisableCompression: true}
if err := archive.Untar(b.context, tmpdirPath, nil); err != nil {
return "", err
}
defer os.RemoveAll(tmpdirPath)

b.contextPath = tmpdirPath
filename := path.Join(tmpdirPath, "Dockerfile")
if _, err := os.Stat(filename); os.IsNotExist(err) {
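
The ADD change hashes a remote download by tarring it without compression and feeding that stream through utils.TarSum, then uses the resulting digest as the build-cache key. A simplified, standard-library-only sketch of the same idea follows; hashFileAsTar is a hypothetical helper and not the real TarSum algorithm (TarSum also tracks per-file sums and re-encodes headers), but it shows why an uncompressed, deterministic stream makes a usable cache key.

```go
package main

import (
	"archive/tar"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"time"
)

// hashFileAsTar writes a single-file, uncompressed tar stream straight into a
// SHA-256 hash and returns the hex digest. Illustrative only.
func hashFileAsTar(path string) (string, error) {
	f, err := os.Open(path)
	if err != nil {
		return "", err
	}
	defer f.Close()

	fi, err := f.Stat()
	if err != nil {
		return "", err
	}

	h := sha256.New()
	tw := tar.NewWriter(h) // no gzip layer: the stream stays uncompressed

	hdr := &tar.Header{
		Name:    fi.Name(),
		Mode:    0600,
		Size:    fi.Size(),
		ModTime: time.Unix(0, 0), // fixed mtime keeps the digest deterministic
	}
	if err := tw.WriteHeader(hdr); err != nil {
		return "", err
	}
	if _, err := io.Copy(tw, f); err != nil {
		return "", err
	}
	if err := tw.Close(); err != nil {
		return "", err
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}

func main() {
	sum, err := hashFileAsTar("testdata.txt") // hypothetical input file
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	// In the builder, a digest like this ends up in the
	// "#(nop) ADD <hash> in <dest>" command that probeCache compares against.
	fmt.Println("cache key:", sum)
}
```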

@@ -2510,11 +2510,7 @@ func (cli *DockerCli) LoadConfigFile() (err error) {
func waitForExit(cli *DockerCli, containerId string) (int, error) {
body, _, err := cli.call("POST", "/containers/"+containerId+"/wait", nil)
if err != nil {
// If we can't connect, then the daemon probably died.
if err != ErrConnectionRefused {
return -1, err
}
return -1, nil
return -1, err
}

var out APIWait

@@ -120,7 +120,7 @@ type BindMap struct {
}

var (
ErrContainerStart = errors.New("The container failed to start. Unkown error")
ErrContainerStart = errors.New("The container failed to start. Unknown error")
ErrContainerStartTimeout = errors.New("The container failed to start due to timed out.")
ErrInvalidWorikingDirectory = errors.New("The working directory is invalid. It needs to be an absolute path.")
ErrConflictAttachDetach = errors.New("Conflicting options: -a and -d")
@@ -1044,7 +1044,7 @@ ff02::2 ip6-allrouters

if container.Config.Domainname != "" {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s.%s %s\n", IP, container.Config.Hostname, container.Config.Domainname, container.Config.Hostname)), hostsContent...)
} else {
} else if !container.Config.NetworkDisabled {
hostsContent = append([]byte(fmt.Sprintf("%s\t%s\n", IP, container.Config.Hostname)), hostsContent...)
}

@@ -56,7 +56,7 @@ func main() {

// Check that the requested process manager is supported
if _, exists := templates[*kind]; !exists {
panic("Unkown script template")
panic("Unknown script template")
}

// Load the requested template

@@ -125,12 +125,14 @@ Check the logs make sure it is working correctly.

.. code-block:: bash

sudo docker attach $CONTAINER_ID
sudo docker attach -sig-proxy=false $CONTAINER_ID

Attach to the container to see the results in real-time.

- **"docker attach**" This will allow us to attach to a background
process to see what is going on.
- **"-sig-proxy=false"** Do not forward signals to the container; allows
us to exit the attachment using Control-C without stopping the container.
- **$CONTAINER_ID** The Id of the container we want to attach too.

Exit from the container attachment by pressing Control-C.

@@ -62,7 +62,7 @@

.. code-block:: bash

docker-playground:~$ echo "DOCKER_OPTS="$DOCKER_OPTS -mtu 1460" | sudo tee -a /etc/defaults/docker
docker-playground:~$ echo 'DOCKER_OPTS="$DOCKER_OPTS -mtu 1460"' | sudo tee -a /etc/default/docker
docker-playground:~$ sudo service docker restart

8. Start a new container:

@@ -68,13 +68,11 @@ easy. **See the :ref:`installmirrors` section below if you are not in
the United States.** Other sources of the Debian packages may be
faster for you to install.

First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``
First add the Docker repository key to your local keychain.

.. code-block:: bash

sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.
@@ -142,13 +140,11 @@ Docker is available as a Debian package, which makes installation easy.
Please note that these instructions have changed for 0.6. If you are upgrading from an earlier version, you will need
to follow them again.

First add the Docker repository key to your local keychain. You can use the
``apt-key`` command to check the fingerprint matches: ``36A1 D786 9245 C895 0F96
6E92 D857 6A8B A88D 21E9``
First add the Docker repository key to your local keychain.

.. code-block:: bash

sudo sh -c "wget -qO- https://get.docker.io/gpg | apt-key add -"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

Add the Docker repository to your apt sources list, update and install the
``lxc-docker`` package.

@@ -26,7 +26,7 @@ use ``apt-get`` to upgrade.
.. code-block:: bash

# Add the Docker repository key to your local keychain
sudo sh -c "curl https://get.docker.io/gpg | apt-key add -"
sudo apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9

# Add the Docker repository to your apt sources list.
sudo sh -c "echo deb https://get.docker.io/ubuntu docker main > /etc/apt/sources.list.d/docker.list"

@@ -78,11 +78,11 @@ client commands.
# Add the docker group if it doesn't already exist.
sudo groupadd docker

# Add the connected user "${USERNAME}" to the docker group.
# Add the connected user "${USER}" to the docker group.
# Change the user name to match your preferred user.
# You may have to logout and log back in again for
# this to take effect.
sudo gpasswd -a ${USERNAME} docker
sudo gpasswd -a ${USER} docker

# Restart the docker daemon.
sudo service docker restart

@@ -1,3 +1,4 @@
# Release Checklist
## A maintainer's guide to releasing Docker

So you're in charge of a Docker release? Cool. Here's what to do.
@@ -8,9 +9,10 @@ to keep it up-to-date.
### 1. Pull from master and create a release branch

```bash
export VERSION=vXXX
export VERSION=vX.Y.Z
git checkout release
git pull
git fetch
git reset --hard origin/release
git checkout -b bump_$VERSION
git merge origin/master
```
@@ -20,16 +22,13 @@ git merge origin/master
You can run this command for reference:

```bash
LAST_VERSION=$(git tag | grep -E "v[0-9\.]+$" | sort -nr | head -n 1)
git log $LAST_VERSION..HEAD
LAST_VERSION=$(git tag | grep -E 'v[0-9\.]+$' | sort -nr | head -n 1)
git log --stat $LAST_VERSION..HEAD
```

Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
Each change should be listed under a category heading formatted as `#### CATEGORY`.

* BULLET is either ```-```, ```+``` or ```*```, to indicate a bugfix,
new feature or upgrade, respectively.

* CATEGORY should describe which part of the project is affected.
`CATEGORY` should describe which part of the project is affected.
Valid categories are:
* Builder
* Documentation
@@ -37,19 +36,34 @@ Each change should be formatted as ```BULLET CATEGORY: DESCRIPTION```
* Packaging
* Remote API
* Runtime
* Other (please use this category sparingly)

* DESCRIPTION: a concise description of the change that is relevant to the
end-user, using the present tense. Changes should be described in terms
of how they affect the user, for example "new feature X which allows Y",
"fixed bug which caused X", "increased performance of Y".
Each change should be formatted as `BULLET DESCRIPTION`, given:

* BULLET: either `-`, `+` or `*`, to indicate a bugfix, new feature or
upgrade, respectively.

* DESCRIPTION: a concise description of the change that is relevant to the
end-user, using the present tense. Changes should be described in terms
of how they affect the user, for example "Add new feature X which allows Y",
"Fix bug which caused X", "Increase performance of Y".

EXAMPLES:

```
+ Builder: 'docker build -t FOO' applies the tag FOO to the newly built
container.
* Runtime: improve detection of kernel version
- Remote API: fix a bug in the optional unix socket transport
```markdown
## 0.3.6 (1995-12-25)

#### Builder

+ 'docker build -t FOO .' applies the tag FOO to the newly built container

#### Remote API

- Fix a bug in the optional unix socket transport

#### Runtime

* Improve detection of kernel version
```

### 3. Change the contents of the VERSION file
@@ -61,14 +75,14 @@ echo ${VERSION#v} > VERSION
### 4. Run all tests

```bash
docker run -privileged docker hack/make.sh test
make test
```

### 5. Test the docs

Make sure that your tree includes documentation for any modified or
new features, syntax or semantic changes. Instructions for building
the docs are in ``docs/README.md``
the docs are in `docs/README.md`.

### 6. Commit and create a pull request to the "release" branch

@@ -76,44 +90,32 @@ the docs are in ``docs/README.md``
git add VERSION CHANGELOG.md
git commit -m "Bump version to $VERSION"
git push origin bump_$VERSION
echo "https://github.com/dotcloud/docker/compare/release...bump_$VERSION"
```

That last command will give you the proper link to visit to ensure that you
open the PR against the "release" branch instead of accidentally against
"master" (like so many brave souls before you already have).

### 7. Get 2 other maintainers to validate the pull request

### 8. Apply tag

```bash
git tag -a $VERSION -m $VERSION bump_$VERSION
git push origin $VERSION
```

Merging the pull request to the release branch will automatically
update the documentation on the "latest" revision of the docs. You
should see the updated docs 5-10 minutes after the merge. The docs
will appear on http://docs.docker.io/. For more information about
documentation releases, see ``docs/README.md``

### 9. Go to github to merge the bump_$VERSION into release

Don't forget to push that pretty blue button to delete the leftover
branch afterwards!

### 10. Publish binaries
### 8. Publish binaries

To run this you will need access to the release credentials.
Get them from [the infrastructure maintainers](
https://github.com/dotcloud/docker/blob/master/hack/infrastructure/MAINTAINERS).

```bash
git checkout release
git fetch
git reset --hard origin/release
docker build -t docker .
docker run \
export AWS_S3_BUCKET="test.docker.io"
export AWS_ACCESS_KEY="$(cat ~/.aws/access_key)"
export AWS_SECRET_KEY="$(cat ~/.aws/secret_key)"
export GPG_PASSPHRASE=supersecretsesame
docker run \
-e AWS_S3_BUCKET=test.docker.io \
-e AWS_ACCESS_KEY=$(cat ~/.aws/access_key) \
-e AWS_SECRET_KEY=$(cat ~/.aws/secret_key) \
-e GPG_PASSPHRASE=supersecretsesame \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
-i -t -privileged \
docker \
hack/release.sh
@@ -121,9 +123,78 @@ docker run \

It will run the test suite one more time, build the binaries and packages,
and upload to the specified bucket (you should use test.docker.io for
general testing, and once everything is fine, switch to get.docker.io).
general testing, and once everything is fine, switch to get.docker.io as
noted below).

### 11. Rejoice and Evangelize!
After the binaries and packages are uploaded to test.docker.io, make sure
they get tested in both Ubuntu and Debian for any obvious installation
issues or runtime issues.

Announcing on IRC in both `#docker` and `#docker-dev` is a great way to get
help testing! An easy way to get some useful links for sharing:

```bash
echo "Ubuntu/Debian install script: curl -sLS https://test.docker.io/ | sh"
echo "Linux 64bit binary: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}"
echo "Darwin/OSX 64bit client binary: https://test.docker.io/builds/Darwin/x86_64/docker-${VERSION#v}"
echo "Darwin/OSX 32bit client binary: https://test.docker.io/builds/Darwin/i386/docker-${VERSION#v}"
echo "Linux 64bit tgz: https://test.docker.io/builds/Linux/x86_64/docker-${VERSION#v}.tgz"
```

Once they're tested and reasonably believed to be working, run against
get.docker.io:

```bash
docker run \
-e AWS_S3_BUCKET=get.docker.io \
-e AWS_ACCESS_KEY \
-e AWS_SECRET_KEY \
-e GPG_PASSPHRASE \
-i -t -privileged \
docker \
hack/release.sh
```

### 9. Apply tag

```bash
git tag -a $VERSION -m $VERSION bump_$VERSION
git push origin $VERSION
```

It's very important that we don't make the tag until after the official
release is uploaded to get.docker.io!

### 10. Go to github to merge the `bump_$VERSION` into release

Merging the pull request to the release branch will automatically
update the documentation on the "latest" revision of the docs. You
should see the updated docs 5-10 minutes after the merge. The docs
will appear on http://docs.docker.io/. For more information about
documentation releases, see `docs/README.md`.

Don't forget to push that pretty blue button to delete the leftover
branch afterwards!

### 11. Create a new pull request to merge release back into master

```bash
git checkout master
git fetch
git reset --hard origin/master
git merge origin/release
git checkout -b merge_release_$VERSION
echo ${VERSION#v}-dev > VERSION
git add VERSION
git commit -m "Change version to $(cat VERSION)"
git push origin merge_release_$VERSION
echo "https://github.com/dotcloud/docker/compare/master...merge_release_$VERSION"
```

Again, get two maintainers to validate, then merge, then push that pretty
blue button to delete your branch.

### 12. Rejoice and Evangelize!

Congratulations! You're done.

@@ -127,7 +127,7 @@ call('/usr/bin/rsync -aH {} {}@{}:{}'.format(DOCKER_CI_PATH, DO_IMAGE_USER, ip,

# Install Docker and Buildbot dependencies
sudo('mkdir /mnt/docker; ln -s /mnt/docker /var/lib/docker')
sudo('wget -q -O - https://get.docker.io/gpg | apt-key add -')
sudo('apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9')
sudo('echo deb https://get.docker.io/ubuntu docker main >'
' /etc/apt/sources.list.d/docker.list')
sudo('echo -e "deb http://archive.ubuntu.com/ubuntu raring main universe\n'

@@ -108,7 +108,11 @@ case "$lsb_dist" in
fi
(
set -x
$sh_c "$curl ${url}gpg | apt-key add -"
if [ "https://get.docker.io/" = "$url" ]; then
$sh_c "apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9"
else
$sh_c "$curl ${url}gpg | apt-key add -"
fi
$sh_c "echo deb ${url}ubuntu docker main > /etc/apt/sources.list.d/docker.list"
$sh_c 'sleep 3; apt-get update; apt-get install -y -q lxc-docker'
)

@@ -245,7 +245,7 @@ EOF
# Add the repository to your APT sources
echo deb $(s3_url)/ubuntu docker main > /etc/apt/sources.list.d/docker.list
# Then import the repository key
curl $(s3_url)/gpg | apt-key add -
apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 36A1D7869245C8950F966E92D8576A8BA88D21E9
# Install docker
apt-get update ; apt-get install -y lxc-docker

@@ -17,7 +17,7 @@ commits = yaml.load(gitlog)
if not commits:
exit(0) # what? how can we have no commits?

DCO = 'Docker-DCO-1.0-Signed-off-by:'
DCO = 'Docker-DCO-1.1-Signed-off-by:'

p = re.compile(r'^{0} ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$'.format(re.escape(DCO)), re.MULTILINE|re.UNICODE)
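
The check above only bumps the expected prefix from Docker-DCO-1.0 to Docker-DCO-1.1; the regular expression around it is unchanged. A small sketch of the equivalent match written in Go (a hypothetical helper, translated from the Python pattern above) shows how a sign-off line from the commit message is validated and split into name, e-mail, and github handle:

```go
package main

import (
	"fmt"
	"regexp"
)

const dco = "Docker-DCO-1.1-Signed-off-by:"

// signOffRe mirrors the Python pattern: name, e-mail, and a github handle.
var signOffRe = regexp.MustCompile(
	"(?m)^" + regexp.QuoteMeta(dco) + ` ([^<]+) <([^<>@]+@[^<>]+)> \(github: (\S+)\)$`)

func main() {
	msg := "Fix ADD caching\n\nDocker-DCO-1.1-Signed-off-by: Joe Smith <joe.smith@email.com> (github: github_handle)\n"
	if m := signOffRe.FindStringSubmatch(msg); m != nil {
		fmt.Printf("signed off by %s <%s> (github: %s)\n", m[1], m[2], m[3])
	} else {
		fmt.Println("commit is not signed off")
	}
}
```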

@@ -49,6 +49,10 @@ func TestGetVersion(t *testing.T) {
if result := v.Get("Version"); result != expected {
t.Errorf("Expected version %s, %s found", expected, result)
}
expected = "application/json"
if result := r.HeaderMap.Get("Content-Type"); result != expected {
t.Errorf("Expected Content-Type %s, %s found", expected, result)
}
}

func TestGetInfo(t *testing.T) {
@@ -84,6 +88,10 @@ func TestGetInfo(t *testing.T) {
if images := i.GetInt("Images"); images != len(initialImages) {
t.Errorf("Expected images: %d, %d found", len(initialImages), images)
}
expected := "application/json"
if result := r.HeaderMap.Get("Content-Type"); result != expected {
t.Errorf("Expected Content-Type %s, %s found", expected, result)
}
}

func TestGetEvents(t *testing.T) {

@@ -592,6 +592,26 @@ func TestBuildADDLocalFileWithoutCache(t *testing.T) {
checkCacheBehavior(t, template, false)
}

func TestBuildADDCurrentDirectoryWithCache(t *testing.T) {
template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
add . /usr/lib/bla
`,
nil, nil}
checkCacheBehavior(t, template, true)
}

func TestBuildADDCurrentDirectoryWithoutCache(t *testing.T) {
template := testContextTemplate{`
from {IMAGE}
maintainer dockerio
add . /usr/lib/bla
`,
nil, nil}
checkCacheBehavior(t, template, false)
}

func TestBuildADDRemoteFileWithCache(t *testing.T) {
template := testContextTemplate{`
from {IMAGE}

@@ -40,7 +40,7 @@ var (
// Niels Bohr is the father of quantum theory. http://en.wikipedia.org/wiki/Niels_Bohr.
// Nikola Tesla invented the AC electric system and every gaget ever used by a James Bond villain. http://en.wikipedia.org/wiki/Nikola_Tesla
// Pierre de Fermat pioneered several aspects of modern mathematics. http://en.wikipedia.org/wiki/Pierre_de_Fermat
// Richard Feynmann was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman
// Richard Feynman was a key contributor to quantum mechanics and particle physics. http://en.wikipedia.org/wiki/Richard_Feynman
// Rob Pike was a key contributor to Unix, Plan 9, the X graphic system, utf-8, and the Go programming language. http://en.wikipedia.org/wiki/Rob_Pike
// Stephen Hawking pioneered the field of cosmology by combining general relativity and quantum mechanics. http://en.wikipedia.org/wiki/Stephen_Hawking
// Steve Wozniak invented the Apple I and Apple II. http://en.wikipedia.org/wiki/Steve_Wozniak
@@ -49,7 +49,7 @@ var (
// http://en.wikipedia.org/wiki/John_Bardeen
// http://en.wikipedia.org/wiki/Walter_Houser_Brattain
// http://en.wikipedia.org/wiki/William_Shockley
right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclide", "newton", "fermat", "archimede", "poincare", "heisenberg", "feynmann", "hawkings", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"}
right = [...]string{"lovelace", "franklin", "tesla", "einstein", "bohr", "davinci", "pasteur", "nobel", "curie", "darwin", "turing", "ritchie", "torvalds", "pike", "thompson", "wozniak", "galileo", "euclid", "newton", "fermat", "archimedes", "poincare", "heisenberg", "feynman", "hawking", "fermi", "pare", "mccarthy", "engelbart", "babbage", "albattani", "ptolemy", "bell", "wright", "lumiere", "morse", "mclean", "brown", "bardeen", "brattain", "shockley"}
)

func GenerateRandomName(checker NameChecker) (string, error) {

@@ -234,7 +234,7 @@ func (s *NetlinkSocket) Send(request *NetlinkRequest) error {
return nil
}

func (s *NetlinkSocket) Recieve() ([]syscall.NetlinkMessage, error) {
func (s *NetlinkSocket) Receive() ([]syscall.NetlinkMessage, error) {
rb := make([]byte, syscall.Getpagesize())
nr, _, err := syscall.Recvfrom(s.fd, rb, 0)
if err != nil {
@@ -269,7 +269,7 @@ func (s *NetlinkSocket) HandleAck(seq uint32) error {

done:
for {
msgs, err := s.Recieve()
msgs, err := s.Receive()
if err != nil {
return err
}
@@ -500,7 +500,7 @@ func NetworkGetRoutes() ([]*net.IPNet, error) {

done:
for {
msgs, err := s.Recieve()
msgs, err := s.Receive()
if err != nil {
return nil, err
}

server.go (19 changed lines)

@@ -22,7 +22,6 @@ import (
"path"
"path/filepath"
"runtime"
"sort"
"strconv"
"strings"
"sync"
@@ -1697,22 +1696,28 @@ func (srv *Server) ImageGetCached(imgID string, config *Config) (*Image, error)
}

// Store the tree in a map of map (map[parentId][childId])
imageMap := make(map[string][]string)
imageMap := make(map[string]map[string]struct{})
for _, img := range images {
imageMap[img.Parent] = append(imageMap[img.Parent], img.ID)
if _, exists := imageMap[img.Parent]; !exists {
imageMap[img.Parent] = make(map[string]struct{})
}
imageMap[img.Parent][img.ID] = struct{}{}
}
sort.Strings(imageMap[imgID])

// Loop on the children of the given image and check the config
for _, elem := range imageMap[imgID] {
var match *Image
for elem := range imageMap[imgID] {
img, err := srv.runtime.graph.Get(elem)
if err != nil {
return nil, err
}
if CompareConfig(&img.ContainerConfig, config) {
return img, nil
if match == nil || match.Created.Before(img.Created) {
match = img
}
}
}
return nil, nil
return match, nil
}

func (srv *Server) RegisterLinks(container *Container, hostConfig *HostConfig) error {
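
ImageGetCached now keeps scanning every candidate child and returns the one with the most recent Created timestamp instead of stopping at the first config match, which is what makes "Return most recent image from the cache by date" in the changelog work. A reduced sketch of that selection loop, using hypothetical simplified types (Image, matches) rather than the server's real ones:

```go
package main

import (
	"fmt"
	"time"
)

// Image is a simplified stand-in for the server's image type.
type Image struct {
	ID      string
	Created time.Time
}

// newestMatch mirrors the new cache lookup: instead of returning the first
// candidate that matches, keep the most recently created match.
func newestMatch(candidates []Image, matches func(Image) bool) *Image {
	var match *Image
	for i := range candidates {
		img := candidates[i]
		if !matches(img) {
			continue
		}
		if match == nil || match.Created.Before(img.Created) {
			match = &candidates[i]
		}
	}
	return match // nil when nothing matches, like the original
}

func main() {
	now := time.Now()
	imgs := []Image{
		{ID: "aaa", Created: now.Add(-2 * time.Hour)},
		{ID: "bbb", Created: now.Add(-1 * time.Hour)},
		{ID: "ccc", Created: now.Add(-3 * time.Hour)},
	}
	// With every candidate matching, the newest image ("bbb") wins.
	best := newestMatch(imgs, func(Image) bool { return true })
	fmt.Println(best.ID)
}
```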

@@ -15,16 +15,34 @@ import (

type TarSum struct {
io.Reader
tarR *tar.Reader
tarW *tar.Writer
gz *gzip.Writer
bufTar *bytes.Buffer
bufGz *bytes.Buffer
h hash.Hash
sums map[string]string
currentFile string
finished bool
first bool
tarR *tar.Reader
tarW *tar.Writer
gz writeCloseFlusher
bufTar *bytes.Buffer
bufGz *bytes.Buffer
h hash.Hash
sums map[string]string
currentFile string
finished bool
first bool
DisableCompression bool
}

type writeCloseFlusher interface {
io.WriteCloser
Flush() error
}

type nopCloseFlusher struct {
io.Writer
}

func (n *nopCloseFlusher) Close() error {
return nil
}

func (n *nopCloseFlusher) Flush() error {
return nil
}

func (ts *TarSum) encodeHeader(h *tar.Header) error {
@@ -57,7 +75,11 @@ func (ts *TarSum) Read(buf []byte) (int, error) {
ts.bufGz = bytes.NewBuffer([]byte{})
ts.tarR = tar.NewReader(ts.Reader)
ts.tarW = tar.NewWriter(ts.bufTar)
ts.gz = gzip.NewWriter(ts.bufGz)
if !ts.DisableCompression {
ts.gz = gzip.NewWriter(ts.bufGz)
} else {
ts.gz = &nopCloseFlusher{Writer: ts.bufGz}
}
ts.h = sha256.New()
ts.h.Reset()
ts.first = true
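
The DisableCompression switch above works by hiding gzip.Writer and a pass-through writer behind one small interface, so the rest of TarSum never needs to know whether the stream is compressed. A self-contained sketch of that pattern, with a hypothetical newSumWriter helper doing the selection that Read() performs in the diff:

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// writeCloseFlusher is the common shape of gzip.Writer and the no-op wrapper:
// Write, Flush and Close, which is all the surrounding code needs.
type writeCloseFlusher interface {
	io.WriteCloser
	Flush() error
}

// nopCloseFlusher passes writes straight through and makes Close/Flush no-ops.
type nopCloseFlusher struct{ io.Writer }

func (nopCloseFlusher) Close() error { return nil }
func (nopCloseFlusher) Flush() error { return nil }

// newSumWriter is a hypothetical helper: compress into buf, or write into buf
// unchanged when compression is disabled.
func newSumWriter(buf *bytes.Buffer, disableCompression bool) writeCloseFlusher {
	if disableCompression {
		return nopCloseFlusher{Writer: buf}
	}
	return gzip.NewWriter(buf)
}

func main() {
	var plain, packed bytes.Buffer

	w := newSumWriter(&plain, true)
	io.WriteString(w, "hello tarsum")
	w.Close()

	gz := newSumWriter(&packed, false)
	io.WriteString(gz, "hello tarsum")
	gz.Close()

	fmt.Println(plain.Len(), packed.Len()) // uncompressed vs gzip'd sizes
}
```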

@@ -6,7 +6,6 @@ import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"index/suffixarray"
"io"
@@ -46,14 +45,12 @@ func Go(f func() error) chan error {
}

// Request a given URL and return an io.Reader
func Download(url string) (*http.Response, error) {
var resp *http.Response
var err error
func Download(url string) (resp *http.Response, err error) {
if resp, err = http.Get(url); err != nil {
return nil, err
}
if resp.StatusCode >= 400 {
return nil, errors.New("Got HTTP status code >= 400: " + resp.Status)
return nil, fmt.Errorf("Got HTTP status code >= 400: %s", resp.Status)
}
return resp, nil
}