1
0
Fork 0
mirror of https://github.com/moby/moby.git synced 2022-11-09 12:21:53 -05:00

Merge pull request #2467 from dotcloud/release_hotfix

0.6.5 hotfixes
This commit is contained in:
Michael Crosby 2013-10-30 13:21:20 -07:00
commit 23b3747945
7 changed files with 103 additions and 146 deletions

View file

@ -187,9 +187,6 @@ func (b *buildFile) CmdCmd(args string) error {
}
func (b *buildFile) CmdExpose(args string) error {
if strings.Contains(args, ":") {
return fmt.Errorf("EXPOSE cannot be used to bind to a host ip or port")
}
ports := strings.Split(args, " ")
b.config.PortSpecs = append(ports, b.config.PortSpecs...)
return b.commit("", b.config.Cmd, fmt.Sprintf("EXPOSE %v", ports))
@ -433,10 +430,13 @@ func (b *buildFile) commit(id string, autoCmd []string, comment string) error {
}
}
container, _, err := b.runtime.Create(b.config, "")
container, warnings, err := b.runtime.Create(b.config, "")
if err != nil {
return err
}
for _, warning := range warnings {
fmt.Fprintf(b.out, " ---> [Warning] %s\n", warning)
}
b.tmpContainers[container.ID] = struct{}{}
fmt.Fprintf(b.out, " ---> Running in %s\n", utils.TruncateID(container.ID))
id = container.ID

View file

@ -14,12 +14,16 @@ Building a redis container to link as a child of our web application.
Building the redis container
----------------------------
We will use a pre-build version of redis from the index under
the name ``crosbymichael/redis``. If you are interested in the
Dockerfile that was used to build this container here it is.
Let's build a redis image with the following Dockerfile.
.. code-block:: bash
git clone https://github.com/antirez/redis.git
cd redis
git checkout 2.6
# Save this Dockerfile to the root of the redis repository.
# Build redis from source
# Make sure you have the redis source code checked out in
# the same directory as this Dockerfile
@ -34,7 +38,6 @@ Dockerfile that was used to build this container here it is.
ADD . /redis
RUN (cd /redis && make)
RUN (cd /redis && make test)
RUN mkdir -p /redis-data
VOLUME ["/redis-data"]
@ -43,6 +46,9 @@ Dockerfile that was used to build this container here it is.
ENTRYPOINT ["/redis/src/redis-server"]
CMD ["--dir", "/redis-data"]
# docker build our new redis image from source
docker build -t redis-2.6 .
We need to ``EXPOSE`` the default port of 6379 so that our link knows what ports
to connect to our redis container on. If you do not expose any ports for the
@ -54,31 +60,28 @@ Run the redis container
.. code-block:: bash
docker run -d -e PASSWORD=docker -name redis crosbymichael/redis --requirepass=docker
docker run -d -e PASSWORD=docker -name redis redis-2.6 --requirepass docker
This will run our redis container using the default port of 6379 and using docker
as password to secure our service. By specifying the ``-name`` flag on run
we will assign the name ``redis`` to this container.
We can issue all the commands that you would expect; start, stop, attach, using the name.
The name also allows us to link other containers into this one. If you do not specify a
name on docker run, docker will automatically generate a name for your container.
This will run our redis container with the password docker
to secure our service. By specifying the ``-name`` flag on run
we will assign the name ``redis`` to this container. If we do not specify a name for
our container via the ``-name`` flag docker will automatically generate a name for us.
We can issue all the commands that you would expect; start, stop, attach, using the name for our container.
The name also allows us to link other containers into this one.
Linking redis as a child
------------------------
Next we can start a new web application that has a dependency on redis and apply a link
to connect both containers. If you noticed when running our redis service we did not use
the ``-p`` option to publish the redis port to the host system. Redis exposed port 6379
but we did not publish the port. This allows docker to prevent all network traffic to
the redis container except when explicitly specified within a link. This is a big win
for security.
to connect both containers. If you noticed when running our redis server we did not use
the ``-p`` flag to publish the redis port to the host system. Redis exposed port 6379 via the Dockerfile
and this is all we need to establish a link.
Now let's start our web application with a link into redis.
.. code-block:: bash
docker run -t -i -link /redis:db -name webapp ubuntu bash
docker run -t -i -link redis:db -name webapp ubuntu bash
root@4c01db0b339c:/# env
@ -101,22 +104,25 @@ Now lets start our web application with a link into redis.
When we inspect the environment of the linked container we can see a few extra environment
variables have been added. When you specified ``-link /redis:db`` you are telling docker
to link the container named ``/redis`` into this new container with the alias ``db``.
variables have been added. When you specified ``-link redis:db`` you are telling docker
to link the container named ``redis`` into this new container with the alias ``db``.
Environment variables are prefixed with the alias so that the parent container can access
network and environment information from the child.
network and environment information from the containers that are linked into it.
.. code-block:: bash
# The name of the child container
DB_NAME=/webapp/db
# The default protocol, ip, and port of the service running in the container
DB_PORT=tcp://172.17.0.8:6379
# A specific protocol, ip, and port of various services
DB_PORT_6379_TCP=tcp://172.17.0.8:6379
DB_PORT_6379_TCP_PROTO=tcp
DB_PORT_6379_TCP_ADDR=172.17.0.8
DB_PORT_6379_TCP_PORT=6379
# Get environment variables of the container
DB_ENV_PASSWORD=dockerpass

View file

@ -13,113 +13,47 @@ You can use your Docker containers with process managers like ``upstart``,
Introduction
------------
If you want a process manager to manage your containers you will need to run
the docker daemon with the ``-r=false`` so that docker will not automatically
restart your containers when the host is restarted.
When you have finished setting up your image and are happy with your
running container, you may want to use a process manager to manage
it. To help with this, we provide a simple image: ``creack/manager:min``
it. When your run ``docker start -a`` docker will automatically attach
to the process and forward all signals so that the process manager can
detect when a container stops and correctly restart it.
This image takes the container ID as parameter. We also can specify
the kind of process manager and metadata like *Author* and
*Description*. The output will be text suitable for a
configuration file, echoed to stdout. It is up to you to create the
.conf file (for `upstart
<http://upstart.ubuntu.com/cookbook/#job-configuration-file>`_) or
.service file (for `systemd
<http://0pointer.de/public/systemd-man/systemd.service.html>`_) and
put it in the right place for your system.
Here are a few sample scripts for systemd and upstart to integrate with docker.
Usage
-----
Sample Upstart Script
---------------------
.. code-block:: bash
docker run creack/manager:min [OPTIONS] <container id>
description "Redis container"
author "Me"
start on filesystem and started lxc-net and started docker
stop on runlevel [!2345]
respawn
exec docker start -a 0a7e070b698b
.. program:: docker run creack/manager:min
.. cmdoption:: -a="<none>"
Author of the image
.. cmdoption:: -d="<none>"
Description of the image
.. cmdoption:: -t="upstart"
Type of manager requested: ``upstart`` or ``systemd``
Example Output
..............
Sample systemd Script
---------------------
.. code-block:: bash
docker run creack/manager:min -t="systemd" b28605f2f9a4
[Unit]
Description=<none>
Author=<none>
After=docker.service
[Unit]
Description=Redis container
Author=Me
After=docker.service
[Service]
Restart=always
ExecStart=/usr/bin/docker start -a b28605f2f9a4
ExecStop=/usr/bin/docker stop -t 2 b28605f2f9a4
[Service]
Restart=always
ExecStart=/usr/bin/docker start -a 0a7e070b698b
ExecStop=/usr/bin/docker stop -t 2 0a7e070b698b
[Install]
WantedBy=local.target
[Install]
WantedBy=local.target
Development
-----------
The image ``creack/manager:min`` is a ``busybox`` base with the
compiled binary of ``manager.go`` as the :ref:`Entrypoint
<entrypoint_def>`. It is meant to be light and fast to download.
If you would like to change or add things, you can download the full
``creack/manager`` repository that contains ``creack/manager:min`` and
``creack/manager:dev``.
The Dockerfiles and the sources are available in
`/contrib/host_integration
<https://github.com/dotcloud/docker/tree/master/contrib/host_integration>`_.
Upstart
-------
Upstart is the default process manager. The generated script will
start the container after the ``docker`` daemon. If the container
dies, it will respawn. Start/Restart/Stop/Reload are
supported. Reload will send a SIGHUP to the container.
Example (``upstart`` on Debian)
...............................
.. code-block:: bash
CID=$(docker run -d creack/firefo-vnc)
docker run creack/manager:min -a 'Guillaume J. Charmes <guillaume@dotcloud.com>' -d 'Awesome Firefox in VLC' $CID > /etc/init/firefoxvnc.conf
You can now ``start firefoxvnc`` or ``stop firefoxvnc`` and if the container
dies for some reason, upstart will restart it.
Systemd
-------
In order to generate a systemd script, we need to use the ``-t``
option. The generated script will start the container after docker
daemon. If the container dies, it will respawn.
``Start/Restart/Reload/Stop`` are supported.
Example (``systemd`` on Fedora)
...............................
.. code-block:: bash
CID=$(docker run -d creack/firefo-vnc)
docker run creack/manager:min -t systemd -a 'Guillaume J. Charmes <guillaume@dotcloud.com>' -d 'Awesome Firefox in VLC' $CID > /usr/lib/systemd/system/firefoxvnc.service
You can now run ``systemctl start firefoxvnc`` or ``systemctl stop
firefoxvnc`` and if the container dies for some reason, ``systemd``
will restart it.

View file

@ -22,7 +22,7 @@ func NewLink(parent, child *Container, name, bridgeInterface string) (*Link, err
return nil, fmt.Errorf("Cannot link to self: %s == %s", parent.ID, child.ID)
}
if !child.State.Running {
return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.ID, name)
return nil, fmt.Errorf("Cannot link to a non running container: %s AS %s", child.Name, name)
}
ports := make([]Port, len(child.Config.ExposedPorts))

View file

@ -317,22 +317,20 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
return nil, nil, err
}
warnings := []string{}
if img.Config != nil {
if img.Config.PortSpecs != nil && warnings != nil {
for _, p := range img.Config.PortSpecs {
if strings.Contains(p, ":") {
warnings = append(warnings, "This image expects private ports to be mapped to public ports on your host. "+
"This has been deprecated and the public mappings will not be honored."+
"Use -p to publish the ports.")
break
}
}
}
if err := MergeConfig(config, img.Config); err != nil {
return nil, nil, err
}
}
warnings := []string{}
if config.PortSpecs != nil {
for _, p := range config.PortSpecs {
if strings.Contains(p, ":") {
warnings = append(warnings, "The mapping to public ports on your host has been deprecated. Use -p to publish the ports.")
break
}
}
}
if len(config.Entrypoint) != 0 && config.Cmd == nil {
config.Cmd = []string{}
@ -357,6 +355,9 @@ func (runtime *Runtime) Create(config *Config, name string) (*Container, []strin
// Set the entity in the graph using the default name specified
if _, err := runtime.containerGraph.Set(name, id); err != nil {
if strings.HasSuffix(err.Error(), "name are not unique") {
return nil, nil, fmt.Errorf("Conflict, %s already exists.", name)
}
return nil, nil, err
}

View file

@ -260,6 +260,21 @@ func TestRuntimeCreate(t *testing.T) {
if err != nil {
t.Error(err)
}
// test expose 80:8000
container, warnings, err := runtime.Create(&Config{
Image: GetTestImage(runtime).ID,
Cmd: []string{"ls", "-al"},
PortSpecs: []string{"80:8000"},
},
"",
)
if err != nil {
t.Fatal(err)
}
if warnings == nil || len(warnings) != 1 {
t.Error("Expected a warning, got none")
}
}
func TestDestroy(t *testing.T) {

View file

@ -84,13 +84,13 @@ func (srv *Server) ContainerKill(name string, sig int) error {
// If no signal is passed, perform regular Kill (SIGKILL + wait())
if sig == 0 {
if err := container.Kill(); err != nil {
return fmt.Errorf("Error killing container %s: %s", name, err)
return fmt.Errorf("Cannot kill container %s: %s", name, err)
}
srv.LogEvent("kill", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
} else {
// Otherwise, just send the requested signal
if err := container.kill(sig); err != nil {
return fmt.Errorf("Error killing container %s: %s", name, err)
return fmt.Errorf("Cannot kill container %s: %s", name, err)
}
// FIXME: Add event for signals
}
@ -187,7 +187,7 @@ func (srv *Server) ImagesViz(out io.Writer) error {
for _, image := range images {
parentImage, err = image.GetParent()
if err != nil {
return fmt.Errorf("Error while getting parent image: %v", err)
return err
}
if parentImage != nil {
out.Write([]byte(" \"" + parentImage.ShortID() + "\" -> \"" + image.ShortID() + "\"\n"))
@ -335,7 +335,7 @@ func (srv *Server) ContainerTop(name, ps_args string) (*APITop, error) {
if container := srv.runtime.Get(name); container != nil {
output, err := exec.Command("lxc-ps", "--name", container.ID, "--", ps_args).CombinedOutput()
if err != nil {
return nil, fmt.Errorf("Error trying to use lxc-ps: %s (%s)", err, output)
return nil, fmt.Errorf("lxc-ps: %s (%s)", err, output)
}
procs := APITop{}
for i, line := range strings.Split(string(output), "\n") {
@ -346,7 +346,7 @@ func (srv *Server) ContainerTop(name, ps_args string) (*APITop, error) {
scanner := bufio.NewScanner(strings.NewReader(line))
scanner.Split(bufio.ScanWords)
if !scanner.Scan() {
return nil, fmt.Errorf("Error trying to use lxc-ps")
return nil, fmt.Errorf("Wrong output using lxc-ps")
}
// no scanner.Text because we skip container id
for scanner.Scan() {
@ -819,7 +819,7 @@ func (srv *Server) pushImage(r *registry.Registry, out io.Writer, remote, imgID,
out = utils.NewWriteFlusher(out)
jsonRaw, err := ioutil.ReadFile(path.Join(srv.runtime.graph.Root, imgID, "json"))
if err != nil {
return "", fmt.Errorf("Error while retrieving the path for {%s}: %s", imgID, err)
return "", fmt.Errorf("Cannot retrieve the path for {%s}: %s", imgID, err)
}
out.Write(sf.FormatStatus("", "Pushing %s", imgID))
@ -969,7 +969,7 @@ func (srv *Server) ContainerCreate(config *Config, name string) (string, []strin
func (srv *Server) ContainerRestart(name string, t int) error {
if container := srv.runtime.Get(name); container != nil {
if err := container.Restart(t); err != nil {
return fmt.Errorf("Error restarting container %s: %s", name, err)
return fmt.Errorf("Cannot restart container %s: %s", name, err)
}
srv.LogEvent("restart", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
} else {
@ -986,9 +986,10 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
return fmt.Errorf("No such link: %s", name)
}
name = srv.runtime.getFullName(name)
parent, n := path.Split(name)
if parent == "/" {
return fmt.Errorf("Conflict, cannot remove the default name of the container")
}
pe := srv.runtime.containerGraph.Get(parent)
if pe == nil {
return fmt.Errorf("Cannot get parent %s for name %s", parent, name)
@ -1021,7 +1022,7 @@ func (srv *Server) ContainerDestroy(name string, removeVolume, removeLink bool)
volumes[volumeId] = struct{}{}
}
if err := srv.runtime.Destroy(container); err != nil {
return fmt.Errorf("Error destroying container %s: %s", name, err)
return fmt.Errorf("Cannot destroy container %s: %s", name, err)
}
srv.LogEvent("destroy", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
@ -1165,7 +1166,7 @@ func (srv *Server) ImageDelete(name string, autoPrune bool) ([]APIRmi, error) {
}
if !autoPrune {
if err := srv.runtime.graph.Delete(img.ID); err != nil {
return nil, fmt.Errorf("Error deleting image %s: %s", name, err)
return nil, fmt.Errorf("Cannot delete image %s: %s", name, err)
}
return nil, nil
}
@ -1252,7 +1253,7 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
}
if err := container.Start(hostConfig); err != nil {
return fmt.Errorf("Error starting container %s: %s", name, err)
return fmt.Errorf("Cannot start container %s: %s", name, err)
}
srv.LogEvent("start", container.ShortID(), runtime.repositories.ImageName(container.Image))
@ -1262,7 +1263,7 @@ func (srv *Server) ContainerStart(name string, hostConfig *HostConfig) error {
func (srv *Server) ContainerStop(name string, t int) error {
if container := srv.runtime.Get(name); container != nil {
if err := container.Stop(t); err != nil {
return fmt.Errorf("Error stopping container %s: %s", name, err)
return fmt.Errorf("Cannot stop container %s: %s", name, err)
}
srv.LogEvent("stop", container.ShortID(), srv.runtime.repositories.ImageName(container.Image))
} else {